Arm NN 22.08 — source listing of ExecuteNetworkParams.cpp (extracted from the generated documentation; some hyperlinked source lines were lost in extraction).
1 //
2 // Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
7 
9 #include <armnn/Logging.hpp>
10 
11 #include <fmt/format.h>
13 
14 void CheckClTuningParameter(const int& tuningLevel,
15  const std::string& tuningPath,
16  const std::vector<armnn::BackendId> computeDevices)
17 {
18  if (!tuningPath.empty())
19  {
20  if (tuningLevel == 0)
21  {
22  ARMNN_LOG(info) << "Using cl tuning file: " << tuningPath << "\n";
23  if (!ValidatePath(tuningPath, true))
24  {
25  throw armnn::InvalidArgumentException("The tuning path is not valid");
26  }
27  }
28  else if ((1 <= tuningLevel) && (tuningLevel <= 3))
29  {
30  ARMNN_LOG(info) << "Starting execution to generate a cl tuning file: " << tuningPath << "\n"
31  << "Tuning level in use: " << tuningLevel << "\n";
32  }
33  else if ((0 < tuningLevel) || (tuningLevel > 3))
34  {
35  throw armnn::InvalidArgumentException(fmt::format("The tuning level {} is not valid.",
36  tuningLevel));
37  }
38 
39  // Ensure that a GpuAcc is enabled. Otherwise no tuning data are used or genereted
40  // Only warn if it's not enabled
41  auto it = std::find(computeDevices.begin(), computeDevices.end(), "GpuAcc");
42  if (it == computeDevices.end())
43  {
44  ARMNN_LOG(warning) << "To use Cl Tuning the compute device GpuAcc needs to be active.";
45  }
46  }
47 }
48 
50 {
51  if (m_DynamicBackendsPath == "")
52  {
53  // Check compute devices are valid unless they are dynamically loaded at runtime
54  std::string invalidBackends;
56  {
57  ARMNN_LOG(fatal) << "The list of preferred devices contains invalid backend IDs: "
58  << invalidBackends;
59  }
60  }
62 
64  {
65  throw armnn::InvalidArgumentException("BFloat16 and Float16 turbo mode cannot be "
66  "enabled at the same time.");
67  }
68 
69  // Check input tensor shapes
70  if ((m_InputTensorShapes.size() != 0) &&
71  (m_InputTensorShapes.size() != m_InputNames.size()))
72  {
73  throw armnn::InvalidArgumentException("input-name and input-tensor-shape must have "
74  "the same amount of elements. ");
75  }
76 
77  if (m_InputTensorDataFilePaths.size() != 0)
78  {
80  {
81  throw armnn::InvalidArgumentException("One or more input data file paths are not valid.");
82  }
83 
84  if (m_InputTensorDataFilePaths.size() < m_InputNames.size())
85  {
87  fmt::format("According to the number of input names the user provided the network has {} "
88  "inputs. But only {} input-tensor-data file paths were provided. Each input of the "
89  "model is expected to be stored in it's own file.",
90  m_InputNames.size(),
92  }
93  }
94 
95  // Check that threshold time is not less than zero
96  if (m_ThresholdTime < 0)
97  {
98  throw armnn::InvalidArgumentException("Threshold time supplied as a command line argument is less than zero.");
99  }
100 
101  // Warn if ExecuteNetwork will generate dummy input data
103  {
104  ARMNN_LOG(warning) << "No input files provided, input tensors will be filled with 0s.";
105  }
106 
108  {
109  throw armnn::InvalidArgumentException("infer-output-shape and allow-expanded-dims cannot be used together.");
110  }
111 }
112 
#if defined(ARMNN_TFLITE_DELEGATE)
/**
 * A utility method that populates a DelegateOptions object from this ExecuteNetworkParams.
 *
 * NOTE(review): the extracted listing dropped the hyperlinked lines of this
 * function (the DelegateOptions construction and several option assignments);
 * they are reconstructed below from the file's cross-reference list
 * (SetDynamicBackendsPath, SetInternalProfilingParams, m_ReduceFp32ToFp16,
 * m_ReduceFp32ToBf16, m_shapeInferenceMethod) — confirm against the upstream
 * 22.08 source.
 *
 * @return a populated armnnDelegate::DelegateOptions object.
 */
armnnDelegate::DelegateOptions ExecuteNetworkParams::ToDelegateOptions() const
{
    armnnDelegate::DelegateOptions delegateOptions(m_ComputeDevices);
    delegateOptions.SetDynamicBackendsPath(m_DynamicBackendsPath);
    delegateOptions.SetGpuProfilingState(m_EnableProfiling);

    armnn::OptimizerOptions options;
    options.m_ReduceFp32ToFp16 = m_EnableFp16TurboMode;
    options.m_ReduceFp32ToBf16 = m_EnableBf16TurboMode;
    options.m_Debug = m_PrintIntermediate;
    options.m_ProfilingEnabled = m_EnableProfiling;
    delegateOptions.SetInternalProfilingParams(m_EnableProfiling,
                                               armnn::ProfilingDetailsMethod::DetailsWithEvents);
    if (m_InferOutputShape)
    {
        options.m_shapeInferenceMethod = armnn::ShapeInferenceMethod::InferAndValidate;
    }

    armnn::BackendOptions gpuAcc("GpuAcc",
    {
        { "FastMathEnabled", m_EnableFastMath },
        { "SaveCachedNetwork", m_SaveCachedNetwork },
        { "CachedNetworkFilePath", m_CachedNetworkFilePath },
        { "TuningLevel", m_TuningLevel},
        { "TuningFile", m_TuningPath.c_str()},
        { "KernelProfilingEnabled", m_EnableProfiling},
        { "MLGOTuningFilePath", m_MLGOTuningFilePath}
    });

    armnn::BackendOptions cpuAcc("CpuAcc",
    {
        { "FastMathEnabled", m_EnableFastMath },
        { "NumberOfThreads", m_NumberOfThreads }
    });
    options.m_ModelOptions.push_back(gpuAcc);
    options.m_ModelOptions.push_back(cpuAcc);

    if (m_InferOutputShape)
    {
        armnn::BackendOptions networkOption("ShapeInferenceMethod",
        {
            {"InferAndValidate", true}
        });
        options.m_ModelOptions.push_back(networkOption);
    }
    if (m_AllowExpandedDims)
    {
        armnn::BackendOptions networkOption("AllowExpandedDims",
        {
            {"AllowExpandedDims", true}
        });
        options.m_ModelOptions.push_back(networkOption);
    }
    delegateOptions.SetOptimizerOptions(options);

    return delegateOptions;
}

#endif
ModelOptions m_ModelOptions
Definition: INetwork.hpp:227
ShapeInferenceMethod m_shapeInferenceMethod
Definition: INetwork.hpp:221
std::vector< armnn::TensorShape > m_InputTensorShapes
#define ARMNN_LOG(severity)
Definition: Logging.hpp:205
bool m_ReduceFp32ToBf16
Reduces all Fp32 operators in the model to Bf16 for faster processing.
Definition: INetwork.hpp:218
void CheckClTuningParameter(const int &tuningLevel, const std::string &tuningPath, const std::vector< armnn::BackendId > computeDevices)
std::vector< armnn::BackendId > m_ComputeDevices
bool m_ReduceFp32ToFp16
Reduces all Fp32 operators in the model to Fp16 for faster processing.
Definition: INetwork.hpp:208
bool CheckRequestedBackendsAreValid(const std::vector< armnn::BackendId > &backendIds, armnn::Optional< std::string &> invalidBackendIds=armnn::EmptyOptional())
std::vector< std::string > m_InputNames
Validate all output shapes.
bool ValidatePaths(const std::vector< std::string > &fileVec, const bool expectFile)
Verifies that every string in a given vector is a valid path.
ArmNN performs an optimization on each model/network before it gets loaded for execution.
Definition: INetwork.hpp:127
std::vector< std::string > m_InputTensorDataFilePaths
Struct for the users to pass backend specific options.
void SetDynamicBackendsPath(const std::string &dynamicBackendsPath)
Infer missing output shapes and validate all output shapes.
void SetOptimizerOptions(const armnn::OptimizerOptions &optimizerOptions)
void SetGpuProfilingState(bool gpuProfilingState)
bool ValidatePath(const std::string &file, const bool expectFile)
Verifies if the given string is a valid path.
void SetInternalProfilingParams(bool internalProfilingState, const armnn::ProfilingDetailsMethod &internalProfilingDetail)