ArmNN 21.05
ExecuteNetworkParams.cpp
//
// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "ExecuteNetworkParams.hpp"

#include "NetworkExecutionUtils/NetworkExecutionUtils.hpp"
#include <InferenceModel.hpp>
#include <armnn/Logging.hpp>

#include <fmt/format.h>

bool IsModelBinary(const std::string& modelFormat)
{
    // Parse the model binary flag from the model-format string we got from the command line
    if (modelFormat.find("binary") != std::string::npos)
    {
        return true;
    }
    else if (modelFormat.find("txt") != std::string::npos || modelFormat.find("text") != std::string::npos)
    {
        return false;
    }
    else
    {
        throw armnn::InvalidArgumentException(fmt::format("Unknown model format: '{}'. "
                                                          "Please include 'binary' or 'text'",
                                                          modelFormat));
    }
}
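
// Example (editor's sketch, not part of the original file): IsModelBinary()
// keys on substrings of the model-format value, so hypothetical inputs
// behave as follows:
//   IsModelBinary("tflite-binary");  // true  ("binary" found)
//   IsModelBinary("onnx-text");      // false ("text" found)
//   IsModelBinary("armnn");          // throws armnn::InvalidArgumentException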

void CheckModelFormat(const std::string& modelFormat)
{
    // Forward to implementation based on the parser type
    if (modelFormat.find("armnn") != std::string::npos)
    {
#if defined(ARMNN_SERIALIZER)
#else
        throw armnn::InvalidArgumentException("Can't run model in armnn format without ArmNN "
                                              "built with serialization support.");
#endif
    }
    else if (modelFormat.find("onnx") != std::string::npos)
    {
#if defined(ARMNN_ONNX_PARSER)
#else
        throw armnn::InvalidArgumentException("Can't run model in onnx format without ArmNN "
                                              "built with Onnx parser support.");
#endif
    }
    else if (modelFormat.find("tflite") != std::string::npos)
    {
#if defined(ARMNN_TF_LITE_PARSER)
        if (!IsModelBinary(modelFormat))
        {
            throw armnn::InvalidArgumentException(fmt::format("Unknown model format: '{}'. Only 'binary' "
                                                              "format supported for tflite files",
                                                              modelFormat));
        }
#elif defined(ARMNN_TFLITE_DELEGATE)
#else
        throw armnn::InvalidArgumentException("Can't run model in tflite format without ArmNN "
                                              "built with Tensorflow Lite parser support.");
#endif
    }
    else
    {
        throw armnn::InvalidArgumentException(fmt::format("Unknown model format: '{}'. "
                                                          "Please include 'armnn', 'tflite' or 'onnx'",
                                                          modelFormat));
    }
}
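
// Example (editor's sketch, not part of the original file): the preprocessor
// guards above tie each model format to the ArmNN build options that must be
// enabled for it:
//   "armnn-binary"  -> needs ARMNN_SERIALIZER
//   "onnx-binary"   -> needs ARMNN_ONNX_PARSER
//   "tflite-binary" -> needs ARMNN_TF_LITE_PARSER (binary only) or ARMNN_TFLITE_DELEGATE
// Any other format string falls through to the final InvalidArgumentException.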

void CheckClTuningParameter(const int& tuningLevel,
                            const std::string& tuningPath,
                            const std::vector<armnn::BackendId> computeDevices)
{
    if (!tuningPath.empty())
    {
        if (tuningLevel == 0)
        {
            ARMNN_LOG(info) << "Using cl tuning file: " << tuningPath << "\n";
            if (!ValidatePath(tuningPath, true))
            {
                throw armnn::InvalidArgumentException("The tuning path is not valid");
            }
        }
        else if ((1 <= tuningLevel) && (tuningLevel <= 3))
        {
            ARMNN_LOG(info) << "Starting execution to generate a cl tuning file: " << tuningPath << "\n"
                            << "Tuning level in use: " << tuningLevel << "\n";
        }
        else if ((tuningLevel < 0) || (tuningLevel > 3))
        {
            throw armnn::InvalidArgumentException(fmt::format("The tuning level {} is not valid.",
                                                              tuningLevel));
        }

        // Ensure that GpuAcc is enabled. Otherwise no tuning data is used or generated.
        // Only warn if it's not enabled.
        auto it = std::find(computeDevices.begin(), computeDevices.end(), "GpuAcc");
        if (it == computeDevices.end())
        {
            ARMNN_LOG(warning) << "To use Cl Tuning the compute device GpuAcc needs to be active.";
        }
    }
}
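
// Example (editor's sketch, not part of the original file): tuning level 0
// loads an existing tuning file, levels 1-3 generate one, and other values
// are rejected. "tuned.bin" is a hypothetical path:
//   CheckClTuningParameter(0, "tuned.bin", { "GpuAcc" });  // use file (throws if path invalid)
//   CheckClTuningParameter(2, "tuned.bin", { "GpuAcc" });  // generate a file at level 2
//   CheckClTuningParameter(7, "tuned.bin", { "GpuAcc" });  // throws: invalid level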

void ExecuteNetworkParams::ValidateParams()
{
    // Set to true if it is preferred to throw an exception rather than use ARMNN_LOG
    bool throwExc = false;

    try
    {
        if (m_DynamicBackendsPath == "")
        {
            // Check compute devices are valid unless they are dynamically loaded at runtime
            std::string invalidBackends;
            if (!CheckRequestedBackendsAreValid(m_ComputeDevices, armnn::Optional<std::string&>(invalidBackends)))
            {
                ARMNN_LOG(fatal) << "The list of preferred devices contains invalid backend IDs: "
                                 << invalidBackends;
            }
        }

        CheckClTuningParameter(m_TuningLevel, m_TuningPath, m_ComputeDevices);

        // Check turbo modes
        if (m_EnableBf16TurboMode && m_EnableFp16TurboMode)
        {
            ARMNN_LOG(fatal) << "BFloat16 and Float16 turbo mode cannot be enabled at the same time.";
        }

        m_IsModelBinary = IsModelBinary(m_ModelFormat);

        CheckModelFormat(m_ModelFormat);

        // Check number of simultaneous iterations
        if (m_SimultaneousIterations < 1)
        {
            ARMNN_LOG(fatal) << "simultaneous-iterations cannot be less than 1.";
        }

        // Check input tensor shapes
        if ((m_InputTensorShapes.size() != 0) &&
            (m_InputTensorShapes.size() != m_InputNames.size()))
        {
            ARMNN_LOG(fatal) << "input-name and input-tensor-shape must have the same amount of elements.";
        }

        if (m_InputTensorDataFilePaths.size() != 0)
        {
            if (!ValidatePaths(m_InputTensorDataFilePaths, true))
            {
                ARMNN_LOG(fatal) << "One or more input data file paths are not valid.";
            }

            if (!m_Concurrent && m_InputTensorDataFilePaths.size() != m_InputNames.size())
            {
                ARMNN_LOG(fatal) << "input-name and input-tensor-data must have the same amount of elements.";
            }

            if (m_InputTensorDataFilePaths.size() < m_SimultaneousIterations * m_InputNames.size())
            {
                ARMNN_LOG(fatal) << "There is not enough input data for " << m_SimultaneousIterations
                                 << " executions.";
            }
            if (m_InputTensorDataFilePaths.size() > m_SimultaneousIterations * m_InputNames.size())
            {
                ARMNN_LOG(fatal) << "There is more input data than needed for " << m_SimultaneousIterations
                                 << " executions.";
            }
        }

        if ((m_OutputTensorFiles.size() != 0) &&
            (m_OutputTensorFiles.size() != m_OutputNames.size()))
        {
            ARMNN_LOG(fatal) << "output-name and write-outputs-to-file must have the same amount of elements.";
        }

        if ((m_OutputTensorFiles.size() != 0)
            && m_SimultaneousIterations != m_OutputTensorFiles.size() / m_OutputNames.size())
        {
            ARMNN_LOG(fatal) << "There is not enough output data for " << m_SimultaneousIterations
                             << " executions.";
        }

        if (m_InputTypes.size() == 0)
        {
            // Default the type of all inputs to "float"
            m_InputTypes.assign(m_InputNames.size(), "float");
        }
        else if ((m_InputTypes.size() != 0) &&
                 (m_InputTypes.size() != m_InputNames.size()))
        {
            ARMNN_LOG(fatal) << "input-name and input-type must have the same amount of elements.";
        }

        if (m_OutputTypes.size() == 0)
        {
            // Default the type of all outputs to "float"
            m_OutputTypes.assign(m_OutputNames.size(), "float");
        }
        else if ((m_OutputTypes.size() != 0) &&
                 (m_OutputTypes.size() != m_OutputNames.size()))
        {
            ARMNN_LOG(fatal) << "output-name and output-type must have the same amount of elements.";
        }

        // Check that threshold time is not less than zero
        if (m_ThresholdTime < 0)
        {
            ARMNN_LOG(fatal) << "Threshold time supplied as a command line argument is less than zero.";
        }
    }
    catch (std::string& exc)
    {
        if (throwExc)
        {
            throw armnn::InvalidArgumentException(exc);
        }
        else
        {
            std::cout << exc;
            exit(EXIT_FAILURE);
        }
    }

    // Warn if ExecuteNetwork will generate dummy input data
    if (m_GenerateTensorData)
    {
        ARMNN_LOG(warning) << "No input files provided, input tensors will be filled with 0s.";
    }
}
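
// Usage sketch (editor's assumption, not part of the original file):
// ExecuteNetwork normally fills an ExecuteNetworkParams from its parsed
// command-line options and calls ValidateParams() before building the
// network. The option values below are hypothetical:
//
//   ExecuteNetworkParams params;
//   params.m_ModelFormat    = "tflite-binary";
//   params.m_ModelPath      = "model.tflite";
//   params.m_ComputeDevices = { "CpuRef" };
//   params.ValidateParams(); // logs fatal errors / exits on bad option combinations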