ArmNN 21.02
ExecuteNetworkParams.cpp
//
// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "ExecuteNetworkParams.hpp"

#include "NetworkExecutionUtils/NetworkExecutionUtils.hpp"
#include <InferenceModel.hpp>
#include <armnn/Logging.hpp>

#include <fmt/format.h>

bool IsModelBinary(const std::string& modelFormat)
{
    // Parse model binary flag from the model-format string we got from the command-line
    if (modelFormat.find("binary") != std::string::npos)
    {
        return true;
    }
    else if (modelFormat.find("txt") != std::string::npos || modelFormat.find("text") != std::string::npos)
    {
        return false;
    }
    else
    {
        throw armnn::InvalidArgumentException(fmt::format("Unknown model format: '{}'. "
                                                          "Please include 'binary' or 'text'",
                                                          modelFormat));
    }
}
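
As a quick illustration of the substring matching above, here is a hypothetical snippet (not part of this file) that assumes IsModelBinary() is in scope:

// Hypothetical usage sketch, assuming IsModelBinary() from above is visible.
#include <cassert>

void SketchIsModelBinary()
{
    assert(IsModelBinary("tflite-binary"));   // contains "binary" -> true
    assert(!IsModelBinary("onnx-text"));      // contains "text"   -> false
    // IsModelBinary("caffe") would throw armnn::InvalidArgumentException,
    // since the string contains neither "binary", "txt" nor "text".
}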

void CheckModelFormat(const std::string& modelFormat)
{
    // Forward to implementation based on the parser type
    if (modelFormat.find("armnn") != std::string::npos)
    {
#if defined(ARMNN_SERIALIZER)
#else
        throw armnn::InvalidArgumentException("Can't run model in armnn format without an "
                                              "ArmNN build with serialization support.");
#endif
    }
    else if (modelFormat.find("caffe") != std::string::npos)
    {
#if defined(ARMNN_CAFFE_PARSER)
#else
        throw armnn::InvalidArgumentException("Can't run model in caffe format without an "
                                              "ArmNN build with Caffe parser support.");
#endif
    }
    else if (modelFormat.find("onnx") != std::string::npos)
    {
#if defined(ARMNN_ONNX_PARSER)
#else
        throw armnn::InvalidArgumentException("Can't run model in onnx format without an "
                                              "ArmNN build with Onnx parser support.");
#endif
    }
    else if (modelFormat.find("tensorflow") != std::string::npos)
    {
#if defined(ARMNN_TF_PARSER)
#else
        throw armnn::InvalidArgumentException("Can't run model in tensorflow format without an "
                                              "ArmNN build with Tensorflow parser support.");
#endif
    }
    else if (modelFormat.find("tflite") != std::string::npos)
    {
#if defined(ARMNN_TF_LITE_PARSER)
        if (!IsModelBinary(modelFormat))
        {
            throw armnn::InvalidArgumentException(fmt::format("Unknown model format: '{}'. Only 'binary' "
                                                              "format supported for tflite files",
                                                              modelFormat));
        }
#elif defined(ARMNN_TFLITE_DELEGATE)
#else
        throw armnn::InvalidArgumentException("Can't run model in tflite format without an "
                                              "ArmNN build with Tensorflow Lite parser support.");
#endif
    }
    else
    {
        throw armnn::InvalidArgumentException(fmt::format("Unknown model format: '{}'. "
                                                          "Please include 'caffe', 'tensorflow', 'tflite' or 'onnx'",
                                                          modelFormat));
    }
}
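
Note that the branches with an empty #if body mean the corresponding parser is compiled in and there is nothing further to check; only builds lacking that parser throw. A hypothetical caller (not part of this file) might surface the exception like this:

// Hypothetical sketch, assuming CheckModelFormat() from above is in scope.
#include <iostream>

void SketchCheckModelFormat()
{
    try
    {
        // Throws on builds with the TfLite parser, which accepts only 'binary'.
        CheckModelFormat("tflite-text");
    }
    catch (const armnn::InvalidArgumentException& e)
    {
        std::cerr << e.what() << std::endl;
    }
}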

void CheckClTuningParameter(const int& tuningLevel,
                            const std::string& tuningPath,
                            const std::vector<armnn::BackendId> computeDevices)
{
    if (!tuningPath.empty())
    {
        if (tuningLevel == 0)
        {
            ARMNN_LOG(info) << "Using cl tuning file: " << tuningPath << "\n";
            if (!ValidatePath(tuningPath, true))
            {
                throw armnn::InvalidArgumentException("The tuning path is not valid");
            }
        }
        else if ((1 <= tuningLevel) && (tuningLevel <= 3))
        {
            ARMNN_LOG(info) << "Starting execution to generate a cl tuning file: " << tuningPath << "\n"
                            << "Tuning level in use: " << tuningLevel << "\n";
        }
        else if ((tuningLevel < 0) || (tuningLevel > 3))
        {
            throw armnn::InvalidArgumentException(fmt::format("The tuning level {} is not valid.",
                                                              tuningLevel));
        }

        // Ensure that GpuAcc is enabled. Otherwise no tuning data is used or generated.
        // Only warn if it's not enabled.
        auto it = std::find(computeDevices.begin(), computeDevices.end(), "GpuAcc");
        if (it == computeDevices.end())
        {
            ARMNN_LOG(warning) << "To use Cl Tuning the compute device GpuAcc needs to be active.";
        }
    }
}
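
In other words, a valid configuration pairs a tuning level of 0 (use an existing file) or 1 to 3 (generate a file) with a GpuAcc backend. A hypothetical call (the file path is made up):

// Hypothetical sketch, assuming CheckClTuningParameter() from above is in scope.
#include <vector>
#include <armnn/BackendId.hpp>

void SketchClTuning()
{
    std::vector<armnn::BackendId> devices = { "GpuAcc", "CpuRef" };
    CheckClTuningParameter(2, "tuned_network.bin", devices);   // logs the tuning level in use
    // CheckClTuningParameter(5, "tuned_network.bin", devices) would throw,
    // because 5 is outside the valid tuning-level range of 0 to 3.
}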

void ExecuteNetworkParams::ValidateParams()
{
    // Set to true if it is preferred to throw an exception rather than use ARMNN_LOG
    bool throwExc = false;

    try
    {
        if (m_DynamicBackendsPath == "")
        {
            // Check compute devices are valid unless they are dynamically loaded at runtime
            std::string invalidBackends;
            if (!CheckRequestedBackendsAreValid(m_ComputeDevices, armnn::Optional<std::string&>(invalidBackends)))
            {
                ARMNN_LOG(fatal) << "The list of preferred devices contains invalid backend IDs: "
                                 << invalidBackends;
            }
        }

        CheckClTuningParameter(m_TuningLevel, m_TuningPath, m_ComputeDevices);

        if (m_EnableBf16TurboMode && m_EnableFp16TurboMode)
        {
            ARMNN_LOG(fatal) << "BFloat16 and Float16 turbo mode cannot be enabled at the same time.";
        }

        m_IsModelBinary = IsModelBinary(m_ModelFormat);

        CheckModelFormat(m_ModelFormat);

        // Check input tensor shapes
        if ((m_InputTensorShapes.size() != 0) &&
            (m_InputTensorShapes.size() != m_InputNames.size()))
        {
            ARMNN_LOG(fatal) << "input-name and input-tensor-shape must have the same amount of elements.";
        }

        if (m_InputTensorDataFilePaths.size() != 0)
        {
            if (!ValidatePaths(m_InputTensorDataFilePaths, true))
            {
                ARMNN_LOG(fatal) << "One or more input data file paths are not valid.";
            }

            if (m_InputTensorDataFilePaths.size() != m_InputNames.size())
            {
                ARMNN_LOG(fatal) << "input-name and input-tensor-data must have the same amount of elements.";
            }
        }

        if ((m_OutputTensorFiles.size() != 0) &&
            (m_OutputTensorFiles.size() != m_OutputNames.size()))
        {
            ARMNN_LOG(fatal) << "output-name and write-outputs-to-file must have the same amount of elements.";
        }

        if (m_InputTypes.size() == 0)
        {
            // Default the type of all inputs to "float"
            m_InputTypes.assign(m_InputNames.size(), "float");
        }
        else if ((m_InputTypes.size() != 0) &&
                 (m_InputTypes.size() != m_InputNames.size()))
        {
            ARMNN_LOG(fatal) << "input-name and input-type must have the same amount of elements.";
        }

        if (m_OutputTypes.size() == 0)
        {
            // Default the type of all outputs to "float"
            m_OutputTypes.assign(m_OutputNames.size(), "float");
        }
        else if ((m_OutputTypes.size() != 0) &&
                 (m_OutputTypes.size() != m_OutputNames.size()))
        {
            ARMNN_LOG(fatal) << "output-name and output-type must have the same amount of elements.";
        }

        // Check that threshold time is not less than zero
        if (m_ThresholdTime < 0)
        {
            ARMNN_LOG(fatal) << "Threshold time supplied as a command line argument is less than zero.";
        }
    }
    catch (std::string& exc)
    {
        if (throwExc)
        {
            throw armnn::InvalidArgumentException(exc);
        }
        else
        {
            std::cout << exc;
            exit(EXIT_FAILURE);
        }
    }

    // Warn if ExecuteNetwork will generate dummy input data
    if (m_GenerateTensorData)
    {
        ARMNN_LOG(warning) << "No input files provided, input tensors will be filled with 0s.";
    }
}
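
For context, this is roughly how a driver could populate the parameters and run validation; a minimal sketch assuming the member names declared in ExecuteNetworkParams.hpp, not the actual ExecuteNetwork main():

// Minimal hypothetical driver; field names match the members referenced above.
#include "ExecuteNetworkParams.hpp"

int main()
{
    ExecuteNetworkParams params;
    params.m_ModelFormat    = "tflite-binary";
    params.m_ComputeDevices = { "CpuRef" };
    params.m_InputNames     = { "input" };
    params.m_OutputNames    = { "output" };

    // Logs fatal errors (or throws, had throwExc been set) for bad combinations.
    params.ValidateParams();
    return 0;
}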