#if defined(ARMNN_SERIALIZER)
#include "armnnDeserializer/IDeserializer.hpp"
#endif
#if defined(ARMNN_TF_LITE_PARSER)
#include "armnnTfLiteParser/ITfLiteParser.hpp"
#endif
#if defined(ARMNN_ONNX_PARSER)
#include "armnnOnnxParser/IOnnxParser.hpp"
#endif
#if defined(ARMNN_TFLITE_DELEGATE)
#include <armnn_delegate.hpp>
#include <DelegateOptions.hpp>

#include <tensorflow/lite/builtin_ops.h>
#include <tensorflow/lite/c/builtin_op_data.h>
#include <tensorflow/lite/c/common.h>
#include <tensorflow/lite/optional_debug_tools.h>
#include <tensorflow/lite/kernels/builtin_op_kernels.h>
#include <tensorflow/lite/interpreter.h>
#include <tensorflow/lite/kernels/register.h>
#endif

// Given a measured duration and a threshold time, tell the user whether we succeeded or not.
bool CheckInferenceTimeThreshold(const std::chrono::duration<double, std::milli>& duration,
                                 const double& thresholdTime)
{
    ARMNN_LOG(info) << "Inference time: " << std::setprecision(2)
                    << std::fixed << duration.count() << " ms\n";

    // A threshold of 0.0 disables the check.
    if (thresholdTime != 0.0)
    {
        ARMNN_LOG(info) << "Threshold time: " << std::setprecision(2)
                        << std::fixed << thresholdTime << " ms";
        auto thresholdMinusInference = thresholdTime - duration.count();
        ARMNN_LOG(info) << "Threshold time - Inference time: " << std::setprecision(2)
                        << std::fixed << thresholdMinusInference << " ms" << "\n";

        if (thresholdMinusInference < 0)
        {
            std::string errorMessage = "Elapsed inference time is greater than provided threshold time.";
            ARMNN_LOG(fatal) << errorMessage;
            return false;
        }
    }
    return true;
}
#if defined(ARMNN_TFLITE_DELEGATE)
// Excerpt from the TfLite-delegate execution path. The enclosing function's signature and some
// set-up (numInputs, dataFile, delegateOptions, status) are elided from this listing.
    // Load the .tflite model, build a standard TfLite interpreter and allocate its tensors.
    std::unique_ptr<tflite::FlatBufferModel> model =
        tflite::FlatBufferModel::BuildFromFile(params.m_ModelPath.c_str());

    auto tfLiteInterpreter = std::make_unique<Interpreter>();
    tflite::ops::builtin::BuiltinOpResolver resolver;

    tflite::InterpreterBuilder builder(*model, resolver);
    builder(&tfLiteInterpreter);
    tfLiteInterpreter->AllocateTensors();
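    // --- Illustrative sketch, not part of the original file -------------------------------
    // The delegateOptions used in the registration step below are derived from the parsed
    // ExecuteNetworkParams. A minimal, hand-built equivalent (assuming DelegateOptions can be
    // constructed from a list of backend ids, as the delegate headers expose) looks like this;
    // the backend choice is only an example.
    std::vector<armnn::BackendId> exampleBackends = { armnn::Compute::CpuAcc, armnn::Compute::CpuRef };
    armnnDelegate::DelegateOptions exampleDelegateOptions(exampleBackends);
    // ---------------------------------------------------------------------------------------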
    // When the Arm NN delegate has been requested (selection logic elided), wrap it in a
    // unique_ptr with its custom deleter and register it with the interpreter.
    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
                         armnnDelegate::TfLiteArmnnDelegateDelete);

    status = tfLiteInterpreter->ModifyGraphWithDelegate(std::move(theArmnnDelegate));
    if (status != kTfLiteOk)
    {
        ARMNN_LOG(fatal) << "Could not register ArmNN TfLite Delegate to TfLiteInterpreter!";
        return EXIT_FAILURE;
    }

    // ... (otherwise, when no delegate was requested:)
    std::cout << "Running on TfLite without ArmNN delegate\n";
    // Populate each of the interpreter's input tensors from the user-supplied data files
    // (or from generated data when that was requested).
    for (unsigned int inputIndex = 0; inputIndex < numInputs; ++inputIndex)
    {
        int input = tfLiteInterpreter->inputs()[inputIndex];
        TfLiteIntArray* inputDims = tfLiteInterpreter->tensor(input)->dims;

        unsigned int inputSize = 1;
        for (unsigned int dim = 0; dim < static_cast<unsigned int>(inputDims->size); ++dim)
        {
            inputSize *= inputDims->data[dim];
        }
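        // The branches below map each --input-type string onto a typed_tensor<> view and a
        // string-to-element parser:
        //   "float"                -> typed_tensor<float>,   parsed with std::stof
        //   "qsymms8" / "qasymms8" -> typed_tensor<int8_t>,  parsed from integers
        //   "int"                  -> typed_tensor<int32_t>, parsed with std::stoi
        //   "qasymm8" / "qasymmu8" -> typed_tensor<uint8_t>, parsed from integers
        // Any other string is rejected with a fatal log.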
        if (params.m_InputTypes[inputIndex].compare("float") == 0)
        {
            auto inputData = tfLiteInterpreter->typed_tensor<float>(input);

            if (inputData == NULL)
            {
                ARMNN_LOG(fatal) << "Input tensor is null, input type: "
                                    "\"" << params.m_InputTypes[inputIndex] << "\" may be incorrect.";
                return EXIT_FAILURE;
            }

            std::vector<float> tensorData;
            PopulateTensorWithDataGeneric<float>(tensorData,
                                                 inputSize,
                                                 dataFile,   // optional input-data file (set up above)
                                                 [](const std::string& s)
                                                 { return std::stof(s); });

            std::copy(tensorData.begin(), tensorData.end(), inputData);
        }
        else if (params.m_InputTypes[inputIndex].compare("qsymms8") == 0 ||
                 params.m_InputTypes[inputIndex].compare("qasymms8") == 0)
        {
            auto inputData = tfLiteInterpreter->typed_tensor<int8_t>(input);

            if (inputData == NULL)
            {
                ARMNN_LOG(fatal) << "Input tensor is null, input type: "
                                    "\"" << params.m_InputTypes[inputIndex] << "\" may be incorrect.";
                return EXIT_FAILURE;
            }

            std::vector<int8_t> tensorData;
            PopulateTensorWithDataGeneric<int8_t>(tensorData,
                                                  inputSize,
                                                  dataFile,
                                                  [](const std::string& s)
                                                  { return armnn::numeric_cast<int8_t>(std::stoi(s)); });

            std::copy(tensorData.begin(), tensorData.end(), inputData);
        }
        else if (params.m_InputTypes[inputIndex].compare("int") == 0)
        {
            auto inputData = tfLiteInterpreter->typed_tensor<int32_t>(input);

            if (inputData == NULL)
            {
                ARMNN_LOG(fatal) << "Input tensor is null, input type: "
                                    "\"" << params.m_InputTypes[inputIndex] << "\" may be incorrect.";
                return EXIT_FAILURE;
            }

            std::vector<int32_t> tensorData;
            PopulateTensorWithDataGeneric<int32_t>(tensorData,
                                                   inputSize,
                                                   dataFile,
                                                   [](const std::string& s)
                                                   { return std::stoi(s); });

            std::copy(tensorData.begin(), tensorData.end(), inputData);
        }
        else if (params.m_InputTypes[inputIndex].compare("qasymm8") == 0 ||
                 params.m_InputTypes[inputIndex].compare("qasymmu8") == 0)
        {
            auto inputData = tfLiteInterpreter->typed_tensor<uint8_t>(input);

            if (inputData == NULL)
            {
                ARMNN_LOG(fatal) << "Input tensor is null, input type: "
                                    "\"" << params.m_InputTypes[inputIndex] << "\" may be incorrect.";
                return EXIT_FAILURE;
            }

            std::vector<uint8_t> tensorData;
            PopulateTensorWithDataGeneric<uint8_t>(tensorData,
                                                   inputSize,
                                                   dataFile,
                                                   [](const std::string& s)
                                                   { return armnn::numeric_cast<uint8_t>(std::stoi(s)); });

            std::copy(tensorData.begin(), tensorData.end(), inputData);
        }
        else
        {
            ARMNN_LOG(fatal) << "Unsupported input tensor data type \"" << params.m_InputTypes[inputIndex] << "\". ";
            return EXIT_FAILURE;
        }
    }
    // Run the requested number of iterations; each Invoke() call is timed (timing code elided).
    for (size_t x = 0; x < params.m_Iterations; x++)
    {
        status = tfLiteInterpreter->Invoke();

        // Map each requested output name to the interpreter's output index, or -1 when missing.
        std::map<unsigned int, int> paramToTfliteOutputIndex;
        for (unsigned int paramIndex = 0; paramIndex < params.m_OutputNames.size(); ++paramIndex)
        {
            paramToTfliteOutputIndex[paramIndex] = -1;
            for (unsigned int tfLiteIndex = 0; tfLiteIndex < tfLiteInterpreter->outputs().size(); ++tfLiteIndex)
            {
                if (params.m_OutputNames[paramIndex] == tfLiteInterpreter->GetOutputName(tfLiteIndex))
                {
                    paramToTfliteOutputIndex[paramIndex] = tfLiteIndex;
                }
            }
        }
        // Print or persist each requested output for this iteration.
        for (unsigned int paramOutputIndex = 0; paramOutputIndex < params.m_OutputNames.size(); ++paramOutputIndex)
        {
            int outputIndex = paramToTfliteOutputIndex[paramOutputIndex];
            if (outputIndex == -1)
            {
                std::cout << fmt::format("Output name: {} doesn't exist.", params.m_OutputNames[paramOutputIndex])
                          << std::endl;
                continue;
            }
            auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[outputIndex];
            TfLiteIntArray* outputDims = tfLiteInterpreter->tensor(tfLiteDelegateOutputId)->dims;

            // Write to the requested output file if one was given, otherwise to stdout.
            FILE* outputTensorFile = stdout;
            if (!params.m_OutputTensorFiles.empty())
            {
                outputTensorFile = fopen(params.m_OutputTensorFiles[outputIndex].c_str(), "w");
                if (outputTensorFile == NULL)
                {
                    ARMNN_LOG(fatal) << "Specified output tensor file, \"" << params.m_OutputTensorFiles[outputIndex]
                                     << "\", cannot be created. Defaulting to stdout. "
                                     << "Error was: " << std::strerror(errno);
                    outputTensorFile = stdout;
                }
                else
                {
                    ARMNN_LOG(info) << "Writing output '" << outputIndex << "' of iteration: " << x + 1
                                    << " to file: '" << params.m_OutputTensorFiles[outputIndex] << "'";
                }
            }

            long outputSize = 1;
            for (unsigned int dim = 0; dim < static_cast<unsigned int>(outputDims->size); ++dim)
            {
                outputSize *= outputDims->data[dim];
            }
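            // The branches below mirror the input dispatch: each --output-type string selects a
            // typed_tensor<> view and a printf format:
            //   "float"                -> typed_tensor<float>,   written with "%f"
            //   "int"                  -> typed_tensor<int32_t>, written with "%d"
            //   "qsymms8" / "qasymms8" -> typed_tensor<int8_t>,  written with "%d"
            //   "qasymm8" / "qasymmu8" -> typed_tensor<uint8_t>, written with "%u"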
            std::cout << tfLiteInterpreter->GetOutputName(outputIndex) << ": ";
            if (params.m_OutputTypes[paramOutputIndex].compare("float") == 0)
            {
                auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<float>(tfLiteDelegateOutputId);
                if (tfLiteDelegateOutputData == NULL)
                {
                    ARMNN_LOG(fatal) << "Output tensor is null, output type: "
                                        "\"" << params.m_OutputTypes[paramOutputIndex] << "\" may be incorrect.";
                    return EXIT_FAILURE;
                }

                for (int i = 0; i < outputSize; ++i)
                {
                    fprintf(outputTensorFile, "%f ", tfLiteDelegateOutputData[i]);
                }
            }
            else if (params.m_OutputTypes[paramOutputIndex].compare("int") == 0)
            {
                auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<int32_t>(tfLiteDelegateOutputId);
                if (tfLiteDelegateOutputData == NULL)
                {
                    ARMNN_LOG(fatal) << "Output tensor is null, output type: "
                                        "\"" << params.m_OutputTypes[paramOutputIndex] << "\" may be incorrect.";
                    return EXIT_FAILURE;
                }

                for (int i = 0; i < outputSize; ++i)
                {
                    fprintf(outputTensorFile, "%d ", tfLiteDelegateOutputData[i]);
                }
            }
            else if (params.m_OutputTypes[paramOutputIndex].compare("qsymms8") == 0 ||
                     params.m_OutputTypes[paramOutputIndex].compare("qasymms8") == 0)
            {
                auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<int8_t>(tfLiteDelegateOutputId);
                if (tfLiteDelegateOutputData == NULL)
                {
                    ARMNN_LOG(fatal) << "Output tensor is null, output type: "
                                        "\"" << params.m_OutputTypes[paramOutputIndex] << "\" may be incorrect.";
                    return EXIT_FAILURE;
                }

                for (int i = 0; i < outputSize; ++i)
                {
                    fprintf(outputTensorFile, "%d ", tfLiteDelegateOutputData[i]);
                }
            }
            else if (params.m_OutputTypes[paramOutputIndex].compare("qasymm8") == 0 ||
                     params.m_OutputTypes[paramOutputIndex].compare("qasymmu8") == 0)
            {
                auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<uint8_t>(tfLiteDelegateOutputId);
                if (tfLiteDelegateOutputData == NULL)
                {
                    ARMNN_LOG(fatal) << "Output tensor is null, output type: "
                                        "\"" << params.m_OutputTypes[paramOutputIndex] << "\" may be incorrect.";
                    return EXIT_FAILURE;
                }

                for (int i = 0; i < outputSize; ++i)
                {
                    fprintf(outputTensorFile, "%u ", tfLiteDelegateOutputData[i]);
                }
            }
            else
            {
                ARMNN_LOG(fatal) << "Output tensor is null, output type: "
                                    "\"" << params.m_OutputTypes[paramOutputIndex] <<
                                 "\" may be incorrect. Output type can be specified with -z argument";
                return EXIT_FAILURE;
            }
            std::cout << std::endl;
        } // end: for each requested output
    }     // end: for each iteration
    // ... (the return value of the TfLite-delegate path is elided in this excerpt)
#endif // ARMNN_TFLITE_DELEGATE
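// --- Illustrative sketch, not part of the original file ---------------------------------------
// Intended use of CheckInferenceTimeThreshold() defined near the top of this file: run the
// network once, then compare the measured duration against the user-supplied threshold.
// RunOnceAndCheckThreshold is a hypothetical helper written for illustration; model.Run() and
// the threshold value (params.m_ThresholdTime) are the real APIs used in MainImpl below.
template<typename InferenceModelT>
int RunOnceAndCheckThreshold(InferenceModelT& model,
                             std::vector<armnnUtils::TContainer>& inputContainers,
                             std::vector<armnnUtils::TContainer>& outputContainers,
                             double thresholdTimeMs)
{
    // model.Run() returns the measured duration as std::chrono::duration<double, std::milli>.
    auto inferenceDuration = model.Run(inputContainers, outputContainers);
    return CheckInferenceTimeThreshold(inferenceDuration, thresholdTimeMs) ? EXIT_SUCCESS : EXIT_FAILURE;
}
// -----------------------------------------------------------------------------------------------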
template<typename TParser, typename TDataType>
int MainImpl(const ExecuteNetworkParams& params,
             const std::shared_ptr<armnn::IRuntime>& runtime = nullptr)
{
    using namespace std::chrono;

    // One set of input/output containers per requested iteration.
    std::vector<std::vector<armnnUtils::TContainer>> inputs;
    std::vector<std::vector<armnnUtils::TContainer>> outputs;
    // model is the InferenceModel<TParser, TDataType> built from params; its construction, along
    // with numInputs/numOutputs, is elided from this listing. When input quantization is requested
    // the input quantization parameters are captured here (the surrounding condition is elided).
    auto qParams = armnn::MakeOptional<QuantizationParams>(model.GetInputQuantizationParams());

    ARMNN_LOG(info) << "Given network has " << numInputs << " input/s. One input-tensor-data file is required "
                    << "for each input. The user provided " << params.m_InputTensorDataFilePaths.size()
                    << " input-tensor-data file/s which will be used to fill the input/s.\n";
    // Warn when a supplied --output-type does not match the data type the model reports for
    // that output binding.
    for (unsigned int outputIdx = 0; outputIdx < model.GetOutputBindingInfos().size(); ++outputIdx)
    {
        switch (model.GetOutputBindingInfo(outputIdx).second.GetDataType())
        {
            case armnn::DataType::Float32:
                if (params.m_OutputTypes[outputIdx].compare("float") != 0)
                {
                    ARMNN_LOG(warning) << "Model output index: " << outputIdx << " has data type Float32. The "
                                       << "corresponding --output-type is " << params.m_OutputTypes[outputIdx]
                                       << ". This may cause unexpected problems or random failures.";
                }
                break;
            case armnn::DataType::QAsymmU8:
                if (params.m_OutputTypes[outputIdx].compare("qasymmu8") != 0)
                {
                    ARMNN_LOG(warning) << "Model output index: " << outputIdx << " has data type QAsymmU8. The "
                                       << "corresponding --output-type is " << params.m_OutputTypes[outputIdx]
                                       << ". This may cause unexpected problems or random failures.";
                }
                break;
            case armnn::DataType::Signed32:
                if (params.m_OutputTypes[outputIdx].compare("int") != 0)
                {
                    ARMNN_LOG(warning) << "Model output index: " << outputIdx << " has data type Signed32. The "
                                       << "corresponding --output-type is " << params.m_OutputTypes[outputIdx]
                                       << ". This may cause unexpected problems or random failures.";
                }
                break;
            case armnn::DataType::QAsymmS8:
                if (params.m_OutputTypes[outputIdx].compare("qasymms8") != 0)
                {
                    ARMNN_LOG(warning) << "Model output index: " << outputIdx << " has data type QAsymmS8. The "
                                       << "corresponding --output-type is " << params.m_OutputTypes[outputIdx]
                                       << ". This may cause unexpected problems or random failures.";
                }
                break;
            default:
                break;
        }
    }
    // Build one set of input containers per execution; data files are indexed j * numInputs + i.
    for (size_t j = 0; j < params.m_Iterations; ++j)
    {
        std::vector<armnnUtils::TContainer> inputDataContainers;
        for (unsigned int i = 0; i < numInputs; ++i)
        {
            size_t inputFileIndex = j * numInputs + i;
            armnn::Optional<std::string> dataFile = params.m_GenerateTensorData
                ? armnn::EmptyOptional()
                : armnn::MakeOptional<std::string>(params.m_InputTensorDataFilePaths.at(inputFileIndex));
            // ... (a TContainer of the requested --input-type is created and populated here)
            inputDataContainers.push_back(tensorData);
        }
        inputs.push_back(inputDataContainers);
        // Pre-size one container per output, typed according to --output-type.
        std::vector<armnnUtils::TContainer> outputDataContainers;
        for (unsigned int i = 0; i < numOutputs; ++i)
        {
            if (params.m_OutputTypes[i].compare("float") == 0)
            {
                outputDataContainers.push_back(std::vector<float>(model.GetOutputSize(i)));
            }
            else if (params.m_OutputTypes[i].compare("int") == 0)
            {
                outputDataContainers.push_back(std::vector<int>(model.GetOutputSize(i)));
            }
            else if (params.m_OutputTypes[i].compare("qasymm8") == 0 ||
                     params.m_OutputTypes[i].compare("qasymmu8") == 0)
            {
                outputDataContainers.push_back(std::vector<uint8_t>(model.GetOutputSize(i)));
            }
            else if (params.m_OutputTypes[i].compare("qasymms8") == 0)
            {
                outputDataContainers.push_back(std::vector<int8_t>(model.GetOutputSize(i)));
            }
            // ... (any other --output-type is rejected with a fatal log)
        }
        outputs.push_back(outputDataContainers);
    }
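    // Note: armnnUtils::TContainer is declared in this codebase as
    // mapbox::util::variant<std::vector<float>, std::vector<int>,
    //                       std::vector<unsigned char>, std::vector<int8_t>>,
    // which is why each --output-type maps onto exactly one of the vector types above.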
    std::stringstream msg;
    msg << "Network will be executed " << params.m_Iterations;
    // ... (when asynchronous execution was requested:)
    msg << " times in an asynchronous manner. ";
    // ... (otherwise:)
    msg << " times successively. ";
    msg << "The input-tensor-data files will be reused recursively if the user didn't provide enough to "
           "cover each execution.";
    ARMNN_LOG(info) << msg.str();
    // Synchronous execution: run the network once per iteration with its own containers.
    for (size_t x = 0; x < params.m_Iterations; x++)
    {
        // model.Run() returns the time spent in EnqueueWorkload, in milliseconds.
        auto inference_duration = model.Run(inputs[x], outputs[x]);

        if (params.m_GenerateTensorData)
        {
            ARMNN_LOG(warning) << "The input data was generated, note that the output will not be useful";
        }
        // ... (when printing outputs to the console has been disabled:)
        ARMNN_LOG(info) << "Printing outputs to console is disabled.";

        // Write each output either to the console or to the requested output file.
        for (size_t i = 0; i < numOutputs; i++)
        {
            // Output files are indexed per iteration so repeated runs don't overwrite each other.
            size_t outputFileIndex = x * numOutputs + i;
            // ...
            ARMNN_LOG(info) << "Writing output " << i << " named: '" /* ...binding name... */
                            << "' of iteration: " << x + 1 << " to file: '" /* ...file path... */;
            // ... (an output printer visitor is configured here)
            mapbox::util::apply_visitor(printer, outputs[x][i]);
        }
        ARMNN_LOG(info) << "\nInference time: " << std::setprecision(2)
                        << std::fixed << inference_duration.count() << " ms\n";

        // Check the measured time against the threshold, if one was supplied.
        if (params.m_ThresholdTime != 0.0)
        {
            ARMNN_LOG(info) << "Threshold time: " << std::setprecision(2)
                            << std::fixed << params.m_ThresholdTime << " ms";
            auto thresholdMinusInference = params.m_ThresholdTime - inference_duration.count();
            ARMNN_LOG(info) << "Threshold time - Inference time: " << std::setprecision(2)
                            << std::fixed << thresholdMinusInference << " ms" << "\n";

            if (thresholdMinusInference < 0)
            {
                std::string errorMessage = "Elapsed inference time is greater than provided threshold time.";
                ARMNN_LOG(fatal) << errorMessage;
            }
        }
    }
    // Variant that reuses one set of input/output containers for every iteration and collects
    // all timings before reporting (the selection between the execution paths is elided).
    std::vector<armnnUtils::TContainer> input;
    std::vector<armnnUtils::TContainer> output;

    for (unsigned int i = 0; i < numInputs; ++i)
    {
        size_t inputFileIndex = numInputs + i;
        armnn::Optional<std::string> dataFile = params.m_GenerateTensorData
            ? armnn::EmptyOptional()
            : armnn::MakeOptional<std::string>(params.m_InputTensorDataFilePaths.at(inputFileIndex));
        // ... (a TContainer of the requested --input-type is created and populated here)
        input.push_back(tensorData);
    }

    for (unsigned int i = 0; i < numOutputs; ++i)
    {
        if (params.m_OutputTypes[i].compare("float") == 0)
        {
            output.push_back(std::vector<float>(model.GetOutputSize(i)));
        }
        else if (params.m_OutputTypes[i].compare("qasymm8") == 0 ||
                 params.m_OutputTypes[i].compare("qasymmu8") == 0)
        {
            output.push_back(std::vector<uint8_t>(model.GetOutputSize(i)));
        }
        else // e.g. "qasymms8" (remaining branches elided)
        {
            output.push_back(std::vector<int8_t>(model.GetOutputSize(i)));
        }
    }

    std::vector<std::chrono::duration<double, std::milli>> timings;
    for (size_t x = 0; x < params.m_Iterations; ++x)
    {
        auto inference_duration = model.Run(input, output);
        timings.push_back(inference_duration);
        if (params.m_GenerateTensorData)
        {
            ARMNN_LOG(warning) << "The input data was generated, note that the output will not be useful";
        }
        // ... (when printing outputs to the console has been disabled:)
        ARMNN_LOG(info) << "Printing outputs to console is disabled.";

        // Write each output either to the console or to the requested output file.
        for (size_t i = 0; i < numOutputs; i++)
        {
            // ...
            size_t outputFileIndex = numOutputs + i;
            // ...
            ARMNN_LOG(info) << "Writing output " << i << " named: '" /* ...binding name and file path... */;
            // ... (an output printer visitor is configured here)
            mapbox::util::apply_visitor(printer, output[i]);
        }
    }
    // Report the individual timings once all iterations have completed.
    for (auto inference : timings)
    {
        ARMNN_LOG(info) << "\nInference time: " << std::setprecision(2)
                        << std::fixed << inference.count() << " ms\n";

        if (params.m_ThresholdTime != 0.0)
        {
            ARMNN_LOG(info) << "Threshold time: " << std::setprecision(2)
                            << std::fixed << params.m_ThresholdTime << " ms";
            auto thresholdMinusInference = params.m_ThresholdTime - inference.count();
            ARMNN_LOG(info) << "Threshold time - Inference time: " << std::setprecision(2)
                            << std::fixed << thresholdMinusInference << " ms" << "\n";

            if (thresholdMinusInference < 0)
            {
                std::string errorMessage = "Elapsed inference time is greater than provided threshold time.";
                ARMNN_LOG(fatal) << errorMessage;
            }
        }
    }
    // Asynchronous execution using the Arm NN thread pool.
    ARMNN_LOG(info) << "Asynchronous execution with Arm NN thread pool... \n";

    // Map each callback's inference id to the output containers it will fill.
    std::unordered_map<armnn::InferenceId, std::vector<armnnUtils::TContainer>&> inferenceOutputMap;

    // The overall time spans the earliest callback start to the latest callback end.
    std::chrono::high_resolution_clock::time_point earliestStartTime;
    std::chrono::high_resolution_clock::time_point latestEndTime =
        std::chrono::high_resolution_clock::now();

    // Schedule one inference per iteration (callbackManager is the callback manager driving the
    // thread pool; its set-up is elided from this excerpt).
    for (size_t i = 0; i < params.m_Iterations; ++i)
    {
        std::shared_ptr<armnn::AsyncExecutionCallback> cb = callbackManager.GetNewCallback();
        inferenceOutputMap.insert({cb->GetInferenceId(), outputs[i]});
        model.RunAsync(inputs[i], outputs[i], cb);
    }

    // Check the results once the scheduled inferences have completed.
    for (size_t iteration = 0; iteration < params.m_Iterations; ++iteration)
    {
        auto cb = callbackManager.GetNotifiedCallback();

        auto endTime = time_point_cast<std::chrono::milliseconds>(cb->GetEndTime());
        auto startTime = time_point_cast<std::chrono::milliseconds>(cb->GetStartTime());
        auto inferenceDuration = endTime - startTime;

        if (latestEndTime < cb->GetEndTime())
        {
            latestEndTime = cb->GetEndTime();
        }

        if (earliestStartTime.time_since_epoch().count() == 0)
        {
            earliestStartTime = cb->GetStartTime();
        }
        else if (earliestStartTime > cb->GetStartTime())
        {
            earliestStartTime = cb->GetStartTime();
        }
        if (params.m_GenerateTensorData)
        {
            ARMNN_LOG(warning) << "The input data was generated, note that the output will not be useful";
        }
        // ... (when printing outputs to the console has been disabled:)
        ARMNN_LOG(info) << "Printing outputs to console is disabled.";

        for (size_t i = 0; i < numOutputs; i++)
        {
            // ...
            size_t outputFileIndex = iteration * numOutputs + i;
            // ...
            ARMNN_LOG(info) << "Writing output " << i << " named: '" /* ...binding name... */
                            << "' of iteration: " << iteration + 1 << " to file: '" /* ...file path... */;
            // ... (an output printer visitor is configured here)
            mapbox::util::apply_visitor(printer, inferenceOutputMap.at(cb->GetInferenceId())[i]);
        }
    }
    // Overall time = latest callback end - earliest callback start across all inferences. Because
    // the inferences overlap on the thread pool, this wall-clock figure is typically shorter than
    // the sum of the per-inference durations computed above.
    auto overallEndTime = time_point_cast<std::chrono::milliseconds>(latestEndTime);
    auto overallStartTime = time_point_cast<std::chrono::milliseconds>(earliestStartTime);
    auto totalInferenceDuration = overallEndTime - overallStartTime;
    ARMNN_LOG(info) << "\nOverall Inference time: " << std::setprecision(2)
                    << std::fixed << totalInferenceDuration.count() << " ms\n";
    // Asynchronous execution using std::launch::async.
    ARMNN_LOG(info) << "Asynchronous Execution with std::launch:async... \n";

    // Each future delivers the inference id and the measured duration of one execution.
    std::vector<std::future<std::tuple<unsigned int,
                std::chrono::duration<double, std::milli>>>> inferenceResults;

    // One working-memory handle per in-flight inference so the executions don't share buffers
    // (handles are created with model.CreateWorkingMemHandle(); that loop is elided here).
    std::vector<std::unique_ptr<armnn::experimental::IWorkingMemHandle>> workingMemHandles;

    // Launch one asynchronous inference per iteration.
    for (unsigned int i = 0; i < params.m_Iterations; ++i)
    {
        armnn::experimental::IWorkingMemHandle& workingMemHandleRef = *workingMemHandles[i].get();
        inferenceResults.push_back(std::async(
            std::launch::async, [&model, &workingMemHandleRef, &inputs, &outputs, i]() {
                return model.RunAsync(workingMemHandleRef, inputs[i], outputs[i], i);
            }));
    }

    // Check the results as each future completes.
    for (unsigned int j = 0; j < inferenceResults.size(); ++j)
    {
        // Get the duration and the inference id from the future.
        auto inferenceResult = inferenceResults[j].get();
        auto inferenceDuration = std::get<1>(inferenceResult);
        auto inferenceID = std::get<0>(inferenceResult);
        if (params.m_GenerateTensorData)
        {
            ARMNN_LOG(warning) << "The input data was generated, note that the output will not be useful";
        }
        // ... (when printing outputs to the console has been disabled:)
        ARMNN_LOG(info) << "Printing outputs to console is disabled.";

        for (size_t i = 0; i < numOutputs; i++)
        {
            // ...
            size_t outputFileIndex = j * numOutputs + i;
            // ...
            ARMNN_LOG(info) << "Writing output " << i << " named: '" /* ...binding name... */
                            << "' of iteration: " << j + 1 << " to file: '" /* ...file path... */;
            // ... (an output printer visitor is configured here)
            mapbox::util::apply_visitor(printer, outputs[j][i]);
        }

        ARMNN_LOG(info) << "Asynchronous Execution is finished for Inference ID: " << inferenceID << " \n";
    }
    // duration below is the wall-clock time measured around the whole launch-and-collect
    // sequence (its set-up via GetTimeNow()/GetTimeDuration() is elided from this excerpt).
    ARMNN_LOG(info) << "\nOverall Inference time: " << std::setprecision(2)
                    << std::fixed << duration.count() << " ms\n";
    // ... (remaining error handling and the function's return value are elided)
}
int main(int argc, const char* argv[])
{
    // Configure logging, then parse the program options; ParseOptions() throws on invalid input.
    ProgramOptions ProgramOptions;
    try
    {
        ProgramOptions.ParseOptions(argc, argv);
    }
    catch (const std::exception& e)
    {
        // ... (the parsing error carried by e is reported)
        return EXIT_FAILURE;
    }

    if (ProgramOptions.m_ExNetParams.m_OutputDetailsToStdOut &&
        !ProgramOptions.m_ExNetParams.m_EnableProfiling)
    {
        ARMNN_LOG(fatal) << "You must enable profiling if you would like to output layer details";
        return EXIT_FAILURE;
    }
    // Create the runtime shared by the parser-based execution paths.
    std::shared_ptr<armnn::IRuntime> runtime(armnn::IRuntime::Create(ProgramOptions.m_RuntimeOptions));

    // Forward to the implementation matching the requested model format.
    std::string modelFormat = ProgramOptions.m_ExNetParams.m_ModelFormat;

    if (modelFormat.find("armnn") != std::string::npos)
    {
    #if defined(ARMNN_SERIALIZER)
        return MainImpl<armnnDeserializer::IDeserializer, float>(ProgramOptions.m_ExNetParams, runtime);
    #else
        ARMNN_LOG(fatal) << "Not built with serialization support.";
        return EXIT_FAILURE;
    #endif
    }
    else if (modelFormat.find("onnx") != std::string::npos)
    {
    #if defined(ARMNN_ONNX_PARSER)
        return MainImpl<armnnOnnxParser::IOnnxParser, float>(ProgramOptions.m_ExNetParams, runtime);
    #else
        ARMNN_LOG(fatal) << "Not built with Onnx parser support.";
        return EXIT_FAILURE;
    #endif
    }
    else if (modelFormat.find("tflite") != std::string::npos)
    {
        // When the Arm NN TfLite parser executor is selected (check on m_TfLiteExecutor elided):
    #if defined(ARMNN_TF_LITE_PARSER)
        return MainImpl<armnnTfLiteParser::ITfLiteParser, float>(ProgramOptions.m_ExNetParams, runtime);
    #else
        ARMNN_LOG(fatal) << "Not built with Tensorflow-Lite parser support.";
        return EXIT_FAILURE;
    #endif

        // When the TfLite-delegate executor is selected instead:
    #if defined(ARMNN_TF_LITE_DELEGATE)
        // ... (run the TfLite-delegate path shown earlier in this file)
    #else
        ARMNN_LOG(fatal) << "Not built with Arm NN Tensorflow-Lite delegate support.";
        return EXIT_FAILURE;
    #endif
    }
    else
    {
        ARMNN_LOG(fatal) << "Unknown model format: '" << modelFormat
                         << "'. Please include 'tflite' or 'onnx'";
        return EXIT_FAILURE;
    }
}
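// --- Illustrative sketch, not part of the original file ---------------------------------------
// The std::launch::async fan-out/fan-in pattern used in MainImpl above, reduced to its
// essentials: launch one task per iteration, then block on each future in submission order.
// In ExecuteNetwork the task is model.RunAsync(...) with its own working-memory handle and the
// future carries an (inference id, duration) tuple; here the task is a trivial stand-in.
// Assumes <future> and <vector> are available, as the code above already requires.
namespace
{
unsigned int ExampleFanOutFanIn(unsigned int iterations)   // hypothetical helper, illustration only
{
    std::vector<std::future<unsigned int>> results;
    results.reserve(iterations);
    for (unsigned int i = 0; i < iterations; ++i)
    {
        results.push_back(std::async(std::launch::async, [i]() { return i; }));
    }

    unsigned int checksum = 0;
    for (auto& f : results)
    {
        checksum += f.get();   // blocks until the corresponding task has finished
    }
    return checksum;
}
} // anonymous namespace
// -----------------------------------------------------------------------------------------------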