ArmNN 20.02
NetworkExecutionUtils.hpp File Reference
#include <armnn/ArmNN.hpp>
#include <armnn/TypesUtils.hpp>
#include "CsvReader.hpp"
#include "../InferenceTest.hpp"
#include <Profiling.hpp>
#include <ResolveType.hpp>
#include <boost/algorithm/string/trim.hpp>
#include <boost/algorithm/string/split.hpp>
#include <boost/algorithm/string/classification.hpp>
#include <boost/program_options.hpp>
#include <boost/variant.hpp>
#include <iostream>
#include <fstream>
#include <functional>
#include <future>
#include <algorithm>
#include <iterator>


Classes

struct  ExecuteNetworkParams
 

Functions

template<typename TParser , typename TDataType >
int MainImpl (const ExecuteNetworkParams &params, const std::shared_ptr< armnn::IRuntime > &runtime=nullptr)
 
int RunTest (const std::string &format, const std::string &inputTensorShapesStr, const vector< armnn::BackendId > &computeDevices, const std::string &dynamicBackendsPath, const std::string &path, const std::string &inputNames, const std::string &inputTensorDataFilePaths, const std::string &inputTypes, bool quantizeInput, const std::string &outputTypes, const std::string &outputNames, const std::string &outputTensorFiles, bool dequantizeOuput, bool enableProfiling, bool enableFp16TurboMode, const double &thresholdTime, bool printIntermediate, const size_t subgraphId, bool enableLayerDetails=false, bool parseUnsupported=false, const std::shared_ptr< armnn::IRuntime > &runtime=nullptr)
 
int RunCsvTest (const armnnUtils::CsvRow &csvRow, const std::shared_ptr< armnn::IRuntime > &runtime, const bool enableProfiling, const bool enableFp16TurboMode, const double &thresholdTime, const bool printIntermediate, bool enableLayerDetails=false, bool parseUnuspported=false)
 

Variables

bool generateTensorData = true
 

Function Documentation

◆ MainImpl()

template<typename TParser , typename TDataType >
int MainImpl ( const ExecuteNetworkParams &  params,
const std::shared_ptr< armnn::IRuntime > &  runtime = nullptr 
)

Definition at line 391 of file NetworkExecutionUtils.hpp.

References ARMNN_LOG, InferenceModel< IParser, TDataType >::GetInputQuantizationParams(), InferenceModel< IParser, TDataType >::GetInputSize(), InferenceModel< IParser, TDataType >::GetOutputBindingInfos(), InferenceModel< IParser, TDataType >::GetOutputSize(), Params::m_ComputeDevices, ExecuteNetworkParams::m_ComputeDevices, ExecuteNetworkParams::m_DequantizeOutput, Params::m_DynamicBackendsPath, ExecuteNetworkParams::m_DynamicBackendsPath, Params::m_EnableFp16TurboMode, ExecuteNetworkParams::m_EnableFp16TurboMode, ExecuteNetworkParams::m_EnableLayerDetails, ExecuteNetworkParams::m_EnableProfiling, ExecuteNetworkParams::m_GenerateTensorData, Params::m_InputBindings, ExecuteNetworkParams::m_InputNames, Params::m_InputShapes, ExecuteNetworkParams::m_InputTensorDataFilePaths, ExecuteNetworkParams::m_InputTensorShapes, ExecuteNetworkParams::m_InputTypes, Params::m_IsModelBinary, ExecuteNetworkParams::m_IsModelBinary, Params::m_ModelPath, ExecuteNetworkParams::m_ModelPath, Params::m_OutputBindings, ExecuteNetworkParams::m_OutputNames, ExecuteNetworkParams::m_OutputTensorFiles, ExecuteNetworkParams::m_OutputTypes, Params::m_ParseUnsupported, ExecuteNetworkParams::m_ParseUnsupported, ExecuteNetworkParams::m_PrintIntermediate, Params::m_PrintIntermediateLayers, ExecuteNetworkParams::m_QuantizeInput, Params::m_SubgraphId, ExecuteNetworkParams::m_SubgraphId, ExecuteNetworkParams::m_ThresholdTime, Params::m_VisualizePostOptimizationModel, InferenceModel< IParser, TDataType >::Run(), and Exception::what().

393 {
394  using TContainer = boost::variant<std::vector<float>, std::vector<int>, std::vector<unsigned char>>;
395 
396  std::vector<TContainer> inputDataContainers;
397 
398  try
399  {
400  // Creates an InferenceModel, which will parse the model and load it into an IRuntime.
401  typename InferenceModel<TParser, TDataType>::Params inferenceModelParams;
402  inferenceModelParams.m_ModelPath = params.m_ModelPath;
403  inferenceModelParams.m_IsModelBinary = params.m_IsModelBinary;
404  inferenceModelParams.m_ComputeDevices = params.m_ComputeDevices;
405  inferenceModelParams.m_DynamicBackendsPath = params.m_DynamicBackendsPath;
406  inferenceModelParams.m_PrintIntermediateLayers = params.m_PrintIntermediate;
407  inferenceModelParams.m_VisualizePostOptimizationModel = params.m_EnableLayerDetails;
408  inferenceModelParams.m_ParseUnsupported = params.m_ParseUnsupported;
409 
410  for(const std::string& inputName: params.m_InputNames)
411  {
412  inferenceModelParams.m_InputBindings.push_back(inputName);
413  }
414 
415  for(unsigned int i = 0; i < params.m_InputTensorShapes.size(); ++i)
416  {
417  inferenceModelParams.m_InputShapes.push_back(*params.m_InputTensorShapes[i]);
418  }
419 
420  for(const std::string& outputName: params.m_OutputNames)
421  {
422  inferenceModelParams.m_OutputBindings.push_back(outputName);
423  }
424 
425  inferenceModelParams.m_SubgraphId = params.m_SubgraphId;
426  inferenceModelParams.m_EnableFp16TurboMode = params.m_EnableFp16TurboMode;
427 
428  InferenceModel<TParser, TDataType> model(inferenceModelParams,
429  params.m_EnableProfiling,
430  params.m_DynamicBackendsPath,
431  runtime);
432 
433  const size_t numInputs = inferenceModelParams.m_InputBindings.size();
434  for(unsigned int i = 0; i < numInputs; ++i)
435  {
436  armnn::Optional<QuantizationParams> qParams = params.m_QuantizeInput ?
437  armnn::MakeOptional<QuantizationParams>(model.GetInputQuantizationParams()) :
438  armnn::EmptyOptional();
439 
440  armnn::Optional<std::string> dataFile = params.m_GenerateTensorData ?
441  armnn::EmptyOptional() :
442  armnn::MakeOptional<std::string>(params.m_InputTensorDataFilePaths[i]);
443 
444  unsigned int numElements = model.GetInputSize(i);
445  if (params.m_InputTensorShapes.size() > i && params.m_InputTensorShapes[i])
446  {
447  // If the user has provided a tensor shape for the current input,
448  // override numElements
449  numElements = params.m_InputTensorShapes[i]->GetNumElements();
450  }
451 
452  TContainer tensorData;
453  PopulateTensorWithData(tensorData,
454  numElements,
455  params.m_InputTypes[i],
456  qParams,
457  dataFile);
458 
459  inputDataContainers.push_back(tensorData);
460  }
461 
462  const size_t numOutputs = inferenceModelParams.m_OutputBindings.size();
463  std::vector<TContainer> outputDataContainers;
464 
465  for (unsigned int i = 0; i < numOutputs; ++i)
466  {
467  if (params.m_OutputTypes[i].compare("float") == 0)
468  {
469  outputDataContainers.push_back(std::vector<float>(model.GetOutputSize(i)));
470  }
471  else if (params.m_OutputTypes[i].compare("int") == 0)
472  {
473  outputDataContainers.push_back(std::vector<int>(model.GetOutputSize(i)));
474  }
475  else if (params.m_OutputTypes[i].compare("qasymm8") == 0)
476  {
477  outputDataContainers.push_back(std::vector<uint8_t>(model.GetOutputSize(i)));
478  }
479  else
480  {
481  ARMNN_LOG(fatal) << "Unsupported tensor data type \"" << params.m_OutputTypes[i] << "\". ";
482  return EXIT_FAILURE;
483  }
484  }
485 
486  // model.Run returns the inference time elapsed in EnqueueWorkload (in milliseconds)
487  auto inference_duration = model.Run(inputDataContainers, outputDataContainers);
488 
489  if (params.m_GenerateTensorData)
490  {
491  ARMNN_LOG(warning) << "The input data was generated, note that the output will not be useful";
492  }
493 
494  // Print output tensors
495  const auto& infosOut = model.GetOutputBindingInfos();
496  for (size_t i = 0; i < numOutputs; i++)
497  {
498  const armnn::TensorInfo& infoOut = infosOut[i].second;
499  auto outputTensorFile = params.m_OutputTensorFiles.empty() ? "" : params.m_OutputTensorFiles[i];
500 
501  TensorPrinter printer(inferenceModelParams.m_OutputBindings[i],
502  infoOut,
503  outputTensorFile,
504  params.m_DequantizeOutput);
505  boost::apply_visitor(printer, outputDataContainers[i]);
506  }
507 
508  ARMNN_LOG(info) << "\nInference time: " << std::setprecision(2)
509  << std::fixed << inference_duration.count() << " ms";
510 
511  // If thresholdTime == 0.0 (default), then it hasn't been supplied at command line
512  if (params.m_ThresholdTime != 0.0)
513  {
514  ARMNN_LOG(info) << "Threshold time: " << std::setprecision(2)
515  << std::fixed << params.m_ThresholdTime << " ms";
516  auto thresholdMinusInference = params.m_ThresholdTime - inference_duration.count();
517  ARMNN_LOG(info) << "Threshold time - Inference time: " << std::setprecision(2)
518  << std::fixed << thresholdMinusInference << " ms" << "\n";
519 
520  if (thresholdMinusInference < 0)
521  {
522  std::string errorMessage = "Elapsed inference time is greater than provided threshold time.";
523  ARMNN_LOG(fatal) << errorMessage;
524  }
525  }
526  }
527  catch (armnn::Exception const& e)
528  {
529  ARMNN_LOG(fatal) << "Armnn Error: " << e.what();
530  return EXIT_FAILURE;
531  }
532 
533  return EXIT_SUCCESS;
534 }
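The listing above builds InferenceModel::Params from the supplied ExecuteNetworkParams, fills one input container per binding, runs the model and prints each output tensor. As a rough illustration only (not part of this header), a caller could populate ExecuteNetworkParams by hand and invoke MainImpl directly for a TfLite model; the model path, binding names, backend and parser header path below are placeholder assumptions, and in practice RunTest() performs this step after parsing the command-line strings.

#include "NetworkExecutionUtils.hpp"

#if defined(ARMNN_TF_LITE_PARSER)
#include "armnnTfLiteParser/ITfLiteParser.hpp"   // parser header path assumed

int RunSingleTfLiteModel()
{
    ExecuteNetworkParams params;
    params.m_ModelPath      = "model.tflite";              // hypothetical model path
    params.m_IsModelBinary  = true;                        // .tflite models are binary
    params.m_ComputeDevices = { armnn::Compute::CpuRef };
    params.m_InputNames     = { "input" };                 // hypothetical binding names
    params.m_OutputNames    = { "output" };
    params.m_InputTypes     = { "float" };
    params.m_OutputTypes    = { "float" };
    // The remaining members (profiling, FP16 turbo mode, threshold time, ...)
    // should also be initialised explicitly before calling MainImpl.

    // runtime defaults to nullptr, so MainImpl creates its own IRuntime.
    return MainImpl<armnnTfLiteParser::ITfLiteParser, float>(params);
}
#endif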

◆ RunCsvTest()

int RunCsvTest ( const armnnUtils::CsvRow &  csvRow,
const std::shared_ptr< armnn::IRuntime > &  runtime,
const bool  enableProfiling,
const bool  enableFp16TurboMode,
const double &  thresholdTime,
const bool  printIntermediate,
bool  enableLayerDetails = false,
bool  parseUnuspported = false 
)

Definition at line 750 of file NetworkExecutionUtils.hpp.

References ARMNN_LOG, armnn::BackendRegistryInstance(), BackendRegistry::GetBackendIdsAsString(), armnn::IgnoreUnused(), RunTest(), and CsvRow::values.

Referenced by main().

753 {
754  IgnoreUnused(runtime);
755  std::string modelFormat;
756  std::string modelPath;
757  std::string inputNames;
758  std::string inputTensorShapes;
759  std::string inputTensorDataFilePaths;
760  std::string outputNames;
761  std::string inputTypes;
762  std::string outputTypes;
763  std::string dynamicBackendsPath;
764  std::string outputTensorFiles;
765 
766  size_t subgraphId = 0;
767 
768  const std::string backendsMessage = std::string("The preferred order of devices to run layers on by default. ")
769  + std::string("Possible choices: ")
770  + armnn::BackendRegistryInstance().GetBackendIdsAsString();
771 
772  po::options_description desc("Options");
773  try
774  {
775  desc.add_options()
776  ("model-format,f", po::value(&modelFormat),
777  "armnn-binary, caffe-binary, caffe-text, tflite-binary, onnx-binary, onnx-text, tensorflow-binary or "
778  "tensorflow-text.")
779  ("model-path,m", po::value(&modelPath), "Path to model file, e.g. .armnn, .caffemodel, .prototxt, "
780  ".tflite, .onnx")
781  ("compute,c", po::value<std::vector<armnn::BackendId>>()->multitoken(),
782  backendsMessage.c_str())
783  ("dynamic-backends-path,b", po::value(&dynamicBackendsPath),
784  "Path where to load any available dynamic backend from. "
785  "If left empty (the default), dynamic backends will not be used.")
786  ("input-name,i", po::value(&inputNames), "Identifier of the input tensors in the network separated by comma.")
787  ("subgraph-number,n", po::value<size_t>(&subgraphId)->default_value(0), "Id of the subgraph to be "
788  "executed. Defaults to 0.")
789  ("input-tensor-shape,s", po::value(&inputTensorShapes),
790  "The shape of the input tensors in the network as a flat array of integers separated by comma. "
791  "Several shapes can be passed separating them by semicolon. "
792  "This parameter is optional, depending on the network.")
793  ("input-tensor-data,d", po::value(&inputTensorDataFilePaths)->default_value(""),
794  "Path to files containing the input data as a flat array separated by whitespace. "
795  "Several paths can be passed separating them by comma. If not specified, the network will be run with dummy "
796  "data (useful for profiling).")
797  ("input-type,y",po::value(&inputTypes), "The type of the input tensors in the network separated by comma. "
798  "If unset, defaults to \"float\" for all defined inputs. "
799  "Accepted values (float, int or qasymm8).")
800  ("quantize-input,q",po::bool_switch()->default_value(false),
801  "If this option is enabled, all float inputs will be quantized to qasymm8. "
802  "If unset, default to not quantized. "
803  "Accepted values (true or false)")
804  ("output-type,z",po::value(&outputTypes), "The type of the output tensors in the network separated by comma. "
805  "If unset, defaults to \"float\" for all defined outputs. "
806  "Accepted values (float, int or qasymm8).")
807  ("output-name,o", po::value(&outputNames),
808  "Identifier of the output tensors in the network separated by comma.")
809  ("dequantize-output,l",po::bool_switch()->default_value(false),
810  "If this option is enabled, all quantized outputs will be dequantized to float. "
811  "If unset, default to not get dequantized. "
812  "Accepted values (true or false)")
813  ("write-outputs-to-file,w", po::value(&outputTensorFiles),
814  "Comma-separated list of output file paths keyed with the binding-id of the output slot. "
815  "If left empty (the default), the output tensors will not be written to a file.");
816  }
817  catch (const std::exception& e)
818  {
819  // Coverity points out that default_value(...) can throw a bad_lexical_cast,
820  // and that desc.add_options() can throw boost::io::too_few_args.
821  // They really won't in any of these cases.
822  BOOST_ASSERT_MSG(false, "Caught unexpected exception");
823  ARMNN_LOG(fatal) << "Fatal internal error: " << e.what();
824  return EXIT_FAILURE;
825  }
826 
827  std::vector<const char*> clOptions;
828  clOptions.reserve(csvRow.values.size());
829  for (const std::string& value : csvRow.values)
830  {
831  clOptions.push_back(value.c_str());
832  }
833 
834  po::variables_map vm;
835  try
836  {
837  po::store(po::parse_command_line(static_cast<int>(clOptions.size()), clOptions.data(), desc), vm);
838 
839  po::notify(vm);
840 
841  CheckOptionDependencies(vm);
842  }
843  catch (const po::error& e)
844  {
845  std::cerr << e.what() << std::endl << std::endl;
846  std::cerr << desc << std::endl;
847  return EXIT_FAILURE;
848  }
849 
850  // Get the value of the switch arguments.
851  bool quantizeInput = vm["quantize-input"].as<bool>();
852  bool dequantizeOutput = vm["dequantize-output"].as<bool>();
853 
854  // Get the preferred order of compute devices.
855  std::vector<armnn::BackendId> computeDevices = vm["compute"].as<std::vector<armnn::BackendId>>();
856 
857  // Remove duplicates from the list of compute devices.
858  RemoveDuplicateDevices(computeDevices);
859 
860  // Check that the specified compute devices are valid.
861  std::string invalidBackends;
862  if (!CheckRequestedBackendsAreValid(computeDevices, armnn::Optional<std::string&>(invalidBackends)))
863  {
864  ARMNN_LOG(fatal) << "The list of preferred devices contains invalid backend IDs: "
865  << invalidBackends;
866  return EXIT_FAILURE;
867  }
868 
869  return RunTest(modelFormat, inputTensorShapes, computeDevices, dynamicBackendsPath, modelPath, inputNames,
870  inputTensorDataFilePaths, inputTypes, quantizeInput, outputTypes, outputNames, outputTensorFiles,
871  dequantizeOutput, enableProfiling, enableFp16TurboMode, thresholdTime, printIntermediate, subgraphId,
872  enableLayerDetails, parseUnuspported);
873 }
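For comparison, the main() of ExecuteNetwork drives this function from a CSV file in which every row holds one complete command line. A minimal sketch of that pattern follows; the file name is a placeholder, and the CsvReader::ParseFile call is an assumption based on the CsvReader.hpp include at the top of this header.

#include "NetworkExecutionUtils.hpp"
#include <cstdlib>

int RunAllCsvRows(const std::shared_ptr<armnn::IRuntime>& runtime)
{
    armnnUtils::CsvReader reader;
    // Hypothetical CSV file; each row is one ExecuteNetwork command line.
    std::vector<armnnUtils::CsvRow> rows = reader.ParseFile("test-cases.csv");

    int result = EXIT_SUCCESS;
    for (const armnnUtils::CsvRow& row : rows)
    {
        // Profiling, FP16 turbo mode and intermediate-layer printing disabled,
        // no threshold time; layer details and unsupported-layer parsing keep
        // their defaults (false).
        if (RunCsvTest(row, runtime, false, false, 0.0, false) != EXIT_SUCCESS)
        {
            result = EXIT_FAILURE;
        }
    }
    return result;
}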

◆ RunTest()

int RunTest ( const std::string &  format,
const std::string &  inputTensorShapesStr,
const vector< armnn::BackendId > &  computeDevices,
const std::string &  dynamicBackendsPath,
const std::string &  path,
const std::string &  inputNames,
const std::string &  inputTensorDataFilePaths,
const std::string &  inputTypes,
bool  quantizeInput,
const std::string &  outputTypes,
const std::string &  outputNames,
const std::string &  outputTensorFiles,
bool  dequantizeOuput,
bool  enableProfiling,
bool  enableFp16TurboMode,
const double &  thresholdTime,
bool  printIntermediate,
const size_t  subgraphId,
bool  enableLayerDetails = false,
bool  parseUnsupported = false,
const std::shared_ptr< armnn::IRuntime > &  runtime = nullptr 
)

Definition at line 537 of file NetworkExecutionUtils.hpp.

References ARMNN_LOG, ExecuteNetworkParams::m_ComputeDevices, ExecuteNetworkParams::m_DequantizeOutput, ExecuteNetworkParams::m_DynamicBackendsPath, ExecuteNetworkParams::m_EnableFp16TurboMode, ExecuteNetworkParams::m_EnableLayerDetails, ExecuteNetworkParams::m_EnableProfiling, ExecuteNetworkParams::m_GenerateTensorData, ExecuteNetworkParams::m_InputNames, ExecuteNetworkParams::m_InputTensorDataFilePaths, ExecuteNetworkParams::m_InputTensorShapes, ExecuteNetworkParams::m_InputTypes, ExecuteNetworkParams::m_IsModelBinary, ExecuteNetworkParams::m_ModelPath, ExecuteNetworkParams::m_OutputNames, ExecuteNetworkParams::m_OutputTensorFiles, ExecuteNetworkParams::m_OutputTypes, ExecuteNetworkParams::m_ParseUnsupported, ExecuteNetworkParams::m_PrintIntermediate, ExecuteNetworkParams::m_QuantizeInput, ExecuteNetworkParams::m_SubgraphId, ExecuteNetworkParams::m_ThresholdTime, and Exception::what().

Referenced by BOOST_FIXTURE_TEST_CASE(), main(), and RunCsvTest().

558 {
559  std::string modelFormat = boost::trim_copy(format);
560  std::string modelPath = boost::trim_copy(path);
561  std::vector<std::string> inputNamesVector = ParseStringList(inputNames, ",");
562  std::vector<std::string> inputTensorShapesVector = ParseStringList(inputTensorShapesStr, ":");
563  std::vector<std::string> inputTensorDataFilePathsVector = ParseStringList(
564  inputTensorDataFilePaths, ",");
565  std::vector<std::string> outputNamesVector = ParseStringList(outputNames, ",");
566  std::vector<std::string> inputTypesVector = ParseStringList(inputTypes, ",");
567  std::vector<std::string> outputTypesVector = ParseStringList(outputTypes, ",");
568  std::vector<std::string> outputTensorFilesVector = ParseStringList(outputTensorFiles, ",");
569 
570  // Parse model binary flag from the model-format string we got from the command-line
571  bool isModelBinary;
572  if (modelFormat.find("bin") != std::string::npos)
573  {
574  isModelBinary = true;
575  }
576  else if (modelFormat.find("txt") != std::string::npos || modelFormat.find("text") != std::string::npos)
577  {
578  isModelBinary = false;
579  }
580  else
581  {
582  ARMNN_LOG(fatal) << "Unknown model format: '" << modelFormat << "'. Please include 'binary' or 'text'";
583  return EXIT_FAILURE;
584  }
585 
586  if ((inputTensorShapesVector.size() != 0) && (inputTensorShapesVector.size() != inputNamesVector.size()))
587  {
588  ARMNN_LOG(fatal) << "input-name and input-tensor-shape must have the same amount of elements.";
589  return EXIT_FAILURE;
590  }
591 
592  if ((inputTensorDataFilePathsVector.size() != 0) &&
593  (inputTensorDataFilePathsVector.size() != inputNamesVector.size()))
594  {
595  ARMNN_LOG(fatal) << "input-name and input-tensor-data must have the same amount of elements.";
596  return EXIT_FAILURE;
597  }
598 
599  if ((outputTensorFilesVector.size() != 0) &&
600  (outputTensorFilesVector.size() != outputNamesVector.size()))
601  {
602  ARMNN_LOG(fatal) << "output-name and write-outputs-to-file must have the same amount of elements.";
603  return EXIT_FAILURE;
604  }
605 
606  if (inputTypesVector.size() == 0)
607  {
608  //Defaults the value of all inputs to "float"
609  inputTypesVector.assign(inputNamesVector.size(), "float");
610  }
611  else if ((inputTypesVector.size() != 0) && (inputTypesVector.size() != inputNamesVector.size()))
612  {
613  ARMNN_LOG(fatal) << "input-name and input-type must have the same amount of elements.";
614  return EXIT_FAILURE;
615  }
616 
617  if (outputTypesVector.size() == 0)
618  {
619  //Defaults the value of all outputs to "float"
620  outputTypesVector.assign(outputNamesVector.size(), "float");
621  }
622  else if ((outputTypesVector.size() != 0) && (outputTypesVector.size() != outputNamesVector.size()))
623  {
624  ARMNN_LOG(fatal) << "output-name and output-type must have the same amount of elements.";
625  return EXIT_FAILURE;
626  }
627 
628  // Parse input tensor shape from the string we got from the command-line.
629  std::vector<std::unique_ptr<armnn::TensorShape>> inputTensorShapes;
630 
631  if (!inputTensorShapesVector.empty())
632  {
633  inputTensorShapes.reserve(inputTensorShapesVector.size());
634 
635  for(const std::string& shape : inputTensorShapesVector)
636  {
637  std::stringstream ss(shape);
638  std::vector<unsigned int> dims = ParseArray(ss);
639 
640  try
641  {
642  // Coverity fix: An exception of type armnn::InvalidArgumentException is thrown and never caught.
643  inputTensorShapes.push_back(std::make_unique<armnn::TensorShape>(dims.size(), dims.data()));
644  }
645  catch (const armnn::InvalidArgumentException& e)
646  {
647  ARMNN_LOG(fatal) << "Cannot create tensor shape: " << e.what();
648  return EXIT_FAILURE;
649  }
650  }
651  }
652 
653  // Check that threshold time is not less than zero
654  if (thresholdTime < 0)
655  {
656  ARMNN_LOG(fatal) << "Threshold time supplied as a command line argument is less than zero.";
657  return EXIT_FAILURE;
658  }
659 
660  ExecuteNetworkParams params;
661  params.m_ModelPath = modelPath.c_str();
662  params.m_IsModelBinary = isModelBinary;
663  params.m_ComputeDevices = computeDevices;
664  params.m_DynamicBackendsPath = dynamicBackendsPath;
665  params.m_InputNames = inputNamesVector;
666  params.m_InputTensorShapes = std::move(inputTensorShapes);
667  params.m_InputTensorDataFilePaths = inputTensorDataFilePathsVector;
668  params.m_InputTypes = inputTypesVector;
669  params.m_QuantizeInput = quantizeInput;
670  params.m_OutputTypes = outputTypesVector;
671  params.m_OutputNames = outputNamesVector;
672  params.m_OutputTensorFiles = outputTensorFilesVector;
673  params.m_DequantizeOutput = dequantizeOuput;
674  params.m_EnableProfiling = enableProfiling;
675  params.m_EnableFp16TurboMode = enableFp16TurboMode;
676  params.m_ThresholdTime = thresholdTime;
677  params.m_PrintIntermediate = printIntermediate;
678  params.m_SubgraphId = subgraphId;
679  params.m_EnableLayerDetails = enableLayerDetails;
680  params.m_GenerateTensorData = inputTensorDataFilePathsVector.empty();
681  params.m_ParseUnsupported = parseUnsupported;
682 
683  // Warn if ExecuteNetwork will generate dummy input data
684  if (params.m_GenerateTensorData)
685  {
686  ARMNN_LOG(warning) << "No input files provided, input tensors will be filled with 0s.";
687  }
688 
689  // Forward to implementation based on the parser type
690  if (modelFormat.find("armnn") != std::string::npos)
691  {
692 #if defined(ARMNN_SERIALIZER)
693  return MainImpl<armnnDeserializer::IDeserializer, float>(params, runtime);
694 #else
695  ARMNN_LOG(fatal) << "Not built with serialization support.";
696  return EXIT_FAILURE;
697 #endif
698  }
699  else if (modelFormat.find("caffe") != std::string::npos)
700  {
701 #if defined(ARMNN_CAFFE_PARSER)
702  return MainImpl<armnnCaffeParser::ICaffeParser, float>(params, runtime);
703 #else
704  ARMNN_LOG(fatal) << "Not built with Caffe parser support.";
705  return EXIT_FAILURE;
706 #endif
707  }
708  else if (modelFormat.find("onnx") != std::string::npos)
709 {
710 #if defined(ARMNN_ONNX_PARSER)
711  return MainImpl<armnnOnnxParser::IOnnxParser, float>(params, runtime);
712 #else
713  ARMNN_LOG(fatal) << "Not built with Onnx parser support.";
714  return EXIT_FAILURE;
715 #endif
716  }
717  else if (modelFormat.find("tensorflow") != std::string::npos)
718  {
719 #if defined(ARMNN_TF_PARSER)
720  return MainImpl<armnnTfParser::ITfParser, float>(params, runtime);
721 #else
722  ARMNN_LOG(fatal) << "Not built with Tensorflow parser support.";
723  return EXIT_FAILURE;
724 #endif
725  }
726  else if(modelFormat.find("tflite") != std::string::npos)
727  {
728 #if defined(ARMNN_TF_LITE_PARSER)
729  if (! isModelBinary)
730  {
731  ARMNN_LOG(fatal) << "Unknown model format: '" << modelFormat << "'. Only 'binary' format supported \
732  for tflite files";
733  return EXIT_FAILURE;
734  }
735  return MainImpl<armnnTfLiteParser::ITfLiteParser, float>(params, runtime);
736 #else
737  ARMNN_LOG(fatal) << "Unknown model format: '" << modelFormat <<
738  "'. Please include 'caffe', 'tensorflow', 'tflite' or 'onnx'";
739  return EXIT_FAILURE;
740 #endif
741  }
742  else
743  {
744  ARMNN_LOG(fatal) << "Unknown model format: '" << modelFormat <<
745  "'. Please include 'caffe', 'tensorflow', 'tflite' or 'onnx'";
746  return EXIT_FAILURE;
747  }
748 }
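A direct call passes the already-parsed strings straight in; the sketch below is roughly equivalent to the command line "--model-format tflite-binary --model-path model.tflite --input-name input --input-tensor-data input.txt --output-name output --compute CpuRef", with placeholder paths and tensor names.

#include "NetworkExecutionUtils.hpp"

int RunTfLiteOnCpuRef()
{
    const std::vector<armnn::BackendId> devices = { armnn::Compute::CpuRef };

    return RunTest("tflite-binary",   // model format
                   "",                // input tensor shapes (optional, none given)
                   devices,
                   "",                // dynamic backends path (none)
                   "model.tflite",    // hypothetical model path
                   "input",           // comma-separated input tensor names
                   "input.txt",       // hypothetical input data file
                   "float",           // input types
                   false,             // quantizeInput
                   "float",           // output types
                   "output",          // comma-separated output tensor names
                   "",                // write-outputs-to-file paths (none)
                   false,             // dequantizeOuput
                   false,             // enableProfiling
                   false,             // enableFp16TurboMode
                   0.0,               // thresholdTime (0 = no threshold check)
                   false,             // printIntermediate
                   0);                // subgraphId
}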

Variable Documentation

◆ generateTensorData

bool generateTensorData = true

Definition at line 361 of file NetworkExecutionUtils.hpp.