ArmNN
NetworkExecutionUtils.hpp File Reference
#include <armnn/ArmNN.hpp>
#include <armnn/TypesUtils.hpp>
#include "CsvReader.hpp"
#include "../InferenceTest.hpp"
#include <Profiling.hpp>
#include <ResolveType.hpp>
#include <boost/algorithm/string/trim.hpp>
#include <boost/algorithm/string/split.hpp>
#include <boost/algorithm/string/classification.hpp>
#include <boost/program_options.hpp>
#include <boost/variant.hpp>
#include <iostream>
#include <fstream>
#include <functional>
#include <future>
#include <algorithm>
#include <iterator>

Go to the source code of this file.

Classes

struct  ExecuteNetworkParams
 

Functions

template<typename TParser , typename TDataType >
int MainImpl (const ExecuteNetworkParams &params, const std::shared_ptr< armnn::IRuntime > &runtime=nullptr)
 
int RunTest (const std::string &format, const std::string &inputTensorShapesStr, const vector< armnn::BackendId > &computeDevices, const std::string &dynamicBackendsPath, const std::string &path, const std::string &inputNames, const std::string &inputTensorDataFilePaths, const std::string &inputTypes, bool quantizeInput, const std::string &outputTypes, const std::string &outputNames, const std::string &outputTensorFiles, bool enableProfiling, bool enableFp16TurboMode, const double &thresholdTime, bool printIntermediate, const size_t subgraphId, bool enableLayerDetails=false, bool parseUnsupported=false, const std::shared_ptr< armnn::IRuntime > &runtime=nullptr)
 
int RunCsvTest (const armnnUtils::CsvRow &csvRow, const std::shared_ptr< armnn::IRuntime > &runtime, const bool enableProfiling, const bool enableFp16TurboMode, const double &thresholdTime, const bool printIntermediate, bool enableLayerDetails=false, bool parseUnsupported=false)
 

Variables

bool generateTensorData = true
 

Function Documentation

◆ MainImpl()

int MainImpl ( const ExecuteNetworkParams &  params,
const std::shared_ptr< armnn::IRuntime > &  runtime = nullptr 
)

Definition at line 377 of file NetworkExecutionUtils.hpp.

References ARMNN_LOG, InferenceModel< IParser, TDataType >::GetInputQuantizationParams(), InferenceModel< IParser, TDataType >::GetInputSize(), InferenceModel< IParser, TDataType >::GetOutputBindingInfos(), InferenceModel< IParser, TDataType >::GetOutputSize(), Params::m_ComputeDevices, ExecuteNetworkParams::m_ComputeDevices, Params::m_DynamicBackendsPath, ExecuteNetworkParams::m_DynamicBackendsPath, Params::m_EnableFp16TurboMode, ExecuteNetworkParams::m_EnableFp16TurboMode, ExecuteNetworkParams::m_EnableLayerDetails, ExecuteNetworkParams::m_EnableProfiling, ExecuteNetworkParams::m_GenerateTensorData, Params::m_InputBindings, ExecuteNetworkParams::m_InputNames, Params::m_InputShapes, ExecuteNetworkParams::m_InputTensorDataFilePaths, ExecuteNetworkParams::m_InputTensorShapes, ExecuteNetworkParams::m_InputTypes, Params::m_IsModelBinary, ExecuteNetworkParams::m_IsModelBinary, Params::m_ModelPath, ExecuteNetworkParams::m_ModelPath, Params::m_OutputBindings, ExecuteNetworkParams::m_OutputNames, ExecuteNetworkParams::m_OutputTensorFiles, ExecuteNetworkParams::m_OutputTypes, Params::m_ParseUnsupported, ExecuteNetworkParams::m_ParseUnsupported, ExecuteNetworkParams::m_PrintIntermediate, Params::m_PrintIntermediateLayers, ExecuteNetworkParams::m_QuantizeInput, Params::m_SubgraphId, ExecuteNetworkParams::m_SubgraphId, ExecuteNetworkParams::m_ThresholdTime, Params::m_VisualizePostOptimizationModel, InferenceModel< IParser, TDataType >::Run(), and Exception::what().

379 {
380  using TContainer = boost::variant<std::vector<float>, std::vector<int>, std::vector<unsigned char>>;
381 
382  std::vector<TContainer> inputDataContainers;
383 
384  try
385  {
386  // Creates an InferenceModel, which will parse the model and load it into an IRuntime.
387  typename InferenceModel<TParser, TDataType>::Params inferenceModelParams;
388  inferenceModelParams.m_ModelPath = params.m_ModelPath;
389  inferenceModelParams.m_IsModelBinary = params.m_IsModelBinary;
390  inferenceModelParams.m_ComputeDevices = params.m_ComputeDevices;
391  inferenceModelParams.m_DynamicBackendsPath = params.m_DynamicBackendsPath;
392  inferenceModelParams.m_PrintIntermediateLayers = params.m_PrintIntermediate;
393  inferenceModelParams.m_VisualizePostOptimizationModel = params.m_EnableLayerDetails;
394  inferenceModelParams.m_ParseUnsupported = params.m_ParseUnsupported;
395 
396  for(const std::string& inputName: params.m_InputNames)
397  {
398  inferenceModelParams.m_InputBindings.push_back(inputName);
399  }
400 
401  for(unsigned int i = 0; i < params.m_InputTensorShapes.size(); ++i)
402  {
403  inferenceModelParams.m_InputShapes.push_back(*params.m_InputTensorShapes[i]);
404  }
405 
406  for(const std::string& outputName: params.m_OutputNames)
407  {
408  inferenceModelParams.m_OutputBindings.push_back(outputName);
409  }
410 
411  inferenceModelParams.m_SubgraphId = params.m_SubgraphId;
412  inferenceModelParams.m_EnableFp16TurboMode = params.m_EnableFp16TurboMode;
413 
414  InferenceModel<TParser, TDataType> model(inferenceModelParams,
415  params.m_EnableProfiling,
416  params.m_DynamicBackendsPath,
417  runtime);
418 
419  const size_t numInputs = inferenceModelParams.m_InputBindings.size();
420  for(unsigned int i = 0; i < numInputs; ++i)
421  {
422  armnn::Optional<QuantizationParams> qParams = params.m_QuantizeInput ?
423  armnn::MakeOptional<QuantizationParams>(model.GetInputQuantizationParams()) :
424  armnn::EmptyOptional();
425 
426  armnn::Optional<std::string> dataFile = params.m_GenerateTensorData ?
427  armnn::EmptyOptional() :
428  armnn::MakeOptional<std::string>(params.m_InputTensorDataFilePaths[i]);
429 
430  unsigned int numElements = model.GetInputSize(i);
431  if (params.m_InputTensorShapes.size() > i && params.m_InputTensorShapes[i])
432  {
433  // If the user has provided a tensor shape for the current input,
434  // override numElements
435  numElements = params.m_InputTensorShapes[i]->GetNumElements();
436  }
437 
438  TContainer tensorData;
439  PopulateTensorWithData(tensorData,
440  numElements,
441  params.m_InputTypes[i],
442  qParams,
443  dataFile);
444 
445  inputDataContainers.push_back(tensorData);
446  }
447 
448  const size_t numOutputs = inferenceModelParams.m_OutputBindings.size();
449  std::vector<TContainer> outputDataContainers;
450 
451  for (unsigned int i = 0; i < numOutputs; ++i)
452  {
453  if (params.m_OutputTypes[i].compare("float") == 0)
454  {
455  outputDataContainers.push_back(std::vector<float>(model.GetOutputSize(i)));
456  }
457  else if (params.m_OutputTypes[i].compare("int") == 0)
458  {
459  outputDataContainers.push_back(std::vector<int>(model.GetOutputSize(i)));
460  }
461  else if (params.m_OutputTypes[i].compare("qasymm8") == 0)
462  {
463  outputDataContainers.push_back(std::vector<uint8_t>(model.GetOutputSize(i)));
464  }
465  else
466  {
467  ARMNN_LOG(fatal) << "Unsupported tensor data type \"" << params.m_OutputTypes[i] << "\". ";
468  return EXIT_FAILURE;
469  }
470  }
471 
472  // model.Run returns the inference time elapsed in EnqueueWorkload (in milliseconds)
473  auto inference_duration = model.Run(inputDataContainers, outputDataContainers);
474 
475  if (params.m_GenerateTensorData)
476  {
477  ARMNN_LOG(warning) << "The input data was generated, note that the output will not be useful";
478  }
479 
480  // Print output tensors
481  const auto& infosOut = model.GetOutputBindingInfos();
482  for (size_t i = 0; i < numOutputs; i++)
483  {
484  const armnn::TensorInfo& infoOut = infosOut[i].second;
485  auto outputTensorFile = params.m_OutputTensorFiles.empty() ? "" : params.m_OutputTensorFiles[i];
486 
487  TensorPrinter printer(inferenceModelParams.m_OutputBindings[i], infoOut, outputTensorFile);
488  boost::apply_visitor(printer, outputDataContainers[i]);
489  }
490 
491  ARMNN_LOG(info) << "\nInference time: " << std::setprecision(2)
492  << std::fixed << inference_duration.count() << " ms";
493 
494  // If thresholdTime == 0.0 (default), then it hasn't been supplied at command line
495  if (params.m_ThresholdTime != 0.0)
496  {
497  ARMNN_LOG(info) << "Threshold time: " << std::setprecision(2)
498  << std::fixed << params.m_ThresholdTime << " ms";
499  auto thresholdMinusInference = params.m_ThresholdTime - inference_duration.count();
500  ARMNN_LOG(info) << "Threshold time - Inference time: " << std::setprecision(2)
501  << std::fixed << thresholdMinusInference << " ms" << "\n";
502 
503  if (thresholdMinusInference < 0)
504  {
505  std::string errorMessage = "Elapsed inference time is greater than provided threshold time.";
506  ARMNN_LOG(fatal) << errorMessage;
507  }
508  }
509  }
510  catch (armnn::Exception const& e)
511  {
512  ARMNN_LOG(fatal) << "Armnn Error: " << e.what();
513  return EXIT_FAILURE;
514  }
515 
516  return EXIT_SUCCESS;
517 }
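A minimal usage sketch (not part of this header, and with placeholder model path and tensor names): one way MainImpl() could be called directly for a binary TfLite model, assuming ArmNN was built with the TfLite parser. In the tool itself these parameters are filled in by RunTest() from the command-line options.

    ExecuteNetworkParams params;
    params.m_ModelPath           = "model.tflite";             // placeholder path
    params.m_IsModelBinary       = true;                       // .tflite models are binary
    params.m_ComputeDevices      = { armnn::Compute::CpuRef }; // reference backend only
    params.m_DynamicBackendsPath = "";
    params.m_InputNames          = { "input" };                // placeholder binding names
    params.m_OutputNames         = { "output" };
    params.m_InputTypes          = { "float" };
    params.m_OutputTypes         = { "float" };
    params.m_QuantizeInput       = false;
    params.m_GenerateTensorData  = true;                       // no data files, so inputs are filled with dummy data
    params.m_EnableProfiling     = false;
    params.m_EnableFp16TurboMode = false;
    params.m_PrintIntermediate   = false;
    params.m_EnableLayerDetails  = false;
    params.m_ParseUnsupported    = false;
    params.m_SubgraphId          = 0;
    params.m_ThresholdTime       = 0.0;                        // 0.0 means no threshold check

    // The template arguments select the parser and the input data type;
    // the runtime argument is left at its nullptr default.
    int status = MainImpl<armnnTfLiteParser::ITfLiteParser, float>(params);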

◆ RunCsvTest()

int RunCsvTest ( const armnnUtils::CsvRow &  csvRow,
const std::shared_ptr< armnn::IRuntime > &  runtime,
const bool  enableProfiling,
const bool  enableFp16TurboMode,
const double &  thresholdTime,
const bool  printIntermediate,
bool  enableLayerDetails = false,
bool  parseUnsupported = false 
)

Definition at line 731 of file NetworkExecutionUtils.hpp.

References ARMNN_LOG, armnn::BackendRegistryInstance(), BackendRegistry::GetBackendIdsAsString(), RunTest(), and CsvRow::values.

Referenced by main().

734 {
735  boost::ignore_unused(runtime);
736  std::string modelFormat;
737  std::string modelPath;
738  std::string inputNames;
739  std::string inputTensorShapes;
740  std::string inputTensorDataFilePaths;
741  std::string outputNames;
742  std::string inputTypes;
743  std::string outputTypes;
744  std::string dynamicBackendsPath;
745  std::string outputTensorFiles;
746 
747  size_t subgraphId = 0;
748 
749  const std::string backendsMessage = std::string("The preferred order of devices to run layers on by default. ")
750  + std::string("Possible choices: ")
751  + armnn::BackendRegistryInstance().GetBackendIdsAsString();
752 
753  po::options_description desc("Options");
754  try
755  {
756  desc.add_options()
757  ("model-format,f", po::value(&modelFormat),
758  "armnn-binary, caffe-binary, caffe-text, tflite-binary, onnx-binary, onnx-text, tensorflow-binary or "
759  "tensorflow-text.")
760  ("model-path,m", po::value(&modelPath), "Path to model file, e.g. .armnn, .caffemodel, .prototxt, "
761  ".tflite, .onnx")
762  ("compute,c", po::value<std::vector<armnn::BackendId>>()->multitoken(),
763  backendsMessage.c_str())
764  ("dynamic-backends-path,b", po::value(&dynamicBackendsPath),
765  "Path where to load any available dynamic backend from. "
766  "If left empty (the default), dynamic backends will not be used.")
767  ("input-name,i", po::value(&inputNames), "Identifier of the input tensors in the network separated by comma.")
768  ("subgraph-number,n", po::value<size_t>(&subgraphId)->default_value(0), "Id of the subgraph to be "
769  "executed. Defaults to 0.")
770  ("input-tensor-shape,s", po::value(&inputTensorShapes),
771  "The shape of the input tensors in the network as a flat array of integers separated by comma. "
772  "Several shapes can be passed separating them by semicolon. "
773  "This parameter is optional, depending on the network.")
774  ("input-tensor-data,d", po::value(&inputTensorDataFilePaths)->default_value(""),
775  "Path to files containing the input data as a flat array separated by whitespace. "
776  "Several paths can be passed separating them by comma. If not specified, the network will be run with dummy "
777  "data (useful for profiling).")
778  ("input-type,y",po::value(&inputTypes), "The type of the input tensors in the network separated by comma. "
779  "If unset, defaults to \"float\" for all defined inputs. "
780  "Accepted values (float, int or qasymm8).")
781  ("quantize-input,q",po::bool_switch()->default_value(false),
782  "If this option is enabled, all float inputs will be quantized to qasymm8. "
783  "If unset, default to not quantized. "
784  "Accepted values (true or false)")
785  ("output-type,z",po::value(&outputTypes), "The type of the output tensors in the network separated by comma. "
786  "If unset, defaults to \"float\" for all defined outputs. "
787  "Accepted values (float, int or qasymm8).")
788  ("output-name,o", po::value(&outputNames),
789  "Identifier of the output tensors in the network separated by comma.")
790  ("write-outputs-to-file,w", po::value(&outputTensorFiles),
791  "Comma-separated list of output file paths keyed with the binding-id of the output slot. "
792  "If left empty (the default), the output tensors will not be written to a file.");
793  }
794  catch (const std::exception& e)
795  {
796  // Coverity points out that default_value(...) can throw a bad_lexical_cast,
797  // and that desc.add_options() can throw boost::io::too_few_args.
798  // They really won't in any of these cases.
799  BOOST_ASSERT_MSG(false, "Caught unexpected exception");
800  ARMNN_LOG(fatal) << "Fatal internal error: " << e.what();
801  return EXIT_FAILURE;
802  }
803 
804  std::vector<const char*> clOptions;
805  clOptions.reserve(csvRow.values.size());
806  for (const std::string& value : csvRow.values)
807  {
808  clOptions.push_back(value.c_str());
809  }
810 
811  po::variables_map vm;
812  try
813  {
814  po::store(po::parse_command_line(static_cast<int>(clOptions.size()), clOptions.data(), desc), vm);
815 
816  po::notify(vm);
817 
818  CheckOptionDependencies(vm);
819  }
820  catch (const po::error& e)
821  {
822  std::cerr << e.what() << std::endl << std::endl;
823  std::cerr << desc << std::endl;
824  return EXIT_FAILURE;
825  }
826 
827  // Get the value of the switch arguments.
828  bool quantizeInput = vm["quantize-input"].as<bool>();
829 
830  // Get the preferred order of compute devices.
831  std::vector<armnn::BackendId> computeDevices = vm["compute"].as<std::vector<armnn::BackendId>>();
832 
833  // Remove duplicates from the list of compute devices.
834  RemoveDuplicateDevices(computeDevices);
835 
836  // Check that the specified compute devices are valid.
837  std::string invalidBackends;
838  if (!CheckRequestedBackendsAreValid(computeDevices, armnn::Optional<std::string&>(invalidBackends)))
839  {
840  ARMNN_LOG(fatal) << "The list of preferred devices contains invalid backend IDs: "
841  << invalidBackends;
842  return EXIT_FAILURE;
843  }
844 
845  return RunTest(modelFormat, inputTensorShapes, computeDevices, dynamicBackendsPath, modelPath, inputNames,
846  inputTensorDataFilePaths, inputTypes, quantizeInput, outputTypes, outputNames, outputTensorFiles,
847  enableProfiling, enableFp16TurboMode, thresholdTime, printIntermediate, subgraphId,
848  enableLayerDetails, parseUnsupported);
849 }
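An illustrative sketch (an assumption, not taken from the sources): building a CsvRow by hand and passing it to RunCsvTest(). The option tokens mirror the options registered above, and the model path is a placeholder. Because Boost's parse_command_line() treats the first token as the program name, the row starts with a dummy token.

    armnnUtils::CsvRow row;
    row.values = {
        "ExecuteNetwork",                 // skipped: treated as argv[0] by parse_command_line()
        "--model-format", "tflite-binary",
        "--model-path",   "model.tflite", // placeholder path
        "--compute",      "CpuRef",
        "--input-name",   "input",
        "--output-name",  "output"
    };

    // The runtime argument is not used by RunCsvTest itself (see boost::ignore_unused above).
    int status = RunCsvTest(row, nullptr,
                            false,        // enableProfiling
                            false,        // enableFp16TurboMode
                            0.0,          // thresholdTime (0.0 disables the check)
                            false);       // printIntermediate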

◆ RunTest()

int RunTest ( const std::string &  format,
const std::string &  inputTensorShapesStr,
const vector< armnn::BackendId > &  computeDevices,
const std::string &  dynamicBackendsPath,
const std::string &  path,
const std::string &  inputNames,
const std::string &  inputTensorDataFilePaths,
const std::string &  inputTypes,
bool  quantizeInput,
const std::string &  outputTypes,
const std::string &  outputNames,
const std::string &  outputTensorFiles,
bool  enableProfiling,
bool  enableFp16TurboMode,
const double &  thresholdTime,
bool  printIntermediate,
const size_t  subgraphId,
bool  enableLayerDetails = false,
bool  parseUnsupported = false,
const std::shared_ptr< armnn::IRuntime > &  runtime = nullptr 
)

Definition at line 520 of file NetworkExecutionUtils.hpp.

References ARMNN_LOG, ExecuteNetworkParams::m_ComputeDevices, ExecuteNetworkParams::m_DynamicBackendsPath, ExecuteNetworkParams::m_EnableFp16TurboMode, ExecuteNetworkParams::m_EnableLayerDetails, ExecuteNetworkParams::m_EnableProfiling, ExecuteNetworkParams::m_GenerateTensorData, ExecuteNetworkParams::m_InputNames, ExecuteNetworkParams::m_InputTensorDataFilePaths, ExecuteNetworkParams::m_InputTensorShapes, ExecuteNetworkParams::m_InputTypes, ExecuteNetworkParams::m_IsModelBinary, ExecuteNetworkParams::m_ModelPath, ExecuteNetworkParams::m_OutputNames, ExecuteNetworkParams::m_OutputTensorFiles, ExecuteNetworkParams::m_OutputTypes, ExecuteNetworkParams::m_ParseUnsupported, ExecuteNetworkParams::m_PrintIntermediate, ExecuteNetworkParams::m_QuantizeInput, ExecuteNetworkParams::m_SubgraphId, ExecuteNetworkParams::m_ThresholdTime, and Exception::what().

Referenced by BOOST_FIXTURE_TEST_CASE(), main(), and RunCsvTest().

540 {
541  std::string modelFormat = boost::trim_copy(format);
542  std::string modelPath = boost::trim_copy(path);
543  std::vector<std::string> inputNamesVector = ParseStringList(inputNames, ",");
544  std::vector<std::string> inputTensorShapesVector = ParseStringList(inputTensorShapesStr, ":");
545  std::vector<std::string> inputTensorDataFilePathsVector = ParseStringList(
546  inputTensorDataFilePaths, ",");
547  std::vector<std::string> outputNamesVector = ParseStringList(outputNames, ",");
548  std::vector<std::string> inputTypesVector = ParseStringList(inputTypes, ",");
549  std::vector<std::string> outputTypesVector = ParseStringList(outputTypes, ",");
550  std::vector<std::string> outputTensorFilesVector = ParseStringList(outputTensorFiles, ",");
551 
552  // Parse model binary flag from the model-format string we got from the command-line
553  bool isModelBinary;
554  if (modelFormat.find("bin") != std::string::npos)
555  {
556  isModelBinary = true;
557  }
558  else if (modelFormat.find("txt") != std::string::npos || modelFormat.find("text") != std::string::npos)
559  {
560  isModelBinary = false;
561  }
562  else
563  {
564  ARMNN_LOG(fatal) << "Unknown model format: '" << modelFormat << "'. Please include 'binary' or 'text'";
565  return EXIT_FAILURE;
566  }
567 
568  if ((inputTensorShapesVector.size() != 0) && (inputTensorShapesVector.size() != inputNamesVector.size()))
569  {
570  ARMNN_LOG(fatal) << "input-name and input-tensor-shape must have the same amount of elements.";
571  return EXIT_FAILURE;
572  }
573 
574  if ((inputTensorDataFilePathsVector.size() != 0) &&
575  (inputTensorDataFilePathsVector.size() != inputNamesVector.size()))
576  {
577  ARMNN_LOG(fatal) << "input-name and input-tensor-data must have the same amount of elements.";
578  return EXIT_FAILURE;
579  }
580 
581  if ((outputTensorFilesVector.size() != 0) &&
582  (outputTensorFilesVector.size() != outputNamesVector.size()))
583  {
584  ARMNN_LOG(fatal) << "output-name and write-outputs-to-file must have the same amount of elements.";
585  return EXIT_FAILURE;
586  }
587 
588  if (inputTypesVector.size() == 0)
589  {
590  //Defaults the value of all inputs to "float"
591  inputTypesVector.assign(inputNamesVector.size(), "float");
592  }
593  else if ((inputTypesVector.size() != 0) && (inputTypesVector.size() != inputNamesVector.size()))
594  {
595  ARMNN_LOG(fatal) << "input-name and input-type must have the same amount of elements.";
596  return EXIT_FAILURE;
597  }
598 
599  if (outputTypesVector.size() == 0)
600  {
601  //Defaults the value of all outputs to "float"
602  outputTypesVector.assign(outputNamesVector.size(), "float");
603  }
604  else if ((outputTypesVector.size() != 0) && (outputTypesVector.size() != outputNamesVector.size()))
605  {
606  ARMNN_LOG(fatal) << "output-name and output-type must have the same amount of elements.";
607  return EXIT_FAILURE;
608  }
609 
610  // Parse input tensor shape from the string we got from the command-line.
611  std::vector<std::unique_ptr<armnn::TensorShape>> inputTensorShapes;
612 
613  if (!inputTensorShapesVector.empty())
614  {
615  inputTensorShapes.reserve(inputTensorShapesVector.size());
616 
617  for(const std::string& shape : inputTensorShapesVector)
618  {
619  std::stringstream ss(shape);
620  std::vector<unsigned int> dims = ParseArray(ss);
621 
622  try
623  {
624  // Coverity fix: An exception of type armnn::InvalidArgumentException is thrown and never caught.
625  inputTensorShapes.push_back(std::make_unique<armnn::TensorShape>(dims.size(), dims.data()));
626  }
627  catch (const armnn::InvalidArgumentException& e)
628  {
629  ARMNN_LOG(fatal) << "Cannot create tensor shape: " << e.what();
630  return EXIT_FAILURE;
631  }
632  }
633  }
634 
635  // Check that threshold time is not less than zero
636  if (thresholdTime < 0)
637  {
638  ARMNN_LOG(fatal) << "Threshold time supplied as a command line argument is less than zero.";
639  return EXIT_FAILURE;
640  }
641 
642  ExecuteNetworkParams params;
643  params.m_ModelPath = modelPath.c_str();
644  params.m_IsModelBinary = isModelBinary;
645  params.m_ComputeDevices = computeDevices;
646  params.m_DynamicBackendsPath = dynamicBackendsPath;
647  params.m_InputNames = inputNamesVector;
648  params.m_InputTensorShapes = std::move(inputTensorShapes);
649  params.m_InputTensorDataFilePaths = inputTensorDataFilePathsVector;
650  params.m_InputTypes = inputTypesVector;
651  params.m_QuantizeInput = quantizeInput;
652  params.m_OutputTypes = outputTypesVector;
653  params.m_OutputNames = outputNamesVector;
654  params.m_OutputTensorFiles = outputTensorFilesVector;
655  params.m_EnableProfiling = enableProfiling;
656  params.m_EnableFp16TurboMode = enableFp16TurboMode;
657  params.m_ThresholdTime = thresholdTime;
658  params.m_PrintIntermediate = printIntermediate;
659  params.m_SubgraphId = subgraphId;
660  params.m_EnableLayerDetails = enableLayerDetails;
661  params.m_GenerateTensorData = inputTensorDataFilePathsVector.empty();
662  params.m_ParseUnsupported = parseUnsupported;
663 
664  // Warn if ExecuteNetwork will generate dummy input data
665  if (params.m_GenerateTensorData)
666  {
667  ARMNN_LOG(warning) << "No input files provided, input tensors will be filled with 0s.";
668  }
669 
670  // Forward to implementation based on the parser type
671  if (modelFormat.find("armnn") != std::string::npos)
672  {
673 #if defined(ARMNN_SERIALIZER)
674  return MainImpl<armnnDeserializer::IDeserializer, float>(params, runtime);
675 #else
676  ARMNN_LOG(fatal) << "Not built with serialization support.";
677  return EXIT_FAILURE;
678 #endif
679  }
680  else if (modelFormat.find("caffe") != std::string::npos)
681  {
682 #if defined(ARMNN_CAFFE_PARSER)
683  return MainImpl<armnnCaffeParser::ICaffeParser, float>(params, runtime);
684 #else
685  ARMNN_LOG(fatal) << "Not built with Caffe parser support.";
686  return EXIT_FAILURE;
687 #endif
688  }
689  else if (modelFormat.find("onnx") != std::string::npos)
690 {
691 #if defined(ARMNN_ONNX_PARSER)
692  return MainImpl<armnnOnnxParser::IOnnxParser, float>(params, runtime);
693 #else
694  ARMNN_LOG(fatal) << "Not built with Onnx parser support.";
695  return EXIT_FAILURE;
696 #endif
697  }
698  else if (modelFormat.find("tensorflow") != std::string::npos)
699  {
700 #if defined(ARMNN_TF_PARSER)
701  return MainImpl<armnnTfParser::ITfParser, float>(params, runtime);
702 #else
703  ARMNN_LOG(fatal) << "Not built with Tensorflow parser support.";
704  return EXIT_FAILURE;
705 #endif
706  }
707  else if(modelFormat.find("tflite") != std::string::npos)
708  {
709 #if defined(ARMNN_TF_LITE_PARSER)
710  if (! isModelBinary)
711  {
712  ARMNN_LOG(fatal) << "Unknown model format: '" << modelFormat << "'. Only 'binary' format supported \
713  for tflite files";
714  return EXIT_FAILURE;
715  }
716  return MainImpl<armnnTfLiteParser::ITfLiteParser, float>(params, runtime);
717 #else
718  ARMNN_LOG(fatal) << "Unknown model format: '" << modelFormat <<
719  "'. Please include 'caffe', 'tensorflow', 'tflite' or 'onnx'";
720  return EXIT_FAILURE;
721 #endif
722  }
723  else
724  {
725  ARMNN_LOG(fatal) << "Unknown model format: '" << modelFormat <<
726  "'. Please include 'caffe', 'tensorflow', 'tflite' or 'onnx'";
727  return EXIT_FAILURE;
728  }
729 }
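A usage sketch (an assumption, not taken from the sources, with placeholder paths and tensor names): a single direct call to RunTest() for a binary TfLite model on CpuAcc with a CpuRef fallback, letting the tool generate dummy input data. This requires an ArmNN build with the TfLite parser.

    std::vector<armnn::BackendId> computeDevices = { armnn::Compute::CpuAcc, armnn::Compute::CpuRef };

    int status = RunTest("tflite-binary", // format
                         "",              // inputTensorShapesStr: take the shapes from the model
                         computeDevices,
                         "",              // dynamicBackendsPath: no dynamic backends
                         "model.tflite",  // path (placeholder)
                         "input",         // inputNames (placeholder)
                         "",              // inputTensorDataFilePaths: empty, so dummy data is generated
                         "float",         // inputTypes
                         false,           // quantizeInput
                         "float",         // outputTypes
                         "output",        // outputNames (placeholder)
                         "",              // outputTensorFiles: do not write outputs to file
                         false,           // enableProfiling
                         false,           // enableFp16TurboMode
                         0.0,             // thresholdTime: no threshold check
                         false,           // printIntermediate
                         0);              // subgraphId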

Variable Documentation

◆ generateTensorData

bool generateTensorData = true

Definition at line 348 of file NetworkExecutionUtils.hpp.