ArmNN
 20.11
ArmnnConverter.cpp File Reference
#include <armnn/Logging.hpp>
#include <HeapProfiling.hpp>
#include <armnn/utility/NumericCast.hpp>
#include <armnn/utility/StringUtils.hpp>
#include <cxxopts/cxxopts.hpp>
#include <fmt/format.h>
#include <cstdlib>
#include <fstream>
#include <iostream>

Go to the source code of this file.

Macros

#define CXXOPTS_VECTOR_DELIMITER   '.'
 

Functions

int main (int argc, char *argv[])
 

Macro Definition Documentation

◆ CXXOPTS_VECTOR_DELIMITER

#define CXXOPTS_VECTOR_DELIMITER   '.'

Definition at line 33 of file ArmnnConverter.cpp.

Function Documentation

◆ main()

int main ( int  argc,
char *  argv[] 
)

Definition at line 336 of file ArmnnConverter.cpp.

References ARMNN_LOG, armnn::ConfigureLogging(), armnn::Debug, armnn::Info, and Exception::what().

337 {
338 
339 #if (!defined(ARMNN_CAFFE_PARSER) \
340  && !defined(ARMNN_ONNX_PARSER) \
341  && !defined(ARMNN_TF_PARSER) \
342  && !defined(ARMNN_TF_LITE_PARSER))
343  ARMNN_LOG(fatal) << "Not built with any of the supported parsers, Caffe, Onnx, Tensorflow, or TfLite.";
344  return EXIT_FAILURE;
345 #endif
346 
347 #if !defined(ARMNN_SERIALIZER)
348  ARMNN_LOG(fatal) << "Not built with Serializer support.";
349  return EXIT_FAILURE;
350 #endif
351 
352 #ifdef NDEBUG
353  armnn::LogSeverity level = armnn::LogSeverity::Info;
354 #else
355  armnn::LogSeverity level = armnn::LogSeverity::Debug;
356 #endif
357 
358  armnn::ConfigureLogging(true, true, level);
359 
360  std::string modelFormat;
361  std::string modelPath;
362 
363  std::vector<std::string> inputNames;
364  std::vector<std::string> inputTensorShapeStrs;
365  std::vector<armnn::TensorShape> inputTensorShapes;
366 
367  std::vector<std::string> outputNames;
368  std::string outputPath;
369 
370  bool isModelBinary = true;
371 
372  if (ParseCommandLineArgs(
373  argc, argv, modelFormat, modelPath, inputNames, inputTensorShapeStrs, outputNames, outputPath, isModelBinary)
374  != EXIT_SUCCESS)
375  {
376  return EXIT_FAILURE;
377  }
378 
379  for (const std::string& shapeStr : inputTensorShapeStrs)
380  {
381  if (!shapeStr.empty())
382  {
383  std::stringstream ss(shapeStr);
384 
385  try
386  {
387  armnn::TensorShape shape = ParseTensorShape(ss);
388  inputTensorShapes.push_back(shape);
389  }
390  catch (const armnn::InvalidArgumentException& e)
391  {
392  ARMNN_LOG(fatal) << "Cannot create tensor shape: " << e.what();
393  return EXIT_FAILURE;
394  }
395  }
396  }
397 
398  ArmnnConverter converter(modelPath, inputNames, inputTensorShapes, outputNames, outputPath, isModelBinary);
399 
400  try
401  {
402  if (modelFormat.find("caffe") != std::string::npos)
403  {
404 #if defined(ARMNN_CAFFE_PARSER)
405  if (!converter.CreateNetwork<armnnCaffeParser::ICaffeParser>())
406  {
407  ARMNN_LOG(fatal) << "Failed to load model from file";
408  return EXIT_FAILURE;
409  }
410 #else
411  ARMNN_LOG(fatal) << "Not built with Caffe parser support.";
412  return EXIT_FAILURE;
413 #endif
414  }
415  else if (modelFormat.find("onnx") != std::string::npos)
416  {
417 #if defined(ARMNN_ONNX_PARSER)
418  if (!converter.CreateNetwork<armnnOnnxParser::IOnnxParser>())
419  {
420  ARMNN_LOG(fatal) << "Failed to load model from file";
421  return EXIT_FAILURE;
422  }
423 #else
424  ARMNN_LOG(fatal) << "Not built with Onnx parser support.";
425  return EXIT_FAILURE;
426 #endif
427  }
428  else if (modelFormat.find("tensorflow") != std::string::npos)
429  {
430 #if defined(ARMNN_TF_PARSER)
431  if (!converter.CreateNetwork<armnnTfParser::ITfParser>())
432  {
433  ARMNN_LOG(fatal) << "Failed to load model from file";
434  return EXIT_FAILURE;
435  }
436 #else
437  ARMNN_LOG(fatal) << "Not built with Tensorflow parser support.";
438  return EXIT_FAILURE;
439 #endif
440  }
441  else if (modelFormat.find("tflite") != std::string::npos)
442  {
443 #if defined(ARMNN_TF_LITE_PARSER)
444  if (!isModelBinary)
445  {
446  ARMNN_LOG(fatal) << "Unknown model format: '" << modelFormat << "'. Only 'binary' format supported \
447  for tflite files";
448  return EXIT_FAILURE;
449  }
450 
451  if (!converter.CreateNetwork<armnnTfLiteParser::ITfLiteParser>())
452  {
453  ARMNN_LOG(fatal) << "Failed to load model from file";
454  return EXIT_FAILURE;
455  }
456 #else
457  ARMNN_LOG(fatal) << "Not built with TfLite parser support.";
458  return EXIT_FAILURE;
459 #endif
460  }
461  else
462  {
463  ARMNN_LOG(fatal) << "Unknown model format: '" << modelFormat << "'";
464  return EXIT_FAILURE;
465  }
466  }
467  catch(armnn::Exception& e)
468  {
469  ARMNN_LOG(fatal) << "Failed to load model from file: " << e.what();
470  return EXIT_FAILURE;
471  }
472 
473  if (!converter.Serialize())
474  {
475  ARMNN_LOG(fatal) << "Failed to serialize model";
476  return EXIT_FAILURE;
477  }
478 
479  return EXIT_SUCCESS;
480 }
void ConfigureLogging(bool printToStandardOutput, bool printToDebugOutput, LogSeverity severity)
Configures the logging behaviour of the ARMNN library.
Definition: Utils.cpp:10
virtual const char * what() const noexcept override
Definition: Exceptions.cpp:32
#define ARMNN_LOG(severity)
Definition: Logging.hpp:163
Parses a directed acyclic graph from a tensorflow protobuf file.
Definition: ITfParser.hpp:25
Base class for all ArmNN exceptions so that users can filter to just those.
Definition: Exceptions.hpp:46
LogSeverity
Definition: Utils.hpp:12