#include <fmt/format.h>

// IsModelBinary(): deduce from the model-format string whether the model file is binary or text.
if (modelFormat.find("binary") != std::string::npos)
{
    return true;
}
else if (modelFormat.find("txt") != std::string::npos || modelFormat.find("text") != std::string::npos)
{
    return false;
}
// Any other value is rejected with an error asking the user to "Please include 'binary' or 'text'".
// CheckModelFormat(): verify that the requested model format is supported by this build.
if (modelFormat.find("armnn") != std::string::npos)
{
#if defined(ARMNN_SERIALIZER)
#else   // error: this executable was not "built with serialization support."
#endif
}
else if (modelFormat.find("onnx") != std::string::npos)
{
#if defined(ARMNN_ONNX_PARSER)
#else   // error: not "built with Onnx parser support."
#endif
}
else if (modelFormat.find("tflite") != std::string::npos)
{
#if defined(ARMNN_TF_LITE_PARSER)
    // only the 'binary' "format supported for tflite files"
#elif defined(ARMNN_TFLITE_DELEGATE)
#else   // error: not "built with Tensorflow Lite parser support."
#endif
}
// Any other format is rejected: "Please include 'tflite' or 'onnx'".
void CheckClTuningParameter(const int& tuningLevel,
                            const std::string& tuningPath,
                            const std::vector<armnn::BackendId> computeDevices)
{
    if (!tuningPath.empty())
    {
        if (tuningLevel == 0)
        {
            ARMNN_LOG(info) << "Using cl tuning file: " << tuningPath << "\n";
            // ... (the tuning file path is validated here)
        }
        else if ((1 <= tuningLevel) && (tuningLevel <= 3))
        {
            ARMNN_LOG(info) << "Starting execution to generate a cl tuning file: " << tuningPath << "\n"
                            << "Tuning level in use: " << tuningLevel << "\n";
        }
        else if ((0 < tuningLevel) || (tuningLevel > 3))
        {
            // ... (an out-of-range tuning level is reported as an error)
        }

        // CL tuning only takes effect when the GpuAcc backend is selected; warn if it is not.
        auto it = std::find(computeDevices.begin(), computeDevices.end(), "GpuAcc");
        if (it == computeDevices.end())
        {
            ARMNN_LOG(warning) << "To use Cl Tuning the compute device GpuAcc needs to be active.";
        }
    }
}
// Parameter validation: invalid entries in the list of preferred compute devices are reported first.
std::string invalidBackends;
// ...
ARMNN_LOG(fatal) << "The list of preferred devices contains invalid backend IDs: " << invalidBackends;
// Further checks reject combining the Fp16 and Bf16 turbo modes ("enabled at the same time.") and
// per-input option lists whose sizes differ ("the same amount of elements. ").
// Error messages for mismatches between the number of inputs and the number of input-tensor-data files:
fmt::format("According to the number of input names the user provided the network has {} "
            "inputs. But only {} input-tensor-data file paths were provided. Each input of the "
            "model is expected to be stored in it's own file.", ...);
// ...
fmt::format("According to the number of input names the user provided the network has {} "
            "inputs. The user specified {} input-tensor-data file paths which is not "
            "divisible by the number of inputs.", ...);
// ...
fmt::format("The number of input-tensor-data files ({0}) is not divisible by the "
            "number of inputs ({1} according to the number of input names).", ...);
std::stringstream errmsg;
// Error message for an unexpected number of output-tensor files:
fmt::format("The user provided {0} output-tensor files. The only allowed number of output-tensor "
            "files is the number of outputs of the network ({1} according to the number of "
            "output names) or the number of outputs multiplied with the number of times the "
            "network should be executed (NumOutputs * NumIterations = {1} * {2} = {3}).", ...);
ARMNN_LOG(warning) << "No input files provided, input tensors will be filled with 0s.";
#if defined(ARMNN_TFLITE_DELEGATE)
// ... (the delegate options object is populated from the parsed parameters)
filename.replace_extension("dot");   // the serialize-to-dot output file gets a "dot" extension
// ...
return delegateOptions;
#endif // ARMNN_TFLITE_DELEGATE
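Assuming filename is a filesystem path object, the replace_extension call has the semantics of std::filesystem::path::replace_extension; a minimal standalone sketch with an illustrative file name:

#include <filesystem>
#include <iostream>

int main()
{
    // Illustrative model file name; replace_extension("dot") swaps the extension,
    // giving a name ending in ".dot".
    std::filesystem::path filename = "model.tflite";
    filename.replace_extension("dot");
    std::cout << filename << '\n';   // prints "model.dot" (paths are printed quoted)
    return 0;
}

The code fragments above reference the declarations listed below.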
std::vector< std::string > m_InputTypes
ModelOptions m_ModelOptions
std::string m_MLGOTuningFilePath
ShapeInferenceMethod m_shapeInferenceMethod
std::vector< TensorShapePtr > m_InputTensorShapes
void SetSerializeToDot(const std::string &serializeToDotFile)
bool m_EnableFp16TurboMode
std::string m_DynamicBackendsPath
#define ARMNN_LOG(severity)
bool m_ReduceFp32ToBf16: Reduces all Fp32 operators in the model to Bf16 for faster processing.
std::vector< std::string > m_OutputNames
std::vector< std::string > m_OutputTensorFiles
void CheckClTuningParameter(const int &tuningLevel, const std::string &tuningPath, const std::vector< armnn::BackendId > computeDevices)
std::string m_CachedNetworkFilePath
std::vector< armnn::BackendId > m_ComputeDevices
bool m_ReduceFp32ToFp16: Reduces all Fp32 operators in the model to Fp16 for faster processing.
std::vector< std::string > m_OutputTypes
bool m_GenerateTensorData
unsigned int m_NumberOfThreads
std::vector< std::string > m_InputNames
Validate all output shapes.
void CheckModelFormat(const std::string &modelFormat)
bool ValidatePaths(const std::vector< std::string > &fileVec, const bool expectFile): Verifies that a given vector of strings contains valid paths.
ArmNN performs an optimization on each model/network before it gets loaded for execution.
bool m_EnableBf16TurboMode
std::vector< std::string > m_InputTensorDataFilePaths
bool IsModelBinary(const std::string &modelFormat)
Struct for the users to pass backend specific options.
bool m_EnableLayerDetails
void SetDynamicBackendsPath(const std::string &dynamicBackendsPath)
Infer missing output shapes and validate all output shapes.
void SetOptimizerOptions(const armnn::OptimizerOptions &optimizerOptions)
void SetGpuProfilingState(bool gpuProfilingState)
bool ValidatePath(const std::string &file, const bool expectFile): Verifies if the given string is a valid path.
std::string m_ModelFormat
void SetInternalProfilingParams(bool internalProfilingState, const armnn::ProfilingDetailsMethod &internalProfilingDetail)