//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "NetworkExecutionUtils/NetworkExecutionUtils.hpp"
#include "ExecuteNetworkProgramOptions.hpp"

#include <armnn/Logging.hpp>
#include <armnn/utility/NumericCast.hpp>
#include <InferenceTest.hpp>

#if defined(ARMNN_SERIALIZER)
#include "armnnDeserializer/IDeserializer.hpp"
#endif
#if defined(ARMNN_CAFFE_PARSER)
#include "armnnCaffeParser/ICaffeParser.hpp"
#endif
#if defined(ARMNN_TF_PARSER)
#include "armnnTfParser/ITfParser.hpp"
#endif
#if defined(ARMNN_TF_LITE_PARSER)
#include "armnnTfLiteParser/ITfLiteParser.hpp"
#endif
#if defined(ARMNN_ONNX_PARSER)
#include "armnnOnnxParser/IOnnxParser.hpp"
#endif
#if defined(ARMNN_TFLITE_DELEGATE)
#include <armnn_delegate.hpp>
#include <DelegateOptions.hpp>

#include <tensorflow/lite/builtin_ops.h>
#include <tensorflow/lite/c/builtin_op_data.h>
#include <tensorflow/lite/c/common.h>
#include <tensorflow/lite/optional_debug_tools.h>
#include <tensorflow/lite/kernels/builtin_op_kernels.h>
#include <tensorflow/lite/interpreter.h>
#include <tensorflow/lite/kernels/register.h>
#endif

#include <iomanip>
#include <iostream>

#if defined(ARMNN_TFLITE_DELEGATE)
int TfLiteDelegateMainImpl(const ExecuteNetworkParams& params,
                           const std::shared_ptr<armnn::IRuntime>& runtime = nullptr)
{
    using namespace tflite;

    // Load the TfLite flatbuffer model and build a TfLite interpreter for it.
    std::unique_ptr<tflite::FlatBufferModel> model =
        tflite::FlatBufferModel::BuildFromFile(params.m_ModelPath.c_str());

    auto tfLiteInterpreter = std::make_unique<Interpreter>();
    tflite::ops::builtin::BuiltinOpResolver resolver;

    tflite::InterpreterBuilder builder(*model, resolver);
    builder(&tfLiteInterpreter);
    tfLiteInterpreter->AllocateTensors();

    int status = 0;
    if (params.m_TfLiteExecutor == ExecuteNetworkParams::TfLiteExecutor::ArmNNTfLiteDelegate)
    {
        // Create the Armnn Delegate
        armnnDelegate::DelegateOptions delegateOptions(params.m_ComputeDevices);
        std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
            theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
                             armnnDelegate::TfLiteArmnnDelegateDelete);
        // Register armnn_delegate to TfLiteInterpreter
        status = tfLiteInterpreter->ModifyGraphWithDelegate(std::move(theArmnnDelegate));
        if (status == kTfLiteError)
        {
            ARMNN_LOG(fatal) << "Could not register ArmNN TfLite Delegate to TfLiteInterpreter!";
            return EXIT_FAILURE;
        }
    }
    else
    {
        std::cout << "Running on TfLite without ArmNN delegate\n";
    }

    std::vector<std::string> inputBindings;
    for (const std::string& inputName: params.m_InputNames)
    {
        inputBindings.push_back(inputName);
    }

    armnn::Optional<std::string> dataFile = params.m_GenerateTensorData ?
                                            armnn::EmptyOptional() :
                                            armnn::MakeOptional<std::string>(params.m_InputTensorDataFilePaths[0]);

    const size_t numInputs = inputBindings.size();

    // Populate each input tensor, parsing the data file (or generating data) as the requested type.
    for(unsigned int inputIndex = 0; inputIndex < numInputs; ++inputIndex)
    {
        int input = tfLiteInterpreter->inputs()[inputIndex];
        TfLiteIntArray* inputDims = tfLiteInterpreter->tensor(input)->dims;

        long inputSize = 1;
        for (unsigned int dim = 0; dim < static_cast<unsigned int>(inputDims->size); ++dim)
        {
            inputSize *= inputDims->data[dim];
        }

        if (params.m_InputTypes[inputIndex].compare("float") == 0)
        {
            auto inputData = tfLiteInterpreter->typed_tensor<float>(input);

            if(inputData == NULL)
            {
                ARMNN_LOG(fatal) << "Input tensor is null, input type: "
                                    "\"" << params.m_InputTypes[inputIndex] << "\" may be incorrect.";
                return EXIT_FAILURE;
            }

            std::vector<float> tensorData;
            PopulateTensorWithDataGeneric<float>(tensorData,
                                                 params.m_InputTensorShapes[inputIndex]->GetNumElements(),
                                                 dataFile,
                                                 [](const std::string& s)
                                                 { return std::stof(s); });

            std::copy(tensorData.begin(), tensorData.end(), inputData);
        }
        else if (params.m_InputTypes[inputIndex].compare("qsymms8") == 0)
        {
            auto inputData = tfLiteInterpreter->typed_tensor<int8_t>(input);

            if(inputData == NULL)
            {
                ARMNN_LOG(fatal) << "Input tensor is null, input type: "
                                    "\"" << params.m_InputTypes[inputIndex] << "\" may be incorrect.";
                return EXIT_FAILURE;
            }

            std::vector<int8_t> tensorData;
            PopulateTensorWithDataGeneric<int8_t>(tensorData,
                                                  params.m_InputTensorShapes[inputIndex]->GetNumElements(),
                                                  dataFile,
                                                  [](const std::string& s)
                                                  { return armnn::numeric_cast<int8_t>(std::stoi(s)); });

            std::copy(tensorData.begin(), tensorData.end(), inputData);
        }
        else if (params.m_InputTypes[inputIndex].compare("int") == 0)
        {
            auto inputData = tfLiteInterpreter->typed_tensor<int32_t>(input);

            if(inputData == NULL)
            {
                ARMNN_LOG(fatal) << "Input tensor is null, input type: "
                                    "\"" << params.m_InputTypes[inputIndex] << "\" may be incorrect.";
                return EXIT_FAILURE;
            }

            std::vector<int32_t> tensorData;
            PopulateTensorWithDataGeneric<int32_t>(tensorData,
                                                   params.m_InputTensorShapes[inputIndex]->GetNumElements(),
                                                   dataFile,
                                                   [](const std::string& s)
                                                   { return std::stoi(s); });

            std::copy(tensorData.begin(), tensorData.end(), inputData);
        }
        else if (params.m_InputTypes[inputIndex].compare("qasymm8") == 0)
        {
            auto inputData = tfLiteInterpreter->typed_tensor<uint8_t>(input);

            if(inputData == NULL)
            {
                ARMNN_LOG(fatal) << "Input tensor is null, input type: "
                                    "\"" << params.m_InputTypes[inputIndex] << "\" may be incorrect.";
                return EXIT_FAILURE;
            }

            std::vector<uint8_t> tensorData;
            PopulateTensorWithDataGeneric<uint8_t>(tensorData,
                                                   params.m_InputTensorShapes[inputIndex]->GetNumElements(),
                                                   dataFile,
                                                   [](const std::string& s)
                                                   { return armnn::numeric_cast<uint8_t>(std::stoi(s)); });

            std::copy(tensorData.begin(), tensorData.end(), inputData);
        }
        else
        {
            ARMNN_LOG(fatal) << "Unsupported input tensor data type \""
                             << params.m_InputTypes[inputIndex] << "\". ";
"; return EXIT_FAILURE; } } for (size_t x = 0; x < params.m_Iterations; x++) { // Run the inference status = tfLiteInterpreter->Invoke(); // Print out the output for (unsigned int outputIndex = 0; outputIndex < params.m_OutputNames.size(); ++outputIndex) { auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[outputIndex]; TfLiteIntArray* outputDims = tfLiteInterpreter->tensor(tfLiteDelegateOutputId)->dims; long outputSize = 1; for (unsigned int dim = 0; dim < static_cast(outputDims->size); ++dim) { outputSize *= outputDims->data[dim]; } std::cout << params.m_OutputNames[outputIndex] << ": "; if (params.m_OutputTypes[outputIndex].compare("float") == 0) { auto tfLiteDelageOutputData = tfLiteInterpreter->typed_tensor(tfLiteDelegateOutputId); if(tfLiteDelageOutputData == NULL) { ARMNN_LOG(fatal) << "Output tensor is null, output type: " "\"" << params.m_OutputTypes[outputIndex] << "\" may be incorrect."; return EXIT_FAILURE; } for (int i = 0; i < outputSize; ++i) { printf("%f ", tfLiteDelageOutputData[i]); } } else if (params.m_OutputTypes[outputIndex].compare("int") == 0) { auto tfLiteDelageOutputData = tfLiteInterpreter->typed_tensor(tfLiteDelegateOutputId); if(tfLiteDelageOutputData == NULL) { ARMNN_LOG(fatal) << "Output tensor is null, output type: " "\"" << params.m_OutputTypes[outputIndex] << "\" may be incorrect."; return EXIT_FAILURE; } for (int i = 0; i < outputSize; ++i) { printf("%d ", tfLiteDelageOutputData[i]); } } else if (params.m_OutputTypes[outputIndex].compare("qsymms8") == 0) { auto tfLiteDelageOutputData = tfLiteInterpreter->typed_tensor(tfLiteDelegateOutputId); if(tfLiteDelageOutputData == NULL) { ARMNN_LOG(fatal) << "Output tensor is null, output type: " "\"" << params.m_OutputTypes[outputIndex] << "\" may be incorrect."; return EXIT_FAILURE; } for (int i = 0; i < outputSize; ++i) { printf("%d ", tfLiteDelageOutputData[i]); } } else if (params.m_OutputTypes[outputIndex].compare("qasymm8") == 0) { auto tfLiteDelageOutputData = tfLiteInterpreter->typed_tensor(tfLiteDelegateOutputId); if(tfLiteDelageOutputData == NULL) { ARMNN_LOG(fatal) << "Output tensor is null, output type: " "\"" << params.m_OutputTypes[outputIndex] << "\" may be incorrect."; return EXIT_FAILURE; } for (int i = 0; i < outputSize; ++i) { printf("%u ", tfLiteDelageOutputData[i]); } } else { ARMNN_LOG(fatal) << "Output tensor is null, output type: " "\"" << params.m_OutputTypes[outputIndex] << "\" may be incorrect. Output type can be specified with -z argument"; return EXIT_FAILURE; } std::cout << std::endl; } } return status; } #endif template int MainImpl(const ExecuteNetworkParams& params, const std::shared_ptr& runtime = nullptr) { using TContainer = mapbox::util::variant, std::vector, std::vector, std::vector>; std::vector inputDataContainers; try { // Creates an InferenceModel, which will parse the model and load it into an IRuntime. 
        typename InferenceModel<TParser>::Params inferenceModelParams;
        inferenceModelParams.m_ModelPath                      = params.m_ModelPath;
        inferenceModelParams.m_IsModelBinary                  = params.m_IsModelBinary;
        inferenceModelParams.m_ComputeDevices                 = params.m_ComputeDevices;
        inferenceModelParams.m_DynamicBackendsPath            = params.m_DynamicBackendsPath;
        inferenceModelParams.m_PrintIntermediateLayers        = params.m_PrintIntermediate;
        inferenceModelParams.m_VisualizePostOptimizationModel = params.m_EnableLayerDetails;
        inferenceModelParams.m_ParseUnsupported               = params.m_ParseUnsupported;
        inferenceModelParams.m_InferOutputShape               = params.m_InferOutputShape;
        inferenceModelParams.m_EnableFastMath                 = params.m_EnableFastMath;
        inferenceModelParams.m_SaveCachedNetwork              = params.m_SaveCachedNetwork;
        inferenceModelParams.m_CachedNetworkFilePath          = params.m_CachedNetworkFilePath;
        inferenceModelParams.m_NumberOfThreads                = params.m_NumberOfThreads;
        inferenceModelParams.m_MLGOTuningFilePath             = params.m_MLGOTuningFilePath;

        for(const std::string& inputName: params.m_InputNames)
        {
            inferenceModelParams.m_InputBindings.push_back(inputName);
        }

        for(unsigned int i = 0; i < params.m_InputTensorShapes.size(); ++i)
        {
            inferenceModelParams.m_InputShapes.push_back(*params.m_InputTensorShapes[i]);
        }

        for(const std::string& outputName: params.m_OutputNames)
        {
            inferenceModelParams.m_OutputBindings.push_back(outputName);
        }

        inferenceModelParams.m_SubgraphId          = params.m_SubgraphId;
        inferenceModelParams.m_EnableFp16TurboMode = params.m_EnableFp16TurboMode;
        inferenceModelParams.m_EnableBf16TurboMode = params.m_EnableBf16TurboMode;

        InferenceModel<TParser, TDataType> model(inferenceModelParams,
                                                 params.m_EnableProfiling,
                                                 params.m_DynamicBackendsPath,
                                                 runtime);

        const size_t numInputs = inferenceModelParams.m_InputBindings.size();

        // Build one input container per binding, reading data from file or generating it.
        for(unsigned int i = 0; i < numInputs; ++i)
        {
            armnn::Optional<QuantizationParams> qParams = params.m_QuantizeInput ?
                                                          armnn::MakeOptional<QuantizationParams>(
                                                              model.GetInputQuantizationParams()) :
                                                          armnn::EmptyOptional();

            armnn::Optional<std::string> dataFile = params.m_GenerateTensorData ?
                                                    armnn::EmptyOptional() :
                                                    armnn::MakeOptional<std::string>(
                                                        params.m_InputTensorDataFilePaths[i]);

            unsigned int numElements = model.GetInputSize(i);
            if (params.m_InputTensorShapes.size() > i && params.m_InputTensorShapes[i])
            {
                // If the user has provided a tensor shape for the current input,
                // override numElements
                numElements = params.m_InputTensorShapes[i]->GetNumElements();
            }

            TContainer tensorData;
            PopulateTensorWithData(tensorData,
                                   numElements,
                                   params.m_InputTypes[i],
                                   qParams,
                                   dataFile);

            inputDataContainers.push_back(tensorData);
        }

        const size_t numOutputs = inferenceModelParams.m_OutputBindings.size();
        std::vector<TContainer> outputDataContainers;

        // Pre-allocate an output container of the requested type for each output binding.
        for (unsigned int i = 0; i < numOutputs; ++i)
        {
            if (params.m_OutputTypes[i].compare("float") == 0)
            {
                outputDataContainers.push_back(std::vector<float>(model.GetOutputSize(i)));
            }
            else if (params.m_OutputTypes[i].compare("int") == 0)
            {
                outputDataContainers.push_back(std::vector<int>(model.GetOutputSize(i)));
            }
            else if (params.m_OutputTypes[i].compare("qasymm8") == 0)
            {
                outputDataContainers.push_back(std::vector<uint8_t>(model.GetOutputSize(i)));
            }
            else if (params.m_OutputTypes[i].compare("qsymms8") == 0)
            {
                outputDataContainers.push_back(std::vector<int8_t>(model.GetOutputSize(i)));
            }
            else
            {
                ARMNN_LOG(fatal) << "Unsupported tensor data type \"" << params.m_OutputTypes[i] << "\". ";
"; return EXIT_FAILURE; } } for (size_t x = 0; x < params.m_Iterations; x++) { // model.Run returns the inference time elapsed in EnqueueWorkload (in milliseconds) auto inference_duration = model.Run(inputDataContainers, outputDataContainers); if (params.m_GenerateTensorData) { ARMNN_LOG(warning) << "The input data was generated, note that the output will not be useful"; } // Print output tensors const auto& infosOut = model.GetOutputBindingInfos(); for (size_t i = 0; i < numOutputs; i++) { const armnn::TensorInfo& infoOut = infosOut[i].second; auto outputTensorFile = params.m_OutputTensorFiles.empty() ? "" : params.m_OutputTensorFiles[i]; TensorPrinter printer(inferenceModelParams.m_OutputBindings[i], infoOut, outputTensorFile, params.m_DequantizeOutput); mapbox::util::apply_visitor(printer, outputDataContainers[i]); } ARMNN_LOG(info) << "\nInference time: " << std::setprecision(2) << std::fixed << inference_duration.count() << " ms\n"; // If thresholdTime == 0.0 (default), then it hasn't been supplied at command line if (params.m_ThresholdTime != 0.0) { ARMNN_LOG(info) << "Threshold time: " << std::setprecision(2) << std::fixed << params.m_ThresholdTime << " ms"; auto thresholdMinusInference = params.m_ThresholdTime - inference_duration.count(); ARMNN_LOG(info) << "Threshold time - Inference time: " << std::setprecision(2) << std::fixed << thresholdMinusInference << " ms" << "\n"; if (thresholdMinusInference < 0) { std::string errorMessage = "Elapsed inference time is greater than provided threshold time."; ARMNN_LOG(fatal) << errorMessage; } } } } catch (const armnn::Exception& e) { ARMNN_LOG(fatal) << "Armnn Error: " << e.what(); return EXIT_FAILURE; } return EXIT_SUCCESS; } // MAIN int main(int argc, const char* argv[]) { // Configures logging for both the ARMNN library and this test program. 
#ifdef NDEBUG
    armnn::LogSeverity level = armnn::LogSeverity::Info;
#else
    armnn::LogSeverity level = armnn::LogSeverity::Debug;
#endif
    armnn::ConfigureLogging(true, true, level);

    // Get ExecuteNetwork parameters and runtime options from command line
    ProgramOptions programOptions(argc, argv);

    // Create runtime
    std::shared_ptr<armnn::IRuntime> runtime(armnn::IRuntime::Create(programOptions.m_RuntimeOptions));

    std::string modelFormat = programOptions.m_ExNetParams.m_ModelFormat;

    // Forward to implementation based on the parser type
    if (modelFormat.find("armnn") != std::string::npos)
    {
    #if defined(ARMNN_SERIALIZER)
        return MainImpl<armnnDeserializer::IDeserializer, float>(programOptions.m_ExNetParams, runtime);
    #else
        ARMNN_LOG(fatal) << "Not built with serialization support.";
        return EXIT_FAILURE;
    #endif
    }
    else if (modelFormat.find("caffe") != std::string::npos)
    {
    #if defined(ARMNN_CAFFE_PARSER)
        return MainImpl<armnnCaffeParser::ICaffeParser, float>(programOptions.m_ExNetParams, runtime);
    #else
        ARMNN_LOG(fatal) << "Not built with Caffe parser support.";
        return EXIT_FAILURE;
    #endif
    }
    else if (modelFormat.find("onnx") != std::string::npos)
    {
    #if defined(ARMNN_ONNX_PARSER)
        return MainImpl<armnnOnnxParser::IOnnxParser, float>(programOptions.m_ExNetParams, runtime);
    #else
        ARMNN_LOG(fatal) << "Not built with Onnx parser support.";
        return EXIT_FAILURE;
    #endif
    }
    else if (modelFormat.find("tensorflow") != std::string::npos)
    {
    #if defined(ARMNN_TF_PARSER)
        return MainImpl<armnnTfParser::ITfParser, float>(programOptions.m_ExNetParams, runtime);
    #else
        ARMNN_LOG(fatal) << "Not built with Tensorflow parser support.";
        return EXIT_FAILURE;
    #endif
    }
    else if(modelFormat.find("tflite") != std::string::npos)
    {
        if (programOptions.m_ExNetParams.m_TfLiteExecutor == ExecuteNetworkParams::TfLiteExecutor::ArmNNTfLiteParser)
        {
        #if defined(ARMNN_TF_LITE_PARSER)
            return MainImpl<armnnTfLiteParser::ITfLiteParser, float>(programOptions.m_ExNetParams, runtime);
        #else
            ARMNN_LOG(fatal) << "Not built with Tensorflow-Lite parser support.";
            return EXIT_FAILURE;
        #endif
        }
        else if (programOptions.m_ExNetParams.m_TfLiteExecutor ==
                     ExecuteNetworkParams::TfLiteExecutor::ArmNNTfLiteDelegate ||
                 programOptions.m_ExNetParams.m_TfLiteExecutor ==
                     ExecuteNetworkParams::TfLiteExecutor::TfliteInterpreter)
        {
        #if defined(ARMNN_TFLITE_DELEGATE)
            return TfLiteDelegateMainImpl(programOptions.m_ExNetParams, runtime);
        #else
            ARMNN_LOG(fatal) << "Not built with Arm NN Tensorflow-Lite delegate support.";
            return EXIT_FAILURE;
        #endif
        }
    }
    else
    {
        ARMNN_LOG(fatal) << "Unknown model format: '" << modelFormat
                         << "'. Please include 'armnn', 'caffe', 'tensorflow', 'tflite' or 'onnx'";
        return EXIT_FAILURE;
    }
}
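
// Illustrative usage note (a sketch only): the option spellings below are assumptions based on the
// common ExecuteNetwork command line; the authoritative names and model-format strings are defined
// in ExecuteNetworkProgramOptions, and may differ between releases.
//
//   ExecuteNetwork -m mobilenet_v2.tflite -f tflite-binary -c CpuAcc -i input -o output
//
// would take the "tflite" branch in main() above and run on the CpuAcc backend; output tensor
// types other than float can be requested with the -z argument referenced in the log messages.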