From 5d03e31aaf4d82e9f9cdc03c41d2328bbb2a0dee Mon Sep 17 00:00:00 2001
From: Sadik Armagan
Date: Tue, 17 Nov 2020 16:43:56 +0000
Subject: IVGCVSW-5377 Add ArmNN TfLite delegate to ExecuteNetwork

* Added package manager to turn internal calls to find_package into a no-op
* Changed delegate cmake so it can now be built within armnn

Change-Id: I2a7ecb9a3c1ca05474cd1dccd91498f6f6c0b32e
Signed-off-by: Finn Williams
Signed-off-by: Sadik Armagan
---
 tests/ExecuteNetwork/ExecuteNetwork.cpp | 195 ++++++++++++++++++++++++++++++++
 1 file changed, 195 insertions(+)
(limited to 'tests/ExecuteNetwork/ExecuteNetwork.cpp')

diff --git a/tests/ExecuteNetwork/ExecuteNetwork.cpp b/tests/ExecuteNetwork/ExecuteNetwork.cpp
index c17eabd837..fa84a6ee4f 100644
--- a/tests/ExecuteNetwork/ExecuteNetwork.cpp
+++ b/tests/ExecuteNetwork/ExecuteNetwork.cpp
@@ -25,9 +25,194 @@
 #if defined(ARMNN_ONNX_PARSER)
 #include "armnnOnnxParser/IOnnxParser.hpp"
 #endif
+#if defined(ARMNN_TFLITE_DELEGATE)
+#include <armnn_delegate.hpp>
+#include <DelegateOptions.hpp>
+
+#include <tensorflow/lite/builtin_ops.h>
+#include <tensorflow/lite/c/builtin_op_data.h>
+#include <tensorflow/lite/c/common.h>
+#include <tensorflow/lite/optional_debug_tools.h>
+#include <tensorflow/lite/kernels/builtin_op_kernels.h>
+#include <tensorflow/lite/interpreter.h>
+#include <tensorflow/lite/kernels/register.h>
+#endif
 
 #include <future>
+#if defined(ARMNN_TFLITE_DELEGATE)
+int TfLiteDelegateMainImpl(const ExecuteNetworkParams& params,
+                           const std::shared_ptr<armnn::IRuntime>& runtime = nullptr)
+{
+    using namespace tflite;
+
+    std::unique_ptr<tflite::FlatBufferModel> model = tflite::FlatBufferModel::BuildFromFile(params.m_ModelPath.c_str());
+
+    auto tfLiteInterpreter = std::make_unique<Interpreter>();
+    tflite::ops::builtin::BuiltinOpResolver resolver;
+
+    tflite::InterpreterBuilder builder(*model, resolver);
+    builder(&tfLiteInterpreter);
+    tfLiteInterpreter->AllocateTensors();
+
+    // Create the Armnn Delegate
+    armnnDelegate::DelegateOptions delegateOptions(params.m_ComputeDevices);
+    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
+        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
+                         armnnDelegate::TfLiteArmnnDelegateDelete);
+    // Register armnn_delegate to TfLiteInterpreter
+    int status = tfLiteInterpreter->ModifyGraphWithDelegate(std::move(theArmnnDelegate));
+
+    std::vector<std::string> inputBindings;
+    for (const std::string& inputName: params.m_InputNames)
+    {
+        inputBindings.push_back(inputName);
+    }
+
+    armnn::Optional<std::string> dataFile = params.m_GenerateTensorData
+                                            ? armnn::EmptyOptional()
+                                            : armnn::MakeOptional<std::string>(params.m_InputTensorDataFilePaths[0]);
+
+    const size_t numInputs = inputBindings.size();
+
+    for(unsigned int inputIndex = 0; inputIndex < numInputs; ++inputIndex)
+    {
+        int input = tfLiteInterpreter->inputs()[inputIndex];
+        if (params.m_InputTypes[inputIndex].compare("float") == 0)
+        {
+            auto inputData = tfLiteInterpreter->typed_tensor<float>(input);
+            TContainer tensorData;
+            PopulateTensorWithData(tensorData,
+                                   params.m_InputTensorShapes[inputIndex]->GetNumElements(),
+                                   params.m_InputTypes[inputIndex],
+                                   armnn::EmptyOptional(),
+                                   dataFile);
+            inputData = reinterpret_cast<float*>(&tensorData);
+            armnn::IgnoreUnused(inputData);
+        }
+        else if (params.m_InputTypes[inputIndex].compare("int") == 0)
+        {
+            auto inputData = tfLiteInterpreter->typed_tensor<int32_t>(input);
+            TContainer tensorData;
+            PopulateTensorWithData(tensorData,
+                                   params.m_InputTensorShapes[inputIndex]->GetNumElements(),
+                                   params.m_InputTypes[inputIndex],
+                                   armnn::EmptyOptional(),
+                                   dataFile);
+            inputData = reinterpret_cast<int32_t*>(&tensorData);
+            armnn::IgnoreUnused(inputData);
+        }
+        else if (params.m_InputTypes[inputIndex].compare("qasymm8") == 0)
+        {
+            auto inputData = tfLiteInterpreter->typed_tensor<uint8_t>(input);
+            TContainer tensorData;
+            PopulateTensorWithData(tensorData,
+                                   params.m_InputTensorShapes[inputIndex]->GetNumElements(),
+                                   params.m_InputTypes[inputIndex],
+                                   armnn::EmptyOptional(),
+                                   dataFile);
+            inputData = reinterpret_cast<uint8_t*>(&tensorData);
+            armnn::IgnoreUnused(inputData);
+        }
+        else
+        {
+            ARMNN_LOG(fatal) << "Unsupported input tensor data type \"" << params.m_InputTypes[inputIndex] << "\". ";
+            return EXIT_FAILURE;
+        }
+    }
+
+    for (size_t x = 0; x < params.m_Iterations; x++)
+    {
+        // Run the inference
+        tfLiteInterpreter->Invoke();
+
+        // Print out the output
+        for (unsigned int outputIndex = 0; outputIndex < params.m_OutputNames.size(); ++outputIndex)
+        {
+            std::cout << "Printing out the output" << std::endl;
+            auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[outputIndex];
+            TfLiteIntArray *outputDims = tfLiteInterpreter->tensor(tfLiteDelegateOutputId)->dims;
+
+            int outputSize = 1;
+            for (unsigned int dim = 0; dim < static_cast<unsigned int>(outputDims->size); ++dim)
+            {
+                outputSize *= outputDims->data[dim];
+            }
+
+            std::cout << params.m_OutputNames[outputIndex] << ": ";
+            if (params.m_OutputTypes[outputIndex].compare("float") == 0)
+            {
+                auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<float>(tfLiteDelegateOutputId);
+
+                if(tfLiteDelegateOutputData == NULL)
+                {
+                    ARMNN_LOG(fatal) << "Output tensor is null, output type: "
+                                        "\"" << params.m_OutputTypes[outputIndex] << "\" may be incorrect.";
+                    return EXIT_FAILURE;
+                }
+
+                for (int i = 0; i < outputSize; ++i)
+                {
+                    std::cout << tfLiteDelegateOutputData[i] << ", ";
+                    if (i % 60 == 0)
+                    {
+                        std::cout << std::endl;
+                    }
+                }
+            }
+            else if (params.m_OutputTypes[outputIndex].compare("int") == 0)
+            {
+                auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<int32_t>(tfLiteDelegateOutputId);
+
+                if(tfLiteDelegateOutputData == NULL)
+                {
+                    ARMNN_LOG(fatal) << "Output tensor is null, output type: "
+                                        "\"" << params.m_OutputTypes[outputIndex] << "\" may be incorrect.";
+                    return EXIT_FAILURE;
+                }
+
+                for (int i = 0; i < outputSize; ++i)
+                {
+                    std::cout << tfLiteDelegateOutputData[i] << ", ";
+                    if (i % 60 == 0)
+                    {
+                        std::cout << std::endl;
+                    }
+                }
+            }
+            else if (params.m_OutputTypes[outputIndex].compare("qasymm8") == 0)
+            {
+                auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<uint8_t>(tfLiteDelegateOutputId);
+                if(tfLiteDelegateOutputData == NULL)
+                {
+                    ARMNN_LOG(fatal)
<< "Output tensor is null, output type: " + "\"" << params.m_OutputTypes[outputIndex] << "\" may be incorrect."; + return EXIT_FAILURE; + } + + for (int i = 0; i < outputSize; ++i) + { + std::cout << unsigned(tfLiteDelageOutputData[i]) << ", "; + if (i % 60 == 0) + { + std::cout << std::endl; + } + } + } + else + { + ARMNN_LOG(fatal) << "Output tensor is null, output type: " + "\"" << params.m_OutputTypes[outputIndex] << + "\" may be incorrect. Output type can be specified with -z argument"; + return EXIT_FAILURE; + } + std::cout << std::endl; + } + } + + return status; +} +#endif template int MainImpl(const ExecuteNetworkParams& params, const std::shared_ptr& runtime = nullptr) @@ -242,6 +427,16 @@ int main(int argc, const char* argv[]) } else if(modelFormat.find("tflite") != std::string::npos) { + + if (ProgramOptions.m_ExNetParams.m_EnableDelegate) + { + #if defined(ARMNN_TF_LITE_DELEGATE) + return TfLiteDelegateMainImpl(ProgramOptions.m_ExNetParams, runtime); + #else + ARMNN_LOG(fatal) << "Not built with Tensorflow-Lite parser support."; + return EXIT_FAILURE; + #endif + } #if defined(ARMNN_TF_LITE_PARSER) return MainImpl(ProgramOptions.m_ExNetParams, runtime); #else -- cgit v1.2.1