From 5d03e31aaf4d82e9f9cdc03c41d2328bbb2a0dee Mon Sep 17 00:00:00 2001
From: Sadik Armagan
Date: Tue, 17 Nov 2020 16:43:56 +0000
Subject: IVGCVSW-5377 Add ArmNN TfLite delegate to ExecuteNetwork

 * Added package manager to turn internal calls to find_package into a no-op
 * Changed delegate cmake so it can now be built within armnn

Change-Id: I2a7ecb9a3c1ca05474cd1dccd91498f6f6c0b32e
Signed-off-by: Finn Williams
Signed-off-by: Sadik Armagan
---
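Note: the "package manager" mentioned above refers to overriding find_package so
that calls for packages armnn provides in-tree become no-ops. The snippet below is
only a minimal sketch of that general technique with illustrative names; the actual
variable names, package list and file touched by the commit are not part of this
tests/-limited diff.

    # Hypothetical sketch: packages listed here are assumed to be provided by
    # the armnn build itself, so find_package() should not search for them.
    set(ARMNN_PROVIDED_PACKAGES TfLite ArmnnDelegate)

    # Overriding find_package() makes the built-in command available again as
    # _find_package(); forward every call except those for in-tree packages.
    macro(find_package PKG_NAME)
        if(NOT "${PKG_NAME}" IN_LIST ARMNN_PROVIDED_PACKAGES)
            _find_package(${ARGV})
        endif()
    endmacro()

With a delegate-enabled build, the new ExecuteNetwork option added by this patch is
expected to be passed as -D (or --armnn-tflite-delegate) alongside the usual
-m/-f/-c arguments.
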
 tests/CMakeLists.txt                          |  11 +-
 tests/ExecuteNetwork/ExecuteNetwork.cpp       | 195 +++++++++++++++++++++
 tests/ExecuteNetwork/ExecuteNetworkParams.cpp |   1 +
 tests/ExecuteNetwork/ExecuteNetworkParams.hpp |   1 +
 .../ExecuteNetworkProgramOptions.cpp          |   4 +
 5 files changed, 208 insertions(+), 4 deletions(-)

diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt
index 9d3b026687..edea34dfee 100644
--- a/tests/CMakeLists.txt
+++ b/tests/CMakeLists.txt
@@ -264,12 +264,15 @@ if (BUILD_ARMNN_SERIALIZER OR BUILD_CAFFE_PARSER OR BUILD_TF_PARSER OR BUILD_TF_
         target_link_libraries(ExecuteNetwork armnnTfLiteParser)
     endif()
     if (BUILD_ONNX_PARSER)
-        target_link_libraries(ExecuteNetwork armnnOnnxParser)
+        target_link_libraries(ExecuteNetwork armnnOnnxParser)
+    endif()
+    if (BUILD_ARMNN_TFLITE_DELEGATE)
+        target_link_libraries(ExecuteNetwork ArmnnDelegate::ArmnnDelegate)
     endif()
-    target_link_libraries(ExecuteNetwork armnn)
-    target_link_libraries(ExecuteNetwork ${CMAKE_THREAD_LIBS_INIT})
-    addDllCopyCommands(ExecuteNetwork)
+
+    target_link_libraries(ExecuteNetwork ${CMAKE_THREAD_LIBS_INIT})
+    addDllCopyCommands(ExecuteNetwork)
 endif()
 
 if(BUILD_ACCURACY_TOOL)
diff --git a/tests/ExecuteNetwork/ExecuteNetwork.cpp b/tests/ExecuteNetwork/ExecuteNetwork.cpp
index c17eabd837..fa84a6ee4f 100644
--- a/tests/ExecuteNetwork/ExecuteNetwork.cpp
+++ b/tests/ExecuteNetwork/ExecuteNetwork.cpp
@@ -25,9 +25,194 @@
 #if defined(ARMNN_ONNX_PARSER)
 #include "armnnOnnxParser/IOnnxParser.hpp"
 #endif
+#if defined(ARMNN_TFLITE_DELEGATE)
+#include <armnn_delegate.hpp>
+#include <DelegateOptions.hpp>
+
+#include <tensorflow/lite/builtin_ops.h>
+#include <tensorflow/lite/c/builtin_op_data.h>
+#include <tensorflow/lite/c/common.h>
+#include <tensorflow/lite/optional_debug_tools.h>
+#include <tensorflow/lite/kernels/builtin_op_kernels.h>
+#include <tensorflow/lite/interpreter.h>
+#include <tensorflow/lite/kernels/register.h>
+#endif
 
 #include <future>
+
+#if defined(ARMNN_TFLITE_DELEGATE)
+int TfLiteDelegateMainImpl(const ExecuteNetworkParams& params,
+                           const std::shared_ptr<armnn::IRuntime>& runtime = nullptr)
+{
+    using namespace tflite;
+
+    std::unique_ptr<tflite::FlatBufferModel> model = tflite::FlatBufferModel::BuildFromFile(params.m_ModelPath.c_str());
+
+    auto tfLiteInterpreter = std::make_unique<Interpreter>();
+    tflite::ops::builtin::BuiltinOpResolver resolver;
+
+    tflite::InterpreterBuilder builder(*model, resolver);
+    builder(&tfLiteInterpreter);
+    tfLiteInterpreter->AllocateTensors();
+
+    // Create the Armnn Delegate
+    armnnDelegate::DelegateOptions delegateOptions(params.m_ComputeDevices);
+    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
+                        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
+                                         armnnDelegate::TfLiteArmnnDelegateDelete);
+    // Register armnn_delegate to TfLiteInterpreter
+    int status = tfLiteInterpreter->ModifyGraphWithDelegate(std::move(theArmnnDelegate));
+
+    std::vector<std::string> inputBindings;
+    for (const std::string& inputName: params.m_InputNames)
+    {
+        inputBindings.push_back(inputName);
+    }
+
+    armnn::Optional<std::string> dataFile = params.m_GenerateTensorData
+                                            ? armnn::EmptyOptional()
+                                            : armnn::MakeOptional<std::string>(params.m_InputTensorDataFilePaths[0]);
+
+    const size_t numInputs = inputBindings.size();
+
+    for(unsigned int inputIndex = 0; inputIndex < numInputs; ++inputIndex)
+    {
+        int input = tfLiteInterpreter->inputs()[inputIndex];
+        if (params.m_InputTypes[inputIndex].compare("float") == 0)
+        {
+            auto inputData = tfLiteInterpreter->typed_tensor<float>(input);
+            TContainer tensorData;
+            PopulateTensorWithData(tensorData,
+                                   params.m_InputTensorShapes[inputIndex]->GetNumElements(),
+                                   params.m_InputTypes[inputIndex],
+                                   armnn::EmptyOptional(),
+                                   dataFile);
+            inputData = reinterpret_cast<float*>(&tensorData);
+            armnn::IgnoreUnused(inputData);
+        }
+        else if (params.m_InputTypes[inputIndex].compare("int") == 0)
+        {
+            auto inputData = tfLiteInterpreter->typed_tensor<int32_t>(input);
+            TContainer tensorData;
+            PopulateTensorWithData(tensorData,
+                                   params.m_InputTensorShapes[inputIndex]->GetNumElements(),
+                                   params.m_InputTypes[inputIndex],
+                                   armnn::EmptyOptional(),
+                                   dataFile);
+            inputData = reinterpret_cast<int32_t*>(&tensorData);
+            armnn::IgnoreUnused(inputData);
+        }
+        else if (params.m_InputTypes[inputIndex].compare("qasymm8") == 0)
+        {
+            auto inputData = tfLiteInterpreter->typed_tensor<uint8_t>(input);
+            TContainer tensorData;
+            PopulateTensorWithData(tensorData,
+                                   params.m_InputTensorShapes[inputIndex]->GetNumElements(),
+                                   params.m_InputTypes[inputIndex],
+                                   armnn::EmptyOptional(),
+                                   dataFile);
+            inputData = reinterpret_cast<uint8_t*>(&tensorData);
+            armnn::IgnoreUnused(inputData);
+        }
+        else
+        {
+            ARMNN_LOG(fatal) << "Unsupported input tensor data type \"" << params.m_InputTypes[inputIndex] << "\". ";
+            return EXIT_FAILURE;
+        }
+    }
+
+    for (size_t x = 0; x < params.m_Iterations; x++)
+    {
+        // Run the inference
+        tfLiteInterpreter->Invoke();
+
+        // Print out the output
+        for (unsigned int outputIndex = 0; outputIndex < params.m_OutputNames.size(); ++outputIndex)
+        {
+            std::cout << "Printing out the output" << std::endl;
+            auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[outputIndex];
+            TfLiteIntArray* outputDims = tfLiteInterpreter->tensor(tfLiteDelegateOutputId)->dims;
+
+            int outputSize = 1;
+            for (unsigned int dim = 0; dim < static_cast<unsigned int>(outputDims->size); ++dim)
+            {
+                outputSize *= outputDims->data[dim];
+            }
+
+            std::cout << params.m_OutputNames[outputIndex] << ": ";
+            if (params.m_OutputTypes[outputIndex].compare("float") == 0)
+            {
+                auto tfLiteDelageOutputData = tfLiteInterpreter->typed_tensor<float>(tfLiteDelegateOutputId);
+
+                if(tfLiteDelageOutputData == NULL)
+                {
+                    ARMNN_LOG(fatal) << "Output tensor is null, output type: "
+                                        "\"" << params.m_OutputTypes[outputIndex] << "\" may be incorrect.";
+                    return EXIT_FAILURE;
+                }
+
+                for (int i = 0; i < outputSize; ++i)
+                {
+                    std::cout << tfLiteDelageOutputData[i] << ", ";
+                    if (i % 60 == 0)
+                    {
+                        std::cout << std::endl;
+                    }
+                }
+            }
+            else if (params.m_OutputTypes[outputIndex].compare("int") == 0)
+            {
+                auto tfLiteDelageOutputData = tfLiteInterpreter->typed_tensor<int32_t>(tfLiteDelegateOutputId);
+
+                if(tfLiteDelageOutputData == NULL)
+                {
+                    ARMNN_LOG(fatal) << "Output tensor is null, output type: "
+                                        "\"" << params.m_OutputTypes[outputIndex] << "\" may be incorrect.";
+                    return EXIT_FAILURE;
+                }
+
+                for (int i = 0; i < outputSize; ++i)
+                {
+                    std::cout << tfLiteDelageOutputData[i] << ", ";
+                    if (i % 60 == 0)
+                    {
+                        std::cout << std::endl;
+                    }
+                }
+            }
+            else if (params.m_OutputTypes[outputIndex].compare("qasymm8") == 0)
+            {
+                auto tfLiteDelageOutputData = tfLiteInterpreter->typed_tensor<uint8_t>(tfLiteDelegateOutputId);
+                if(tfLiteDelageOutputData == NULL)
+                {
+                    ARMNN_LOG(fatal) << "Output tensor is null, output type: "
<< "Output tensor is null, output type: " + "\"" << params.m_OutputTypes[outputIndex] << "\" may be incorrect."; + return EXIT_FAILURE; + } + + for (int i = 0; i < outputSize; ++i) + { + std::cout << unsigned(tfLiteDelageOutputData[i]) << ", "; + if (i % 60 == 0) + { + std::cout << std::endl; + } + } + } + else + { + ARMNN_LOG(fatal) << "Output tensor is null, output type: " + "\"" << params.m_OutputTypes[outputIndex] << + "\" may be incorrect. Output type can be specified with -z argument"; + return EXIT_FAILURE; + } + std::cout << std::endl; + } + } + + return status; +} +#endif template int MainImpl(const ExecuteNetworkParams& params, const std::shared_ptr& runtime = nullptr) @@ -242,6 +427,16 @@ int main(int argc, const char* argv[]) } else if(modelFormat.find("tflite") != std::string::npos) { + + if (ProgramOptions.m_ExNetParams.m_EnableDelegate) + { + #if defined(ARMNN_TF_LITE_DELEGATE) + return TfLiteDelegateMainImpl(ProgramOptions.m_ExNetParams, runtime); + #else + ARMNN_LOG(fatal) << "Not built with Tensorflow-Lite parser support."; + return EXIT_FAILURE; + #endif + } #if defined(ARMNN_TF_LITE_PARSER) return MainImpl(ProgramOptions.m_ExNetParams, runtime); #else diff --git a/tests/ExecuteNetwork/ExecuteNetworkParams.cpp b/tests/ExecuteNetwork/ExecuteNetworkParams.cpp index a3a7f6a753..890ab2a658 100644 --- a/tests/ExecuteNetwork/ExecuteNetworkParams.cpp +++ b/tests/ExecuteNetwork/ExecuteNetworkParams.cpp @@ -74,6 +74,7 @@ void CheckModelFormat(const std::string& modelFormat) "format supported for tflite files", modelFormat)); } +#elif defined(ARMNN_TFLITE_DELEGATE) #else throw armnn::InvalidArgumentException("Can't run model in tflite format without a " "built with Tensorflow Lite parser support."); diff --git a/tests/ExecuteNetwork/ExecuteNetworkParams.hpp b/tests/ExecuteNetwork/ExecuteNetworkParams.hpp index 5490230ede..8f176c2fd6 100644 --- a/tests/ExecuteNetwork/ExecuteNetworkParams.hpp +++ b/tests/ExecuteNetwork/ExecuteNetworkParams.hpp @@ -24,6 +24,7 @@ struct ExecuteNetworkParams bool m_EnableProfiling; bool m_GenerateTensorData; bool m_InferOutputShape = false; + bool m_EnableDelegate = false; std::vector m_InputNames; std::vector m_InputTensorDataFilePaths; std::vector m_InputTensorShapes; diff --git a/tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp b/tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp index e37c4eb42e..b499289f61 100644 --- a/tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp +++ b/tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp @@ -168,6 +168,10 @@ ProgramOptions::ProgramOptions() : m_CxxOptions{"ExecuteNetwork", "tensorflow-text.", cxxopts::value()) + ("D,armnn-tflite-delegate", + "enable Arm NN TfLite delegate", + cxxopts::value(m_ExNetParams.m_EnableDelegate)->default_value("false")->implicit_value("true")) + ("m,model-path", "Path to model file, e.g. .armnn, .caffemodel, .prototxt, .tflite, .onnx", cxxopts::value(m_ExNetParams.m_ModelPath)) -- cgit v1.2.1