author    Sadik Armagan <sadik.armagan@arm.com>    2020-11-17 16:43:56 +0000
committer Finn Williams <Finn.Williams@arm.com>    2020-11-17 19:56:11 +0000
commit    5d03e31aaf4d82e9f9cdc03c41d2328bbb2a0dee (patch)
tree      6af2bfa7747ee5461d00bee7da6b0224fd4f9b6f
parent    a40b434e2e3afdbf2254ef735758244bd840ef77 (diff)
download  armnn-5d03e31aaf4d82e9f9cdc03c41d2328bbb2a0dee.tar.gz
IVGCVSW-5377 Add ArmNN TfLite delegate to ExecuteNetwork
* Added a find_package override macro to turn internal calls to find_package
  into a no-op
* Changed the delegate CMake files so the delegate can now be built within armnn

Change-Id: I2a7ecb9a3c1ca05474cd1dccd91498f6f6c0b32e
Signed-off-by: Finn Williams <Finn.Williams@arm.com>
Signed-off-by: Sadik Armagan <sadik.armagan@arm.com>
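The first bullet relies on a long-standing CMake behaviour: a user-defined macro named find_package shadows the built-in command, and CMake keeps the original reachable as _find_package. Any package listed in as_subproject is expected to come from the current tree, where add_subdirectory() has already created its targets, so the call safely becomes a no-op. A minimal consumer-side sketch of the pattern; the project and target names other than Armnn::Armnn are illustrative and not part of this commit:

    # Sketch: a consumer that builds both standalone and inside the Arm NN tree.
    cmake_minimum_required(VERSION 3.7)
    project(DelegateConsumer)

    # Standalone build: loads the installed ArmnnConfig.cmake.
    # In-tree build: the find_package override in Arm NN's top-level
    # CMakeLists.txt turns this call into a no-op, because
    # add_subdirectory() already created the Armnn::Armnn target.
    find_package(Armnn REQUIRED CONFIG)

    add_library(my_delegate SHARED my_delegate.cpp)
    target_link_libraries(my_delegate PUBLIC Armnn::Armnn)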
-rw-r--r--  CMakeLists.txt                                         22
-rw-r--r--  cmake/GlobalConfig.cmake                                4
-rw-r--r--  delegate/CMakeLists.txt                                 2
-rw-r--r--  delegate/cmake/Modules/ArmnnDelegateConfig.cmake.in     2
-rw-r--r--  delegate/include/armnn_delegate.hpp                     4
-rw-r--r--  delegate/src/Transpose.hpp                              2
-rw-r--r--  tests/CMakeLists.txt                                   11
-rw-r--r--  tests/ExecuteNetwork/ExecuteNetwork.cpp               195
-rw-r--r--  tests/ExecuteNetwork/ExecuteNetworkParams.cpp           1
-rw-r--r--  tests/ExecuteNetwork/ExecuteNetworkParams.hpp           1
-rw-r--r--  tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp   4
11 files changed, 240 insertions(+), 8 deletions(-)
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 30b03dce04..cee3c2a020 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -17,6 +17,16 @@ foreach(cmake_file ${additional_cmake_files})
include(${cmake_file})
endforeach()
+cmake_policy(SET CMP0057 NEW)
+
+set(as_subproject Armnn)
+
+macro(find_package)
+ if(NOT ${ARGV0} IN_LIST as_subproject)
+ _find_package(${ARGV})
+ endif()
+endmacro()
+
if (DYNAMIC_BACKEND_PATHS)
# It's expected to have the format: DYNAMIC_BACKEND_PATHS="PATH_1:PATH_2...:PATH_N"
add_definitions('-DDYNAMIC_BACKEND_PATHS="${DYNAMIC_BACKEND_PATHS}"')
@@ -29,6 +39,15 @@ add_subdirectory(src/armnnTfLiteParser)
add_subdirectory(src/armnnSerializer)
add_subdirectory(src/armnnDeserializer)
+
+if (BUILD_ARMNN_TFLITE_DELEGATE)
+
+ list(APPEND CMAKE_MODULE_PATH ${CMAKE_CURRENT_SOURCE_DIR}/delegate/cmake/Modules)
+ add_subdirectory(delegate)
+
+ add_definitions(-DARMNN_TF_LITE_DELEGATE)
+endif()
+
if (BUILD_TESTS)
add_subdirectory(tests)
endif()
@@ -1164,6 +1183,9 @@ export(
NAMESPACE Armnn::
)
+add_library(Armnn::Armnn ALIAS armnn)
+add_library(Armnn::armnnUtils ALIAS armnnUtils)
+
####################################################
## Build Python bindings
if (BUILD_PYTHON_WHL OR BUILD_PYTHON_SRC)
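The ALIAS targets added at the end of CMakeLists.txt above are the second half of the mechanism: an installed Arm NN exports its targets under the Armnn:: namespace, so an in-tree build has to provide the same names for consumers such as the delegate. add_library(<namespace>::<name> ALIAS <target>) creates a read-only second name for an existing target. A minimal sketch of the pairing, assuming a hypothetical static library foo exported under the namespace Foo:: (all names illustrative):

    add_library(foo STATIC foo.cpp)

    # Installed consumers get Foo::foo from the exported targets file.
    install(TARGETS foo EXPORT foo-targets ARCHIVE DESTINATION lib)
    install(EXPORT foo-targets NAMESPACE Foo:: DESTINATION lib/cmake/foo)

    # In-tree consumers get the very same name through an ALIAS, so
    # target_link_libraries(app PRIVATE Foo::foo) works in both setups.
    add_library(Foo::foo ALIAS foo)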
diff --git a/cmake/GlobalConfig.cmake b/cmake/GlobalConfig.cmake
index 360f06860e..843ad6b639 100644
--- a/cmake/GlobalConfig.cmake
+++ b/cmake/GlobalConfig.cmake
@@ -35,6 +35,7 @@ option(BUILD_PYTHON_WHL "Build Python wheel package" OFF)
option(BUILD_PYTHON_SRC "Build Python source package" OFF)
option(BUILD_STATIC_PIPE_LIBS "Build Static PIPE libraries" OFF)
option(BUILD_PIPE_ONLY "Build the PIPE libraries only" OFF)
+option(BUILD_ARMNN_TFLITE_DELEGATE "Build the Arm NN TfLite delegate" OFF)
include(SelectLibraryConfigurations)
@@ -210,6 +211,9 @@ if(BUILD_ONNX_PARSER)
include_directories(SYSTEM "${ONNX_GENERATED_SOURCES}")
endif()
+if(BUILD_ARMNN_TFLITE_DELEGATE)
+ add_definitions(-DARMNN_TFLITE_DELEGATE)
+endif()
# Flatbuffers support for TF Lite and Armnn Serializer
if(BUILD_TF_LITE_PARSER OR BUILD_ARMNN_SERIALIZER)
# verify we have a valid flatbuffers include path
diff --git a/delegate/CMakeLists.txt b/delegate/CMakeLists.txt
index 595784f37a..d53af88105 100644
--- a/delegate/CMakeLists.txt
+++ b/delegate/CMakeLists.txt
@@ -195,5 +195,7 @@ export(
EXPORT armnn-delegate-targets
FILE ${CMAKE_CURRENT_BINARY_DIR}/ArmnnDelegateTargets.cmake
NAMESPACE ArmnnDelegate::)
+add_library(ArmnnDelegate::ArmnnDelegate ALIAS armnnDelegate)
+
####################################################
diff --git a/delegate/cmake/Modules/ArmnnDelegateConfig.cmake.in b/delegate/cmake/Modules/ArmnnDelegateConfig.cmake.in
index 12d1161afa..c403068db8 100644
--- a/delegate/cmake/Modules/ArmnnDelegateConfig.cmake.in
+++ b/delegate/cmake/Modules/ArmnnDelegateConfig.cmake.in
@@ -9,6 +9,8 @@ MESSAGE(STATUS "Found ArmnnDelegate: ${ARMNN_DELEGATE_CONFIG_FILE}")
include(CMakeFindDependencyMacro)
+find_dependency(Armnn REQUIRED CONFIG)
+
list(APPEND CMAKE_MODULE_PATH ${ARMNN_DELEGATE_CMAKE_DIR})
if(NOT TARGET ArmnnDelegate::ArmnnDelegate)
diff --git a/delegate/include/armnn_delegate.hpp b/delegate/include/armnn_delegate.hpp
index 6f18185d7b..adf264aabb 100644
--- a/delegate/include/armnn_delegate.hpp
+++ b/delegate/include/armnn_delegate.hpp
@@ -3,8 +3,7 @@
// SPDX-License-Identifier: MIT
//
-#ifndef ARMNN_TFLITE_DELEGATE
-#define ARMNN_TFLITE_DELEGATE
+#pragma once
#include "DelegateOptions.hpp"
@@ -114,5 +113,4 @@ private:
} // armnnDelegate namespace
-#endif // ARMNN_TFLITE_DELEGATE
diff --git a/delegate/src/Transpose.hpp b/delegate/src/Transpose.hpp
index dd79bd3bee..f3c348a1c2 100644
--- a/delegate/src/Transpose.hpp
+++ b/delegate/src/Transpose.hpp
@@ -64,7 +64,7 @@ TfLiteStatus VisitTransposeOperator(DelegateData& delegateData,
auto* permTensorDataPtr = tflite::GetTensorData<int32_t>(&tfLiteInputTensor1);
unsigned int numEl = tfLiteInputTensor1.dims->data[0];
- ARMNN_ASSERT( numEl <= armnn::MaxNumOfTensorDimensions);
+ ARMNN_ASSERT( numEl <= static_cast<int>(armnn::MaxNumOfTensorDimensions));
ARMNN_ASSERT( tfLiteInputTensor1.dims->size == 1); // ensure only single dimension to the permutation tensor
armnn::TransposeDescriptor descriptor(armnn::PermutationVector(
diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt
index 9d3b026687..edea34dfee 100644
--- a/tests/CMakeLists.txt
+++ b/tests/CMakeLists.txt
@@ -264,12 +264,15 @@ if (BUILD_ARMNN_SERIALIZER OR BUILD_CAFFE_PARSER OR BUILD_TF_PARSER OR BUILD_TF_
target_link_libraries(ExecuteNetwork armnnTfLiteParser)
endif()
if (BUILD_ONNX_PARSER)
- target_link_libraries(ExecuteNetwork armnnOnnxParser)
+ target_link_libraries(ExecuteNetwork armnnOnnxParser)
+ endif()
+ if (BUILD_ARMNN_TFLITE_DELEGATE)
+ target_link_libraries(ExecuteNetwork ArmnnDelegate::ArmnnDelegate)
endif()
-
target_link_libraries(ExecuteNetwork armnn)
- target_link_libraries(ExecuteNetwork ${CMAKE_THREAD_LIBS_INIT})
- addDllCopyCommands(ExecuteNetwork)
+
+ target_link_libraries(ExecuteNetwork ${CMAKE_THREAD_LIBS_INIT})
+ addDllCopyCommands(ExecuteNetwork)
endif()
if(BUILD_ACCURACY_TOOL)
diff --git a/tests/ExecuteNetwork/ExecuteNetwork.cpp b/tests/ExecuteNetwork/ExecuteNetwork.cpp
index c17eabd837..fa84a6ee4f 100644
--- a/tests/ExecuteNetwork/ExecuteNetwork.cpp
+++ b/tests/ExecuteNetwork/ExecuteNetwork.cpp
@@ -25,9 +25,194 @@
#if defined(ARMNN_ONNX_PARSER)
#include "armnnOnnxParser/IOnnxParser.hpp"
#endif
+#if defined(ARMNN_TFLITE_DELEGATE)
+#include <armnn_delegate.hpp>
+#include <DelegateOptions.hpp>
+
+#include <tensorflow/lite/builtin_ops.h>
+#include <tensorflow/lite/c/builtin_op_data.h>
+#include <tensorflow/lite/c/common.h>
+#include <tensorflow/lite/optional_debug_tools.h>
+#include <tensorflow/lite/kernels/builtin_op_kernels.h>
+#include <tensorflow/lite/interpreter.h>
+#include <tensorflow/lite/kernels/register.h>
+#endif
#include <future>
+#if defined(ARMNN_TFLITE_DELEGATE)
+int TfLiteDelegateMainImpl(const ExecuteNetworkParams& params,
+ const std::shared_ptr<armnn::IRuntime>& runtime = nullptr)
+{
+ using namespace tflite;
+
+ std::unique_ptr<tflite::FlatBufferModel> model = tflite::FlatBufferModel::BuildFromFile(params.m_ModelPath.c_str());
+
+ auto tfLiteInterpreter = std::make_unique<Interpreter>();
+ tflite::ops::builtin::BuiltinOpResolver resolver;
+
+ tflite::InterpreterBuilder builder(*model, resolver);
+ builder(&tfLiteInterpreter);
+ tfLiteInterpreter->AllocateTensors();
+
+ // Create the Armnn Delegate
+ armnnDelegate::DelegateOptions delegateOptions(params.m_ComputeDevices);
+ std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
+ theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
+ armnnDelegate::TfLiteArmnnDelegateDelete);
+ // Register armnn_delegate to TfLiteInterpreter
+ int status = tfLiteInterpreter->ModifyGraphWithDelegate(std::move(theArmnnDelegate));
+
+ std::vector<std::string> inputBindings;
+ for (const std::string& inputName: params.m_InputNames)
+ {
+ inputBindings.push_back(inputName);
+ }
+
+ armnn::Optional<std::string> dataFile = params.m_GenerateTensorData
+ ? armnn::EmptyOptional()
+ : armnn::MakeOptional<std::string>(params.m_InputTensorDataFilePaths[0]);
+
+ const size_t numInputs = inputBindings.size();
+
+ for(unsigned int inputIndex = 0; inputIndex < numInputs; ++inputIndex)
+ {
+ int input = tfLiteInterpreter->inputs()[inputIndex];
+ if (params.m_InputTypes[inputIndex].compare("float") == 0)
+ {
+ auto inputData = tfLiteInterpreter->typed_tensor<float>(input);
+ TContainer tensorData;
+ PopulateTensorWithData(tensorData,
+ params.m_InputTensorShapes[inputIndex]->GetNumElements(),
+ params.m_InputTypes[inputIndex],
+ armnn::EmptyOptional(),
+ dataFile);
+ inputData = reinterpret_cast<float*>(&tensorData);
+ armnn::IgnoreUnused(inputData);
+ }
+ else if (params.m_InputTypes[inputIndex].compare("int") == 0)
+ {
+ auto inputData = tfLiteInterpreter->typed_tensor<int32_t>(input);
+ TContainer tensorData;
+ PopulateTensorWithData(tensorData,
+ params.m_InputTensorShapes[inputIndex]->GetNumElements(),
+ params.m_InputTypes[inputIndex],
+ armnn::EmptyOptional(),
+ dataFile);
+ inputData = reinterpret_cast<int32_t*>(&tensorData);
+ armnn::IgnoreUnused(inputData);
+ }
+ else if (params.m_InputTypes[inputIndex].compare("qasymm8") == 0)
+ {
+ auto inputData = tfLiteInterpreter->typed_tensor<uint8_t>(input);
+ TContainer tensorData;
+ PopulateTensorWithData(tensorData,
+ params.m_InputTensorShapes[inputIndex]->GetNumElements(),
+ params.m_InputTypes[inputIndex],
+ armnn::EmptyOptional(),
+ dataFile);
+ inputData = reinterpret_cast<uint8_t*>(&tensorData);
+ armnn::IgnoreUnused(inputData);
+ }
+ else
+ {
+ ARMNN_LOG(fatal) << "Unsupported input tensor data type \"" << params.m_InputTypes[inputIndex] << "\". ";
+ return EXIT_FAILURE;
+ }
+ }
+
+ for (size_t x = 0; x < params.m_Iterations; x++)
+ {
+ // Run the inference
+ tfLiteInterpreter->Invoke();
+
+ // Print out the output
+ for (unsigned int outputIndex = 0; outputIndex < params.m_OutputNames.size(); ++outputIndex)
+ {
+ std::cout << "Printing out the output" << std::endl;
+ auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[outputIndex];
+ TfLiteIntArray *outputDims = tfLiteInterpreter->tensor(tfLiteDelegateOutputId)->dims;
+
+ int outputSize = 1;
+ for (unsigned int dim = 0; dim < static_cast<unsigned int>(outputDims->size); ++dim)
+ {
+ outputSize *= outputDims->data[dim];
+ }
+
+ std::cout << params.m_OutputNames[outputIndex] << ": ";
+ if (params.m_OutputTypes[outputIndex].compare("float") == 0)
+ {
+ auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<float>(tfLiteDelegateOutputId);
+
+ if(tfLiteDelegateOutputData == NULL)
+ {
+ ARMNN_LOG(fatal) << "Output tensor is null, output type: "
+ "\"" << params.m_OutputTypes[outputIndex] << "\" may be incorrect.";
+ return EXIT_FAILURE;
+ }
+
+ for (int i = 0; i < outputSize; ++i)
+ {
+ std::cout << tfLiteDelegateOutputData[i] << ", ";
+ if (i % 60 == 0)
+ {
+ std::cout << std::endl;
+ }
+ }
+ }
+ else if (params.m_OutputTypes[outputIndex].compare("int") == 0)
+ {
+ auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<int32_t>(tfLiteDelegateOutputId);
+
+ if(tfLiteDelegateOutputData == NULL)
+ {
+ ARMNN_LOG(fatal) << "Output tensor is null, output type: "
+ "\"" << params.m_OutputTypes[outputIndex] << "\" may be incorrect.";
+ return EXIT_FAILURE;
+ }
+
+ for (int i = 0; i < outputSize; ++i)
+ {
+ std::cout << tfLiteDelegateOutputData[i] << ", ";
+ if (i % 60 == 0)
+ {
+ std::cout << std::endl;
+ }
+ }
+ }
+ else if (params.m_OutputTypes[outputIndex].compare("qasymm8") == 0)
+ {
+ auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<uint8_t>(tfLiteDelegateOutputId);
+ if(tfLiteDelegateOutputData == NULL)
+ {
+ ARMNN_LOG(fatal) << "Output tensor is null, output type: "
+ "\"" << params.m_OutputTypes[outputIndex] << "\" may be incorrect.";
+ return EXIT_FAILURE;
+ }
+
+ for (int i = 0; i < outputSize; ++i)
+ {
+ std::cout << unsigned(tfLiteDelegateOutputData[i]) << ", ";
+ if (i % 60 == 0)
+ {
+ std::cout << std::endl;
+ }
+ }
+ }
+ else
+ {
+ ARMNN_LOG(fatal) << "Unsupported output tensor data type \"" << params.m_OutputTypes[outputIndex] <<
+ "\". Output type can be specified with the -z argument";
+ return EXIT_FAILURE;
+ }
+ std::cout << std::endl;
+ }
+ }
+
+ return status;
+}
+#endif
template<typename TParser, typename TDataType>
int MainImpl(const ExecuteNetworkParams& params,
const std::shared_ptr<armnn::IRuntime>& runtime = nullptr)
@@ -242,6 +427,16 @@ int main(int argc, const char* argv[])
}
else if(modelFormat.find("tflite") != std::string::npos)
{
+
+ if (ProgramOptions.m_ExNetParams.m_EnableDelegate)
+ {
+ #if defined(ARMNN_TF_LITE_DELEGATE)
+ return TfLiteDelegateMainImpl(ProgramOptions.m_ExNetParams, runtime);
+ #else
+ ARMNN_LOG(fatal) << "Not built with Arm NN TfLite delegate support.";
+ return EXIT_FAILURE;
+ #endif
+ }
#if defined(ARMNN_TF_LITE_PARSER)
return MainImpl<armnnTfLiteParser::ITfLiteParser, float>(ProgramOptions.m_ExNetParams, runtime);
#else
diff --git a/tests/ExecuteNetwork/ExecuteNetworkParams.cpp b/tests/ExecuteNetwork/ExecuteNetworkParams.cpp
index a3a7f6a753..890ab2a658 100644
--- a/tests/ExecuteNetwork/ExecuteNetworkParams.cpp
+++ b/tests/ExecuteNetwork/ExecuteNetworkParams.cpp
@@ -74,6 +74,7 @@ void CheckModelFormat(const std::string& modelFormat)
"format supported for tflite files",
modelFormat));
}
+#elif defined(ARMNN_TFLITE_DELEGATE)
#else
throw armnn::InvalidArgumentException("Can't run model in tflite format without being "
"built with Tensorflow Lite parser support.");
diff --git a/tests/ExecuteNetwork/ExecuteNetworkParams.hpp b/tests/ExecuteNetwork/ExecuteNetworkParams.hpp
index 5490230ede..8f176c2fd6 100644
--- a/tests/ExecuteNetwork/ExecuteNetworkParams.hpp
+++ b/tests/ExecuteNetwork/ExecuteNetworkParams.hpp
@@ -24,6 +24,7 @@ struct ExecuteNetworkParams
bool m_EnableProfiling;
bool m_GenerateTensorData;
bool m_InferOutputShape = false;
+ bool m_EnableDelegate = false;
std::vector<std::string> m_InputNames;
std::vector<std::string> m_InputTensorDataFilePaths;
std::vector<TensorShapePtr> m_InputTensorShapes;
diff --git a/tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp b/tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp
index e37c4eb42e..b499289f61 100644
--- a/tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp
+++ b/tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp
@@ -168,6 +168,10 @@ ProgramOptions::ProgramOptions() : m_CxxOptions{"ExecuteNetwork",
"tensorflow-text.",
cxxopts::value<std::string>())
+ ("D,armnn-tflite-delegate",
+ "enable Arm NN TfLite delegate",
+ cxxopts::value<bool>(m_ExNetParams.m_EnableDelegate)->default_value("false")->implicit_value("true"))
+
("m,model-path",
"Path to model file, e.g. .armnn, .caffemodel, .prototxt, .tflite, .onnx",
cxxopts::value<std::string>(m_ExNetParams.m_ModelPath))