aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--CMakeLists.txt4
-rw-r--r--delegate/CMakeLists.txt41
-rw-r--r--delegate/cmake/Modules/ArmnnDelegateConfig.cmake.in6
-rw-r--r--delegate/cmake/Modules/FindTensorflow.cmake30
-rw-r--r--tests/ExecuteNetwork/ExecuteNetwork.cpp34
5 files changed, 55 insertions, 60 deletions
diff --git a/CMakeLists.txt b/CMakeLists.txt
index cee3c2a020..763c010d56 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -41,10 +41,8 @@ add_subdirectory(src/armnnDeserializer)
if (BUILD_ARMNN_TFLITE_DELEGATE)
-
- list(APPEND CMAKE_MODULE_PATH ${CMAKE_CURRENT_SOURCE_DIR}/delegate/cmake/Modules)
+ set(ARMNN_SUB_PROJECT ON)
add_subdirectory(delegate)
-
add_definitions(-DARMNN_TF_LITE_DELEGATE)
endif()
diff --git a/delegate/CMakeLists.txt b/delegate/CMakeLists.txt
index 677a38ea4a..aa2f3600bf 100644
--- a/delegate/CMakeLists.txt
+++ b/delegate/CMakeLists.txt
@@ -8,7 +8,7 @@ project(armnnDelegate)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++14 -Wall -Wextra -Werror -Wold-style-cast -Wno-missing-braces -Wconversion -Wsign-conversion")
-set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_SOURCE_DIR}/cmake/Modules/")
+set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${PROJECT_SOURCE_DIR}/cmake/Modules/")
set(armnnDelegate_sources)
list(APPEND armnnDelegate_sources
@@ -54,24 +54,15 @@ target_include_directories(armnnDelegate
include(GNUInstallDirs)
## Add Armnn as a Dependency
-find_package(Armnn REQUIRED)
-target_link_libraries(armnnDelegate Armnn::Armnn)
-
-## Add Tensorflow v2.3.1 dependency
-find_package(Tensorflow 2.3.1 REQUIRED MODULE)
-
-target_link_libraries(armnnDelegate
- ${Tensorflow_LIB})
-
-target_include_directories(armnnDelegate
- PRIVATE
- ${Tensorflow_INCLUDE_DIR})
+if(NOT ARMNN_SUB_PROJECT)
+ find_package(Armnn REQUIRED CONFIG HINTS ${Armnn_DIR})
+endif()
+target_link_libraries(armnnDelegate PUBLIC Armnn::Armnn)
## Add TfLite v2.3.1 dependency
find_package(TfLite REQUIRED MODULE)
-target_link_libraries(armnnDelegate
- ${TfLite_LIB})
+target_link_libraries(armnnDelegate PUBLIC ${TfLite_LIB})
# Various tflite header files are not warning clean
# We can't change compilation flags on header files directly, so we need to add them to an interface library first
@@ -84,12 +75,12 @@ target_compile_options(tflite_headers INTERFACE $<$<CXX_COMPILER_ID:GNU>:-Wno-co
-Wno-unused-parameter
-Wno-unused-function>)
-target_link_libraries(armnnDelegate tflite_headers)
+target_link_libraries(armnnDelegate PUBLIC tflite_headers)
## Add Flatbuffers dependency
find_package(Flatbuffers REQUIRED MODULE)
-target_link_libraries(armnnDelegate
+target_link_libraries(armnnDelegate PRIVATE
${Flatbuffers_LIB})
# include/flatbuffers/flatbuffers.h is not warning clean
@@ -99,7 +90,7 @@ target_include_directories(flatbuffer_headers INTERFACE $<BUILD_INTERFACE:${Flat
$<INSTALL_INTERFACE:include/flatbuffer_headers>)
target_compile_options(flatbuffer_headers INTERFACE $<$<CXX_COMPILER_ID:GNU>:-Wno-sign-conversion>)
-target_link_libraries(armnnDelegate flatbuffer_headers)
+target_link_libraries(armnnDelegate PUBLIC flatbuffer_headers)
option(BUILD_UNIT_TESTS "Build unit tests" ON)
if(BUILD_UNIT_TESTS)
@@ -146,12 +137,11 @@ if(BUILD_UNIT_TESTS)
# Add half library from armnn third-party libraries
target_include_directories(DelegateUnitTests PRIVATE ${ARMNN_SOURCE_DIR}/third-party)
- target_link_libraries(DelegateUnitTests armnnDelegate)
- target_link_libraries(DelegateUnitTests Armnn::armnnUtils)
+ target_link_libraries(DelegateUnitTests PRIVATE armnnDelegate)
+ target_link_libraries(DelegateUnitTests PRIVATE Armnn::armnnUtils)
-target_link_libraries(DelegateUnitTests tflite_headers)
-
-target_link_libraries(DelegateUnitTests flatbuffer_headers)
+ target_link_libraries(DelegateUnitTests PRIVATE tflite_headers)
+ target_link_libraries(DelegateUnitTests PRIVATE flatbuffer_headers)
endif()
@@ -187,10 +177,13 @@ include(CMakePackageConfigHelpers)
set(INSTALL_CONFIGDIR ${CMAKE_INSTALL_LIBDIR})
message(STATUS "CMAKE_CURRENT_LIST_DIR ${CMAKE_CURRENT_LIST_DIR}" )
message(STATUS "CMAKE_CURRENT_BINARY_DIR ${CMAKE_CURRENT_BINARY_DIR}" )
+set(Armnn_DIR "${Armnn_DIR}")
+
configure_package_config_file(
${CMAKE_CURRENT_LIST_DIR}/cmake/Modules/ArmnnDelegateConfig.cmake.in
${CMAKE_CURRENT_BINARY_DIR}/ArmnnDelegateConfig.cmake
- INSTALL_DESTINATION ${INSTALL_CONFIGDIR})
+ INSTALL_DESTINATION ${INSTALL_CONFIGDIR}
+ PATH_VARS Armnn_DIR)
## Install ArmNN Delegate config file
install(
diff --git a/delegate/cmake/Modules/ArmnnDelegateConfig.cmake.in b/delegate/cmake/Modules/ArmnnDelegateConfig.cmake.in
index c403068db8..c878c46ad3 100644
--- a/delegate/cmake/Modules/ArmnnDelegateConfig.cmake.in
+++ b/delegate/cmake/Modules/ArmnnDelegateConfig.cmake.in
@@ -9,10 +9,12 @@ MESSAGE(STATUS "Found ArmnnDelegate: ${ARMNN_DELEGATE_CONFIG_FILE}")
include(CMakeFindDependencyMacro)
-find_dependency(Armnn REQUIRED CONFIG)
-
list(APPEND CMAKE_MODULE_PATH ${ARMNN_DELEGATE_CMAKE_DIR})
+@PACKAGE_INIT@
+set_and_check(Armnn_DIR "@PACKAGE_Armnn_DIR@")
+find_dependency(Armnn REQUIRED CONFIG HINTS ${Armnn_DIR})
+
if(NOT TARGET ArmnnDelegate::ArmnnDelegate)
MESSAGE(STATUS "ArmnnDelegate Import: ${ARMNN_DELEGATE_CMAKE_DIR}/ArmnnDelegateTargets.cmake")
include("${ARMNN_DELEGATE_CMAKE_DIR}/ArmnnDelegateTargets.cmake")
diff --git a/delegate/cmake/Modules/FindTensorflow.cmake b/delegate/cmake/Modules/FindTensorflow.cmake
deleted file mode 100644
index 8f90011a65..0000000000
--- a/delegate/cmake/Modules/FindTensorflow.cmake
+++ /dev/null
@@ -1,30 +0,0 @@
-#
-# Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
-# SPDX-License-Identifier: MIT
-#
-
-include(FindPackageHandleStandardArgs)
-unset(TENSORFLOW_FOUND)
-
-find_path(Tensorflow_INCLUDE_DIR
- NAMES
- tensorflow/core
- tensorflow/cc
- third_party
- HINTS
- ${TENSORFLOW_ROOT})
-
-find_library(Tensorflow_LIB
- NAMES
- tensorflow_all
- HINTS
- ${TENSORFLOW_LIB_DIR})
-
-## Set TENSORFLOW_FOUND
-find_package_handle_standard_args(Tensorflow DEFAULT_MSG Tensorflow_INCLUDE_DIR Tensorflow_LIB)
-
-## Set external variables for usage in CMakeLists.txt
-if(TENSORFLOW_FOUND)
- set(Tensorflow_LIB ${Tensorflow_LIB})
- set(Tensorflow_INCLUDE_DIRS ${Tensorflow_INCLUDE_DIR})
-endif() \ No newline at end of file
diff --git a/tests/ExecuteNetwork/ExecuteNetwork.cpp b/tests/ExecuteNetwork/ExecuteNetwork.cpp
index be341b670a..00507e0c49 100644
--- a/tests/ExecuteNetwork/ExecuteNetwork.cpp
+++ b/tests/ExecuteNetwork/ExecuteNetwork.cpp
@@ -88,6 +88,14 @@ int TfLiteDelegateMainImpl(const ExecuteNetworkParams& params,
if (params.m_InputTypes[inputIndex].compare("float") == 0)
{
auto inputData = tfLiteInterpreter->typed_tensor<float>(input);
+
+            if(inputData == NULL)
+ {
+ ARMNN_LOG(fatal) << "Input tensor is null, input type: "
+ "\"" << params.m_InputTypes[inputIndex] << "\" may be incorrect.";
+ return EXIT_FAILURE;
+ }
+
std::vector<float> tensorData;
PopulateTensorWithDataGeneric<float>(tensorData,
params.m_InputTensorShapes[inputIndex]->GetNumElements(),
@@ -100,6 +108,14 @@ int TfLiteDelegateMainImpl(const ExecuteNetworkParams& params,
else if (params.m_InputTypes[inputIndex].compare("int8") == 0)
{
auto inputData = tfLiteInterpreter->typed_tensor<int8_t>(input);
+
+            if(inputData == NULL)
+ {
+ ARMNN_LOG(fatal) << "Input tensor is null, input type: "
+ "\"" << params.m_InputTypes[inputIndex] << "\" may be incorrect.";
+ return EXIT_FAILURE;
+ }
+
std::vector<int8_t> tensorData;
PopulateTensorWithDataGeneric<int8_t>(tensorData,
params.m_InputTensorShapes[inputIndex]->GetNumElements(),
@@ -112,6 +128,14 @@ int TfLiteDelegateMainImpl(const ExecuteNetworkParams& params,
else if (params.m_InputTypes[inputIndex].compare("int") == 0)
{
auto inputData = tfLiteInterpreter->typed_tensor<int32_t>(input);
+
+            if(inputData == NULL)
+ {
+ ARMNN_LOG(fatal) << "Input tensor is null, input type: "
+ "\"" << params.m_InputTypes[inputIndex] << "\" may be incorrect.";
+ return EXIT_FAILURE;
+ }
+
std::vector<int32_t> tensorData;
PopulateTensorWithDataGeneric<int32_t>(tensorData,
params.m_InputTensorShapes[inputIndex]->GetNumElements(),
@@ -124,6 +148,14 @@ int TfLiteDelegateMainImpl(const ExecuteNetworkParams& params,
else if (params.m_InputTypes[inputIndex].compare("qasymm8") == 0)
{
auto inputData = tfLiteInterpreter->typed_tensor<uint8_t>(input);
+
+            if(inputData == NULL)
+ {
+ ARMNN_LOG(fatal) << "Input tensor is null, input type: "
+ "\"" << params.m_InputTypes[inputIndex] << "\" may be incorrect.";
+ return EXIT_FAILURE;
+ }
+
std::vector<uint8_t> tensorData;
PopulateTensorWithDataGeneric<uint8_t>(tensorData,
params.m_InputTensorShapes[inputIndex]->GetNumElements(),
@@ -468,7 +500,7 @@ int main(int argc, const char* argv[])
#if defined(ARMNN_TF_LITE_DELEGATE)
return TfLiteDelegateMainImpl(ProgramOptions.m_ExNetParams, runtime);
#else
- ARMNN_LOG(fatal) << "Not built with Tensorflow-Lite parser support.";
+ ARMNN_LOG(fatal) << "Not built with Arm NN Tensorflow-Lite delegate support.";
return EXIT_FAILURE;
#endif
}