author     Jim Flynn <jim.flynn@arm.com>   2021-09-21 09:06:31 +0000
committer  Jim Flynn <jim.flynn@arm.com>   2021-09-21 09:07:01 +0000
commit     a3268f13d2b7abb9ae075389b4faae2b660d4889
tree       db3935b662b3f11fbcaedc267c01b82152c3878c
parent     cdc495ea61a94ced93e877b62bcca5fa68f52f9b
download   armnn-a3268f13d2b7abb9ae075389b4faae2b660d4889.tar.gz
Revert "IVGCVSW-6181 patch to allow building against tflite > v2.3"
This reverts commit fc8d434bb318aebb433a2f6d8ce9c066cd9c1b1e.

Reason for revert: causes failures in the armv8 builds; need to back out and fix again later.

Change-Id: I5ccdbb622caaa6413de41e1ee073f38dcabff7d8
-rw-r--r--   CMakeLists.txt                           2
-rwxr-xr-x   src/armnnTfLiteParser/CMakeLists.txt    36
-rw-r--r--   src/armnnTfLiteParser/TfLiteParser.cpp  16
3 files changed, 2 insertions(+), 52 deletions(-)
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 28d63d35df..8e6c1f3495 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -19,8 +19,6 @@ foreach(cmake_file ${additional_cmake_files})
include(${cmake_file})
endforeach()
-set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${PROJECT_SOURCE_DIR}/delegate/cmake/Modules/")
-
cmake_policy(SET CMP0057 NEW)
set(as_subproject Armnn)
diff --git a/src/armnnTfLiteParser/CMakeLists.txt b/src/armnnTfLiteParser/CMakeLists.txt
index 1fbd8b845a..6a02c94b82 100755
--- a/src/armnnTfLiteParser/CMakeLists.txt
+++ b/src/armnnTfLiteParser/CMakeLists.txt
@@ -13,44 +13,16 @@ if(BUILD_TF_LITE_PARSER)
add_library_ex(armnnTfLiteParser SHARED ${armnn_tf_lite_parser_sources})
- # NOTE: even though the tensorflow sources contain a ./tensorflow/lite/schema/schema_generated.h
- # file we cannot use this directly because we need to take packaging for linux distros into
- # account. On Ubuntu 20.04 the available package is flatbuffers 1.11 and on 21.10 it is 1.12
- # despite the minor versioning they are not backward compatible. The current tensorflow lite
- # source (v2.3-v2.5) is generated from 1.12... so we need to generate from the
- # ./tensorflow/lite/schema/schema.fbs in the tensorflow lite source using the flatc matching
- # the target platform but use the ./tensorflow/lite/version.h to determine which version of
- # tensorflow lite the header was generated from.
include_directories(SYSTEM "${FLATBUFFERS_INCLUDE_PATH}")
set_target_properties(armnnTfLiteParser PROPERTIES LIBRARY_OUTPUT_DIRECTORY ${PROJECT_BINARY_DIR})
target_include_directories(armnnTfLiteParser PRIVATE ../armnn)
target_include_directories(armnnTfLiteParser PRIVATE ../armnnUtils)
target_include_directories(armnnTfLiteParser SYSTEM PRIVATE "${TF_LITE_SCHEMA_INCLUDE_PATH}")
-
- # using the armnn/delegate/cmake/Modules/FindTfLite.cmake to find the TfLite sources
- # so that we can use the tensorflow/lite/version.h to determine which version of
- # tensorflow lite we are compiling against
- find_package(TfLite REQUIRED MODULE)
-
- # Various tflite header files are not warning clean
- # We can't change compilation flags on header files directly, so we need to add them to an interface library first
- add_library(tflite_version_headers INTERFACE)
- target_include_directories(tflite_version_headers INTERFACE $<BUILD_INTERFACE:${TfLite_INCLUDE_DIR}>
- $<INSTALL_INTERFACE:include/tflite_version_headers>)
-
- target_compile_options(tflite_version_headers INTERFACE -Wno-conversion
- -Wno-sign-conversion
- -Wno-unused-parameter
- -Wno-unused-function)
-
# If user has explicitly specified flatbuffers lib then use that,
# otherwise search for it based on FLATBUFFERS_BUILD_DIR
if (FLATBUFFERS_LIBRARY)
- target_link_libraries(armnnTfLiteParser
- armnn
- tflite_version_headers
- ${FLATBUFFERS_LIBRARY})
+ target_link_libraries(armnnTfLiteParser armnn ${FLATBUFFERS_LIBRARY})
else()
# Use PATH_SUFFIXES to help find separate libs for debug/release on Windows builds
find_library(FLATBUFFERS_LIBRARY_DEBUG NAMES flatbuffers
@@ -59,11 +31,7 @@ if(BUILD_TF_LITE_PARSER)
find_library(FLATBUFFERS_LIBRARY_RELEASE NAMES flatbuffers
HINTS ${FLATBUFFERS_BUILD_DIR}
PATH_SUFFIXES "Release")
- target_link_libraries(armnnTfLiteParser
- armnn
- tflite_version_headers
- debug ${FLATBUFFERS_LIBRARY_DEBUG}
- optimized ${FLATBUFFERS_LIBRARY_RELEASE})
+ target_link_libraries(armnnTfLiteParser armnn debug ${FLATBUFFERS_LIBRARY_DEBUG} optimized ${FLATBUFFERS_LIBRARY_RELEASE})
endif()
set_target_properties(armnnTfLiteParser PROPERTIES VERSION ${TFLITE_PARSER_LIB_VERSION} SOVERSION ${TFLITE_PARSER_LIB_SOVERSION} )
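Note: the find_package(TfLite) lookup and the tflite_version_headers target removed above existed only so the parser could include tensorflow/lite/version.h and branch on the TF Lite version at compile time; as the removed comment explains, the schema header shipped with the TF Lite sources cannot be used directly because distro flatbuffers packages (1.11 on Ubuntu 20.04, 1.12 on 21.10) are not compatible with each other. As a rough illustration only, the sketch below shows what that compile-time version probe looks like on the C++ side, assuming the TF Lite sources are on the include path; the macro names are taken from the removed parser code in the TfLiteParser.cpp changes further down.

    // Build-time probe for the TF Lite version the parser is compiled against.
    // Assumes <tensorflow/lite/version.h> is reachable, which is what the reverted
    // find_package(TfLite REQUIRED MODULE) call arranged.
    #include <tensorflow/lite/version.h>
    #include <iostream>

    // Same guard the reverted parser used: true for any TF Lite newer than 2.3,
    // i.e. any schema generation that carries deprecated_builtin_code.
    #if TF_MAJOR_VERSION > 2 || (TF_MAJOR_VERSION == 2 && TF_MINOR_VERSION > 3)
    constexpr bool kHasDeprecatedBuiltinCode = true;
    #else
    constexpr bool kHasDeprecatedBuiltinCode = false;
    #endif

    int main()
    {
        std::cout << "TF Lite " << TF_MAJOR_VERSION << "." << TF_MINOR_VERSION
                  << ", deprecated_builtin_code present: "
                  << (kHasDeprecatedBuiltinCode ? "yes" : "no") << "\n";
    }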
diff --git a/src/armnnTfLiteParser/TfLiteParser.cpp b/src/armnnTfLiteParser/TfLiteParser.cpp
index 0f0e67c539..bedefdec2f 100644
--- a/src/armnnTfLiteParser/TfLiteParser.cpp
+++ b/src/armnnTfLiteParser/TfLiteParser.cpp
@@ -32,8 +32,6 @@
#include <fmt/format.h>
-#include <tensorflow/lite/version.h>
-
#include <algorithm>
#include <fstream>
#include <iostream>
@@ -769,14 +767,7 @@ INetworkPtr TfLiteParserImpl::CreateNetworkFromModel()
for (OperatorPtr const& op : subgraph->operators)
{
auto const& opCodePtr = m_Model->operator_codes[op->opcode_index];
-
-// work around the introduction of the deprecated_builtin_code introduced in 2.4 in a backwards compatible manner
-#if TF_MAJOR_VERSION > 2 || (TF_MAJOR_VERSION == 2 && TF_MINOR_VERSION > 3)
- auto builtinCode = std::max(opCodePtr->builtin_code,
- static_cast<tflite::BuiltinOperator>(opCodePtr->deprecated_builtin_code));
-#else
auto builtinCode = opCodePtr->builtin_code;
-#endif
if (builtinCode > tflite::BuiltinOperator_MAX)
{
@@ -896,14 +887,7 @@ void TfLiteParserImpl::ParseUnsupportedOperator(size_t subgraphIndex, size_t ope
const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
auto opcodeIndex = operatorPtr->opcode_index;
-
-// work around the introduction of the deprecated_builtin_code introduced in 2.4 in a backwards compatible manner
-#if TF_MAJOR_VERSION > 2 || (TF_MAJOR_VERSION == 2 && TF_MINOR_VERSION > 3)
- auto opcode = std::max(m_Model->operator_codes[opcodeIndex]->builtin_code,
- static_cast<tflite::BuiltinOperator>(m_Model->operator_codes[opcodeIndex]->deprecated_builtin_code));
-#else
auto opcode = m_Model->operator_codes[opcodeIndex]->builtin_code;
-#endif
if (!m_Options || !m_Options.value().m_StandInLayerForUnsupported)
{
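For context on the code this revert removes: the TF Lite 2.4 schema kept the old 8-bit operator code as deprecated_builtin_code alongside a wider builtin_code field, so a parser built against a 2.4+ schema has to take the larger of the two values to handle both old and new models. The standalone sketch below illustrates that resolution step; OperatorCodeStub and its enum are hypothetical stand-ins for the flatbuffers-generated tflite types, and in the reverted patch the equivalent expression sat behind the TF_MAJOR_VERSION/TF_MINOR_VERSION guard shown above. With the revert applied, the parser reads builtin_code directly again, which matches the v2.3 schema it is built against.

    // Minimal stand-in for the flatbuffers-generated operator code table
    // (hypothetical; the real type is generated from tensorflow/lite/schema/schema.fbs).
    #include <algorithm>
    #include <cstdint>
    #include <iostream>

    enum class BuiltinOperator : int32_t
    {
        ADD     = 0,
        CONV_2D = 3,
        PLACEHOLDER_FOR_GREATER_OP_CODES = 127,
        CUMSUM  = 128   // illustrative op whose code no longer fits in the old 8-bit field
    };

    struct OperatorCodeStub
    {
        int8_t          deprecated_builtin_code = 0;              // old 8-bit field, capped at 127
        BuiltinOperator builtin_code = BuiltinOperator::ADD;      // wider field added in the 2.4 schema, defaults to 0
    };

    // Backward-compatible resolution: older models populate only the deprecated field,
    // newer models set both (the deprecated field saturating at the placeholder value),
    // so the larger of the two is always the real operator code.
    BuiltinOperator ResolveBuiltinCode(const OperatorCodeStub& opCode)
    {
        return std::max(opCode.builtin_code,
                        static_cast<BuiltinOperator>(opCode.deprecated_builtin_code));
    }

    int main()
    {
        OperatorCodeStub oldModelOp;               // written by a pre-2.4 converter
        oldModelOp.deprecated_builtin_code = 3;    // CONV_2D lives in the old field only

        OperatorCodeStub newModelOp;               // written by a 2.4+ converter
        newModelOp.deprecated_builtin_code = 127;  // placeholder, real code is > 127
        newModelOp.builtin_code = BuiltinOperator::CUMSUM;

        std::cout << static_cast<int32_t>(ResolveBuiltinCode(oldModelOp)) << "\n";  // prints 3
        std::cout << static_cast<int32_t>(ResolveBuiltinCode(newModelOp)) << "\n";  // prints 128
    }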