-rw-r--r--  CMakeLists.txt | 8
-rw-r--r--  README.md | 13
-rw-r--r--  cmake/GlobalConfig.cmake | 16
-rw-r--r--  delegate/CMakeLists.txt | 3
-rw-r--r--  delegate/classic/src/armnn_delegate.cpp | 1
-rw-r--r--  delegate/cmake/Modules/FindFlatbuffers.cmake | 8
-rw-r--r--  delegate/cmake/Modules/FindTfLite.cmake | 187
-rw-r--r--  delegate/cmake/Modules/FindTfLiteAbsl.cmake | 57
-rw-r--r--  delegate/cmake/Modules/FindTfLiteSrc.cmake | 12
-rw-r--r--  delegate/test/ScatterNdTest.cpp | 28
-rw-r--r--  docs/02_operator_list.dox | 19
-rw-r--r--  docs/05_01_parsers.dox | 2
-rw-r--r--  docs/05_04_supportlibrary.dox | 2
-rw-r--r--  docs/Doxyfile | 4
-rw-r--r--  docs/FAQ.md | 13
-rw-r--r--  include/armnn/IRuntime.hpp | 3
-rw-r--r--  include/armnnOnnxParser/IOnnxParser.hpp | 11
-rw-r--r--  python/pyarmnn/README.md | 1
-rw-r--r--  python/pyarmnn/src/pyarmnn/swig/modules/armnn_network.i | 42
-rw-r--r--  python/pyarmnn/test/test_network.py | 3
-rw-r--r--  samples/AsyncExecutionSample.cpp | 2
-rw-r--r--  samples/PreImportMemorySample.cpp | 6
-rwxr-xr-x  scripts/get_compute_library.sh | 4
-rw-r--r--  shim/BuildGuideShimSupportLibrary.md | 23
-rw-r--r--  shim/sl/README.md | 1
-rw-r--r--  shim/sl/canonical/ArmnnDevice.hpp | 3
-rw-r--r--  shim/sl/canonical/ArmnnDriver.hpp | 8
-rw-r--r--  shim/sl/canonical/ArmnnDriverImpl.hpp | 4
-rw-r--r--  shim/sl/canonical/ArmnnPreparedModel.hpp | 4
-rw-r--r--  src/armnn/Tensor.cpp | 15
-rw-r--r--  src/armnn/Threadpool.cpp | 5
-rw-r--r--  src/armnn/test/RuntimeTests.cpp | 6
-rw-r--r--  src/armnnConverter/ArmnnConverter.cpp | 4
-rw-r--r--  src/armnnConverter/README.md | 2
-rw-r--r--  src/armnnOnnxParser/OnnxParser.cpp | 4
-rw-r--r--  src/armnnOnnxParser/test/Constructor.cpp | 4
-rw-r--r--  src/armnnOnnxParser/test/CreateNetwork.cpp | 4
-rw-r--r--  src/armnnTfLiteParser/TfLiteParser.cpp | 4
-rw-r--r--  src/armnnUtils/ParserPrototxtFixture.hpp | 10
-rw-r--r--  src/backends/aclCommon/ArmComputeTensorUtils.cpp | 26
-rw-r--r--  src/backends/aclCommon/ArmComputeTensorUtils.hpp | 6
-rw-r--r--  src/backends/backendsCommon/test/StridedSliceAsyncEndToEndTest.hpp | 6
-rw-r--r--  src/backends/cl/ClLayerSupport.cpp | 24
-rw-r--r--  src/backends/cl/ClLayerSupport.hpp | 9
-rw-r--r--  src/backends/cl/ClWorkloadFactory.cpp | 7
-rw-r--r--  src/backends/cl/backend.mk | 3
-rw-r--r--  src/backends/cl/test/ClEndToEndTests.cpp | 22
-rw-r--r--  src/backends/cl/test/ClLayerTests.cpp | 89
-rw-r--r--  src/backends/cl/workloads/CMakeLists.txt | 4
-rw-r--r--  src/backends/cl/workloads/ClScatterNdWorkload.cpp | 77
-rw-r--r--  src/backends/cl/workloads/ClScatterNdWorkload.hpp | 35
-rw-r--r--  src/backends/cl/workloads/ClWorkloads.hpp | 3
-rw-r--r--  src/backends/gpuFsa/GpuFsaBackend.hpp | 5
-rw-r--r--  src/backends/gpuFsa/GpuFsaRegistryInitializer.cpp | 6
-rw-r--r--  src/backends/gpuFsa/test/GpuFsaWorkloadFactoryHelper.hpp | 5
-rw-r--r--  src/backends/neon/NeonLayerSupport.cpp | 21
-rw-r--r--  src/backends/reference/RefWorkloadFactory.cpp | 2
-rw-r--r--  src/backends/reference/test/RefEndToEndTests.cpp | 2
-rw-r--r--  src/backends/tosaCommon/TosaMappings.cpp | 2
-rw-r--r--  src/backends/tosaCommon/TosaMappings.hpp | 2
-rw-r--r--  src/backends/tosaCommon/operatorMappings/ActivationOperator.cpp | 12
-rw-r--r--  src/backends/tosaCommon/operatorMappings/AvgPool2DIgnoreValueOperator.cpp | 14
-rw-r--r--  src/backends/tosaCommon/operatorMappings/ConcatOperator.cpp | 13
-rw-r--r--  src/backends/tosaCommon/operatorMappings/Conv2dOperator.cpp | 14
-rw-r--r--  src/backends/tosaCommon/operatorMappings/ElementwiseBinaryOperator.cpp | 20
-rw-r--r--  src/backends/tosaCommon/operatorMappings/ElementwiseUnaryOperator.cpp | 12
-rw-r--r--  src/backends/tosaCommon/operatorMappings/Pooling2DOperator.cpp | 14
-rw-r--r--  src/backends/tosaCommon/operatorMappings/QuantizeOperator.cpp | 12
-rw-r--r--  src/backends/tosaCommon/operatorMappings/ReshapeOperator.cpp | 14
-rw-r--r--  src/backends/tosaCommon/operatorMappings/ResizeOperator.cpp | 14
-rw-r--r--  src/backends/tosaCommon/operatorMappings/SliceOperator.cpp | 14
-rw-r--r--  src/backends/tosaCommon/operatorMappings/SplitOperator.cpp | 8
-rw-r--r--  src/backends/tosaCommon/operatorMappings/TosaOperatorUtils.hpp | 23
-rw-r--r--  src/backends/tosaCommon/operatorMappings/TransposeConv2dOperator.cpp | 12
-rw-r--r--  src/backends/tosaCommon/operatorMappings/TransposeOperator.cpp | 14
-rw-r--r--  src/backends/tosaCommon/test/AvgPool2DIgnoreValueChecker.hpp | 4
-rw-r--r--  src/backends/tosaCommon/test/OneToManyMappingTests.cpp | 3
-rw-r--r--  src/backends/tosaCommon/test/OneToOneMappingTests.cpp | 6
-rw-r--r--  src/backends/tosaCommon/test/SplitChecker.hpp | 4
-rw-r--r--  src/backends/tosaCommon/test/TosaTestUtils.hpp | 8
-rw-r--r--  tests/ExecuteNetwork/ArmNNExecutor.cpp | 152
-rw-r--r--  tests/ExecuteNetwork/ExecuteNetworkParams.hpp | 3
-rw-r--r--  tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp | 12
-rw-r--r--  tests/InferenceModel.hpp | 8
84 files changed, 945 insertions, 367 deletions
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 660b7555ef..a14c500bc6 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -529,8 +529,12 @@ install(DIRECTORY profiling/client/include/ DESTINATION ${CMAKE_INSTALL_INCLUDED
if(ARMCOMPUTENEON OR ARMCOMPUTECL OR ARMCOMPUTEGPUFSA)
if (BUILD_ACL_OPENMP)
- target_link_libraries(armnn PUBLIC -fopenmp -static-openmp )
- target_compile_options(armnn PUBLIC -fopenmp)
+ if (("${CMAKE_SYSTEM_NAME}" STREQUAL Android))
+ target_link_libraries(armnn PUBLIC -fopenmp -static-openmp)
+ else() # Assumes GCC on aarch64.
+ target_compile_options(armnn PUBLIC -fopenmp)
+ target_link_libraries(armnn PUBLIC -lgomp)
+ endif()
endif()
target_link_libraries(armnn PUBLIC ${ARMCOMPUTE_LIBRARIES})
endif()
diff --git a/README.md b/README.md
index 782cd8eb87..0c9985f060 100644
--- a/README.md
+++ b/README.md
@@ -59,12 +59,13 @@ Arm NN from scratch is the ability to **exactly choose which components to build
## Pre-Built Binaries
-| Operating System | Architecture-specific Release Archive (Download) |
-|-----------------------------------------------|--------------------------------------------------|
-| Android (AAR) | [![](https://img.shields.io/badge/download-android--aar-orange)](https://github.com/ARM-software/armnn/releases/download/v23.11/armnn_delegate_jni-23.11.aar) |
-| Android 11 "R/Red Velvet Cake" (API level 30) | [![](https://img.shields.io/badge/download-arm64--v82a-orange)](https://github.com/ARM-software/armnn/releases/download/v23.11/ArmNN-android-30-arm64-v8.2-a.tar.gz) [![](https://img.shields.io/badge/download-arm64--v8a-orange)](https://github.com/ARM-software/armnn/releases/download/v23.11/ArmNN-android-30-arm64-v8a.tar.gz) |
-| Android 12 "S/Snow Cone" (API level 31) | [![](https://img.shields.io/badge/download-arm64--v82a-yellow)](https://github.com/ARM-software/armnn/releases/download/v23.11/ArmNN-android-31-arm64-v8.2-a.tar.gz) [![](https://img.shields.io/badge/download-arm64--v8a-yellow)](https://github.com/ARM-software/armnn/releases/download/v23.11/ArmNN-android-31-arm64-v8a.tar.gz) [![](https://img.shields.io/badge/download-arm64--v86a-yellow)](https://github.com/ARM-software/armnn/releases/download/v23.11/ArmNN-android-31-arm64-v8.6-a.tar.gz) [![](https://img.shields.io/badge/download-arm64--v86asve-yellow)](https://github.com/ARM-software/armnn/releases/download/v23.11/ArmNN-android-31-arm64-v8.6-a-sve.tar.gz) [![](https://img.shields.io/badge/download-arm64--v86asve2-yellow)](https://github.com/ARM-software/armnn/releases/download/v23.11/ArmNN-android-31-arm64-v8.6-a-sve2.tar.gz) |
-| Android 13 "T/Tiramisu" (API level 33) | [![](https://img.shields.io/badge/download-arm64--v82a-red)](https://github.com/ARM-software/armnn/releases/download/v23.11/ArmNN-android-32-arm64-v8.2-a.tar.gz) [![](https://img.shields.io/badge/download-arm64--v86a-red)](https://github.com/ARM-software/armnn/releases/download/v23.11/ArmNN-android-32-arm64-v8.6-a.tar.gz) [![](https://img.shields.io/badge/download-arm64--v86asve-red)](https://github.com/ARM-software/armnn/releases/download/v23.11/ArmNN-android-32-arm64-v8.6-a-sve.tar.gz) [![](https://img.shields.io/badge/download-arm64--v86asve2-red)](https://github.com/ARM-software/armnn/releases/download/v23.11/ArmNN-android-32-arm64-v8.6-a-sve2.tar.gz) |
+| Operating System | Architecture-specific Release Archive (Download) |
+|-----------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| Android (AAR) | [![](https://img.shields.io/badge/download-android--aar-green)](https://github.com/ARM-software/armnn/releases/download/v24.05/armnn_delegate_jni-24.05.aar) |
+| Android 11 "R/Red Velvet Cake" (API level 30) | [![](https://img.shields.io/badge/download-arm64--v82a-orange)](https://github.com/ARM-software/armnn/releases/download/v24.05/ArmNN-android-30-arm64-v8.2-a.tar.gz) [![](https://img.shields.io/badge/download-arm64--v8a-orange)](https://github.com/ARM-software/armnn/releases/download/v24.05/ArmNN-android-30-arm64-v8a.tar.gz) |
+| Android 12 "S/Snow Cone" (API level 31) | [![](https://img.shields.io/badge/download-arm64--v82a-yellow)](https://github.com/ARM-software/armnn/releases/download/v24.05/ArmNN-android-31-arm64-v8.2-a.tar.gz) [![](https://img.shields.io/badge/download-arm64--v8a-yellow)](https://github.com/ARM-software/armnn/releases/download/v24.05/ArmNN-android-31-arm64-v8a.tar.gz) [![](https://img.shields.io/badge/download-arm64--v86a-yellow)](https://github.com/ARM-software/armnn/releases/download/v24.05/ArmNN-android-31-arm64-v8.6-a.tar.gz) [![](https://img.shields.io/badge/download-arm64--v86asve-yellow)](https://github.com/ARM-software/armnn/releases/download/v24.05/ArmNN-android-31-arm64-v8.6-a-sve.tar.gz) [![](https://img.shields.io/badge/download-arm64--v86asve2-yellow)](https://github.com/ARM-software/armnn/releases/download/v24.05/ArmNN-android-31-arm64-v8.6-a-sve2.tar.gz) |
+| Android 13 "T/Tiramisu" (API level 33) | [![](https://img.shields.io/badge/download-arm64--v82a-purple)](https://github.com/ARM-software/armnn/releases/download/v24.05/ArmNN-android-33-arm64-v8.2-a.tar.gz) [![](https://img.shields.io/badge/download-arm64--v86a-purple)](https://github.com/ARM-software/armnn/releases/download/v24.05/ArmNN-android-33-arm64-v8.6-a.tar.gz) [![](https://img.shields.io/badge/download-arm64--v86asve-purple)](https://github.com/ARM-software/armnn/releases/download/v24.05/ArmNN-android-33-arm64-v8.6-a-sve.tar.gz) [![](https://img.shields.io/badge/download-arm64--v86asve2-purple)](https://github.com/ARM-software/armnn/releases/download/v24.05/ArmNN-android-33-arm64-v8.6-a-sve2.tar.gz) |
+| Android 14 "U/Upside Down Cake" (API level 34)| [![](https://img.shields.io/badge/download-arm64--v82a-blue)](https://github.com/ARM-software/armnn/releases/download/v24.05/ArmNN-android-34-arm64-v8.2-a.tar.gz) [![](https://img.shields.io/badge/download-arm64--v86a-blue)](https://github.com/ARM-software/armnn/releases/download/v24.05/ArmNN-android-34-arm64-v8.6-a.tar.gz) [![](https://img.shields.io/badge/download-arm64--v86asve-blue)](https://github.com/ARM-software/armnn/releases/download/v24.05/ArmNN-android-34-arm64-v8.6-a-sve.tar.gz) [![](https://img.shields.io/badge/download-arm64--v86asve2-blue)](https://github.com/ARM-software/armnn/releases/download/v24.05/ArmNN-android-34-arm64-v8.6-a-sve2.tar.gz) |
## Software Overview
The Arm NN SDK supports ML models in **TensorFlow Lite** (TF Lite) and **ONNX** formats.
diff --git a/cmake/GlobalConfig.cmake b/cmake/GlobalConfig.cmake
index feeaef1c60..a4a9552d80 100644
--- a/cmake/GlobalConfig.cmake
+++ b/cmake/GlobalConfig.cmake
@@ -1,5 +1,5 @@
#
-# Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+# Copyright © 2022-2024 Arm Ltd and Contributors. All rights reserved.
# Copyright 2020 NXP
# SPDX-License-Identifier: MIT
#
@@ -46,7 +46,7 @@ option(BUILD_SHARED_LIBS "Determines if Armnn will be built statically or dynami
This is an experimental feature and not fully supported.
Only the ArmNN core and the Delegate can be built statically." ON)
option(BUILD_ACL_OPENMP "If enabled statically link the OpenMP scheduler for ACL.
- Note: ACL must already be built with openmp=1 cppthreads=0" OFF)
+ Note: ACL must already be built with openmp=1 cppthreads=0" ON)
option(EXECUTE_NETWORK_STATIC " This is a limited experimental build that is entirely static.
It currently only supports being set by changing the current CMake default options like so:
BUILD_TF_LITE_PARSER=1/0
@@ -255,14 +255,17 @@ endif()
# Flatbuffers support for TF Lite, Armnn Serializer or the TOSA backend.
if(BUILD_TF_LITE_PARSER OR BUILD_ARMNN_SERIALIZER OR ARMNNTOSAREF)
# verify we have a valid flatbuffers include path
- find_path(FLATBUFFERS_INCLUDE_PATH flatbuffers/flatbuffers.h
- HINTS ${FLATBUFFERS_ROOT}/include /usr/local/include /usr/include)
+ find_path(FLATBUFFERS_INCLUDE_PATH
+ flatbuffers/flatbuffers.h
+ PATHS ${FLATBUFFERS_ROOT}/include /usr/local/include /usr/include
+ NO_DEFAULT_PATH NO_CMAKE_FIND_ROOT_PATH)
message(STATUS "Flatbuffers headers are located at: ${FLATBUFFERS_INCLUDE_PATH}")
find_library(FLATBUFFERS_LIBRARY
NAMES libflatbuffers.a flatbuffers
- HINTS ${FLATBUFFERS_ROOT}/lib /usr/local/lib /usr/lib)
+ PATHS ${FLATBUFFERS_ROOT}/lib /usr/local/lib /usr/lib
+ NO_DEFAULT_PATH NO_CMAKE_FIND_ROOT_PATH)
message(STATUS "Flatbuffers library located at: ${FLATBUFFERS_LIBRARY}")
endif()
@@ -271,7 +274,8 @@ endif()
if(BUILD_TF_LITE_PARSER)
find_path(TF_LITE_SCHEMA_INCLUDE_PATH
schema_generated.h
- HINTS ${TF_LITE_GENERATED_PATH})
+ PATHS ${TF_LITE_GENERATED_PATH}
+ NO_DEFAULT_PATH NO_CMAKE_FIND_ROOT_PATH)
message(STATUS "Tf Lite generated header found at: ${TF_LITE_SCHEMA_INCLUDE_PATH}")
diff --git a/delegate/CMakeLists.txt b/delegate/CMakeLists.txt
index f8b0300976..ebde7c69ce 100644
--- a/delegate/CMakeLists.txt
+++ b/delegate/CMakeLists.txt
@@ -17,6 +17,9 @@ option(BUILD_SHARED_LIBS "Build share libs" ON)
option(BUILD_DELEGATE_JNI_INTERFACE "Builds a library to allow accessing the Arm NN delegate from Java code.
This is an experimental feature." ON)
+## Do not include flatbuffers::ClassicLocale, which can cause an abort when destroyed
+add_definitions(-DFLATBUFFERS_LOCALE_INDEPENDENT=0)
+
set(armnnDelegate_sources)
list(APPEND armnnDelegate_sources
common/include/DelegateOptions.hpp
diff --git a/delegate/classic/src/armnn_delegate.cpp b/delegate/classic/src/armnn_delegate.cpp
index c3815b6c59..76d718bd96 100644
--- a/delegate/classic/src/armnn_delegate.cpp
+++ b/delegate/classic/src/armnn_delegate.cpp
@@ -48,7 +48,6 @@
#include <armnnUtils/Filesystem.hpp>
#include <armnn/utility/Timer.hpp>
-#include <flatbuffers/flatbuffers.h>
#include <tensorflow/lite/context_util.h>
#include <tensorflow/lite/schema/schema_generated.h>
diff --git a/delegate/cmake/Modules/FindFlatbuffers.cmake b/delegate/cmake/Modules/FindFlatbuffers.cmake
index 13d6f917b3..e0924fb85d 100644
--- a/delegate/cmake/Modules/FindFlatbuffers.cmake
+++ b/delegate/cmake/Modules/FindFlatbuffers.cmake
@@ -1,5 +1,5 @@
#
-# Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+# Copyright © 2020, 2024 Arm Ltd and Contributors. All rights reserved.
# SPDX-License-Identifier: MIT
#
@@ -11,7 +11,7 @@ find_path(Flatbuffers_INCLUDE_DIR
HINTS
${FLATBUFFERS_ROOT}/include
/usr/local/include
- /usr/include)
+ /usr/include NO_CMAKE_FIND_ROOT_PATH)
find_library(Flatbuffers_LIB
NAMES
@@ -20,7 +20,7 @@ find_library(Flatbuffers_LIB
HINTS
${FLATBUFFERS_ROOT}/lib
/usr/local/lib
- /usr/lib)
+ /usr/lib NO_CMAKE_FIND_ROOT_PATH)
## Set FLATBUFFERS_FOUND
find_package_handle_standard_args(Flatbuffers DEFAULT_MSG Flatbuffers_INCLUDE_DIR Flatbuffers_LIB)
@@ -29,4 +29,4 @@ find_package_handle_standard_args(Flatbuffers DEFAULT_MSG Flatbuffers_INCLUDE_DI
if(FLATBUFFERS_FOUND)
set(Flatbuffers_LIB ${Flatbuffers_LIB})
set(Flatbuffers_INCLUDE_DIR ${Flatbuffers_INCLUDE_DIR})
-endif()
\ No newline at end of file
+endif()
diff --git a/delegate/cmake/Modules/FindTfLite.cmake b/delegate/cmake/Modules/FindTfLite.cmake
index 20f93ca4ae..c299485da0 100644
--- a/delegate/cmake/Modules/FindTfLite.cmake
+++ b/delegate/cmake/Modules/FindTfLite.cmake
@@ -1,5 +1,5 @@
#
-# Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
+# Copyright © 2020, 2023-2024 Arm Ltd and Contributors. All rights reserved.
# SPDX-License-Identifier: MIT
#
@@ -13,96 +13,133 @@ unset(TFLITE_FOUND)
# on tensorflow lite headers but no need to link to the binary libraries to use only the sources
# and not have an artificial dependency on the libraries.
#
-
# First look for the static version of tensorflow lite
-find_library(TfLite_LIB NAMES "libtensorflow-lite.a" HINTS ${TFLITE_LIB_ROOT} ${TFLITE_LIB_ROOT}/tensorflow/lite)
+find_library(TfLite_LIB NAMES "libtensorflow-lite.a" HINTS ${TFLITE_LIB_ROOT} ${TFLITE_LIB_ROOT}/tensorflow/lite NO_CMAKE_FIND_ROOT_PATH )
# If not found then, look for the dynamic library of tensorflow lite
-find_library(TfLite_LIB NAMES "libtensorflow_lite_all.so" "libtensorflowlite.so" HINTS ${TFLITE_LIB_ROOT} ${TFLITE_LIB_ROOT}/tensorflow/lite)
+find_library(TfLite_LIB NAMES "libtensorflow_lite_all.so" "libtensorflowlite.so" HINTS ${TFLITE_LIB_ROOT} ${TFLITE_LIB_ROOT}/tensorflow/lite NO_CMAKE_FIND_ROOT_PATH)
# If the static library was found, gather all of its dependencies
if (TfLite_LIB MATCHES .a$)
message("-- Static tensorflow lite library found, using for ArmNN build")
find_library(TfLite_abseilstrings_LIB "libabsl_strings.a"
- PATH ${TFLITE_LIB_ROOT}/_deps/abseil-cpp-build/absl/strings)
+ PATHS ${TFLITE_LIB_ROOT}/_deps/abseil-cpp-build/absl/strings
+ NO_DEFAULT_PATH NO_CMAKE_FIND_ROOT_PATH)
+
find_library(TfLite_abseil_synchronization_LIB "libabsl_synchronization.a"
- PATH ${TFLITE_LIB_ROOT}/_deps/abseil-cpp-build/absl/synchronization)
- # Required for building TensorFlow in Debug
+ PATHS ${TFLITE_LIB_ROOT}/_deps/abseil-cpp-build/absl/synchronization
+ NO_DEFAULT_PATH NO_CMAKE_FIND_ROOT_PATH)
+
+
+ # Required for building TensorFlow in Debug
+
find_library(TfLite_abseil_graphCycle_internal_LIB "libabsl_graphcycles_internal.a"
- PATH ${TFLITE_LIB_ROOT}/_deps/abseil-cpp-build/absl/synchronization)
+ PATHS ${TFLITE_LIB_ROOT}/_deps/abseil-cpp-build/absl/synchronization NO_CMAKE_FIND_ROOT_PATH )
find_library(TfLite_farmhash_LIB "libfarmhash.a"
- PATH ${TFLITE_LIB_ROOT}/_deps/farmhash-build)
+ PATHS ${TFLITE_LIB_ROOT}/_deps/farmhash-build NO_CMAKE_FIND_ROOT_PATH)
find_library(TfLite_fftsg_LIB "libfft2d_fftsg.a"
- PATH ${TFLITE_LIB_ROOT}/_deps/fft2d-build)
+ PATHS ${TFLITE_LIB_ROOT}/_deps/fft2d-build NO_CMAKE_FIND_ROOT_PATH)
find_library(TfLite_fftsg2d_LIB "libfft2d_fftsg2d.a"
- PATH ${TFLITE_LIB_ROOT}/_deps/fft2d-build)
- find_library(TfLite_flatbuffers_LIB "libflatbuffers.a"
- PATH ${TFLITE_LIB_ROOT}/_deps/flatbuffers-build)
+ PATHS ${TFLITE_LIB_ROOT}/_deps/fft2d-build NO_CMAKE_FIND_ROOT_PATH)
+
+ find_library(TfLite_flatbuffers_LIB "libflatbuffers.a"
+ PATHS ${TFLITE_LIB_ROOT}/_deps/flatbuffers-build
+ NO_DEFAULT_PATH NO_CMAKE_FIND_ROOT_PATH)
+
find_library(TfLite_cpuinfo_LIB "libcpuinfo.a" PATH
- ${TFLITE_LIB_ROOT}/_deps/cpuinfo-build)
+ ${TFLITE_LIB_ROOT}/_deps/cpuinfo-build NO_CMAKE_FIND_ROOT_PATH)
- # All remaining libraries are part of libruy.
- find_library(TfLite_ruy_allocator_LIB "libruy_allocator.a" PATH
- ${TFLITE_LIB_ROOT}/_deps/ruy-build/ruy)
- find_library(TfLite_ruy_apply_multiplier_LIB "libruy_apply_multiplier.a" PATH
- ${TFLITE_LIB_ROOT}/_deps/ruy-build/ruy)
- find_library(TfLite_ruy_blocking_counter_LIB "libruy_blocking_counter.a" PATH
- ${TFLITE_LIB_ROOT}/_deps/ruy-build/ruy)
- find_library(TfLite_ruy_block_map_LIB "libruy_block_map.a" PATH
- ${TFLITE_LIB_ROOT}/_deps/ruy-build/ruy)
- find_library(TfLite_ruy_context_LIB "libruy_context.a" PATH
- ${TFLITE_LIB_ROOT}/_deps/ruy-build/ruy)
- find_library(TfLite_ruy_context_get_ctx_LIB "libruy_context_get_ctx.a" PATH
- ${TFLITE_LIB_ROOT}/_deps/ruy-build/ruy)
- find_library(TfLite_ruy_cpuinfo_LIB "libruy_cpuinfo.a" PATH
- ${TFLITE_LIB_ROOT}/_deps/ruy-build/ruy)
- find_library(TfLite_ruy_ctx_LIB "libruy_ctx.a" PATH
- ${TFLITE_LIB_ROOT}/_deps/ruy-build/ruy)
- find_library(TfLite_ruy_denormal_LIB "libruy_denormal.a" PATH
- ${TFLITE_LIB_ROOT}/_deps/ruy-build/ruy)
- find_library(TfLite_ruy_frontend_LIB "libruy_frontend.a" PATH
- ${TFLITE_LIB_ROOT}/_deps/ruy-build/ruy)
- find_library(TfLite_ruy_have_built_path_for_avx2_fma_LIB "libruy_have_built_path_for_avx2_fma.a" PATH
- ${TFLITE_LIB_ROOT}/_deps/ruy-build/ruy)
- find_library(TfLite_ruy_have_built_path_for_avx512_LIB "libruy_have_built_path_for_avx512.a" PATH
- ${TFLITE_LIB_ROOT}/_deps/ruy-build/ruy)
- find_library(TfLite_ruy_have_built_path_for_avx_LIB "libruy_have_built_path_for_avx.a" PATH
- ${TFLITE_LIB_ROOT}/_deps/ruy-build/ruy)
- find_library(TfLite_ruy_kernel_arm_LIB "libruy_kernel_arm.a" PATH
- ${TFLITE_LIB_ROOT}/_deps/ruy-build/ruy)
- find_library(TfLite_ruy_kernel_avx2_fma_LIB "libruy_kernel_avx2_fma.a" PATH
- ${TFLITE_LIB_ROOT}/_deps/ruy-build/ruy)
- find_library(TfLite_ruy_kernel_avx512_LIB "libruy_kernel_avx512.a" PATH
- ${TFLITE_LIB_ROOT}/_deps/ruy-build/ruy)
- find_library(TfLite_ruy_kernel_avx_LIB "libruy_kernel_avx.a" PATH
- ${TFLITE_LIB_ROOT}/_deps/ruy-build/ruy)
- find_library(TfLite_ruy_pack_arm_LIB "libruy_pack_arm.a" PATH
- ${TFLITE_LIB_ROOT}/_deps/ruy-build/ruy)
- find_library(TfLite_ruy_pack_avx2_fma_LIB "libruy_pack_avx2_fma.a" PATH
- ${TFLITE_LIB_ROOT}/_deps/ruy-build/ruy)
- find_library(TfLite_ruy_pack_avx512_LIB "libruy_pack_avx512.a" PATH
- ${TFLITE_LIB_ROOT}/_deps/ruy-build/ruy)
- find_library(TfLite_ruy_pack_avx_LIB "libruy_pack_avx.a" PATH
- ${TFLITE_LIB_ROOT}/_deps/ruy-build/ruy)
- find_library(TfLite_ruy_prepacked_cache_LIB "libruy_prepacked_cache.a" PATH
- ${TFLITE_LIB_ROOT}/_deps/ruy-build/ruy)
- find_library(TfLite_ruy_prepare_packed_matrices_LIB "libruy_prepare_packed_matrices.a" PATH
- ${TFLITE_LIB_ROOT}/_deps/ruy-build/ruy)
- find_library(TfLite_ruy_system_aligned_alloc_LIB "libruy_system_aligned_alloc.a" PATH
- ${TFLITE_LIB_ROOT}/_deps/ruy-build/ruy)
- find_library(TfLite_ruy_threadpool_LIB "libruy_thread_pool.a" PATH
- ${TFLITE_LIB_ROOT}/_deps/ruy-build/ruy)
- find_library(TfLite_ruy_trmul_LIB "libruy_trmul.a" PATH
- ${TFLITE_LIB_ROOT}/_deps/ruy-build/ruy)
- find_library(TfLite_ruy_tune_LIB "libruy_tune.a" PATH
- ${TFLITE_LIB_ROOT}/_deps/ruy-build/ruy)
- find_library(TfLite_ruy_wait_LIB "libruy_wait.a" PATH
- ${TFLITE_LIB_ROOT}/_deps/ruy-build/ruy)
- find_library(TfLite_ruy_profiler_LIB "libruy_profiler_instrumentation.a" PATH
- ${TFLITE_LIB_ROOT}/_deps/ruy-build/ruy/profiler)
- find_library(TfLite_pthread_pool_LIB "libpthreadpool.a" PATH
- ${TFLITE_LIB_ROOT}/pthreadpool)
+ find_library(TfLite_ruy_allocator_LIB "libruy_allocator.a"
+ PATHS ${TFLITE_LIB_ROOT}/_deps/ruy-build/ruy
+ NO_DEFAULT_PATH NO_CMAKE_FIND_ROOT_PATH)
+ find_library(TfLite_ruy_apply_multiplier_LIB "libruy_apply_multiplier.a"
+ PATHS ${TFLITE_LIB_ROOT}/_deps/ruy-build/ruy
+ NO_DEFAULT_PATH NO_CMAKE_FIND_ROOT_PATH)
+ find_library(TfLite_ruy_blocking_counter_LIB "libruy_blocking_counter.a"
+ PATHS ${TFLITE_LIB_ROOT}/_deps/ruy-build/ruy
+ NO_DEFAULT_PATH NO_CMAKE_FIND_ROOT_PATH)
+ find_library(TfLite_ruy_block_map_LIB "libruy_block_map.a"
+ PATHS ${TFLITE_LIB_ROOT}/_deps/ruy-build/ruy
+ NO_DEFAULT_PATH NO_CMAKE_FIND_ROOT_PATH)
+ find_library(TfLite_ruy_context_LIB "libruy_context.a"
+ PATHS ${TFLITE_LIB_ROOT}/_deps/ruy-build/ruy
+ NO_DEFAULT_PATH NO_CMAKE_FIND_ROOT_PATH)
+ find_library(TfLite_ruy_context_get_ctx_LIB "libruy_context_get_ctx.a"
+ PATHS ${TFLITE_LIB_ROOT}/_deps/ruy-build/ruy
+ NO_DEFAULT_PATH NO_CMAKE_FIND_ROOT_PATH)
+ find_library(TfLite_ruy_cpuinfo_LIB "libruy_cpuinfo.a"
+ PATHS ${TFLITE_LIB_ROOT}/_deps/ruy-build/ruy
+ NO_DEFAULT_PATH NO_CMAKE_FIND_ROOT_PATH)
+ find_library(TfLite_ruy_ctx_LIB "libruy_ctx.a"
+ PATHS ${TFLITE_LIB_ROOT}/_deps/ruy-build/ruy
+ NO_DEFAULT_PATH NO_CMAKE_FIND_ROOT_PATH)
+ find_library(TfLite_ruy_denormal_LIB "libruy_denormal.a"
+ PATHS ${TFLITE_LIB_ROOT}/_deps/ruy-build/ruy
+ NO_DEFAULT_PATH NO_CMAKE_FIND_ROOT_PATH)
+ find_library(TfLite_ruy_frontend_LIB "libruy_frontend.a"
+ PATHS ${TFLITE_LIB_ROOT}/_deps/ruy-build/ruy
+ NO_DEFAULT_PATH NO_CMAKE_FIND_ROOT_PATH)
+ find_library(TfLite_ruy_have_built_path_for_avx2_fma_LIB "libruy_have_built_path_for_avx2_fma.a"
+ PATHS ${TFLITE_LIB_ROOT}/_deps/ruy-build/ruy
+ NO_DEFAULT_PATH NO_CMAKE_FIND_ROOT_PATH)
+ find_library(TfLite_ruy_have_built_path_for_avx512_LIB "libruy_have_built_path_for_avx512.a"
+ PATHS ${TFLITE_LIB_ROOT}/_deps/ruy-build/ruy
+ NO_DEFAULT_PATH NO_CMAKE_FIND_ROOT_PATH)
+ find_library(TfLite_ruy_have_built_path_for_avx_LIB "libruy_have_built_path_for_avx.a"
+ PATHS ${TFLITE_LIB_ROOT}/_deps/ruy-build/ruy
+ NO_DEFAULT_PATH NO_CMAKE_FIND_ROOT_PATH)
+ find_library(TfLite_ruy_kernel_arm_LIB "libruy_kernel_arm.a"
+ PATHS ${TFLITE_LIB_ROOT}/_deps/ruy-build/ruy
+ NO_DEFAULT_PATH NO_CMAKE_FIND_ROOT_PATH)
+ find_library(TfLite_ruy_kernel_avx2_fma_LIB "libruy_kernel_avx2_fma.a"
+ PATHS ${TFLITE_LIB_ROOT}/_deps/ruy-build/ruy
+ NO_DEFAULT_PATH NO_CMAKE_FIND_ROOT_PATH)
+ find_library(TfLite_ruy_kernel_avx512_LIB "libruy_kernel_avx512.a"
+ PATHS ${TFLITE_LIB_ROOT}/_deps/ruy-build/ruy
+ NO_DEFAULT_PATH NO_CMAKE_FIND_ROOT_PATH)
+ find_library(TfLite_ruy_kernel_avx_LIB "libruy_kernel_avx.a"
+ PATHS ${TFLITE_LIB_ROOT}/_deps/ruy-build/ruy
+ NO_DEFAULT_PATH NO_CMAKE_FIND_ROOT_PATH)
+ find_library(TfLite_ruy_pack_arm_LIB "libruy_pack_arm.a"
+ PATHS ${TFLITE_LIB_ROOT}/_deps/ruy-build/ruy
+ NO_DEFAULT_PATH NO_CMAKE_FIND_ROOT_PATH)
+ find_library(TfLite_ruy_pack_avx2_fma_LIB "libruy_pack_avx2_fma.a"
+ PATHS ${TFLITE_LIB_ROOT}/_deps/ruy-build/ruy
+ NO_DEFAULT_PATH NO_CMAKE_FIND_ROOT_PATH)
+ find_library(TfLite_ruy_pack_avx512_LIB "libruy_pack_avx512.a"
+ PATHS ${TFLITE_LIB_ROOT}/_deps/ruy-build/ruy
+ NO_DEFAULT_PATH NO_CMAKE_FIND_ROOT_PATH)
+ find_library(TfLite_ruy_pack_avx_LIB "libruy_pack_avx.a"
+ PATHS ${TFLITE_LIB_ROOT}/_deps/ruy-build/ruy
+ NO_DEFAULT_PATH NO_CMAKE_FIND_ROOT_PATH)
+ find_library(TfLite_ruy_prepacked_cache_LIB "libruy_prepacked_cache.a"
+ PATHS ${TFLITE_LIB_ROOT}/_deps/ruy-build/ruy
+ NO_DEFAULT_PATH NO_CMAKE_FIND_ROOT_PATH)
+ find_library(TfLite_ruy_prepare_packed_matrices_LIB "libruy_prepare_packed_matrices.a"
+ PATHS ${TFLITE_LIB_ROOT}/_deps/ruy-build/ruy
+ NO_DEFAULT_PATH NO_CMAKE_FIND_ROOT_PATH)
+ find_library(TfLite_ruy_system_aligned_alloc_LIB "libruy_system_aligned_alloc.a"
+ PATHS ${TFLITE_LIB_ROOT}/_deps/ruy-build/ruy
+ NO_DEFAULT_PATH NO_CMAKE_FIND_ROOT_PATH)
+ find_library(TfLite_ruy_threadpool_LIB "libruy_thread_pool.a"
+ PATHS ${TFLITE_LIB_ROOT}/_deps/ruy-build/ruy
+ NO_DEFAULT_PATH NO_CMAKE_FIND_ROOT_PATH)
+ find_library(TfLite_ruy_trmul_LIB "libruy_trmul.a"
+ PATHS ${TFLITE_LIB_ROOT}/_deps/ruy-build/ruy
+ NO_DEFAULT_PATH NO_CMAKE_FIND_ROOT_PATH)
+ find_library(TfLite_ruy_tune_LIB "libruy_tune.a"
+ PATHS ${TFLITE_LIB_ROOT}/_deps/ruy-build/ruy
+ NO_DEFAULT_PATH NO_CMAKE_FIND_ROOT_PATH)
+ find_library(TfLite_ruy_wait_LIB "libruy_wait.a"
+ PATHS ${TFLITE_LIB_ROOT}/_deps/ruy-build/ruy
+ NO_DEFAULT_PATH NO_CMAKE_FIND_ROOT_PATH)
+ find_library(TfLite_ruy_profiler_LIB "libruy_profiler_instrumentation.a"
+ PATHS ${TFLITE_LIB_ROOT}/_deps/ruy-build/ruy/profiler
+ NO_DEFAULT_PATH NO_CMAKE_FIND_ROOT_PATH)
+ find_library(TfLite_pthread_pool_LIB "libpthreadpool.a"
+ PATHS ${TFLITE_LIB_ROOT}/pthreadpool
+ NO_DEFAULT_PATH NO_CMAKE_FIND_ROOT_PATH)
## Set TFLITE_FOUND if all libraries are satisfied for static lib
find_package_handle_standard_args(TfLite DEFAULT_MSG TfLite_LIB TfLite_abseilstrings_LIB TfLite_farmhash_LIB TfLite_fftsg_LIB TfLite_fftsg2d_LIB
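Note: the change above applies one pattern throughout this module: `HINTS` is replaced by `PATHS` together with `NO_DEFAULT_PATH` and `NO_CMAKE_FIND_ROOT_PATH`, so each pre-built TF Lite archive is looked up only in the directory given and is neither shadowed by host system libraries nor re-rooted under `CMAKE_FIND_ROOT_PATH` when cross-compiling (for example with an Android toolchain file). A minimal, self-contained sketch of that pattern follows; `MY_DEP_ROOT` and `mydep` are illustrative names, not part of the Arm NN build.

```cmake
cmake_minimum_required(VERSION 3.7)
project(FindPatternSketch NONE)

# Hypothetical root of a pre-built dependency, supplied by the user.
set(MY_DEP_ROOT "/opt/mydep" CACHE PATH "Root directory of the pre-built mydep library")

# PATHS + NO_DEFAULT_PATH: search only the listed directories, never the
# host's default library paths. NO_CMAKE_FIND_ROOT_PATH: do not prefix the
# listed directories with CMAKE_FIND_ROOT_PATH when cross-compiling.
find_library(MYDEP_LIB
             NAMES libmydep.a mydep
             PATHS ${MY_DEP_ROOT}/lib
             NO_DEFAULT_PATH NO_CMAKE_FIND_ROOT_PATH)

if(NOT MYDEP_LIB)
    message(FATAL_ERROR "mydep not found under ${MY_DEP_ROOT}/lib")
endif()
message(STATUS "mydep library located at: ${MYDEP_LIB}")
```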
diff --git a/delegate/cmake/Modules/FindTfLiteAbsl.cmake b/delegate/cmake/Modules/FindTfLiteAbsl.cmake
index 46fab3bc5b..1a269c5b7d 100644
--- a/delegate/cmake/Modules/FindTfLiteAbsl.cmake
+++ b/delegate/cmake/Modules/FindTfLiteAbsl.cmake
@@ -1,5 +1,5 @@
#
-# Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+# Copyright © 2023, 2024 Arm Ltd and Contributors. All rights reserved.
# SPDX-License-Identifier: MIT
#
@@ -9,38 +9,39 @@ unset(TFLITEABSL_FOUND)
find_path(TfLite_ABSL_SYNC_HEADERS
NAMES
absl
- HINTS
- ${TFLITE_LIB_ROOT}/abseil-cpp)
+ PATHS
+ ${TFLITE_LIB_ROOT}/abseil-cpp
+ NO_DEFAULT_PATH NO_CMAKE_FIND_ROOT_PATH)
# First look for the static version of tensorflow lite
-find_library(TfLite_LIB NAMES "libtensorflow-lite.a" HINTS ${TFLITE_LIB_ROOT} ${TFLITE_LIB_ROOT}/tensorflow/lite)
+find_library(TfLite_LIB NAMES "libtensorflow-lite.a" PATHS ${TFLITE_LIB_ROOT} ${TFLITE_LIB_ROOT}/tensorflow/lite NO_DEFAULT_PATH NO_CMAKE_FIND_ROOT_PATH )
# If the static library was found, gather extra absl libraries for opaque delegate
if (TfLite_LIB MATCHES .a$)
- find_library(TfLite_abseil_base_LIB "libabsl_base.a" PATH
- ${TFLITE_LIB_ROOT}/_deps/abseil-cpp-build/absl/base)
- find_library(TfLite_abseil_log_severity_LIB "libabsl_log_severity.a" PATH
- ${TFLITE_LIB_ROOT}/_deps/abseil-cpp-build/absl/base)
- find_library(TfLite_abseil_spinlock_wait_LIB "libabsl_spinlock_wait.a" PATH
- ${TFLITE_LIB_ROOT}/_deps/abseil-cpp-build/absl/base)
- find_library(TfLite_abseil_malloc_internal_LIB "libabsl_malloc_internal.a" PATH
- ${TFLITE_LIB_ROOT}/_deps/abseil-cpp-build/absl/base)
- find_library(TfLite_abseil_raw_logging_internal_LIB "libabsl_raw_logging_internal.a" PATH
- ${TFLITE_LIB_ROOT}/_deps/abseil-cpp-build/absl/base)
- find_library(TfLite_abseil_stacktrace_LIB "libabsl_stacktrace.a" PATH
- ${TFLITE_LIB_ROOT}/_deps/abseil-cpp-build/absl/debugging)
- find_library(TfLite_abseil_debugging_internal_LIB "libabsl_debugging_internal.a" PATH
- ${TFLITE_LIB_ROOT}/_deps/abseil-cpp-build/absl/debugging)
- find_library(TfLite_abseil_symbolize_LIB "libabsl_symbolize.a" PATH
- ${TFLITE_LIB_ROOT}/_deps/abseil-cpp-build/absl/debugging)
- find_library(TfLite_abseil_demangle_internal_LIB "libabsl_demangle_internal.a" PATH
- ${TFLITE_LIB_ROOT}/_deps/abseil-cpp-build/absl/debugging)
- find_library(TfLite_abseil_time_LIB "libabsl_time.a" PATH
- ${TFLITE_LIB_ROOT}/_deps/abseil-cpp-build/absl/time)
- find_library(TfLite_abseil_time_zone_LIB "libabsl_time_zone.a" PATH
- ${TFLITE_LIB_ROOT}/_deps/abseil-cpp-build/absl/time)
- find_library(TfLite_abseil_int128_LIB "libabsl_int128.a" PATH
- ${TFLITE_LIB_ROOT}/_deps/abseil-cpp-build/absl/numeric)
+ find_library(TfLite_abseil_base_LIB "libabsl_base.a" PATHS
+ ${TFLITE_LIB_ROOT}/_deps/abseil-cpp-build/absl/base NO_DEFAULT_PATH NO_CMAKE_FIND_ROOT_PATH )
+ find_library(TfLite_abseil_log_severity_LIB "libabsl_log_severity.a" PATHS
+ ${TFLITE_LIB_ROOT}/_deps/abseil-cpp-build/absl/base NO_DEFAULT_PATH NO_CMAKE_FIND_ROOT_PATH)
+ find_library(TfLite_abseil_spinlock_wait_LIB "libabsl_spinlock_wait.a" PATHS
+ ${TFLITE_LIB_ROOT}/_deps/abseil-cpp-build/absl/base NO_DEFAULT_PATH NO_CMAKE_FIND_ROOT_PATH)
+ find_library(TfLite_abseil_malloc_internal_LIB "libabsl_malloc_internal.a" PATHS
+ ${TFLITE_LIB_ROOT}/_deps/abseil-cpp-build/absl/base NO_DEFAULT_PATH NO_CMAKE_FIND_ROOT_PATH)
+ find_library(TfLite_abseil_raw_logging_internal_LIB "libabsl_raw_logging_internal.a" PATHS
+ ${TFLITE_LIB_ROOT}/_deps/abseil-cpp-build/absl/base NO_DEFAULT_PATH NO_CMAKE_FIND_ROOT_PATH)
+ find_library(TfLite_abseil_stacktrace_LIB "libabsl_stacktrace.a" PATHS
+ ${TFLITE_LIB_ROOT}/_deps/abseil-cpp-build/absl/debugging NO_DEFAULT_PATH NO_CMAKE_FIND_ROOT_PATH)
+ find_library(TfLite_abseil_debugging_internal_LIB "libabsl_debugging_internal.a" PATHS
+ ${TFLITE_LIB_ROOT}/_deps/abseil-cpp-build/absl/debugging NO_DEFAULT_PATH NO_CMAKE_FIND_ROOT_PATH)
+ find_library(TfLite_abseil_symbolize_LIB "libabsl_symbolize.a" PATHS
+ ${TFLITE_LIB_ROOT}/_deps/abseil-cpp-build/absl/debugging NO_DEFAULT_PATH NO_CMAKE_FIND_ROOT_PATH)
+ find_library(TfLite_abseil_demangle_internal_LIB "libabsl_demangle_internal.a" PATHS
+ ${TFLITE_LIB_ROOT}/_deps/abseil-cpp-build/absl/debugging NO_DEFAULT_PATH NO_CMAKE_FIND_ROOT_PATH)
+ find_library(TfLite_abseil_time_LIB "libabsl_time.a" PATHS
+ ${TFLITE_LIB_ROOT}/_deps/abseil-cpp-build/absl/time NO_DEFAULT_PATH NO_CMAKE_FIND_ROOT_PATH)
+ find_library(TfLite_abseil_time_zone_LIB "libabsl_time_zone.a" PATHS
+ ${TFLITE_LIB_ROOT}/_deps/abseil-cpp-build/absl/time NO_DEFAULT_PATH NO_CMAKE_FIND_ROOT_PATH)
+ find_library(TfLite_abseil_int128_LIB "libabsl_int128.a" PATHS
+ ${TFLITE_LIB_ROOT}/_deps/abseil-cpp-build/absl/numeric NO_DEFAULT_PATH NO_CMAKE_FIND_ROOT_PATH)
## Set TFLITEABSL_FOUND
find_package_handle_standard_args(TfLiteAbsl DEFAULT_MSG TfLite_ABSL_SYNC_HEADERS TfLite_abseil_base_LIB
diff --git a/delegate/cmake/Modules/FindTfLiteSrc.cmake b/delegate/cmake/Modules/FindTfLiteSrc.cmake
index 91833c18e6..1f07ed7b22 100644
--- a/delegate/cmake/Modules/FindTfLiteSrc.cmake
+++ b/delegate/cmake/Modules/FindTfLiteSrc.cmake
@@ -1,22 +1,22 @@
#
-# Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+# Copyright © 2021, 2024 Arm Ltd and Contributors. All rights reserved.
# SPDX-License-Identifier: MIT
#
-
include(FindPackageHandleStandardArgs)
unset(TFLITE_SRC_FOUND)
-
find_path(TfLite_INCLUDE_DIR
NAMES
tensorflow/lite
third_party
- HINTS
- ${TENSORFLOW_ROOT})
+ PATHS
+ ${TENSORFLOW_ROOT}
+ NO_CMAKE_FIND_ROOT_PATH )
find_path(TfLite_Schema_INCLUDE_PATH
schema_generated.h
HINTS
- ${TENSORFLOW_ROOT}/tensorflow/lite/schema)
+ ${TENSORFLOW_ROOT}/tensorflow/lite/schema
+ NO_CMAKE_FIND_ROOT_PATH )
## Set TFLITE_FOUND
find_package_handle_standard_args(TfLiteSrc DEFAULT_MSG TfLite_INCLUDE_DIR TfLite_Schema_INCLUDE_PATH)
diff --git a/delegate/test/ScatterNdTest.cpp b/delegate/test/ScatterNdTest.cpp
index 2b2a67c4eb..802efc23f0 100644
--- a/delegate/test/ScatterNdTest.cpp
+++ b/delegate/test/ScatterNdTest.cpp
@@ -275,13 +275,13 @@ TEST_SUITE("ScatterNdDelegateTests")
TEST_CASE ("ScatterNd_1Dim_FP32_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef, armnn::Compute::GpuAcc };
ScatterNd1DimTest<float>(tflite::TensorType_FLOAT32, backends);
}
TEST_CASE ("ScatterNd_1Dim_INT32_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef, armnn::Compute::GpuAcc };
ScatterNd1DimTest<int32_t>(tflite::TensorType_INT32, backends);
}
@@ -299,13 +299,13 @@ TEST_CASE ("ScatterNd_1Dim_UINT8_Test")
TEST_CASE ("ScatterNd_2Dim_FP32_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef, armnn::Compute::GpuAcc };
ScatterNd2DimTest<float>(tflite::TensorType_FLOAT32, backends);
}
TEST_CASE ("ScatterNd_2Dim_INT32_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef, armnn::Compute::GpuAcc };
ScatterNd2DimTest<int32_t>(tflite::TensorType_INT32, backends);
}
@@ -323,13 +323,13 @@ TEST_CASE ("ScatterNd_2Dim_UINT8_Test")
TEST_CASE ("ScatterNd_2Dim_1Outter_1Inner_FP32_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef, armnn::Compute::GpuAcc };
ScatterNd2Dim1Outter1InnerTest<float>(tflite::TensorType_FLOAT32, backends);
}
TEST_CASE ("ScatterNd_2Dim_1Outter_1Inner_INT32_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef, armnn::Compute::GpuAcc };
ScatterNd2Dim1Outter1InnerTest<int32_t>(tflite::TensorType_INT32, backends);
}
@@ -347,13 +347,13 @@ TEST_CASE ("ScatterNd_2Dim_1Outter_1Inner_UINT8_Test")
TEST_CASE ("ScatterNd_3Dim_FP32_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef, armnn::Compute::GpuAcc };
ScatterNd3DimTest<float>(tflite::TensorType_FLOAT32, backends);
}
TEST_CASE ("ScatterNd_3Dim_INT32_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef, armnn::Compute::GpuAcc };
ScatterNd3DimTest<int32_t>(tflite::TensorType_INT32, backends);
}
@@ -371,13 +371,13 @@ TEST_CASE ("ScatterNd_3Dim_UINT8_Test")
TEST_CASE ("ScatterNd_3Dim_1Outter_2Inner_FP32_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef, armnn::Compute::GpuAcc };
ScatterNd3Dim1Outter2InnerTest<float>(tflite::TensorType_FLOAT32, backends);
}
TEST_CASE ("ScatterNd_3Dim_1Outter_2Inner_INT32_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef, armnn::Compute::GpuAcc };
ScatterNd3Dim1Outter2InnerTest<int32_t>(tflite::TensorType_INT32, backends);
}
@@ -395,13 +395,13 @@ TEST_CASE ("ScatterNd_3Dim_1Outter_2Inner_UINT8_Test")
TEST_CASE ("ScatterNd_3Dim_2Outter_1Inner_FP32_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef, armnn::Compute::GpuAcc };
ScatterNd3Dim2Outter1InnerTest<float>(tflite::TensorType_FLOAT32, backends);
}
TEST_CASE ("ScatterNd_3Dim_2Outter_1Inner_INT32_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef, armnn::Compute::GpuAcc };
ScatterNd3Dim2Outter1InnerTest<int32_t>(tflite::TensorType_INT32, backends);
}
@@ -419,13 +419,13 @@ TEST_CASE ("ScatterNd_3Dim_2Outter_1Inner_UINT8_Test")
TEST_CASE ("ScatterNd_4Dim_FP32_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef, armnn::Compute::GpuAcc };
ScatterNdDim4<float>(tflite::TensorType_FLOAT32, backends);
}
TEST_CASE ("ScatterNd_4Dim_INT32_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef, armnn::Compute::GpuAcc };
ScatterNdDim4<int32_t>(tflite::TensorType_INT32, backends);
}
diff --git a/docs/02_operator_list.dox b/docs/02_operator_list.dox
index 6b2442d28f..1890a5630b 100644
--- a/docs/02_operator_list.dox
+++ b/docs/02_operator_list.dox
@@ -1,4 +1,4 @@
-/// Copyright (c) 2021, 2023-2024 ARM Limited and Contributors. All rights reserved.
+/// Copyright (c) 2021-2024 ARM Limited and Contributors. All rights reserved.
///
/// SPDX-License-Identifier: MIT
///
@@ -899,6 +899,7 @@ where N = batches, C = channels, H = height, W = width
<table>
<tr><th>
<tr><td>BFLOAT16
+ <tr><td>BOOLEAN
<tr><td>FLOAT16
<tr><td>FLOAT32
<tr><td>QASYMMS8
@@ -2957,18 +2958,12 @@ where N = batches, C = channels, H = height, W = width
<td>CpuAcc
<td>
<ul>
- <li>All
+ <li>None
</ul>
<td>
<table>
- <tr><th>
- <tr><td>FLOAT16
- <tr><td>FLOAT32
- <tr><td>QASYMMS8
- <tr><td>QASYMMU8
- <tr><td>QASYMM8
- <tr><td>QSYMMS16
- <tr><td>SIGNED32
+ <tr><th>
+ <tr><td>N/A
</table>
<tr>
<td>GpuAcc
@@ -2981,10 +2976,6 @@ where N = batches, C = channels, H = height, W = width
<tr><th>
<tr><td>FLOAT16
<tr><td>FLOAT32
- <tr><td>QASYMMS8
- <tr><td>QASYMMU8
- <tr><td>QSYMMS8
- <tr><td>QSYMMS16
<tr><td>SIGNED32
</table>
<tr>
diff --git a/docs/05_01_parsers.dox b/docs/05_01_parsers.dox
index 03b8ea22a2..d85d364676 100644
--- a/docs/05_01_parsers.dox
+++ b/docs/05_01_parsers.dox
@@ -24,6 +24,8 @@ bindings take a look into the @ref md_python_pyarmnn_README section.
@section S5_onnx_parser Arm NN Onnx Parser
+## Note: Arm NN will be dropping support for the ONNX Parser in 24.08.
+
`armnnOnnxParser` is a library for loading neural networks defined in ONNX protobuf files into the Arm NN runtime.
## ONNX operators that the Arm NN SDK supports
diff --git a/docs/05_04_supportlibrary.dox b/docs/05_04_supportlibrary.dox
index c96c6f00d6..46a9c73388 100644
--- a/docs/05_04_supportlibrary.dox
+++ b/docs/05_04_supportlibrary.dox
@@ -11,6 +11,8 @@ namespace armnn
@section supportlibraryintro About the NNAPI Support Library
+## Note: Arm NN will be dropping support for the NNAPI Support Library in 24.08.
+
If you need help building the Arm NN NNAPI Support Library, please take a look at our [build guide](shim/BuildGuideShimSupportLibrary.md).
@section sloptions Support Library Options
diff --git a/docs/Doxyfile b/docs/Doxyfile
index 22b2cadc1c..b4a886351f 100644
--- a/docs/Doxyfile
+++ b/docs/Doxyfile
@@ -1,6 +1,6 @@
# Doxyfile 1.8.17
-# Copyright (c) 2018, 2020-2023 ARM Ltd and Contributors. All rights reserved.
+# Copyright (c) 2018, 2020-2024 ARM Ltd and Contributors. All rights reserved.
#
# SPDX-License-Identifier: MIT
#
@@ -61,7 +61,7 @@ PROJECT_NAME = "Arm NN"
# could be handy for archiving the generated documentation or if some version
# control system is used.
-PROJECT_NUMBER = 24.02
+PROJECT_NUMBER = 24.05
# Using the PROJECT_BRIEF tag one can provide an optional one line description
# for a project that appears at the top of each page and should give viewer a
diff --git a/docs/FAQ.md b/docs/FAQ.md
index 12fcdad052..797413ad87 100644
--- a/docs/FAQ.md
+++ b/docs/FAQ.md
@@ -78,3 +78,16 @@ Running multiple inferences in multiple threads concurrently is not supported, b
ArmNN supports multithreading at kernel level and this is implemented in Arm Compute Library (ACL) (https://github.com/ARM-software/ComputeLibrary/).
During inference, at the operator level, the main thread will create multiple threads and execute the same kernel on different parts of the data. At runtime ACL will detect the number of CPU cores in the system and use one thread per cpu core for each kernel.
Multithreading at operator level is not supported due to limitations in ACL, for more information please refer to https://arm-software.github.io/ComputeLibrary/latest/architecture.xhtml#architecture_thread_safety
+
+On Android, executables containing the Arm NN delegate or the Arm NN TfLite Parser occasionally SIGABORT during destruction of Flatbuffers.
+------------------------------
+Unloading some TfLite models occasionally throws a SIGABORT. The error looks similar to this:
+~~~
+#0  0x0000007ff22df5c4 in abort () from target:/apex/com.android.runtime/lib64/bionic/libc.so
+#1  0x0000007ff22ca61c in scudo::die() () from target:/apex/com.android.runtime/lib64/bionic/libc.so
+#2  0x0000007ff22cb244 in scudo::ScopedErrorReport::~ScopedErrorReport() () from target:/apex/com.android.runtime/lib64/bionic/libc.so
+#3  0x0000007ff22cb768 in scudo::reportInvalidChunkState(scudo::AllocatorAction, void*) () from target:/apex/com.android.runtime/lib64/bionic/libc.so
+#4  0x0000007ff22cd520 in scudo::Allocator<scudo::AndroidConfig, &scudo_malloc_postinit>::deallocate(void*, scudo::Chunk::Origin, unsigned long, unsigned long) () from target:/apex/com.android.runtime/lib64/bionic/libc.so
+#5  0x0000007fee6f96f8 in flatbuffers::ClassicLocale::~ClassicLocale() () from target:/data/local/tmp/build.android.aarch64/armnn/libarmnnTfLiteParser.so
+~~~
+The solution is to set the flag "-DFLATBUFFERS_LOCALE_INDEPENDENT=0" in the build. By default, this is already done for our internal executables, for example, ExecuteNetwork.
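As a worked example of the workaround described above, the sketch below shows how a downstream project that links the Arm NN TfLite parser on Android might apply the define; the `my_app` target, source file and link setup are hypothetical and not part of the Arm NN build (inside Arm NN itself the define is added in delegate/CMakeLists.txt, as shown earlier in this change).

```cmake
cmake_minimum_required(VERSION 3.7)
project(MyApp CXX)

add_executable(my_app main.cpp)

# Build flatbuffers code with FLATBUFFERS_LOCALE_INDEPENDENT=0 so the
# flatbuffers::ClassicLocale global is never instantiated; its destructor is
# what aborts under Android's scudo allocator (see FAQ entry above).
target_compile_definitions(my_app PRIVATE FLATBUFFERS_LOCALE_INDEPENDENT=0)

# Assumes the Arm NN libraries and headers are already visible to the
# compiler and linker (e.g. via CMAKE_PREFIX_PATH or link_directories).
target_link_libraries(my_app PRIVATE armnnTfLiteParser armnn)
```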
diff --git a/include/armnn/IRuntime.hpp b/include/armnn/IRuntime.hpp
index a97e44782f..d07bab558d 100644
--- a/include/armnn/IRuntime.hpp
+++ b/include/armnn/IRuntime.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
@@ -258,6 +258,7 @@ public:
/// Evaluates a network using input in inputTensors and outputs filled into outputTensors.
/// This function performs a thread safe execution of the network. Returns once execution is complete.
/// Will block until this and any other thread using the same workingMem object completes.
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("The Async interface will be removed from Arm NN in 24.08", "24.08")
Status Execute(IWorkingMemHandle& workingMemHandle,
const InputTensors& inputTensors,
const OutputTensors& outputTensors,
diff --git a/include/armnnOnnxParser/IOnnxParser.hpp b/include/armnnOnnxParser/IOnnxParser.hpp
index 89c22c03de..c0e2d745de 100644
--- a/include/armnnOnnxParser/IOnnxParser.hpp
+++ b/include/armnnOnnxParser/IOnnxParser.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017,2022 Arm Ltd. All rights reserved.
+// Copyright © 2017,2022,2024 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
@@ -23,11 +23,14 @@ using IOnnxParserPtr = std::unique_ptr<IOnnxParser, void(*)(IOnnxParser* parser)
class IOnnxParser
{
public:
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("The ONNX Parser will be removed from Arm NN in 24.08", "24.08")
static IOnnxParser* CreateRaw();
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("The ONNX Parser will be removed from Arm NN in 24.08", "24.08")
static IOnnxParserPtr Create();
static void Destroy(IOnnxParser* parser);
/// Create the network from a protobuf binary vector
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("The ONNX Parser will be removed from Arm NN in 24.08", "24.08")
armnn::INetworkPtr CreateNetworkFromBinary(const std::vector<uint8_t>& binaryContent);
/// Create the network from a protobuf binary vector, with inputShapes specified
@@ -35,24 +38,30 @@ public:
const std::map<std::string, armnn::TensorShape>& inputShapes);
/// Create the network from a protobuf binary file on disk
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("The ONNX Parser will be removed from Arm NN in 24.08", "24.08")
armnn::INetworkPtr CreateNetworkFromBinaryFile(const char* graphFile);
/// Create the network from a protobuf text file on disk
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("The ONNX Parser will be removed from Arm NN in 24.08", "24.08")
armnn::INetworkPtr CreateNetworkFromTextFile(const char* graphFile);
/// Create the network directly from protobuf text in a string. Useful for debugging/testing
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("The ONNX Parser will be removed from Arm NN in 24.08", "24.08")
armnn::INetworkPtr CreateNetworkFromString(const std::string& protoText);
/// Create the network from a protobuf binary file on disk, with inputShapes specified
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("The ONNX Parser will be removed from Arm NN in 24.08", "24.08")
armnn::INetworkPtr CreateNetworkFromBinaryFile(const char* graphFile,
const std::map<std::string, armnn::TensorShape>& inputShapes);
/// Create the network from a protobuf text file on disk, with inputShapes specified
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("The ONNX Parser will be removed from Arm NN in 24.08", "24.08")
armnn::INetworkPtr CreateNetworkFromTextFile(const char* graphFile,
const std::map<std::string, armnn::TensorShape>& inputShapes);
/// Create the network directly from protobuf text in a string, with inputShapes specified.
/// Useful for debugging/testing
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("The ONNX Parser will be removed from Arm NN in 24.08", "24.08")
armnn::INetworkPtr CreateNetworkFromString(const std::string& protoText,
const std::map<std::string, armnn::TensorShape>& inputShapes);
diff --git a/python/pyarmnn/README.md b/python/pyarmnn/README.md
index b510c361c7..a165f58609 100644
--- a/python/pyarmnn/README.md
+++ b/python/pyarmnn/README.md
@@ -1,5 +1,6 @@
# PyArmNN
+## Note: Arm NN will be dropping support for PyArmNN in 24.08.
PyArmNN is a python extension for [Arm NN SDK](https://developer.arm.com/ip-products/processors/machine-learning/arm-nn).
PyArmNN provides interface similar to Arm NN C++ Api.
Before you proceed with the project setup, you will need to checkout and build a corresponding Arm NN version.
diff --git a/python/pyarmnn/src/pyarmnn/swig/modules/armnn_network.i b/python/pyarmnn/src/pyarmnn/swig/modules/armnn_network.i
index 0b7f55d1cc..bfaa55919b 100644
--- a/python/pyarmnn/src/pyarmnn/swig/modules/armnn_network.i
+++ b/python/pyarmnn/src/pyarmnn/swig/modules/armnn_network.i
@@ -597,6 +597,20 @@ public:
%feature("docstring",
+ "
+ Adds a Broadcast_To layer to the network.
+
+ Args:
+        broadcastToDescriptor (BroadcastToDescriptor): Descriptor for the explicit broadcast operation.
+ name (str): Optional name for the layer.
+
+ Returns:
+ IConnectableLayer: Interface for configuring the layer.
+ ") AddBroadcastToLayer;
+ armnn::IConnectableLayer* AddBroadcastToLayer(const armnn::BroadcastToDescriptor& broadcastToDescriptor,
+ const char* name = nullptr);
+
+ %feature("docstring",
"
Adds a Division layer to the network.
@@ -1148,6 +1162,20 @@ public:
const char* name = nullptr);
%feature("docstring",
+ "
+    Adds a Fused layer to the network. This is a precompiled layer for a fused operator that merges Add + Mul + Add.
+
+ Args:
+ fusedDescriptor (FusedDescriptor): Description of the fused layer.
+ name (str): Optional name for the layer.
+
+ Returns:
+ IConnectableLayer: Interface for configuring the layer.
+ ") AddFusedLayer;
+ armnn::IConnectableLayer* AddFusedLayer(const armnn::FusedDescriptor& fusedDescriptor,
+ const char* name = nullptr);
+
+ %feature("docstring",
"
Adds a LogicalBinary layer to the network.
@@ -1162,6 +1190,20 @@ public:
const char* name = nullptr);
%feature("docstring",
+ "
+ Adds a ScatterND layer to the network.
+
+ Args:
+        scatterndDescriptor (ScatterNdDescriptor): Description of the ScatterNd layer.
+ name (str): Optional name for the layer.
+
+ Returns:
+ IConnectableLayer: Interface for configuring the layer.
+ ") AddScatterNdLayer;
+ armnn::IConnectableLayer* AddScatterNdLayer(const armnn::ScatterNdDescriptor& scatterndDescriptor,
+ const char* name = nullptr);
+
+ %feature("docstring",
"
Adds a Tile layer to the network.
diff --git a/python/pyarmnn/test/test_network.py b/python/pyarmnn/test/test_network.py
index 91602b8fa8..933828329b 100644
--- a/python/pyarmnn/test/test_network.py
+++ b/python/pyarmnn/test/test_network.py
@@ -193,6 +193,7 @@ def test_serialize_to_dot_mode_readonly(network_file, get_runtime, tmpdir):
'AddBatchMatMulLayer',
'AddBatchNormalizationLayer',
'AddBatchToSpaceNdLayer',
+ 'AddBroadcastToLayer',
'AddCastLayer',
'AddChannelShuffleLayer',
'AddComparisonLayer',
@@ -210,6 +211,7 @@ def test_serialize_to_dot_mode_readonly(network_file, get_runtime, tmpdir):
'AddFloorLayer',
'AddFillLayer',
'AddFullyConnectedLayer',
+ 'AddFusedLayer',
'AddGatherLayer',
'AddGatherNdLayer',
'AddInputLayer',
@@ -237,6 +239,7 @@ def test_serialize_to_dot_mode_readonly(network_file, get_runtime, tmpdir):
'AddReshapeLayer',
'AddResizeLayer',
'AddReverseV2Layer',
+ 'AddScatterNdLayer',
'AddShapeLayer',
'AddSliceLayer',
'AddSoftmaxLayer',
diff --git a/samples/AsyncExecutionSample.cpp b/samples/AsyncExecutionSample.cpp
index a789aade01..4647c3af75 100644
--- a/samples/AsyncExecutionSample.cpp
+++ b/samples/AsyncExecutionSample.cpp
@@ -120,8 +120,10 @@ int main()
// Lambda function to execute the network. We use it as thread function.
auto execute = [&](unsigned int executionIndex)
{
+ARMNN_NO_DEPRECATE_WARN_BEGIN
auto memHandle = run->CreateWorkingMemHandle(networkIdentifier);
run->Execute(*memHandle, inputTensors[executionIndex], outputTensors[executionIndex]);
+ARMNN_NO_DEPRECATE_WARN_END
};
// Prepare some threads and let each execute the network with a different input
diff --git a/samples/PreImportMemorySample.cpp b/samples/PreImportMemorySample.cpp
index 98f386bfdf..b7263d7d76 100644
--- a/samples/PreImportMemorySample.cpp
+++ b/samples/PreImportMemorySample.cpp
@@ -88,7 +88,9 @@ int main()
// This function performs a thread safe execution of the network. Returns once execution is complete.
// Will block until this and any other thread using the same workingMem object completes.
// Execute with PreImported inputTensor1 as well as Non-PreImported inputTensor2
+ARMNN_NO_DEPRECATE_WARN_BEGIN
runtime->Execute(*memHandle.get(), {}, {{2, outputTensor1}}, importedInputVec /* pre-imported ids */);
+ARMNN_NO_DEPRECATE_WARN_END
// ImportOutputs separates the importing and mapping of OutputTensors from network execution.
// Allowing for a set of OutputTensors to be imported and mapped once, but used in execution many times.
@@ -99,8 +101,10 @@ int main()
// PreImport outputTensor1
std::vector<ImportedOutputId> importedOutputVec = runtime->ImportOutputs(networkIdentifier1, {output1});
+ARMNN_NO_DEPRECATE_WARN_BEGIN
// Execute with Non-PreImported inputTensor1 as well as PreImported inputTensor2
runtime->Execute(*memHandle.get(), {{0, inputTensor1}}, {{2, outputTensor1}}, {1 /* pre-imported id */});
+ARMNN_NO_DEPRECATE_WARN_END
// Clear the previously PreImportedInput with the network Id and inputIds returned from ImportInputs()
// Note: This will happen automatically during destructor of armnn::LoadedNetwork
@@ -110,8 +114,10 @@ int main()
// Note: This will happen automatically during destructor of armnn::LoadedNetwork
runtime->ClearImportedOutputs(networkIdentifier1, importedOutputVec);
+ARMNN_NO_DEPRECATE_WARN_BEGIN
// Execute with Non-PreImported inputTensor1, inputTensor2 and the PreImported outputTensor1
runtime->Execute(*memHandle.get(), {{0, inputTensor1}, {1, inputTensor2}}, {{2, outputTensor1}});
+ARMNN_NO_DEPRECATE_WARN_END
std::cout << "Your number was " << outputData1.data()[0] << std::endl;
diff --git a/scripts/get_compute_library.sh b/scripts/get_compute_library.sh
index 63c09629a8..aa07e731c6 100755
--- a/scripts/get_compute_library.sh
+++ b/scripts/get_compute_library.sh
@@ -7,10 +7,10 @@
CMD=$( basename "$0" )
# For pinning to a ref use this:
-DEFAULT_CLFRAMEWORKREVISION="branches/arm_compute_24_04" # Release 24.04
+DEFAULT_CLFRAMEWORKREVISION="branches/arm_compute_24_05" # Release 24.05
#
# For pinning to a revision use this:
-#DEFAULT_CLFRAMEWORKREVISION="1322065a3fbd15b00dbfb0969d6b438b5ba15530" #11399: Specify absolute tolerance
+#DEFAULT_CLFRAMEWORKREVISION="c22e1263ba3a6945ceb1fdccb33eac512fd156fb" #11520: arm_gemm: fix SVE check on fast mode kernels.
usage() {
echo -e "get_compute_library.sh: Clones the Arm Compute Library (ACL) repo from the ML Platform server and checks out
diff --git a/shim/BuildGuideShimSupportLibrary.md b/shim/BuildGuideShimSupportLibrary.md
index 98c626fee0..8baa9a044d 100644
--- a/shim/BuildGuideShimSupportLibrary.md
+++ b/shim/BuildGuideShimSupportLibrary.md
@@ -1,5 +1,6 @@
# How to use the Android NDK to build Arm NN
+- [Deprecation Notice](#deprecation-notice)
- [Introduction](#introduction)
- [Prerequisites](#prerequisites)
- [Download Arm NN](#download-arm-nn)
@@ -9,6 +10,9 @@
- [Build Arm NN Shim](#build-arm-nn-shim)
+## Deprecation Notice
+Arm NN will be dropping support for the Support Library in 24.08.
+
## Introduction
These are step by step instructions for building the Arm NN shim and support library for NNAPI.
This work is currently in an experimental phase.
@@ -16,7 +20,7 @@ This work is currently in an experimental phase.
## Prerequisites
The following are required to build the Arm NN support library
-* Android NDK r25
+* Android NDK r26b
* Detailed setup can be found in [BuildGuideAndroidNDK.md](../BuildGuideAndroidNDK.md)
* Flatbuffer version 23.5.26
* Detailed setup can be found in [BuildGuideCrossCompilation.md](../BuildGuideCrossCompilation.md)
@@ -33,7 +37,7 @@ export WORKING_DIR=<path to where the Arm NN source code, clframework and aosp r
export AOSP_ROOT=<path to the root of Android tree where the shim will be built>
export AOSP_MODULES_ROOT=<path to where AOSP modules will be cloned i.e. $WORKING_DIR/aosp>
export ARMNN_BUILD_DIR=<path to the Arm NN build directory i.e. $WORKING_DIR/build>
-export NDK=<path to>android-ndk-r25
+export NDK=<path to>android-ndk-r26b
export NDK_TOOLCHAIN_ROOT=$NDK/toolchains/llvm/prebuilt/linux-x86_64
export PATH=$NDK_TOOLCHAIN_ROOT/bin/:$PATH
export FLATBUFFERS_ANDROID_BUILD=<path to flatbuffers target android build>
@@ -68,7 +72,7 @@ cd ${WORKING_DIR}/clframework
scons arch=arm64-v8a \
toolchain_prefix=aarch64-linux-android- \
-compiler_prefix=aarch64-linux-android29- \
+compiler_prefix=aarch64-linux-android${ANDROID_API}- \
neon=1 opencl=1 \
embed_kernels=1 \
build_dir=android-arm64v8a \
@@ -76,6 +80,7 @@ extra_cxx_flags="-Wno-parentheses-equality -Wno-missing-braces -fPIC" \
Werror=0 embed_kernels=1 examples=0 \
validation_tests=0 benchmark_tests=0 benchmark_examples=0 os=android -j16
```
+Note: ANDROID_API is the Android API level you want to build for; see the example after this diff.
## Build Arm NN and Serializer
@@ -83,12 +88,12 @@ validation_tests=0 benchmark_tests=0 benchmark_examples=0 os=android -j16
(Requires CMake if not previously installed: `sudo apt install cmake`)
```bash
cd $ARMNN_BUILD_DIR
-CXX=aarch64-linux-android29-clang++ \
-CC=aarch64-linux-android29-clang \
+CXX=aarch64-linux-android${ANDROID_API}-clang++ \
+CC=aarch64-linux-android${ANDROID_API}-clang \
CXX_FLAGS="-fPIE -fPIC" cmake ${WORKING_DIR}/armnn \
-DCMAKE_ANDROID_NDK=$NDK \
-DCMAKE_SYSTEM_NAME=Android \
--DCMAKE_SYSTEM_VERSION=29 \
+-DCMAKE_SYSTEM_VERSION=$ANDROID_API \
-DCMAKE_ANDROID_ARCH_ABI=arm64-v8a \
-DCMAKE_EXE_LINKER_FLAGS="-pie -llog -lz" \
-DARMCOMPUTE_ROOT=$WORKING_DIR/clframework/ \
@@ -125,7 +130,7 @@ CMARGS="$CMARGS \
-DANDROID_ABI=arm64-v8a \
-DCMAKE_ANDROID_ARCH_ABI=arm64-v8a \
-DCMAKE_ANDROID_NDK=$NDK \
--DANDROID_PLATFORM=android-29 \
+-DANDROID_PLATFORM=android-$ANDROID_API \
-DAOSP_MODULES_ROOT=$AOSP_MODULES_ROOT \
-DARMNN_SOURCE_DIR=$WORKING_DIR/armnn \
-DArmnn_DIR=$ARMNN_BUILD_DIR "
@@ -133,8 +138,8 @@ CMARGS="$CMARGS \
mkdir ${WORKING_DIR}/armnn/shim/sl/build
cd ${WORKING_DIR}/armnn/shim/sl/build
-CXX=aarch64-linux-android29-clang++ \
-CC=aarch64-linux-android29-clang \
+CXX=aarch64-linux-android$ANDROID_API-clang++ \
+CC=aarch64-linux-android$ANDROID_API-clang \
cmake $CMARGS ../
make
```
diff --git a/shim/sl/README.md b/shim/sl/README.md
index 32f117a562..3a099dd24a 100644
--- a/shim/sl/README.md
+++ b/shim/sl/README.md
@@ -1,6 +1,7 @@
# Arm NN Support Library Neural Networks driver
This directory contains the Arm NN Support Library for the Android Neural Networks API.
+### Note: Arm NN will be dropping support for the Support Library in 24.08.
# Passing parameters to the support library runtime.
diff --git a/shim/sl/canonical/ArmnnDevice.hpp b/shim/sl/canonical/ArmnnDevice.hpp
index 93109696f7..dbc966683c 100644
--- a/shim/sl/canonical/ArmnnDevice.hpp
+++ b/shim/sl/canonical/ArmnnDevice.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -17,6 +17,7 @@ class ArmnnDevice
friend class ArmnnDriver;
public:
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("The Shim and support library will be removed from Arm NN in 24.08", "24.08")
ArmnnDevice(DriverOptions options);
~ArmnnDevice() {}
protected:
diff --git a/shim/sl/canonical/ArmnnDriver.hpp b/shim/sl/canonical/ArmnnDriver.hpp
index 6cb06604d2..d6b7d849b3 100644
--- a/shim/sl/canonical/ArmnnDriver.hpp
+++ b/shim/sl/canonical/ArmnnDriver.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -33,6 +33,7 @@ class ArmnnDriver : public IDevice
private:
std::unique_ptr<ArmnnDevice> m_Device;
public:
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("The Shim and support library will be removed from Arm NN in 24.08", "24.08")
ArmnnDriver(DriverOptions options)
{
try
@@ -212,7 +213,7 @@ public:
if (hasDeadlinePassed(deadline)) {
return NN_ERROR(ErrorStatus::MISSED_DEADLINE_PERSISTENT);
}
-
+ARMNN_NO_DEPRECATE_WARN_BEGIN
return ArmnnDriverImpl::PrepareArmnnModel(m_Device->m_Runtime,
m_Device->m_ClTunedParameters,
m_Device->m_Options,
@@ -222,6 +223,7 @@ public:
token,
model.relaxComputationFloat32toFloat16 && m_Device->m_Options.GetFp16Enabled(),
priority);
+ARMNN_NO_DEPRECATE_WARN_END
}
GeneralResult<SharedPreparedModel> prepareModelFromCache(OptionalTimePoint deadline,
@@ -239,6 +241,7 @@ public:
return NN_ERROR(ErrorStatus::MISSED_DEADLINE_PERSISTENT);
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
return ArmnnDriverImpl::PrepareArmnnModelFromCache(
m_Device->m_Runtime,
m_Device->m_ClTunedParameters,
@@ -247,6 +250,7 @@ public:
dataCache,
token,
m_Device->m_Options.GetFp16Enabled());
+ARMNN_NO_DEPRECATE_WARN_END
}
GeneralResult<SharedBuffer> allocate(const BufferDesc&,
diff --git a/shim/sl/canonical/ArmnnDriverImpl.hpp b/shim/sl/canonical/ArmnnDriverImpl.hpp
index 6af0ab285d..47106fdeee 100644
--- a/shim/sl/canonical/ArmnnDriverImpl.hpp
+++ b/shim/sl/canonical/ArmnnDriverImpl.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022, 2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -23,6 +23,7 @@ namespace armnn_driver
class ArmnnDriverImpl
{
public:
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("The Shim and support library will be removed from Arm NN in 24.08", "24.08")
static GeneralResult<SharedPreparedModel> PrepareArmnnModel(
const armnn::IRuntimePtr& runtime,
const armnn::IGpuAccTunedParametersPtr& clTunedParameters,
@@ -34,6 +35,7 @@ public:
bool float32ToFloat16 = false,
Priority priority = Priority::MEDIUM);
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("The Shim and support library will be removed from Arm NN in 24.08", "24.08")
static GeneralResult<SharedPreparedModel> PrepareArmnnModelFromCache(
const armnn::IRuntimePtr& runtime,
const armnn::IGpuAccTunedParametersPtr& clTunedParameters,
diff --git a/shim/sl/canonical/ArmnnPreparedModel.hpp b/shim/sl/canonical/ArmnnPreparedModel.hpp
index c487858c01..295acf1cfe 100644
--- a/shim/sl/canonical/ArmnnPreparedModel.hpp
+++ b/shim/sl/canonical/ArmnnPreparedModel.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022, 2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -39,6 +39,7 @@ class ArmnnPreparedModel final : public IPreparedModel,
public std::enable_shared_from_this<ArmnnPreparedModel>
{
public:
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("The Shim and support library will be removed from Arm NN in 24.08", "24.08")
ArmnnPreparedModel(armnn::NetworkId networkId,
armnn::IRuntime* runtime,
const Model& model,
@@ -46,6 +47,7 @@ public:
const bool gpuProfilingEnabled,
Priority priority = Priority::MEDIUM);
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("The Shim and support library will be removed from Arm NN in 24.08", "24.08")
ArmnnPreparedModel(armnn::NetworkId networkId,
armnn::IRuntime* runtime,
const std::string& requestInputsAndOutputsDumpDir,
diff --git a/src/armnn/Tensor.cpp b/src/armnn/Tensor.cpp
index f75fc60ef7..650b93835f 100644
--- a/src/armnn/Tensor.cpp
+++ b/src/armnn/Tensor.cpp
@@ -465,15 +465,12 @@ float TensorInfo::GetQuantizationScale() const
// NOTE: old default for backward compatibility
return 1.0f;
}
- // If this tensor includes multiples scales then you should be calling GetQuantizationScales.
- // This should be an exception not an assert but unfortunately it breaks many tests.
- // ToDo: IVGCVSW-8323
- ARMNN_ASSERT(!HasMultipleQuantizationScales());
-// if (HasMultipleQuantizationScales())
-// {
-// throw RuntimeException("Invalid call to GetQuantizationScale on a tensor with multiple scale values. Use "
-// "GetQuantizationScales instead.");
-// }
+
+ if (HasMultipleQuantizationScales())
+ {
+ throw RuntimeException("Invalid call to GetQuantizationScale on a tensor with multiple scale values. Use "
+ "GetQuantizationScales instead.");
+ }
return m_Quantization.m_Scales[0];
}
diff --git a/src/armnn/Threadpool.cpp b/src/armnn/Threadpool.cpp
index df4ff84fb5..9cd8be9dfb 100644
--- a/src/armnn/Threadpool.cpp
+++ b/src/armnn/Threadpool.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021, 2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#if !defined(ARMNN_DISABLE_THREADS)
@@ -188,11 +188,12 @@ void Threadpool::ProcessExecPriorities(uint32_t index)
try // executing the inference
{
IWorkingMemHandle& memHandle = *(m_WorkingMemHandleMap.at(networkId))[index];
-
+ARMNN_NO_DEPRECATE_WARN_BEGIN
// Execute and populate the time at end of inference in the callback
m_RuntimePtr->Execute(memHandle, inputTensors, outputTensors) == Status::Success ?
cb->Notify(Status::Success, std::make_pair(startTime, armnn::GetTimeNow())) :
cb->Notify(Status::Failure, std::make_pair(startTime, armnn::GetTimeNow()));
+ARMNN_NO_DEPRECATE_WARN_END
}
catch (const RuntimeException&)
{
diff --git a/src/armnn/test/RuntimeTests.cpp b/src/armnn/test/RuntimeTests.cpp
index 6d5e2ae2b5..4bb2f44357 100644
--- a/src/armnn/test/RuntimeTests.cpp
+++ b/src/armnn/test/RuntimeTests.cpp
@@ -116,7 +116,9 @@ TEST_CASE("RuntimePreImportInputs")
auto memHandle = runtime->CreateWorkingMemHandle(networkId);
+ARMNN_NO_DEPRECATE_WARN_BEGIN
runtime->Execute(*memHandle.get(), {{1, inputTensor2}}, {{2, outputTensor}}, {0 /* pre-imported id */});
+ARMNN_NO_DEPRECATE_WARN_END
for (auto val: output) {
CHECK(val == 30);
}
@@ -125,12 +127,16 @@ TEST_CASE("RuntimePreImportInputs")
CHECK(importedInputVec2.size() == 1);
CHECK(importedInputVec2[0] == 1);
+ARMNN_NO_DEPRECATE_WARN_BEGIN
runtime->Execute(*memHandle.get(), {{0, inputTensor1}}, {{2, outputTensor}}, {1 /* pre-imported id */});
+ARMNN_NO_DEPRECATE_WARN_END
for (auto val: output) {
CHECK(val == 30);
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
runtime->Execute(*memHandle.get(), {}, {{2, outputTensor}}, {0, 1});
+ARMNN_NO_DEPRECATE_WARN_END
for (auto val: output) {
CHECK(val == 30);
}
diff --git a/src/armnnConverter/ArmnnConverter.cpp b/src/armnnConverter/ArmnnConverter.cpp
index 0129204251..f3beb81d8b 100644
--- a/src/armnnConverter/ArmnnConverter.cpp
+++ b/src/armnnConverter/ArmnnConverter.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017, 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017, 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include <armnn/Logging.hpp>
@@ -292,6 +292,7 @@ private:
#endif
#if defined(ARMNN_ONNX_PARSER)
+ARMNN_NO_DEPRECATE_WARN_BEGIN
bool CreateNetwork (ParserType<armnnOnnxParser::IOnnxParser>)
{
// Create a network from a file on disk
@@ -318,6 +319,7 @@ private:
return m_NetworkPtr.get() != nullptr;
}
+ARMNN_NO_DEPRECATE_WARN_END
#endif
};
diff --git a/src/armnnConverter/README.md b/src/armnnConverter/README.md
index 6c8ab27458..a1f29d9fbe 100644
--- a/src/armnnConverter/README.md
+++ b/src/armnnConverter/README.md
@@ -1,5 +1,7 @@
# The ArmnnConverter
+## Note: Arm NN will be dropping support for the ArmnnConverter in 24.08.
+
The `ArmnnConverter` is a program for converting neural networks from other formats to Arm NN format.
Currently the program supports models in Onnx and Tensorflow Lite FlatBuffers formats.
Run the program with no arguments to see command-line help.
diff --git a/src/armnnOnnxParser/OnnxParser.cpp b/src/armnnOnnxParser/OnnxParser.cpp
index 5ec99ede74..b757c843fe 100644
--- a/src/armnnOnnxParser/OnnxParser.cpp
+++ b/src/armnnOnnxParser/OnnxParser.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017,2022-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017,2022-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "OnnxParser.hpp"
@@ -37,7 +37,9 @@ IOnnxParser* IOnnxParser::CreateRaw()
IOnnxParserPtr IOnnxParser::Create()
{
+ARMNN_NO_DEPRECATE_WARN_BEGIN
return IOnnxParserPtr(CreateRaw(), &IOnnxParser::Destroy);
+ARMNN_NO_DEPRECATE_WARN_END
}
void IOnnxParser::Destroy(IOnnxParser* parser)
diff --git a/src/armnnOnnxParser/test/Constructor.cpp b/src/armnnOnnxParser/test/Constructor.cpp
index 0fc3a1ecba..c6fff210ad 100644
--- a/src/armnnOnnxParser/test/Constructor.cpp
+++ b/src/armnnOnnxParser/test/Constructor.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017, 2024 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -7,6 +7,7 @@
#include <doctest/doctest.h>
+ARMNN_NO_DEPRECATE_WARN_BEGIN
TEST_SUITE("OnnxParser_Constructor")
{
TEST_CASE("Create")
@@ -15,3 +16,4 @@ TEST_CASE("Create")
}
}
+ARMNN_NO_DEPRECATE_WARN_END \ No newline at end of file
diff --git a/src/armnnOnnxParser/test/CreateNetwork.cpp b/src/armnnOnnxParser/test/CreateNetwork.cpp
index eeeaca85dc..84589eb1e7 100644
--- a/src/armnnOnnxParser/test/CreateNetwork.cpp
+++ b/src/armnnOnnxParser/test/CreateNetwork.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017, 2024 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -7,6 +7,7 @@
#include <doctest/doctest.h>
#include "google/protobuf/stubs/logging.h"
+ARMNN_NO_DEPRECATE_WARN_BEGIN
TEST_SUITE("OnnxParser_CreateNetwork")
{
@@ -62,3 +63,4 @@ TEST_CASE("CreateNetworkWithInvalidString")
}
}
+ARMNN_NO_DEPRECATE_WARN_END
diff --git a/src/armnnTfLiteParser/TfLiteParser.cpp b/src/armnnTfLiteParser/TfLiteParser.cpp
index 3fd81ff973..3a4c084301 100644
--- a/src/armnnTfLiteParser/TfLiteParser.cpp
+++ b/src/armnnTfLiteParser/TfLiteParser.cpp
@@ -3,6 +3,10 @@
// SPDX-License-Identifier: MIT
//
+// Do not include flatbuffers::ClassicLocale, which can cause an abort when destroyed
+// This define must be added before the include, or it causes a macro redefinition error
+#define FLATBUFFERS_LOCALE_INDEPENDENT 0
+
#include "TfLiteParser.hpp"
#include "armnnTfLiteParser/Version.hpp"
diff --git a/src/armnnUtils/ParserPrototxtFixture.hpp b/src/armnnUtils/ParserPrototxtFixture.hpp
index a12a66ea25..9226031c5a 100644
--- a/src/armnnUtils/ParserPrototxtFixture.hpp
+++ b/src/armnnUtils/ParserPrototxtFixture.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -23,12 +23,14 @@ namespace armnnUtils
template<typename TParser>
struct ParserPrototxtFixture
{
+ARMNN_NO_DEPRECATE_WARN_BEGIN
ParserPrototxtFixture()
: m_Parser(TParser::Create())
, m_Runtime(armnn::IRuntime::Create(armnn::IRuntime::CreationOptions()))
, m_NetworkIdentifier(-1)
{
}
+ARMNN_NO_DEPRECATE_WARN_END
/// Parses and loads the network defined by the m_Prototext string.
/// @{
@@ -140,7 +142,7 @@ template<typename TParser>
void ParserPrototxtFixture<TParser>::Setup(const std::map<std::string, armnn::TensorShape>& inputShapes)
{
std::string errorMessage;
-
+ARMNN_NO_DEPRECATE_WARN_BEGIN
armnn::INetworkPtr network =
m_Parser->CreateNetworkFromString(m_Prototext.c_str(), inputShapes);
auto optimized = Optimize(*network, { armnn::Compute::CpuRef }, m_Runtime->GetDeviceSpec());
@@ -151,13 +153,14 @@ void ParserPrototxtFixture<TParser>::Setup(const std::map<std::string, armnn::Te
errorMessage,
CHECK_LOCATION().AsString()));
}
+ARMNN_NO_DEPRECATE_WARN_END
}
template<typename TParser>
void ParserPrototxtFixture<TParser>::Setup()
{
std::string errorMessage;
-
+ARMNN_NO_DEPRECATE_WARN_BEGIN
armnn::INetworkPtr network =
m_Parser->CreateNetworkFromString(m_Prototext.c_str());
auto optimized = Optimize(*network, { armnn::Compute::CpuRef }, m_Runtime->GetDeviceSpec());
@@ -168,6 +171,7 @@ void ParserPrototxtFixture<TParser>::Setup()
errorMessage,
CHECK_LOCATION().AsString()));
}
+ARMNN_NO_DEPRECATE_WARN_END
}
template<typename TParser>
diff --git a/src/backends/aclCommon/ArmComputeTensorUtils.cpp b/src/backends/aclCommon/ArmComputeTensorUtils.cpp
index c5b4fa157e..cfd2e0e110 100644
--- a/src/backends/aclCommon/ArmComputeTensorUtils.cpp
+++ b/src/backends/aclCommon/ArmComputeTensorUtils.cpp
@@ -459,5 +459,31 @@ unsigned int ComputeDepthwiseConv2dDepthMultiplier(armnn::DataLayout layout,
return depthMultiplier;
}
+arm_compute::ScatterInfo BuildArmComputeScatterInfo(const ScatterNdDescriptor& descriptor)
+{
+ arm_compute::ScatterFunction scatterFunction;
+ switch(descriptor.m_Function)
+ {
+ case ScatterNdFunction::Update:
+ scatterFunction = arm_compute::ScatterFunction::Update;
+ break;
+ case ScatterNdFunction::Add:
+ scatterFunction = arm_compute::ScatterFunction::Add;
+ break;
+ case ScatterNdFunction::Sub:
+ scatterFunction = arm_compute::ScatterFunction::Sub;
+ break;
+ case ScatterNdFunction::Max:
+ scatterFunction = arm_compute::ScatterFunction::Max;
+ break;
+ case ScatterNdFunction::Min:
+ scatterFunction = arm_compute::ScatterFunction::Min;
+ break;
+ default: throw InvalidArgumentException("Unknown ArmNN::ScatterNd Function: [" +
+ std::to_string(static_cast<int>(descriptor.m_Function)) + "]");
+ }
+
+ return arm_compute::ScatterInfo(scatterFunction, !descriptor.m_InputEnabled);
+}
} // namespace armcomputetensorutils
} // namespace armnn
diff --git a/src/backends/aclCommon/ArmComputeTensorUtils.hpp b/src/backends/aclCommon/ArmComputeTensorUtils.hpp
index d8a41fe41f..63c70c7092 100644
--- a/src/backends/aclCommon/ArmComputeTensorUtils.hpp
+++ b/src/backends/aclCommon/ArmComputeTensorUtils.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
@@ -12,6 +12,7 @@
#include <arm_compute/core/ITensor.h>
#include <arm_compute/core/TensorInfo.h>
#include <arm_compute/core/Types.h>
+#include <arm_compute/function_info/ScatterInfo.h>
#include <Half.hpp>
@@ -108,6 +109,9 @@ unsigned int ComputeDepthwiseConv2dDepthMultiplier(armnn::DataLayout layout,
const arm_compute::TensorShape& weightsShape,
const arm_compute::TensorShape& inputShape);
+/// Utility function used to setup an arm_compute::ScatterInfo object from an ArmNN ScatterNd descriptor.
+arm_compute::ScatterInfo BuildArmComputeScatterInfo(const ScatterNdDescriptor& descriptor);
+
/// Utility function used to setup an arm_compute::PadStrideInfo object from an ArmNN layer descriptor.
template <typename Descriptor>
arm_compute::PadStrideInfo BuildArmComputePadStrideInfo(const Descriptor& descriptor)
diff --git a/src/backends/backendsCommon/test/StridedSliceAsyncEndToEndTest.hpp b/src/backends/backendsCommon/test/StridedSliceAsyncEndToEndTest.hpp
index 84bf34dc60..00be81dd3e 100644
--- a/src/backends/backendsCommon/test/StridedSliceAsyncEndToEndTest.hpp
+++ b/src/backends/backendsCommon/test/StridedSliceAsyncEndToEndTest.hpp
@@ -90,11 +90,12 @@ void AsyncThreadedEndToEndTestImpl(INetworkPtr network,
InputTensors& inputTensors = inputTensorsVec[i];
OutputTensors& outputTensors = outputTensorsVec[i];
IWorkingMemHandle& workingMemHandle = *workingMemHandles[i].get();
-
threads.emplace_back([&]()
{
+ARMNN_NO_DEPRECATE_WARN_BEGIN
// Run the async network
runtime->Execute(workingMemHandle, inputTensors, outputTensors);
+ARMNN_NO_DEPRECATE_WARN_END
});
}
@@ -184,9 +185,10 @@ void AsyncEndToEndTestImpl(INetworkPtr network,
// Create WorkingMemHandle for this async network
std::unique_ptr<IWorkingMemHandle> workingMemHandle = runtime->CreateWorkingMemHandle(networkId);
IWorkingMemHandle& workingMemHandleRef = *workingMemHandle.get();
-
+ARMNN_NO_DEPRECATE_WARN_BEGIN
// Run the async network
runtime->Execute(workingMemHandleRef, inputTensors, outputTensorsVec[0]);
+ARMNN_NO_DEPRECATE_WARN_END
}
else
{
diff --git a/src/backends/cl/ClLayerSupport.cpp b/src/backends/cl/ClLayerSupport.cpp
index 9f7d562df6..030b4c2d09 100644
--- a/src/backends/cl/ClLayerSupport.cpp
+++ b/src/backends/cl/ClLayerSupport.cpp
@@ -73,6 +73,7 @@
#include "workloads/ClResizeWorkload.hpp"
#include "workloads/ClReverseV2Workload.hpp"
#include "workloads/ClRsqrtWorkload.hpp"
+#include "workloads/ClScatterNdWorkload.hpp"
#include "workloads/ClSinWorkload.hpp"
#include "workloads/ClSliceWorkload.hpp"
#include "workloads/ClSoftmaxWorkload.hpp"
@@ -578,6 +579,13 @@ bool ClLayerSupport::IsLayerSupported(const LayerType& type,
infos[1],
infos[2],
reasonIfUnsupported);
+ case LayerType::ScatterNd:
+ return IsScatterNdSupported(infos[0], // input/shape
+ infos[1], // indices
+ infos[2], // updates
+ infos[3], // output
+ *(PolymorphicDowncast<const ScatterNdDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
case LayerType::Shape:
return LayerSupportBase::IsShapeSupported(infos[0],
infos[1],
@@ -1442,6 +1450,22 @@ bool ClLayerSupport::IsReverseV2Supported(const TensorInfo& input,
output);
}
+bool ClLayerSupport::IsScatterNdSupported(const TensorInfo& input,
+ const TensorInfo& indices,
+ const TensorInfo& updates,
+ const TensorInfo& output,
+ const ScatterNdDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ FORWARD_WORKLOAD_VALIDATE_FUNC(ClScatterNdWorkloadValidate,
+ reasonIfUnsupported,
+ input,
+ indices,
+ updates,
+ output,
+ descriptor);
+}
+
bool ClLayerSupport::IsSliceSupported(const TensorInfo& input,
const TensorInfo& output,
const SliceDescriptor& descriptor,
diff --git a/src/backends/cl/ClLayerSupport.hpp b/src/backends/cl/ClLayerSupport.hpp
index 907db01b89..8e9c0be7f8 100644
--- a/src/backends/cl/ClLayerSupport.hpp
+++ b/src/backends/cl/ClLayerSupport.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
@@ -300,6 +300,13 @@ public:
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported) const;
+ bool IsScatterNdSupported(const TensorInfo& input,
+ const TensorInfo& indices,
+ const TensorInfo& updates,
+ const TensorInfo& output,
+ const ScatterNdDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const;
+
bool IsSliceSupported(const TensorInfo& input,
const TensorInfo& output,
const SliceDescriptor& descriptor,
diff --git a/src/backends/cl/ClWorkloadFactory.cpp b/src/backends/cl/ClWorkloadFactory.cpp
index 6fe42644c2..6a7b0e64ae 100644
--- a/src/backends/cl/ClWorkloadFactory.cpp
+++ b/src/backends/cl/ClWorkloadFactory.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "ClWorkloadFactory.hpp"
@@ -716,6 +716,11 @@ std::unique_ptr<IWorkload> ClWorkloadFactory::CreateWorkload(LayerType type,
auto reverseV2QueueDescriptor = PolymorphicDowncast<const ReverseV2QueueDescriptor*>(&descriptor);
return MakeWorkload<ClReverseV2Workload>(*reverseV2QueueDescriptor, info, m_CLCompileContext);
}
+ case LayerType::ScatterNd :
+ {
+ auto scatterNdQueueDescriptor = PolymorphicDowncast<const ScatterNdQueueDescriptor*>(&descriptor);
+ return MakeWorkload<ClScatterNdWorkload>(*scatterNdQueueDescriptor, info, m_CLCompileContext);
+ }
case LayerType::Slice :
{
auto sliceQueueDescriptor = PolymorphicDowncast<const SliceQueueDescriptor*>(&descriptor);
diff --git a/src/backends/cl/backend.mk b/src/backends/cl/backend.mk
index 2143c30309..f233ffc5e1 100644
--- a/src/backends/cl/backend.mk
+++ b/src/backends/cl/backend.mk
@@ -1,5 +1,5 @@
#
-# Copyright © 2017-2023 ARM Ltd and Contributors. All rights reserved.
+# Copyright © 2017-2024 ARM Ltd and Contributors. All rights reserved.
# SPDX-License-Identifier: MIT
#
@@ -81,6 +81,7 @@ BACKEND_SOURCES := \
workloads/ClResizeWorkload.cpp \
workloads/ClReverseV2Workload.cpp \
workloads/ClRsqrtWorkload.cpp \
+ workloads/ClScatterNdWorkload.cpp \
workloads/ClSinWorkload.cpp \
workloads/ClSliceWorkload.cpp \
workloads/ClSoftmaxWorkload.cpp \
diff --git a/src/backends/cl/test/ClEndToEndTests.cpp b/src/backends/cl/test/ClEndToEndTests.cpp
index 9e60843177..fa5d545547 100644
--- a/src/backends/cl/test/ClEndToEndTests.cpp
+++ b/src/backends/cl/test/ClEndToEndTests.cpp
@@ -25,6 +25,7 @@
#include <backendsCommon/test/ReshapeEndToEndTestImpl.hpp>
#include <backendsCommon/test/ResizeEndToEndTestImpl.hpp>
#include <backendsCommon/test/ReverseV2EndToEndTestImpl.hpp>
+#include <backendsCommon/test/ScatterNdEndToEndTestImpl.hpp>
#include <backendsCommon/test/SliceEndToEndTestImpl.hpp>
#include <backendsCommon/test/SpaceToDepthEndToEndTestImpl.hpp>
#include <backendsCommon/test/SplitterEndToEndTestImpl.hpp>
@@ -322,6 +323,27 @@ TEST_CASE("DequantizeEndToEndOffsetTest")
DequantizeEndToEndOffset<armnn::DataType::QAsymmU8>(clDefaultBackends);
}
+// ScatterNd
+TEST_CASE("ClScatterNd1DInputEndToEndFloat32Test")
+{
+ ScatterNd1DimUpdateWithInputEndToEnd<armnn::DataType::Float32>(clDefaultBackends);
+}
+
+TEST_CASE("ClScatterNd1DNoInputEndToEndFloat32Test")
+{
+ ScatterNd1DimUpdateNoInputEndToEnd<armnn::DataType::Float32>(clDefaultBackends);
+}
+
+TEST_CASE("ClScatterNd2DInputEndToEndFloat32Test")
+{
+ ScatterNd2DimUpdateWithInputEndToEnd<armnn::DataType::Float32>(clDefaultBackends);
+}
+
+TEST_CASE("ClScatterNd2DNoInputEndToEndFloat32Test")
+{
+ ScatterNd2DimUpdateNoInputEndToEnd<armnn::DataType::Float32>(clDefaultBackends);
+}
+
// Slice
TEST_CASE("ClSliceEndtoEndTestFloat32")
{
diff --git a/src/backends/cl/test/ClLayerTests.cpp b/src/backends/cl/test/ClLayerTests.cpp
index da2b967fcb..e193ca24ea 100644
--- a/src/backends/cl/test/ClLayerTests.cpp
+++ b/src/backends/cl/test/ClLayerTests.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -1531,6 +1531,93 @@ ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleSoftmaxBeta2Uint8, ClContextControlFixtur
// LogSoftmax
ARMNN_AUTO_TEST_FIXTURE_WITH_THF(LogSoftmaxFloat32_1, ClContextControlFixture, LogSoftmaxTest1<DataType::Float32>)
+// ScatterNd
+// With Input tensor
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ScatterNd1DUpdateTestWithInputFloat32,
+ ClContextControlFixture,
+ ScatterNd1DimUpdateWithInput<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ScatterNd2DUpdateTestWithInputFloat32,
+ ClContextControlFixture,
+ ScatterNd2DimUpdateWithInput<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ScatterNd2Dim1Outter1InnerUpdateWithInputFloat32,
+ ClContextControlFixture,
+ ScatterNd2Dim1Outter1InnerUpdateWithInput<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ScatterNd3DimUpdateWithInputFloat32,
+ ClContextControlFixture,
+ ScatterNd3DimUpdateWithInput<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ScatterNd3Dim1Outter2InnerUpdateWithInputFloat32,
+ ClContextControlFixture,
+ ScatterNd3Dim1Outter2InnerUpdateWithInput<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ScatterNd3Dim2Outter1InnerUpdateWithInputFloat32,
+ ClContextControlFixture,
+ ScatterNd3Dim2Outter1InnerUpdateWithInput<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ScatterNd4DimUpdateWithInputFloat32,
+ ClContextControlFixture,
+ ScatterNd4DimUpdateWithInput<DataType::Float32>)
+
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ScatterNd2DimAddWithInputFloat32,
+ ClContextControlFixture,
+ ScatterNd2DimAddWithInput<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ScatterNd2DimSubWithInputFloat32,
+ ClContextControlFixture,
+ ScatterNd2DimSubWithInput<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ScatterNd2DimMaxWithInputFloat32,
+ ClContextControlFixture,
+ ScatterNd2DimMaxWithInput<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ScatterNd2DimMinWithInputFloat32,
+ ClContextControlFixture,
+ ScatterNd2DimMinWithInput<DataType::Float32>)
+
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ScatterNd3DimUpdateWithInputFloat16,
+ ClContextControlFixture,
+ ScatterNd3DimUpdateWithInput<DataType::Float16>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ScatterNd3DimUpdateWithInputSigned32,
+ ClContextControlFixture,
+ ScatterNd3DimUpdateWithInput<DataType::Signed32>)
+
+// No input tensor, only shape provided
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ScatterNd1DUpdateTestNoInputFloat32,
+ ClContextControlFixture,
+ ScatterNd1DimUpdateNoInput<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ScatterNd2DimUpdateTestNoInputFloat32,
+ ClContextControlFixture,
+ ScatterNd2DimUpdateNoInput<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ScatterNd2Dim1Outter1InnerUpdateNoInputFloat32,
+ ClContextControlFixture,
+ ScatterNd2Dim1Outter1InnerUpdateNoInput<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ScatterNd3DimUpdateNoInputFloat32,
+ ClContextControlFixture,
+ ScatterNd3DimUpdateNoInput<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ScatterNd3Dim1Outter2InnerUpdateNoInputFloat32,
+ ClContextControlFixture,
+ ScatterNd3Dim1Outter2InnerUpdateNoInput<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ScatterNd3Dim2Outter1InnerUpdateNoInputFloat32,
+ ClContextControlFixture,
+ ScatterNd3Dim2Outter1InnerUpdateNoInput<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ScatterNd4DimUpdateNoInputFloat32,
+ ClContextControlFixture,
+ ScatterNd4DimUpdateNoInput<DataType::Float32>)
+
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ScatterNd2DimAddNoInputFloat32,
+ ClContextControlFixture,
+ ScatterNd2DimAddNoInput<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ScatterNd2DimSubNoInputFloat32,
+ ClContextControlFixture,
+ ScatterNd2DimSubNoInput<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ScatterNd2DimMaxNoInputFloat32,
+ ClContextControlFixture,
+ ScatterNd2DimMaxNoInput<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ScatterNd2DimMinNoInputFloat32,
+ ClContextControlFixture,
+ ScatterNd2DimMinNoInput<DataType::Float32>)
+
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ScatterNd3DimUpdateNoInputFloat16,
+ ClContextControlFixture,
+ ScatterNd3DimUpdateNoInput<DataType::Float16>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ScatterNd3DimUpdateNoInputSigned32,
+ ClContextControlFixture,
+ ScatterNd3DimUpdateNoInput<DataType::Signed32>)
+
// Space To Batch Nd
ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SpaceToBatchNdSimpleFloat32, ClContextControlFixture, SpaceToBatchNdSimpleFloat32Test)
ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
diff --git a/src/backends/cl/workloads/CMakeLists.txt b/src/backends/cl/workloads/CMakeLists.txt
index f38366fa57..7db602b46b 100644
--- a/src/backends/cl/workloads/CMakeLists.txt
+++ b/src/backends/cl/workloads/CMakeLists.txt
@@ -1,5 +1,5 @@
#
-# Copyright © 2017, 2023 Arm Ltd and Contributors. All rights reserved.
+# Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
# SPDX-License-Identifier: MIT
#
@@ -113,6 +113,8 @@ list(APPEND armnnClBackendWorkloads_sources
ClReverseV2Workload.hpp
ClRsqrtWorkload.cpp
ClRsqrtWorkload.hpp
+ ClScatterNdWorkload.cpp
+ ClScatterNdWorkload.hpp
ClSinWorkload.cpp
ClSinWorkload.hpp
ClSliceWorkload.cpp
diff --git a/src/backends/cl/workloads/ClScatterNdWorkload.cpp b/src/backends/cl/workloads/ClScatterNdWorkload.cpp
new file mode 100644
index 0000000000..e75edf12c9
--- /dev/null
+++ b/src/backends/cl/workloads/ClScatterNdWorkload.cpp
@@ -0,0 +1,77 @@
+//
+// Copyright © 2024 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "ClScatterNdWorkload.hpp"
+
+#include "ClWorkloadUtils.hpp"
+
+#include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <cl/ClTensorHandle.hpp>
+
+#include <arm_compute/function_info/ScatterInfo.h>
+
+namespace armnn
+{
+
+using namespace armcomputetensorutils;
+
+arm_compute::Status ClScatterNdWorkloadValidate(const TensorInfo& inputInfo,
+ const TensorInfo& indicesInfo,
+ const TensorInfo& updatesInfo,
+ const TensorInfo& outputInfo,
+ const ScatterNdDescriptor& descriptor)
+{
+ const arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(inputInfo);
+ const arm_compute::TensorInfo aclIndicesInfo = BuildArmComputeTensorInfo(indicesInfo);
+ const arm_compute::TensorInfo aclUpdatesInfo = BuildArmComputeTensorInfo(updatesInfo);
+ const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(outputInfo);
+
+ arm_compute::ScatterInfo scatterInfo = BuildArmComputeScatterInfo(descriptor);
+
+ return arm_compute::CLScatter::validate(descriptor.m_InputEnabled ? &aclInputInfo : nullptr,
+ &aclUpdatesInfo,
+ &aclIndicesInfo,
+ &aclOutputInfo,
+ scatterInfo);
+}
+
+ClScatterNdWorkload::ClScatterNdWorkload(const ScatterNdQueueDescriptor& descriptor,
+ const WorkloadInfo& info,
+ const arm_compute::CLCompileContext& clCompileContext)
+ : ClBaseWorkload<ScatterNdQueueDescriptor>(descriptor, info)
+{
+ // Report Profiling Details
+ ARMNN_REPORT_PROFILING_WORKLOAD_DESC("ClScatterNdWorkload_Construct",
+ descriptor.m_Parameters,
+ info,
+ this->GetGuid());
+
+ m_Data.ValidateInputsOutputs("ClScatterNdWorkload", 3, 1);
+
+ arm_compute::ICLTensor& input = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+ arm_compute::ICLTensor& updates = static_cast<IClTensorHandle*>(m_Data.m_Inputs[2])->GetTensor();
+ arm_compute::ICLTensor& indices = static_cast<IClTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
+ arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+
+ arm_compute::ScatterInfo scatterInfo = BuildArmComputeScatterInfo(descriptor.m_Parameters);
+
+ {
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClScatterNdWorkload_configure");
+ m_ScatterNdLayer.configure(clCompileContext,
+ descriptor.m_Parameters.m_InputEnabled ? &input : nullptr,
+ &updates,
+ &indices,
+ &output,
+ scatterInfo);
+ }
+}
+
+void ClScatterNdWorkload::Execute() const
+{
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClScatterNdWorkload_Execute");
+ RunClFunction(m_ScatterNdLayer, CHECK_LOCATION());
+}
+
+} //namespace armnn
diff --git a/src/backends/cl/workloads/ClScatterNdWorkload.hpp b/src/backends/cl/workloads/ClScatterNdWorkload.hpp
new file mode 100644
index 0000000000..070dac440d
--- /dev/null
+++ b/src/backends/cl/workloads/ClScatterNdWorkload.hpp
@@ -0,0 +1,35 @@
+//
+// Copyright © 2024 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <armnn/Descriptors.hpp>
+
+#include <arm_compute/runtime/CL/functions/CLScatter.h>
+
+#include "ClBaseWorkload.hpp"
+
+namespace armnn
+{
+
+arm_compute::Status ClScatterNdWorkloadValidate(const TensorInfo& input,
+ const TensorInfo& indices,
+ const TensorInfo& updates,
+ const TensorInfo& output,
+ const ScatterNdDescriptor& descriptor);
+
+class ClScatterNdWorkload : public ClBaseWorkload<ScatterNdQueueDescriptor>
+{
+public:
+ ClScatterNdWorkload(const ScatterNdQueueDescriptor& descriptor,
+ const WorkloadInfo& info,
+ const arm_compute::CLCompileContext& clCompileContext);
+ void Execute() const override;
+
+private:
+ mutable arm_compute::CLScatter m_ScatterNdLayer;
+};
+
+} //namespace armnn
diff --git a/src/backends/cl/workloads/ClWorkloads.hpp b/src/backends/cl/workloads/ClWorkloads.hpp
index 40b3e99258..3178f6420d 100644
--- a/src/backends/cl/workloads/ClWorkloads.hpp
+++ b/src/backends/cl/workloads/ClWorkloads.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017,2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -57,6 +57,7 @@
#include "ClResizeWorkload.hpp"
#include "ClReverseV2Workload.hpp"
#include "ClRsqrtWorkload.hpp"
+#include "ClScatterNdWorkload.hpp"
#include "ClSinWorkload.hpp"
#include "ClSliceWorkload.hpp"
#include "ClSoftmaxWorkload.hpp"
diff --git a/src/backends/gpuFsa/GpuFsaBackend.hpp b/src/backends/gpuFsa/GpuFsaBackend.hpp
index f5a866b186..92a1c15d25 100644
--- a/src/backends/gpuFsa/GpuFsaBackend.hpp
+++ b/src/backends/gpuFsa/GpuFsaBackend.hpp
@@ -50,11 +50,13 @@ const BackendCapabilities gpuFsaCapabilities("GpuFsa",
{"MultiAxisPacking", false},
{"SingleAxisPacking", false}
});
-
+ARMNN_NO_DEPRECATE_WARN_BEGIN
class GpuFsaBackend : public IBackendInternal
{
public:
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("The GpuFsa backend will be removed from Arm NN in 24.08", "24.08")
GpuFsaBackend() : m_CustomAllocator(nullptr) {};
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("The GpuFsa backend will be removed from Arm NN in 24.08", "24.08")
GpuFsaBackend(std::shared_ptr<ICustomAllocator> allocator)
{
UseCustomMemoryAllocator(allocator, armnn::EmptyOptional());
@@ -301,5 +303,6 @@ public:
std::shared_ptr<GpuFsaBackendCustomAllocatorWrapper> m_CustomAllocator;
bool m_UsingCustomAllocator = false;
};
+ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/backends/gpuFsa/GpuFsaRegistryInitializer.cpp b/src/backends/gpuFsa/GpuFsaRegistryInitializer.cpp
index 9efb300576..ea0bbc299a 100644
--- a/src/backends/gpuFsa/GpuFsaRegistryInitializer.cpp
+++ b/src/backends/gpuFsa/GpuFsaRegistryInitializer.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -15,7 +15,9 @@ static BackendRegistry::StaticRegistryInitializer g_RegisterHelper
GpuFsaBackend::GetIdStatic(),
[]()
{
- return IBackendInternalUniquePtr(new GpuFsaBackend);
+ARMNN_NO_DEPRECATE_WARN_BEGIN
+ return IBackendInternalUniquePtr(new GpuFsaBackend);
+ARMNN_NO_DEPRECATE_WARN_END
}
};
} // Anonymous namespace \ No newline at end of file
diff --git a/src/backends/gpuFsa/test/GpuFsaWorkloadFactoryHelper.hpp b/src/backends/gpuFsa/test/GpuFsaWorkloadFactoryHelper.hpp
index c1d75d625b..e2cb4b925e 100644
--- a/src/backends/gpuFsa/test/GpuFsaWorkloadFactoryHelper.hpp
+++ b/src/backends/gpuFsa/test/GpuFsaWorkloadFactoryHelper.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -13,9 +13,9 @@
#include <gpuFsa/GpuFsaWorkloadFactory.hpp>
#include "gpuFsa/GpuFsaTensorHandleFactory.hpp"
+ARMNN_NO_DEPRECATE_WARN_BEGIN
namespace
{
-
template<>
struct WorkloadFactoryHelper<armnn::GpuFsaWorkloadFactory>
{
@@ -43,3 +43,4 @@ struct WorkloadFactoryHelper<armnn::GpuFsaWorkloadFactory>
using GpuFsaWorkloadFactoryHelper = WorkloadFactoryHelper<armnn::GpuFsaWorkloadFactory>;
} // anonymous namespace
+ARMNN_NO_DEPRECATE_WARN_END \ No newline at end of file
diff --git a/src/backends/neon/NeonLayerSupport.cpp b/src/backends/neon/NeonLayerSupport.cpp
index 0298c7c552..b6db52342e 100644
--- a/src/backends/neon/NeonLayerSupport.cpp
+++ b/src/backends/neon/NeonLayerSupport.cpp
@@ -102,11 +102,22 @@ const TensorInfo OverrideDataType(const TensorInfo& info, Optional<DataType> typ
{
return info;
}
- return TensorInfo(info.GetShape(),
- type.value(),
- info.GetQuantizationScale(),
- info.GetQuantizationOffset(),
- info.IsConstant());
+ if (info.HasMultipleQuantizationScales())
+ {
+ return TensorInfo(info.GetShape(),
+ type.value(),
+ info.GetQuantizationScales(),
+ info.GetQuantizationDim().value(),
+ info.IsConstant());
+ }
+ else
+ {
+ return TensorInfo(info.GetShape(),
+ type.value(),
+ info.GetQuantizationScale(),
+ info.GetQuantizationOffset(),
+ info.IsConstant());
+ }
}
template< typename ... Args>
diff --git a/src/backends/reference/RefWorkloadFactory.cpp b/src/backends/reference/RefWorkloadFactory.cpp
index 086f8eea8d..76c18a4fd6 100644
--- a/src/backends/reference/RefWorkloadFactory.cpp
+++ b/src/backends/reference/RefWorkloadFactory.cpp
@@ -279,7 +279,7 @@ std::unique_ptr<IWorkload> RefWorkloadFactory::CreateWorkload(LayerType type,
{
return std::make_unique<RefDebugBooleanWorkload>(*debugQueueDescriptor, info);
}
- return MakeWorkload<RefDebugFloat32Workload, RefDebugQAsymmU8Workload>(*debugQueueDescriptor, info);
+ return MakeWorkload<RefDebugFloat32Workload, RefDebugQAsymmU8Workload>(*debugQueueDescriptor, info);
}
case LayerType::DepthToSpace:
{
diff --git a/src/backends/reference/test/RefEndToEndTests.cpp b/src/backends/reference/test/RefEndToEndTests.cpp
index 68b7fbff90..6f57236dd5 100644
--- a/src/backends/reference/test/RefEndToEndTests.cpp
+++ b/src/backends/reference/test/RefEndToEndTests.cpp
@@ -1354,7 +1354,6 @@ TEST_CASE("QuantizationEndToEndFloat16_S16Test")
}
// ScatterNd
-
TEST_CASE("RefScatterNd1DInputEndToEndFloat32Test")
{
ScatterNd1DimUpdateWithInputEndToEnd<armnn::DataType::Float32>(defaultBackends);
@@ -1395,7 +1394,6 @@ TEST_CASE("RefScatterNd2DNoInputEndToEndInt8Test")
ScatterNd2DimUpdateNoInputEndToEnd<armnn::DataType::QAsymmS8>(defaultBackends);
}
-
// SpaceToDepth
TEST_CASE("RefSpaceToDepthNhwcEndToEndTest1")
{
diff --git a/src/backends/tosaCommon/TosaMappings.cpp b/src/backends/tosaCommon/TosaMappings.cpp
index 1ebb68b3c9..0e44d54aab 100644
--- a/src/backends/tosaCommon/TosaMappings.cpp
+++ b/src/backends/tosaCommon/TosaMappings.cpp
@@ -127,7 +127,7 @@ TosaSerializationBasicBlock* GetTosaMapping(const Layer* layer,
}
}
-TosaSerializationBasicBlock* GetTosaMappingFromLayer(Layer* layer)
+TosaSerializationBasicBlock* GetTosaMappingFromLayer(const Layer* layer)
{
std::vector<const TensorInfo*> inputs;
for (auto inputSlot : layer->GetInputSlots())
diff --git a/src/backends/tosaCommon/TosaMappings.hpp b/src/backends/tosaCommon/TosaMappings.hpp
index cc41f1b7c8..fe1ba3a077 100644
--- a/src/backends/tosaCommon/TosaMappings.hpp
+++ b/src/backends/tosaCommon/TosaMappings.hpp
@@ -27,4 +27,4 @@ TosaSerializationBasicBlock* GetTosaMapping(const Layer* layer,
// Function called in armnn::OptimizeSubgraphView() when access to armnn::Layer is available
// and there is an option to set TOSA basic block data from constant layer tensors available from the input layer.
-TosaSerializationBasicBlock* GetTosaMappingFromLayer(Layer* layer);
+TosaSerializationBasicBlock* GetTosaMappingFromLayer(const Layer* layer);
diff --git a/src/backends/tosaCommon/operatorMappings/ActivationOperator.cpp b/src/backends/tosaCommon/operatorMappings/ActivationOperator.cpp
index 480cdf5b86..c13555da6a 100644
--- a/src/backends/tosaCommon/operatorMappings/ActivationOperator.cpp
+++ b/src/backends/tosaCommon/operatorMappings/ActivationOperator.cpp
@@ -29,7 +29,7 @@ TosaSerializationBasicBlock* ConvertActivationToTosaOperator(const Layer* layer,
throw armnn::Exception("ConvertActivationToTosaOperator: 1 output tensor required.");
}
- std::string inputName = std::string("input0_");
+ std::string inputName = std::string("input_");
std::string outputNameAlpha = std::string("intermediate1_") + GetUniqueTosaMappingID();
std::string outputNameMul = std::string("intermediate2_") + GetUniqueTosaMappingID();
std::string outputName = std::string("output0_");
@@ -39,12 +39,8 @@ TosaSerializationBasicBlock* ConvertActivationToTosaOperator(const Layer* layer,
// using the previous and following layers so the graph is connected correctly. For validation this doesn't matter.
if (layer != nullptr)
{
- // Get the layers connected to the input slots and determine unique tensors names.
- Layer& connectedInputLayer = layer->GetInputSlot(0).GetConnectedOutputSlot()->GetOwningLayer();
- inputName = GenerateUniqueName(connectedInputLayer, 0);
-
- // Determine unique output tensor name.
- outputName = GenerateUniqueOutputName(*layer, 0);
+ inputName = GenerateUniqueInputName(layer->GetInputSlot(0));
+ outputName = GenerateUniqueOutputName(*layer);
}
std::vector<TosaSerializationTensor*> tensors;
@@ -54,7 +50,7 @@ TosaSerializationBasicBlock* ConvertActivationToTosaOperator(const Layer* layer,
// There also can't be duplicate tensor.
std::vector<int32_t> inputShape0;
DType inputDType0 = DType::DType_UNKNOWN;
- if(inputName.find("input0_") != std::string::npos)
+ if(inputName.find("input_") != std::string::npos)
{
inputShape0 = GetTosaTensorShape(inputs[0]->GetShape());
inputDType0 = ArmNNToDType(inputs[0]->GetDataType());
diff --git a/src/backends/tosaCommon/operatorMappings/AvgPool2DIgnoreValueOperator.cpp b/src/backends/tosaCommon/operatorMappings/AvgPool2DIgnoreValueOperator.cpp
index a7ca873831..bd198e2d5a 100644
--- a/src/backends/tosaCommon/operatorMappings/AvgPool2DIgnoreValueOperator.cpp
+++ b/src/backends/tosaCommon/operatorMappings/AvgPool2DIgnoreValueOperator.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -10,7 +10,7 @@ TosaSerializationBasicBlock* ConvertAvgPool2DIgnoreValueToTosaOperator(const Lay
const std::vector<const TensorInfo*>& outputs,
const Pooling2dDescriptor* poolDescriptor)
{
- std::string padInputName = std::string("input0_");
+ std::string padInputName = std::string("input_");
std::string padOutputName = std::string("intermediate0_") + GetUniqueTosaMappingID();
std::string poolOutputName = std::string("output0_");
std::string blockName = std::string("Op_AVG_POOL2D_block_") + GetUniqueTosaMappingID();
@@ -19,12 +19,8 @@ TosaSerializationBasicBlock* ConvertAvgPool2DIgnoreValueToTosaOperator(const Lay
// using the previous and following layers so the graph is connected correctly. For validation this doesn't matter.
if(layer != nullptr)
{
- // Get the layers connected to the input slots and determine unique tensors names.
- Layer& connectedInputLayer = layer->GetInputSlot(0).GetConnectedOutputSlot()->GetOwningLayer();
- padInputName = GenerateUniqueName(connectedInputLayer, 0);
-
- // Determine unique output tensor name.
- poolOutputName = GenerateUniqueOutputName(*layer, 0);
+ padInputName = GenerateUniqueInputName(layer->GetInputSlot(0));
+ poolOutputName = GenerateUniqueOutputName(*layer);
}
std::vector<int> paddings;
@@ -81,7 +77,7 @@ TosaSerializationBasicBlock* ConvertAvgPool2DIgnoreValueToTosaOperator(const Lay
// Only add input tensors if connected layer is an input layer.
// As intermediate or constant tensors will be created separately.
// There also can't be duplicate tensor.
- if(padInputName.find("input0_") != std::string::npos)
+ if(padInputName.find("input_") != std::string::npos)
{
tensors.push_back(new TosaSerializationTensor(padInputName, inputShape, inputDType, {}));
}
diff --git a/src/backends/tosaCommon/operatorMappings/ConcatOperator.cpp b/src/backends/tosaCommon/operatorMappings/ConcatOperator.cpp
index d1ff0dfb20..905f32c4c4 100644
--- a/src/backends/tosaCommon/operatorMappings/ConcatOperator.cpp
+++ b/src/backends/tosaCommon/operatorMappings/ConcatOperator.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -21,7 +21,7 @@ TosaSerializationBasicBlock* ConvertConcatToTosaOperator(const Layer* layer,
{
for (uint32_t i = 0; i < numInputs; ++i)
{
- inputNames.push_back("input"+ std::to_string(i) +"_");
+ inputNames.push_back("input_"+ std::to_string(i));
}
}
// If a layer is present then the block will be used for execution, so input and output names need to be determined
@@ -31,14 +31,12 @@ TosaSerializationBasicBlock* ConvertConcatToTosaOperator(const Layer* layer,
// Get the layers connected to the input slots and determine unique tensor names.
for (uint32_t i = 0; i < numInputs; ++i)
{
- Layer& connectedLayer = layer->GetInputSlot(i).GetConnectedOutputSlot()->GetOwningLayer();
-
- std::string inputName = GenerateUniqueName(connectedLayer, i);
+ std::string inputName = GenerateUniqueInputName(layer->GetInputSlot(i));
inputNames.push_back(inputName);
}
// Determine unique output tensor name.
- outputName = GenerateUniqueOutputName(*layer, 0);
+ outputName = GenerateUniqueOutputName(*layer);
}
auto axis = static_cast<int32_t>(concatDescriptor->GetConcatAxis());
@@ -51,8 +49,7 @@ TosaSerializationBasicBlock* ConvertConcatToTosaOperator(const Layer* layer,
{outputName});
std::vector<TosaSerializationTensor*> tensors;
- tensors.reserve(numInputs);
-
+ tensors.reserve(numInputs + 1);
for (uint32_t i = 0; i < numInputs; ++i)
{
// Only add input tensors for validation or when the connected layer is an input layer.
diff --git a/src/backends/tosaCommon/operatorMappings/Conv2dOperator.cpp b/src/backends/tosaCommon/operatorMappings/Conv2dOperator.cpp
index 96701d4384..6d1699d87b 100644
--- a/src/backends/tosaCommon/operatorMappings/Conv2dOperator.cpp
+++ b/src/backends/tosaCommon/operatorMappings/Conv2dOperator.cpp
@@ -22,11 +22,11 @@ TosaSerializationBasicBlock* ConvertConv2dToTosaOperator(const Layer* layer,
// Set input names for validation purposes only.
if(layer == nullptr)
{
- inputNames.emplace_back("input0_");
- inputNames.emplace_back("input1_");
+ inputNames.emplace_back("input_0");
+ inputNames.emplace_back("input_1");
if(conv2dDescriptor->m_BiasEnabled)
{
- inputNames.emplace_back("input2_");
+ inputNames.emplace_back("input_2");
}
}
// If a layer is present then the block will be used for execution, so input and output names need to be
@@ -37,14 +37,12 @@ TosaSerializationBasicBlock* ConvertConv2dToTosaOperator(const Layer* layer,
// Get the layer connected to the input slot and determine unique tensor names.
for (uint32_t i = 0; i < inputs.size(); ++i)
{
- Layer& connectedLayer = layer->GetInputSlot(i).GetConnectedOutputSlot()->GetOwningLayer();
-
- std::string inputName = GenerateUniqueName(connectedLayer, i);
+ std::string inputName = GenerateUniqueInputName(layer->GetInputSlot(i));
inputNames.push_back(inputName);
}
// Determine unique output tensor name.
- outputName = GenerateUniqueOutputName(*layer, 0);
+ outputName = GenerateUniqueOutputName(*layer);
}
std::vector<TosaSerializationTensor*> tensors;
@@ -54,7 +52,7 @@ TosaSerializationBasicBlock* ConvertConv2dToTosaOperator(const Layer* layer,
// Only add tensor if connected layer is an input layer.
// As intermediate or constant tensors will be created separately.
// There also can't be duplicate tensors.
- if(inputNames[0].find("input0_") != std::string::npos)
+ if(inputNames[0].find("input_") != std::string::npos)
{
std::vector<int32_t> inputShape0 = GetTosaTensorShape(inputs[0]->GetShape());
diff --git a/src/backends/tosaCommon/operatorMappings/ElementwiseBinaryOperator.cpp b/src/backends/tosaCommon/operatorMappings/ElementwiseBinaryOperator.cpp
index a9af249673..55b4f15e49 100644
--- a/src/backends/tosaCommon/operatorMappings/ElementwiseBinaryOperator.cpp
+++ b/src/backends/tosaCommon/operatorMappings/ElementwiseBinaryOperator.cpp
@@ -11,8 +11,8 @@ TosaSerializationBasicBlock* ConvertElementwiseBinaryToTosaOperator(const Layer*
const std::vector<const TensorInfo*>& outputs,
const ElementwiseBinaryDescriptor* descriptor)
{
- std::string input0Name = std::string("input0_");
- std::string input1Name = std::string("input1_");
+ std::string input0Name = std::string("input_0");
+ std::string input1Name = std::string("input_1");
std::string outputName = std::string("output0_");
std::string blockName;
@@ -20,15 +20,9 @@ TosaSerializationBasicBlock* ConvertElementwiseBinaryToTosaOperator(const Layer*
// using the previous and following layers so the graph is connected correctly. For validation this doesn't matter.
if(layer != nullptr)
{
- // Get the layers connected to the input slots and determine unique tensor names.
- Layer& connectedLayer0 = layer->GetInputSlot(0).GetConnectedOutputSlot()->GetOwningLayer();
- input0Name = GenerateUniqueName(connectedLayer0, 0);
-
- Layer& connectedLayer1 = layer->GetInputSlot(1).GetConnectedOutputSlot()->GetOwningLayer();
- input1Name = GenerateUniqueName(connectedLayer1, 1);
-
- // Determine unique output tensor name.
- outputName = GenerateUniqueOutputName(*layer, 0);
+ input0Name = GenerateUniqueInputName(layer->GetInputSlot(0));
+ input1Name = GenerateUniqueInputName(layer->GetInputSlot(1));
+ outputName = GenerateUniqueOutputName(*layer);
}
TosaSerializationOperator* op = nullptr;
@@ -93,13 +87,13 @@ TosaSerializationBasicBlock* ConvertElementwiseBinaryToTosaOperator(const Layer*
// Only add input tensors if connected layer is an input layer.
// As intermediate or constant tensors will be created separately.
// There also can't be duplicate tensor.
- if(input0Name.find("input0_") != std::string::npos)
+ if(input0Name.find("input_") != std::string::npos)
{
std::vector<int32_t> inputShape0 = GetTosaTensorShape(inputs[0]->GetShape());
DType inputDType0 = ArmNNToDType(inputs[0]->GetDataType());
tensors.push_back(new TosaSerializationTensor(input0Name, inputShape0, inputDType0, {}));
}
- if(input1Name.find("input1_") != std::string::npos)
+ if(input1Name.find("input_") != std::string::npos)
{
std::vector<int32_t> inputShape1 = GetTosaTensorShape(inputs[1]->GetShape());
DType inputDType1 = ArmNNToDType(inputs[1]->GetDataType());
diff --git a/src/backends/tosaCommon/operatorMappings/ElementwiseUnaryOperator.cpp b/src/backends/tosaCommon/operatorMappings/ElementwiseUnaryOperator.cpp
index 02dddab8bc..d0eac0b4f4 100644
--- a/src/backends/tosaCommon/operatorMappings/ElementwiseUnaryOperator.cpp
+++ b/src/backends/tosaCommon/operatorMappings/ElementwiseUnaryOperator.cpp
@@ -10,7 +10,7 @@ TosaSerializationBasicBlock* ConvertElementwiseUnaryOperator(const Layer* layer,
const std::vector<const TensorInfo*>& outputs,
const ElementwiseUnaryDescriptor* unaryDescriptor)
{
- std::string input0Name = std::string("input0_");
+ std::string input0Name = std::string("input_");
std::string outputName = std::string("output0_");
std::string blockName = std::string("Op_ELEMENTWISEUNARY_block_") + GetUniqueTosaMappingID();
@@ -19,12 +19,8 @@ TosaSerializationBasicBlock* ConvertElementwiseUnaryOperator(const Layer* layer,
// using the previous and following layers so the graph is connected correctly. For validation this doesn't matter.
if(layer != nullptr)
{
- // Get the layer connected to the input slot and determine unique the tensor name.
- Layer& connectedLayer0 = layer->GetInputSlot(0).GetConnectedOutputSlot()->GetOwningLayer();
- input0Name = GenerateUniqueName(connectedLayer0, 0);
-
- // Determine unique output tensor name.
- outputName = GenerateUniqueOutputName(*layer, 0);
+ input0Name = GenerateUniqueInputName(layer->GetInputSlot(0));
+ outputName = GenerateUniqueOutputName(*layer);
}
TosaSerializationOperator* op = nullptr;
@@ -48,7 +44,7 @@ TosaSerializationBasicBlock* ConvertElementwiseUnaryOperator(const Layer* layer,
// Only add input tensor if connected layer is an input layer.
// As intermediate or constant tensors will be created separately.
// There also can't be duplicate tensor.
- if(input0Name.find("input0_") != std::string::npos)
+ if(input0Name.find("input_") != std::string::npos)
{
std::vector<int32_t> inputShape0 = GetTosaTensorShape(inputs[0]->GetShape());
DType inputDType0 = ArmNNToDType(inputs[0]->GetDataType());
diff --git a/src/backends/tosaCommon/operatorMappings/Pooling2DOperator.cpp b/src/backends/tosaCommon/operatorMappings/Pooling2DOperator.cpp
index c33f61296a..56e3f3402c 100644
--- a/src/backends/tosaCommon/operatorMappings/Pooling2DOperator.cpp
+++ b/src/backends/tosaCommon/operatorMappings/Pooling2DOperator.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -13,7 +13,7 @@ TosaSerializationBasicBlock* ConvertPooling2DToTosaOperator(const Layer* layer,
std::string poolType = (poolDescriptor->m_PoolType == PoolingAlgorithm::Max) ? "Op_MAX" : "Op_AVG";
Op opcode = (poolDescriptor->m_PoolType == PoolingAlgorithm::Max) ? Op_MAX_POOL2D : Op_AVG_POOL2D;
- std::string input0Name = std::string("input0_");
+ std::string input0Name = std::string("input_");
std::string outputName = std::string("output0_");
std::string blockName = std::string("Op_") + poolType + std::string("_POOL2D_block_") + GetUniqueTosaMappingID();
@@ -21,12 +21,8 @@ TosaSerializationBasicBlock* ConvertPooling2DToTosaOperator(const Layer* layer,
// using the previous and following layers so the graph is connected correctly. For validation this doesn't matter.
if(layer != nullptr)
{
- // Get the layers connected to the input slots and determine unique tensor names.
- Layer& connectedInputLayer = layer->GetInputSlot(0).GetConnectedOutputSlot()->GetOwningLayer();
- input0Name = GenerateUniqueName(connectedInputLayer, 0);
-
- // Determine unique output tensor name.
- outputName = GenerateUniqueOutputName(*layer, 0);
+ input0Name = GenerateUniqueInputName(layer->GetInputSlot(0));
+ outputName = GenerateUniqueOutputName(*layer);
}
std::vector<int> pad = {static_cast<int>(poolDescriptor->m_PadTop),
@@ -50,7 +46,7 @@ TosaSerializationBasicBlock* ConvertPooling2DToTosaOperator(const Layer* layer,
// Only add input tensors if connected layer is an input layer.
// As intermediate or constant tensors will be created separately.
// There also can't be duplicate tensor.
- if(input0Name.find("input0_") != std::string::npos)
+ if(input0Name.find("input_") != std::string::npos)
{
std::vector<int32_t> inputShape0 = GetTosaTensorShape(inputs[0]->GetShape());
DType inputDType0 = ArmNNToDType(inputs[0]->GetDataType());
diff --git a/src/backends/tosaCommon/operatorMappings/QuantizeOperator.cpp b/src/backends/tosaCommon/operatorMappings/QuantizeOperator.cpp
index 1242d3b2c6..a4d7d0ed28 100644
--- a/src/backends/tosaCommon/operatorMappings/QuantizeOperator.cpp
+++ b/src/backends/tosaCommon/operatorMappings/QuantizeOperator.cpp
@@ -21,7 +21,7 @@ TosaSerializationBasicBlock* ConvertQuantizeToTosaOperator(const Layer* layer,
ARMNN_THROW_INVALIDARG_MSG_IF_FALSE( outputs.size() == 1,
"ConvertQuantizeToTosaOperator: Quantize must have only one output" );
- std::string inputName = std::string("input0_");
+ std::string inputName = std::string("input_");
std::string outputName = std::string("output0_");
std::string blockName = std::string("Op_QUANTIZE_block_") + GetUniqueTosaMappingID();
@@ -29,12 +29,8 @@ TosaSerializationBasicBlock* ConvertQuantizeToTosaOperator(const Layer* layer,
// using the previous and following layers so the graph is connected correctly. For validation this doesn't matter.
if(layer != nullptr)
{
- // Get the layers connected to the input slots and determine unique tensor names.
- Layer& connectedLayer = layer->GetInputSlot(0).GetConnectedOutputSlot()->GetOwningLayer();
- inputName = GenerateUniqueName(connectedLayer, 0);
-
- // Determine unique output tensor name.
- outputName = GenerateUniqueOutputName(*layer, 0);
+ inputName = GenerateUniqueInputName(layer->GetInputSlot(0));
+ outputName = GenerateUniqueOutputName(*layer);
}
const TensorInfo inputInfo = *inputs[0];
@@ -60,7 +56,7 @@ TosaSerializationBasicBlock* ConvertQuantizeToTosaOperator(const Layer* layer,
// Only add input tensors if connected layer is an input layer.
// As intermediate or constant tensors will be created separately.
// There also can't be duplicate tensor.
- if(inputName.find("input0_") != std::string::npos)
+ if(inputName.find("input_") != std::string::npos)
{
tensors.push_back(new TosaSerializationTensor(inputName, inputShape0, inputDType0, {}));
}
diff --git a/src/backends/tosaCommon/operatorMappings/ReshapeOperator.cpp b/src/backends/tosaCommon/operatorMappings/ReshapeOperator.cpp
index 55d66806b7..e7e5dc77d9 100644
--- a/src/backends/tosaCommon/operatorMappings/ReshapeOperator.cpp
+++ b/src/backends/tosaCommon/operatorMappings/ReshapeOperator.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -10,7 +10,7 @@ TosaSerializationBasicBlock* ConvertReshapeToTosaOperator(const Layer* layer,
const std::vector<const TensorInfo*>& outputs,
const ReshapeDescriptor* reshapeDescriptor)
{
- std::string inputName = std::string("input0_");
+ std::string inputName = std::string("input_");
std::string outputName = std::string("output0_");
std::string blockName = std::string("Op_RESHAPE_block_") + GetUniqueTosaMappingID();
@@ -18,12 +18,8 @@ TosaSerializationBasicBlock* ConvertReshapeToTosaOperator(const Layer* layer,
// using the previous and following layers so the graph is connected correctly. For validation this doesn't matter.
if(layer != nullptr)
{
- // Get the layers connected to the input slots and determine unique tensor names.
- Layer& connectedLayer = layer->GetInputSlot(0).GetConnectedOutputSlot()->GetOwningLayer();
- inputName = GenerateUniqueName(connectedLayer, 0);
-
- // Determine unique output tensor name.
- outputName = GenerateUniqueOutputName(*layer, 0);
+ inputName = GenerateUniqueInputName(layer->GetInputSlot(0));
+ outputName = GenerateUniqueOutputName(*layer);
}
TosaReshapeAttribute attribute(GetTosaTensorShape(reshapeDescriptor->m_TargetShape));
@@ -39,7 +35,7 @@ TosaSerializationBasicBlock* ConvertReshapeToTosaOperator(const Layer* layer,
// Only add input tensors if connected layer is an input layer.
// As intermediate or constant tensors will be created separately.
// There also can't be duplicate tensor.
- if(inputName.find("input0_") != std::string::npos)
+ if(inputName.find("input_") != std::string::npos)
{
std::vector<int32_t> inputShape = GetTosaTensorShape(inputs[0]->GetShape());
DType inputDType = ArmNNToDType(inputs[0]->GetDataType());
diff --git a/src/backends/tosaCommon/operatorMappings/ResizeOperator.cpp b/src/backends/tosaCommon/operatorMappings/ResizeOperator.cpp
index 72c7352a65..bb1eabd27b 100644
--- a/src/backends/tosaCommon/operatorMappings/ResizeOperator.cpp
+++ b/src/backends/tosaCommon/operatorMappings/ResizeOperator.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
// Copyright © 2020, 2023 The TensorFlow Authors. All Rights Reserved.
@@ -37,7 +37,7 @@ TosaSerializationBasicBlock* ConvertResizeToTosaOperator(const Layer* layer,
throw armnn::InvalidArgumentException("ConvertResizeToTosaOperator: Unsupported Resize method.");
}
- std::string inputName = std::string("input0_");
+ std::string inputName = std::string("input_");
std::string outputName = std::string("output0_");
std::string blockName = std::string("Op_RESIZE_block_") + GetUniqueTosaMappingID();
@@ -45,12 +45,8 @@ TosaSerializationBasicBlock* ConvertResizeToTosaOperator(const Layer* layer,
// using the previous and following layers so the graph is connected correctly. For validation this doesn't matter.
if(layer != nullptr)
{
- // Get the layers connected to the input slots and determine unique tensor names.
- Layer& connectedLayer = layer->GetInputSlot(0).GetConnectedOutputSlot()->GetOwningLayer();
- inputName = GenerateUniqueName(connectedLayer, 0);
-
- // Determine unique output tensor name.
- outputName = GenerateUniqueOutputName(*layer, 0);
+ inputName = GenerateUniqueInputName(layer->GetInputSlot(0));
+ outputName = GenerateUniqueOutputName(*layer);
}
int32_t inputHeight = static_cast<int32_t>(inputs[0]->GetShape()[1]);
@@ -149,7 +145,7 @@ TosaSerializationBasicBlock* ConvertResizeToTosaOperator(const Layer* layer,
// Only add input tensors if connected layer is an input layer.
// As intermediate or constant tensors will be created separately.
// There also can't be duplicate tensor.
- if(inputName.find("input0_") != std::string::npos)
+ if(inputName.find("input_") != std::string::npos)
{
std::vector<int32_t> inputShape = GetTosaTensorShape(inputs[0]->GetShape());
DType inputDType = ArmNNToDType(inputs[0]->GetDataType());
diff --git a/src/backends/tosaCommon/operatorMappings/SliceOperator.cpp b/src/backends/tosaCommon/operatorMappings/SliceOperator.cpp
index 294d38937f..5fe0c8da46 100644
--- a/src/backends/tosaCommon/operatorMappings/SliceOperator.cpp
+++ b/src/backends/tosaCommon/operatorMappings/SliceOperator.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -10,7 +10,7 @@ TosaSerializationBasicBlock* ConvertSliceToTosaOperator(const Layer* layer,
const std::vector<const TensorInfo*>& outputs,
const SliceDescriptor* sliceDescriptor)
{
- std::string inputName = std::string("input0_");
+ std::string inputName = std::string("input_");
std::string outputName = std::string("output0_");
std::string blockName = std::string("Op_SLICE_block_") + GetUniqueTosaMappingID();
@@ -18,12 +18,8 @@ TosaSerializationBasicBlock* ConvertSliceToTosaOperator(const Layer* layer,
// using the previous and following layers so the graph is connected correctly. For validation this doesn't matter.
if(layer != nullptr)
{
- // Get the layers connected to the input slots and determine unique tensor names.
- Layer& connectedLayer = layer->GetInputSlot(0).GetConnectedOutputSlot()->GetOwningLayer();
- inputName = GenerateUniqueName(connectedLayer, 0);
-
- // Determine unique output tensor name.
- outputName = GenerateUniqueOutputName(*layer, 0);
+ inputName = GenerateUniqueInputName(layer->GetInputSlot(0));
+ outputName = GenerateUniqueOutputName(*layer);
}
std::vector<int32_t> begin(sliceDescriptor->m_Begin.begin(), sliceDescriptor->m_Begin.end());
@@ -42,7 +38,7 @@ TosaSerializationBasicBlock* ConvertSliceToTosaOperator(const Layer* layer,
// Only add input tensors if connected layer is an input layer.
// As intermediate or constant tensors will be created separately.
// There also can't be duplicate tensor.
- if(inputName.find("input0_") != std::string::npos)
+ if(inputName.find("input_") != std::string::npos)
{
std::vector<int32_t> inputShape = GetTosaTensorShape(inputs[0]->GetShape());
DType inputDType = ArmNNToDType(inputs[0]->GetDataType());
diff --git a/src/backends/tosaCommon/operatorMappings/SplitOperator.cpp b/src/backends/tosaCommon/operatorMappings/SplitOperator.cpp
index b73386633d..53f4f052bb 100644
--- a/src/backends/tosaCommon/operatorMappings/SplitOperator.cpp
+++ b/src/backends/tosaCommon/operatorMappings/SplitOperator.cpp
@@ -27,7 +27,7 @@ TosaSerializationBasicBlock* ConvertSplitToTosaOperator(const Layer* layer,
throw armnn::Exception("ConvertSplitToTosaOperator: Dynamic input dimensions are unsupported.");
}
- std::string inputName = std::string("input0_");
+ std::string inputName = std::string("input_");
std::vector<std::string> outputNames;
std::string blockName = std::string("Op_SPLIT_block_") + GetUniqueTosaMappingID();
@@ -36,9 +36,7 @@ TosaSerializationBasicBlock* ConvertSplitToTosaOperator(const Layer* layer,
// using the previous and following layers so the graph is connected correctly. For validation this doesn't matter.
if(layer != nullptr)
{
- // Get the layers connected to the input slots and determine unique tensor names.
- Layer& connectedLayer = layer->GetInputSlot(0).GetConnectedOutputSlot()->GetOwningLayer();
- inputName = GenerateUniqueName(connectedLayer, 0);
+ inputName = GenerateUniqueInputName(layer->GetInputSlot(0));
for (unsigned int i=0; i < numSplit; ++i)
{
@@ -87,7 +85,7 @@ TosaSerializationBasicBlock* ConvertSplitToTosaOperator(const Layer* layer,
// Only add input tensors if connected layer is an input layer.
// As intermediate or constant tensors will be created separately.
// There also can't be duplicate tensor.
- if(inputName.find("input0_") != std::string::npos)
+ if(inputName.find("input_") != std::string::npos)
{
std::vector<int32_t> inputShape = GetTosaTensorShape(inputs[0]->GetShape());
DType inputDType = ArmNNToDType(inputs[0]->GetDataType());
diff --git a/src/backends/tosaCommon/operatorMappings/TosaOperatorUtils.hpp b/src/backends/tosaCommon/operatorMappings/TosaOperatorUtils.hpp
index b7f14bf5b7..f566504a40 100644
--- a/src/backends/tosaCommon/operatorMappings/TosaOperatorUtils.hpp
+++ b/src/backends/tosaCommon/operatorMappings/TosaOperatorUtils.hpp
@@ -87,7 +87,7 @@ inline std::vector<int32_t> GetTosaTensorShape(const TensorShape& shape)
}
// Function that generates unique name using the layer type, input slot and layer guid.
-inline std::string GenerateUniqueName(const Layer& layer, uint32_t layerSlot)
+static std::string GenerateUniqueName(const Layer& layer, uint32_t layerSlot)
{
std::string guid = std::to_string(layer.GetGuid());
std::string slotAndGuid = std::to_string(layerSlot) + "_" + guid;
@@ -95,7 +95,7 @@ inline std::string GenerateUniqueName(const Layer& layer, uint32_t layerSlot)
switch (layer.GetType())
{
case LayerType::Input:
- return "input" + slotAndGuid;
+ return "input_" + guid;
case LayerType::Output:
return "output" + slotAndGuid;
case LayerType::Constant:
@@ -105,8 +105,19 @@ inline std::string GenerateUniqueName(const Layer& layer, uint32_t layerSlot)
}
}
+// Function that generates a unique name for the parent layer connected to the given input slot.
+inline std::string GenerateUniqueInputName(const armnn::InputSlot& slot)
+{
+    // Get the layer connected to the input slot and determine a unique tensor name.
+    Layer& connectedLayer = slot.GetConnectedOutputSlot()->GetOwningLayer();
+    // Make sure the correct output slot index of the parent layer is used.
+    // For example, if the parent layer is a Split, the connected output slot could be at index 0 or 1.
+ uint32_t connectedOutputSlotIdx = slot.GetConnectedOutputSlot()->CalculateIndexOnOwner();
+ return GenerateUniqueName(connectedLayer, connectedOutputSlotIdx);
+}
+
// Function that generates unique output name using the layer type, input slot and layer guid.
-inline std::string GenerateUniqueOutputName(const Layer& layer, uint32_t layerSlot)
+inline std::string GenerateUniqueOutputName(const Layer& layer, uint32_t layerSlot = 0)
{
Layer& connectedLayer = layer.GetOutputSlot().GetConnection(0)->GetOwningLayer();
@@ -443,6 +454,12 @@ inline std::vector<uint8_t> CreateConstTosaData(const void* value,
error = TosaSerializationHandler::ConvertI8toU8(data, uint8Data);
break;
}
+ case DType::DType_UINT8:
+ {
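+        // Copy the bytes pointed to by value straight into the buffer; unsigned 8-bit data needs no conversion.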
+ const int8_t* copy_data = static_cast<const int8_t*>(value);
+ uint8Data.assign(copy_data, copy_data + numElements);
+ break;
+ }
case DType::DType_INT4:
{
std::vector<int8_t> data(numElements, *static_cast<const int8_t*>(value));
diff --git a/src/backends/tosaCommon/operatorMappings/TransposeConv2dOperator.cpp b/src/backends/tosaCommon/operatorMappings/TransposeConv2dOperator.cpp
index 8c2ae9f2b5..81d58e04fe 100644
--- a/src/backends/tosaCommon/operatorMappings/TransposeConv2dOperator.cpp
+++ b/src/backends/tosaCommon/operatorMappings/TransposeConv2dOperator.cpp
@@ -12,7 +12,7 @@ TosaSerializationBasicBlock* ConvertTransposeConv2dToTosaOperator(const Layer* l
const std::vector<const TensorInfo*>& outputs,
const TransposeConvolution2dDescriptor* descriptor)
{
- std::string input0Name = std::string("input0_");
+ std::string input0Name = std::string("input_");
std::string input1Name = std::string("constant_") + GetUniqueTosaMappingID();
std::string input2Name = std::string("constant_") + GetUniqueTosaMappingID();
std::string outputName = std::string("output0_");
@@ -22,12 +22,8 @@ TosaSerializationBasicBlock* ConvertTransposeConv2dToTosaOperator(const Layer* l
// using the previous and following layers so the graph is connected correctly. For validation this doesn't matter.
if(layer != nullptr)
{
- // Get the layers connected to the input slots and determine unique tensor names.
- Layer& connectedInputLayer = layer->GetInputSlot(0).GetConnectedOutputSlot()->GetOwningLayer();
- input0Name = GenerateUniqueName(connectedInputLayer, 0);
-
- // Determine unique output tensor name.
- outputName = GenerateUniqueOutputName(*layer, 0);
+ input0Name = GenerateUniqueInputName(layer->GetInputSlot(0));
+ outputName = GenerateUniqueOutputName(*layer);
}
std::vector<TosaSerializationTensor*> tensors;
@@ -37,7 +33,7 @@ TosaSerializationBasicBlock* ConvertTransposeConv2dToTosaOperator(const Layer* l
// Only add tensor if connected layer is an input layer.
// As intermediate or constant tensors will be created separately.
// There also can't be duplicate tensors.
- if(input0Name.find("input0_") != std::string::npos)
+ if(input0Name.find("input_") != std::string::npos)
{
std::vector<int32_t> inputShape0 = GetTosaTensorShape(inputs[0]->GetShape());
DType inputDType0 = ArmNNToDType(inputs[0]->GetDataType());
diff --git a/src/backends/tosaCommon/operatorMappings/TransposeOperator.cpp b/src/backends/tosaCommon/operatorMappings/TransposeOperator.cpp
index ccc77741c9..229a1b2421 100644
--- a/src/backends/tosaCommon/operatorMappings/TransposeOperator.cpp
+++ b/src/backends/tosaCommon/operatorMappings/TransposeOperator.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -10,7 +10,7 @@ TosaSerializationBasicBlock* ConvertTransposeToTosaOperator(const Layer* layer,
const std::vector<const TensorInfo*>& outputs,
const TransposeDescriptor* transposeDescriptor)
{
- std::string input0Name = std::string("input0_");
+ std::string input0Name = std::string("input_");
std::string outputName = std::string("output0_");
std::string blockName = std::string("Op_TRANSPOSE_block_") + GetUniqueTosaMappingID();
@@ -18,12 +18,8 @@ TosaSerializationBasicBlock* ConvertTransposeToTosaOperator(const Layer* layer,
// using the previous and following layers so the graph is connected correctly. For validation this doesn't matter.
if(layer != nullptr)
{
- // Get the layers connected to the input slot and determine unique tensor name.
- Layer& connectedLayer0 = layer->GetInputSlot(0).GetConnectedOutputSlot()->GetOwningLayer();
- input0Name = GenerateUniqueName(connectedLayer0, 0);
-
- // Determine unique output tensor name.
- outputName = GenerateUniqueOutputName(*layer, 0);
+ input0Name = GenerateUniqueInputName(layer->GetInputSlot(0));
+ outputName = GenerateUniqueOutputName(*layer);
}
std::vector<int32_t> mappings(transposeDescriptor->m_DimMappings.begin(),
@@ -42,7 +38,7 @@ TosaSerializationBasicBlock* ConvertTransposeToTosaOperator(const Layer* layer,
// Only add input tensors if connected layer is an input layer.
// As intermediate or constant tensors will be created separately.
// There also can't be duplicate tensor.
- if(input0Name.find("input0_") != std::string::npos)
+ if(input0Name.find("input_") != std::string::npos)
{
std::vector<int32_t> inputShape0 = GetTosaTensorShape(inputs[0]->GetShape());
DType inputDType0 = ArmNNToDType(inputs[0]->GetDataType());
diff --git a/src/backends/tosaCommon/test/AvgPool2DIgnoreValueChecker.hpp b/src/backends/tosaCommon/test/AvgPool2DIgnoreValueChecker.hpp
index 6f57c4a61e..4c38d6b1e7 100644
--- a/src/backends/tosaCommon/test/AvgPool2DIgnoreValueChecker.hpp
+++ b/src/backends/tosaCommon/test/AvgPool2DIgnoreValueChecker.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022,2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -39,7 +39,7 @@ void VerifyAvgPool2DIgnoreValue(TosaSerializationBasicBlock* basicBlock,
std::basic_string<char> blockInputName = basicBlock->GetInputs()[i];
std::basic_string<char> operatorInputName = padOp->GetInputTensorNames()[i];
- std::string opStr = "input" + std::to_string(i) + "_";
+ std::string opStr = "input_";
CHECK(blockInputName == operatorInputName);
CHECK(basicBlock->GetTensorByName(blockInputName));
diff --git a/src/backends/tosaCommon/test/OneToManyMappingTests.cpp b/src/backends/tosaCommon/test/OneToManyMappingTests.cpp
index 991ef159bf..6ad6ea8d05 100644
--- a/src/backends/tosaCommon/test/OneToManyMappingTests.cpp
+++ b/src/backends/tosaCommon/test/OneToManyMappingTests.cpp
@@ -77,8 +77,7 @@ TEST_CASE("GetTosaMappingFromLayer_AvgPool2DIgnoreValueLayer")
input0->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
pool->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
- TosaSerializationBasicBlock* basicBlock =
- GetTosaMappingFromLayer(PolymorphicDowncast<Layer*>(pool));
+ TosaSerializationBasicBlock* basicBlock = GetTosaMappingFromLayer(PolymorphicDowncast<Layer*>(pool));
VerifyAvgPool2DIgnoreValue(basicBlock,
inputShape,
outputShape,
diff --git a/src/backends/tosaCommon/test/OneToOneMappingTests.cpp b/src/backends/tosaCommon/test/OneToOneMappingTests.cpp
index 267c9fb49d..8665aa9102 100644
--- a/src/backends/tosaCommon/test/OneToOneMappingTests.cpp
+++ b/src/backends/tosaCommon/test/OneToOneMappingTests.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -786,7 +786,7 @@ TEST_CASE("GetTosaMapping_TransposeConv2dLayer")
CHECK(basicBlock->GetOperators().size() == 3);
CHECK(basicBlock->GetTensors().size() == 4);
- CHECK(basicBlock->GetInputs()[0].find("input0_") != std::string::npos);
+ CHECK(basicBlock->GetInputs()[0].find("input_") != std::string::npos);
CHECK(basicBlock->GetInputs()[1].find("constant_") != std::string::npos);
CHECK(basicBlock->GetInputs()[2].find("constant_") != std::string::npos);
CHECK(basicBlock->GetOutputs()[0].find("output0_") != std::string::npos);
@@ -848,7 +848,7 @@ TEST_CASE("GetTosaMappingFromLayer_TransposeConv2dLayer")
CHECK(basicBlock->GetOperators().size() == 3);
CHECK(basicBlock->GetTensors().size() == 4);
- CHECK(basicBlock->GetInputs()[0].find("input0_") != std::string::npos);
+ CHECK(basicBlock->GetInputs()[0].find("input_") != std::string::npos);
CHECK(basicBlock->GetInputs()[1].find("constant_") != std::string::npos);
CHECK(basicBlock->GetInputs()[2].find("constant_") != std::string::npos);
CHECK(basicBlock->GetOutputs()[0].find("output0_") != std::string::npos);
diff --git a/src/backends/tosaCommon/test/SplitChecker.hpp b/src/backends/tosaCommon/test/SplitChecker.hpp
index edef4a1cf9..4a4eeba016 100644
--- a/src/backends/tosaCommon/test/SplitChecker.hpp
+++ b/src/backends/tosaCommon/test/SplitChecker.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -38,7 +38,7 @@ void VerifySplit(TosaSerializationBasicBlock* splitBlock,
std::basic_string<char> blockInputName = splitBlock->GetInputs()[0];
std::basic_string<char> operatorInputName = sliceOp->GetInputTensorNames()[0];
- std::string opInputStr = "input" + std::to_string(0) + "_";
+ std::string opInputStr = "input_";
CHECK(blockInputName == operatorInputName);
CHECK(splitBlock->GetTensorByName(blockInputName));
diff --git a/src/backends/tosaCommon/test/TosaTestUtils.hpp b/src/backends/tosaCommon/test/TosaTestUtils.hpp
index 05dd164b50..a0eec74e12 100644
--- a/src/backends/tosaCommon/test/TosaTestUtils.hpp
+++ b/src/backends/tosaCommon/test/TosaTestUtils.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -284,20 +284,20 @@ inline void AssertTosaOneToOneMappingBasicBlock(TosaSerializationBasicBlock* bas
CHECK(op->GetInputTensorNames().size() == numInputTensors);
CHECK(op->GetOutputTensorNames().size() == numOutputs);
- for (uint32_t i = 0; i < numInputs; i++)
+ for (uint32_t i = 0; i < numInputs; ++i)
{
std::basic_string<char> blockInputName = basicBlock->GetInputs()[i];
std::basic_string<char> operatorInputName = op->GetInputTensorNames()[i];
std::basic_string<char> tensorName = basicBlock->GetTensors()[i]->GetName();
- std::string opStr = "input" + std::to_string(i) + "_";
+ std::string opStr = "input_";
CHECK(blockInputName == operatorInputName);
CHECK(tensorName == operatorInputName);
CHECK(blockInputName.find(opStr) != std::string::npos);
}
- for (uint32_t i = 0; i < numOutputs; i++)
+ for (uint32_t i = 0; i < numOutputs; ++i)
{
std::basic_string<char> blockOutputName = basicBlock->GetOutputs()[i];
std::basic_string<char> operatorOutputName = op->GetOutputTensorNames()[i];
diff --git a/tests/ExecuteNetwork/ArmNNExecutor.cpp b/tests/ExecuteNetwork/ArmNNExecutor.cpp
index 4518f1426f..2004bb1ec0 100644
--- a/tests/ExecuteNetwork/ArmNNExecutor.cpp
+++ b/tests/ExecuteNetwork/ArmNNExecutor.cpp
@@ -7,27 +7,157 @@
#include "ArmNNExecutor.hpp"
#include "NetworkExecutionUtils/NetworkExecutionUtils.hpp"
-#include <armnn/IAsyncExecutionCallback.hpp>
#include <AsyncExecutionCallback.hpp>
-
-
+#include <armnn/IAsyncExecutionCallback.hpp>
+#if defined(ARMNN_SERIALIZER)
+#include <armnnSerializer/ISerializer.hpp>
+#endif
using namespace armnn;
using namespace std::chrono;
+#if defined(ARMNN_SERIALIZER)
+/**
+ * Given a reference to an INetwork and a target directory, serialize the network to a file
+ * called "<timestamp>_network.armnn"
+ *
+ * @param network The network to serialize.
+ * @param dumpDir The target directory.
+ * @return The full path to the serialized file.
+ */
+std::string SerializeNetwork(const armnn::INetwork& network, const std::string& dumpDir)
+{
+ if (dumpDir.empty())
+ {
+ throw InvalidArgumentException("An output directory must be specified.");
+ }
+ fs::path outputDirectory(dumpDir);
+ if (!exists(outputDirectory))
+ {
+ throw InvalidArgumentException(
+ fmt::format("The specified directory does not exist: {}", outputDirectory.c_str()));
+ }
+ auto serializer(armnnSerializer::ISerializer::Create());
+ // Serialize the Network
+ serializer->Serialize(network);
+
+ fs::path fileName;
+ fileName += dumpDir;
+    // Use a timestamp to give the serialized network file a unique name.
+ timespec ts;
+ if (clock_gettime(CLOCK_MONOTONIC_RAW, &ts) == 0)
+ {
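+        // The resulting file name has the form <seconds>_<nanoseconds>_network.armnn.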
+ std::stringstream ss;
+ ss << std::to_string(ts.tv_sec) << "_" << std::to_string(ts.tv_nsec) << "_network.armnn";
+ fileName += ss.str();
+ }
+ else
+ {
+ // This is incredibly unlikely but just in case.
+        throw RuntimeException("clock_gettime with CLOCK_MONOTONIC_RAW returned a non-zero result.");
+ }
+
+ // Save serialized network to a file
+ std::ofstream serializedFile(fileName, std::ios::out | std::ios::binary);
+ auto serialized = serializer->SaveSerializedToStream(serializedFile);
+ if (!serialized)
+ {
+        throw RuntimeException(fmt::format("An error occurred when serializing to file {}", fileName.c_str()));
+ }
+ serializedFile.flush();
+ serializedFile.close();
+ return fileName;
+}
+
+/**
+ * Given a reference to an optimized network and a target directory, serialize the network in .dot file format to
+ * a file called "<timestamp>_optimized_networkgraph.dot"
+ *
+ * @param optimizedNetwork The optimized network to serialize.
+ * @param dumpDir The target directory.
+ * @return The full path to the serialized file.
+ */
+std::string SerializeNetworkToDotFile(const armnn::IOptimizedNetwork& optimizedNetwork, const std::string& dumpDir)
+{
+ if (dumpDir.empty())
+ {
+ throw InvalidArgumentException("An output directory must be specified.");
+ }
+ fs::path outputDirectory(dumpDir);
+ if (!exists(outputDirectory))
+ {
+ throw InvalidArgumentException(
+ fmt::format("The specified directory does not exist: {}", outputDirectory.c_str()));
+ }
+
+ fs::path fileName;
+ fileName += dumpDir;
+    // Use a timestamp to give the .dot graph file a unique name.
+ timespec ts;
+ if (clock_gettime(CLOCK_MONOTONIC_RAW, &ts) == 0)
+ {
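+        // The resulting file name has the form <seconds>_<nanoseconds>_optimized_networkgraph.dot.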
+ std::stringstream ss;
+ ss << std::to_string(ts.tv_sec) << "_" << std::to_string(ts.tv_nsec) << "_optimized_networkgraph.dot";
+ fileName += ss.str();
+ }
+ else
+ {
+ // This is incredibly unlikely but just in case.
+        throw RuntimeException("clock_gettime with CLOCK_MONOTONIC_RAW returned a non-zero result.");
+ }
+
+ // Write the network graph to a dot file.
+ std::ofstream fileStream;
+ fileStream.open(fileName, std::ofstream::out | std::ofstream::trunc);
+ if (!fileStream.good())
+ {
+        throw RuntimeException(fmt::format("An error occurred when creating {}", fileName.c_str()));
+ }
+
+ if (optimizedNetwork.SerializeToDot(fileStream) != armnn::Status::Success)
+ {
+        throw RuntimeException(fmt::format("An error occurred when serializing to file {}", fileName.c_str()));
+ }
+ fileStream.flush();
+ fileStream.close();
+ return fileName;
+}
+#endif
+
ArmNNExecutor::ArmNNExecutor(const ExecuteNetworkParams& params, armnn::IRuntime::CreationOptions runtimeOptions)
-: m_Params(params)
+ : m_Params(params)
{
- runtimeOptions.m_EnableGpuProfiling = params.m_EnableProfiling;
+ runtimeOptions.m_EnableGpuProfiling = params.m_EnableProfiling;
runtimeOptions.m_DynamicBackendsPath = params.m_DynamicBackendsPath;
// Create/Get the static ArmNN Runtime. Note that the m_Runtime will be shared by all ArmNNExecutor
// instances so the RuntimeOptions cannot be altered for different ArmNNExecutor instances.
m_Runtime = GetRuntime(runtimeOptions);
- auto parser = CreateParser();
+ auto parser = CreateParser();
auto network = parser->CreateNetwork(m_Params);
- auto optNet = OptimizeNetwork(network.get());
+ auto optNet = OptimizeNetwork(network.get());
+    // If the user has asked for detailed data, write out the .armnn and .dot files.
+ if (params.m_SerializeToArmNN)
+ {
+#if defined(ARMNN_SERIALIZER)
+ // .armnn first.
+            // This may throw if the directory cannot be created or the file cannot be written.
+ std::string targetDirectory(armnnUtils::Filesystem::CreateDirectory("/ArmNNSerializeNetwork"));
+ std::string fileName;
+ fileName = SerializeNetwork(*network, targetDirectory);
+            ARMNN_LOG(info) << "The pre-optimized network has been serialized to: " << fileName;
+            // and the .dot file.
+            // Most of the possible exceptions should have already occurred with the .armnn file.
+            fileName = SerializeNetworkToDotFile(*optNet, targetDirectory);
+            ARMNN_LOG(info) << "The optimized network has been serialized to: " << fileName;
+#else
+ ARMNN_LOG(info) << "Arm NN has not been built with ARMNN_SERIALIZER enabled.";
+#endif
+ }
m_IOInfo = GetIOInfo(optNet.get());
armnn::ProfilingDetailsMethod profilingDetailsMethod = ProfilingDetailsMethod::Undefined;
@@ -176,6 +306,12 @@ void ArmNNExecutor::ExecuteAsync()
void ArmNNExecutor::ExecuteSync()
{
+ // If we've only been asked to serialize the networks, don't execute the inference.
+ if (m_Params.m_SerializeToArmNN)
+ {
+        ARMNN_LOG(info) << "--serialize-to-armnn has been specified. No inference will be executed.";
+ return;
+ }
for (size_t x = 0; x < m_Params.m_Iterations; x++)
{
std::shared_ptr<armnn::IProfiler> profiler = m_Runtime->GetProfiler(m_NetworkId);
@@ -800,6 +936,7 @@ armnn::BindingPointInfo ArmNNExecutor::TfliteParser::GetOutputBindingPointInfo(s
#if defined(ARMNN_ONNX_PARSER)
+ARMNN_NO_DEPRECATE_WARN_BEGIN
ArmNNExecutor::OnnxParser::OnnxParser() : m_Parser(armnnOnnxParser::IOnnxParser::Create()){}
armnn::INetworkPtr ArmNNExecutor::OnnxParser::CreateNetwork(const ExecuteNetworkParams& params)
@@ -843,4 +980,5 @@ armnn::BindingPointInfo ArmNNExecutor::OnnxParser::GetOutputBindingPointInfo(siz
{
return m_Parser->GetNetworkOutputBindingInfo(outputName);
}
+ARMNN_NO_DEPRECATE_WARN_END
#endif
diff --git a/tests/ExecuteNetwork/ExecuteNetworkParams.hpp b/tests/ExecuteNetwork/ExecuteNetworkParams.hpp
index ffcb4f482c..c2bfb951d5 100644
--- a/tests/ExecuteNetwork/ExecuteNetworkParams.hpp
+++ b/tests/ExecuteNetwork/ExecuteNetworkParams.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022, 2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -67,6 +67,7 @@ struct ExecuteNetworkParams
std::string m_ComparisonFile;
std::vector<armnn::BackendId> m_ComparisonComputeDevices;
bool m_CompareWithTflite;
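+    // When set, the loaded network and the optimized graph are serialized to file and no inference is run.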
+ bool m_SerializeToArmNN;
// Ensures that the parameters for ExecuteNetwork fit together
void ValidateParams();
diff --git a/tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp b/tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp
index 5c1337f769..87b38c5f78 100644
--- a/tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp
+++ b/tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp
@@ -216,7 +216,8 @@ ProgramOptions::ProgramOptions() : m_CxxOptions{"ExecuteNetwork",
("m,model-path",
"Path to model file, e.g. .armnn, .tflite, .onnx. "
- "DEPRECATED: .pb and .prototxt model files no longer load and are deprecated.",
+                "DEPRECATED: .pb and .prototxt model files are no longer loaded and are deprecated. "
+                "DEPRECATED: .onnx model files will no longer be loaded from 24.08 onwards.",
cxxopts::value<std::string>(m_ExNetParams.m_ModelPath));
m_CxxOptions.add_options("b) Ordering")
@@ -237,7 +238,8 @@ ProgramOptions::ProgramOptions() : m_CxxOptions{"ExecuteNetwork",
cxxopts::value<std::string>(m_RuntimeOptions.m_DynamicBackendsPath))
("P, thread-pool-size",
- "Run the network using the Arm NN thread pool with the number of threads provided. ",
+ "Run the network using the Arm NN thread pool with the number of threads provided. "
+                 "DEPRECATED: The asynchronous execution interface will be removed in 24.08.",
cxxopts::value<size_t>(m_ExNetParams.m_ThreadPoolSize)->default_value("0"))
("d,input-tensor-data",
@@ -351,6 +353,12 @@ ProgramOptions::ProgramOptions() : m_CxxOptions{"ExecuteNetwork",
"Perform an per byte root mean square error calculation of the output of the inference with"
" the tflite ref model.",
cxxopts::value<bool>(m_ExNetParams.m_CompareWithTflite)->default_value("false")
+ ->implicit_value("true"))
+ ("serialize-to-armnn",
+ "Serialize the loaded network to an .armnn file. This option will also serialize the optimized network"
+ " in dot format. This option only works with both the TfLite parser and the Arm NN serializer"
+ " enabled in the build. An inference will NOT be executed.",
+ cxxopts::value<bool>(m_ExNetParams.m_SerializeToArmNN)->default_value("false")
->implicit_value("true"));
m_CxxOptions.add_options("d) Optimization")
diff --git a/tests/InferenceModel.hpp b/tests/InferenceModel.hpp
index c053a4429a..a1b55c0996 100644
--- a/tests/InferenceModel.hpp
+++ b/tests/InferenceModel.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -276,6 +276,7 @@ public:
#endif
#if defined(ARMNN_ONNX_PARSER)
+ARMNN_NO_DEPRECATE_WARN_BEGIN
template <>
struct CreateNetworkImpl<armnnOnnxParser::IOnnxParser>
{
@@ -343,6 +344,7 @@ public:
return network;
}
};
+ARMNN_NO_DEPRECATE_WARN_END
#endif
@@ -660,11 +662,11 @@ public:
// Start timer to record inference time in EnqueueWorkload (in milliseconds)
const auto start_time = armnn::GetTimeNow();
-
+ARMNN_NO_DEPRECATE_WARN_BEGIN
armnn::Status ret = m_Runtime->Execute(workingMemHandleRef,
MakeInputTensors(inputContainers),
MakeOutputTensors(outputContainers));
-
+ARMNN_NO_DEPRECATE_WARN_END
const auto duration = armnn::GetTimeDuration(start_time);
// if profiling is enabled print out the results