path: root/delegate/classic
author    Teresa Charlin <teresa.charlinreyes@arm.com>  2023-03-14 12:10:28 +0000
committer Teresa Charlin <teresa.charlinreyes@arm.com>  2023-03-28 11:41:55 +0100
commit    ad1b3d7518429e2d16a2695d9b0bbf81b6565ac9 (patch)
tree      a5b8e1ad68a2437f007338f0b6195ca5ed2bddc3 /delegate/classic
parent    9cb3466b677a1048b8abb24661e92c4c83fdda04 (diff)
IVGCVSW-7555 Restructure Delegate
* New folders created:
  * common is for common code where the TfLite API is not used
  * classic is for the existing delegate implementation
  * opaque is for the new opaque delegate implementation
  * tests is for tests shared between the existing delegate and the opaque delegate, with test utils to work out which delegate to use
* The existing delegate is built as libarmnnDelegate.so and the opaque delegate as libarmnnOpaqueDelegate.so
* An Opaque structure is introduced, but no API is added yet
* CMakeLists.txt and delegate/CMakeLists.txt have been modified, and 2 new CMakeLists.txt files have been added
* BUILD_ARMNN_TFLITE_DELEGATE has been renamed to BUILD_CLASSIC_DELEGATE
* BUILD_ARMNN_TFLITE_OPAQUE_DELEGATE has been renamed to BUILD_OPAQUE_DELEGATE

Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com>
Change-Id: Ib682b9ad0ac8d8acdc4ec6d9099bb0008a9fe8ed
Diffstat (limited to 'delegate/classic')
-rw-r--r--  delegate/classic/CMakeLists.txt | 101
-rw-r--r--  delegate/classic/include/Version.hpp | 29
-rw-r--r--  delegate/classic/include/armnn_delegate.hpp | 141
-rw-r--r--  delegate/classic/src/Activation.hpp | 133
-rw-r--r--  delegate/classic/src/ArgMinMax.hpp | 132
-rw-r--r--  delegate/classic/src/BatchMatMul.hpp | 107
-rw-r--r--  delegate/classic/src/BatchSpace.hpp | 216
-rw-r--r--  delegate/classic/src/Comparison.hpp | 135
-rw-r--r--  delegate/classic/src/Control.hpp | 342
-rw-r--r--  delegate/classic/src/Convolution.hpp | 870
-rw-r--r--  delegate/classic/src/ElementwiseBinary.hpp | 401
-rw-r--r--  delegate/classic/src/ElementwiseUnary.hpp | 91
-rw-r--r--  delegate/classic/src/Fill.hpp | 114
-rw-r--r--  delegate/classic/src/FullyConnected.hpp | 275
-rw-r--r--  delegate/classic/src/Gather.hpp | 106
-rw-r--r--  delegate/classic/src/GatherNd.hpp | 82
-rw-r--r--  delegate/classic/src/LogicalBinary.hpp | 102
-rw-r--r--  delegate/classic/src/Lstm.hpp | 268
-rw-r--r--  delegate/classic/src/MultiLayerFacade.hpp | 136
-rw-r--r--  delegate/classic/src/Normalization.hpp | 162
-rw-r--r--  delegate/classic/src/Pack.hpp | 122
-rw-r--r--  delegate/classic/src/Pad.hpp | 179
-rw-r--r--  delegate/classic/src/Pooling.hpp | 327
-rw-r--r--  delegate/classic/src/Prelu.hpp | 108
-rw-r--r--  delegate/classic/src/Quantization.hpp | 171
-rw-r--r--  delegate/classic/src/Redefine.hpp | 289
-rw-r--r--  delegate/classic/src/Reduce.hpp | 146
-rw-r--r--  delegate/classic/src/Resize.hpp | 205
-rw-r--r--  delegate/classic/src/Round.hpp | 71
-rw-r--r--  delegate/classic/src/Shape.hpp | 95
-rw-r--r--  delegate/classic/src/SharedFunctions.cpp | 116
-rw-r--r--  delegate/classic/src/SharedFunctions.hpp | 25
-rw-r--r--  delegate/classic/src/Slice.hpp | 141
-rw-r--r--  delegate/classic/src/Softmax.hpp | 155
-rw-r--r--  delegate/classic/src/SpaceDepth.hpp | 152
-rw-r--r--  delegate/classic/src/Split.hpp | 347
-rw-r--r--  delegate/classic/src/StridedSlice.hpp | 156
-rw-r--r--  delegate/classic/src/Transpose.hpp | 110
-rw-r--r--  delegate/classic/src/UnidirectionalSequenceLstm.hpp | 302
-rw-r--r--  delegate/classic/src/Unpack.hpp | 214
-rw-r--r--  delegate/classic/src/armnn_delegate.cpp | 1059
-rw-r--r--  delegate/classic/src/armnn_external_delegate.cpp | 68
42 files changed, 8501 insertions, 0 deletions
diff --git a/delegate/classic/CMakeLists.txt b/delegate/classic/CMakeLists.txt
new file mode 100644
index 0000000000..04167130d7
--- /dev/null
+++ b/delegate/classic/CMakeLists.txt
@@ -0,0 +1,101 @@
+#
+# Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+# SPDX-License-Identifier: MIT
+#
+
+set(armnnClassicDelegateObject_sources)
+list(APPEND armnnClassicDelegateObject_sources
+ include/armnn_delegate.hpp
+ include/Version.hpp
+ src/armnn_delegate.cpp
+ src/armnn_external_delegate.cpp
+ src/Activation.hpp
+ src/ArgMinMax.hpp
+ src/BatchMatMul.hpp
+ src/BatchSpace.hpp
+ src/Comparison.hpp
+ src/Convolution.hpp
+ src/Control.hpp
+ src/ElementwiseBinary.hpp
+ src/ElementwiseUnary.hpp
+ src/Fill.hpp
+ src/FullyConnected.hpp
+ src/Gather.hpp
+ src/GatherNd.hpp
+ src/LogicalBinary.hpp
+ src/Lstm.hpp
+ src/MultiLayerFacade.hpp
+ src/Normalization.hpp
+ src/Pack.hpp
+ src/Pad.hpp
+ src/Pooling.hpp
+ src/Prelu.hpp
+ src/Quantization.hpp
+ src/Redefine.hpp
+ src/Reduce.hpp
+ src/Resize.hpp
+ src/Round.hpp
+ src/Shape.hpp
+ src/SharedFunctions.hpp
+ src/SharedFunctions.cpp
+ src/Slice.hpp
+ src/Softmax.hpp
+ src/SpaceDepth.hpp
+ src/Split.hpp
+ src/Unpack.hpp
+ src/Transpose.hpp)
+
+add_library(armnnClassicDelegateObject OBJECT ${armnnClassicDelegateObject_sources})
+
+target_include_directories(armnnClassicDelegateObject
+ PUBLIC
+ $<INSTALL_INTERFACE:include>
+ $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
+ $<BUILD_INTERFACE:${PROJECT_SOURCE_DIR}/common/include>
+ PRIVATE
+ ${CMAKE_CURRENT_SOURCE_DIR}/src
+ ${PROJECT_SOURCE_DIR}/common/src)
+
+## Add Tflite dependency
+if(NOT TfLite_INCLUDE_DIR OR NOT TfLite_Schema_INCLUDE_PATH)
+ find_package(TfLiteSrc REQUIRED MODULE)
+endif()
+
+# Various tflite header files are not warning clean
+# We can't change compilation flags on header files directly, so we need to add them to an interface library first
+add_library(tflite_headers INTERFACE)
+target_include_directories(tflite_headers
+ INTERFACE
+ $<BUILD_INTERFACE:${TfLite_INCLUDE_DIR}>
+ $<INSTALL_INTERFACE:include/tflite_headers>)
+
+target_compile_options(tflite_headers
+ INTERFACE
+ -Wno-conversion
+ -Wno-sign-conversion
+ -Wno-unused-parameter
+ -Wno-unused-function)
+
+target_link_libraries(armnnClassicDelegateObject PUBLIC tflite_headers)
+
+## Add Flatbuffers dependency
+find_package(Flatbuffers REQUIRED MODULE)
+
+target_link_libraries(armnnClassicDelegateObject PRIVATE ${Flatbuffers_LIB})
+
+# include/flatbuffers/flatbuffers.h is not warning clean
+# We can't change compilation flags on header files directly, so we need to add them to an interface library first
+add_library(flatbuffer_headers INTERFACE)
+target_include_directories(flatbuffer_headers
+ INTERFACE
+ $<BUILD_INTERFACE:${Flatbuffers_INCLUDE_DIR}>
+ $<INSTALL_INTERFACE:include/flatbuffer_headers>)
+target_compile_options(flatbuffer_headers INTERFACE -Wno-sign-conversion)
+
+target_link_libraries(armnnClassicDelegateObject PUBLIC flatbuffer_headers)
+
+####################################################
+## Export targets
+install(TARGETS armnnClassicDelegateObject
+ LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}
+        RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR})
\ No newline at end of file
diff --git a/delegate/classic/include/Version.hpp b/delegate/classic/include/Version.hpp
new file mode 100644
index 0000000000..c171d77c16
--- /dev/null
+++ b/delegate/classic/include/Version.hpp
@@ -0,0 +1,29 @@
+//
+// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+namespace armnnDelegate
+{
+
+/// Macro utils
+#define STRINGIFY_VALUE(s) STRINGIFY_MACRO(s)
+#define STRINGIFY_MACRO(s) #s
+
+// ArmNN Delegate version components
+#define DELEGATE_MAJOR_VERSION 28
+#define DELEGATE_MINOR_VERSION 0
+#define DELEGATE_PATCH_VERSION 0
+
+/// DELEGATE_VERSION: "X.Y.Z"
+/// where:
+/// X = Major version number
+/// Y = Minor version number
+/// Z = Patch version number
+#define DELEGATE_VERSION STRINGIFY_VALUE(DELEGATE_MAJOR_VERSION) "." \
+ STRINGIFY_VALUE(DELEGATE_MINOR_VERSION) "." \
+ STRINGIFY_VALUE(DELEGATE_PATCH_VERSION)
+
+} // namespace armnnDelegate
\ No newline at end of file
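
For reference, a minimal sketch of how the macros above expand, assuming the version values in this patch and that delegate/classic/include is on the include path (the example is illustrative, not part of the commit):

    // STRINGIFY_VALUE(DELEGATE_MAJOR_VERSION) -> STRINGIFY_MACRO(28) -> "28", so adjacent
    // string-literal concatenation turns DELEGATE_VERSION into the single literal "28.0.0".
    #include <cstdio>
    #include <Version.hpp>

    int main()
    {
        std::printf("%s\n", DELEGATE_VERSION); // prints: 28.0.0
        return 0;
    }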
diff --git a/delegate/classic/include/armnn_delegate.hpp b/delegate/classic/include/armnn_delegate.hpp
new file mode 100644
index 0000000000..8957dc87d6
--- /dev/null
+++ b/delegate/classic/include/armnn_delegate.hpp
@@ -0,0 +1,141 @@
+//
+// Copyright © 2020-2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <DelegateOptions.hpp>
+
+#include <tensorflow/lite/builtin_ops.h>
+#include <tensorflow/lite/c/builtin_op_data.h>
+#include <tensorflow/lite/c/common.h>
+#include <tensorflow/lite/minimal_logging.h>
+#include <tensorflow/lite/version.h>
+
+#if TF_MAJOR_VERSION > 2 || (TF_MAJOR_VERSION == 2 && TF_MINOR_VERSION > 3)
+#define ARMNN_POST_TFLITE_2_3
+#endif
+
+#if TF_MAJOR_VERSION > 2 || (TF_MAJOR_VERSION == 2 && TF_MINOR_VERSION > 4)
+#define ARMNN_POST_TFLITE_2_4
+#endif
+
+#if TF_MAJOR_VERSION > 2 || (TF_MAJOR_VERSION == 2 && TF_MINOR_VERSION > 5)
+#define ARMNN_POST_TFLITE_2_5
+#endif
+
+namespace armnnDelegate
+{
+
+struct DelegateData
+{
+ DelegateData(const std::vector<armnn::BackendId>& backends)
+ : m_Backends(backends)
+ , m_Network(nullptr, nullptr)
+ {}
+
+ const std::vector<armnn::BackendId> m_Backends;
+ armnn::INetworkPtr m_Network;
+ std::vector<armnn::IOutputSlot*> m_OutputSlotForNode;
+};
+
+// Forward declarations of the functions initializing the ArmNN Delegate
+DelegateOptions TfLiteArmnnDelegateOptionsDefault();
+
+TfLiteDelegate* TfLiteArmnnDelegateCreate(armnnDelegate::DelegateOptions options);
+
+void TfLiteArmnnDelegateDelete(TfLiteDelegate* tfLiteDelegate);
+
+TfLiteStatus DoPrepare(TfLiteContext* context, TfLiteDelegate* delegate);
+
+/// ArmNN Delegate
+class Delegate
+{
+ friend class ArmnnSubgraph;
+public:
+ explicit Delegate(armnnDelegate::DelegateOptions options);
+
+ TfLiteIntArray* IdentifyOperatorsToDelegate(TfLiteContext* context);
+
+ TfLiteDelegate* GetDelegate();
+
+ /// Retrieve version in X.Y.Z form
+ static const std::string GetVersion();
+
+private:
+ /**
+     * Returns a pointer to the armnn::IRuntime instance; it is shared by all armnn_delegate instances.
+ */
+ armnn::IRuntime* GetRuntime(const armnn::IRuntime::CreationOptions& options)
+ {
+ static armnn::IRuntimePtr instance = armnn::IRuntime::Create(options);
+ // Instantiated on first use.
+ return instance.get();
+ }
+
+ TfLiteDelegate m_Delegate = {
+ reinterpret_cast<void*>(this), // .data_
+ DoPrepare, // .Prepare
+ nullptr, // .CopyFromBufferHandle
+ nullptr, // .CopyToBufferHandle
+ nullptr, // .FreeBufferHandle
+ kTfLiteDelegateFlagsNone, // .flags
+ nullptr, // .opaque_delegate_builder
+ };
+
+ /// ArmNN Runtime pointer
+ armnn::IRuntime* m_Runtime;
+ /// ArmNN Delegate Options
+ armnnDelegate::DelegateOptions m_Options;
+};
+
+/// ArmnnSubgraph class: parses the TfLite nodes into ArmNN format and creates the ArmNN graph
+class ArmnnSubgraph
+{
+public:
+ static ArmnnSubgraph* Create(TfLiteContext* tfLiteContext,
+ const TfLiteDelegateParams* parameters,
+ const Delegate* delegate);
+
+ TfLiteStatus Prepare(TfLiteContext* tfLiteContext);
+
+ TfLiteStatus Invoke(TfLiteContext* tfLiteContext, TfLiteNode* tfLiteNode);
+
+ static TfLiteStatus VisitNode(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ TfLiteRegistration* tfLiteRegistration,
+ TfLiteNode* tfLiteNode,
+ int nodeIndex);
+
+private:
+ ArmnnSubgraph(armnn::NetworkId networkId,
+ armnn::IRuntime* runtime,
+ std::vector<armnn::BindingPointInfo>& inputBindings,
+ std::vector<armnn::BindingPointInfo>& outputBindings)
+ : m_NetworkId(networkId), m_Runtime(runtime), m_InputBindings(inputBindings), m_OutputBindings(outputBindings)
+ {}
+
+ static TfLiteStatus AddInputLayer(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ const TfLiteIntArray* inputs,
+ std::vector<armnn::BindingPointInfo>& inputBindings);
+
+ static TfLiteStatus AddOutputLayer(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ const TfLiteIntArray* outputs,
+ std::vector<armnn::BindingPointInfo>& outputBindings);
+
+
+ /// The Network Id
+ armnn::NetworkId m_NetworkId;
+    /// ArmNN Runtime
+ armnn::IRuntime* m_Runtime;
+
+ // Binding information for inputs and outputs
+ std::vector<armnn::BindingPointInfo> m_InputBindings;
+ std::vector<armnn::BindingPointInfo> m_OutputBindings;
+
+};
+
+} // armnnDelegate namespace
\ No newline at end of file
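
A minimal usage sketch for the API declared above, assuming a tflite::Interpreter has already been built from a model (interpreter construction is omitted; the smart-pointer deleter pattern follows the Arm NN delegate documentation):

    #include <armnn_delegate.hpp>

    #include <tensorflow/lite/interpreter.h>

    #include <memory>

    void ApplyArmnnDelegate(tflite::Interpreter& interpreter)
    {
        // Create the ArmNN delegate with default options and tie its lifetime to a smart pointer.
        armnnDelegate::DelegateOptions delegateOptions = armnnDelegate::TfLiteArmnnDelegateOptionsDefault();
        std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
            armnnDelegatePtr(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
                             armnnDelegate::TfLiteArmnnDelegateDelete);

        // DoPrepare partitions the graph: nodes ArmNN supports become ArmnnSubgraphs,
        // everything else stays on the default TfLite runtime.
        if (interpreter.ModifyGraphWithDelegate(armnnDelegatePtr.get()) != kTfLiteOk)
        {
            // The model keeps running entirely on the TfLite runtime.
        }
    }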
diff --git a/delegate/classic/src/Activation.hpp b/delegate/classic/src/Activation.hpp
new file mode 100644
index 0000000000..b86d89b4e5
--- /dev/null
+++ b/delegate/classic/src/Activation.hpp
@@ -0,0 +1,133 @@
+//
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <DelegateUtils.hpp>
+
+#include <tensorflow/lite/builtin_ops.h>
+#include <tensorflow/lite/c/builtin_op_data.h>
+#include <tensorflow/lite/c/common.h>
+#include <tensorflow/lite/minimal_logging.h>
+
+namespace armnnDelegate
+{
+
+TfLiteStatus ValidateActivationOperator(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ const armnn::TensorInfo& inputInfo,
+ const armnn::TensorInfo& outputInfo,
+ armnn::ActivationDescriptor& activationDesc)
+{
+ bool isSupported = false;
+ auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
+ {
+ FORWARD_LAYER_SUPPORT_FUNC("ACTIVATION",
+ tfLiteContext,
+ IsActivationSupported,
+ delegateData.m_Backends,
+ isSupported,
+ armnn::BackendId(),
+ inputInfo,
+ outputInfo,
+ activationDesc);
+ };
+
+ validateFunc(outputInfo, isSupported);
+ return isSupported ? kTfLiteOk : kTfLiteError;
+}
+
+TfLiteStatus VisitActivationOperator(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ TfLiteNode* tfLiteNode,
+ int nodeIndex,
+ int32_t operatorCode)
+{
+ TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+ TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+
+ const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
+ const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
+ if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+
+ const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
+ if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+
+ const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
+ const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
+
+ armnn::ActivationDescriptor activationDesc;
+ switch(operatorCode)
+ {
+ case kTfLiteBuiltinRelu:
+ {
+ activationDesc.m_Function = armnn::ActivationFunction::ReLu;
+ break;
+ }
+ case kTfLiteBuiltinRelu6:
+ {
+ activationDesc.m_Function = armnn::ActivationFunction::BoundedReLu;
+ activationDesc.m_A = 6.0f;
+ break;
+ }
+ case kTfLiteBuiltinLogistic:
+ {
+ activationDesc.m_Function = armnn::ActivationFunction::Sigmoid;
+ break;
+ }
+ case kTfLiteBuiltinTanh:
+ {
+ activationDesc.m_Function = armnn::ActivationFunction::TanH;
+ activationDesc.m_A = 1.0f;
+ activationDesc.m_B = 1.0f;
+ break;
+ }
+ case kTfLiteBuiltinElu:
+ {
+ activationDesc.m_Function = armnn::ActivationFunction::Elu;
+ activationDesc.m_A = 1.0f;
+ break;
+ }
+ case kTfLiteBuiltinHardSwish:
+ {
+ activationDesc.m_Function = armnn::ActivationFunction::HardSwish;
+ break;
+ }
+ default:
+ {
+ return kTfLiteError;
+ }
+ }
+ if (!delegateData.m_Network)
+ {
+ return ValidateActivationOperator(delegateData,
+ tfLiteContext,
+ inputTensorInfo,
+ outputTensorInfo,
+ activationDesc);
+ }
+ armnn::IConnectableLayer* activationLayer = delegateData.m_Network->AddActivationLayer(activationDesc);
+ ARMNN_ASSERT(activationLayer != nullptr);
+
+ armnn::IOutputSlot& outputSlot = activationLayer->GetOutputSlot(0);
+ outputSlot.SetTensorInfo(outputTensorInfo);
+
+ // try to connect the Constant Inputs if there are any
+    if (ProcessInputs(activationLayer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
+
+ // Connect
+ return Connect(activationLayer, tfLiteNode, delegateData);
+}
+
+} // namespace armnnDelegate
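
VisitActivationOperator above shows the two-pass contract that every visit function in this folder follows: when delegateData.m_Network is null, the call only answers whether a backend supports the operator; on the second pass the layer is created, given its output TensorInfo, and connected. A reduced illustration of that flow (hypothetical function, not part of the commit):

    #include <armnn/Descriptors.hpp>
    #include <armnn/INetwork.hpp>
    #include <armnn_delegate.hpp>

    // First pass: DoPrepare invokes the visit function with a null m_Network, so only the
    // FORWARD_LAYER_SUPPORT_FUNC validation runs. Second pass: the layer is added and wired up.
    TfLiteStatus VisitExampleOperator(armnnDelegate::DelegateData& delegateData,
                                      const armnn::ActivationDescriptor& descriptor,
                                      const armnn::TensorInfo& outputTensorInfo,
                                      bool isSupported)
    {
        if (!delegateData.m_Network)
        {
            return isSupported ? kTfLiteOk : kTfLiteError;
        }

        armnn::IConnectableLayer* layer = delegateData.m_Network->AddActivationLayer(descriptor);
        layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
        return kTfLiteOk;
    }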
diff --git a/delegate/classic/src/ArgMinMax.hpp b/delegate/classic/src/ArgMinMax.hpp
new file mode 100644
index 0000000000..4e4a2a3f3a
--- /dev/null
+++ b/delegate/classic/src/ArgMinMax.hpp
@@ -0,0 +1,132 @@
+//
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <tensorflow/lite/builtin_ops.h>
+#include <tensorflow/lite/c/builtin_op_data.h>
+#include <tensorflow/lite/c/common.h>
+#include <tensorflow/lite/kernels/internal/tensor_ctypes.h>
+#include <tensorflow/lite/minimal_logging.h>
+
+namespace armnnDelegate
+{
+
+TfLiteStatus VisitArgMinMaxOperator(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ TfLiteNode* tfLiteNode,
+ int nodeIndex,
+ int32_t argMinMaxOperatorCode)
+{
+ TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 2, nodeIndex));
+ TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+
+ const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
+ const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
+ if (!IsValid(tfLiteContext, tfLiteInputTensor, argMinMaxOperatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+
+ const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
+ if (!IsValid(tfLiteContext, tfLiteOutputTensor, argMinMaxOperatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+
+ const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
+ const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
+
+ // Get const axis value from model and set it to descriptor.
+ const TfLiteTensor& tfLiteAxisTensor = tfLiteTensors[tfLiteNode->inputs->data[1]];
+ if (!IsValid(tfLiteContext, tfLiteAxisTensor, argMinMaxOperatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+
+ armnn::ArgMinMaxDescriptor desc;
+ // Get the axis value from the input tensor
+ switch (tfLiteAxisTensor.type)
+ {
+ case kTfLiteInt32:
+ case kTfLiteInt64:
+ desc.m_Axis = tflite::GetTensorData<int>(&tfLiteAxisTensor)[0];
+ break;
+ default:
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: Axis value data type is not supported in operator #%d node #%d: ",
+ argMinMaxOperatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+
+    // Validate output_type: only int32 and int64 outputs are supported.
+ if (argMinMaxOperatorCode == kTfLiteBuiltinArgMax)
+ {
+ desc.m_Function = armnn::ArgMinMaxFunction::Max;
+ auto* argMaxParameters = reinterpret_cast<TfLiteArgMaxParams*>(tfLiteNode->builtin_data);
+ if (argMaxParameters->output_type != kTfLiteInt32 && argMaxParameters->output_type != kTfLiteInt64)
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: output_type data type is not supported in operator #%d node #%d: ",
+ argMinMaxOperatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+ }
+ else
+ {
+ desc.m_Function = armnn::ArgMinMaxFunction::Min;
+ auto* argMinParameters = reinterpret_cast<TfLiteArgMinParams*>(tfLiteNode->builtin_data);
+ if (argMinParameters->output_type != kTfLiteInt32 && argMinParameters->output_type != kTfLiteInt64)
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: output_type data type is not supported in operator #%d node #%d: ",
+ argMinMaxOperatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+ }
+
+ bool isSupported = false;
+ armnn::BackendId setBackend;
+ auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
+ {
+ FORWARD_LAYER_SUPPORT_FUNC("ARGMINMAX",
+ tfLiteContext,
+ IsArgMinMaxSupported,
+ delegateData.m_Backends,
+ isSupported,
+ setBackend,
+ inputTensorInfo,
+ outInfo,
+ desc);
+ };
+
+ if (!delegateData.m_Network)
+ {
+ validateFunc(outputTensorInfo, isSupported);
+ return isSupported ? kTfLiteOk : kTfLiteError;
+ }
+
+ // Add an ArgMinMax layer
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddArgMinMaxLayer(desc);
+    ARMNN_ASSERT(layer != nullptr);
+    layer->SetBackendId(setBackend);
+
+ armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
+ outputSlot.SetTensorInfo(outputTensorInfo);
+
+ // try to connect the Constant Inputs if there are any
+    if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
+
+ // Connect
+ return Connect(layer, tfLiteNode, delegateData);
+}
+
+} // namespace armnnDelegate
diff --git a/delegate/classic/src/BatchMatMul.hpp b/delegate/classic/src/BatchMatMul.hpp
new file mode 100644
index 0000000000..f56f728ef5
--- /dev/null
+++ b/delegate/classic/src/BatchMatMul.hpp
@@ -0,0 +1,107 @@
+//
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <DelegateUtils.hpp>
+#include <algorithm>
+#include <iterator>
+#include <string>
+#include <vector>
+
+namespace armnnDelegate
+{
+ TfLiteStatus VisitBatchMatMulOperator(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ TfLiteNode* tfLiteNode,
+ int nodeIndex,
+ int32_t operatorCode)
+ {
+ TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 2, nodeIndex));
+ TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+
+ const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
+ const TfLiteTensor& kTfLiteLHSInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
+ const TfLiteTensor& kTfLiteRHSInputTensor = tfLiteTensors[tfLiteNode->inputs->data[1]];
+
+ if (!IsValid(tfLiteContext, kTfLiteLHSInputTensor, operatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+ if (!IsValid(tfLiteContext, kTfLiteRHSInputTensor, operatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+
+ if (IsDynamicTensor(kTfLiteLHSInputTensor) || IsDynamicTensor(kTfLiteRHSInputTensor))
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
+ operatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+
+ const TfLiteTensor& kTfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
+ if (IsDynamicTensor(kTfLiteOutputTensor))
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: Dynamic output tensors are not supported in operator #%d node #%d: ",
+ operatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+
+ const armnn::TensorInfo& armnnLHSInputTensorInfo = GetTensorInfoForTfLiteTensor(kTfLiteLHSInputTensor);
+ const armnn::TensorInfo& armnnRHSInputTensorInfo = GetTensorInfoForTfLiteTensor(kTfLiteRHSInputTensor);
+ const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(kTfLiteOutputTensor, true);
+
+ armnn::BatchMatMulDescriptor descriptor;
+ auto* params = reinterpret_cast<TfLiteBatchMatMulParams *>(tfLiteNode->builtin_data);
+
+        // The TensorFlow parameters are called adjoint, but behind the scenes they only perform a plain
+        // transpose, not a true adjoint; for real-valued tensors the two operations coincide.
+ descriptor.m_TransposeX = params->adj_x;
+ descriptor.m_TransposeY = params->adj_y;
+
+ // Check if supported
+ bool isSupported = false;
+ armnn::BackendId setBackend;
+ auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
+ {
+ FORWARD_LAYER_SUPPORT_FUNC("BATCH_MATMUL",
+ tfLiteContext,
+ IsBatchMatMulSupported,
+ delegateData.m_Backends,
+ isSupported,
+ setBackend,
+ armnnLHSInputTensorInfo,
+ armnnRHSInputTensorInfo,
+ outputTensorInfo,
+ descriptor);
+ };
+
+ if (!delegateData.m_Network)
+ {
+ validateFunc(outputTensorInfo, isSupported);
+ return isSupported ? kTfLiteOk : kTfLiteError;
+ }
+
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddBatchMatMulLayer(descriptor);
+        ARMNN_ASSERT(layer != nullptr);
+        layer->SetBackendId(setBackend);
+
+ armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
+ outputSlot.SetTensorInfo(outputTensorInfo);
+
+ // try to connect the Constant Inputs if there are any
+        if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
+
+ return Connect(layer, tfLiteNode, delegateData);
+ }
+} // namespace armnnDelegate
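
The adj_x/adj_y flags above reduce to plain transposes because, for real-valued data, conjugation is the identity. A standalone check with hypothetical 2x2 values:

    #include <cstdio>

    int main()
    {
        // For a real matrix, conj(a[i][j]) == a[i][j], so the adjoint equals the transpose.
        float a[2][2] = {{1.0f, 2.0f}, {3.0f, 4.0f}};
        float adjoint[2][2];
        for (int i = 0; i < 2; ++i)
        {
            for (int j = 0; j < 2; ++j)
            {
                adjoint[j][i] = a[i][j]; // transpose; no conjugation needed for real data
            }
        }
        std::printf("adjoint[0][1] = %.1f\n", adjoint[0][1]); // prints: 3.0
        return 0;
    }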
diff --git a/delegate/classic/src/BatchSpace.hpp b/delegate/classic/src/BatchSpace.hpp
new file mode 100644
index 0000000000..30c6dbfc15
--- /dev/null
+++ b/delegate/classic/src/BatchSpace.hpp
@@ -0,0 +1,216 @@
+//
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <tensorflow/lite/builtin_ops.h>
+#include <tensorflow/lite/c/builtin_op_data.h>
+#include <tensorflow/lite/c/common.h>
+#include <tensorflow/lite/minimal_logging.h>
+
+namespace armnnDelegate
+{
+
+TfLiteStatus VisitBatchToSpaceNdOperator(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ TfLiteNode* tfLiteNode,
+ int nodeIndex,
+ int32_t operatorCode)
+{
+ TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 3, nodeIndex));
+ TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+
+ const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
+ const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
+ if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+
+ const TfLiteTensor& tfLiteBlockShapeTensor = tfLiteTensors[tfLiteNode->inputs->data[1]];
+ if (!IsValid(tfLiteContext, tfLiteBlockShapeTensor, operatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+
+ const TfLiteTensor& tfLiteCropsTensor = tfLiteTensors[tfLiteNode->inputs->data[2]];
+ if (!IsValid(tfLiteContext, tfLiteCropsTensor, operatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+
+ const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
+ if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+
+ const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
+ const armnn::TensorInfo& blockShapeTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteBlockShapeTensor);
+ const armnn::TensorInfo& cropsTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteCropsTensor);
+ const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
+
+ std::vector<unsigned int> blockShape(blockShapeTensorInfo.GetNumElements());
+ ::memcpy(blockShape.data(), tfLiteBlockShapeTensor.data.data, blockShapeTensorInfo.GetNumBytes());
+
+ std::vector<unsigned int> cropsVector(cropsTensorInfo.GetNumElements());
+ std::memcpy(cropsVector.data(), tfLiteCropsTensor.data.data, cropsTensorInfo.GetNumBytes());
+
+ size_t step = 2;
+ std::vector<std::pair<unsigned int, unsigned int>> crops;
+ for (unsigned int i = 0; i < cropsTensorInfo.GetNumElements() / step; ++i)
+ {
+ crops.emplace_back(cropsVector[i * step], cropsVector[i * step + 1]);
+ }
+
+ armnn::BatchToSpaceNdDescriptor descriptor;
+ descriptor.m_BlockShape = blockShape;
+ descriptor.m_Crops = crops;
+ descriptor.m_DataLayout = armnn::DataLayout::NHWC;
+
+ // Check if supported
+ bool isSupported = false;
+ armnn::BackendId setBackend;
+ auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
+ {
+ FORWARD_LAYER_SUPPORT_FUNC("BATCH_TO_SPACE_ND",
+ tfLiteContext,
+ IsBatchToSpaceNdSupported,
+ delegateData.m_Backends,
+ isSupported,
+ setBackend,
+ inputTensorInfo,
+ outputTensorInfo,
+ descriptor);
+ };
+
+    // A null m_Network indicates that this call is the initial TfLite support-check pass for the operator.
+    // If the operator is supported, VisitBatchToSpaceNdOperator is called again to add the layer to the
+    // network, as seen below.
+ if (!delegateData.m_Network)
+ {
+ validateFunc(outputTensorInfo, isSupported);
+ return isSupported ? kTfLiteOk : kTfLiteError;
+ }
+
+ // Add a BatchToSpace layer
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddBatchToSpaceNdLayer(descriptor);
+    ARMNN_ASSERT(layer != nullptr);
+    layer->SetBackendId(setBackend);
+
+ armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
+ outputSlot.SetTensorInfo(outputTensorInfo);
+
+ // try to connect the Constant Inputs if there are any
+    if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
+
+ // Connect
+ return Connect(layer, tfLiteNode, delegateData);
+}
+
+TfLiteStatus VisitSpaceToBatchNdOperator(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ TfLiteNode* tfLiteNode,
+ int nodeIndex,
+ int32_t operatorCode)
+{
+ TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 3, nodeIndex));
+ TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+
+ const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
+ const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
+ if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+
+ const TfLiteTensor& tfLiteBlockShapeTensor = tfLiteTensors[tfLiteNode->inputs->data[1]];
+ if (!IsValid(tfLiteContext, tfLiteBlockShapeTensor, operatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+
+ const TfLiteTensor& tfLitePadListTensor = tfLiteTensors[tfLiteNode->inputs->data[2]];
+ if (!IsValid(tfLiteContext, tfLitePadListTensor, operatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+
+ const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
+ if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+
+ const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
+ const armnn::TensorInfo& blockShapeTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteBlockShapeTensor);
+ const armnn::TensorInfo& padListTensorInfo = GetTensorInfoForTfLiteTensor(tfLitePadListTensor);
+ const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
+
+ std::vector<unsigned int> blockShape(blockShapeTensorInfo.GetNumElements());
+ std::memcpy(blockShape.data(), tfLiteBlockShapeTensor.data.data, blockShapeTensorInfo.GetNumBytes());
+
+ std::vector<unsigned int> padListVector(padListTensorInfo.GetNumElements());
+ std::memcpy(padListVector.data(), tfLitePadListTensor.data.data, padListTensorInfo.GetNumBytes());
+
+ size_t step = 2;
+ std::vector<std::pair<unsigned int, unsigned int>> padList;
+ for (unsigned int i = 0; i < padListTensorInfo.GetNumElements() / step; ++i)
+ {
+ padList.emplace_back(padListVector[i * step], padListVector[i * step + 1]);
+ }
+
+ armnn::SpaceToBatchNdDescriptor descriptor;
+ descriptor.m_BlockShape = blockShape;
+ descriptor.m_PadList = padList;
+ descriptor.m_DataLayout = armnn::DataLayout::NHWC;
+
+ // Check if supported
+ bool isSupported = false;
+ armnn::BackendId setBackend;
+ auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
+ {
+ FORWARD_LAYER_SUPPORT_FUNC("SPACE_TO_BATCH_ND",
+ tfLiteContext,
+ IsSpaceToBatchNdSupported,
+ delegateData.m_Backends,
+ isSupported,
+ setBackend,
+ inputTensorInfo,
+ outputTensorInfo,
+ descriptor);
+ };
+
+    // A null m_Network indicates that this call is the initial TfLite support-check pass for the operator.
+    // If the operator is supported, VisitSpaceToBatchNdOperator is called again to add the layer to the
+    // network, as seen below.
+ if (!delegateData.m_Network)
+ {
+ validateFunc(outputTensorInfo, isSupported);
+ return isSupported ? kTfLiteOk : kTfLiteError;
+ }
+
+ // Add a SpaceToBatch layer
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddSpaceToBatchNdLayer(descriptor);
+    ARMNN_ASSERT(layer != nullptr);
+    layer->SetBackendId(setBackend);
+
+ armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
+ outputSlot.SetTensorInfo(outputTensorInfo);
+
+ // try to connect the Constant Inputs if there are any
+    if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
+
+ // Connect
+ return Connect(layer, tfLiteNode, delegateData);
+}
+
+} // namespace armnnDelegate
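
The step-2 loops above fold a flat [begin0, end0, begin1, end1, ...] tensor into one (begin, end) pair per spatial dimension. A standalone sketch of that pairing with hypothetical values:

    #include <cstdio>
    #include <utility>
    #include <vector>

    int main()
    {
        // Flat crops data as read out of the TfLite tensor (hypothetical values).
        const std::vector<unsigned int> cropsVector = {0u, 1u, 2u, 3u};
        std::vector<std::pair<unsigned int, unsigned int>> crops;
        const size_t step = 2;
        for (size_t i = 0; i < cropsVector.size() / step; ++i)
        {
            crops.emplace_back(cropsVector[i * step], cropsVector[i * step + 1]);
        }
        // crops is now {(0, 1), (2, 3)}.
        std::printf("%zu crop pairs\n", crops.size());
        return 0;
    }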
diff --git a/delegate/classic/src/Comparison.hpp b/delegate/classic/src/Comparison.hpp
new file mode 100644
index 0000000000..6d7700d191
--- /dev/null
+++ b/delegate/classic/src/Comparison.hpp
@@ -0,0 +1,135 @@
+//
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <DelegateUtils.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
+
+#include <tensorflow/lite/builtin_ops.h>
+#include <tensorflow/lite/c/builtin_op_data.h>
+#include <tensorflow/lite/c/common.h>
+#include <tensorflow/lite/minimal_logging.h>
+
+namespace armnnDelegate
+{
+
+TfLiteStatus VisitComparisonOperator(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ TfLiteNode* tfLiteNode,
+ int nodeIndex,
+ int32_t tfLiteComparisonOperatorCode)
+{
+ TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 2, nodeIndex));
+ TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+
+ const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
+ const TfLiteTensor& tfLiteInputTensor0 = tfLiteTensors[tfLiteNode->inputs->data[0]];
+ if (IsDynamicTensor(tfLiteInputTensor0))
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
+ tfLiteComparisonOperatorCode, nodeIndex);
+
+ return kTfLiteError;
+ }
+
+ const TfLiteTensor& tfLiteInputTensor1 = tfLiteTensors[tfLiteNode->inputs->data[1]];
+ if (IsDynamicTensor(tfLiteInputTensor1))
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
+ tfLiteComparisonOperatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+
+ const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
+ if (IsDynamicTensor(tfLiteOutputTensor))
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: Dynamic output tensors are not supported in operator #%d node #%d: ",
+ tfLiteComparisonOperatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+
+ armnn::TensorInfo inputTensorInfo0 = GetTensorInfoForTfLiteTensor(tfLiteInputTensor0);
+ armnn::TensorInfo inputTensorInfo1 = GetTensorInfoForTfLiteTensor(tfLiteInputTensor1);
+ const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
+
+ // Check if we need to expand the dims of any of the input tensor infos.
+ // This is required for a few of the backends.
+ if(inputTensorInfo0.GetNumDimensions() != inputTensorInfo1.GetNumDimensions())
+ {
+ ExpandTensorRankToEqual(inputTensorInfo0, inputTensorInfo1);
+ }
+
+ armnn::ComparisonOperation comparisonOperation = armnn::ComparisonOperation::Equal;
+ switch(tfLiteComparisonOperatorCode)
+ {
+ case kTfLiteBuiltinEqual:
+ comparisonOperation = armnn::ComparisonOperation::Equal;
+ break;
+ case kTfLiteBuiltinGreater:
+ comparisonOperation = armnn::ComparisonOperation::Greater;
+ break;
+ case kTfLiteBuiltinGreaterEqual:
+ comparisonOperation = armnn::ComparisonOperation::GreaterOrEqual;
+ break;
+ case kTfLiteBuiltinLess:
+ comparisonOperation = armnn::ComparisonOperation::Less;
+ break;
+ case kTfLiteBuiltinLessEqual:
+ comparisonOperation = armnn::ComparisonOperation::LessOrEqual;
+ break;
+ case kTfLiteBuiltinNotEqual:
+ comparisonOperation = armnn::ComparisonOperation::NotEqual;
+ break;
+ default:
+ return kTfLiteError;
+ }
+
+ armnn::ComparisonDescriptor descriptor(comparisonOperation);
+ bool isSupported = false;
+ armnn::BackendId setBackend;
+ auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
+ {
+ FORWARD_LAYER_SUPPORT_FUNC("COMPARISON",
+ tfLiteContext,
+ IsComparisonSupported,
+ delegateData.m_Backends,
+ isSupported,
+ setBackend,
+ inputTensorInfo0,
+ inputTensorInfo1,
+ outputTensorInfo,
+ descriptor);
+ };
+
+ if (!delegateData.m_Network)
+ {
+ validateFunc(outputTensorInfo, isSupported);
+ return isSupported ? kTfLiteOk : kTfLiteError;
+ }
+
+ armnn::IConnectableLayer* comparisonLayer = delegateData.m_Network->AddComparisonLayer(descriptor);
+    ARMNN_ASSERT(comparisonLayer != nullptr);
+    comparisonLayer->SetBackendId(setBackend);
+
+ armnn::IOutputSlot& outputSlot = comparisonLayer->GetOutputSlot(0);
+ outputSlot.SetTensorInfo(outputTensorInfo);
+
+ // try to connect the Constant Inputs if there are any
+    if (ProcessInputs(comparisonLayer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
+
+ return Connect(comparisonLayer, tfLiteNode, delegateData);
+}
+
+} // namespace armnnDelegate
diff --git a/delegate/classic/src/Control.hpp b/delegate/classic/src/Control.hpp
new file mode 100644
index 0000000000..a3ea6e92a7
--- /dev/null
+++ b/delegate/classic/src/Control.hpp
@@ -0,0 +1,342 @@
+//
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <armnn/utility/IgnoreUnused.hpp>
+
+#include <tensorflow/lite/builtin_ops.h>
+#include <tensorflow/lite/c/builtin_op_data.h>
+#include <tensorflow/lite/c/common.h>
+#include <tensorflow/lite/kernels/internal/tensor_ctypes.h>
+#include <tensorflow/lite/minimal_logging.h>
+
+#include <algorithm>
+#include <iterator>
+#include <string>
+#include <vector>
+
+namespace armnnDelegate
+{
+
+void SetupConcatViewOrigin(const armnn::TensorInfo& inputTensorInfo,
+ armnn::OriginsDescriptor& concatDescriptor,
+ const unsigned int concatAxis,
+ unsigned int inputIndex,
+ unsigned int& mergeDimOrigin)
+{
+ const uint32_t inputRank = concatDescriptor.GetNumDimensions();
+
+ // double check dimensions of the tensors
+ if (inputTensorInfo.GetNumDimensions() != inputRank)
+ {
+ throw armnn::ParseException("The number of dimensions for input tensors "
+ "of the concatenation operator should be: " + std::to_string(inputRank));
+ }
+
+ for (unsigned int j = 0; j < concatAxis; ++j)
+ {
+ concatDescriptor.SetViewOriginCoord(inputIndex, j, 0);
+ }
+
+ concatDescriptor.SetViewOriginCoord(inputIndex, concatAxis, mergeDimOrigin);
+ mergeDimOrigin += inputTensorInfo.GetShape()[concatAxis];
+
+ for (unsigned int j = concatAxis + 1; j < inputRank; ++j)
+ {
+ concatDescriptor.SetViewOriginCoord(inputIndex, j, 0);
+ }
+}
+
+TfLiteStatus VisitConcatenationOperator(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ TfLiteNode* tfLiteNode,
+ int nodeIndex,
+ int32_t tfLiteConcatOperatorCode)
+{
+ unsigned int numInputs = tfLiteNode->inputs->size;
+ if (numInputs < 2)
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+            tfLiteContext, "TfLiteArmnnDelegate: Minimum number of inputs (%d) not met (got %d) in node #%d",
+ 2, numInputs, nodeIndex);
+ return kTfLiteError;
+ }
+ TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+
+ const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
+
+ std::vector<armnn::TensorInfo> inputTensorInfos;
+ for (unsigned int i = 0; i < numInputs; ++i)
+ {
+ const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[i]];
+ if (!IsValid(tfLiteContext, tfLiteInputTensor, tfLiteConcatOperatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+
+ armnn::TensorInfo inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
+ inputTensorInfos.emplace_back(inputTensorInfo);
+ }
+
+ // Convert input tensors to const armnn::TensorInfo* type for FORWARD_LAYER_SUPPORT_FUNC.
+ std::vector<const armnn::TensorInfo*> inputConstTensorInfos;
+ std::transform(inputTensorInfos.begin(),
+ inputTensorInfos.end(),
+ std::back_inserter(inputConstTensorInfos),
+ [](armnn::TensorInfo& t)->const armnn::TensorInfo*{ return &t; });
+
+ const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
+ if (!IsValid(tfLiteContext, tfLiteOutputTensor, tfLiteConcatOperatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+
+ // Setup OriginsDescriptor, axis and view origin
+ unsigned int numConcatView = static_cast<unsigned int>(numInputs);
+ uint32_t inputRank = tfLiteTensors[tfLiteNode->inputs->data[0]].dims->size;
+
+ auto* concatenationParameters = reinterpret_cast<TfLiteConcatenationParams*>(tfLiteNode->builtin_data);
+
+ if(!concatenationParameters)
+ {
+        throw armnn::Exception("TfLiteArmnnDelegate: Concat parameters are null in node: " +
+                               std::to_string(nodeIndex));
+ }
+
+ const unsigned int concatDimInput = static_cast<unsigned int>(
+ (static_cast<int>(inputRank) + concatenationParameters->axis) % static_cast<int>(inputRank));
+
+ armnn::OriginsDescriptor concatDescriptor(static_cast<uint32_t>(numConcatView), inputRank);
+ concatDescriptor.SetConcatAxis(concatDimInput);
+
+ unsigned int mergeDimOrigin = 0;
+ for (unsigned int viewIndex = 0; viewIndex < numConcatView; ++viewIndex)
+ {
+ armnn::TensorInfo inputTensorInfo = GetTensorInfoForTfLiteTensor(
+ tfLiteTensors[tfLiteNode->inputs->data[viewIndex]]);
+
+ // Sets up concatDescriptor view origin
+ SetupConcatViewOrigin(inputTensorInfo, concatDescriptor, concatDimInput, viewIndex, mergeDimOrigin);
+ }
+
+ const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
+
+ // Verify we support the fused activation before attempting to create a layer
+ TfLiteFusedActivation activationType = concatenationParameters->activation;
+
+ TfLiteStatus activationStatus = ValidateFusedActivationOperator(delegateData, tfLiteContext, outputTensorInfo,
+ outputTensorInfo, activationType);
+ if(activationStatus != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
+
+ // Check if supported
+ bool isSupported = false;
+ armnn::BackendId setBackend;
+ auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
+ {
+ FORWARD_LAYER_SUPPORT_FUNC("CONCATENATION",
+ tfLiteContext,
+ IsConcatSupported,
+ delegateData.m_Backends,
+ isSupported,
+ setBackend,
+ inputConstTensorInfos,
+ outputTensorInfo,
+ concatDescriptor);
+ };
+
+ if (!delegateData.m_Network)
+ {
+ validateFunc(outputTensorInfo, isSupported);
+ return isSupported ? kTfLiteOk : kTfLiteError;
+ }
+
+ // Setup layer and connect.
+ armnn::IConnectableLayer* concatenationLayer = delegateData.m_Network->AddConcatLayer(concatDescriptor);
+    ARMNN_ASSERT(concatenationLayer != nullptr);
+    concatenationLayer->SetBackendId(setBackend);
+
+ // Connect the Constant Inputs
+ auto inputsTensorsProcess = ProcessInputs(concatenationLayer,
+ delegateData,
+ tfLiteContext,
+ tfLiteNode);
+ if (inputsTensorsProcess == kTfLiteError)
+ {
+ return inputsTensorsProcess;
+ }
+
+ armnn::IOutputSlot& outputSlot = concatenationLayer->GetOutputSlot(0);
+ outputSlot.SetTensorInfo(outputTensorInfo);
+ if(Connect(concatenationLayer, tfLiteNode, delegateData) != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
+
+ if (activationType == kTfLiteActNone)
+ {
+ // No Activation
+ return kTfLiteOk;
+ }
+
+ // Check and Create activation
+ return FusedActivation(tfLiteContext, tfLiteNode, activationType, concatenationLayer, 0, delegateData);
+}
+
+TfLiteStatus VisitMeanOperator(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ TfLiteNode* tfLiteNode,
+ int nodeIndex,
+ int32_t tfLiteMeanOperatorCode)
+{
+ TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 2, nodeIndex));
+ TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+
+ const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
+ const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
+ if(!IsValid(&tfLiteInputTensor))
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: Invalid input tensor in operator #%d node #%d: ",
+ tfLiteMeanOperatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+ if (IsDynamicTensor(tfLiteInputTensor))
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
+ tfLiteMeanOperatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+
+ const TfLiteTensor& tfLiteAxisTensor = tfLiteTensors[tfLiteNode->inputs->data[1]];
+ if(!IsValid(&tfLiteAxisTensor))
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: Invalid axis tensor in operator #%d node #%d: ",
+ tfLiteMeanOperatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+ if (IsDynamicTensor(tfLiteAxisTensor))
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: Dynamic axis tensors are not supported in operator #%d node #%d: ",
+ tfLiteMeanOperatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+
+ const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
+ if(!IsValid(&tfLiteOutputTensor))
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: Invalid output tensor in operator #%d node #%d: ",
+            tfLiteMeanOperatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+ if (IsDynamicTensor(tfLiteOutputTensor))
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: Dynamic output tensors are not supported in operator #%d node #%d: ",
+ tfLiteMeanOperatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+
+ const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
+ const armnn::TensorInfo& axisTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteAxisTensor);
+ const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
+
+ auto* axisTensorData = tflite::GetTensorData<int32_t>(&tfLiteAxisTensor);
+
+ std::vector<int32_t> axis;
+    // Add each axis value to the vector; it will be converted to unsigned int and assigned to the descriptor axis.
+ for (unsigned int i = 0; i < axisTensorInfo.GetNumElements(); ++i)
+ {
+ axis.emplace_back(axisTensorData[i]);
+ }
+
+ // Convert the axis to unsigned int and remove duplicates.
+ unsigned int rank = inputTensorInfo.GetNumDimensions();
+ std::set<unsigned int> uniqueAxis;
+ std::transform(axis.begin(),
+ axis.end(),
+ std::inserter(uniqueAxis, uniqueAxis.begin()),
+ [rank](int i)->unsigned int{ return (i + rank) % rank; });
+
+ // Setup MeanDescriptor and assign axis and keepDims
+ armnn::MeanDescriptor desc;
+ desc.m_Axis.assign(uniqueAxis.begin(), uniqueAxis.end());
+    desc.m_KeepDims = inputTensorInfo.GetNumDimensions() == outputTensorInfo.GetNumDimensions();
+
+ // Check if supported
+ bool isSupported = false;
+ armnn::BackendId setBackend;
+ auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
+ {
+ FORWARD_LAYER_SUPPORT_FUNC("MEAN",
+ tfLiteContext,
+ IsMeanSupported,
+ delegateData.m_Backends,
+ isSupported,
+ setBackend,
+ inputTensorInfo,
+ outputTensorInfo,
+ desc);
+ };
+
+ if (!delegateData.m_Network)
+ {
+ validateFunc(outputTensorInfo, isSupported);
+ return isSupported ? kTfLiteOk : kTfLiteError;
+ }
+
+ // Setup layer and connect.
+ armnn::IConnectableLayer* meanLayer = delegateData.m_Network->AddMeanLayer(desc);
+    ARMNN_ASSERT(meanLayer != nullptr);
+    meanLayer->SetBackendId(setBackend);
+
+ armnn::IOutputSlot& outputSlot = meanLayer->GetOutputSlot(0);
+ outputSlot.SetTensorInfo(outputTensorInfo);
+
+ // try to connect the Constant Inputs if there are any
+    if (ProcessInputs(meanLayer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
+
+ return Connect(meanLayer, tfLiteNode, delegateData);
+}
+
+TfLiteStatus VisitControlOperator(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ TfLiteNode* tfLiteNode,
+ int nodeIndex,
+ int32_t operatorCode)
+{
+ armnn::IgnoreUnused(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ operatorCode);
+
+ switch(operatorCode)
+ {
+ case kTfLiteBuiltinConcatenation:
+ return VisitConcatenationOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, operatorCode);
+ case kTfLiteBuiltinMean:
+ return VisitMeanOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, operatorCode);
+ default:
+ return kTfLiteError;
+ }
+}
+
+} // namespace armnnDelegate
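
Both VisitConcatenationOperator and VisitMeanOperator above normalise a possibly negative TfLite axis with (rank + axis) % rank. A standalone sketch of that arithmetic with hypothetical values:

    #include <cstdio>

    int main()
    {
        const int rank = 4; // e.g. an NHWC tensor
        const int axes[] = {-1, 0, 3};
        for (int axis : axes)
        {
            // Maps -1 to rank - 1 and leaves axes already in [0, rank) unchanged.
            unsigned int normalised = static_cast<unsigned int>((rank + axis) % rank);
            std::printf("axis %d -> %u\n", axis, normalised);
        }
        return 0;
    }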
diff --git a/delegate/classic/src/Convolution.hpp b/delegate/classic/src/Convolution.hpp
new file mode 100644
index 0000000000..f6a50615fc
--- /dev/null
+++ b/delegate/classic/src/Convolution.hpp
@@ -0,0 +1,870 @@
+//
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <DelegateUtils.hpp>
+#include "SharedFunctions.hpp"
+
+#include <tensorflow/lite/builtin_ops.h>
+#include <tensorflow/lite/c/builtin_op_data.h>
+#include <tensorflow/lite/c/common.h>
+#include <tensorflow/lite/minimal_logging.h>
+#include "tensorflow/lite/kernels/internal/tensor.h"
+
+namespace armnnDelegate
+{
+
+TfLiteStatus VisitConv2dOperator(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ TfLiteNode* tfLiteNode,
+ int nodeIndex,
+ int32_t operatorCode)
+{
+ auto numInputs = tfLiteNode->inputs->size;
+ if (numInputs < 2)
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+            tfLiteContext, "TfLiteArmnnDelegate: Minimum number of inputs (%d) not met (got %d) in node #%d",
+ 2, numInputs, nodeIndex);
+ return kTfLiteError;
+ }
+ TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+
+ armnn::Convolution2dDescriptor descriptor;
+ const auto params = reinterpret_cast<TfLiteConvParams*>(tfLiteNode->builtin_data);
+
+ bool biasEnabled = IsOptionalOperandPresent(tfLiteNode, 2);
+ descriptor.m_BiasEnabled = biasEnabled;
+ descriptor.m_StrideX = NonNegative(params->stride_width, nodeIndex);
+ descriptor.m_StrideY = NonNegative(params->stride_height, nodeIndex);
+ descriptor.m_DataLayout = armnn::DataLayout::NHWC;
+ descriptor.m_DilationX = NonNegative(params->dilation_width_factor, nodeIndex);
+ descriptor.m_DilationY = NonNegative(params->dilation_height_factor, nodeIndex);
+
+ const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
+ const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
+    if (!IsValid(&tfLiteInputTensor))
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: Invalid input tensor in operator #%d node #%d: ",
+ operatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+ if (IsDynamicTensor(tfLiteInputTensor))
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
+ operatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+ const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
+ if(!IsValid(&tfLiteOutputTensor))
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: Invalid output tensor in operator #%d node #%d: ",
+ operatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+ if (IsDynamicTensor(tfLiteOutputTensor))
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: Dynamic output tensors are not supported in operator #%d node #%d: ",
+ operatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+
+ const TfLiteTensor& tfLiteFilterTensor = tfLiteTensors[tfLiteNode->inputs->data[1]];
+ if(!IsValid(&tfLiteFilterTensor))
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: Invalid filter tensor in operator #%d node #%d: ",
+ operatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+ if (IsDynamicTensor(tfLiteFilterTensor))
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: Dynamic filter tensors are not supported in node #%d: ",
+ nodeIndex);
+ return kTfLiteError;
+ }
+
+ const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
+ const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
+
+ auto* tfLiteNodeParameters = reinterpret_cast<TfLiteConvParams*>(tfLiteNode->builtin_data);
+    TfLiteFusedActivation activationType = kTfLiteActNone;
+ if (tfLiteNodeParameters)
+ {
+ activationType = tfLiteNodeParameters->activation;
+ TfLiteStatus activationStatus = ValidateFusedActivationOperator(delegateData, tfLiteContext, outputTensorInfo,
+ outputTensorInfo, activationType);
+ if(activationStatus != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
+
+ }
+
+ const armnn::TensorInfo& filterTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteFilterTensor);
+
+ armnn::TensorInfo biasTensorInfo;
+ if(biasEnabled)
+ {
+ const TfLiteTensor& tfLiteBiasTensor = tfLiteTensors[tfLiteNode->inputs->data[2]];
+ if(!IsValid(&tfLiteBiasTensor))
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: Invalid bias tensor in operator #%d node #%d: ",
+ operatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+ if (IsDynamicTensor(tfLiteBiasTensor))
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: Dynamic bias tensors are not supported in node #%d: ",
+ nodeIndex);
+ return kTfLiteError;
+ }
+ biasTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteBiasTensor);
+ }
+ else
+ {
+ biasTensorInfo = armnn::TensorInfo(armnn::TensorShape({1}), GetDataType(tfLiteInputTensor));
+ }
+
+ armnn::Optional<armnn::TensorInfo> optionalBiasInfo(biasTensorInfo);
+
+ // TfLite uses NHWC tensors
+ const unsigned int inputHeight = inputTensorInfo.GetShape()[1];
+ const unsigned int inputWidth = inputTensorInfo.GetShape()[2];
+
+ const unsigned int filterHeight = filterTensorInfo.GetShape()[1];
+ const unsigned int filterWidth = filterTensorInfo.GetShape()[2];
+
+ // Calculate padding
+ CalcPadding(inputHeight, filterHeight, descriptor.m_StrideY, descriptor.m_DilationY,
+ descriptor.m_PadTop, descriptor.m_PadBottom, params->padding);
+ CalcPadding(inputWidth, filterWidth, descriptor.m_StrideX, descriptor.m_DilationX,
+ descriptor.m_PadLeft, descriptor.m_PadRight, params->padding);
+
+ armnn::BackendId setBackend;
+ if (!delegateData.m_Network)
+ {
+ bool isSupported = false;
+ FORWARD_LAYER_SUPPORT_FUNC("CONV2D",
+ tfLiteContext,
+ IsConvolution2dSupported,
+ delegateData.m_Backends,
+ isSupported,
+ setBackend,
+ inputTensorInfo,
+ outputTensorInfo,
+ descriptor,
+ filterTensorInfo,
+ optionalBiasInfo);
+ return isSupported ? kTfLiteOk : kTfLiteError;
+ }
+
+ // Set up filter and biases
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddConvolution2dLayer(descriptor);
+    ARMNN_ASSERT(layer != nullptr);
+    layer->SetBackendId(setBackend);
+
+ if(filterTensorInfo.IsConstant())
+ {
+ auto filter =
+ CreateConstTensor(&tfLiteContext->tensors[tfLiteNode->inputs->data[1]],
+ filterTensorInfo);
+
+ armnn::IConnectableLayer *weightsLayer = delegateData.m_Network->AddConstantLayer(filter);
+ weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1u));
+ weightsLayer->GetOutputSlot(0).SetTensorInfo(filterTensorInfo);
+ }
+
+ if (biasEnabled)
+ {
+ const TfLiteTensor& tfLiteBiasTensor = tfLiteTensors[tfLiteNode->inputs->data[2]];
+ if(biasTensorInfo.IsConstant())
+ {
+ auto biasTensor = CreateConstTensor(&tfLiteBiasTensor, biasTensorInfo);
+ armnn::IConnectableLayer* biasLayer = delegateData.m_Network->AddConstantLayer(biasTensor);
+ ARMNN_ASSERT(biasLayer != nullptr);
+ biasLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(2u));
+ biasLayer->GetOutputSlot(0).SetTensorInfo(biasTensorInfo);
+ }
+ }
+
+ // The data input can also be constant, so we must check that this is also allocated to an input slot
+ if(inputTensorInfo.IsConstant())
+ {
+ auto input =
+ CreateConstTensor(&tfLiteContext->tensors[tfLiteNode->inputs->data[0]],
+ inputTensorInfo);
+
+ armnn::IConnectableLayer *inputLayer = delegateData.m_Network->AddConstantLayer(input);
+ inputLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0u));
+ inputLayer->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
+ }
+
+ armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
+ outputSlot.SetTensorInfo(outputTensorInfo);
+
+ if(Connect(layer, tfLiteNode, delegateData) != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
+
+ if (!tfLiteNodeParameters)
+ {
+ // No Activation
+ return kTfLiteOk;
+ }
+ // Check and Create activation
+ return FusedActivation(tfLiteContext, tfLiteNode, activationType, layer, 0, delegateData);
+
+}
+
+// Conv3d is only correctly supported for external delegates from TF Lite v2.6, as there was a breaking bug in v2.5.
+#if defined(ARMNN_POST_TFLITE_2_5)
+TfLiteStatus VisitConv3dOperator(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ TfLiteNode* tfLiteNode,
+ int nodeIndex,
+ int32_t operatorCode)
+{
+ auto numInputs = tfLiteNode->inputs->size;
+ if (numInputs < 2)
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+            tfLiteContext, "TfLiteArmnnDelegate: Minimum number of inputs (%d) not met (got %d) in node #%d",
+ 2, numInputs, nodeIndex);
+ return kTfLiteError;
+ }
+ TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+
+ armnn::Convolution3dDescriptor descriptor;
+ const auto params = reinterpret_cast<TfLiteConv3DParams*>(tfLiteNode->builtin_data);
+
+ bool biasEnabled = IsOptionalOperandPresent(tfLiteNode, 2);
+ descriptor.m_BiasEnabled = biasEnabled;
+ descriptor.m_DataLayout = armnn::DataLayout::NDHWC;
+ descriptor.m_StrideX = NonNegative(params->stride_width, nodeIndex);
+ descriptor.m_StrideY = NonNegative(params->stride_height, nodeIndex);
+ descriptor.m_StrideZ = NonNegative(params->stride_depth, nodeIndex);
+ descriptor.m_DilationX = NonNegative(params->dilation_width_factor, nodeIndex);
+ descriptor.m_DilationY = NonNegative(params->dilation_height_factor, nodeIndex);
+ descriptor.m_DilationZ = NonNegative(params->dilation_depth_factor, nodeIndex);
+
+ const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
+ const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
+ if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+
+ const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
+ if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+
+ const TfLiteTensor& tfLiteFilterTensor = tfLiteTensors[tfLiteNode->inputs->data[1]];
+ if (!IsValid(tfLiteContext, tfLiteFilterTensor, operatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+
+ const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
+ const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
+
+ auto* tfLiteNodeParameters = reinterpret_cast<TfLiteConv3DParams*>(tfLiteNode->builtin_data);
+ TfLiteFusedActivation activationType = kTfLiteActNone;
+ if (tfLiteNodeParameters)
+ {
+ activationType = tfLiteNodeParameters->activation;
+ TfLiteStatus activationStatus = ValidateFusedActivationOperator(delegateData, tfLiteContext, outputTensorInfo,
+ outputTensorInfo, activationType);
+ if(activationStatus != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
+ }
+
+ const armnn::TensorInfo& filterTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteFilterTensor);
+
+ armnn::TensorInfo biasTensorInfo;
+ if(biasEnabled)
+ {
+ const TfLiteTensor& tfLiteBiasTensor = tfLiteTensors[tfLiteNode->inputs->data[2]];
+ if (!IsValid(tfLiteContext, tfLiteBiasTensor, operatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+ biasTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteBiasTensor);
+ }
+ else
+ {
+ biasTensorInfo = armnn::TensorInfo(armnn::TensorShape({1}), GetDataType(tfLiteInputTensor));
+ }
+
+ armnn::Optional<armnn::TensorInfo> optionalBiasInfo(biasTensorInfo);
+
+ // TfLite uses NDHWC tensors
+ const unsigned int inputDepth = inputTensorInfo.GetShape()[1];
+ const unsigned int inputHeight = inputTensorInfo.GetShape()[2];
+ const unsigned int inputWidth = inputTensorInfo.GetShape()[3];
+
+ // Assuming the filter is DHWIO : Depth, Height, Width, InputChannels, OutputChannels
+ const unsigned int filterDepth = filterTensorInfo.GetShape()[0];
+ const unsigned int filterHeight = filterTensorInfo.GetShape()[1];
+ const unsigned int filterWidth = filterTensorInfo.GetShape()[2];
+
+ // Calculate padding
+ CalcPadding(inputDepth, filterDepth, descriptor.m_StrideZ, descriptor.m_DilationZ,
+ descriptor.m_PadFront, descriptor.m_PadBack, params->padding);
+ CalcPadding(inputHeight, filterHeight, descriptor.m_StrideY, descriptor.m_DilationY,
+ descriptor.m_PadTop, descriptor.m_PadBottom, params->padding);
+ CalcPadding(inputWidth, filterWidth, descriptor.m_StrideX, descriptor.m_DilationX,
+ descriptor.m_PadLeft, descriptor.m_PadRight, params->padding);
+
+ // If the m_Network is a nullptr, this signals that a prerequisite TfLite callback is required to clarify the
+ // support for the operator
+ // If supported, VisitConvolutionOperator will be called again to add the layer to the network as seen below.
+ armnn::BackendId setBackend;
+ if (!delegateData.m_Network)
+ {
+ bool isSupported = false;
+ FORWARD_LAYER_SUPPORT_FUNC("CONV3D",
+ tfLiteContext,
+ IsConvolution3dSupported,
+ delegateData.m_Backends,
+ isSupported,
+ setBackend,
+ inputTensorInfo,
+ outputTensorInfo,
+ descriptor,
+ filterTensorInfo,
+ optionalBiasInfo);
+ return isSupported ? kTfLiteOk : kTfLiteError;
+ }
+
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddConvolution3dLayer(descriptor);
+ layer->SetBackendId(setBackend);
+ ARMNN_ASSERT(layer != nullptr);
+
+ // Add a constant layer for weights and biases if inputs are constant,
+ // which are connected to the Convolution3d layer as inputs.
+ if (filterTensorInfo.IsConstant())
+ {
+ auto filter = CreateConstTensor(&tfLiteFilterTensor,
+ filterTensorInfo);
+
+ armnn::IConnectableLayer* weightsLayer = delegateData.m_Network->AddConstantLayer(filter);
+ ARMNN_ASSERT(weightsLayer != nullptr);
+
+ weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1u));
+ weightsLayer->GetOutputSlot(0).SetTensorInfo(filterTensorInfo);
+ }
+
+ if(biasEnabled)
+ {
+ const TfLiteTensor& tfLiteBiasTensor = tfLiteTensors[tfLiteNode->inputs->data[2]];
+ if(biasTensorInfo.IsConstant())
+ {
+ auto biases = CreateConstTensor(&tfLiteBiasTensor,
+ biasTensorInfo);
+
+ armnn::IConnectableLayer* biasLayer = delegateData.m_Network->AddConstantLayer(biases);
+ ARMNN_ASSERT(biasLayer != nullptr);
+
+ biasLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(2u));
+ biasLayer->GetOutputSlot(0).SetTensorInfo(biasTensorInfo);
+ }
+ }
+
+ // The data input can also be constant, so we must check that this is also allocated to an input slot
+ if(inputTensorInfo.IsConstant())
+ {
+ auto input =
+ CreateConstTensor(&tfLiteContext->tensors[tfLiteNode->inputs->data[0]],
+ inputTensorInfo);
+
+ armnn::IConnectableLayer *inputLayer = delegateData.m_Network->AddConstantLayer(input);
+ inputLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0u));
+ inputLayer->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
+ }
+
+ armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
+ outputSlot.SetTensorInfo(outputTensorInfo);
+
+ if(Connect(layer, tfLiteNode, delegateData) != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
+
+ if (!tfLiteNodeParameters)
+ {
+ // No Activation
+ return kTfLiteOk;
+ }
+
+ // Check and create activation
+ return FusedActivation(tfLiteContext, tfLiteNode, activationType, layer, 0, delegateData);
+}
+#endif
+
+TfLiteStatus VisitDepthwiseConv2dOperator(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ TfLiteNode* tfLiteNode,
+ int nodeIndex,
+ int32_t operatorCode)
+{
+ auto numInputs = tfLiteNode->inputs->size;
+ if (numInputs < 2)
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext, "TfLiteArmnnDelegate: Minimum number of inputs (%d != %d) in node #%d",
+ 2, numInputs, nodeIndex);
+ return kTfLiteError;
+ }
+ TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+
+ bool biasEnabled = IsOptionalOperandPresent(tfLiteNode, 2);
+
+ armnn::DepthwiseConvolution2dDescriptor descriptor;
+ const auto params = reinterpret_cast<TfLiteDepthwiseConvParams*>(tfLiteNode->builtin_data);
+
+ descriptor.m_BiasEnabled = biasEnabled;
+ descriptor.m_StrideX = NonNegative(params->stride_width, nodeIndex);
+ descriptor.m_StrideY = NonNegative(params->stride_height, nodeIndex);
+ descriptor.m_DataLayout = armnn::DataLayout::NHWC;
+ descriptor.m_DilationX = NonNegative(params->dilation_width_factor, nodeIndex);
+ descriptor.m_DilationY = NonNegative(params->dilation_height_factor, nodeIndex);
+
+ const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
+ const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
+ if(!IsValid(&tfLiteInputTensor))
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: Invalid input tensor in operator #%d node #%d: ",
+ operatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+ if (IsDynamicTensor(tfLiteInputTensor))
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
+ operatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+ const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
+ if(!IsValid(&tfLiteOutputTensor))
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: Invalid output tensor in operator #%d node #%d: ",
+ operatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+ if (IsDynamicTensor(tfLiteOutputTensor))
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: Dynamic output tensors are not supported in operator #%d node #%d: ",
+ operatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+
+ const TfLiteTensor& tfLiteFilterTensor = tfLiteTensors[tfLiteNode->inputs->data[1]];
+ if(!IsValid(&tfLiteFilterTensor))
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: Invalid filter tensor in operator #%d node #%d: ",
+ operatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+ if (IsDynamicTensor(tfLiteFilterTensor))
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: Dynamic filter tensors are not supported in node #%d: ",
+ nodeIndex);
+ return kTfLiteError;
+ }
+
+ const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
+ const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
+
+ auto* tfLiteNodeParameters = reinterpret_cast<TfLiteDepthwiseConvParams *>(tfLiteNode->builtin_data);
+ TfLiteFusedActivation activationType = kTfLiteActNone;
+ if (tfLiteNodeParameters)
+ {
+ activationType = tfLiteNodeParameters->activation;
+ TfLiteStatus activationStatus = ValidateFusedActivationOperator(delegateData, tfLiteContext, outputTensorInfo,
+ outputTensorInfo, activationType);
+ if(activationStatus != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
+ }
+
+ const armnn::TensorInfo& filterTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteFilterTensor);
+
+ // Assuming input is NHWC
+ unsigned int inputHeight = inputTensorInfo.GetShape()[1];
+ unsigned int inputWidth = inputTensorInfo.GetShape()[2];
+
+ // TensorflowLite weights come in the format [1, H, W, I * M]
+ unsigned int filterHeight = filterTensorInfo.GetShape()[1];
+ unsigned int filterWidth = filterTensorInfo.GetShape()[2];
+
+ // Calculate padding
+ CalcPadding(inputHeight, filterHeight, descriptor.m_StrideY, descriptor.m_DilationY,
+ descriptor.m_PadTop, descriptor.m_PadBottom, params->padding);
+ CalcPadding(inputWidth, filterWidth, descriptor.m_StrideX, descriptor.m_DilationX,
+ descriptor.m_PadLeft, descriptor.m_PadRight, params->padding);
+
+ armnn::TensorInfo biasTensorInfo;
+ if(biasEnabled)
+ {
+ const TfLiteTensor& tfLiteBiasTensor = tfLiteTensors[tfLiteNode->inputs->data[2]];
+ if(!IsValid(&tfLiteBiasTensor))
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: Invalid bias tensor in operator #%d node #%d: ",
+ operatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+ if (IsDynamicTensor(tfLiteBiasTensor))
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: Dynamic bias tensors are not supported in node #%d: ",
+ nodeIndex);
+ return kTfLiteError;
+ }
+ biasTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteBiasTensor);
+ }
+ else
+ {
+ biasTensorInfo = armnn::TensorInfo(armnn::TensorShape({1}), GetDataType(tfLiteInputTensor));
+ }
+
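+ // If m_Network is a nullptr, this is the support-check phase: report whether a backend accepts the
+ // layer without adding anything to the network.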
+ armnn::BackendId setBackend;
+ if (!delegateData.m_Network)
+ {
+ bool isSupported = false;
+ FORWARD_LAYER_SUPPORT_FUNC("DEPTHWISE_CONV2D",
+ tfLiteContext,
+ IsDepthwiseConvolutionSupported,
+ delegateData.m_Backends,
+ isSupported,
+ setBackend,
+ inputTensorInfo,
+ outputTensorInfo,
+ descriptor,
+ filterTensorInfo,
+ armnn::Optional<armnn::TensorInfo>(biasTensorInfo));
+ return isSupported ? kTfLiteOk : kTfLiteError;
+ }
+
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddDepthwiseConvolution2dLayer(descriptor);
+ layer->SetBackendId(setBackend);
+
+ if(filterTensorInfo.IsConstant())
+ {
+ // For depthwise the weights layout is the same as for tflite [1, H, W, I*M]. No permutation required.
+ auto filter = CreateConstTensor(&tfLiteFilterTensor, filterTensorInfo);
+
+ armnn::IConnectableLayer* weightsLayer = delegateData.m_Network->AddConstantLayer(filter);
+ weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1u));
+ weightsLayer->GetOutputSlot(0).SetTensorInfo(filterTensorInfo);
+ }
+
+ if (biasEnabled)
+ {
+ const TfLiteTensor& tfLiteBiasTensor = tfLiteTensors[tfLiteNode->inputs->data[2]];
+ if(biasTensorInfo.IsConstant())
+ {
+ auto biasTensor = CreateConstTensor(&tfLiteBiasTensor, biasTensorInfo);
+ armnn::IConnectableLayer* biasLayer = delegateData.m_Network->AddConstantLayer(biasTensor);
+ ARMNN_ASSERT(biasLayer != nullptr);
+ biasLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(2u));
+ biasLayer->GetOutputSlot(0).SetTensorInfo(biasTensorInfo);
+ }
+ }
+
+ // The data input can also be constant, so we must check that this is also allocated to an input slot
+ if(inputTensorInfo.IsConstant())
+ {
+ auto input =
+ CreateConstTensor(&tfLiteContext->tensors[tfLiteNode->inputs->data[0]],
+ inputTensorInfo);
+
+ armnn::IConnectableLayer *inputLayer = delegateData.m_Network->AddConstantLayer(input);
+ inputLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0u));
+ inputLayer->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
+ }
+
+ ARMNN_ASSERT(layer != nullptr);
+
+ armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
+ outputSlot.SetTensorInfo(outputTensorInfo);
+
+ if(Connect(layer, tfLiteNode, delegateData) != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
+
+ if (!tfLiteNodeParameters)
+ {
+ // No Activation
+ return kTfLiteOk;
+ }
+ // Check and create activation
+ return FusedActivation(tfLiteContext, tfLiteNode, activationType, layer, 0, delegateData);
+}
+
+TfLiteStatus VisitTransposeConv2dOperator(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ TfLiteNode* tfLiteNode,
+ int nodeIndex,
+ int32_t operatorCode)
+{
+ TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 3, nodeIndex));
+ TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+
+ armnn::TransposeConvolution2dDescriptor descriptor;
+ auto* parameters = reinterpret_cast<TfLiteTransposeConvParams*>(tfLiteNode->builtin_data);
+ descriptor.m_BiasEnabled = false;
+ descriptor.m_StrideX = NonNegative(parameters->stride_width, nodeIndex);
+ descriptor.m_StrideY = NonNegative(parameters->stride_height, nodeIndex);
+ descriptor.m_DataLayout = armnn::DataLayout::NHWC;
+
+ const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
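+ // TfLite orders the TRANSPOSE_CONV inputs as: [0] output shape, [1] filter (weights), [2] input.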
+ const TfLiteTensor& tfLiteOutputShapeTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
+ if(!IsValid(&tfLiteOutputShapeTensor))
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: Invalid input tensor in operator #%d node #%d: ",
+ operatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+ if (IsDynamicTensor(tfLiteOutputShapeTensor))
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
+ operatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+
+ const armnn::TensorInfo outputShapeTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputShapeTensor);
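+ // Read the requested output shape from the constant tensor; models may store it as int32 or as
+ // quantized uint8, so both representations are handled here.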
+ std::vector<int32_t> outputShape(outputShapeTensorInfo.GetNumElements());
+ if (outputShapeTensorInfo.GetDataType() == armnn::DataType::Signed32)
+ {
+ for(unsigned int i=0; i < outputShapeTensorInfo.GetNumElements(); i++)
+ {
+ outputShape[i] = ::tflite::GetTensorData<int32_t>(&tfLiteOutputShapeTensor)[i];
+ }
+ }
+
+ if (outputShapeTensorInfo.GetDataType() == armnn::DataType::QAsymmU8)
+ {
+ for(unsigned int i=0; i < outputShapeTensorInfo.GetNumElements(); i++)
+ {
+ outputShape[i] = ::tflite::GetTensorData<uint8_t>(&tfLiteOutputShapeTensor)[i];
+ }
+ }
+ // Change from signed to unsigned int to store in TransposeConvolution2dDescriptor.
+ for (int dimension : outputShape)
+ {
+ descriptor.m_OutputShape.push_back(static_cast<unsigned int>(dimension));
+ }
+ descriptor.m_OutputShapeEnabled = true;
+
+ const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[2]];
+ if(!IsValid(&tfLiteInputTensor))
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: Invalid input tensor in operator #%d node #%d: ",
+ operatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+ if (IsDynamicTensor(tfLiteInputTensor))
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
+ operatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+
+ const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
+ if(!IsValid(&tfLiteOutputTensor))
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: Invalid output tensor in operator #%d node #%d: ",
+ operatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+ if (IsDynamicTensor(tfLiteOutputTensor))
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: Dynamic output tensors are not supported in operator #%d node #%d: ",
+ operatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+
+ const TfLiteTensor& tfLiteFilterTensor = tfLiteTensors[tfLiteNode->inputs->data[1]];
+ if(!IsValid(&tfLiteFilterTensor))
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: Invalid filter tensor in operator #%d node #%d: ",
+ operatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+ if (IsDynamicTensor(tfLiteFilterTensor))
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: Dynamic filter tensors are not supported in operator #%d node #%d: ",
+ operatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+
+ const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
+ const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
+ const armnn::TensorInfo& filterTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteFilterTensor);
+
+ // TfLite uses NHWC tensors
+ const unsigned int inputHeight = inputTensorInfo.GetShape()[1];
+ const unsigned int inputWidth = inputTensorInfo.GetShape()[2];
+
+ const unsigned int filterHeight = filterTensorInfo.GetShape()[1];
+ const unsigned int filterWidth = filterTensorInfo.GetShape()[2];
+
+ // Calculate padding
+ CalcPadding(inputHeight,
+ filterHeight,
+ descriptor.m_StrideY,
+ 1, // dilation y
+ descriptor.m_PadTop,
+ descriptor.m_PadBottom,
+ parameters->padding);
+ CalcPadding(inputWidth,
+ filterWidth,
+ descriptor.m_StrideX,
+ 1, // dilation x
+ descriptor.m_PadLeft,
+ descriptor.m_PadRight,
+ parameters->padding);
+
+ // Set up filter
+ auto filterTensor = CreateConstTensor(&tfLiteFilterTensor,
+ filterTensorInfo);
+ armnn::BackendId setBackend;
+ if (!delegateData.m_Network)
+ {
+ bool isSupported = false;
+ FORWARD_LAYER_SUPPORT_FUNC("TRANSPOSE_CONV2D",
+ tfLiteContext,
+ IsTransposeConvolution2dSupported,
+ delegateData.m_Backends,
+ isSupported,
+ setBackend,
+ inputTensorInfo,
+ outputTensorInfo,
+ descriptor,
+ filterTensorInfo,
+ armnn::EmptyOptional());
+ return isSupported ? kTfLiteOk : kTfLiteError;
+ }
+
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddTransposeConvolution2dLayer(descriptor,
+ filterTensor,
+ armnn::EmptyOptional());
+ layer->SetBackendId(setBackend);
+ ARMNN_ASSERT(layer != nullptr);
+
+ // The data input can be constant, so we must check that this is allocated to an input slot
+ if(inputTensorInfo.IsConstant())
+ {
+ auto input =
+ CreateConstTensor(&tfLiteContext->tensors[tfLiteNode->inputs->data[2]],
+ inputTensorInfo);
+
+ armnn::IConnectableLayer *inputLayer = delegateData.m_Network->AddConstantLayer(input);
+ inputLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0u));
+ inputLayer->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
+ }
+
+ armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
+ outputSlot.SetTensorInfo(outputTensorInfo);
+
+ // Connect
+ if (delegateData.m_OutputSlotForNode[static_cast<unsigned int>(tfLiteNode->inputs->data[2])] != nullptr)
+ {
+ delegateData.m_OutputSlotForNode[static_cast<unsigned int>(tfLiteNode->inputs->data[2])]->
+ Connect(layer->GetInputSlot(0));
+ }
+
+ // Prepare output slots
+ for (unsigned int outputIndex = 0; outputIndex < layer->GetNumOutputSlots(); ++outputIndex)
+ {
+ armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(outputIndex);
+ delegateData.m_OutputSlotForNode[static_cast<unsigned int>(tfLiteNode->outputs->data[outputIndex])] =
+ &outputSlot;
+ }
+ return kTfLiteOk;
+}
+
+TfLiteStatus VisitConvolutionOperator(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ TfLiteNode* tfLiteNode,
+ int nodeIndex,
+ int32_t operatorCode)
+{
+ switch(operatorCode)
+ {
+ case kTfLiteBuiltinConv2d:
+ return VisitConv2dOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, operatorCode);
+// Conv3d is only correctly supported for external delegates from TF Lite v2.6, as there was a breaking bug in v2.5.
+#if defined(ARMNN_POST_TFLITE_2_5)
+ case kTfLiteBuiltinConv3d:
+ return VisitConv3dOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, operatorCode);
+#endif
+ case kTfLiteBuiltinDepthwiseConv2d:
+ return VisitDepthwiseConv2dOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, operatorCode);
+ case kTfLiteBuiltinTransposeConv:
+ return VisitTransposeConv2dOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, operatorCode);
+ default:
+ return kTfLiteError;
+ }
+}
+
+} // namespace armnnDelegate
diff --git a/delegate/classic/src/ElementwiseBinary.hpp b/delegate/classic/src/ElementwiseBinary.hpp
new file mode 100644
index 0000000000..e11327b95a
--- /dev/null
+++ b/delegate/classic/src/ElementwiseBinary.hpp
@@ -0,0 +1,401 @@
+//
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <DelegateUtils.hpp>
+#include "MultiLayerFacade.hpp"
+#include "SharedFunctions.hpp"
+
+#include <tensorflow/lite/builtin_ops.h>
+#include <tensorflow/lite/c/builtin_op_data.h>
+#include <tensorflow/lite/c/common.h>
+#include <tensorflow/lite/minimal_logging.h>
+#include "tensorflow/lite/delegates/utils.h"
+
+namespace armnnDelegate
+{
+
+TfLiteStatus ValidateAddOperator(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ const armnn::TensorInfo& inputInfo1,
+ const armnn::TensorInfo& inputInfo2,
+ const armnn::TensorInfo& outputInfo)
+{
+ bool isSupported = false;
+ auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
+ {
+ std::vector<armnn::TensorInfo> infos { inputInfo1, inputInfo2, outputInfo };
+ FORWARD_LAYER_SUPPORT_FUNC("ADD",
+ tfLiteContext,
+ IsElementwiseBinarySupported,
+ delegateData.m_Backends,
+ isSupported,
+ armnn::BackendId(),
+ inputInfo1,
+ inputInfo2,
+ outputInfo,
+ armnn::BinaryOperation::Add);
+ };
+
+ validateFunc(outputInfo, isSupported);
+ return isSupported ? kTfLiteOk : kTfLiteError;
+}
+
+
+TfLiteStatus ValidateDivOperator(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ const armnn::TensorInfo& inputInfo1,
+ const armnn::TensorInfo& inputInfo2,
+ const armnn::TensorInfo& outputInfo)
+{
+ bool isSupported = false;
+ auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
+ {
+ FORWARD_LAYER_SUPPORT_FUNC("DIV",
+ tfLiteContext,
+ IsElementwiseBinarySupported,
+ delegateData.m_Backends,
+ isSupported,
+ armnn::BackendId(),
+ inputInfo1,
+ inputInfo2,
+ outputTensorInfo,
+ armnn::BinaryOperation::Div);
+ };
+
+ validateFunc(outputInfo, isSupported);
+ return isSupported ? kTfLiteOk : kTfLiteError;
+}
+
+TfLiteStatus ValidateFloorDivOperator(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ const armnn::TensorInfo& inputInfo1,
+ const armnn::TensorInfo& inputInfo2,
+ const armnn::TensorInfo& outputInfo)
+{
+ // We first need to validate that the Div operator is supported,
+ // then that the Floor operator is supported.
+ TfLiteStatus status = ValidateDivOperator(delegateData, tfLiteContext, inputInfo1, inputInfo2, outputInfo);
+ if (status != kTfLiteOk)
+ {
+ return status;
+ }
+ // if the inputs and output of the div are all Signed32 we don't need to add the floor operator afterward.
+ if (AreAllSigned32(inputInfo1, inputInfo2, outputInfo))
+ {
+ return status;
+ }
+ // If one of the Div inputs is being broadcast, pass the full-sized input tensor
+ // to the Floor validation routine.
+ armnn::TensorInfo floorInputInfo = inputInfo1;
+ if (inputInfo1.GetNumDimensions() < inputInfo2.GetNumDimensions())
+ {
+ floorInputInfo = inputInfo2;
+ }
+ status = ValidateFloorOperator(delegateData, tfLiteContext, floorInputInfo, outputInfo);
+ return status;
+}
+
+TfLiteStatus ValidateMaximumOperator(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ const armnn::TensorInfo& inputInfo1,
+ const armnn::TensorInfo& inputInfo2,
+ const armnn::TensorInfo& outputInfo)
+{
+ bool isSupported = false;
+ auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
+ {
+ FORWARD_LAYER_SUPPORT_FUNC("MAXIMUM",
+ tfLiteContext,
+ IsElementwiseBinarySupported,
+ delegateData.m_Backends,
+ isSupported,
+ armnn::BackendId(),
+ inputInfo1,
+ inputInfo2,
+ outputTensorInfo,
+ armnn::BinaryOperation::Maximum);
+ };
+
+ validateFunc(outputInfo, isSupported);
+ return isSupported ? kTfLiteOk : kTfLiteError;
+}
+
+TfLiteStatus ValidateMinimumOperator(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ const armnn::TensorInfo& inputInfo1,
+ const armnn::TensorInfo& inputInfo2,
+ const armnn::TensorInfo& outputInfo)
+{
+ bool isSupported = false;
+ auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
+ {
+ FORWARD_LAYER_SUPPORT_FUNC("MINIMUM",
+ tfLiteContext,
+ IsElementwiseBinarySupported,
+ delegateData.m_Backends,
+ isSupported,
+ armnn::BackendId(),
+ inputInfo1,
+ inputInfo2,
+ outputTensorInfo,
+ armnn::BinaryOperation::Minimum);
+ };
+
+ validateFunc(outputInfo, isSupported);
+ return isSupported ? kTfLiteOk : kTfLiteError;
+}
+
+TfLiteStatus ValidateMulOperator(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ const armnn::TensorInfo& inputInfo1,
+ const armnn::TensorInfo& inputInfo2,
+ const armnn::TensorInfo& outputInfo)
+{
+ bool isSupported = false;
+ auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
+ {
+ FORWARD_LAYER_SUPPORT_FUNC("MUL",
+ tfLiteContext,
+ IsElementwiseBinarySupported,
+ delegateData.m_Backends,
+ isSupported,
+ armnn::BackendId(),
+ inputInfo1,
+ inputInfo2,
+ outputTensorInfo,
+ armnn::BinaryOperation::Mul);
+ };
+
+ validateFunc(outputInfo, isSupported);
+ return isSupported ? kTfLiteOk : kTfLiteError;
+}
+
+TfLiteStatus ValidateSubOperator(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ const armnn::TensorInfo& inputInfo1,
+ const armnn::TensorInfo& inputInfo2,
+ const armnn::TensorInfo& outputInfo)
+{
+ bool isSupported = false;
+ auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
+ {
+ FORWARD_LAYER_SUPPORT_FUNC("SUB",
+ tfLiteContext,
+ IsElementwiseBinarySupported,
+ delegateData.m_Backends,
+ isSupported,
+ armnn::BackendId(),
+ inputInfo1,
+ inputInfo2,
+ outputTensorInfo,
+ armnn::BinaryOperation::Sub);
+ };
+
+ validateFunc(outputInfo, isSupported);
+ return isSupported ? kTfLiteOk : kTfLiteError;
+}
+
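+// Helper for FLOOR_DIV: adds a Div layer and, unless the output is Signed32, a Floor layer after it.
+// Returns {firstLayer, lastLayer} so the caller can connect inputs to the first and outputs from the last.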
+std::pair<armnn::IConnectableLayer*, armnn::IConnectableLayer*> AddFloorDivLayer(
+ DelegateData& delegateData,
+ const armnn::TensorInfo& outputTensorInfo)
+{
+ armnn::IConnectableLayer* divisionLayer = delegateData.m_Network->AddElementwiseBinaryLayer(
+ armnn::BinaryOperation::Div);
+ // if the output of the div is Signed32 the Floor layer is not required
+ if (armnn::DataType::Signed32 == outputTensorInfo.GetDataType())
+ {
+ return std::make_pair(divisionLayer, divisionLayer);
+ }
+ armnn::IOutputSlot& outputSlot = divisionLayer->GetOutputSlot(0);
+ outputSlot.SetTensorInfo(outputTensorInfo);
+ armnn::IConnectableLayer* floorLayer = delegateData.m_Network->AddFloorLayer();
+ outputSlot.Connect(floorLayer->GetInputSlot(0));
+ return std::make_pair(divisionLayer, floorLayer);
+}
+
+TfLiteStatus VisitElementwiseBinaryOperator(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ TfLiteNode* tfLiteNode,
+ int nodeIndex,
+ int32_t elementwiseBinaryOperatorCode)
+{
+ TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 2, nodeIndex));
+ TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+
+ const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
+ const TfLiteTensor& tfLiteInputTensor0 = tfLiteTensors[tfLiteNode->inputs->data[0]];
+ if (IsDynamicTensor(tfLiteInputTensor0))
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
+ elementwiseBinaryOperatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+
+ const TfLiteTensor& tfLiteInputTensor1 = tfLiteTensors[tfLiteNode->inputs->data[1]];
+ if (IsDynamicTensor(tfLiteInputTensor1))
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
+ elementwiseBinaryOperatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+
+ const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
+ if (IsDynamicTensor(tfLiteOutputTensor))
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: Dynamic output tensors are not supported in operator #%d node #%d: ",
+ elementwiseBinaryOperatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+
+ armnn::TensorInfo inputTensorInfo0 = GetTensorInfoForTfLiteTensor(tfLiteInputTensor0);
+ armnn::TensorInfo inputTensorInfo1 = GetTensorInfoForTfLiteTensor(tfLiteInputTensor1);
+
+ const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
+
+ // Check if we need to expand the dims of the input tensor infos.
+ // This is required for a few of the backends.
+ if(inputTensorInfo0.GetNumDimensions() != inputTensorInfo1.GetNumDimensions())
+ {
+ ExpandTensorRankToEqual(inputTensorInfo0, inputTensorInfo1);
+ }
+
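+ // The builtin param structs for these binary ops all begin with the fused activation field, so
+ // TfLiteAddParams serves as a common view; ops without params leave builtin_data null, handled below.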
+ auto* tfLiteNodeParameters = reinterpret_cast<TfLiteAddParams*>(tfLiteNode->builtin_data);
+ TfLiteFusedActivation activationType = kTfLiteActNone;
+ if (tfLiteNodeParameters)
+ {
+ activationType = tfLiteNodeParameters->activation;
+ TfLiteStatus activationStatus = ValidateFusedActivationOperator(delegateData, tfLiteContext, outputTensorInfo,
+ outputTensorInfo, activationType);
+ if(activationStatus != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
+ }
+
+ if (!delegateData.m_Network)
+ {
+ switch(elementwiseBinaryOperatorCode)
+ {
+ case kTfLiteBuiltinAdd:
+ return ValidateAddOperator(delegateData,
+ tfLiteContext,
+ inputTensorInfo0,
+ inputTensorInfo1,
+ outputTensorInfo);
+ case kTfLiteBuiltinDiv:
+ return ValidateDivOperator(delegateData,
+ tfLiteContext,
+ inputTensorInfo0,
+ inputTensorInfo1,
+ outputTensorInfo);
+ case kTfLiteBuiltinFloorDiv:
+ return ValidateFloorDivOperator(delegateData,
+ tfLiteContext,
+ inputTensorInfo0,
+ inputTensorInfo1,
+ outputTensorInfo);
+ case kTfLiteBuiltinMaximum:
+ return ValidateMaximumOperator(delegateData,
+ tfLiteContext,
+ inputTensorInfo0,
+ inputTensorInfo1,
+ outputTensorInfo);
+ case kTfLiteBuiltinMinimum:
+ return ValidateMinimumOperator(delegateData,
+ tfLiteContext,
+ inputTensorInfo0,
+ inputTensorInfo1,
+ outputTensorInfo);
+ case kTfLiteBuiltinMul:
+ return ValidateMulOperator(delegateData,
+ tfLiteContext,
+ inputTensorInfo0,
+ inputTensorInfo1,
+ outputTensorInfo);
+ case kTfLiteBuiltinSub:
+ return ValidateSubOperator(delegateData,
+ tfLiteContext,
+ inputTensorInfo0,
+ inputTensorInfo1,
+ outputTensorInfo);
+ default:
+ return kTfLiteError;
+ }
+ }
+
+ armnn::IConnectableLayer* elementwiseBinaryLayer = nullptr;
+ MultiLayerFacade multiLayer;
+ switch(elementwiseBinaryOperatorCode)
+ {
+ case kTfLiteBuiltinAdd:
+ elementwiseBinaryLayer = delegateData.m_Network->AddElementwiseBinaryLayer(
+ armnn::BinaryOperation::Add);
+ break;
+ case kTfLiteBuiltinDiv:
+ elementwiseBinaryLayer = delegateData.m_Network->AddElementwiseBinaryLayer(
+ armnn::BinaryOperation::Div);
+ break;
+ case kTfLiteBuiltinFloorDiv:
+ {
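+ // FLOOR_DIV expands to two ArmNN layers (Div + Floor); the MultiLayerFacade presents them as a
+ // single layer so the input processing, connection and activation fusing below work unchanged.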
+ auto layers = AddFloorDivLayer(delegateData, outputTensorInfo);
+ multiLayer.AssignValues(layers.first, layers.second);
+ elementwiseBinaryLayer = &multiLayer;
+ }
+ break;
+ case kTfLiteBuiltinMaximum:
+ elementwiseBinaryLayer = delegateData.m_Network->AddElementwiseBinaryLayer(
+ armnn::BinaryOperation::Maximum);
+ break;
+ case kTfLiteBuiltinMinimum:
+ elementwiseBinaryLayer = delegateData.m_Network->AddElementwiseBinaryLayer(
+ armnn::BinaryOperation::Minimum);
+ break;
+ case kTfLiteBuiltinMul:
+ elementwiseBinaryLayer = delegateData.m_Network->AddElementwiseBinaryLayer(
+ armnn::BinaryOperation::Mul);
+ break;
+ case kTfLiteBuiltinSub:
+ elementwiseBinaryLayer = delegateData.m_Network->AddElementwiseBinaryLayer(
+ armnn::BinaryOperation::Sub);
+ break;
+ default:
+ return kTfLiteError;
+ }
+ ARMNN_ASSERT(elementwiseBinaryLayer != nullptr);
+ armnn::IOutputSlot& outputSlot = elementwiseBinaryLayer->GetOutputSlot(0);
+ outputSlot.SetTensorInfo(outputTensorInfo);
+
+ auto inputsTensorsProcess = ProcessInputs(elementwiseBinaryLayer,
+ delegateData,
+ tfLiteContext,
+ tfLiteNode);
+ if (inputsTensorsProcess == kTfLiteError)
+ {
+ return inputsTensorsProcess;
+ }
+
+ if(Connect(elementwiseBinaryLayer, tfLiteNode, delegateData) != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
+
+ if (!tfLiteNodeParameters)
+ {
+ // No Activation
+ return kTfLiteOk;
+ }
+ // Check and Create Activation
+ return FusedActivation(tfLiteContext, tfLiteNode, activationType, elementwiseBinaryLayer, 0, delegateData);
+}
+
+} // namespace armnnDelegate
diff --git a/delegate/classic/src/ElementwiseUnary.hpp b/delegate/classic/src/ElementwiseUnary.hpp
new file mode 100644
index 0000000000..562ce1fd9f
--- /dev/null
+++ b/delegate/classic/src/ElementwiseUnary.hpp
@@ -0,0 +1,91 @@
+//
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <DelegateUtils.hpp>
+
+#include <armnn/utility/Assert.hpp>
+
+#include <tensorflow/lite/builtin_ops.h>
+#include <tensorflow/lite/c/builtin_op_data.h>
+#include <tensorflow/lite/c/common.h>
+#include <tensorflow/lite/minimal_logging.h>
+
+namespace armnnDelegate
+{
+
+TfLiteStatus VisitElementwiseUnaryOperator(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ TfLiteNode* tfLiteNode,
+ int nodeIndex,
+ armnn::UnaryOperation unaryOperation)
+{
+ TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+ TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+
+ const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
+ const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
+ if (IsDynamicTensor(tfLiteInputTensor))
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: Dynamic input tensors are not supported in node #%d: ",
+ nodeIndex);
+ return kTfLiteError;
+ }
+ const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
+ if (IsDynamicTensor(tfLiteOutputTensor))
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: Dynamic output tensors are not supported in node #%d: ",
+ nodeIndex);
+ return kTfLiteError;
+ }
+
+ const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
+ const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
+
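+ // One descriptor parameterised by the requested unary operation covers every TfLite unary builtin
+ // this function is dispatched for (e.g. ABS, EXP, NEG, RSQRT).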
+ armnn::ElementwiseUnaryDescriptor descriptor(unaryOperation);
+ bool isSupported = false;
+ armnn::BackendId setBackend;
+ auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
+ {
+ FORWARD_LAYER_SUPPORT_FUNC("ELEMENTWISE_UNARY",
+ tfLiteContext,
+ IsElementwiseUnarySupported,
+ delegateData.m_Backends,
+ isSupported,
+ setBackend,
+ inputTensorInfo,
+ outputTensorInfo,
+ descriptor);
+ };
+
+ if (!delegateData.m_Network)
+ {
+ validateFunc(outputTensorInfo, isSupported);
+ return isSupported ? kTfLiteOk : kTfLiteError;
+ }
+
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddElementwiseUnaryLayer(descriptor);
+ layer->SetBackendId(setBackend);
+ ARMNN_ASSERT(layer != nullptr);
+
+ armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
+ outputSlot.SetTensorInfo(outputTensorInfo);
+
+ // Try to connect the constant inputs if there are any
+ if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
+
+ // Connect
+ return Connect(layer, tfLiteNode, delegateData);
+}
+
+} // namespace armnnDelegate
diff --git a/delegate/classic/src/Fill.hpp b/delegate/classic/src/Fill.hpp
new file mode 100644
index 0000000000..15dc91e481
--- /dev/null
+++ b/delegate/classic/src/Fill.hpp
@@ -0,0 +1,114 @@
+//
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <armnn/utility/IgnoreUnused.hpp>
+
+#include <tensorflow/lite/builtin_ops.h>
+#include <tensorflow/lite/c/builtin_op_data.h>
+#include <tensorflow/lite/c/common.h>
+#include <tensorflow/lite/minimal_logging.h>
+
+namespace armnnDelegate
+{
+
+TfLiteStatus VisitFillOperator(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ TfLiteNode* tfLiteNode,
+ int nodeIndex,
+ int32_t tfLiteFillOperatorCode)
+{
+ TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+
+ switch(tfLiteFillOperatorCode)
+ {
+ case kTfLiteBuiltinFill:
+ TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 2, nodeIndex));
+ break;
+ default:
+ return kTfLiteError;
+ }
+
+ const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
+ const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
+ if (!IsValid(tfLiteContext, tfLiteInputTensor, tfLiteFillOperatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+
+ const TfLiteTensor& tfLiteFillTensor = tfLiteTensors[tfLiteNode->inputs->data[1]];
+ if (!IsValid(tfLiteContext, tfLiteFillTensor, tfLiteFillOperatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+
+ const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
+ if (!IsValid(tfLiteContext, tfLiteOutputTensor, tfLiteFillOperatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+
+ armnn::TensorInfo inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
+ const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
+
+ armnn::FillDescriptor descriptor;
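+ // The second input tensor holds the scalar fill value; copy it into the descriptor.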
+ switch (tfLiteFillTensor.type)
+ {
+ case kTfLiteFloat32:
+ descriptor.m_Value = tflite::GetTensorData<float>(&tfLiteFillTensor)[0];
+ break;
+ case kTfLiteInt32:
+ descriptor.m_Value = tflite::GetTensorData<int32_t>(&tfLiteFillTensor)[0];
+ break;
+ default:
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: FILL value data type is not supported in operator #%d node #%d: ",
+ tfLiteFillOperatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+
+ bool isSupported = false;
+ armnn::BackendId setBackend;
+ auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
+ {
+ FORWARD_LAYER_SUPPORT_FUNC("FILL",
+ tfLiteContext,
+ IsFillSupported,
+ delegateData.m_Backends,
+ isSupported,
+ setBackend,
+ inputTensorInfo,
+ outInfo,
+ descriptor);
+ };
+
+ if (!delegateData.m_Network)
+ {
+ validateFunc(outputTensorInfo, isSupported);
+ return isSupported ? kTfLiteOk : kTfLiteError;
+ }
+
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddFillLayer(descriptor);
+ layer->SetBackendId(setBackend);
+ ARMNN_ASSERT(layer != nullptr);
+
+ armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
+ outputSlot.SetTensorInfo(outputTensorInfo);
+
+ auto inputsTensorsProcess = ProcessInputs(layer,
+ delegateData,
+ tfLiteContext,
+ tfLiteNode);
+ if (inputsTensorsProcess == kTfLiteError)
+ {
+ return inputsTensorsProcess;
+ }
+
+ return Connect(layer, tfLiteNode, delegateData);
+}
+
+} // namespace armnnDelegate
diff --git a/delegate/classic/src/FullyConnected.hpp b/delegate/classic/src/FullyConnected.hpp
new file mode 100644
index 0000000000..28d43d06df
--- /dev/null
+++ b/delegate/classic/src/FullyConnected.hpp
@@ -0,0 +1,275 @@
+//
+// Copyright © 2020-2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <DelegateUtils.hpp>
+#include "armnnUtils/TensorUtils.hpp"
+#include <armnn/utility/IgnoreUnused.hpp>
+
+#include <tensorflow/lite/builtin_ops.h>
+#include <tensorflow/lite/c/builtin_op_data.h>
+#include <tensorflow/lite/c/common.h>
+#include <tensorflow/lite/minimal_logging.h>
+
+namespace armnnDelegate
+{
+
+TfLiteStatus VisitFullyConnectedOperator(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ TfLiteNode* tfLiteNode,
+ int nodeIndex,
+ int32_t operatorCode)
+{
+ auto numInputs = tfLiteNode->inputs->size;
+ if (numInputs < 2)
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext, "TfLiteArmnnDelegate: Minimum number of inputs (%d != %d) in node #%d",
+ 2, numInputs, nodeIndex);
+ return kTfLiteError;
+ }
+ TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+ bool biasEnabled = IsOptionalOperandPresent(tfLiteNode, 2);
+
+ const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
+ const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
+ if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+
+ const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
+ if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+
+ const TfLiteTensor& tfLiteWeightsTensor = tfLiteTensors[tfLiteNode->inputs->data[1]];
+ if (!IsValid(tfLiteContext, tfLiteWeightsTensor, operatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+
+ const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
+ const armnn::TensorInfo& weightsTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteWeightsTensor);
+ const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
+
+ // Check that we support fused activation before we attempt to create a layer
+ auto* tfLiteNodeParameters = reinterpret_cast<TfLiteFullyConnectedParams *>(tfLiteNode->builtin_data);
+ TfLiteFusedActivation activationType = kTfLiteActNone;
+ if (tfLiteNodeParameters)
+ {
+ activationType = tfLiteNodeParameters->activation;
+ TfLiteStatus activationStatus = ValidateFusedActivationOperator(delegateData, tfLiteContext, outputTensorInfo,
+ outputTensorInfo, activationType);
+ if(activationStatus != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
+ }
+
+ // Fully Connected Layer accepts two dimensional weights input
+ int32_t weightsDimension = static_cast<int32_t>(weightsTensorInfo.GetNumDimensions());
+ if (weightsDimension != 2)
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: Dimension #$d for Fully Connected weights is not supported by Armnn"
+ " in operator #%d node #%d: ", weightsDimension, operatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+
+ armnn::TensorInfo biasTensorInfo;
+ if (biasEnabled)
+ {
+ const TfLiteTensor& tfLiteBiasTensor = tfLiteTensors[tfLiteNode->inputs->data[2]];
+ if (!IsValid(tfLiteContext, tfLiteBiasTensor, operatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+ biasTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteBiasTensor);
+ }
+ else
+ {
+ biasTensorInfo = armnn::TensorInfo(armnn::TensorShape({1}), GetDataType(tfLiteInputTensor));
+ }
+
+ armnn::TensorInfo reshapedTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
+ if (inputTensorInfo.GetNumDimensions() > 2)
+ {
+ // Calculate reshape to flatten to 2D [batch_size, input_size]
+ std::vector<unsigned int> reshapedDimensions(2);
+ reshapedDimensions[1] = weightsTensorInfo.GetShape()[1];
+ reshapedDimensions[0] = inputTensorInfo.GetNumElements() / reshapedDimensions[1];
+
+ if (inputTensorInfo.GetNumElements() % reshapedDimensions[1] != 0)
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: Failed to deduce input tensor shape from filter size #%d #%d node #%d: ",
+ reshapedDimensions[1], operatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+
+ reshapedTensorInfo.SetShape(armnn::TensorShape{ 2, reshapedDimensions.data() });
+ }
+ armnn::TensorInfo reshapedOutputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor);
+
+ if (outputTensorInfo.GetNumDimensions() > 2)
+ {
+ // Calculate reshape to flatten to 2D [batch_size, output_size]
+ std::vector<unsigned int> reshapedDimensions(2);
+ reshapedDimensions[1] = weightsTensorInfo.GetShape()[0];
+ reshapedDimensions[0] = outputTensorInfo.GetNumElements() / reshapedDimensions[1];
+
+ if (outputTensorInfo.GetNumElements() % reshapedDimensions[1] != 0)
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: Failed to deduce output tensor shape from filter size #%d #%d node #%d: ",
+ reshapedDimensions[1], operatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+ reshapedOutputTensorInfo.SetShape(armnn::TensorShape{ 2, reshapedDimensions.data() });
+ }
+
+ armnn::FullyConnectedDescriptor descriptor;
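+ // TfLite stores fully connected weights as [num_units, input_size], so flag the weight matrix as transposed.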
+ descriptor.m_TransposeWeightMatrix = true;
+ descriptor.m_BiasEnabled = biasEnabled;
+ descriptor.m_ConstantWeights = weightsTensorInfo.IsConstant();
+
+ bool isSupported = false;
+ armnn::BackendId setBackend;
+ auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
+ {
+ FORWARD_LAYER_SUPPORT_FUNC("FULLY_CONNECTED",
+ tfLiteContext,
+ IsFullyConnectedSupported,
+ delegateData.m_Backends,
+ isSupported,
+ setBackend,
+ reshapedTensorInfo,
+ outputTensorInfo,
+ weightsTensorInfo,
+ biasTensorInfo,
+ descriptor);
+ };
+
+ if (!delegateData.m_Network)
+ {
+ validateFunc(reshapedOutputTensorInfo, isSupported);
+ return isSupported ? kTfLiteOk : kTfLiteError;
+ }
+
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddFullyConnectedLayer(descriptor);
+ layer->SetBackendId(setBackend);
+ ARMNN_ASSERT(layer != nullptr);
+
+ // Add a constant layer for weights and biases if inputs are constant.
+ if (weightsTensorInfo.IsConstant())
+ {
+ auto weightsTensor = CreateConstTensor(&tfLiteWeightsTensor,
+ weightsTensorInfo);
+
+ armnn::IConnectableLayer* weightsLayer = delegateData.m_Network->AddConstantLayer(weightsTensor);
+
+ weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1u));
+ weightsLayer->GetOutputSlot(0).SetTensorInfo(weightsTensorInfo);
+ }
+
+ if (biasEnabled)
+ {
+ const TfLiteTensor& tfLiteBiasTensor = tfLiteTensors[tfLiteNode->inputs->data[2]];
+ if(biasTensorInfo.IsConstant())
+ {
+ auto biasTensor = CreateConstTensor(&tfLiteBiasTensor,
+ biasTensorInfo);
+
+ armnn::IConnectableLayer* biasLayer = delegateData.m_Network->AddConstantLayer(biasTensor);
+ ARMNN_ASSERT(biasLayer != nullptr);
+
+ biasLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(2u));
+ biasLayer->GetOutputSlot(0).SetTensorInfo(biasTensorInfo);
+ }
+ }
+
+ // The data input can also be constant, so we must check that this is also allocated to an input slot
+ if(inputTensorInfo.IsConstant())
+ {
+ auto input =
+ CreateConstTensor(&tfLiteContext->tensors[tfLiteNode->inputs->data[0]],
+ inputTensorInfo);
+
+ armnn::IConnectableLayer *inputLayer = delegateData.m_Network->AddConstantLayer(input);
+ inputLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0u));
+ inputLayer->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
+ }
+
+ armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
+ outputSlot.SetTensorInfo(outputTensorInfo);
+
+ armnn::IConnectableLayer* reshapeLayer = nullptr;
+ if (inputTensorInfo.GetNumDimensions() > 2)
+ {
+ // Add reshape to flatten to 2D [batch_size, input_size]
+ armnn::ReshapeDescriptor reshapeDescriptor;
+ reshapeDescriptor.m_TargetShape = reshapedTensorInfo.GetShape();
+ reshapeLayer = delegateData.m_Network->AddReshapeLayer(reshapeDescriptor);
+ ARMNN_ASSERT(reshapeLayer != nullptr);
+
+ reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedTensorInfo);
+
+ // Connect
+ delegateData.m_OutputSlotForNode[tfLiteNode->inputs->data[0]]->Connect(reshapeLayer->GetInputSlot(0));
+ reshapeLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
+
+ if (!descriptor.m_ConstantWeights)
+ {
+ delegateData.m_OutputSlotForNode[tfLiteNode->inputs->data[1]]->Connect(layer->GetInputSlot(1));
+ }
+
+ if (biasEnabled && !biasTensorInfo.IsConstant())
+ {
+ delegateData.m_OutputSlotForNode[tfLiteNode->inputs->data[2]]->Connect(layer->GetInputSlot(2));
+ }
+ delegateData.m_OutputSlotForNode[tfLiteNode->outputs->data[0]] = &outputSlot;
+ }
+
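+ // If a reshape was inserted, the input connections were already made above; otherwise connect as normal.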
+ if (reshapeLayer == nullptr)
+ {
+ if(Connect(layer, tfLiteNode, delegateData) != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
+ }
+
+ if (outputTensorInfo.GetNumDimensions() > 2)
+ {
+ layer = AddReshapeLayer(tfLiteContext, tfLiteNode, layer, reshapedOutputTensorInfo, outputTensorInfo,
+ delegateData);
+ if (!layer)
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: Failed to add reshape for FullyConnected #%d node #%d: ",
+ operatorCode,
+ nodeIndex);
+ return kTfLiteError;
+ }
+ }
+
+ if (!tfLiteNodeParameters)
+ {
+ // No Activation
+ return kTfLiteOk;
+ }
+
+ // Check and Create Activation
+ return FusedActivation(tfLiteContext, tfLiteNode, activationType, layer, 0, delegateData);
+}
+
+} // namespace armnnDelegate
\ No newline at end of file
diff --git a/delegate/classic/src/Gather.hpp b/delegate/classic/src/Gather.hpp
new file mode 100644
index 0000000000..4c9cf82832
--- /dev/null
+++ b/delegate/classic/src/Gather.hpp
@@ -0,0 +1,106 @@
+//
+// Copyright © 2020,2022-2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <DelegateUtils.hpp>
+#include <algorithm>
+#include <iterator>
+#include <string>
+#include <vector>
+
+namespace armnnDelegate
+{
+TfLiteStatus VisitGatherOperator(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ TfLiteNode* tfLiteNode,
+ int nodeIndex,
+ int32_t operatorCode)
+{
+ TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 2, nodeIndex));
+ TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+
+ const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
+
+ const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
+ if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+
+ const TfLiteTensor& tfLiteIndicesTensor = tfLiteTensors[tfLiteNode->inputs->data[1]];
+ if (!IsValid(tfLiteContext, tfLiteIndicesTensor, operatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+
+ const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
+ if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+
+ auto* gatherParameters = reinterpret_cast<TfLiteGatherParams*>(tfLiteNode->builtin_data);
+ auto axis = gatherParameters->axis;
+
+ const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
+ const armnn::TensorInfo& indicesTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteIndicesTensor);
+ const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
+ armnn::GatherDescriptor gatherDescriptor;
+ gatherDescriptor.m_Axis = axis;
+
+ auto inputDimensions = static_cast<int32_t>(inputTensorInfo.GetNumDimensions());
+ auto indicesDimensions = indicesTensorInfo.GetNumDimensions();
+ auto outputDimensions = outputTensorInfo.GetNumDimensions();
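+ // A negative axis counts back from the last dimension, so the valid range is [-inputDimensions, inputDimensions).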
+ if (((axis < -inputDimensions) && (axis < 0)) || ((axis >= inputDimensions) && (axis > 0)))
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(tfLiteContext,
+ "TfLiteArmnnDelegate: Operation has invalid axis: %d. It is out of bounds [-%d, %d)",
+ axis, inputDimensions, inputDimensions);
+ return kTfLiteError;
+ }
+ if (outputDimensions != static_cast<unsigned int>(inputDimensions) + indicesDimensions - 1)
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(tfLiteContext,
+ "Operation has invalid output dimensions: %d. Output must be an (%d + %d - 1)-D tensor",
+ outputDimensions, inputDimensions, indicesDimensions);
+ return kTfLiteError;
+ }
+
+ armnn::BackendId setBackend;
+ if (!delegateData.m_Network)
+ {
+ // Check if supported
+ bool isSupported = false;
+ FORWARD_LAYER_SUPPORT_FUNC("GATHER",
+ tfLiteContext,
+ IsGatherSupported,
+ delegateData.m_Backends,
+ isSupported,
+ setBackend,
+ inputTensorInfo,
+ indicesTensorInfo,
+ outputTensorInfo,
+ gatherDescriptor);
+ return isSupported ? kTfLiteOk : kTfLiteError;
+ }
+
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddGatherLayer(gatherDescriptor);
+ layer->SetBackendId(setBackend);
+ ARMNN_ASSERT(layer != nullptr);
+ layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
+
+ auto inputsTensorsProcess = ProcessInputs(layer,
+ delegateData,
+ tfLiteContext,
+ tfLiteNode);
+ if (inputsTensorsProcess == kTfLiteError)
+ {
+ return inputsTensorsProcess;
+ }
+
+ return Connect(layer, tfLiteNode, delegateData);
+}
+} // namespace armnnDelegate
\ No newline at end of file
diff --git a/delegate/classic/src/GatherNd.hpp b/delegate/classic/src/GatherNd.hpp
new file mode 100644
index 0000000000..12f0af306d
--- /dev/null
+++ b/delegate/classic/src/GatherNd.hpp
@@ -0,0 +1,82 @@
+//
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <DelegateUtils.hpp>
+#include <algorithm>
+#include <iterator>
+#include <string>
+#include <vector>
+
+namespace armnnDelegate
+{
+TfLiteStatus VisitGatherNdOperator(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ TfLiteNode* tfLiteNode,
+ int nodeIndex,
+ int32_t operatorCode)
+{
+ TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 2, nodeIndex));
+ TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+
+ const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
+
+ const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
+ if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+
+ const TfLiteTensor& tfLiteIndicesTensor = tfLiteTensors[tfLiteNode->inputs->data[1]];
+ if (!IsValid(tfLiteContext, tfLiteIndicesTensor, operatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+
+ const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
+ if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+
+ const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
+ const armnn::TensorInfo& indicesTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteIndicesTensor);
+ const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
+
+ armnn::BackendId setBackend;
+ if (!delegateData.m_Network)
+ {
+ // Check if supported
+ bool isSupported = false;
+ FORWARD_LAYER_SUPPORT_FUNC("GATHER_ND",
+ tfLiteContext,
+ IsGatherNdSupported,
+ delegateData.m_Backends,
+ isSupported,
+ setBackend,
+ inputTensorInfo,
+ indicesTensorInfo,
+ outputTensorInfo);
+ return isSupported ? kTfLiteOk : kTfLiteError;
+ }
+
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddGatherNdLayer();
+ layer->SetBackendId(setBackend);
+ ARMNN_ASSERT(layer != nullptr);
+ layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
+
+ auto inputsTensorsProcess = ProcessInputs(layer,
+ delegateData,
+ tfLiteContext,
+ tfLiteNode);
+ if (inputsTensorsProcess == kTfLiteError)
+ {
+ return inputsTensorsProcess;
+ }
+
+ return Connect(layer, tfLiteNode, delegateData);
+}
+} // namespace armnnDelegate
\ No newline at end of file
diff --git a/delegate/classic/src/LogicalBinary.hpp b/delegate/classic/src/LogicalBinary.hpp
new file mode 100644
index 0000000000..d71618ee9c
--- /dev/null
+++ b/delegate/classic/src/LogicalBinary.hpp
@@ -0,0 +1,102 @@
+//
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <tensorflow/lite/builtin_ops.h>
+#include <tensorflow/lite/c/builtin_op_data.h>
+#include <tensorflow/lite/c/common.h>
+#include <tensorflow/lite/minimal_logging.h>
+
+namespace armnnDelegate
+{
+
+TfLiteStatus VisitLogicalBinaryOperator(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ TfLiteNode* tfLiteNode,
+ int nodeIndex,
+ int32_t logicalOperatorCode,
+ armnn::LogicalBinaryOperation binaryOperation)
+{
+ TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 2, nodeIndex));
+ TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+
+ const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
+ const TfLiteTensor& tfLiteInputTensor0 = tfLiteTensors[tfLiteNode->inputs->data[0]];
+ if (!IsValid(tfLiteContext, tfLiteInputTensor0, logicalOperatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+
+ const TfLiteTensor& tfLiteInputTensor1 = tfLiteTensors[tfLiteNode->inputs->data[1]];
+ if (!IsValid(tfLiteContext, tfLiteInputTensor1, logicalOperatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+
+ const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
+ if (!IsValid(tfLiteContext, tfLiteOutputTensor, logicalOperatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+
+ armnn::TensorInfo inputTensorInfo0 = GetTensorInfoForTfLiteTensor(tfLiteInputTensor0);
+ armnn::TensorInfo inputTensorInfo1 = GetTensorInfoForTfLiteTensor(tfLiteInputTensor1);
+ const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
+
+ // Check if we need to expand the dims of any of the input tensor infos.
+ // This is required for a few of the backends.
+ if (inputTensorInfo0.GetNumDimensions() != inputTensorInfo1.GetNumDimensions())
+ {
+ ExpandTensorRankToEqual(inputTensorInfo0, inputTensorInfo1);
+ }
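+ // e.g. input shapes { 2, 2 } and { 1, 2, 2 } become { 1, 2, 2 } and { 1, 2, 2 }:
+ // the lower-rank tensor info has leading dimensions of 1 prepended.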
+
+ // Setup descriptor and assign operation
+ armnn::LogicalBinaryDescriptor desc;
+ desc.m_Operation = binaryOperation;
+
+ // Check if supported
+ bool isSupported = false;
+ armnn::BackendId setBackend;
+ auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
+ {
+ FORWARD_LAYER_SUPPORT_FUNC("LOGICAL_BINARY",
+ tfLiteContext,
+ IsLogicalBinarySupported,
+ delegateData.m_Backends,
+ isSupported,
+ setBackend,
+ inputTensorInfo0,
+ inputTensorInfo1,
+ outputTensorInfo,
+ desc);
+ };
+
+ if (!delegateData.m_Network)
+ {
+ validateFunc(outputTensorInfo, isSupported);
+ return isSupported ? kTfLiteOk : kTfLiteError;
+ }
+
+ armnn::IConnectableLayer* logicalBinaryLayer = delegateData.m_Network->AddLogicalBinaryLayer(desc);
+ ARMNN_ASSERT(logicalBinaryLayer != nullptr);
+ logicalBinaryLayer->SetBackendId(setBackend);
+
+ armnn::IOutputSlot& outputSlot = logicalBinaryLayer->GetOutputSlot(0);
+ outputSlot.SetTensorInfo(outputTensorInfo);
+
+ auto inputsTensorsProcess = ProcessInputs(logicalBinaryLayer,
+ delegateData,
+ tfLiteContext,
+ tfLiteNode);
+ if (inputsTensorsProcess == kTfLiteError)
+ {
+ return inputsTensorsProcess;
+ }
+
+ return Connect(logicalBinaryLayer, tfLiteNode, delegateData);
+}
+
+} // namespace armnnDelegate
diff --git a/delegate/classic/src/Lstm.hpp b/delegate/classic/src/Lstm.hpp
new file mode 100644
index 0000000000..460c61adf9
--- /dev/null
+++ b/delegate/classic/src/Lstm.hpp
@@ -0,0 +1,268 @@
+//
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <DelegateUtils.hpp>
+
+#include <armnn/LstmParams.hpp>
+#include <armnn/Tensor.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
+
+#include <tensorflow/lite/builtin_ops.h>
+#include <tensorflow/lite/c/builtin_op_data.h>
+#include <tensorflow/lite/c/common.h>
+#include <tensorflow/lite/minimal_logging.h>
+
+namespace armnnDelegate
+{
+
+TfLiteStatus VisitLstmOperator(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ TfLiteNode* tfLiteNode,
+ int nodeIndex,
+ int32_t operatorCode)
+{
+ auto numInputs = tfLiteNode->inputs->size;
+ if (numInputs < 2)
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext, "TfLiteArmnnDelegate: Minimum number of inputs (%d != %d) in node #%d",
+ 2, numInputs, nodeIndex);
+ return kTfLiteError;
+ }
+
+ const auto nodeParams = reinterpret_cast<TfLiteLSTMParams*>(tfLiteNode->builtin_data);
+ const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
+
+ const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
+ if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+
+ const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
+ if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+
+ // Set the params structure for the AddLstmLayer call
+ armnn::LstmInputParams params;
+
+ if (IsOptionalOperandPresent(tfLiteNode, 1))
+ {
+ params.m_InputToInputWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 1);
+ }
+
+ params.m_InputToForgetWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 2);
+ params.m_InputToCellWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 3);
+ params.m_InputToOutputWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 4);
+
+ // Recurrent weight tensors of size {n_cell, n_output}
+ if (IsOptionalOperandPresent(tfLiteNode, 5))
+ {
+ params.m_RecurrentToInputWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 5);
+ }
+
+ params.m_RecurrentToForgetWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 6);
+ params.m_RecurrentToCellWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 7);
+ params.m_RecurrentToOutputWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 8);
+
+ // Peephole weights tensors of size {n_cell}, representing a diagonal matrix.
+ if (IsOptionalOperandPresent(tfLiteNode, 9))
+ {
+ params.m_CellToInputWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 9);
+ }
+
+ if (IsOptionalOperandPresent(tfLiteNode, 10))
+ {
+ params.m_CellToForgetWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 10);
+ }
+
+ if (IsOptionalOperandPresent(tfLiteNode, 11))
+ {
+ params.m_CellToOutputWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 11);
+ }
+
+ // Gates bias tensors of size {n_cell}
+ if (IsOptionalOperandPresent(tfLiteNode, 12))
+ {
+ params.m_InputGateBias = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 12);
+ }
+
+ params.m_ForgetGateBias = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 13);
+ params.m_CellBias = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 14);
+ params.m_OutputGateBias = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 15);
+
+ // Projection weight tensor of size {n_output, n_cell}
+ if (IsOptionalOperandPresent(tfLiteNode, 16))
+ {
+ params.m_ProjectionWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 16);
+ }
+ // Projection bias tensor of size {n_output}
+ if (IsOptionalOperandPresent(tfLiteNode, 17))
+ {
+ params.m_ProjectionBias = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 17);
+ }
+
+ // These state tensors are defined as variable tensors, and will be modified by this op.
+ armnn::TensorInfo outputStateInInfo = GetTensorInfoForTfLiteTensor(tfLiteTensors[tfLiteNode->inputs->data[18]]);
+ armnn::TensorInfo cellStateInInfo = GetTensorInfoForTfLiteTensor(tfLiteTensors[tfLiteNode->inputs->data[19]]);
+
+ // Layer norm coefficient tensors of size {n_cell}, representing a diagonal matrix.
+ if (IsOptionalOperandPresent(tfLiteNode, 20))
+ {
+ params.m_InputLayerNormWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 20);
+ }
+
+ if (IsOptionalOperandPresent(tfLiteNode, 21))
+ {
+ params.m_ForgetLayerNormWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 21);
+ }
+
+ if (IsOptionalOperandPresent(tfLiteNode, 22))
+ {
+ params.m_CellLayerNormWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 22);
+ }
+
+ if (IsOptionalOperandPresent(tfLiteNode, 23))
+ {
+ params.m_OutputLayerNormWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 23);
+ }
+
+ // set the layer descriptor
+ armnn::LstmDescriptor desc;
+ desc.m_ActivationFunc = NonNegative(nodeParams->activation, nodeIndex);
+ desc.m_ClippingThresCell = nodeParams->cell_clip;
+ desc.m_ClippingThresProj = nodeParams->proj_clip;
+ desc.m_CifgEnabled = (params.m_InputToInputWeights == nullptr
+ || params.m_RecurrentToInputWeights == nullptr
+ || params.m_InputGateBias == nullptr);
+ desc.m_PeepholeEnabled = (params.m_CellToForgetWeights != nullptr || params.m_CellToOutputWeights != nullptr);
+ desc.m_ProjectionEnabled = (params.m_ProjectionWeights != nullptr);
+ desc.m_LayerNormEnabled = (params.m_InputLayerNormWeights != nullptr
+ || params.m_ForgetLayerNormWeights != nullptr
+ || params.m_CellLayerNormWeights != nullptr
+ || params.m_OutputLayerNormWeights != nullptr);
+
+ const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
+ const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
+
+ unsigned int batchSize = inputTensorInfo.GetShape()[0];
+ unsigned int outputSize = outputTensorInfo.GetShape()[1];
+ unsigned int numUnits = cellStateInInfo.GetShape()[1];
+
+ armnn::DataType dataType = inputTensorInfo.GetDataType();
+ float qScale = inputTensorInfo.GetQuantizationScale();
+ int32_t qOffset = inputTensorInfo.GetQuantizationOffset();
+
+ armnn::TensorInfo scratchBufferTensorInfo({batchSize, numUnits * 3}, dataType, qScale, qOffset);
+ if (!desc.m_CifgEnabled)
+ {
+ scratchBufferTensorInfo = armnn::TensorInfo({batchSize, numUnits * 4}, dataType, qScale, qOffset);
+ }
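+ // The scratch buffer holds the intermediate gate activations: with CIFG the input gate is
+ // coupled to the forget gate, so only 3 * numUnits values are needed; without CIFG all four
+ // gates are computed explicitly, requiring 4 * numUnits.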
+ armnn::TensorInfo cellStateOutTensorInfo({batchSize, numUnits}, dataType, qScale, qOffset);
+ armnn::TensorInfo outputStateOutTensorInfo({batchSize, outputSize}, dataType, qScale, qOffset);
+
+ armnn::LstmInputParamsInfo paramsInfo;
+ paramsInfo.m_InputToForgetWeights = &(params.m_InputToForgetWeights->GetInfo());
+ paramsInfo.m_InputToCellWeights = &(params.m_InputToCellWeights->GetInfo());
+ paramsInfo.m_InputToOutputWeights = &(params.m_InputToOutputWeights->GetInfo());
+ paramsInfo.m_RecurrentToForgetWeights = &(params.m_RecurrentToForgetWeights->GetInfo());
+ paramsInfo.m_RecurrentToCellWeights = &(params.m_RecurrentToCellWeights->GetInfo());
+ paramsInfo.m_RecurrentToOutputWeights = &(params.m_RecurrentToOutputWeights->GetInfo());
+ paramsInfo.m_ForgetGateBias = &(params.m_ForgetGateBias->GetInfo());
+ paramsInfo.m_CellBias = &(params.m_CellBias->GetInfo());
+ paramsInfo.m_OutputGateBias = &(params.m_OutputGateBias->GetInfo());
+
+ if (!desc.m_CifgEnabled)
+ {
+ paramsInfo.m_InputToInputWeights = &(params.m_InputToInputWeights->GetInfo());
+ paramsInfo.m_RecurrentToInputWeights = &(params.m_RecurrentToInputWeights->GetInfo());
+ if (params.m_CellToInputWeights != nullptr)
+ {
+ paramsInfo.m_CellToInputWeights = &(params.m_CellToInputWeights->GetInfo());
+ }
+ paramsInfo.m_InputGateBias = &(params.m_InputGateBias->GetInfo());
+ }
+
+ if (desc.m_ProjectionEnabled)
+ {
+ paramsInfo.m_ProjectionWeights = &(params.m_ProjectionWeights->GetInfo());
+ if (params.m_ProjectionBias != nullptr)
+ {
+ paramsInfo.m_ProjectionBias = &(params.m_ProjectionBias->GetInfo());
+ }
+ }
+
+ if (desc.m_PeepholeEnabled)
+ {
+ paramsInfo.m_CellToForgetWeights = &(params.m_CellToForgetWeights->GetInfo());
+ paramsInfo.m_CellToOutputWeights = &(params.m_CellToOutputWeights->GetInfo());
+ }
+
+ if (desc.m_LayerNormEnabled)
+ {
+ if (!desc.m_CifgEnabled)
+ {
+ paramsInfo.m_InputLayerNormWeights = &(params.m_InputLayerNormWeights->GetInfo());
+ }
+ paramsInfo.m_ForgetLayerNormWeights = &(params.m_ForgetLayerNormWeights->GetInfo());
+ paramsInfo.m_CellLayerNormWeights = &(params.m_CellLayerNormWeights->GetInfo());
+ paramsInfo.m_OutputLayerNormWeights = &(params.m_OutputLayerNormWeights->GetInfo());
+ }
+
+ bool isSupported = false;
+ armnn::BackendId setBackend;
+ auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
+ {
+ FORWARD_LAYER_SUPPORT_FUNC("LSTM",
+ tfLiteContext,
+ IsLstmSupported,
+ delegateData.m_Backends,
+ isSupported,
+ setBackend,
+ inputTensorInfo,
+ outputStateInInfo,
+ cellStateInInfo,
+ scratchBufferTensorInfo,
+ outputStateOutTensorInfo,
+ cellStateOutTensorInfo,
+ outputInfo,
+ desc,
+ paramsInfo);
+ };
+
+ if (!delegateData.m_Network)
+ {
+ validateFunc(outputTensorInfo, isSupported);
+ return isSupported ? kTfLiteOk : kTfLiteError;
+ }
+
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddLstmLayer(desc, params);
+ ARMNN_ASSERT(layer != nullptr);
+ layer->SetBackendId(setBackend);
+
+ layer->GetOutputSlot(0).SetTensorInfo(scratchBufferTensorInfo);
+ layer->GetOutputSlot(1).SetTensorInfo(outputStateOutTensorInfo);
+ layer->GetOutputSlot(2).SetTensorInfo(cellStateOutTensorInfo);
+ layer->GetOutputSlot(3).SetTensorInfo(outputTensorInfo);
+
+ // Connect the inputs
+ // input
+ delegateData.m_OutputSlotForNode[tfLiteNode->inputs->data[0]]->Connect(layer->GetInputSlot(0));
+ // outputStateIn
+ delegateData.m_OutputSlotForNode[tfLiteNode->inputs->data[18]]->Connect(layer->GetInputSlot(1));
+ // cellStateIn
+ delegateData.m_OutputSlotForNode[tfLiteNode->inputs->data[19]]->Connect(layer->GetInputSlot(2));
+
+ // The TfLite LSTM operator has a single output tensor, which maps to the Arm NN outputStateOut slot (index 1)
+ armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(1);
+ delegateData.m_OutputSlotForNode[static_cast<unsigned long>(tfLiteNode->outputs->data[0])] = &outputSlot;
+ return kTfLiteOk;
+}
+
+} // namespace armnnDelegate
\ No newline at end of file
diff --git a/delegate/classic/src/MultiLayerFacade.hpp b/delegate/classic/src/MultiLayerFacade.hpp
new file mode 100644
index 0000000000..90d0b3174d
--- /dev/null
+++ b/delegate/classic/src/MultiLayerFacade.hpp
@@ -0,0 +1,136 @@
+//
+// Copyright © 2021,2022-2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+// NOTE: the MultiLayerFacade class is a utility class which makes a chain
+// of operators look like a single IConnectableLayer, with the first
+// layer in the chain supplying the input slots and the last supplying
+// the output slots. It enables us, for example, to simulate a
+// TensorFlow Lite FloorDiv operator by chaining a Div layer followed
+// by a Floor layer and passing them as a single unit to the code that
+// connects up the graph as the delegate proceeds to build up the
+// Arm NN subgraphs.
+//
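+// A minimal usage sketch (illustrative only: it assumes Div and Floor layers have
+// already been added to the network being built, and is not part of this file's API):
+//
+// armnn::IConnectableLayer* divLayer = network->AddDivisionLayer();
+// armnn::IConnectableLayer* floorLayer = network->AddFloorLayer();
+// divLayer->GetOutputSlot(0).Connect(floorLayer->GetInputSlot(0));
+//
+// MultiLayerFacade floorDiv(divLayer, floorLayer);
+// // floorDiv now presents Div's input slots and Floor's output slots as one layer.
+//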
+
+#include <common/include/ProfilingGuid.hpp>
+#include <armnn/INetwork.hpp>
+
+namespace armnnDelegate
+{
+
+class MultiLayerFacade : public armnn::IConnectableLayer
+{
+public:
+ MultiLayerFacade() :
+ m_FirstLayer(nullptr), m_LastLayer(nullptr) {}
+
+ MultiLayerFacade(armnn::IConnectableLayer* firstLayer, armnn::IConnectableLayer* lastLayer) :
+ m_FirstLayer(firstLayer), m_LastLayer(lastLayer) {}
+
+ MultiLayerFacade(const MultiLayerFacade& obj) :
+ m_FirstLayer(obj.m_FirstLayer), m_LastLayer(obj.m_LastLayer) {}
+
+ ~MultiLayerFacade() {} // we don't own the pointers
+
+ MultiLayerFacade& operator=(const MultiLayerFacade& obj)
+ {
+ m_FirstLayer = obj.m_FirstLayer;
+ m_LastLayer = obj.m_LastLayer;
+ return *this;
+ }
+
+ void AssignValues(armnn::IConnectableLayer* firstLayer, armnn::IConnectableLayer* lastLayer)
+ {
+ m_FirstLayer = firstLayer;
+ m_LastLayer = lastLayer;
+ }
+
+ virtual const char* GetName() const override
+ {
+ return m_FirstLayer->GetName();
+ }
+
+ virtual unsigned int GetNumInputSlots() const override
+ {
+ return m_FirstLayer->GetNumInputSlots();
+ }
+
+ virtual unsigned int GetNumOutputSlots() const override
+ {
+ return m_LastLayer->GetNumOutputSlots();
+ }
+
+ virtual const armnn::IInputSlot& GetInputSlot(unsigned int index) const override
+ {
+ return m_FirstLayer->GetInputSlot(index);
+ }
+
+ virtual armnn::IInputSlot& GetInputSlot(unsigned int index) override
+ {
+ return m_FirstLayer->GetInputSlot(index);
+ }
+
+ virtual const armnn::IOutputSlot& GetOutputSlot(unsigned int index) const override
+ {
+ return m_LastLayer->GetOutputSlot(index);
+ }
+
+ virtual armnn::IOutputSlot& GetOutputSlot(unsigned int index) override
+ {
+ return m_LastLayer->GetOutputSlot(index);
+ }
+
+ virtual std::vector<armnn::TensorShape> InferOutputShapes(
+ const std::vector<armnn::TensorShape>& inputShapes) const override
+ {
+ // NOTE: this function is not expected to be used. If it is, it may need to be
+ // overridden for particular sequences of operators.
+ return m_FirstLayer->InferOutputShapes(inputShapes);
+ }
+
+ virtual LayerGuid GetGuid() const override
+ {
+ return m_FirstLayer->GetGuid();
+ }
+
+ virtual void ExecuteStrategy(armnn::IStrategy& strategy) const override
+ {
+ // This function is not expected to be used, so no implementation is provided.
+ // If one were required, and the chain contained more than two operators, a way to
+ // record the intermediate layers would be needed so that they could be visited.
+ // The same applies to BackendSelectionHint below.
+ }
+
+ virtual void BackendSelectionHint(armnn::Optional<armnn::BackendId> backend) override
+ {
+ // This function is not expected to be used, so no implementation is provided
+ }
+
+ virtual armnn::LayerType GetType() const override
+ {
+ return m_FirstLayer->GetType();
+ }
+
+ virtual const armnn::BaseDescriptor& GetParameters() const override { return m_NullDescriptor; }
+
+ void SetBackendId(const armnn::BackendId& id) override {}
+
+protected:
+ /// Retrieve the handles to the constant values stored by the layer.
+ /// @return A vector of the constant tensors stored by this layer.
+ ConstantTensors GetConstantTensorsByRef() override { return {}; }
+ ImmutableConstantTensors GetConstantTensorsByRef() const override { return {}; }
+
+private:
+ armnn::IConnectableLayer* m_FirstLayer;
+ armnn::IConnectableLayer* m_LastLayer;
+
+ // To satisfy the GetParameters method we need to hand back a NullDescriptor
+ armnn::NullDescriptor m_NullDescriptor;
+};
+
+} // namespace armnnDelegate
diff --git a/delegate/classic/src/Normalization.hpp b/delegate/classic/src/Normalization.hpp
new file mode 100644
index 0000000000..ef2e524369
--- /dev/null
+++ b/delegate/classic/src/Normalization.hpp
@@ -0,0 +1,162 @@
+//
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <tensorflow/lite/builtin_ops.h>
+#include <tensorflow/lite/c/builtin_op_data.h>
+#include <tensorflow/lite/c/common.h>
+#include <tensorflow/lite/minimal_logging.h>
+
+namespace armnnDelegate
+{
+
+TfLiteStatus VisitL2NormalizationOperator(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ TfLiteNode* tfLiteNode,
+ int nodeIndex,
+ int32_t operatorCode)
+{
+ TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+ TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+
+ const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
+ const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
+ if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+
+ const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
+ if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+
+ const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
+ const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
+
+ armnn::L2NormalizationDescriptor descriptor;
+ descriptor.m_DataLayout = armnn::DataLayout::NHWC;
+
+ bool isSupported = false;
+ armnn::BackendId setBackend;
+ auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
+ {
+ FORWARD_LAYER_SUPPORT_FUNC("L2_NORMALIZATION",
+ tfLiteContext,
+ IsL2NormalizationSupported,
+ delegateData.m_Backends,
+ isSupported,
+ setBackend,
+ inputTensorInfo,
+ outInfo,
+ descriptor);
+ };
+
+ if (!delegateData.m_Network)
+ {
+ validateFunc(outputTensorInfo, isSupported);
+ return isSupported ? kTfLiteOk : kTfLiteError;
+ }
+
+ // Add a L2Normalization layer
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddL2NormalizationLayer(descriptor);
+ ARMNN_ASSERT(layer != nullptr);
+ layer->SetBackendId(setBackend);
+
+ armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
+ outputSlot.SetTensorInfo(outputTensorInfo);
+
+ // try to connect the Constant Inputs if there are any
+ if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
+
+ // Connect
+ return Connect(layer, tfLiteNode, delegateData);
+}
+
+
+TfLiteStatus VisitLocalResponseNormalizationOperator(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ TfLiteNode* tfLiteNode,
+ int nodeIndex,
+ int32_t normalizationOperatorCode)
+{
+ TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+ TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+
+ const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
+ const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
+ if (!IsValid(tfLiteContext, tfLiteInputTensor, normalizationOperatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+
+ const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
+ if (!IsValid(tfLiteContext, tfLiteOutputTensor, normalizationOperatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+
+ const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
+ const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
+
+ armnn::NormalizationDescriptor descriptor;
+ descriptor.m_DataLayout = armnn::DataLayout::NHWC;
+ descriptor.m_NormChannelType = armnn::NormalizationAlgorithmChannel::Across;
+ descriptor.m_NormMethodType = armnn::NormalizationAlgorithmMethod::LocalBrightness;
+
+ auto* params = reinterpret_cast<TfLiteLocalResponseNormParams*>(tfLiteNode->builtin_data);
+ descriptor.m_NormSize = params->radius;
+ descriptor.m_K = params->bias;
+ descriptor.m_Alpha = params->alpha;
+ descriptor.m_Beta = params->beta;
+
+ // ArmNN expects normSize to be the full size of the normalization window
+ descriptor.m_NormSize = 1 + (2 * descriptor.m_NormSize);
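+ // e.g. a TfLite radius of 2 yields m_NormSize = 1 + (2 * 2) = 5.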
+
+ bool isSupported = false;
+ armnn::BackendId setBackend;
+ auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
+ {
+ FORWARD_LAYER_SUPPORT_FUNC("NORMALIZATION",
+ tfLiteContext,
+ IsNormalizationSupported,
+ delegateData.m_Backends,
+ isSupported,
+ setBackend,
+ inputTensorInfo,
+ outInfo,
+ descriptor);
+ };
+
+ if (!delegateData.m_Network)
+ {
+ validateFunc(outputTensorInfo, isSupported);
+ return isSupported ? kTfLiteOk : kTfLiteError;
+ }
+
+ // Add a Normalization layer
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddNormalizationLayer(descriptor);
+ ARMNN_ASSERT(layer != nullptr);
+ layer->SetBackendId(setBackend);
+
+ armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
+ outputSlot.SetTensorInfo(outputTensorInfo);
+
+ // try to connect the Constant Inputs if there are any
+ if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
+
+ // Connect
+ return Connect(layer, tfLiteNode, delegateData);
+}
+
+} // namespace armnnDelegate
diff --git a/delegate/classic/src/Pack.hpp b/delegate/classic/src/Pack.hpp
new file mode 100644
index 0000000000..99c8b804ff
--- /dev/null
+++ b/delegate/classic/src/Pack.hpp
@@ -0,0 +1,122 @@
+//
+// Copyright © 2021,2022-2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <tensorflow/lite/builtin_ops.h>
+#include <tensorflow/lite/c/builtin_op_data.h>
+#include <tensorflow/lite/c/common.h>
+#include <tensorflow/lite/minimal_logging.h>
+
+namespace armnnDelegate
+{
+
+TfLiteStatus VisitPackOperator(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ TfLiteNode* tfLiteNode,
+ int nodeIndex,
+ int32_t operatorCode)
+{
+ unsigned int numInputs = tfLiteNode->inputs->size;
+ if (numInputs < 1)
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext, "TfLiteArmnnDelegate: Must have at least one input in (%d != %d) in node #%d",
+ 1, numInputs, nodeIndex);
+ return kTfLiteError;
+ }
+
+ TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+
+ const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
+
+ // Validate all inputs and get TensorInfo
+ std::vector<armnn::TensorInfo> inputTensorInfos;
+ for (unsigned int i = 0; i < numInputs; ++i)
+ {
+ const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[i]];
+ if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+
+ armnn::TensorInfo inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
+ inputTensorInfos.emplace_back(inputTensorInfo);
+ }
+
+ // Convert input tensors to const armnn::TensorInfo* type for FORWARD_LAYER_SUPPORT_FUNC.
+ std::vector<const armnn::TensorInfo*> inputConstTensorInfos;
+ std::transform(inputTensorInfos.begin(),
+ inputTensorInfos.end(),
+ std::back_inserter(inputConstTensorInfos),
+ [](armnn::TensorInfo& t)->const armnn::TensorInfo*{ return &t; });
+
+ // Validate output and get TensorInfo
+ const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
+ if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+
+ const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
+
+ armnn::StackDescriptor desc;
+ desc.m_NumInputs = static_cast<uint32_t>(numInputs);
+
+ // Get axis from TfLite parameters
+ auto* params = reinterpret_cast<TfLitePackParams*>(tfLiteNode->builtin_data);
+ desc.m_Axis = static_cast<uint32_t>(params->axis);
+
+ // Use the tensor shape of the first input as the "correct" input shape in the descriptor
+ desc.m_InputShape = inputTensorInfos[0].GetShape();
+
+ // Check if supported
+ bool isSupported = false;
+ armnn::BackendId setBackend;
+ auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
+ {
+ FORWARD_LAYER_SUPPORT_FUNC("STACK",
+ tfLiteContext,
+ IsStackSupported,
+ delegateData.m_Backends,
+ isSupported,
+ setBackend,
+ inputConstTensorInfos,
+ outputTensorInfo,
+ desc);
+ };
+
+ // If m_Network is a nullptr, we are in the validation phase: a prerequisite TfLite callback
+ // is querying whether the operator is supported.
+ // If supported, VisitPackOperator will be called again to add the layer to the network as seen below
+ if (!delegateData.m_Network)
+ {
+ validateFunc(outputTensorInfo, isSupported);
+ return isSupported ? kTfLiteOk : kTfLiteError;
+ }
+
+ // The TfLite Pack operator is equivalent to the ArmNN Stack operator
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddStackLayer(desc);
+ ARMNN_ASSERT(layer != nullptr);
+ layer->SetBackendId(setBackend);
+
+ // Connect the Constant Inputs
+ auto inputsTensorsProcess = ProcessInputs(layer,
+ delegateData,
+ tfLiteContext,
+ tfLiteNode);
+ if (inputsTensorsProcess == kTfLiteError)
+ {
+ return inputsTensorsProcess;
+ }
+
+ armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
+ outputSlot.SetTensorInfo(outputTensorInfo);
+
+ // Connect
+ return Connect(layer, tfLiteNode, delegateData);
+}
+
+} // namespace armnnDelegate
diff --git a/delegate/classic/src/Pad.hpp b/delegate/classic/src/Pad.hpp
new file mode 100644
index 0000000000..440a3d023c
--- /dev/null
+++ b/delegate/classic/src/Pad.hpp
@@ -0,0 +1,179 @@
+//
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <tensorflow/lite/builtin_ops.h>
+#include <tensorflow/lite/c/builtin_op_data.h>
+#include <tensorflow/lite/c/common.h>
+#include <tensorflow/lite/minimal_logging.h>
+
+namespace armnnDelegate
+{
+
+TfLiteStatus VisitPadOperator(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ TfLiteNode* tfLiteNode,
+ int nodeIndex,
+ int32_t tfLitePadOperatorCode)
+{
+ TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+
+ switch(tfLitePadOperatorCode)
+ {
+ case kTfLiteBuiltinMirrorPad:
+ case kTfLiteBuiltinPad:
+ TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 2, nodeIndex));
+ break;
+ case kTfLiteBuiltinPadv2:
+ TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 3, nodeIndex));
+ break;
+ default:
+ return kTfLiteError;
+ }
+
+ const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
+ const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
+ const TfLiteTensor& tfLitepaddingTensor = tfLiteTensors[tfLiteNode->inputs->data[1]];
+
+ if (IsDynamicTensor(tfLiteInputTensor))
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
+ tfLitePadOperatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+
+ const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
+ if (IsDynamicTensor(tfLiteOutputTensor))
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: Dynamic output tensors are not supported in operator #%d node #%d: ",
+ tfLitePadOperatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+
+ const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
+ const armnn::TensorInfo& paddingTensorInfo = GetTensorInfoForTfLiteTensor(tfLitepaddingTensor);
+ const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
+
+ // Get the padding data from the input tensor
+ auto* paddingData = tflite::GetTensorData<int32_t>(&tfLitepaddingTensor);
+
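+ // The paddings tensor is a flattened [rank, 2] array of (before, after) pairs,
+ // e.g. { 0, 0, 1, 1, 2, 2, 0, 0 } pads dimension 1 by (1, 1) and dimension 2 by (2, 2).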
+ size_t step = 2;
+ armnn::PadDescriptor descriptor;
+ for (unsigned int i = 0; i < paddingTensorInfo.GetNumElements() / step; ++i)
+ {
+ descriptor.m_PadList.emplace_back(paddingData[i * step], paddingData[i * step + 1]);
+ }
+
+ if (tfLitePadOperatorCode == kTfLiteBuiltinPad && inputTensorInfo.IsQuantized())
+ {
+ descriptor.m_PadValue = inputTensorInfo.GetQuantizationOffset();
+ }
+ else if (tfLitePadOperatorCode == kTfLiteBuiltinPadv2)
+ {
+ const TfLiteTensor& tfLitepaddingValue = tfLiteTensors[tfLiteNode->inputs->data[2]];
+ armnn::TensorInfo paddingValueTensorInfo = GetTensorInfoForTfLiteTensor(tfLitepaddingValue);
+ if (paddingValueTensorInfo.GetNumElements() != 1)
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: Multiple padding value are not supported in operator #%d node #%d: ",
+ tfLitePadOperatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+ // Get the padding value from the input tensor
+ switch (tfLitepaddingValue.type)
+ {
+ case kTfLiteFloat32:
+ descriptor.m_PadValue = tflite::GetTensorData<float>(&tfLitepaddingValue)[0];
+ break;
+ case kTfLiteUInt8:
+ descriptor.m_PadValue = tflite::GetTensorData<uint8>(&tfLitepaddingValue)[0];
+ break;
+ case kTfLiteInt8:
+ descriptor.m_PadValue = tflite::GetTensorData<int8>(&tfLitepaddingValue)[0];
+ break;
+ default:
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: Padding value datatype is not supported in operator #%d node #%d: ",
+ tfLitePadOperatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+ }
+ else if (tfLitePadOperatorCode == kTfLiteBuiltinMirrorPad)
+ {
+ TfLiteMirrorPaddingParams* options = reinterpret_cast<TfLiteMirrorPaddingParams*>(tfLiteNode->builtin_data);
+
+ if (options->mode == TfLiteMirrorPaddingMode::kTfLiteMirrorPaddingReflect)
+ {
+ descriptor.m_PaddingMode = armnn::PaddingMode::Reflect;
+ }
+ else if (options->mode == TfLiteMirrorPaddingMode::kTfLiteMirrorPaddingSymmetric)
+ {
+ descriptor.m_PaddingMode = armnn::PaddingMode::Symmetric;
+ }
+ else
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: PaddingMode must be either REFLECT or SYMMETRIC in operator #%d node #%d: ",
+ tfLitePadOperatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+
+ // If padding mode is Reflect then both paddings must be no greater than inputShape(i) - 1.
+ // If padding mode is Symmetric then both paddings must be no greater than inputShape(i).
+ auto inputShape = inputTensorInfo.GetShape();
+ auto padList = descriptor.m_PadList;
+
+ const unsigned int isReflect =
+ static_cast<unsigned int>(descriptor.m_PaddingMode == armnn::PaddingMode::Reflect);
+ for (unsigned int i = 0; i < padList.size(); ++i)
+ {
+ if (padList.at(i).first > (inputShape[i] - isReflect) ||
+ padList.at(i).second > (inputShape[i] - isReflect))
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: Padding values must be less (Reflect) or "
+ "equal (Symmetric) to the dimension size in operator #%d node #%d: ",
+ tfLitePadOperatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+ }
+ }
+
+ armnn::BackendId setBackend;
+ if (!delegateData.m_Network)
+ {
+ bool isSupported = false;
+ FORWARD_LAYER_SUPPORT_FUNC("PAD",
+ tfLiteContext,
+ IsPadSupported,
+ delegateData.m_Backends,
+ isSupported,
+ setBackend,
+ inputTensorInfo,
+ outputTensorInfo,
+ descriptor);
+
+ return isSupported ? kTfLiteOk : kTfLiteError;
+ }
+
+ armnn::IConnectableLayer* padLayer = delegateData.m_Network->AddPadLayer(descriptor);
+ ARMNN_ASSERT(padLayer != nullptr);
+ padLayer->SetBackendId(setBackend);
+
+ armnn::IOutputSlot& outputSlot = padLayer->GetOutputSlot(0);
+ outputSlot.SetTensorInfo(outputTensorInfo);
+
+ return Connect(padLayer, tfLiteNode, delegateData);
+}
+
+} // namespace armnnDelegate
diff --git a/delegate/classic/src/Pooling.hpp b/delegate/classic/src/Pooling.hpp
new file mode 100644
index 0000000000..2de40613fb
--- /dev/null
+++ b/delegate/classic/src/Pooling.hpp
@@ -0,0 +1,327 @@
+//
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <DelegateUtils.hpp>
+
+#include <tensorflow/lite/builtin_ops.h>
+#include <tensorflow/lite/c/builtin_op_data.h>
+#include <tensorflow/lite/c/common.h>
+#include <tensorflow/lite/minimal_logging.h>
+#include <flatbuffers/flexbuffers.h>
+
+namespace armnnDelegate
+{
+
+TfLiteStatus VisitPooling2dOperator(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ TfLiteNode* tfLiteNode,
+ int nodeIndex,
+ int32_t tfLitePoolingOperatorCode)
+{
+ TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+ TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+
+ const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
+ const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
+ if (IsDynamicTensor(tfLiteInputTensor))
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
+ tfLitePoolingOperatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+
+ const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
+ if (IsDynamicTensor(tfLiteOutputTensor))
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: Dynamic output tensors are not supported in operator #%d node #%d: ",
+ tfLitePoolingOperatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+
+ const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
+ const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
+
+ auto* tfLiteNodeParameters = reinterpret_cast<TfLitePoolParams*>(tfLiteNode->builtin_data);
+ TfLiteFusedActivation activationType = kTfLiteActNone;
+ if (tfLiteNodeParameters)
+ {
+ activationType = tfLiteNodeParameters->activation;
+ TfLiteStatus activationStatus = ValidateFusedActivationOperator(delegateData, tfLiteContext, outputTensorInfo,
+ outputTensorInfo, activationType);
+ if (activationStatus != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
+ }
+
+ armnn::PoolingAlgorithm poolingAlgorithm;
+ switch(tfLitePoolingOperatorCode)
+ {
+ case kTfLiteBuiltinAveragePool2d:
+ poolingAlgorithm = armnn::PoolingAlgorithm::Average;
+ break;
+ case kTfLiteBuiltinL2Pool2d:
+ poolingAlgorithm = armnn::PoolingAlgorithm::L2;
+ break;
+ case kTfLiteBuiltinMaxPool2d:
+ poolingAlgorithm = armnn::PoolingAlgorithm::Max;
+ break;
+ default:
+ return kTfLiteError;
+ }
+
+ armnn::Pooling2dDescriptor descriptor;
+ descriptor.m_PoolType = poolingAlgorithm;
+
+ descriptor.m_PoolWidth = tfLiteNodeParameters->filter_width;
+ descriptor.m_PoolHeight = tfLiteNodeParameters->filter_height;
+ descriptor.m_StrideX = tfLiteNodeParameters->stride_width;
+ descriptor.m_StrideY = tfLiteNodeParameters->stride_height;
+ descriptor.m_DataLayout = armnn::DataLayout::NHWC;
+
+ unsigned int inputHeight = inputTensorInfo.GetShape()[1];
+ unsigned int inputWidth = inputTensorInfo.GetShape()[2];
+
+ CalcPadding(inputHeight, descriptor.m_PoolHeight, descriptor.m_StrideY, 1u,
+ descriptor.m_PadTop, descriptor.m_PadBottom, tfLiteNodeParameters->padding);
+ CalcPadding(inputWidth, descriptor.m_PoolWidth, descriptor.m_StrideX, 1u,
+ descriptor.m_PadLeft, descriptor.m_PadRight, tfLiteNodeParameters->padding);
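+ // Illustrative SAME-padding arithmetic (assuming CalcPadding follows the usual TfLite
+ // convention): input 7, filter 3, stride 2 -> output ceil(7 / 2) = 4,
+ // total padding max((4 - 1) * 2 + 3 - 7, 0) = 2, split as 1 before and 1 after.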
+
+ bool isSupported = false;
+ armnn::BackendId setBackend;
+ auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
+ {
+ FORWARD_LAYER_SUPPORT_FUNC("POOLING_2D",
+ tfLiteContext,
+ IsPooling2dSupported,
+ delegateData.m_Backends,
+ isSupported,
+ setBackend,
+ inputTensorInfo,
+ outputTensorInfo,
+ descriptor);
+ };
+
+ if (!delegateData.m_Network)
+ {
+ validateFunc(outputTensorInfo, isSupported);
+ return isSupported ? kTfLiteOk : kTfLiteError;
+ }
+
+ armnn::IConnectableLayer* poolingLayer = delegateData.m_Network->AddPooling2dLayer(descriptor);
+ ARMNN_ASSERT(poolingLayer != nullptr);
+ poolingLayer->SetBackendId(setBackend);
+
+ armnn::IOutputSlot& outputSlot = poolingLayer->GetOutputSlot(0);
+ outputSlot.SetTensorInfo(outputTensorInfo);
+
+ // try to connect the Constant Inputs if there are any
+ if (ProcessInputs(poolingLayer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
+
+ if (Connect(poolingLayer, tfLiteNode, delegateData) != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
+
+ // Check and create activation
+ return FusedActivation(tfLiteContext, tfLiteNode, activationType, poolingLayer, 0, delegateData);
+}
+
+TfLiteStatus VisitPooling3dOperator(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ TfLiteNode* tfLiteNode,
+ int nodeIndex,
+ std::string customOperatorName)
+{
+ TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+ TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+
+ const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
+ const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
+ if (IsDynamicTensor(tfLiteInputTensor))
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
+ customOperatorName.c_str(), nodeIndex);
+ return kTfLiteError;
+ }
+
+ const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
+ if (IsDynamicTensor(tfLiteOutputTensor))
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: Dynamic output tensors are not supported in operator #%d node #%d: ",
+ customOperatorName.c_str(), nodeIndex);
+ return kTfLiteError;
+ }
+ // Set the input and output info
+ const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
+ const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
+
+ // Custom Operators are defined by the name string associated with the operator. Use this to determine
+ // which pooling algorithm to create the armnn operator with. L2 Pooling3D is unsupported in TfLite.
+ armnn::PoolingAlgorithm poolingAlgorithm;
+ if (customOperatorName == "MaxPool3D")
+ {
+ poolingAlgorithm = armnn::PoolingAlgorithm::Max;
+ }
+ else if (customOperatorName == "AveragePool3D")
+ {
+ poolingAlgorithm = armnn::PoolingAlgorithm::Average;
+ }
+ else
+ {
+ return kTfLiteError;
+ }
+ // Create the armnn pool3d descriptor and set the algorithm parsed above.
+ armnn::Pooling3dDescriptor descriptor;
+ descriptor.m_PoolType = poolingAlgorithm;
+
+ // custom_initial_data and custom_initial_data_size are void* variables defined in the tflite registration
+ // used to access the custom option buffer for the operator.
+ auto custom_data = tfLiteNode->custom_initial_data;
+ auto custom_data_size = tfLiteNode->custom_initial_data_size;
+ // Reinterpret the void* to a byte buffer to access the options data in the flexbuffers map.
+ const flexbuffers::Map& m = flexbuffers::GetRoot(reinterpret_cast<const uint8_t*>(custom_data),
+ custom_data_size).AsMap();
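+ // Illustrative contents of the custom-options map read below:
+ // { "ksize": [1, D, H, W, 1], "strides": [1, Z, Y, X, 1],
+ //   "padding": "SAME", "activation": "kTfLiteActNone" }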
+ // poolDims is a vector of [ 1, Depth, Height, Width, 1 ]
+ const auto poolDims = m["ksize"].AsTypedVector();
+ descriptor.m_PoolWidth = poolDims[3].AsInt32();
+ descriptor.m_PoolHeight = poolDims[2].AsInt32();
+ descriptor.m_PoolDepth = poolDims[1].AsInt32();
+
+ // strideDims is a vector of [ 1, Z, Y, X, 1]
+ const auto strideDims = m["strides"].AsTypedVector();
+ descriptor.m_StrideX = strideDims[3].AsInt32();
+ descriptor.m_StrideY = strideDims[2].AsInt32();
+ descriptor.m_StrideZ = strideDims[1].AsInt32();
+ descriptor.m_DataLayout = armnn::DataLayout::NDHWC;
+
+ unsigned int inputDepth = inputTensorInfo.GetShape()[1];
+ unsigned int inputHeight = inputTensorInfo.GetShape()[2];
+ unsigned int inputWidth = inputTensorInfo.GetShape()[3];
+
+ // CalcPadding expects a TfLitePadding type. Parse flexbuffers to extract padding string and create TfLitePadding.
+ std::string paddingStr = m["padding"].AsString().str();
+ TfLitePadding padding;
+ if (paddingStr == "VALID")
+ {
+ padding = kTfLitePaddingValid;
+ }
+ else if (paddingStr == "SAME")
+ {
+ padding = kTfLitePaddingSame;
+ }
+ else
+ {
+ padding = kTfLitePaddingUnknown;
+ }
+ // Calculates padding for each pooling dimension separately
+ CalcPadding(inputHeight, descriptor.m_PoolHeight, descriptor.m_StrideY, 1u,
+ descriptor.m_PadTop, descriptor.m_PadBottom, padding);
+ CalcPadding(inputWidth, descriptor.m_PoolWidth, descriptor.m_StrideX, 1u,
+ descriptor.m_PadLeft, descriptor.m_PadRight, padding);
+ CalcPadding(inputDepth, descriptor.m_PoolDepth, descriptor.m_StrideZ, 1u,
+ descriptor.m_PadFront, descriptor.m_PadBack, padding);
+
+ // Check activation by parsing the string from the flexbuffer map
+ std::string activationTypeStr = m["activation"].AsString().str();
+ TfLiteFusedActivation activationType = kTfLiteActNone;
+
+ if (activationTypeStr == "kTfLiteActRelu")
+ {
+ activationType = kTfLiteActRelu;
+ }
+ else if (activationTypeStr == "kTfLiteActReluN1To1")
+ {
+ activationType = kTfLiteActReluN1To1;
+ }
+ else if (activationTypeStr == "kTfLiteActRelu6")
+ {
+ activationType = kTfLiteActRelu6;
+ }
+ else if (activationTypeStr == "kTfLiteActTanh")
+ {
+ activationType = kTfLiteActTanh;
+ }
+ else if (activationTypeStr == "kTfLiteActSignBit")
+ {
+ activationType = kTfLiteActSignBit;
+ }
+ else if (activationTypeStr == "kTfLiteActSigmoid")
+ {
+ activationType = kTfLiteActSigmoid;
+ }
+ else
+ {
+ activationType = kTfLiteActNone;
+ }
+
+ TfLiteStatus activationStatus = ValidateFusedActivationOperator(delegateData, tfLiteContext, outputTensorInfo,
+ outputTensorInfo, activationType);
+ if (activationStatus != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
+
+ // Validate the output info.
+ bool isSupported = false;
+ armnn::BackendId setBackend;
+ auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported) {
+ FORWARD_LAYER_SUPPORT_FUNC("POOLING_3D",
+ tfLiteContext,
+ IsPooling3dSupported,
+ delegateData.m_Backends,
+ isSupported,
+ setBackend,
+ inputTensorInfo,
+ outputTensorInfo,
+ descriptor);
+ };
+
+ if (!delegateData.m_Network)
+ {
+ validateFunc(outputTensorInfo, isSupported);
+ return isSupported ? kTfLiteOk : kTfLiteError;
+ }
+
+ // Create the Layer
+ armnn::IConnectableLayer* poolingLayer = delegateData.m_Network->AddPooling3dLayer(descriptor);
+ ARMNN_ASSERT(poolingLayer != nullptr);
+ poolingLayer->SetBackendId(setBackend);
+
+ // Create and set output slots
+ armnn::IOutputSlot& outputSlot = poolingLayer->GetOutputSlot(0);
+ outputSlot.SetTensorInfo(outputTensorInfo);
+
+ // try to connect the Constant Inputs if there are any
+ if (ProcessInputs(poolingLayer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
+
+ if (Connect(poolingLayer, tfLiteNode, delegateData) != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
+
+ return FusedActivation(tfLiteContext, tfLiteNode, activationType, poolingLayer, 0, delegateData);
+}
+
+} // namespace armnnDelegate
diff --git a/delegate/classic/src/Prelu.hpp b/delegate/classic/src/Prelu.hpp
new file mode 100644
index 0000000000..71a04a744e
--- /dev/null
+++ b/delegate/classic/src/Prelu.hpp
@@ -0,0 +1,108 @@
+//
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <DelegateUtils.hpp>
+
+#include <tensorflow/lite/builtin_ops.h>
+#include <tensorflow/lite/c/builtin_op_data.h>
+#include <tensorflow/lite/c/common.h>
+#include <tensorflow/lite/minimal_logging.h>
+
+namespace armnnDelegate
+{
+
+TfLiteStatus ValidatePreluOperator(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ const armnn::TensorInfo& inputInfo,
+ const armnn::TensorInfo& alphaInfo,
+ const armnn::TensorInfo& outputInfo)
+{
+ bool isSupported = false;
+ auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
+ {
+ FORWARD_LAYER_SUPPORT_FUNC("PRELU",
+ tfLiteContext,
+ IsPreluSupported,
+ delegateData.m_Backends,
+ isSupported,
+ armnn::BackendId(),
+ inputInfo,
+ alphaInfo,
+ outputInfo);
+ };
+
+ validateFunc(outputInfo, isSupported);
+ return isSupported ? kTfLiteOk : kTfLiteError;
+}
+
+TfLiteStatus VisitPreluOperator(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ TfLiteNode* tfLiteNode,
+ int nodeIndex,
+ int32_t operatorCode)
+{
+ TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 2, nodeIndex));
+ TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+
+ const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
+
+ const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
+ if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+
+ const TfLiteTensor& tfLiteAlphaTensor = tfLiteTensors[tfLiteNode->inputs->data[1]];
+ if (!IsValid(tfLiteContext, tfLiteAlphaTensor, operatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+
+ const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
+ if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+
+ const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
+ const armnn::TensorInfo& alphaTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteAlphaTensor);
+ const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
+
+ if (!delegateData.m_Network)
+ {
+ return ValidatePreluOperator(delegateData,
+ tfLiteContext,
+ inputTensorInfo,
+ alphaTensorInfo,
+ outputTensorInfo);
+ }
+
+ armnn::IConnectableLayer* preluLayer = delegateData.m_Network->AddPreluLayer();
+ ARMNN_ASSERT(preluLayer != nullptr);
+
+ bool isConstantAlpha = tflite::IsConstantTensor(&tfLiteAlphaTensor);
+
+ // Add constant layer for constant alpha
+ if (isConstantAlpha)
+ {
+ auto constAlphaTensor = armnn::ConstTensor(alphaTensorInfo, tfLiteAlphaTensor.data.data);
+
+ armnn::IConnectableLayer* constLayer = delegateData.m_Network->AddConstantLayer(constAlphaTensor);
+ ARMNN_ASSERT(constLayer != nullptr);
+
+ constLayer->GetOutputSlot(0).SetTensorInfo(alphaTensorInfo);
+ constLayer->GetOutputSlot(0).Connect(preluLayer->GetInputSlot(1));
+ }
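+ // A non-constant alpha is wired up through the graph instead: Connect() below attaches
+ // the second TfLite input to the Prelu layer's input slot 1.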
+
+ armnn::IOutputSlot& outputSlot = preluLayer->GetOutputSlot(0);
+ outputSlot.SetTensorInfo(outputTensorInfo);
+
+ // Connect
+ return Connect(preluLayer, tfLiteNode, delegateData);
+}
+
+} // namespace armnnDelegate
\ No newline at end of file
diff --git a/delegate/classic/src/Quantization.hpp b/delegate/classic/src/Quantization.hpp
new file mode 100644
index 0000000000..f1192960e4
--- /dev/null
+++ b/delegate/classic/src/Quantization.hpp
@@ -0,0 +1,171 @@
+//
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <armnn/utility/IgnoreUnused.hpp>
+
+#include <tensorflow/lite/builtin_ops.h>
+#include <tensorflow/lite/c/builtin_op_data.h>
+#include <tensorflow/lite/c/common.h>
+#include <tensorflow/lite/minimal_logging.h>
+
+namespace armnnDelegate
+{
+
+TfLiteStatus VisitDequantizeOperator(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ TfLiteNode* tfLiteNode,
+ int nodeIndex,
+ int32_t tfLiteDequantizeOperatorCode)
+{
+ TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+ TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+
+ const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
+ const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
+ if (IsDynamicTensor(tfLiteInputTensor))
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
+ tfLiteDequantizeOperatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+
+ const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
+ if (IsDynamicTensor(tfLiteOutputTensor))
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: Dynamic output tensors are not supported in operator #%d node #%d: ",
+ tfLiteDequantizeOperatorCode, nodeIndex);
+
+ return kTfLiteError;
+ }
+ const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
+ armnn::TensorInfo outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
+
+ UpdateConstantTensorOutputs(inputTensorInfo, outputTensorInfo);
+
+ bool isSupported = false;
+ armnn::BackendId setBackend;
+ auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
+ {
+ FORWARD_LAYER_SUPPORT_FUNC("DEQUANTIZE",
+ tfLiteContext,
+ IsDequantizeSupported,
+ delegateData.m_Backends,
+ isSupported,
+ setBackend,
+ inputTensorInfo,
+ outputTensorInfo);
+ };
+
+ if (!delegateData.m_Network)
+ {
+ validateFunc(outputTensorInfo, isSupported);
+ return isSupported ? kTfLiteOk : kTfLiteError;
+ }
+
+ armnn::IConnectableLayer* dequantizeLayer = delegateData.m_Network->AddDequantizeLayer();
+ ARMNN_ASSERT(dequantizeLayer != nullptr);
+ dequantizeLayer->SetBackendId(setBackend);
+
+ armnn::IOutputSlot& outputSlot = dequantizeLayer->GetOutputSlot(0);
+ outputSlot.SetTensorInfo(outputTensorInfo);
+
+ auto inputsTensorsProcess = ProcessInputs(dequantizeLayer,
+ delegateData,
+ tfLiteContext,
+ tfLiteNode);
+ if (inputsTensorsProcess == kTfLiteError)
+ {
+ return inputsTensorsProcess;
+ }
+
+ return Connect(dequantizeLayer, tfLiteNode, delegateData);
+}
+
+TfLiteStatus VisitQuantizeOperator(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ TfLiteNode* tfLiteNode,
+ int nodeIndex,
+ int32_t tfLiteQuantizeOperatorCode)
+{
+ TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+ TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+
+ const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
+ const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
+ if (IsDynamicTensor(tfLiteInputTensor))
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
+ tfLiteQuantizeOperatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+
+ const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
+ if (IsDynamicTensor(tfLiteOutputTensor))
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: Dynamic output tensors are not supported in operator #%d node #%d: ",
+ tfLiteQuantizeOperatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+
+ // Only affine per-layer quantization is supported.
+ if (!IsAffineQuantization(tfLiteOutputTensor))
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: Only affine per-layer quantization is supported in operator #%d node #%d: ",
+ tfLiteQuantizeOperatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+
+ const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
+ const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
+
+ bool isSupported = false;
+ armnn::BackendId setBackend;
+ auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
+ {
+ FORWARD_LAYER_SUPPORT_FUNC("QUANTIZE",
+ tfLiteContext,
+ IsQuantizeSupported,
+ delegateData.m_Backends,
+ isSupported,
+ setBackend,
+ inputTensorInfo,
+ outputTensorInfo);
+ };
+
+ if (!delegateData.m_Network)
+ {
+ validateFunc(outputTensorInfo, isSupported);
+ return isSupported ? kTfLiteOk : kTfLiteError;
+ }
+
+ armnn::IConnectableLayer* quantizeLayer = delegateData.m_Network->AddQuantizeLayer();
+ ARMNN_ASSERT(quantizeLayer != nullptr);
+ quantizeLayer->SetBackendId(setBackend);
+
+ armnn::IOutputSlot& outputSlot = quantizeLayer->GetOutputSlot(0);
+ outputSlot.SetTensorInfo(outputTensorInfo);
+
+ // try to connect the Constant Inputs if there are any
+ if (ProcessInputs(quantizeLayer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
+
+ return Connect(quantizeLayer, tfLiteNode, delegateData);
+}
+
+} // namespace armnnDelegate
diff --git a/delegate/classic/src/Redefine.hpp b/delegate/classic/src/Redefine.hpp
new file mode 100644
index 0000000000..83c42d046e
--- /dev/null
+++ b/delegate/classic/src/Redefine.hpp
@@ -0,0 +1,289 @@
+//
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <armnn/utility/IgnoreUnused.hpp>
+
+#include <DelegateUtils.hpp>
+
+#include <tensorflow/lite/builtin_ops.h>
+#include <tensorflow/lite/c/builtin_op_data.h>
+#include <tensorflow/lite/c/common.h>
+#include <tensorflow/lite/minimal_logging.h>
+#include <numeric>
+
+namespace armnnDelegate
+{
+
+TfLiteStatus VisitCastOperator(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ TfLiteNode* tfLiteNode,
+ int nodeIndex,
+ int32_t operatorCode)
+{
+ TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+ TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+
+ const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
+ const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
+ if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+
+ const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
+ if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+
+ const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
+ const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
+
+ bool isSupported = false;
+ armnn::BackendId setBackend;
+ auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
+ {
+ FORWARD_LAYER_SUPPORT_FUNC("CAST",
+ tfLiteContext,
+ IsCastSupported,
+ delegateData.m_Backends,
+ isSupported,
+ setBackend,
+ inputTensorInfo,
+ outInfo);
+ };
+
+ // If m_Network is a nullptr, we are in the validation phase: a prerequisite TfLite callback
+ // is querying whether the operator is supported.
+ // If supported, VisitCastOperator will be called again to add the layer to the network as seen further below
+ if (!delegateData.m_Network)
+ {
+ validateFunc(outputTensorInfo, isSupported);
+ return isSupported ? kTfLiteOk : kTfLiteError;
+ }
+
+ // Add a Cast layer
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddCastLayer();
+ ARMNN_ASSERT(layer != nullptr);
+ layer->SetBackendId(setBackend);
+
+ armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
+ outputSlot.SetTensorInfo(outputTensorInfo);
+
+ // try to connect the Constant Inputs if there are any
+ if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
+
+ // Connect
+ return Connect(layer, tfLiteNode, delegateData);
+}
+
+
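+// Resolves an optional -1 ("stretch") dimension in the target shape.
+// e.g. for an input of 2 x 3 x 4 (24 elements) and targetShape { -1, 6 }, the product
+// accumulated with a seed of -1 is (-1) * (-1) * 6 = 6, so the stretch dimension
+// becomes 24 / 6 = 4, giving a target shape of { 4, 6 }.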
+TfLiteStatus CreateOutputTensorShape(const armnn::TensorInfo& inputTensorInfo,
+ const std::vector<int32_t>& targetShape,
+ armnn::ReshapeDescriptor& reshapeDesc)
+{
+ std::vector<unsigned int> outputDims(targetShape.begin(), targetShape.end());
+ const auto stretchDim = std::find(targetShape.begin(), targetShape.end(), -1);
+
+ if (stretchDim != targetShape.end())
+ {
+ if (std::find(std::next(stretchDim), targetShape.end(), -1) != targetShape.end())
+ {
+ // More than one stretch dimension (-1) is invalid; return kTfLiteError (the caller logs the error)
+ return kTfLiteError;
+ }
+
+ auto targetNumElements =
+ armnn::numeric_cast<unsigned int>(
+ std::accumulate(targetShape.begin(), targetShape.end(), -1, std::multiplies<int32_t>()));
+
+ auto stretchIndex = static_cast<size_t>(std::distance(targetShape.begin(), stretchDim));
+ outputDims[stretchIndex] = inputTensorInfo.GetNumElements() / targetNumElements;
+ }
+
+ armnn::TensorShape outputShape = armnn::TensorShape(static_cast<unsigned int>(outputDims.size()),
+ outputDims.data());
+ reshapeDesc.m_TargetShape = outputShape;
+ return kTfLiteOk;
+}
+
+TfLiteStatus VisitReshapeOperator(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ TfLiteNode* tfLiteNode,
+ int nodeIndex,
+ int32_t operatorCode)
+{
+ auto numInputs = tfLiteNode->inputs->size;
+
+ if (numInputs == 2)
+ {
+ TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 2, nodeIndex));
+ }
+ else
+ {
+ TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+ }
+ TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+
+ const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
+ const TfLiteTensor& tfLiteInputTensor0 = tfLiteTensors[tfLiteNode->inputs->data[0]];
+ if (!IsValid(tfLiteContext, tfLiteInputTensor0, operatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+
+ const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
+ if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+
+ const armnn::TensorInfo& inputTensorInfo0 = GetTensorInfoForTfLiteTensor(tfLiteInputTensor0);
+ const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
+
+ armnn::ReshapeDescriptor reshapeDesc;
+ std::vector<int32_t> targetShape;
+
+ TfLiteReshapeParams* reshapeOptions = reinterpret_cast<TfLiteReshapeParams*>(tfLiteNode->builtin_data);
+
+    // The new shape can be defined by either a second input tensor or by a builtin option; we need to check both.
+    // Options might be set without valid data, so we also need to check that the dimensions are in a valid range.
+ if (reshapeOptions && reshapeOptions->num_dimensions > 0 && reshapeOptions->num_dimensions <= 8)
+ {
+        for (int i = 0; i < reshapeOptions->num_dimensions; ++i)
+ {
+ targetShape.push_back(reshapeOptions->shape[i]);
+ }
+ }
+ else if (numInputs == 2)
+ {
+ // Get shape from the second input tensor
+ const TfLiteTensor& tfLiteShapeInputTensor = tfLiteTensors[tfLiteNode->inputs->data[1]];
+ if (!IsValid(tfLiteContext, tfLiteShapeInputTensor, operatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+
+ if (tfLiteShapeInputTensor.dims->size != 1)
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(tfLiteContext,
+ "TfLiteArmnnDelegate: Target 'shape' input is not a 1D tensor in "
+ "operator #%d node #%d: Falling back to TfLiteOptions.",
+ operatorCode, nodeIndex);
+ }
+ else
+ {
+ // Get the shape data out of the input tensor
+ auto* shapeTensorDataPtr = tflite::GetTensorData<int32_t>(&tfLiteShapeInputTensor);
+ auto shapeTensorNumValues = tfLiteShapeInputTensor.dims->data[0];
+            for (auto i = 0; i < shapeTensorNumValues; ++i)
+ {
+ targetShape.push_back(*(shapeTensorDataPtr+i));
+ }
+ }
+ }
+ else
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(tfLiteContext,
+ "Target shape not defined in reshape parameters or input tensor. "
+ "At least one method required in operator #%d node #%d: ",
+ operatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+
+ // Use the data to create the required tensor shape.
+ if (CreateOutputTensorShape(inputTensorInfo0, targetShape, reshapeDesc) != kTfLiteOk)
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(tfLiteContext,
+ "TfLiteArmnnDelegate: At most one component of shape can be -1 in: "
+ "operator #%d node #%d: ",
+ operatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+
+ if (reshapeDesc.m_TargetShape.GetNumElements() != inputTensorInfo0.GetNumElements())
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: Reshape, number of elements in output shape does not match input "
+ "operator #%d node #%d: ",
+ operatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+
+ bool isSupported = false;
+ armnn::BackendId setBackend;
+ auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
+ {
+ FORWARD_LAYER_SUPPORT_FUNC("RESHAPE",
+ tfLiteContext,
+ IsReshapeSupported,
+ delegateData.m_Backends,
+ isSupported,
+ setBackend,
+ inputTensorInfo0,
+ outInfo,
+ reshapeDesc);
+ };
+
+ if (!delegateData.m_Network)
+ {
+ validateFunc(outputTensorInfo, isSupported);
+ return isSupported ? kTfLiteOk : kTfLiteError;
+ }
+
+    armnn::IConnectableLayer* layer = delegateData.m_Network->AddReshapeLayer(reshapeDesc);
+    ARMNN_ASSERT(layer != nullptr);
+    layer->SetBackendId(setBackend);
+
+ armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
+ outputSlot.SetTensorInfo(outputTensorInfo);
+
+    // Try to connect the constant inputs if there are any
+    if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
+
+ // Connect
+ return Connect(layer, tfLiteNode, delegateData);
+}
+
+TfLiteStatus VisitSqueezeOperator(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ TfLiteNode* tfLiteNode,
+ int nodeIndex,
+ int32_t operatorCode)
+{
+ armnn::IgnoreUnused(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ operatorCode);
+
+ return kTfLiteError;
+}
+
+TfLiteStatus VisitExpandDimsOperator(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ TfLiteNode* tfLiteNode,
+ int nodeIndex,
+ int32_t operatorCode)
+{
+ armnn::IgnoreUnused(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ operatorCode);
+
+ return kTfLiteError;
+}
+
+} // namespace armnnDelegate
diff --git a/delegate/classic/src/Reduce.hpp b/delegate/classic/src/Reduce.hpp
new file mode 100644
index 0000000000..2d8b462cd2
--- /dev/null
+++ b/delegate/classic/src/Reduce.hpp
@@ -0,0 +1,146 @@
+//
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <tensorflow/lite/builtin_ops.h>
+#include <tensorflow/lite/c/builtin_op_data.h>
+#include <tensorflow/lite/c/common.h>
+#include <tensorflow/lite/kernels/internal/tensor_ctypes.h>
+#include <tensorflow/lite/minimal_logging.h>
+
+#include <algorithm>
+#include <iterator>
+#include <set>
+#include <vector>
+
+namespace armnnDelegate
+{
+
+TfLiteStatus VisitReduceOperator(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ TfLiteNode* tfLiteNode,
+ int nodeIndex,
+ int32_t reduceOperatorCode)
+{
+ TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 2, nodeIndex));
+ TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+
+ const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
+ const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
+ if (!IsValid(tfLiteContext, tfLiteInputTensor, reduceOperatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+
+ const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
+ if (!IsValid(tfLiteContext, tfLiteOutputTensor, reduceOperatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+
+ const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
+ const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
+
+ // Get const axis value from model and set it to descriptor.
+ const TfLiteTensor& tfLiteAxisTensor = tfLiteTensors[tfLiteNode->inputs->data[1]];
+ if (!IsValid(tfLiteContext, tfLiteAxisTensor, reduceOperatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+
+ const armnn::TensorInfo& axisTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteAxisTensor);
+ auto* axisTensorData = tflite::GetTensorData<int32_t>(&tfLiteAxisTensor);
+
+ std::vector<int32_t> axis;
+    // Add the axis data to a vector, to be converted to unsigned int and assigned to the descriptor axis.
+ if (axisTensorData != nullptr)
+ {
+ for (unsigned int i = 0; i < axisTensorInfo.GetNumElements(); ++i)
+ {
+ axis.emplace_back(axisTensorData[i]);
+ }
+ }
+ else
+ {
+ for (unsigned int i = 0; i < inputTensorInfo.GetNumDimensions(); ++i)
+ {
+ axis.push_back(i);
+ }
+ }
+
+ // Convert the axis to unsigned int and remove duplicates.
+ unsigned int rank = inputTensorInfo.GetNumDimensions();
+ std::set<unsigned int> uniqueAxis;
+ std::transform(axis.begin(),
+ axis.end(),
+ std::inserter(uniqueAxis, uniqueAxis.begin()),
+ [rank](int i)->unsigned int{ return (i + rank) % rank; });
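+    // (i + rank) % rank maps a negative axis onto its positive equivalent
+    // (e.g. -1 on a rank-4 tensor becomes 3), and std::set drops duplicates.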
+
+ armnn::ReduceDescriptor desc;
+ desc.m_vAxis.assign(uniqueAxis.begin(), uniqueAxis.end());
+
+ auto* reducerParameters = reinterpret_cast<TfLiteReducerParams*>(tfLiteNode->builtin_data);
+ desc.m_KeepDims = reducerParameters->keep_dims;
+ if (reduceOperatorCode == kTfLiteBuiltinReduceMax)
+ {
+ desc.m_ReduceOperation = armnn::ReduceOperation::Max;
+ }
+ else if (reduceOperatorCode == kTfLiteBuiltinReduceMin)
+ {
+ desc.m_ReduceOperation = armnn::ReduceOperation::Min;
+ }
+ else if (reduceOperatorCode == kTfLiteBuiltinSum)
+ {
+ desc.m_ReduceOperation = armnn::ReduceOperation::Sum;
+ }
+ else if (reduceOperatorCode == kTfLiteBuiltinReduceProd)
+ {
+ desc.m_ReduceOperation = armnn::ReduceOperation::Prod;
+ }
+ else
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: Unsupported Reduction Operator #%d node #%d: ",
+ reduceOperatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+
+ bool isSupported = false;
+ armnn::BackendId setBackend;
+ auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
+ {
+ FORWARD_LAYER_SUPPORT_FUNC("REDUCE",
+ tfLiteContext,
+ IsReduceSupported,
+ delegateData.m_Backends,
+ isSupported,
+ setBackend,
+ inputTensorInfo,
+ outInfo,
+ desc);
+ };
+
+ if (!delegateData.m_Network)
+ {
+ validateFunc(outputTensorInfo, isSupported);
+ return isSupported ? kTfLiteOk : kTfLiteError;
+ }
+
+    // Add a Reduce layer
+    armnn::IConnectableLayer* layer = delegateData.m_Network->AddReduceLayer(desc);
+    ARMNN_ASSERT(layer != nullptr);
+    layer->SetBackendId(setBackend);
+
+ armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
+ outputSlot.SetTensorInfo(outputTensorInfo);
+
+    // Try to connect the constant inputs if there are any
+    if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
+
+ // Connect
+ return Connect(layer, tfLiteNode, delegateData);
+}
+
+} // namespace armnnDelegate
diff --git a/delegate/classic/src/Resize.hpp b/delegate/classic/src/Resize.hpp
new file mode 100644
index 0000000000..33c6c6ecd8
--- /dev/null
+++ b/delegate/classic/src/Resize.hpp
@@ -0,0 +1,205 @@
+//
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <DelegateUtils.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
+
+#include <armnn/Descriptors.hpp>
+
+#include <tensorflow/lite/builtin_ops.h>
+#include <tensorflow/lite/c/builtin_op_data.h>
+#include <tensorflow/lite/c/common.h>
+#include <tensorflow/lite/minimal_logging.h>
+#include <tensorflow/lite/kernels/internal/tensor_ctypes.h>
+
+namespace armnnDelegate
+{
+
+TfLiteStatus ValidateResizeOperator(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ const armnn::TensorInfo& inputInfo,
+ const armnn::TensorInfo& outputInfo,
+ const armnn::ResizeDescriptor& descriptor)
+{
+ bool isSupported = false;
+ FORWARD_LAYER_SUPPORT_FUNC("RESIZE",
+ tfLiteContext,
+ IsResizeSupported,
+ delegateData.m_Backends,
+ isSupported,
+ armnn::BackendId(),
+ inputInfo,
+ outputInfo,
+ descriptor);
+
+ return isSupported ? kTfLiteOk : kTfLiteError;
+}
+
+TfLiteStatus VisitResizeOperator(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ TfLiteNode* tfLiteNode,
+ int nodeIndex,
+ int32_t resizeOperatorCode)
+{
+ TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 2, nodeIndex));
+ TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+
+ const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
+
+ // The first input contains the data of the image that should be resized [batch, height, width, channels]
+ const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
+ if (IsDynamicTensor(tfLiteInputTensor))
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
+ resizeOperatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+
+ // The second input contains a size tensor. The size tensor contains two integer values
+ // that describe the new height and width of the image [new_height, new_width]
+ const TfLiteTensor& tfLiteSizeTensor = tfLiteTensors[tfLiteNode->inputs->data[1]];
+ if (IsDynamicTensor(tfLiteSizeTensor))
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
+ resizeOperatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+
+ // The output tensor should have the shape [batch, new_height, new_width, channels]
+ const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
+ if (IsDynamicTensor(tfLiteOutputTensor))
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: Dynamic output tensors are not supported in operator #%d node #%d: ",
+ resizeOperatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+
+ const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
+ const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
+
+ std::string layerName("Resize");
+
+ // Fill descriptor
+ armnn::ResizeDescriptor desc;
+ switch (resizeOperatorCode)
+ {
+ case kTfLiteBuiltinResizeBilinear:
+ {
+ desc.m_Method = armnn::ResizeMethod::Bilinear;
+
+ layerName += "Bilinear:" + std::to_string(nodeIndex);
+
+            TfLiteResizeBilinearParams* bilinearOptions =
+                    reinterpret_cast<TfLiteResizeBilinearParams*>(tfLiteNode->builtin_data);
+
+            desc.m_AlignCorners = bilinearOptions->align_corners;
+            desc.m_HalfPixelCenters = bilinearOptions->half_pixel_centers;
+ break;
+ }
+ case kTfLiteBuiltinResizeNearestNeighbor:
+ {
+ desc.m_Method = armnn::ResizeMethod::NearestNeighbor;
+ layerName += "NearestNeighbor:" + std::to_string(nodeIndex);
+
+ TfLiteResizeNearestNeighborParams* nearestNeighborOptions =
+ reinterpret_cast<TfLiteResizeNearestNeighborParams*>(tfLiteNode->builtin_data);
+
+ desc.m_AlignCorners = nearestNeighborOptions->align_corners;
+ desc.m_HalfPixelCenters = nearestNeighborOptions->half_pixel_centers;
+ break;
+ }
+ default:
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: Unknown TfLite built in operation for Resize. Given operator: #%d node #%d: ",
+ resizeOperatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+ }
+
+    // In ArmNN the values of the size input tensor [new_height, new_width] are saved in the operator
+    // descriptor. We have to read them from the input tensor and write them to the descriptor.
+
+ auto* sizeTensorDataPtr = tflite::GetTensorData<int32_t>(&tfLiteSizeTensor);
+ auto sizeTensorNumDimensions = tfLiteSizeTensor.dims->size;
+    // The size tensor must be a 1D tensor -> [new_height, new_width]
+    if (sizeTensorNumDimensions != 1)
+    {
+        TF_LITE_MAYBE_KERNEL_LOG(
+            tfLiteContext,
+            "TfLiteArmnnDelegate: The Size-Input-Tensor of the Resize operation must be a "
+            "1D tensor. Operator: #%d node #%d: ",
+            resizeOperatorCode, nodeIndex);
+        return kTfLiteError;
+    }
+
+ // Get number of values in the size tensor
+ auto sizeTensorNumValues = tfLiteSizeTensor.dims->data[0];
+    if (sizeTensorNumValues == 0)
+    {
+        TF_LITE_MAYBE_KERNEL_LOG(
+            tfLiteContext,
+            "TfLiteArmnnDelegate: The Size-Input-Tensor of the Resize operation must not be "
+            "empty. Operator: #%d node #%d: ",
+            resizeOperatorCode, nodeIndex);
+        return kTfLiteError;
+    }
+    else if (sizeTensorNumValues != 2)
+    {
+        TF_LITE_MAYBE_KERNEL_LOG(
+            tfLiteContext,
+            "TfLiteArmnnDelegate: The Size-Input-Tensor of the Resize operation must contain "
+            "2 values [new_height, new_width], but a tensor with #%d values was given. "
+            "Operator: #%d node #%d: ",
+            sizeTensorNumValues, resizeOperatorCode, nodeIndex);
+        return kTfLiteError;
+    }
+    // Get the size tensor data
+    std::vector<int32_t> sizeTensorData(sizeTensorDataPtr, sizeTensorDataPtr + sizeTensorNumValues);
+
+    desc.m_TargetHeight = static_cast<uint32_t>(sizeTensorData[0]);
+    desc.m_TargetWidth = static_cast<uint32_t>(sizeTensorData[1]);
+ desc.m_DataLayout = armnn::DataLayout::NHWC;
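+    // TfLite tensors are always laid out as NHWC, so the data layout is fixed accordingly.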
+
+ // No network pointer indicates that only support for this operator should be checked
+ if (!delegateData.m_Network)
+ {
+ return ValidateResizeOperator(delegateData,
+ tfLiteContext,
+ inputTensorInfo,
+ outputTensorInfo,
+ desc);
+ }
+
+    armnn::IConnectableLayer* resizeLayer = delegateData.m_Network->AddResizeLayer(desc, layerName.c_str());
+    ARMNN_ASSERT(resizeLayer != nullptr);
+
+ armnn::IOutputSlot& outputSlot = resizeLayer->GetOutputSlot(0);
+ outputSlot.SetTensorInfo(outputTensorInfo);
+
+    // Try to connect the constant inputs if there are any
+    if (ProcessInputs(resizeLayer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
+    {
+        return kTfLiteError;
+    }
+
+ return Connect(resizeLayer, tfLiteNode, delegateData);
+}
+
+} // namespace armnnDelegate
diff --git a/delegate/classic/src/Round.hpp b/delegate/classic/src/Round.hpp
new file mode 100644
index 0000000000..7a060b1d8f
--- /dev/null
+++ b/delegate/classic/src/Round.hpp
@@ -0,0 +1,71 @@
+//
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "SharedFunctions.hpp"
+
+#include <tensorflow/lite/builtin_ops.h>
+#include <tensorflow/lite/c/builtin_op_data.h>
+#include <tensorflow/lite/c/common.h>
+#include <tensorflow/lite/minimal_logging.h>
+
+namespace armnnDelegate
+{
+
+TfLiteStatus VisitFloorOperator(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ TfLiteNode* tfLiteNode,
+ int nodeIndex,
+ int32_t operatorCode)
+{
+ TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+ TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+
+ const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
+ const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
+ if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+
+ const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
+ if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+
+ const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
+    // Note: outputTensorInfo is the only information required when adding the Floor layer itself,
+    // so the remaining lookups could be moved inside the !delegateData.m_Network block below
+    // for efficiency.
+ const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
+
+    // If m_Network is a nullptr, this signals that a prerequisite TfLite callback is required to clarify
+    // support for the operator.
+    // If supported, VisitFloorOperator will be called again to add the layer to the network, as seen further below.
+ if (!delegateData.m_Network)
+ {
+ return ValidateFloorOperator(delegateData, tfLiteContext, inputTensorInfo, outputTensorInfo);
+ }
+
+ // Add a Floor layer
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddFloorLayer();
+ ARMNN_ASSERT(layer != nullptr);
+
+ armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
+ outputSlot.SetTensorInfo(outputTensorInfo);
+
+    // Try to connect the constant inputs if there are any
+    if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
+
+ // Connect
+ return Connect(layer, tfLiteNode, delegateData);
+}
+
+} // namespace armnnDelegate
diff --git a/delegate/classic/src/Shape.hpp b/delegate/classic/src/Shape.hpp
new file mode 100644
index 0000000000..381a87430f
--- /dev/null
+++ b/delegate/classic/src/Shape.hpp
@@ -0,0 +1,95 @@
+//
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <DelegateUtils.hpp>
+
+#include <tensorflow/lite/builtin_ops.h>
+#include <tensorflow/lite/c/builtin_op_data.h>
+#include <tensorflow/lite/c/common.h>
+#include <tensorflow/lite/minimal_logging.h>
+#include <numeric>
+
+namespace armnnDelegate
+{
+
+TfLiteStatus VisitShapeOperator(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ TfLiteNode* tfLiteNode,
+ int nodeIndex,
+ int32_t operatorCode)
+{
+ TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+ TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+
+ const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
+ const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
+ if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+
+ const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
+ if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+
+ const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
+ const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
+
+ auto* shapeParameters = reinterpret_cast<TfLiteShapeParams*>(tfLiteNode->builtin_data);
+    if (shapeParameters->out_type != kTfLiteInt32 && shapeParameters->out_type != kTfLiteInt64)
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: output_type data type is not supported in operator #%d node #%d: ",
+ operatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+
+ bool isSupported = false;
+ armnn::BackendId setBackend;
+ auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
+ {
+ FORWARD_LAYER_SUPPORT_FUNC("SHAPE",
+ tfLiteContext,
+ IsShapeSupported,
+ delegateData.m_Backends,
+ isSupported,
+ setBackend,
+ inputTensorInfo,
+ outInfo);
+ };
+
+    // If m_Network is a nullptr, this signals that a prerequisite TfLite callback is required to clarify
+    // support for the operator.
+    // If supported, VisitShapeOperator will be called again to add the layer to the network, as seen further below.
+ if (!delegateData.m_Network)
+ {
+ validateFunc(outputTensorInfo, isSupported);
+ return isSupported ? kTfLiteOk : kTfLiteError;
+ }
+
+ // Add a Shape layer
+    armnn::IConnectableLayer* layer = delegateData.m_Network->AddShapeLayer();
+    ARMNN_ASSERT(layer != nullptr);
+    layer->SetBackendId(setBackend);
+
+ armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
+ outputSlot.SetTensorInfo(outputTensorInfo);
+
+    // Try to connect the constant inputs if there are any
+    if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
+
+ // Connect
+ return Connect(layer, tfLiteNode, delegateData);
+}
+
+} // namespace armnnDelegate
diff --git a/delegate/classic/src/SharedFunctions.cpp b/delegate/classic/src/SharedFunctions.cpp
new file mode 100644
index 0000000000..8de7d9c933
--- /dev/null
+++ b/delegate/classic/src/SharedFunctions.cpp
@@ -0,0 +1,116 @@
+//
+// Copyright © 2021-2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "SharedFunctions.hpp"
+
+#include <DelegateUtils.hpp>
+
+#include "tensorflow/lite/builtin_ops.h"
+#include "tensorflow/lite/c/builtin_op_data.h"
+#include "tensorflow/lite/c/common.h"
+#include "tensorflow/lite/minimal_logging.h"
+
+namespace armnnDelegate
+{
+
+TfLiteStatus ValidateFloorOperator(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ const armnn::TensorInfo& inputTensorInfo,
+ const armnn::TensorInfo& outputTensorInfo)
+{
+ bool isSupported = false;
+ auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
+ {
+ FORWARD_LAYER_SUPPORT_FUNC("FLOOR",
+ tfLiteContext,
+ IsFloorSupported,
+ delegateData.m_Backends,
+ isSupported,
+ armnn::BackendId(),
+ inputTensorInfo,
+ outInfo);
+ };
+ validateFunc(outputTensorInfo, isSupported);
+ return isSupported ? kTfLiteOk : kTfLiteError;
+}
+
+TfLiteStatus ValidateFusedActivationOperator(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ const armnn::TensorInfo& inputInfo,
+ const armnn::TensorInfo& outputInfo,
+ TfLiteFusedActivation activationType)
+{
+ armnn::ActivationDescriptor activationDesc;
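+    // Map the TfLite fused activation onto the equivalent ArmNN activation function for
+    // validation; m_A and m_B carry the function parameters (e.g. the upper and lower
+    // bound for BoundedReLu).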
+
+ switch (activationType)
+ {
+ case kTfLiteActNone:
+ {
+ // No Activation
+ return kTfLiteOk;
+ }
+ case kTfLiteActRelu:
+ {
+ activationDesc.m_Function = armnn::ActivationFunction::ReLu;
+ break;
+ }
+// The name of kTfLiteActRelu1 changed after TF Lite v2.3
+#if defined(ARMNN_POST_TFLITE_2_3)
+ case kTfLiteActReluN1To1:
+#else
+ case kTfLiteActRelu1:
+#endif
+ {
+ activationDesc.m_Function = armnn::ActivationFunction::BoundedReLu;
+ activationDesc.m_A = 1.0f;
+ activationDesc.m_B = -1.0f;
+ break;
+ }
+ case kTfLiteActRelu6:
+ {
+ activationDesc.m_Function = armnn::ActivationFunction::BoundedReLu;
+ activationDesc.m_A = 6.0f;
+ activationDesc.m_B = 0.0f;
+ break;
+ }
+ case kTfLiteActSigmoid:
+ {
+ activationDesc.m_Function = armnn::ActivationFunction::Sigmoid;
+ break;
+ }
+ case kTfLiteActTanh:
+ {
+ activationDesc.m_Function = armnn::ActivationFunction::TanH;
+ activationDesc.m_A = 1.0f;
+ activationDesc.m_B = 1.0f;
+ break;
+ }
+ default:
+ return kTfLiteError;
+ }
+
+ bool isSupported = false;
+
+ auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
+ {
+ FORWARD_LAYER_SUPPORT_FUNC("ACTIVATION",
+ tfLiteContext,
+ IsActivationSupported,
+ delegateData.m_Backends,
+ isSupported,
+ armnn::BackendId(),
+ inputInfo,
+ outputInfo,
+ activationDesc);
+ };
+ validateFunc(outputInfo, isSupported);
+ return isSupported ? kTfLiteOk : kTfLiteError;
+}
+
+} // namespace armnnDelegate
+
diff --git a/delegate/classic/src/SharedFunctions.hpp b/delegate/classic/src/SharedFunctions.hpp
new file mode 100644
index 0000000000..b03a63ded9
--- /dev/null
+++ b/delegate/classic/src/SharedFunctions.hpp
@@ -0,0 +1,25 @@
+//
+// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <armnn_delegate.hpp>
+
+namespace armnnDelegate
+{
+
+TfLiteStatus ValidateFloorOperator(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ const armnn::TensorInfo& inputTensorInfo,
+ const armnn::TensorInfo& outputTensorInfo);
+
+TfLiteStatus ValidateFusedActivationOperator(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ const armnn::TensorInfo& inputInfo,
+ const armnn::TensorInfo& outputInfo,
+ TfLiteFusedActivation activationType);
+
+} // namespace armnnDelegate
+
diff --git a/delegate/classic/src/Slice.hpp b/delegate/classic/src/Slice.hpp
new file mode 100644
index 0000000000..f19e3327e4
--- /dev/null
+++ b/delegate/classic/src/Slice.hpp
@@ -0,0 +1,141 @@
+//
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <armnn/utility/IgnoreUnused.hpp>
+
+#include <tensorflow/lite/builtin_ops.h>
+#include <tensorflow/lite/c/builtin_op_data.h>
+#include <tensorflow/lite/c/common.h>
+#include <tensorflow/lite/minimal_logging.h>
+
+namespace armnnDelegate
+{
+
+TfLiteStatus VisitSliceOperator(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ TfLiteNode* tfLiteNode,
+ int nodeIndex,
+ int32_t sliceOperatorCode)
+{
+ TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 3, nodeIndex));
+ TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+
+ // Read inputs [input, begin, size]
+ int numInputs = tfLiteNode->inputs->size;
+ std::vector<const TfLiteTensor*> tfLiteInputs;
+ tfLiteInputs.reserve(numInputs);
+ const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
+ for (int i = 0; i < numInputs; i++)
+ {
+ const TfLiteTensor* inputTensor = &tfLiteTensors[tfLiteNode->inputs->data[i]];
+ tfLiteInputs.push_back(inputTensor);
+ if (!IsValid(tfLiteContext, *inputTensor, sliceOperatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+ }
+
+ // We save the begin and size tensors in our descriptor. Therefore we have to read those values from inputs
+ int inputRank = tfLiteInputs[0]->dims->size;
+ auto ReadInt32Input = [&](int inputIndex, std::vector<uint32_t>& outputData) -> TfLiteStatus
+ {
+ if (tfLiteInputs[inputIndex]->type != kTfLiteInt32)
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: The Begin- and Size-Tensors of the Slice operation need to "
+ "be of type int32. Operator: #%d node #%d: ",
+ sliceOperatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+ int rank = tfLiteInputs[inputIndex]->dims->size;
+ if (rank != 1)
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: The Begin- and Size-Tensors of the Slice operation need to "
+ "be a 1D-Tensor. Operator: #%d node #%d: ",
+ sliceOperatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+ int numValues = tfLiteInputs[inputIndex]->dims->data[0];
+ if (numValues != inputRank)
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: The number of values in the Begin- and Size-Tensors of the "
+ "Slice operation need to be equal to the rank of the Input-Tensor. Operator: #%d node #%d: ",
+ sliceOperatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+ // return tensor data
+ auto* tensorDataPtr = tflite::GetTensorData<uint32_t>(tfLiteInputs[inputIndex]);
+ outputData.assign(tensorDataPtr, tensorDataPtr+numValues);
+ return kTfLiteOk;
+ };
+
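+    // armnn::SliceDescriptor stores begin and size as unsigned values, so the int32
+    // tensor data is read back as uint32_t; begin and size are expected to be non-negative here.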
+    std::vector<uint32_t> begin;
+    if (ReadInt32Input(1, begin) != kTfLiteOk)
+    {
+        return kTfLiteError;
+    }
+    std::vector<uint32_t> size;
+    if (ReadInt32Input(2, size) != kTfLiteOk)
+    {
+        return kTfLiteError;
+    }
+
+ // Write all data to the descriptor
+ armnn::SliceDescriptor descriptor(begin, size);
+
+ // Validate output
+ const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
+ if (!IsValid(tfLiteContext, tfLiteOutputTensor, sliceOperatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+
+ const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(*tfLiteInputs[0]);
+ const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
+
+ bool isSupported = false;
+ armnn::BackendId setBackend;
+ auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
+ {
+ FORWARD_LAYER_SUPPORT_FUNC("SLICE",
+ tfLiteContext,
+ IsSliceSupported,
+ delegateData.m_Backends,
+ isSupported,
+ setBackend,
+ inputTensorInfo,
+ outInfo,
+ descriptor);
+ };
+
+ if (!delegateData.m_Network)
+ {
+ validateFunc(outputTensorInfo, isSupported);
+ return isSupported ? kTfLiteOk : kTfLiteError;
+ }
+
+ // Add a Slice layer
+    armnn::IConnectableLayer* layer = delegateData.m_Network->AddSliceLayer(descriptor);
+    ARMNN_ASSERT(layer != nullptr);
+    layer->SetBackendId(setBackend);
+
+ armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
+ outputSlot.SetTensorInfo(outputTensorInfo);
+
+    // Try to connect the constant inputs if there are any
+    if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
+
+ // Connect
+ return Connect(layer, tfLiteNode, delegateData);
+}
+
+} // namespace armnnDelegate
+
diff --git a/delegate/classic/src/Softmax.hpp b/delegate/classic/src/Softmax.hpp
new file mode 100644
index 0000000000..4fbd508437
--- /dev/null
+++ b/delegate/classic/src/Softmax.hpp
@@ -0,0 +1,155 @@
+//
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <DelegateUtils.hpp>
+
+#include <tensorflow/lite/builtin_ops.h>
+#include <tensorflow/lite/c/builtin_op_data.h>
+#include <tensorflow/lite/c/common.h>
+#include <tensorflow/lite/minimal_logging.h>
+
+namespace armnnDelegate
+{
+
+TfLiteStatus ValidateSoftmaxOperator(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ const armnn::TensorInfo& inputInfo,
+ const armnn::TensorInfo& outputTensorInfo,
+ const armnn::SoftmaxDescriptor& descriptor)
+{
+ bool isSupported = false;
+ FORWARD_LAYER_SUPPORT_FUNC("SOFTMAX",
+ tfLiteContext,
+ IsSoftmaxSupported,
+ delegateData.m_Backends,
+ isSupported,
+ armnn::BackendId(),
+ inputInfo,
+ outputTensorInfo,
+ descriptor);
+ return isSupported ? kTfLiteOk : kTfLiteError;
+}
+
+TfLiteStatus ValidateLogSoftmaxOperator(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ const armnn::TensorInfo& inputInfo,
+ const armnn::TensorInfo& outputTensorInfo,
+ const armnn::LogSoftmaxDescriptor& descriptor)
+{
+ bool isSupported = false;
+ FORWARD_LAYER_SUPPORT_FUNC("LOG_SOFTMAX",
+ tfLiteContext,
+ IsLogSoftmaxSupported,
+ delegateData.m_Backends,
+ isSupported,
+ armnn::BackendId(),
+ inputInfo,
+ outputTensorInfo,
+ descriptor);
+ return isSupported ? kTfLiteOk : kTfLiteError;
+}
+
+TfLiteStatus VisitSoftmaxOperator(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ TfLiteNode* tfLiteNode,
+ int nodeIndex,
+ int32_t softmaxOperatorCode)
+{
+ TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+ TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+
+ const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
+ const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
+ if (IsDynamicTensor(tfLiteInputTensor))
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: Dynamic input tensors are not supported in node #%d: ",
+ nodeIndex);
+ return kTfLiteError;
+ }
+ const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
+ if (IsDynamicTensor(tfLiteOutputTensor))
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: Dynamic output tensors are not supported in node #%d: ",
+ nodeIndex);
+ return kTfLiteError;
+ }
+
+ const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
+ const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
+
+ if (!delegateData.m_Network)
+ {
+ switch(softmaxOperatorCode)
+ {
+ case kTfLiteBuiltinSoftmax:
+ {
+ armnn::SoftmaxDescriptor descriptor;
+ auto* params = reinterpret_cast<TfLiteSoftmaxParams*>(tfLiteNode->builtin_data);
+ descriptor.m_Beta = params->beta;
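+                // Beta scales the logits before the exponentiation; TfLite defaults it to 1.0.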
+ return ValidateSoftmaxOperator(delegateData,
+ tfLiteContext,
+ inputTensorInfo,
+ outputTensorInfo,
+ descriptor);
+ }
+ case kTfLiteBuiltinLogSoftmax:
+ {
+ armnn::LogSoftmaxDescriptor descriptor;
+ return ValidateLogSoftmaxOperator(delegateData,
+ tfLiteContext,
+ inputTensorInfo,
+ outputTensorInfo,
+ descriptor);
+ }
+ default:
+ return kTfLiteError;
+ }
+ }
+
+ armnn::IConnectableLayer* softmaxLayer = nullptr;
+
+ switch(softmaxOperatorCode)
+ {
+ case kTfLiteBuiltinSoftmax:
+ {
+ armnn::SoftmaxDescriptor descriptor;
+ auto* params = reinterpret_cast<TfLiteSoftmaxParams*>(tfLiteNode->builtin_data);
+ descriptor.m_Beta = params->beta;
+ softmaxLayer = delegateData.m_Network->AddSoftmaxLayer(descriptor);
+ break;
+ }
+ case kTfLiteBuiltinLogSoftmax:
+ {
+ armnn::LogSoftmaxDescriptor descriptor;
+ softmaxLayer = delegateData.m_Network->AddLogSoftmaxLayer(descriptor);
+ break;
+ }
+ default:
+ return kTfLiteError;
+ }
+ ARMNN_ASSERT(softmaxLayer != nullptr);
+
+ armnn::IOutputSlot& outputSlot = softmaxLayer->GetOutputSlot(0);
+ outputSlot.SetTensorInfo(outputTensorInfo);
+
+    // Try to connect the constant inputs if there are any
+    if (ProcessInputs(softmaxLayer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
+
+ // Connect
+ return Connect(softmaxLayer, tfLiteNode, delegateData);
+}
+
+} // namespace armnnDelegate
diff --git a/delegate/classic/src/SpaceDepth.hpp b/delegate/classic/src/SpaceDepth.hpp
new file mode 100644
index 0000000000..cc7f03413d
--- /dev/null
+++ b/delegate/classic/src/SpaceDepth.hpp
@@ -0,0 +1,152 @@
+//
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <tensorflow/lite/builtin_ops.h>
+#include <tensorflow/lite/c/builtin_op_data.h>
+#include <tensorflow/lite/c/common.h>
+#include <tensorflow/lite/minimal_logging.h>
+
+namespace armnnDelegate
+{
+
+TfLiteStatus VisitSpaceToDepthOperator(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ TfLiteNode* tfLiteNode,
+ int nodeIndex,
+ int32_t operatorCode)
+{
+ TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+ TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+
+ const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
+ const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
+ if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+
+ const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
+ if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+
+ const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
+ const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
+
+ armnn::SpaceToDepthDescriptor descriptor;
+ auto* params = reinterpret_cast<TfLiteSpaceToDepthParams*>(tfLiteNode->builtin_data);
+ descriptor.m_BlockSize = params->block_size;
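+    // SPACE_TO_DEPTH rearranges blockSize x blockSize spatial blocks into the channel
+    // dimension: [N, H, W, C] -> [N, H/blockSize, W/blockSize, C*blockSize*blockSize].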
+
+ bool isSupported = false;
+ armnn::BackendId setBackend;
+ auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
+ {
+ FORWARD_LAYER_SUPPORT_FUNC("SPACE_TO_DEPTH",
+ tfLiteContext,
+ IsSpaceToDepthSupported,
+ delegateData.m_Backends,
+ isSupported,
+ setBackend,
+ inputTensorInfo,
+ outInfo,
+ descriptor);
+ };
+
+ if (!delegateData.m_Network)
+ {
+ validateFunc(outputTensorInfo, isSupported);
+ return isSupported ? kTfLiteOk : kTfLiteError;
+ }
+
+ // Add a SpaceToDepth layer
+    armnn::IConnectableLayer* layer = delegateData.m_Network->AddSpaceToDepthLayer(descriptor);
+    ARMNN_ASSERT(layer != nullptr);
+    layer->SetBackendId(setBackend);
+
+    // Try to connect the constant inputs if there are any
+    if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
+
+ armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
+ outputSlot.SetTensorInfo(outputTensorInfo);
+
+ // Connect
+ return Connect(layer, tfLiteNode, delegateData);
+}
+
+TfLiteStatus VisitDepthToSpaceOperator(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ TfLiteNode* tfLiteNode,
+ int nodeIndex,
+ int32_t operatorCode)
+{
+ TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+ TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+
+ const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
+ const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
+ if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+
+ const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
+ if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+
+ const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
+ const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
+
+ armnn::DepthToSpaceDescriptor descriptor;
+ auto* params = reinterpret_cast<TfLiteDepthToSpaceParams*>(tfLiteNode->builtin_data);
+ descriptor.m_BlockSize = params->block_size;
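+    // DEPTH_TO_SPACE performs the inverse rearrangement:
+    // [N, H, W, C] -> [N, H*blockSize, W*blockSize, C/(blockSize*blockSize)].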
+
+ bool isSupported = false;
+ armnn::BackendId setBackend;
+ auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
+ {
+ FORWARD_LAYER_SUPPORT_FUNC("DEPTH_TO_SPACE",
+ tfLiteContext,
+ IsDepthToSpaceSupported,
+ delegateData.m_Backends,
+ isSupported,
+ setBackend,
+ inputTensorInfo,
+ outInfo,
+ descriptor);
+ };
+
+ if (!delegateData.m_Network)
+ {
+ validateFunc(outputTensorInfo, isSupported);
+ return isSupported ? kTfLiteOk : kTfLiteError;
+ }
+
+ // Add a DepthToSpace layer
+    armnn::IConnectableLayer* layer = delegateData.m_Network->AddDepthToSpaceLayer(descriptor);
+    ARMNN_ASSERT(layer != nullptr);
+    layer->SetBackendId(setBackend);
+
+ armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
+ outputSlot.SetTensorInfo(outputTensorInfo);
+
+    // Try to connect the constant inputs if there are any
+    if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
+
+ // Connect
+ return Connect(layer, tfLiteNode, delegateData);
+}
+
+} // namespace armnnDelegate
diff --git a/delegate/classic/src/Split.hpp b/delegate/classic/src/Split.hpp
new file mode 100644
index 0000000000..fc193baf86
--- /dev/null
+++ b/delegate/classic/src/Split.hpp
@@ -0,0 +1,347 @@
+//
+// Copyright © 2020,2022-2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <DelegateUtils.hpp>
+
+#include <algorithm>
+#include <cstring>
+#include <iterator>
+#include <vector>
+
+namespace armnnDelegate
+{
+
+constexpr unsigned int MaxNumOfTensorDimensions = 5U;
+
+TfLiteStatus VisitSplitOperator(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ TfLiteNode* tfLiteNode,
+ int nodeIndex,
+ int32_t tfLiteSplitOperatorCode)
+{
+ TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 2, nodeIndex));
+
+ auto* splitParameters = reinterpret_cast<TfLiteSplitParams*>(tfLiteNode->builtin_data);
+ const unsigned int numSplits = NonNegative(splitParameters->num_splits, nodeIndex);
+
+ TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, numSplits, nodeIndex));
+
+ const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
+ const TfLiteTensor& tfLiteAxisTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
+ if (!IsValid(tfLiteContext, tfLiteAxisTensor, tfLiteSplitOperatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+
+ const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[1]];
+ if (!IsValid(tfLiteContext, tfLiteInputTensor, tfLiteSplitOperatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+
+ const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
+
+ ARMNN_ASSERT(GetTensorInfoForTfLiteTensor(tfLiteAxisTensor).GetNumElements() == 1);
+ auto* axisTensorDataPtr = tflite::GetTensorData<int32_t>(&tfLiteAxisTensor);
+ std::vector<int32_t> axisTensorData(axisTensorDataPtr, axisTensorDataPtr + 1);
+ int32_t axis = axisTensorData[0];
+
+ auto inputDimensions = static_cast<int32_t>(inputTensorInfo.GetNumDimensions());
+    if (axis < -inputDimensions || axis >= inputDimensions)
+    {
+        // Square bracket denotes inclusive n while parenthesis denotes exclusive n
+        // E.g. Rank 4 tensor can have axis in range [-4, 3)
+        // -1 == 3, -2 == 2, -3 == 1, -4 == 0
+        TF_LITE_MAYBE_KERNEL_LOG(
+            tfLiteContext,
+            "TfLiteArmnnDelegate: Operation has invalid axis: #%d. Axis must be in range [-n, n) in node #%d:",
+            axis, nodeIndex);
+        return kTfLiteError;
+    }
+ const unsigned int splitDim = ComputeWrappedIndex(axis, inputTensorInfo.GetNumDimensions());
+
+ std::vector<armnn::TensorInfo> outputs;
+ for (unsigned int i = 0; i < numSplits; ++i)
+ {
+ const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[i]];
+ if (!IsValid(tfLiteContext, tfLiteOutputTensor, tfLiteSplitOperatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+ outputs.push_back(GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true));
+ }
+ const std::vector<std::reference_wrapper<armnn::TensorInfo>> outputTensorInfos(outputs.begin(), outputs.end());
+
+ auto inputDimSize = inputTensorInfo.GetNumDimensions();
+ if (inputDimSize > MaxNumOfTensorDimensions)
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: The number of dimensions: #%d for input tensors of the split op cannot be greater "
+ "than #%d in node #%d: ", inputDimSize, MaxNumOfTensorDimensions, nodeIndex);
+ return kTfLiteError;
+ }
+
+ std::vector<unsigned int> splitterDimSizes(inputDimSize);
+
+ // Add current input shape to splitterDimSizes
+ for (unsigned int i = 0; i < inputDimSize; ++i)
+ {
+ splitterDimSizes[i] = inputTensorInfo.GetShape()[i];
+ }
+
+ if (splitterDimSizes[splitDim] % numSplits != 0)
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: Number of splits #%d must evenly divide the dimension #%d in node #%d: ",
+ numSplits, splitterDimSizes[splitDim], nodeIndex);
+ return kTfLiteError;
+ }
+ splitterDimSizes[splitDim] /= numSplits;
+
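+    // For SPLIT every output view has the same shape; only the origin along the split
+    // dimension advances by the (equal) split size from one view to the next.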
+ armnn::SplitterDescriptor splitDescriptor(numSplits, inputDimSize);
+ for (unsigned int j = 0; j < numSplits; ++j)
+ {
+ // Set the size of the views.
+ for (unsigned int dimIdx = 0; dimIdx < splitterDimSizes.size(); ++dimIdx)
+ {
+ splitDescriptor.SetViewSize(j, dimIdx, splitterDimSizes[dimIdx]);
+ }
+ splitDescriptor.SetViewOriginCoord(j, splitDim, splitterDimSizes[splitDim] * j);
+ }
+
+ armnn::BackendId setBackend;
+ if (!delegateData.m_Network)
+ {
+ // Check if supported
+ bool isSupported = false;
+ FORWARD_LAYER_SUPPORT_FUNC("SPLIT",
+ tfLiteContext,
+ IsSplitterSupported,
+ delegateData.m_Backends,
+ isSupported,
+ setBackend,
+ inputTensorInfo,
+ outputTensorInfos,
+ splitDescriptor);
+ return isSupported ? kTfLiteOk : kTfLiteError;
+ }
+
+    armnn::IConnectableLayer* layer = delegateData.m_Network->AddSplitterLayer(splitDescriptor);
+    ARMNN_ASSERT(layer != nullptr);
+    layer->SetBackendId(setBackend);
+
+ for (unsigned int k = 0; k < layer->GetNumOutputSlots(); ++k)
+ {
+ layer->GetOutputSlot(k).SetTensorInfo(outputs[k]);
+ }
+
+ // Connect the input slots
+ delegateData.m_OutputSlotForNode[tfLiteNode->inputs->data[1]]->Connect(layer->GetInputSlot(0));
+
+ // Prepare output slots
+ for (unsigned int outputIndex = 0; outputIndex < layer->GetNumOutputSlots(); ++outputIndex)
+ {
+ armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(outputIndex);
+ delegateData.m_OutputSlotForNode[
+ static_cast<unsigned long>(tfLiteNode->outputs->data[outputIndex])] = &outputSlot;
+ }
+
+ return kTfLiteOk;
+}
+
+TfLiteStatus VisitSplitVOperator(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ TfLiteNode* tfLiteNode,
+ int nodeIndex,
+ int32_t tfLiteSplitVOperatorCode)
+{
+ TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 3, nodeIndex));
+
+ const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
+ const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
+ if (!IsValid(tfLiteContext, tfLiteInputTensor, tfLiteSplitVOperatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+
+ const TfLiteTensor& tfLiteSplitsTensor = tfLiteTensors[tfLiteNode->inputs->data[1]];
+ if (!IsValid(tfLiteContext, tfLiteSplitsTensor, tfLiteSplitVOperatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+
+ const TfLiteTensor& tfLiteAxisTensor = tfLiteTensors[tfLiteNode->inputs->data[2]];
+ if (!IsValid(tfLiteContext, tfLiteAxisTensor, tfLiteSplitVOperatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+
+ const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
+ const armnn::TensorInfo& splitsTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteSplitsTensor);
+ ARMNN_ASSERT(splitsTensorInfo.GetNumDimensions() == 1);
+ ARMNN_ASSERT(GetTensorInfoForTfLiteTensor(tfLiteAxisTensor).GetNumElements() == 1);
+
+ auto* axisTensorDataPtr = tflite::GetTensorData<int32_t>(&tfLiteAxisTensor);
+ std::vector<int32_t> axisTensorData(axisTensorDataPtr, axisTensorDataPtr + 1);
+ int32_t axis = axisTensorData[0];
+
+ auto inputDimensions = static_cast<int32_t>(inputTensorInfo.GetNumDimensions());
+    if (axis < -inputDimensions || axis >= inputDimensions)
+    {
+        TF_LITE_MAYBE_KERNEL_LOG(
+            tfLiteContext,
+            "TfLiteArmnnDelegate: Operation has invalid axis: #%d. Axis must be in range [-n, n) in node #%d:",
+            axis, nodeIndex);
+        return kTfLiteError;
+    }
+ const unsigned int splitDim = ComputeWrappedIndex(axisTensorData[0], inputTensorInfo.GetNumDimensions());
+
+ auto* splitVParameters = reinterpret_cast<TfLiteSplitVParams*>(tfLiteNode->builtin_data);
+ unsigned int numSplits = 0;
+ if (splitVParameters)
+ {
+ numSplits = NonNegative(splitVParameters->num_splits, nodeIndex);
+ }
+ else
+ {
+ numSplits = splitsTensorInfo.GetNumElements();
+ }
+
+    if (numSplits == 0)
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext, "TfLiteArmnnDelegate: Invalid number of splits %d in node #%d",
+ numSplits, nodeIndex);
+ return kTfLiteError;
+ }
+
+ TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, numSplits, nodeIndex));
+ std::vector<armnn::TensorInfo> outputs;
+ for (unsigned int i = 0; i < numSplits; ++i)
+ {
+ const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[i]];
+ if (!IsValid(tfLiteContext, tfLiteOutputTensor, tfLiteSplitVOperatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+ outputs.push_back(GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true));
+ }
+ const std::vector<std::reference_wrapper<armnn::TensorInfo>> outputTensorInfos(outputs.begin(), outputs.end());
+
+ auto inputDimSize = inputTensorInfo.GetNumDimensions();
+ if (inputDimSize > MaxNumOfTensorDimensions)
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: The number of dimensions: #%d for input tensors of the split op cannot be greater "
+ "than #%d in node #%d: ", inputDimSize, MaxNumOfTensorDimensions, nodeIndex);
+ return kTfLiteError;
+ }
+
+ std::vector<int32_t> splitsTensorData(numSplits);
+ std::memcpy(splitsTensorData.data(), tfLiteSplitsTensor.data.data, splitsTensorInfo.GetNumBytes());
+
+ unsigned int index = 0;
+ unsigned int inferredIndex = 0;
+ int numberOfInferred = 0;
+ int splitSum = 0;
+
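+    // TfLite allows at most one entry of split_sizes to be -1, meaning "infer this size";
+    // locate it and accumulate the explicitly specified sizes.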
+ for (auto splitData : splitsTensorData)
+ {
+ if (splitData < 0)
+ {
+ ++numberOfInferred;
+ inferredIndex = index;
+ }
+ else
+ {
+ splitSum += splitData;
+ }
+ ++index;
+ }
+
+ // Check for inferred axis
+ if (numberOfInferred == 0)
+ {
+ if (splitSum != armnn::numeric_cast<int>(inputTensorInfo.GetShape()[splitDim]))
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext, "TfLiteArmnnDelegate: SplitV split_sizes does not sum to the dimension of value along"
+ " split_dim in node #%d", nodeIndex);
+ return kTfLiteError;
+ }
+ }
+ else if (numberOfInferred == 1)
+ {
+ splitsTensorData[inferredIndex] = armnn::numeric_cast<int>(inputTensorInfo.GetShape()[splitDim]) - splitSum;
+ }
+ else
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext, "TfLiteArmnnDelegate: SplitV cannot infer split size for more than one split in node #%d",
+ nodeIndex);
+ return kTfLiteError;
+ }
+
+ armnn::SplitterDescriptor splitDescriptor(numSplits, inputDimSize);
+ unsigned int accumSplit = 0;
+ for (unsigned int j = 0; j < numSplits; ++j)
+ {
+ unsigned int splitSize = armnn::numeric_cast<unsigned int>(splitsTensorData[j]);
+
+ // Set the size of the views.
+ for (unsigned int dimIdx = 0; dimIdx < inputTensorInfo.GetNumDimensions(); ++dimIdx)
+ {
+ unsigned int dimSize = inputTensorInfo.GetShape()[dimIdx];
+ if (dimIdx == splitDim)
+ {
+ dimSize = splitSize;
+ }
+ splitDescriptor.SetViewSize(j, dimIdx, dimSize);
+ }
+
+ splitDescriptor.SetViewOriginCoord(j, splitDim, accumSplit);
+ accumSplit += splitSize;
+ }
+
+ armnn::BackendId setBackend;
+ if (!delegateData.m_Network)
+ {
+ // Check if supported
+ bool isSupported = false;
+ FORWARD_LAYER_SUPPORT_FUNC("SPLIT",
+ tfLiteContext,
+ IsSplitterSupported,
+ delegateData.m_Backends,
+ isSupported,
+ setBackend,
+ inputTensorInfo,
+ outputTensorInfos,
+ splitDescriptor);
+ return isSupported ? kTfLiteOk : kTfLiteError;
+ }
+
+    armnn::IConnectableLayer* layer = delegateData.m_Network->AddSplitterLayer(splitDescriptor);
+    ARMNN_ASSERT(layer != nullptr);
+    layer->SetBackendId(setBackend);
+
+ for (unsigned int k = 0; k < layer->GetNumOutputSlots(); ++k)
+ {
+ layer->GetOutputSlot(k).SetTensorInfo(outputs[k]);
+ }
+
+    // Try to connect the constant inputs if there are any
+    if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
+
+ // Connect
+ return Connect(layer, tfLiteNode, delegateData);
+}
+
+} // namespace armnnDelegate
\ No newline at end of file
diff --git a/delegate/classic/src/StridedSlice.hpp b/delegate/classic/src/StridedSlice.hpp
new file mode 100644
index 0000000000..998e3d3e14
--- /dev/null
+++ b/delegate/classic/src/StridedSlice.hpp
@@ -0,0 +1,156 @@
+//
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <armnn/utility/IgnoreUnused.hpp>
+
+#include <tensorflow/lite/builtin_ops.h>
+#include <tensorflow/lite/c/builtin_op_data.h>
+#include <tensorflow/lite/c/common.h>
+#include <tensorflow/lite/minimal_logging.h>
+
+namespace armnnDelegate
+{
+
+TfLiteStatus VisitStridedSliceOperator(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ TfLiteNode* tfLiteNode,
+ int nodeIndex,
+ int32_t sliceOperatorCode)
+{
+ TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 4, nodeIndex));
+ TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+
+ // Read inputs [input, begin, end, strides]
+ int numInputs = tfLiteNode->inputs->size;
+ std::vector<const TfLiteTensor*> tfLiteInputs;
+ tfLiteInputs.reserve(numInputs);
+ const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
+ for (int i = 0; i < numInputs; i++)
+ {
+ const TfLiteTensor* inputTensor = &tfLiteTensors[tfLiteNode->inputs->data[i]];
+ tfLiteInputs.push_back(inputTensor);
+ if (!IsValid(tfLiteContext, *inputTensor, sliceOperatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+ }
+
+ // We save the begin, end and strides tensors in our descriptor. Therefore we have to read those values from inputs
+ int inputRank = tfLiteInputs[0]->dims->size;
+ auto ReadInt32Input = [&](int inputIndex, std::vector<int32_t>& outputData) -> TfLiteStatus
+ {
+ if (tfLiteInputs[inputIndex]->type != kTfLiteInt32)
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: The Begin-, End- and Stride-Tensors of the StridedSlice operation need to "
+ "be of type int32. Operator: #%d node #%d: ",
+ sliceOperatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+ int rank = tfLiteInputs[inputIndex]->dims->size;
+ if (rank != 1)
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: The Begin-, End- and Stride-Tensors of the StridedSlice operation need to "
+ "be a 1D-Tensor. Operator: #%d node #%d: ",
+ sliceOperatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+ int numValues = tfLiteInputs[inputIndex]->dims->data[0];
+ if (numValues != inputRank)
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: The number of values in the Begin-, End- and Stride-Tensors of the "
+ "StridedSlice operation need to be equal to the rank of the Input-Tensor. Operator: #%d node #%d: ",
+ sliceOperatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+ // return tensor data
+ auto* tensorDataPtr = tflite::GetTensorData<int32_t>(tfLiteInputs[inputIndex]);
+ outputData.assign(tensorDataPtr, tensorDataPtr+numValues);
+ return kTfLiteOk;
+ };
+
+    std::vector<int32_t> beginData;
+    if (ReadInt32Input(1, beginData) != kTfLiteOk)
+    {
+        return kTfLiteError;
+    }
+    std::vector<int32_t> endData;
+    if (ReadInt32Input(2, endData) != kTfLiteOk)
+    {
+        return kTfLiteError;
+    }
+    std::vector<int32_t> strideData;
+    if (ReadInt32Input(3, strideData) != kTfLiteOk)
+    {
+        return kTfLiteError;
+    }
+
+    // Parse the built-in options
+ auto* stridedSliceParams = reinterpret_cast<TfLiteStridedSliceParams*>(tfLiteNode->builtin_data);
+
+ // Write all data to the descriptor
+ armnn::StridedSliceDescriptor descriptor;
+ descriptor.m_Begin = std::move(beginData);
+ descriptor.m_End = std::move(endData);
+ descriptor.m_Stride = std::move(strideData);
+ descriptor.m_BeginMask = stridedSliceParams->begin_mask;
+ descriptor.m_EllipsisMask = stridedSliceParams->ellipsis_mask;
+ descriptor.m_EndMask = stridedSliceParams->end_mask;
+ descriptor.m_NewAxisMask = stridedSliceParams->new_axis_mask;
+ descriptor.m_ShrinkAxisMask = stridedSliceParams->shrink_axis_mask;
+ descriptor.m_DataLayout = armnn::DataLayout::NHWC;
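+    // The masks are bit fields: if bit i of begin_mask/end_mask is set, the begin/end
+    // value for dimension i is ignored and the fullest possible range is used instead.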
+
+ // Validate output
+ const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
+ if (!IsValid(tfLiteContext, tfLiteOutputTensor, sliceOperatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+
+ const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(*tfLiteInputs[0]);
+ const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor);
+
+ bool isSupported = false;
+ armnn::BackendId setBackend;
+ auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
+ {
+ FORWARD_LAYER_SUPPORT_FUNC("STRIDED_SLICE",
+ tfLiteContext,
+ IsStridedSliceSupported,
+ delegateData.m_Backends,
+ isSupported,
+ setBackend,
+ inputTensorInfo,
+ outInfo,
+ descriptor);
+ };
+
+ if (!delegateData.m_Network)
+ {
+ validateFunc(outputTensorInfo, isSupported);
+ return isSupported ? kTfLiteOk : kTfLiteError;
+ }
+
+ // Add a StridedSlice layer
+    armnn::IConnectableLayer* layer = delegateData.m_Network->AddStridedSliceLayer(descriptor);
+    ARMNN_ASSERT(layer != nullptr);
+    layer->SetBackendId(setBackend);
+
+ armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
+ outputSlot.SetTensorInfo(outputTensorInfo);
+
+ // try to connect the Constant Inputs if there are any
+    if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
+
+ // Connect
+ return Connect(layer, tfLiteNode, delegateData);
+}
+
+} // namespace armnnDelegate
+
diff --git a/delegate/classic/src/Transpose.hpp b/delegate/classic/src/Transpose.hpp
new file mode 100644
index 0000000000..41178d0b59
--- /dev/null
+++ b/delegate/classic/src/Transpose.hpp
@@ -0,0 +1,110 @@
+//
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <armnn/utility/IgnoreUnused.hpp>
+
+#include <tensorflow/lite/builtin_ops.h>
+#include <tensorflow/lite/c/builtin_op_data.h>
+#include <tensorflow/lite/c/common.h>
+#include <tensorflow/lite/minimal_logging.h>
+#include <tensorflow/lite/kernels/internal/tensor_ctypes.h>
+
+namespace armnnDelegate
+{
+
+TfLiteStatus VisitTransposeOperator(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ TfLiteNode* tfLiteNode,
+ int nodeIndex,
+ int32_t tfliteTransposeOperatorCode)
+{
+ TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 2, nodeIndex));
+ TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+
+    const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
+ const TfLiteTensor& tfLiteInputTensor0 = tfLiteTensors[tfLiteNode->inputs->data[0]];
+ if (IsDynamicTensor(tfLiteInputTensor0))
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(tfLiteContext,
+ "TfLiteArmnnDelegate: Dynamic input tensors are not supported in "
+ "operator #%d node #%d: ",
+ tfliteTransposeOperatorCode, nodeIndex);
+
+ return kTfLiteError;
+ }
+
+ const TfLiteTensor& tfLiteInputTensor1 = tfLiteTensors[tfLiteNode->inputs->data[1]];
+ if (IsDynamicTensor(tfLiteInputTensor1))
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(tfLiteContext,
+ "TfLiteArmnnDelegate: Dynamic input tensors are not supported in "
+ "operator #%d node #%d: ",
+ tfliteTransposeOperatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+
+ const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
+ if (IsDynamicTensor(tfLiteOutputTensor))
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(tfLiteContext,
+ "TfLiteArmnnDelegate: Dynamic output tensors are not supported in "
+ "operator #%d node #%d: ",
+ tfliteTransposeOperatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+
+ const armnn::TensorInfo& inputTensorInfo0 = GetTensorInfoForTfLiteTensor(tfLiteInputTensor0);
+ const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
+
+ auto* permTensorDataPtr = tflite::GetTensorData<int32_t>(&tfLiteInputTensor1);
+ unsigned int numEl = tfLiteInputTensor1.dims->data[0];
+
+    ARMNN_ASSERT(numEl <= armnn::MaxNumOfTensorDimensions);
+    ARMNN_ASSERT(tfLiteInputTensor1.dims->size == 1); // the permutation tensor must be 1D
+
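+    // Build the descriptor's permutation vector from the values read above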
+ armnn::TransposeDescriptor descriptor(armnn::PermutationVector(
+ reinterpret_cast<const armnn::PermutationVector::ValueType *> (permTensorDataPtr),
+ static_cast<armnn::PermutationVector::SizeType>(numEl)));
+
+ bool isSupported = false;
+ armnn::BackendId setBackend;
+    auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
+    {
+        FORWARD_LAYER_SUPPORT_FUNC("TRANSPOSE",
+                                   tfLiteContext,
+                                   IsTransposeSupported,
+                                   delegateData.m_Backends,
+                                   isSupported,
+                                   setBackend,
+                                   inputTensorInfo0,
+                                   outInfo,
+                                   descriptor);
+    };
+
+ if (!delegateData.m_Network)
+ {
+ validateFunc(outputTensorInfo, isSupported);
+ return isSupported ? kTfLiteOk : kTfLiteError;
+ }
+
+    armnn::IConnectableLayer* transposeLayer = delegateData.m_Network->AddTransposeLayer(descriptor);
+    ARMNN_ASSERT(transposeLayer != nullptr);
+    ARMNN_ASSERT(transposeLayer->GetNumInputSlots() == 1); // the permutation vector is supplied via the descriptor
+    transposeLayer->SetBackendId(setBackend);
+
+ armnn::IOutputSlot& outputSlot = transposeLayer->GetOutputSlot(0);
+ outputSlot.SetTensorInfo(outputTensorInfo);
+
+ // try to connect the Constant Inputs if there are any
+    if (ProcessInputs(transposeLayer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
+
+ return Connect(transposeLayer, tfLiteNode, delegateData);
+}
+} // namespace armnnDelegate
diff --git a/delegate/classic/src/UnidirectionalSequenceLstm.hpp b/delegate/classic/src/UnidirectionalSequenceLstm.hpp
new file mode 100644
index 0000000000..f8689d263f
--- /dev/null
+++ b/delegate/classic/src/UnidirectionalSequenceLstm.hpp
@@ -0,0 +1,302 @@
+//
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <DelegateUtils.hpp>
+
+#include <armnn/LstmParams.hpp>
+#include <armnn/Tensor.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
+
+#include <tensorflow/lite/builtin_ops.h>
+#include <tensorflow/lite/c/builtin_op_data.h>
+#include <tensorflow/lite/c/common.h>
+#include <tensorflow/lite/minimal_logging.h>
+
+#include <cmath> // for std::pow
+
+namespace armnnDelegate
+{
+
+TfLiteStatus VisitUnidirectionalSequenceLstmOperator(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ TfLiteNode* tfLiteNode,
+ int nodeIndex,
+ int32_t operatorCode)
+{
+ auto numInputs = tfLiteNode->inputs->size;
+ if (numInputs < 2)
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext, "TfLiteArmnnDelegate: Minimum number of inputs (%d != %d) in node #%d",
+ 2, numInputs, nodeIndex);
+ return kTfLiteError;
+ }
+
+ const auto nodeParams = reinterpret_cast<TfLiteUnidirectionalSequenceLSTMParams *>(tfLiteNode->builtin_data);
+ const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
+
+ const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
+ if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+
+ const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
+ if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+
+ // Set the params structure for the AddUnidirectionalSequenceLstmLayer call
+ // Please refer to each operand at
+ // https://www.tensorflow.org/mlir/tfl_ops#tflunidirectional_sequence_lstm_tflunidirectionalsequencelstmop
+ armnn::LstmInputParams params;
+
+ if (IsOptionalOperandPresent(tfLiteNode, 1))
+ {
+ params.m_InputToInputWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 1);
+ }
+
+ params.m_InputToForgetWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 2);
+ params.m_InputToCellWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 3);
+ params.m_InputToOutputWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 4);
+
+ // Recurrent weight tensors of size {n_cell, n_output}
+ if (IsOptionalOperandPresent(tfLiteNode, 5))
+ {
+ params.m_RecurrentToInputWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 5);
+ }
+
+ params.m_RecurrentToForgetWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 6);
+ params.m_RecurrentToCellWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 7);
+ params.m_RecurrentToOutputWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 8);
+
+ // Peephole weights tensors of size {n_cell}, representing a diagonal matrix.
+ if (IsOptionalOperandPresent(tfLiteNode, 9))
+ {
+ params.m_CellToInputWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 9);
+ }
+
+ if (IsOptionalOperandPresent(tfLiteNode, 10))
+ {
+ params.m_CellToForgetWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 10);
+ }
+
+ if (IsOptionalOperandPresent(tfLiteNode, 11))
+ {
+ params.m_CellToOutputWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 11);
+ }
+
+ // Gates bias tensors of size {n_cell}
+ if (IsOptionalOperandPresent(tfLiteNode, 12))
+ {
+ params.m_InputGateBias = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 12);
+ }
+
+ params.m_ForgetGateBias = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 13);
+ params.m_CellBias = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 14);
+ params.m_OutputGateBias = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 15);
+
+ // Projection weight tensor of size {n_output, n_cell}
+ if (IsOptionalOperandPresent(tfLiteNode, 16))
+ {
+ params.m_ProjectionWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 16);
+ }
+ // Projection bias tensor of size {n_output}
+ if (IsOptionalOperandPresent(tfLiteNode, 17))
+ {
+ params.m_ProjectionBias = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 17);
+ }
+
+ // These state tensors are defined as variable tensors, and will be modified by this op.
+ armnn::TensorInfo outputStateInInfo = GetTensorInfoForTfLiteTensor(tfLiteTensors[tfLiteNode->inputs->data[18]]);
+ armnn::TensorInfo cellStateInInfo = GetTensorInfoForTfLiteTensor(tfLiteTensors[tfLiteNode->inputs->data[19]]);
+
+ // Layer norm coefficient tensors of size {n_cell}, representing a diagonal matrix.
+ if (IsOptionalOperandPresent(tfLiteNode, 20))
+ {
+ params.m_InputLayerNormWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 20);
+ }
+
+ if (IsOptionalOperandPresent(tfLiteNode, 21))
+ {
+ params.m_ForgetLayerNormWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 21);
+ }
+
+ if (IsOptionalOperandPresent(tfLiteNode, 22))
+ {
+ params.m_CellLayerNormWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 22);
+ }
+
+ if (IsOptionalOperandPresent(tfLiteNode, 23))
+ {
+ params.m_OutputLayerNormWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 23);
+ }
+
+ // set the layer descriptor
+ armnn::UnidirectionalSequenceLstmDescriptor desc;
+ desc.m_ActivationFunc = NonNegative(nodeParams->activation, nodeIndex);
+ desc.m_ClippingThresCell = nodeParams->cell_clip;
+ desc.m_ClippingThresProj = nodeParams->proj_clip;
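+    // CIFG (coupled input and forget gate) mode is enabled when any of the input gate tensors is missing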
+ desc.m_CifgEnabled = (params.m_InputToInputWeights == nullptr
+ || params.m_RecurrentToInputWeights == nullptr
+ || params.m_InputGateBias == nullptr);
+ desc.m_PeepholeEnabled = (params.m_CellToForgetWeights != nullptr || params.m_CellToOutputWeights != nullptr);
+ desc.m_ProjectionEnabled = (params.m_ProjectionWeights != nullptr);
+ desc.m_LayerNormEnabled = (params.m_InputLayerNormWeights != nullptr
+ || params.m_ForgetLayerNormWeights != nullptr
+ || params.m_CellLayerNormWeights != nullptr
+ || params.m_OutputLayerNormWeights != nullptr);
+ desc.m_TimeMajor = nodeParams->time_major;
+
+ if (tfLiteNode->intermediates->size > 3 && desc.m_LayerNormEnabled)
+ {
+ auto inputIntermediateTensorInfo = GetTensorInfoForTfLiteTensor(
+ tfLiteTensors[tfLiteNode->intermediates->data[0]]);
+ auto forgetIntermediateTensorInfo = GetTensorInfoForTfLiteTensor(
+ tfLiteTensors[tfLiteNode->intermediates->data[1]]);
+ auto cellIntermediateTensorInfo = GetTensorInfoForTfLiteTensor(
+ tfLiteTensors[tfLiteNode->intermediates->data[2]]);
+ auto outputIntermediateTensorInfo = GetTensorInfoForTfLiteTensor(
+ tfLiteTensors[tfLiteNode->intermediates->data[3]]);
+
+ desc.m_InputIntermediateScale = inputIntermediateTensorInfo.GetQuantizationScale();
+ desc.m_ForgetIntermediateScale = forgetIntermediateTensorInfo.GetQuantizationScale();
+ desc.m_CellIntermediateScale = cellIntermediateTensorInfo.GetQuantizationScale();
+ desc.m_OutputIntermediateScale = outputIntermediateTensorInfo.GetQuantizationScale();
+ }
+ else
+ {
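+        // No intermediate tensor info is available, so fall back to a default scale of 2^-12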
+ float defaultIntermediate = std::pow(2, -12);
+ desc.m_InputIntermediateScale = defaultIntermediate;
+ desc.m_ForgetIntermediateScale = defaultIntermediate;
+ desc.m_CellIntermediateScale = defaultIntermediate;
+ desc.m_OutputIntermediateScale = defaultIntermediate;
+ }
+ if (tfLiteNode->intermediates->size > 4)
+ {
+        auto hiddenTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteTensors[tfLiteNode->intermediates->data[4]]);
+        desc.m_HiddenStateScale = hiddenTensorInfo.GetQuantizationScale();
+        desc.m_HiddenStateZeroPoint = hiddenTensorInfo.GetQuantizationOffset();
+ }
+ const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
+ const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
+
+ unsigned int batchSize = inputTensorInfo.GetShape()[0];
+ unsigned int outputSize = outputTensorInfo.GetShape()[2];
+ unsigned int numUnits = cellStateInInfo.GetShape()[1];
+
+ armnn::DataType dataType = inputTensorInfo.GetDataType();
+ float qScale = inputTensorInfo.GetQuantizationScale();
+    int32_t qOffset = inputTensorInfo.GetQuantizationOffset();
+
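+    // The scratch buffer holds one working buffer per gate: 3 when CIFG removes the input gate, 4 otherwise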
+ armnn::TensorInfo scratchBufferTensorInfo({batchSize, numUnits * 3}, dataType, qScale, qOffset);
+ if (!desc.m_CifgEnabled)
+ {
+ scratchBufferTensorInfo = armnn::TensorInfo({batchSize, numUnits * 4}, dataType, qScale, qOffset);
+ }
+ armnn::TensorInfo cellStateOutTensorInfo({batchSize, numUnits},
+ cellStateInInfo.GetDataType(),
+ cellStateInInfo.GetQuantizationScale(),
+ cellStateInInfo.GetQuantizationOffset());
+
+ armnn::TensorInfo outputStateOutTensorInfo({batchSize, outputSize}, dataType, qScale, qOffset);
+
+ armnn::LstmInputParamsInfo paramsInfo;
+ paramsInfo.m_InputToForgetWeights = &(params.m_InputToForgetWeights->GetInfo());
+ paramsInfo.m_InputToCellWeights = &(params.m_InputToCellWeights->GetInfo());
+ paramsInfo.m_InputToOutputWeights = &(params.m_InputToOutputWeights->GetInfo());
+ paramsInfo.m_RecurrentToForgetWeights = &(params.m_RecurrentToForgetWeights->GetInfo());
+ paramsInfo.m_RecurrentToCellWeights = &(params.m_RecurrentToCellWeights->GetInfo());
+ paramsInfo.m_RecurrentToOutputWeights = &(params.m_RecurrentToOutputWeights->GetInfo());
+ paramsInfo.m_ForgetGateBias = &(params.m_ForgetGateBias->GetInfo());
+ paramsInfo.m_CellBias = &(params.m_CellBias->GetInfo());
+ paramsInfo.m_OutputGateBias = &(params.m_OutputGateBias->GetInfo());
+
+ if (!desc.m_CifgEnabled)
+ {
+ paramsInfo.m_InputToInputWeights = &(params.m_InputToInputWeights->GetInfo());
+ paramsInfo.m_RecurrentToInputWeights = &(params.m_RecurrentToInputWeights->GetInfo());
+ if (params.m_CellToInputWeights != nullptr)
+ {
+ paramsInfo.m_CellToInputWeights = &(params.m_CellToInputWeights->GetInfo());
+ }
+ paramsInfo.m_InputGateBias = &(params.m_InputGateBias->GetInfo());
+ }
+
+ if (desc.m_ProjectionEnabled)
+ {
+ paramsInfo.m_ProjectionWeights = &(params.m_ProjectionWeights->GetInfo());
+ if (params.m_ProjectionBias != nullptr)
+ {
+ paramsInfo.m_ProjectionBias = &(params.m_ProjectionBias->GetInfo());
+ }
+ }
+
+ if (desc.m_PeepholeEnabled)
+ {
+ paramsInfo.m_CellToForgetWeights = &(params.m_CellToForgetWeights->GetInfo());
+ paramsInfo.m_CellToOutputWeights = &(params.m_CellToOutputWeights->GetInfo());
+ }
+
+ if (desc.m_LayerNormEnabled)
+ {
+ if(!desc.m_CifgEnabled)
+ {
+ paramsInfo.m_InputLayerNormWeights = &(params.m_InputLayerNormWeights->GetInfo());
+ }
+ paramsInfo.m_ForgetLayerNormWeights = &(params.m_ForgetLayerNormWeights->GetInfo());
+ paramsInfo.m_CellLayerNormWeights = &(params.m_CellLayerNormWeights->GetInfo());
+ paramsInfo.m_OutputLayerNormWeights = &(params.m_OutputLayerNormWeights->GetInfo());
+ }
+
+ bool isSupported = false;
+ armnn::BackendId setBackend;
+ auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
+ {
+ FORWARD_LAYER_SUPPORT_FUNC("UNIDIRECTIONAL_SEQUENCE_LSTM",
+ tfLiteContext,
+ IsUnidirectionalSequenceLstmSupported,
+ delegateData.m_Backends,
+ isSupported,
+ setBackend,
+ inputTensorInfo,
+ outputStateInInfo,
+ cellStateInInfo,
+ outputStateOutTensorInfo,
+ cellStateOutTensorInfo,
+ outputInfo,
+ desc,
+ paramsInfo);
+ };
+
+ if (!delegateData.m_Network)
+ {
+ validateFunc(outputTensorInfo, isSupported);
+ return isSupported ? kTfLiteOk : kTfLiteError;
+ }
+
+    armnn::IConnectableLayer* layer = delegateData.m_Network->AddUnidirectionalSequenceLstmLayer(desc, params);
+    ARMNN_ASSERT(layer != nullptr);
+    layer->SetBackendId(setBackend);
+
+ layer->GetOutputSlot(0).SetTensorInfo(outputStateOutTensorInfo);
+ layer->GetOutputSlot(1).SetTensorInfo(cellStateOutTensorInfo);
+ layer->GetOutputSlot(2).SetTensorInfo(outputTensorInfo);
+
+ // Connect the inputs
+ // input_layer
+ delegateData.m_OutputSlotForNode[tfLiteNode->inputs->data[0]]->Connect(layer->GetInputSlot(0));
+    // outputStateIn (TfLite input 18)
+    delegateData.m_OutputSlotForNode[tfLiteNode->inputs->data[18]]->Connect(layer->GetInputSlot(1));
+    // cellStateIn (TfLite input 19)
+    delegateData.m_OutputSlotForNode[tfLiteNode->inputs->data[19]]->Connect(layer->GetInputSlot(2));
+
+ armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(2);
+ delegateData.m_OutputSlotForNode[static_cast<unsigned long>(tfLiteNode->outputs->data[0])] = &outputSlot;
+ return kTfLiteOk;
+}
+
+} // namespace armnnDelegate
\ No newline at end of file
diff --git a/delegate/classic/src/Unpack.hpp b/delegate/classic/src/Unpack.hpp
new file mode 100644
index 0000000000..c9b737040c
--- /dev/null
+++ b/delegate/classic/src/Unpack.hpp
@@ -0,0 +1,214 @@
+//
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <armnn/utility/IgnoreUnused.hpp>
+
+#include <DelegateUtils.hpp>
+
+#include <tensorflow/lite/builtin_ops.h>
+#include <tensorflow/lite/c/builtin_op_data.h>
+#include <tensorflow/lite/c/common.h>
+#include <tensorflow/lite/minimal_logging.h>
+#include <numeric>
+
+namespace armnnDelegate
+{
+
+TfLiteStatus VisitUnpackOperator(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ TfLiteNode* tfLiteNode,
+ int nodeIndex,
+ int32_t operatorCode)
+{
+ TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+
+ const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
+ const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
+
+ if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+
+ // Get Unpack Axis
+ const auto params = reinterpret_cast<TfLiteUnpackParams*>(tfLiteNode->builtin_data);
+
+ const unsigned int unpackAxis = NonNegative(params->axis, nodeIndex);
+
+ const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
+
+ if (unpackAxis >= inputTensorInfo.GetNumDimensions())
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: The unpack axis #%d cannot be greater than or equal to "
+ "the number of input dimensions #%d in operator #%d node #%d",
+ unpackAxis, inputTensorInfo.GetNumDimensions(), operatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+
+ // Get Unpack Num
+ unsigned int unpackNum = NonNegative(params->num, nodeIndex);
+
+ // If num is not defined, automatically infer from the length of the dimension axis.
+ if(unpackNum == 0)
+ {
+ unpackNum = inputTensorInfo.GetShape()[unpackAxis];
+ }
+
+ // If unpack number cannot be inferred and is still zero, return kTfLiteError.
+ if(unpackNum == 0)
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: Number to unpack must greater than zero in operator #%d node #%d: ",
+ operatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+
+ // Check outputs
+ TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, unpackNum, nodeIndex));
+
+ auto inputDimSize = inputTensorInfo.GetNumDimensions();
+ std::vector<unsigned int> unpackDimSizes(inputDimSize);
+
+ // Add current input shape to unpackDimSizes
+ for (unsigned int i = 0; i < inputDimSize; ++i)
+ {
+ unpackDimSizes[i] = inputTensorInfo.GetShape()[i];
+ }
+
+ if (unpackDimSizes[unpackAxis] != unpackNum)
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: Number to unpack must be the same as length "
+ "of the dimension to unpack along in operator #%d node #%d: ",
+ operatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+
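+    // Each splitter view has size 1 along the unpack axis, since the dimension size equals unpackNum here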
+ unpackDimSizes[unpackAxis] /= unpackNum;
+
+ armnn::SplitterDescriptor splitDesc(unpackNum, static_cast<unsigned int>(unpackDimSizes.size()));
+ for (unsigned int j = 0; j < unpackNum; ++j)
+ {
+ // Set the size of the views.
+ for (unsigned int dimIdx = 0; dimIdx < unpackDimSizes.size(); ++dimIdx)
+ {
+ splitDesc.SetViewSize(j, dimIdx, unpackDimSizes[dimIdx]);
+ }
+ splitDesc.SetViewOriginCoord(j, unpackAxis, unpackDimSizes[unpackAxis] * j);
+ }
+
+ std::vector<armnn::TensorInfo> outputs;
+ for (unsigned int i = 0; i < unpackNum; ++i)
+ {
+ const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[i]];
+ if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+ outputs.push_back(GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true));
+ }
+ const std::vector<std::reference_wrapper<armnn::TensorInfo>> outputTensorInfos(outputs.begin(), outputs.end());
+
+ // Determine the shape of the Splitter layer outputs for validation
+ armnn::TensorShape splitOutShape = armnn::TensorShape(static_cast<unsigned int>(unpackDimSizes.size()),
+ unpackDimSizes.data());
+
+ std::vector<armnn::TensorInfo> splitterOutputs;
+ for (unsigned int outputIndex = 0; outputIndex < outputTensorInfos.size(); ++outputIndex)
+ {
+ splitterOutputs.push_back(armnn::TensorInfo(splitOutShape,
+ outputTensorInfos[outputIndex].get().GetDataType(),
+ outputTensorInfos[outputIndex].get().GetQuantizationScale(),
+ outputTensorInfos[outputIndex].get().GetQuantizationOffset()));
+ }
+ std::vector<std::reference_wrapper<armnn::TensorInfo>> splitterOutputTensorInfos(splitterOutputs.begin(),
+ splitterOutputs.end());
+
+    // Create Reshape descriptor from the first outputTensorInfo to validate a single Reshape layer.
+    // This descriptor is reused later when creating every ReshapeLayer, as all Reshape layers are identical.
+    armnn::ReshapeDescriptor reshapeDescriptor;
+    reshapeDescriptor.m_TargetShape = outputTensorInfos[0].get().GetShape();
+
+    armnn::BackendId setBackendSplit;
+    armnn::BackendId setBackendReshape;
+    if (!delegateData.m_Network)
+    {
+        // Check that both the Splitter and the Reshape layers are supported
+        bool isSupported = false;
+        FORWARD_LAYER_SUPPORT_FUNC("UNPACK",
+                                   tfLiteContext,
+                                   IsSplitterSupported,
+                                   delegateData.m_Backends,
+                                   isSupported,
+                                   setBackendSplit,
+                                   inputTensorInfo,
+                                   splitterOutputTensorInfos,
+                                   splitDesc);
+        if (!isSupported)
+        {
+            return kTfLiteError;
+        }
+        FORWARD_LAYER_SUPPORT_FUNC("RESHAPE",
+                                   tfLiteContext,
+                                   IsReshapeSupported,
+                                   delegateData.m_Backends,
+                                   isSupported,
+                                   setBackendReshape,
+                                   splitterOutputTensorInfos[0],
+                                   outputTensorInfos[0],
+                                   reshapeDescriptor);
+        return isSupported ? kTfLiteOk : kTfLiteError;
+    }
+
+ std::string splitterLayerName("Unpack Splitter");
+
+    armnn::IConnectableLayer* splitterLayer = delegateData.m_Network->AddSplitterLayer(splitDesc,
+                                                                                       splitterLayerName.c_str());
+    ARMNN_ASSERT(splitterLayer != nullptr);
+    splitterLayer->SetBackendId(setBackendSplit);
+
+ for (unsigned int k = 0; k < splitterLayer->GetNumOutputSlots(); ++k)
+ {
+ splitterLayer->GetOutputSlot(k).SetTensorInfo(outputs[k]);
+ }
+
+ // Connect the input slots
+ delegateData.m_OutputSlotForNode[tfLiteNode->inputs->data[0]]->Connect(splitterLayer->GetInputSlot(0));
+
+ // Create reshape to remove the unpacked dimension for unpack operator of each output from Splitter.
+ for (unsigned int outputIndex = 0; outputIndex < splitterLayer->GetNumOutputSlots(); ++outputIndex)
+ {
+ std::string reshapeLayerName("Unpack Reshape");
+        armnn::IConnectableLayer* reshapeLayer = delegateData.m_Network->AddReshapeLayer(reshapeDescriptor,
+                                                                                         reshapeLayerName.c_str());
+        ARMNN_ASSERT(reshapeLayer != nullptr);
+        reshapeLayer->SetBackendId(setBackendReshape);
+
+ splitterLayer->GetOutputSlot(outputIndex).SetTensorInfo(splitterOutputTensorInfos[outputIndex]);
+ splitterLayer->GetOutputSlot(outputIndex).Connect(reshapeLayer->GetInputSlot(0));
+
+ armnn::TensorInfo outputTensorInfo = outputTensorInfos[outputIndex];
+ reshapeLayer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
+
+        armnn::IOutputSlot& slot = reshapeLayer->GetOutputSlot(0);
+        delegateData.m_OutputSlotForNode[
+            static_cast<unsigned long>(tfLiteNode->outputs->data[outputIndex])] = &slot;
+    }
+
+ return kTfLiteOk;
+}
+
+} // namespace armnnDelegate
diff --git a/delegate/classic/src/armnn_delegate.cpp b/delegate/classic/src/armnn_delegate.cpp
new file mode 100644
index 0000000000..4ddfc1a35f
--- /dev/null
+++ b/delegate/classic/src/armnn_delegate.cpp
@@ -0,0 +1,1059 @@
+//
+// Copyright © 2020-2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include <armnn_delegate.hpp>
+
+#include "Version.hpp"
+
+#include "Activation.hpp"
+#include "ArgMinMax.hpp"
+#include "BatchMatMul.hpp"
+#include "BatchSpace.hpp"
+#include "Comparison.hpp"
+#include "Convolution.hpp"
+#include "Control.hpp"
+#include "ElementwiseBinary.hpp"
+#include "ElementwiseUnary.hpp"
+#include "Fill.hpp"
+#include "FullyConnected.hpp"
+#include "Gather.hpp"
+#include "GatherNd.hpp"
+#include "LogicalBinary.hpp"
+#include "Lstm.hpp"
+#include "Normalization.hpp"
+#include "Pack.hpp"
+#include "Pad.hpp"
+#include "Pooling.hpp"
+#include "Prelu.hpp"
+#include "Quantization.hpp"
+#include "Redefine.hpp"
+#include "Reduce.hpp"
+#include "Resize.hpp"
+#include "Round.hpp"
+#include "Shape.hpp"
+#include "Slice.hpp"
+#include "StridedSlice.hpp"
+#include "Softmax.hpp"
+#include "SpaceDepth.hpp"
+#include "Split.hpp"
+#include "Transpose.hpp"
+#include "UnidirectionalSequenceLstm.hpp"
+#include "Unpack.hpp"
+
+#include <armnnUtils/Filesystem.hpp>
+#include <armnn/utility/Timer.hpp>
+#include <flatbuffers/flatbuffers.h>
+#include <tensorflow/lite/context_util.h>
+#include <tensorflow/lite/schema/schema_generated.h>
+
+#include <algorithm>
+#include <iostream>
+#include <sstream>
+#include <string> // for std::to_string
+
+namespace armnnDelegate
+{
+
+DelegateOptions TfLiteArmnnDelegateOptionsDefault()
+{
+ DelegateOptions options(armnn::Compute::CpuRef);
+ return options;
+}
+
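+// Creates an Arm NN delegate; the caller owns the returned pointer and should
+// release it with TfLiteArmnnDelegateDelete once no interpreter is using it.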
+TfLiteDelegate* TfLiteArmnnDelegateCreate(armnnDelegate::DelegateOptions options)
+{
+ auto* armnnDelegate = new ::armnnDelegate::Delegate(options);
+ return armnnDelegate->GetDelegate();
+}
+
+void TfLiteArmnnDelegateDelete(TfLiteDelegate* tfLiteDelegate)
+{
+ if (tfLiteDelegate != nullptr)
+ {
+ delete static_cast<::armnnDelegate::Delegate*>(tfLiteDelegate->data_);
+ }
+}
+
+TfLiteStatus DoPrepare(TfLiteContext* tfLiteContext, TfLiteDelegate* tfLiteDelegate)
+{
+ TfLiteIntArray* supportedOperators =
+ static_cast<::armnnDelegate::Delegate*>(tfLiteDelegate->data_)->IdentifyOperatorsToDelegate(tfLiteContext);
+
+ // ArmNN Delegate Registration
+ static const TfLiteRegistration kArmnnSubgraphRegistration = {
+ // ArmnnSubgraph Init
+ .init = [](TfLiteContext* tfLiteContext, const char* buffer, size_t length) -> void* {
+ armnn::IgnoreUnused(length);
+ const TfLiteDelegateParams* parameters = reinterpret_cast<const TfLiteDelegateParams*>(buffer);
+
+ return static_cast<void*>(ArmnnSubgraph::Create(
+ tfLiteContext, parameters, static_cast<::armnnDelegate::Delegate*>(parameters->delegate->data_)));
+ },
+ // ArmnnSubgraph Free
+ .free = [](TfLiteContext* tfLiteContext, void* buffer) -> void {
+ armnn::IgnoreUnused(tfLiteContext);
+ if (buffer != nullptr)
+ {
+ delete static_cast<ArmnnSubgraph*>(buffer);
+ }
+ },
+ // ArmnnSubgraph Prepare
+ .prepare = [](TfLiteContext* tfLiteContext, TfLiteNode* tfLiteNode) -> TfLiteStatus {
+ if (tfLiteNode->user_data == nullptr)
+ {
+ return kTfLiteError;
+ }
+ return static_cast<ArmnnSubgraph*>(tfLiteNode->user_data)->Prepare(tfLiteContext);
+ },
+ // ArmnnSubgraph Invoke
+ .invoke = [](TfLiteContext* tfLiteContext, TfLiteNode* tfLiteNode) -> TfLiteStatus {
+ if (tfLiteNode->user_data == nullptr)
+ {
+ return kTfLiteError;
+ }
+
+ return static_cast<ArmnnSubgraph*>(tfLiteNode->user_data)->Invoke(tfLiteContext, tfLiteNode);
+ },
+
+ .profiling_string = nullptr,
+ .builtin_code = kTfLiteBuiltinDelegate,
+ .custom_name = "TfLiteArmNnDelegate",
+ .version = 1,
+ .registration_external = nullptr,
+ };
+
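+    // Hand the supported node subsets over to TfLite, which will call the
+    // registration's init/prepare/invoke callbacks above for each subset.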
+ const TfLiteStatus status =
+ tfLiteContext->ReplaceNodeSubsetsWithDelegateKernels(
+ tfLiteContext, kArmnnSubgraphRegistration, supportedOperators, tfLiteDelegate);
+
+ TfLiteIntArrayFree(supportedOperators);
+ return status;
+}
+
+Delegate::Delegate(armnnDelegate::DelegateOptions options)
+ : m_Options(std::move(options))
+{
+ // Configures logging for ARMNN
+ if (m_Options.IsLoggingEnabled())
+ {
+ armnn::ConfigureLogging(true, true, m_Options.GetLoggingSeverity());
+ }
+ // Create/Get the static ArmNN Runtime. Note that the m_Runtime will be shared by all armnn_delegate
+ // instances so the RuntimeOptions cannot be altered for different armnn_delegate instances.
+ m_Runtime = GetRuntime(m_Options.GetRuntimeOptions());
+ std::vector<armnn::BackendId> backends;
+ if (m_Runtime)
+ {
+ const armnn::BackendIdSet supportedDevices = m_Runtime->GetDeviceSpec().GetSupportedBackends();
+ for (auto& backend : m_Options.GetBackends())
+ {
+ if (std::find(supportedDevices.cbegin(), supportedDevices.cend(), backend) == supportedDevices.cend())
+ {
+ TFLITE_LOG_PROD(tflite::TFLITE_LOG_INFO,
+ "TfLiteArmnnDelegate: Requested unknown backend %s", backend.Get().c_str());
+ }
+ else
+ {
+ backends.push_back(backend);
+ }
+ }
+ }
+
+ if (backends.empty())
+ {
+ // No known backend specified
+ throw armnn::InvalidArgumentException("TfLiteArmnnDelegate: No known backend specified.");
+ }
+ m_Options.SetBackends(backends);
+
+ TFLITE_LOG_PROD_ONCE(tflite::TFLITE_LOG_INFO, "TfLiteArmnnDelegate: Created TfLite ArmNN delegate.");
+}
+
+TfLiteIntArray* Delegate::IdentifyOperatorsToDelegate(TfLiteContext* tfLiteContext)
+{
+ TfLiteIntArray* executionPlan = nullptr;
+ if (tfLiteContext->GetExecutionPlan(tfLiteContext, &executionPlan) != kTfLiteOk)
+ {
+ TF_LITE_KERNEL_LOG(tfLiteContext, "TfLiteArmnnDelegate: Unable to get graph execution plan.");
+ return nullptr;
+ }
+
+ // Delegate data with null network
+ DelegateData delegateData(m_Options.GetBackends());
+
+ TfLiteIntArray* nodesToDelegate = TfLiteIntArrayCreate(executionPlan->size);
+ nodesToDelegate->size = 0;
+
+ std::set<int32_t> unsupportedOperators;
+
+ for (int i = 0; i < executionPlan->size; ++i)
+ {
+ const int nodeIndex = executionPlan->data[i];
+
+ // If TfLite nodes can be delegated to ArmNN
+ TfLiteNode* tfLiteNode = nullptr;
+ TfLiteRegistration* tfLiteRegistration = nullptr;
+ if (tfLiteContext->GetNodeAndRegistration(
+ tfLiteContext, nodeIndex, &tfLiteNode, &tfLiteRegistration) != kTfLiteOk)
+ {
+ TF_LITE_KERNEL_LOG(tfLiteContext,
+ "TfLiteArmnnDelegate: Unable to get node and registration for node %d.",
+ nodeIndex);
+ continue;
+ }
+
+ TfLiteStatus visitStatus;
+
+ try
+ {
+ visitStatus = ArmnnSubgraph::VisitNode(
+ delegateData, tfLiteContext, tfLiteRegistration, tfLiteNode, nodeIndex);
+ }
+ catch(std::exception& ex)
+ {
+ ARMNN_LOG(error) << "ArmNN Failed to visit node with error: " << ex.what();
+ visitStatus = kTfLiteError;
+ }
+
+        if (visitStatus != kTfLiteOk)
+ {
+ // node is not supported by ArmNN
+ unsupportedOperators.insert(tfLiteRegistration->builtin_code);
+ continue;
+ }
+
+ nodesToDelegate->data[nodesToDelegate->size++] = nodeIndex;
+ }
+
+    for (int32_t unsupportedOp : unsupportedOperators)
+    {
+        TF_LITE_KERNEL_LOG(tfLiteContext,
+                           "Operator %s [%d] is not supported by armnn_delegate.",
+                           tflite::EnumNameBuiltinOperator(tflite::BuiltinOperator(unsupportedOp)),
+                           unsupportedOp);
+    }
+
+ if (!unsupportedOperators.empty() && m_Options.TfLiteRuntimeFallbackDisabled())
+ {
+ std::stringstream exMessage;
+ exMessage << "TfLiteArmnnDelegate: There are unsupported operators in the model. ";
+ exMessage << "Not falling back to TfLite Runtime as fallback is disabled. ";
+ exMessage << "This should only be disabled under test conditions.";
+ throw armnn::Exception(exMessage.str());
+ }
+ if (nodesToDelegate->size == 0)
+ {
+ ARMNN_LOG(info) << "No operators in this model are supported by the Arm NN TfLite delegate." <<
+ " The model will be executed entirely by TfLite runtime.";
+ }
+
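+    // Keep the delegated node indices in ascending order, as TfLite expects a sorted list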
+ std::sort(&nodesToDelegate->data[0], &nodesToDelegate->data[nodesToDelegate->size]);
+ return nodesToDelegate;
+}
+
+TfLiteDelegate* Delegate::GetDelegate()
+{
+ return &m_Delegate;
+}
+
+const std::string Delegate::GetVersion()
+{
+ return DELEGATE_VERSION;
+}
+
+TfLiteStatus ArmnnSubgraph::AddInputLayer(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ const TfLiteIntArray* inputs,
+ std::vector<armnn::BindingPointInfo>& inputBindings)
+{
+ const size_t numInputs = static_cast<size_t>(inputs->size);
+ for (unsigned int i = 0; i < numInputs; ++i)
+ {
+ const int32_t tensorId = inputs->data[i];
+        const TfLiteTensor& tensor = tfLiteContext->tensors[tensorId];
+ // Do not create bindings for constant inputs
+ if (tensor.allocation_type == kTfLiteMmapRo)
+ {
+ continue;
+ }
+
+        auto bindingId = static_cast<armnn::LayerBindingId>(tensorId);
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddInputLayer(bindingId);
+
+ auto tensorInfo = GetTensorInfoForTfLiteTensor(tensor);
+ armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
+ outputSlot.SetTensorInfo(tensorInfo);
+
+ // Store for creating connections
+ delegateData.m_OutputSlotForNode[static_cast<unsigned long>(tensorId)] = &outputSlot;
+
+ inputBindings.push_back(std::make_pair(bindingId, tensorInfo));
+ }
+
+ return kTfLiteOk;
+}
+
+TfLiteStatus ArmnnSubgraph::AddOutputLayer(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ const TfLiteIntArray* outputs,
+ std::vector<armnn::BindingPointInfo>& outputBindings)
+{
+ const size_t numOutputs = static_cast<size_t>(outputs->size);
+ for (unsigned int i = 0; i < numOutputs; ++i)
+ {
+ const int32_t tensorId = outputs->data[i];
+        const TfLiteTensor& tensor = tfLiteContext->tensors[tensorId];
+
+        auto bindingId = static_cast<armnn::LayerBindingId>(tensorId);
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddOutputLayer(bindingId);
+
+ auto tensorInfo = GetTensorInfoForTfLiteTensor(tensor);
+ ARMNN_ASSERT(delegateData.m_OutputSlotForNode[static_cast<unsigned long>(tensorId)] != nullptr);
+ delegateData.m_OutputSlotForNode[static_cast<unsigned long>(tensorId)]->Connect(layer->GetInputSlot(0));
+ outputBindings.push_back(std::make_pair(bindingId, tensorInfo));
+ }
+
+ return kTfLiteOk;
+}
+
+ArmnnSubgraph* ArmnnSubgraph::Create(TfLiteContext* tfLiteContext,
+ const TfLiteDelegateParams* parameters,
+ const Delegate* delegate)
+{
+ const auto startTime = armnn::GetTimeNow();
+ ARMNN_LOG(info) << "ArmnnSubgraph creation";
+
+ TfLiteIntArray* executionPlan;
+ if (tfLiteContext->GetExecutionPlan(tfLiteContext, &executionPlan) != kTfLiteOk)
+ {
+ return nullptr;
+ }
+
+    // Initialize DelegateData, which holds the network and output slot information
+ DelegateData delegateData(delegate->m_Options.GetBackends());
+
+ // Build ArmNN Network
+ armnn::NetworkOptions networkOptions = delegate->m_Options.GetOptimizerOptions().m_ModelOptions;
+ armnn::NetworkId networkId;
+ delegateData.m_Network = armnn::INetwork::Create(networkOptions);
+
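+    // One output slot entry per TfLite tensor; entries are filled in as the producing layers are added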
+ delegateData.m_OutputSlotForNode = std::vector<armnn::IOutputSlot*>(tfLiteContext->tensors_size, nullptr);
+
+ std::vector<armnn::BindingPointInfo> inputBindings;
+ std::vector<armnn::BindingPointInfo> outputBindings;
+
+ // Add input layer
+ auto status = AddInputLayer(delegateData, tfLiteContext, parameters->input_tensors, inputBindings);
+ if (status != kTfLiteOk)
+ {
+ throw armnn::Exception("TfLiteArmnnDelegate: Unable to add Inputs to the network!");
+ }
+
+ // Parse TfLite delegate nodes to ArmNN
+ const auto parseStartTime = armnn::GetTimeNow();
+ for (int i = 0; i < parameters->nodes_to_replace->size; ++i)
+ {
+ const int nodeIndex = parameters->nodes_to_replace->data[i];
+
+ TfLiteNode* tfLiteNode = nullptr;
+ TfLiteRegistration* tfLiteRegistration = nullptr;
+ if (tfLiteContext->GetNodeAndRegistration(
+ tfLiteContext, nodeIndex, &tfLiteNode, &tfLiteRegistration) != kTfLiteOk)
+ {
+ throw armnn::Exception(&"TfLiteArmnnDelegate: Unable to get node registration: " [ nodeIndex]);
+ }
+
+ if (VisitNode(delegateData, tfLiteContext, tfLiteRegistration, tfLiteNode, nodeIndex) != kTfLiteOk)
+ {
+ throw armnn::Exception(&"TfLiteArmnnDelegate: Unable to parse node: " [ nodeIndex]);
+ }
+ }
+ ARMNN_LOG(info) << "Parse nodes to ArmNN time: " << std::setprecision(2)
+ << std::fixed << armnn::GetTimeDuration(parseStartTime).count() << " ms";
+
+ // Add Output layer
+ status = AddOutputLayer(delegateData, tfLiteContext, parameters->output_tensors, outputBindings);
+ if (status != kTfLiteOk)
+ {
+ throw armnn::Exception("TfLiteArmnnDelegate: Unable to add Outputs to the network!");
+ }
+
+ // Optimize ArmNN network
+ armnn::IOptimizedNetworkPtr optNet(nullptr, nullptr);
+ try
+ {
+ const auto optimizeStartTime = armnn::GetTimeNow();
+ optNet = armnn::Optimize(*(delegateData.m_Network.get()),
+ delegate->m_Options.GetBackends(),
+ delegate->m_Runtime->GetDeviceSpec(),
+ delegate->m_Options.GetOptimizerOptions());
+ ARMNN_LOG(info) << "Optimize ArmnnSubgraph time: " << std::setprecision(2)
+ << std::fixed << armnn::GetTimeDuration(optimizeStartTime).count() << " ms";
+ }
+ catch (std::exception& ex)
+ {
+ std::stringstream exMessage;
+ exMessage << "TfLiteArmnnDelegate: Exception (" << ex.what() << ") caught from optimize.";
+ throw armnn::Exception(exMessage.str());
+ }
+ if (!optNet)
+ {
+ // Optimize failed
+ throw armnn::Exception("TfLiteArmnnDelegate: Unable to optimize the network!");
+ }
+
+ // If set, we will serialize the optimized model into a dot file.
+ const std::string serializeToDotFile = delegate->m_Options.GetSerializeToDot();
+ if (!serializeToDotFile.empty())
+ {
+ ARMNN_LOG(info) << "Writing graph to dot file: " << serializeToDotFile;
+ fs::path filename = serializeToDotFile;
+ std::fstream file(filename.c_str(), std::ios_base::out);
+ optNet->SerializeToDot(file);
+ }
+
+ try
+ {
+ const auto loadStartTime = armnn::GetTimeNow();
+
+ // Load graph into runtime
+ std::string errorMessage;
+ armnn::Status loadingStatus;
+ armnn::MemorySource inputSource = armnn::MemorySource::Undefined;
+ armnn::MemorySource outputSource = armnn::MemorySource::Undefined;
+ // There's a bit of an assumption here that the delegate will only support Malloc memory source.
+ if (delegate->m_Options.GetOptimizerOptions().m_ImportEnabled)
+ {
+ inputSource = armnn::MemorySource::Malloc;
+ }
+ if (delegate->m_Options.GetOptimizerOptions().m_ExportEnabled)
+ {
+ outputSource = armnn::MemorySource::Malloc;
+ }
+ armnn::INetworkProperties networkProperties(false,
+ inputSource,
+ outputSource,
+ delegate->m_Options.GetInternalProfilingState(),
+ delegate->m_Options.GetInternalProfilingDetail());
+ loadingStatus = delegate->m_Runtime->LoadNetwork(networkId,
+ std::move(optNet),
+ errorMessage,
+ networkProperties);
+ if (loadingStatus != armnn::Status::Success)
+ {
+ // Network load failed.
+ throw armnn::Exception("TfLiteArmnnDelegate: Network could not be loaded: " + errorMessage);
+ }
+
+ ARMNN_LOG(info) << "Load ArmnnSubgraph time: " << std::setprecision(2)
+ << std::fixed << armnn::GetTimeDuration(loadStartTime).count() << " ms";
+ }
+ catch (std::exception& ex)
+ {
+ std::stringstream exMessage;
+ exMessage << "TfLiteArmnnDelegate: Exception (" << ex.what() << ") caught from LoadNetwork.";
+ throw armnn::Exception(exMessage.str());
+ }
+
+ // Register debug callback function
+ if (delegate->m_Options.GetDebugCallbackFunction().has_value())
+ {
+ delegate->m_Runtime->RegisterDebugCallback(networkId, delegate->m_Options.GetDebugCallbackFunction().value());
+ }
+
+ ARMNN_LOG(info) << "Overall ArmnnSubgraph creation time: " << std::setprecision(2)
+ << std::fixed << armnn::GetTimeDuration(startTime).count() << " ms\n";
+
+ // Create a new SubGraph with networkId and runtime
+ return new ArmnnSubgraph(networkId, delegate->m_Runtime, inputBindings, outputBindings);
+}
+
+TfLiteStatus ArmnnSubgraph::Prepare(TfLiteContext* tfLiteContext)
+{
+ armnn::IgnoreUnused(tfLiteContext);
+ return kTfLiteOk;
+}
+
+TfLiteStatus ArmnnSubgraph::Invoke(TfLiteContext* tfLiteContext, TfLiteNode* tfLiteNode)
+{
+ // Prepare inputs
+ armnn::InputTensors inputTensors;
+ size_t inputIndex = 0;
+ for (auto inputIdx : tflite::TfLiteIntArrayView(tfLiteNode->inputs))
+ {
+ TfLiteTensor* tensor = &tfLiteContext->tensors[inputIdx];
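+        // Constant (kTfLiteMmapRo) tensors were embedded into the network at creation time,
+        // so only non-constant tensors are bound as inputs here.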
+ if (tensor->allocation_type != kTfLiteMmapRo)
+ {
+ const armnn::BindingPointInfo& inputBinding = m_InputBindings[inputIndex];
+ armnn::TensorInfo inputTensorInfo = inputBinding.second;
+ inputTensorInfo.SetConstant(true);
+ const armnn::ConstTensor inputTensor(inputTensorInfo, tensor->data.data);
+ inputTensors.emplace_back(inputIdx, inputTensor);
+
+ ++inputIndex;
+ }
+ }
+
+ // Prepare outputs
+ armnn::OutputTensors outputTensors;
+ size_t outputIndex = 0;
+ for (auto outputIdx : tflite::TfLiteIntArrayView(tfLiteNode->outputs))
+ {
+ const armnn::BindingPointInfo& outputBinding = m_OutputBindings[outputIndex];
+ TfLiteTensor* tensor = &tfLiteContext->tensors[outputIdx];
+ const armnn::Tensor outputTensor(outputBinding.second, tensor->data.data);
+ outputTensors.emplace_back(outputIdx, outputTensor);
+
+ ++outputIndex;
+ }
+
+ // Run graph
+ auto status = m_Runtime->EnqueueWorkload(m_NetworkId, inputTensors, outputTensors);
+ // The delegate holds its own Arm NN runtime so this is our last chance to print internal profiling data.
+ std::shared_ptr<armnn::IProfiler> profiler = m_Runtime->GetProfiler(m_NetworkId);
+ if (profiler && profiler->IsProfilingEnabled())
+ {
+ profiler->Print(std::cout);
+ }
+ return (status == armnn::Status::Success) ? kTfLiteOk : kTfLiteError;
+}
+
+TfLiteStatus ArmnnSubgraph::VisitNode(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ TfLiteRegistration* tfLiteRegistration,
+ TfLiteNode* tfLiteNode,
+ int nodeIndex)
+{
+ switch (tfLiteRegistration->builtin_code)
+ {
+ case kTfLiteBuiltinCustom:
+ {
+#if defined(ARMNN_POST_TFLITE_2_5)
+ // Custom operators are defined by the name rather than the builtin code.
+ // Parse the custom_name param in the registration to point to the correct visitor function.
+ std::string customOperatorName = tfLiteRegistration->custom_name;
+ if ( customOperatorName == "AveragePool3D" )
+ {
+ return VisitPooling3dOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ customOperatorName);
+ }
+ else if (customOperatorName == "MaxPool3D")
+ {
+ return VisitPooling3dOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ customOperatorName);
+ }
+#endif
+ // Invalid or unsupported custom operator
+ return kTfLiteError;
+ }
+ case kTfLiteBuiltinAbs:
+ return VisitElementwiseUnaryOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ armnn::UnaryOperation::Abs);
+ case kTfLiteBuiltinAdd:
+ return VisitElementwiseBinaryOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinAdd);
+ case kTfLiteBuiltinArgMax:
+ return VisitArgMinMaxOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinArgMax);
+ case kTfLiteBuiltinArgMin:
+ return VisitArgMinMaxOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinArgMin);
+ case kTfLiteBuiltinAveragePool2d:
+ return VisitPooling2dOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinAveragePool2d);
+ case kTfLiteBuiltinBatchMatmul:
+ return VisitBatchMatMulOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinBatchMatmul);
+ case kTfLiteBuiltinBatchToSpaceNd:
+ return VisitBatchToSpaceNdOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinBatchToSpaceNd);
+ case kTfLiteBuiltinCast:
+ return VisitCastOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinCast);
+ case kTfLiteBuiltinConcatenation:
+ return VisitControlOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinConcatenation);
+ case kTfLiteBuiltinConv2d:
+ return VisitConvolutionOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinConv2d);
+// Conv3d is only correctly supported for external delegates from TF Lite v2.6, as there was a breaking bug in v2.5.
+#if defined(ARMNN_POST_TFLITE_2_5)
+ case kTfLiteBuiltinConv3d:
+ return VisitConvolutionOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinConv3d);
+#endif
+ case kTfLiteBuiltinDepthToSpace:
+ return VisitDepthToSpaceOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinDepthToSpace);
+ case kTfLiteBuiltinDepthwiseConv2d:
+ return VisitConvolutionOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinDepthwiseConv2d);
+ case kTfLiteBuiltinDequantize:
+ return VisitDequantizeOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinDequantize);
+ case kTfLiteBuiltinDiv:
+ return VisitElementwiseBinaryOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinDiv);
+ case kTfLiteBuiltinElu:
+ return VisitActivationOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinElu);
+ case kTfLiteBuiltinEqual:
+ return VisitComparisonOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinEqual);
+ case kTfLiteBuiltinExp:
+ return VisitElementwiseUnaryOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ armnn::UnaryOperation::Exp);
+ case kTfLiteBuiltinExpandDims:
+ return VisitExpandDimsOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinExpandDims);
+ case kTfLiteBuiltinFill:
+ return VisitFillOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinFill);
+ case kTfLiteBuiltinFloor:
+ return VisitFloorOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinFloor);
+ case kTfLiteBuiltinFloorDiv:
+ return VisitElementwiseBinaryOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinFloorDiv);
+ case kTfLiteBuiltinFullyConnected:
+ return VisitFullyConnectedOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinFullyConnected);
+ case kTfLiteBuiltinGather:
+ return VisitGatherOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinGather);
+ case kTfLiteBuiltinGatherNd:
+ return VisitGatherNdOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinGatherNd);
+ case kTfLiteBuiltinGreater:
+ return VisitComparisonOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinGreater);
+ case kTfLiteBuiltinGreaterEqual:
+ return VisitComparisonOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinGreaterEqual);
+ case kTfLiteBuiltinHardSwish:
+ return VisitActivationOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinHardSwish);
+ case kTfLiteBuiltinL2Normalization:
+ return VisitL2NormalizationOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinL2Normalization);
+ case kTfLiteBuiltinL2Pool2d:
+ return VisitPooling2dOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinL2Pool2d);
+ case kTfLiteBuiltinLess:
+ return VisitComparisonOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinLess);
+ case kTfLiteBuiltinLessEqual:
+ return VisitComparisonOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinLessEqual);
+ case kTfLiteBuiltinLocalResponseNormalization:
+ return VisitLocalResponseNormalizationOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinLocalResponseNormalization);
+ case kTfLiteBuiltinLog:
+ return VisitElementwiseUnaryOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ armnn::UnaryOperation::Log);
+ case kTfLiteBuiltinLogicalAnd:
+ return VisitLogicalBinaryOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinLogicalAnd,
+ armnn::LogicalBinaryOperation::LogicalAnd);
+ case kTfLiteBuiltinLogicalNot:
+ return VisitElementwiseUnaryOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ armnn::UnaryOperation::LogicalNot);
+ case kTfLiteBuiltinLogicalOr:
+ return VisitLogicalBinaryOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinLogicalOr,
+ armnn::LogicalBinaryOperation::LogicalOr);
+ case kTfLiteBuiltinLogistic:
+ return VisitActivationOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinLogistic);
+ case kTfLiteBuiltinLogSoftmax:
+ return VisitSoftmaxOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinLogSoftmax);
+ case kTfLiteBuiltinLstm:
+ return VisitLstmOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinLstm);
+ case kTfLiteBuiltinMaxPool2d:
+ return VisitPooling2dOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinMaxPool2d);
+ case kTfLiteBuiltinMaximum:
+ return VisitElementwiseBinaryOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinMaximum);
+ case kTfLiteBuiltinMean:
+ return VisitControlOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinMean);
+ case kTfLiteBuiltinMinimum:
+ return VisitElementwiseBinaryOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinMinimum);
+ case kTfLiteBuiltinMirrorPad:
+ return VisitPadOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinMirrorPad);
+ case kTfLiteBuiltinMul:
+ return VisitElementwiseBinaryOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinMul);
+ case kTfLiteBuiltinNeg:
+ return VisitElementwiseUnaryOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ armnn::UnaryOperation::Neg);
+ case kTfLiteBuiltinNotEqual:
+ return VisitComparisonOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinNotEqual);
+ case kTfLiteBuiltinPack:
+ return VisitPackOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinPack);
+ case kTfLiteBuiltinPad:
+ return VisitPadOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinPad);
+ case kTfLiteBuiltinPadv2:
+ return VisitPadOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinPadv2);
+ case kTfLiteBuiltinPrelu:
+ return VisitPreluOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinPrelu);
+ case kTfLiteBuiltinQuantize:
+ return VisitQuantizeOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinQuantize);
+ case kTfLiteBuiltinRank:
+ return VisitControlOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinRank);
+ case kTfLiteBuiltinReduceMax:
+ return VisitReduceOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinReduceMax);
+ case kTfLiteBuiltinReduceMin:
+ return VisitReduceOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinReduceMin);
+ case kTfLiteBuiltinReduceProd:
+ return VisitReduceOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinReduceProd);
+ case kTfLiteBuiltinRelu:
+ return VisitActivationOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinRelu);
+ case kTfLiteBuiltinReluN1To1:
+ return VisitActivationOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinReluN1To1);
+ case kTfLiteBuiltinRelu6:
+ return VisitActivationOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinRelu6);
+ case kTfLiteBuiltinReshape:
+ return VisitReshapeOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinReshape);
+ case kTfLiteBuiltinResizeBilinear:
+ return VisitResizeOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinResizeBilinear);
+ case kTfLiteBuiltinResizeNearestNeighbor:
+ return VisitResizeOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinResizeNearestNeighbor);
+ case kTfLiteBuiltinRsqrt:
+ return VisitElementwiseUnaryOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ armnn::UnaryOperation::Rsqrt);
+ case kTfLiteBuiltinShape:
+ return VisitShapeOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinShape);
+ case kTfLiteBuiltinSin:
+ return VisitElementwiseUnaryOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ armnn::UnaryOperation::Sin);
+ case kTfLiteBuiltinSplit:
+ return VisitSplitOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinSplit);
+ case kTfLiteBuiltinSplitV:
+ return VisitSplitVOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinSplitV);
+ case kTfLiteBuiltinSqrt:
+ return VisitElementwiseUnaryOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ armnn::UnaryOperation::Sqrt);
+ case kTfLiteBuiltinSqueeze:
+ return VisitSqueezeOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinSqueeze);
+ case kTfLiteBuiltinSlice:
+ return VisitSliceOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinSlice);
+ case kTfLiteBuiltinStridedSlice:
+ return VisitStridedSliceOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinStridedSlice);
+ case kTfLiteBuiltinSum:
+ return VisitReduceOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinSum);
+ case kTfLiteBuiltinTranspose:
+ return VisitTransposeOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinTranspose);
+ case kTfLiteBuiltinTransposeConv:
+ return VisitConvolutionOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinTransposeConv);
+ case kTfLiteBuiltinSoftmax:
+ return VisitSoftmaxOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinSoftmax);
+ case kTfLiteBuiltinSpaceToBatchNd:
+ return VisitSpaceToBatchNdOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinSpaceToBatchNd);
+ case kTfLiteBuiltinSpaceToDepth:
+ return VisitSpaceToDepthOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinSpaceToDepth);
+ case kTfLiteBuiltinSub:
+ return VisitElementwiseBinaryOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinSub);
+ case kTfLiteBuiltinTanh:
+ return VisitActivationOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinTanh);
+ case kTfLiteBuiltinUnidirectionalSequenceLstm:
+ return VisitUnidirectionalSequenceLstmOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinUnidirectionalSequenceLstm);
+ case kTfLiteBuiltinUnpack:
+ return VisitUnpackOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinUnpack);
+ default:
+ return kTfLiteError;
+ }
+}
+
+} // namespace armnnDelegate
\ No newline at end of file
diff --git a/delegate/classic/src/armnn_external_delegate.cpp b/delegate/classic/src/armnn_external_delegate.cpp
new file mode 100644
index 0000000000..444015d71c
--- /dev/null
+++ b/delegate/classic/src/armnn_external_delegate.cpp
@@ -0,0 +1,68 @@
+//
+// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#include "armnn_delegate.hpp"
+#include <armnn/Logging.hpp>
+#include <armnn/utility/NumericCast.hpp>
+
+#include <iostream>
+#include <tensorflow/lite/minimal_logging.h>
+
+namespace tflite
+{
+
+/**
+ * This file defines two symbols that need to be exported to use the TFLite external delegate provider. This is a plugin
+ * that can be used for fast integration of delegates into benchmark tests and other tools. It allows loading of
+ * a dynamic delegate library at runtime.
+ *
+ * The external delegate also has Tensorflow Lite Python bindings. Therefore the dynamic external delegate
+ * can be directly used with Tensorflow Lite Python APIs.
+ *
+ * See tensorflow/lite/delegates/external for details or visit the tensorflow guide
+ * [here](https://www.tensorflow.org/lite/performance/implementing_delegate#option_2_leverage_external_delegate)
+ */
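+
+/*
+ * Illustrative usage sketch (not part of the original file); the "backends"
+ * option key and the backend names shown are assumptions for demonstration:
+ *
+ *   const char* keys[]   = {"backends"};
+ *   const char* values[] = {"CpuAcc,CpuRef"};
+ *   TfLiteDelegate* delegate = tflite_plugin_create_delegate(
+ *       const_cast<char**>(keys), const_cast<char**>(values), 1, nullptr);
+ *   // ... attach the delegate to a TfLite interpreter and run inference ...
+ *   tflite_plugin_destroy_delegate(delegate);
+ */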
+
+extern "C"
+{
+
+/**
+ * Implementation of the TfLite external delegate plugin
+ *
+ * For details about what options_keys and option_values are supported please see:
+ * armnnDelegate::DelegateOptions::DelegateOptions(char const* const*, char const* const*,size_t,void (*)(const char*))
+ */
+TfLiteDelegate* tflite_plugin_create_delegate(char** options_keys,
+ char** options_values,
+ size_t num_options,
+ void (*report_error)(const char*))
+{
+ // Returning null indicates an error during delegate creation, we initialize with that
+ TfLiteDelegate* delegate = nullptr;
+ try
+ {
+        armnnDelegate::DelegateOptions options(options_keys, options_values, num_options, report_error);
+ delegate = TfLiteArmnnDelegateCreate(options);
+ }
+ catch (const std::exception& ex)
+ {
+        if (report_error)
+ {
+ report_error(ex.what());
+ }
+ }
+ return delegate;
+}
+
+/** Destroy a given delegate plugin
+ *
+ * @param[in] delegate Delegate to destruct
+ */
+void tflite_plugin_destroy_delegate(TfLiteDelegate* delegate)
+{
+ armnnDelegate::TfLiteArmnnDelegateDelete(delegate);
+}
+
+} // extern "C"
+} // namespace tflite
\ No newline at end of file