author      Matthew Sloyan <matthew.sloyan@arm.com>    2023-03-16 10:17:51 +0000
committer   Matthew Sloyan <matthew.sloyan@arm.com>    2023-04-03 09:20:43 +0000
commit      1157232551c23c9a553aed37972b8119d37c4543 (patch)
tree        b1e55023c0f7b7d0f65a012f700c91fbf659a3d1
parent      1a05aad6d5adf3b25848ffd873a0e0e82756aa06 (diff)
download    armnn-1157232551c23c9a553aed37972b8119d37c4543.tar.gz
IVGCVSW-7558 Implement Delegate Utils
* Implement OpaqueDelegateUtils.hpp using new accessors.
* Moved classic delegate utils to ClassicDelegateUtils.hpp.
* DelegateUtils.hpp now contains common utils.
* Removed unused ConnectConstant function.

Signed-off-by: Matthew Sloyan <matthew.sloyan@arm.com>
Change-Id: I0fa611c82f5c06b0b7e0c37bfc343e09fb6a96c9
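
For context, the "new accessors" are the TF Lite opaque C API calls: the classic delegate reads node and tensor data directly out of the TfLiteContext/TfLiteNode structs, while the opaque delegate must go through accessor functions such as TfLiteOpaqueNodeGetInput() and TfLiteOpaqueTensorType(). A minimal sketch of the difference, assuming a context and node are already in hand (the two helper functions below are illustrative only; the real utilities follow in the diff):

    #include <tensorflow/lite/c/common.h>
    #include <tensorflow/lite/c/c_api_opaque.h>

    // Classic delegate: tensor data is read straight out of the TfLite structs.
    TfLiteType GetFirstInputTypeClassic(TfLiteContext* context, TfLiteNode* node)
    {
        const TfLiteTensor& tensor = context->tensors[node->inputs->data[0]];
        return tensor.type;
    }

    // Opaque delegate: the structs are opaque, so everything goes through accessors.
    TfLiteType GetFirstInputTypeOpaque(TfLiteOpaqueContext* context, TfLiteOpaqueNode* node)
    {
        const TfLiteOpaqueTensor* tensor = TfLiteOpaqueNodeGetInput(context, node, 0);
        return TfLiteOpaqueTensorType(tensor);
    }
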
-rw-r--r--  delegate/classic/CMakeLists.txt                          1
-rw-r--r--  delegate/classic/src/Activation.hpp                      2
-rw-r--r--  delegate/classic/src/BatchMatMul.hpp                     3
-rw-r--r--  delegate/classic/src/ClassicDelegateUtils.hpp          518
-rw-r--r--  delegate/classic/src/Comparison.hpp                      3
-rw-r--r--  delegate/classic/src/Convolution.hpp                     4
-rw-r--r--  delegate/classic/src/ElementwiseBinary.hpp               2
-rw-r--r--  delegate/classic/src/FullyConnected.hpp                  3
-rw-r--r--  delegate/classic/src/Gather.hpp                          3
-rw-r--r--  delegate/classic/src/GatherNd.hpp                        3
-rw-r--r--  delegate/classic/src/Lstm.hpp                            2
-rw-r--r--  delegate/classic/src/Pooling.hpp                         2
-rw-r--r--  delegate/classic/src/Prelu.hpp                           2
-rw-r--r--  delegate/classic/src/Redefine.hpp                        2
-rw-r--r--  delegate/classic/src/Resize.hpp                          4
-rw-r--r--  delegate/classic/src/Shape.hpp                           2
-rw-r--r--  delegate/classic/src/SharedFunctions.cpp                 3
-rw-r--r--  delegate/classic/src/Softmax.hpp                         2
-rw-r--r--  delegate/classic/src/Split.hpp                           2
-rw-r--r--  delegate/classic/src/UnidirectionalSequenceLstm.hpp      2
-rw-r--r--  delegate/classic/src/Unpack.hpp                          2
-rw-r--r--  delegate/common/src/DelegateUtils.hpp                  531
-rw-r--r--  delegate/opaque/src/OpaqueDelegateUtils.hpp            658
23 files changed, 1205 insertions, 551 deletions
diff --git a/delegate/classic/CMakeLists.txt b/delegate/classic/CMakeLists.txt
index 04167130d7..367ac40790 100644
--- a/delegate/classic/CMakeLists.txt
+++ b/delegate/classic/CMakeLists.txt
@@ -13,6 +13,7 @@ list(APPEND armnnClassicDelegateObject_sources
src/ArgMinMax.hpp
src/BatchMatMul.hpp
src/BatchSpace.hpp
+ src/ClassicDelegateUtils.hpp
src/Comparison.hpp
src/Convolution.hpp
src/Control.hpp
diff --git a/delegate/classic/src/Activation.hpp b/delegate/classic/src/Activation.hpp
index b86d89b4e5..e813956f6f 100644
--- a/delegate/classic/src/Activation.hpp
+++ b/delegate/classic/src/Activation.hpp
@@ -5,7 +5,7 @@
#pragma once
-#include <DelegateUtils.hpp>
+#include <ClassicDelegateUtils.hpp>
#include <tensorflow/lite/builtin_ops.h>
#include <tensorflow/lite/c/builtin_op_data.h>
diff --git a/delegate/classic/src/BatchMatMul.hpp b/delegate/classic/src/BatchMatMul.hpp
index f56f728ef5..94b25fe7b5 100644
--- a/delegate/classic/src/BatchMatMul.hpp
+++ b/delegate/classic/src/BatchMatMul.hpp
@@ -5,7 +5,8 @@
#pragma once
-#include <DelegateUtils.hpp>
+#include <ClassicDelegateUtils.hpp>
+
#include <algorithm>
#include <iterator>
#include <string>
diff --git a/delegate/classic/src/ClassicDelegateUtils.hpp b/delegate/classic/src/ClassicDelegateUtils.hpp
new file mode 100644
index 0000000000..52e9f5cf63
--- /dev/null
+++ b/delegate/classic/src/ClassicDelegateUtils.hpp
@@ -0,0 +1,518 @@
+//
+// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <armnn_delegate.hpp>
+#include <DelegateUtils.hpp>
+
+#include <armnn/ArmNN.hpp>
+#include <armnn/BackendHelper.hpp>
+#include <armnn/utility/Assert.hpp>
+#include <armnn/utility/NumericCast.hpp>
+
+#include <armnnUtils/Permute.hpp>
+#include <armnnUtils/TensorUtils.hpp>
+
+#include <tensorflow/lite/builtin_ops.h>
+#include <tensorflow/lite/c/builtin_op_data.h>
+#include <tensorflow/lite/c/common.h>
+#include <tensorflow/lite/minimal_logging.h>
+#include <tensorflow/lite/kernels/kernel_util.h>
+
+namespace
+{
+
+// Macro to call an Is<layer_name>Supported function and log caller name together with reason for lack of support
+#define FORWARD_LAYER_SUPPORT_FUNC(opName, tfLiteContext, func, backends, supported, setBackend, ...) \
+try \
+{ \
+ for (auto&& backendId : backends) \
+ { \
+ auto layerSupportObject = armnn::GetILayerSupportByBackendId(backendId); \
+ if (layerSupportObject.IsBackendRegistered()) \
+ { \
+ std::string reasonIfUnsupported; \
+ supported = \
+ layerSupportObject.func(__VA_ARGS__, armnn::Optional<std::string&>(reasonIfUnsupported)); \
+ if (supported) \
+ { \
+ setBackend = backendId; \
+ break; \
+ } \
+ else \
+ { \
+ if (reasonIfUnsupported.size() > 0) \
+ { \
+ TFLITE_LOG_PROD(tflite::TFLITE_LOG_WARNING, \
+ "%s: not supported by armnn: %s", opName, reasonIfUnsupported.c_str()); \
+ } \
+ else \
+ { \
+ TFLITE_LOG_PROD(tflite::TFLITE_LOG_WARNING, \
+ "%s: not supported by armnn", opName); \
+ } \
+ } \
+ } \
+ else \
+ { \
+ TF_LITE_KERNEL_LOG(tfLiteContext, "%s: backend not registered: %s", opName, backendId.Get().c_str()); \
+ } \
+ } \
+ if (!supported) \
+ { \
+ TF_LITE_KERNEL_LOG(tfLiteContext, "%s: not supported by any specified backend", opName); \
+ } \
+} \
+catch (const armnn::InvalidArgumentException &e) \
+{ \
+ throw armnn::InvalidArgumentException(e, "Failed to check layer support", CHECK_LOCATION()); \
+}
+
+TfLiteStatus ValidateNumInputs(TfLiteContext* tfLiteContext,
+ TfLiteNode* tfLiteNode,
+ const unsigned int expectedSize,
+ int nodeIndex)
+{
+ auto numInputs = tfLiteNode->inputs->size;
+ if (static_cast<unsigned int >(numInputs) != expectedSize)
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext, "TfLiteArmnnDelegate: Unexpected number of inputs (%d != %d) in node #%d",
+ numInputs, expectedSize, nodeIndex);
+ return kTfLiteError;
+ }
+ return kTfLiteOk;
+}
+
+TfLiteStatus ValidateNumOutputs(TfLiteContext* tfLiteContext,
+ TfLiteNode* tfLiteNode,
+ const unsigned int expectedSize,
+ int nodeIndex)
+{
+ auto numOutputs = tfLiteNode->outputs->size;
+ if (static_cast<unsigned int >(numOutputs) != expectedSize)
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext, "TfLiteArmnnDelegate: Unexpected number of outputs (%d != %d) in node #%d",
+ numOutputs, expectedSize, nodeIndex);
+ return kTfLiteError;
+ }
+ return kTfLiteOk;
+}
+
+bool IsDynamicTensor(const TfLiteTensor& tfLiteTensor)
+{
+ auto tensorAllocationType = tfLiteTensor.allocation_type;
+ if (tensorAllocationType == kTfLiteDynamic)
+ {
+ return true;
+ }
+ return false;
+}
+
+bool IsValid(const TfLiteTensor* tfLiteTensor)
+{
+ return tfLiteTensor == nullptr ? false : true;
+}
+
+bool IsValid(TfLiteContext* tfLiteContext, const TfLiteTensor& tfLiteTensor, int32_t operatorCode, int32_t nodeIndex)
+{
+ if(!IsValid(&tfLiteTensor))
+ {
+ std::cout << "..Is Not Valid" << std::endl;
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: Invalid TfLite tensor in operator #%d node #%d: ",
+ operatorCode, nodeIndex);
+ return false;
+ }
+ if (IsDynamicTensor(tfLiteTensor))
+ {
+ std::cout << "..IsDynamicTensor" << std::endl;
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: Dynamic tensors are not supported in operator #%d node #%d: ",
+ operatorCode, nodeIndex);
+ return false;
+ }
+ return true;
+}
+
+bool IsAffineQuantization(const TfLiteTensor& tfLiteTensor)
+{
+ auto quantizationInfo = tfLiteTensor.quantization;
+ if (quantizationInfo.type == kTfLiteAffineQuantization)
+ {
+ return true;
+ }
+ return false;
+}
+
+TfLiteStatus Connect(armnn::IConnectableLayer* layer,
+ TfLiteNode* tfLiteNode,
+ armnnDelegate::DelegateData& data)
+{
+ ARMNN_ASSERT(static_cast<unsigned int>(tfLiteNode->outputs->size) == layer->GetNumOutputSlots());
+
+ // Connect the input slots
+ for (unsigned int inputIndex = 0; inputIndex < layer->GetNumInputSlots(); ++inputIndex)
+ {
+ if (data.m_OutputSlotForNode[tfLiteNode->inputs->data[inputIndex]] != nullptr)
+ {
+ data.m_OutputSlotForNode[tfLiteNode->inputs->data[inputIndex]]->Connect(layer->GetInputSlot(inputIndex));
+ }
+ }
+
+ // Prepare output slots
+ for (unsigned int outputIndex = 0; outputIndex < layer->GetNumOutputSlots(); ++outputIndex)
+ {
+ armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(outputIndex);
+ data.m_OutputSlotForNode[static_cast<unsigned long>(tfLiteNode->outputs->data[outputIndex])] = &outputSlot;
+ }
+
+ return kTfLiteOk;
+}
+
+TfLiteStatus FusedActivation(TfLiteContext* tfLiteContext,
+ TfLiteNode* tfLiteNode,
+ TfLiteFusedActivation activationType,
+ armnn::IConnectableLayer* prevLayer,
+ unsigned int outputSlotIndex,
+ armnnDelegate::DelegateData& data)
+{
+
+ const armnn::TensorInfo& activationOutputInfo = prevLayer->GetOutputSlot(outputSlotIndex).GetTensorInfo();
+
+ armnn::ActivationDescriptor activationDesc;
+
+ switch (activationType)
+ {
+ case kTfLiteActNone:
+ {
+ // No Activation
+ return kTfLiteOk;
+ }
+ case kTfLiteActRelu:
+ {
+ activationDesc.m_Function = armnn::ActivationFunction::ReLu;
+ break;
+ }
+// The name of kTfLiteActRelu1 changed after TF Lite v2.3
+#if defined(ARMNN_POST_TFLITE_2_3)
+ case kTfLiteActReluN1To1:
+#else
+ case kTfLiteActRelu1:
+#endif
+ {
+ activationDesc.m_Function = armnn::ActivationFunction::BoundedReLu;
+ activationDesc.m_A = 1.0f;
+ activationDesc.m_B = -1.0f;
+ break;
+ }
+ case kTfLiteActRelu6:
+ {
+ activationDesc.m_Function = armnn::ActivationFunction::BoundedReLu;
+ activationDesc.m_A = 6.0f;
+ activationDesc.m_B = 0.0f;
+ break;
+ }
+ case kTfLiteActSigmoid:
+ {
+ activationDesc.m_Function = armnn::ActivationFunction::Sigmoid;
+ break;
+ }
+ case kTfLiteActTanh:
+ {
+ activationDesc.m_Function = armnn::ActivationFunction::TanH;
+ activationDesc.m_A = 1.0f;
+ activationDesc.m_B = 1.0f;
+ break;
+ }
+ default:
+ return kTfLiteError;
+ }
+
+ bool isSupported = false;
+ armnn::BackendId setBackend;
+ FORWARD_LAYER_SUPPORT_FUNC("ACTIVATION",
+ tfLiteContext,
+ IsActivationSupported,
+ data.m_Backends,
+ isSupported,
+ setBackend,
+ activationOutputInfo,
+ activationOutputInfo,
+ activationDesc);
+ if (!isSupported)
+ {
+ return kTfLiteError;
+ }
+ armnn::IConnectableLayer* activationLayer = data.m_Network->AddActivationLayer(activationDesc);
+ activationLayer->SetBackendId(setBackend);
+
+ ARMNN_ASSERT(activationLayer != nullptr);
+ activationLayer->GetOutputSlot(0).SetTensorInfo(activationOutputInfo);
+
+ // Connect and prepare output slots
+ for (unsigned int outputIndex = 0; outputIndex < activationLayer->GetNumOutputSlots(); ++outputIndex)
+ {
+ data.m_OutputSlotForNode[static_cast<unsigned long>(
+ tfLiteNode->outputs->data[outputIndex])]->Connect(activationLayer->GetInputSlot(0));
+ armnn::IOutputSlot& outputSlot = activationLayer->GetOutputSlot(outputIndex);
+ data.m_OutputSlotForNode[static_cast<unsigned long>(
+ tfLiteNode->outputs->data[outputIndex])] = &outputSlot;
+ }
+ return kTfLiteOk;
+}
+
+armnn::IConnectableLayer* AddReshapeLayer(TfLiteContext* tfLiteContext,
+ TfLiteNode* tfLiteNode,
+ armnn::IConnectableLayer* prevLayer,
+ armnn::TensorInfo reshapedOutputTensorInfo,
+ armnn::TensorInfo outputTensorInfo,
+ armnnDelegate::DelegateData& data)
+{
+ armnn::ReshapeDescriptor desc;
+ desc.m_TargetShape = outputTensorInfo.GetShape();
+
+ bool isSupported = false;
+ armnn::BackendId setBackend;
+ FORWARD_LAYER_SUPPORT_FUNC("RESHAPE",
+ tfLiteContext,
+ IsReshapeSupported,
+ data.m_Backends,
+ isSupported,
+ setBackend,
+ reshapedOutputTensorInfo,
+ outputTensorInfo,
+ desc);
+
+ if (!isSupported)
+ {
+ return nullptr;
+ }
+
+ armnn::IConnectableLayer* reshapeLayer = data.m_Network->AddReshapeLayer(desc);
+ reshapeLayer->SetBackendId(setBackend);
+ ARMNN_ASSERT(reshapeLayer != nullptr);
+
+ prevLayer->GetOutputSlot(0).SetTensorInfo(reshapedOutputTensorInfo);
+ reshapeLayer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
+
+ // Connect and prepare output slots
+ for (unsigned int outputIndex = 0; outputIndex < reshapeLayer->GetNumOutputSlots(); ++outputIndex)
+ {
+ data.m_OutputSlotForNode[static_cast<unsigned long>(
+ tfLiteNode->outputs->data[outputIndex])]->Connect(reshapeLayer->GetInputSlot(0));
+ armnn::IOutputSlot& outputSlot = reshapeLayer->GetOutputSlot(outputIndex);
+ data.m_OutputSlotForNode[static_cast<unsigned long>(
+ tfLiteNode->outputs->data[outputIndex])] = &outputSlot;
+ }
+ return reshapeLayer;
+}
+
+armnn::DataType GetDataType(const TfLiteTensor& tfLiteTensor)
+{
+ switch (tfLiteTensor.type)
+ {
+ case kTfLiteBool:
+ return armnn::DataType::Boolean;
+ case kTfLiteFloat32:
+ return armnn::DataType::Float32;
+ case kTfLiteFloat16:
+ return armnn::DataType::Float16;
+ case kTfLiteUInt8:
+ return armnn::DataType::QAsymmU8;
+ case kTfLiteInt8:
+ {
+ auto quantizationInfo = tfLiteTensor.quantization;
+ if (quantizationInfo.type == kTfLiteAffineQuantization)
+ {
+ auto* quantization =
+ reinterpret_cast<TfLiteAffineQuantization*>(tfLiteTensor.quantization.params);
+ if (quantization->zero_point != nullptr && quantization->zero_point->size == 1)
+ {
+ return armnn::DataType::QAsymmS8;
+ }
+ else
+ {
+ return armnn::DataType::QSymmS8;
+ }
+ }
+ else
+ {
+ return armnn::DataType::QAsymmS8;
+ }
+ }
+ case kTfLiteInt16:
+ return armnn::DataType::QSymmS16;
+ case kTfLiteInt32:
+ return armnn::DataType::Signed32;
+ case kTfLiteInt64:
+ return armnn::DataType::Signed64;
+ default:
+ throw armnn::Exception(&"TfLiteArmnnDelegate: Unsupported data type: " [ tfLiteTensor.type]);
+ }
+}
+
+armnn::TensorInfo GetTensorInfoForTfLiteTensor(const TfLiteTensor& tfLiteTensor, bool isOutput = false)
+{
+ armnn::DataType type = GetDataType(tfLiteTensor);
+ armnn::TensorInfo ret;
+ auto tensorDimensionSize = tfLiteTensor.dims->size;
+ if (tensorDimensionSize == 0)
+ {
+ // If input tensor does not have a shape
+ // assuming that it has 1D tensor
+ if (!isOutput)
+ {
+ std::vector<unsigned int> safeShape = { 1 };
+ bool dimensionsSpecificity[1] = { true };
+ armnn::TensorShape tensorShape(armnn::numeric_cast<unsigned int>(safeShape.size()),
+ safeShape.data(),
+ dimensionsSpecificity);
+ ret = armnn::TensorInfo(tensorShape, type);
+ if(tflite::IsConstantTensor(&tfLiteTensor))
+ {
+ ret.SetConstant(true);
+ }
+ }
+ else
+ {
+ armnn::TensorShape tensorShape(armnn::Dimensionality::NotSpecified);
+ ret = armnn::TensorInfo(tensorShape, type);
+ }
+ }
+ else
+ {
+ std::vector<unsigned int> tensorDims(static_cast<unsigned int>(tensorDimensionSize));
+ bool dimensionsSpecificity[5] = { true, true, true, true, true };
+ for (unsigned int i = 0; i < static_cast<unsigned int>(tensorDimensionSize); ++i) {
+ auto dim = tfLiteTensor.dims->data[i];
+ if (dim == 0)
+ {
+ dimensionsSpecificity[i] = false;
+ }
+ tensorDims[i] = static_cast<unsigned int>(dim);
+ }
+ armnn::TensorShape tensorShape(static_cast<unsigned int>(tensorDimensionSize),
+ tensorDims.data(),
+ dimensionsSpecificity);
+
+ if(tflite::IsConstantTensor(&tfLiteTensor))
+ {
+ ret = armnn::TensorInfo(tensorShape, type);
+ ret.SetConstant(true);
+ }
+ else
+ {
+ ret = armnn::TensorInfo(tensorShape, type);
+ }
+ }
+
+ auto quantizationInfo = tfLiteTensor.quantization;
+ if (quantizationInfo.type == kTfLiteAffineQuantization)
+ {
+ // get per-channel quantization parameters
+ const auto* affineQuantization =
+ reinterpret_cast<TfLiteAffineQuantization*>(tfLiteTensor.quantization.params);
+ if (affineQuantization->scale->size > 1)
+ {
+ std::vector<float> quantizationScales;
+ for (unsigned int i = 0; i < static_cast<unsigned int>(affineQuantization->scale->size); ++i)
+ {
+ quantizationScales.push_back(affineQuantization->scale->data[i]);
+ }
+ ret.SetQuantizationScales(quantizationScales);
+ ret.SetQuantizationDim(armnn::numeric_cast<unsigned int>(affineQuantization->quantized_dimension));
+ }
+ else
+ {
+ ret.SetQuantizationScale(affineQuantization->scale->data[0]);
+ ret.SetQuantizationOffset(affineQuantization->zero_point->data[0]);
+ }
+ }
+ else
+ {
+ auto quantizationParameters = tfLiteTensor.params;
+ ret.SetQuantizationScale(quantizationParameters.scale);
+ ret.SetQuantizationOffset(quantizationParameters.zero_point);
+ }
+
+ return ret;
+}
+
+armnn::ConstTensor CreateConstTensor(const TfLiteTensor* tfLiteTensor,
+ const armnn::TensorInfo& tensorInfo)
+{
+ if (tfLiteTensor->allocation_type != kTfLiteMmapRo)
+ {
+ throw armnn::Exception(
+ "TfLiteArmnnDelegate: Not constant allocation type: " + std::to_string(tfLiteTensor->allocation_type));
+ }
+
+ return armnn::ConstTensor(tensorInfo, tfLiteTensor->data.data);
+}
+
+armnn::ConstTensor* GetConstTensorForTfLiteTensor(const TfLiteTensor* tfLiteTensors, TfLiteNode* tfLiteNode, int index)
+{
+ const TfLiteTensor &tfLiteTensor = tfLiteTensors[tfLiteNode->inputs->data[index]];
+ armnn::TensorInfo tensorInfo = GetTensorInfoForTfLiteTensor(tfLiteTensor);
+ return new armnn::ConstTensor(tensorInfo, tfLiteTensor.data.data);
+}
+
+bool IsOptionalOperandPresent(TfLiteNode* tfLiteNode, const int operandIndex)
+{
+ // If the inputs array has fewer than operandIndex entries or if the entry at operandIndex has a value of -1 or
+ // less then the input is not present.
+ if (tfLiteNode->inputs->size > operandIndex && tfLiteNode->inputs->data[operandIndex] >= 0)
+ {
+ return true;
+ }
+ return false;
+}
+
+TfLiteStatus ProcessInputs(armnn::IConnectableLayer* layer,
+ armnnDelegate::DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ TfLiteNode* tfLiteNode)
+{
+ const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
+ // Process input tensors
+ // If input tensor is a Constant tensor create a constant layer and connect it to the network
+ for (unsigned int inputIndex = 0; inputIndex < layer->GetNumInputSlots(); ++inputIndex)
+ {
+ const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[inputIndex]];
+ if (tflite::IsConstantTensor(&tfLiteInputTensor))
+ {
+ armnn::TensorInfo inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
+ bool isSupported = false;
+ armnn::BackendId setBackend;
+ FORWARD_LAYER_SUPPORT_FUNC("CONSTANT",
+ tfLiteContext,
+ IsConstantSupported,
+ delegateData.m_Backends,
+ isSupported,
+ setBackend,
+ inputTensorInfo);
+ if (!isSupported)
+ {
+ return kTfLiteError;
+ }
+ auto constantInput = CreateConstTensor(&tfLiteInputTensor,
+ inputTensorInfo);
+ armnn::IConnectableLayer* constantLayer = delegateData.m_Network->AddConstantLayer(constantInput);
+ constantLayer->SetBackendId(setBackend);
+ armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
+ outputSlot.SetTensorInfo(inputTensorInfo);
+
+ delegateData.m_OutputSlotForNode[tfLiteNode->inputs->data[inputIndex]] = &outputSlot;
+ }
+ }
+ return kTfLiteOk;
+}
+
+} // namespace anonymous
diff --git a/delegate/classic/src/Comparison.hpp b/delegate/classic/src/Comparison.hpp
index 6d7700d191..1db554cfbf 100644
--- a/delegate/classic/src/Comparison.hpp
+++ b/delegate/classic/src/Comparison.hpp
@@ -5,7 +5,8 @@
#pragma once
-#include <DelegateUtils.hpp>
+#include <ClassicDelegateUtils.hpp>
+
#include <armnn/utility/IgnoreUnused.hpp>
#include <tensorflow/lite/builtin_ops.h>
diff --git a/delegate/classic/src/Convolution.hpp b/delegate/classic/src/Convolution.hpp
index 8963d2ead8..107d4de21c 100644
--- a/delegate/classic/src/Convolution.hpp
+++ b/delegate/classic/src/Convolution.hpp
@@ -5,8 +5,8 @@
#pragma once
-#include <DelegateUtils.hpp>
-#include "SharedFunctions.hpp"
+#include <ClassicDelegateUtils.hpp>
+#include <SharedFunctions.hpp>
#include <tensorflow/lite/builtin_ops.h>
#include <tensorflow/lite/c/builtin_op_data.h>
diff --git a/delegate/classic/src/ElementwiseBinary.hpp b/delegate/classic/src/ElementwiseBinary.hpp
index e11327b95a..dbbf47941a 100644
--- a/delegate/classic/src/ElementwiseBinary.hpp
+++ b/delegate/classic/src/ElementwiseBinary.hpp
@@ -5,7 +5,7 @@
#pragma once
-#include <DelegateUtils.hpp>
+#include <ClassicDelegateUtils.hpp>
#include "MultiLayerFacade.hpp"
#include "SharedFunctions.hpp"
diff --git a/delegate/classic/src/FullyConnected.hpp b/delegate/classic/src/FullyConnected.hpp
index 28d43d06df..9ce06a8d45 100644
--- a/delegate/classic/src/FullyConnected.hpp
+++ b/delegate/classic/src/FullyConnected.hpp
@@ -5,7 +5,8 @@
#pragma once
-#include <DelegateUtils.hpp>
+#include <ClassicDelegateUtils.hpp>
+
#include "armnnUtils/TensorUtils.hpp"
#include <armnn/utility/IgnoreUnused.hpp>
diff --git a/delegate/classic/src/Gather.hpp b/delegate/classic/src/Gather.hpp
index 4c9cf82832..f9611a40cf 100644
--- a/delegate/classic/src/Gather.hpp
+++ b/delegate/classic/src/Gather.hpp
@@ -5,7 +5,8 @@
#pragma once
-#include <DelegateUtils.hpp>
+#include <ClassicDelegateUtils.hpp>
+
#include <algorithm>
#include <iterator>
#include <string>
diff --git a/delegate/classic/src/GatherNd.hpp b/delegate/classic/src/GatherNd.hpp
index 12f0af306d..e1ee2ac8c0 100644
--- a/delegate/classic/src/GatherNd.hpp
+++ b/delegate/classic/src/GatherNd.hpp
@@ -5,7 +5,8 @@
#pragma once
-#include <DelegateUtils.hpp>
+#include <ClassicDelegateUtils.hpp>
+
#include <algorithm>
#include <iterator>
#include <string>
diff --git a/delegate/classic/src/Lstm.hpp b/delegate/classic/src/Lstm.hpp
index 460c61adf9..518559fc21 100644
--- a/delegate/classic/src/Lstm.hpp
+++ b/delegate/classic/src/Lstm.hpp
@@ -5,7 +5,7 @@
#pragma once
-#include <DelegateUtils.hpp>
+#include <ClassicDelegateUtils.hpp>
#include <armnn/LstmParams.hpp>
#include <armnn/Tensor.hpp>
diff --git a/delegate/classic/src/Pooling.hpp b/delegate/classic/src/Pooling.hpp
index 2de40613fb..50e944effc 100644
--- a/delegate/classic/src/Pooling.hpp
+++ b/delegate/classic/src/Pooling.hpp
@@ -5,7 +5,7 @@
#pragma once
-#include <DelegateUtils.hpp>
+#include <ClassicDelegateUtils.hpp>
#include <tensorflow/lite/builtin_ops.h>
#include <tensorflow/lite/c/builtin_op_data.h>
diff --git a/delegate/classic/src/Prelu.hpp b/delegate/classic/src/Prelu.hpp
index 71a04a744e..4fdad4a6b7 100644
--- a/delegate/classic/src/Prelu.hpp
+++ b/delegate/classic/src/Prelu.hpp
@@ -5,7 +5,7 @@
#pragma once
-#include <DelegateUtils.hpp>
+#include <ClassicDelegateUtils.hpp>
#include <tensorflow/lite/builtin_ops.h>
#include <tensorflow/lite/c/builtin_op_data.h>
diff --git a/delegate/classic/src/Redefine.hpp b/delegate/classic/src/Redefine.hpp
index 83c42d046e..7aef74f76b 100644
--- a/delegate/classic/src/Redefine.hpp
+++ b/delegate/classic/src/Redefine.hpp
@@ -7,7 +7,7 @@
#include <armnn/utility/IgnoreUnused.hpp>
-#include <DelegateUtils.hpp>
+#include <ClassicDelegateUtils.hpp>
#include <tensorflow/lite/builtin_ops.h>
#include <tensorflow/lite/c/builtin_op_data.h>
diff --git a/delegate/classic/src/Resize.hpp b/delegate/classic/src/Resize.hpp
index 33c6c6ecd8..32c7f46b9f 100644
--- a/delegate/classic/src/Resize.hpp
+++ b/delegate/classic/src/Resize.hpp
@@ -5,9 +5,9 @@
#pragma once
-#include <DelegateUtils.hpp>
-#include <armnn/utility/IgnoreUnused.hpp>
+#include <ClassicDelegateUtils.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/Descriptors.hpp>
#include <tensorflow/lite/builtin_ops.h>
diff --git a/delegate/classic/src/Shape.hpp b/delegate/classic/src/Shape.hpp
index 381a87430f..e5dae23238 100644
--- a/delegate/classic/src/Shape.hpp
+++ b/delegate/classic/src/Shape.hpp
@@ -5,7 +5,7 @@
#pragma once
-#include <DelegateUtils.hpp>
+#include <ClassicDelegateUtils.hpp>
#include <tensorflow/lite/builtin_ops.h>
#include <tensorflow/lite/c/builtin_op_data.h>
diff --git a/delegate/classic/src/SharedFunctions.cpp b/delegate/classic/src/SharedFunctions.cpp
index 37e1c7fc64..bcff3a1dd0 100644
--- a/delegate/classic/src/SharedFunctions.cpp
+++ b/delegate/classic/src/SharedFunctions.cpp
@@ -3,10 +3,9 @@
// SPDX-License-Identifier: MIT
//
-
#include "SharedFunctions.hpp"
-#include <DelegateUtils.hpp>
+#include <ClassicDelegateUtils.hpp>
#include <tensorflow/lite/builtin_ops.h>
#include <tensorflow/lite/c/builtin_op_data.h>
diff --git a/delegate/classic/src/Softmax.hpp b/delegate/classic/src/Softmax.hpp
index 4fbd508437..bfc6874faa 100644
--- a/delegate/classic/src/Softmax.hpp
+++ b/delegate/classic/src/Softmax.hpp
@@ -5,7 +5,7 @@
#pragma once
-#include <DelegateUtils.hpp>
+#include <ClassicDelegateUtils.hpp>
#include <tensorflow/lite/builtin_ops.h>
#include <tensorflow/lite/c/builtin_op_data.h>
diff --git a/delegate/classic/src/Split.hpp b/delegate/classic/src/Split.hpp
index fc193baf86..877e0b5729 100644
--- a/delegate/classic/src/Split.hpp
+++ b/delegate/classic/src/Split.hpp
@@ -5,7 +5,7 @@
#pragma once
-#include <DelegateUtils.hpp>
+#include <ClassicDelegateUtils.hpp>
#include <algorithm>
#include <iterator>
diff --git a/delegate/classic/src/UnidirectionalSequenceLstm.hpp b/delegate/classic/src/UnidirectionalSequenceLstm.hpp
index f8689d263f..0e1ad1c754 100644
--- a/delegate/classic/src/UnidirectionalSequenceLstm.hpp
+++ b/delegate/classic/src/UnidirectionalSequenceLstm.hpp
@@ -5,7 +5,7 @@
#pragma once
-#include <DelegateUtils.hpp>
+#include <ClassicDelegateUtils.hpp>
#include <armnn/LstmParams.hpp>
#include <armnn/Tensor.hpp>
diff --git a/delegate/classic/src/Unpack.hpp b/delegate/classic/src/Unpack.hpp
index c9b737040c..685293bcb6 100644
--- a/delegate/classic/src/Unpack.hpp
+++ b/delegate/classic/src/Unpack.hpp
@@ -7,7 +7,7 @@
#include <armnn/utility/IgnoreUnused.hpp>
-#include <DelegateUtils.hpp>
+#include <ClassicDelegateUtils.hpp>
#include <tensorflow/lite/builtin_ops.h>
#include <tensorflow/lite/c/builtin_op_data.h>
diff --git a/delegate/common/src/DelegateUtils.hpp b/delegate/common/src/DelegateUtils.hpp
index 1aa9029271..b953699016 100644
--- a/delegate/common/src/DelegateUtils.hpp
+++ b/delegate/common/src/DelegateUtils.hpp
@@ -19,134 +19,17 @@
#include <tensorflow/lite/c/builtin_op_data.h>
#include <tensorflow/lite/c/common.h>
#include <tensorflow/lite/minimal_logging.h>
-
-#include "tensorflow/lite/kernels/kernel_util.h"
+#include <tensorflow/lite/kernels/kernel_util.h>
namespace
{
-// Macro to call an Is<layer_name>Supported function and log caller name together with reason for lack of support
-#define FORWARD_LAYER_SUPPORT_FUNC(opName, tfLiteContext, func, backends, supported, setBackend, ...) \
-try \
-{ \
- for (auto&& backendId : backends) \
- { \
- auto layerSupportObject = armnn::GetILayerSupportByBackendId(backendId); \
- if (layerSupportObject.IsBackendRegistered()) \
- { \
- std::string reasonIfUnsupported; \
- supported = \
- layerSupportObject.func(__VA_ARGS__, armnn::Optional<std::string&>(reasonIfUnsupported)); \
- if (supported) \
- { \
- setBackend = backendId; \
- break; \
- } \
- else \
- { \
- if (reasonIfUnsupported.size() > 0) \
- { \
- TFLITE_LOG_PROD(tflite::TFLITE_LOG_WARNING, \
- "%s: not supported by armnn: %s", opName, reasonIfUnsupported.c_str()); \
- } \
- else \
- { \
- TFLITE_LOG_PROD(tflite::TFLITE_LOG_WARNING, \
- "%s: not supported by armnn", opName); \
- } \
- } \
- } \
- else \
- { \
- TF_LITE_KERNEL_LOG(tfLiteContext, "%s: backend not registered: %s", opName, backendId.Get().c_str()); \
- } \
- } \
- if (!supported) \
- { \
- TF_LITE_KERNEL_LOG(tfLiteContext, "%s: not supported by any specified backend", opName); \
- } \
-} \
-catch (const armnn::InvalidArgumentException &e) \
-{ \
- throw armnn::InvalidArgumentException(e, "Failed to check layer support", CHECK_LOCATION()); \
-}
-
-TfLiteStatus ValidateNumInputs(TfLiteContext* tfLiteContext,
- TfLiteNode* tfLiteNode,
- const unsigned int expectedSize,
- int nodeIndex)
-{
- auto numInputs = tfLiteNode->inputs->size;
- if (static_cast<unsigned int >(numInputs) != expectedSize)
- {
- TF_LITE_MAYBE_KERNEL_LOG(
- tfLiteContext, "TfLiteArmnnDelegate: Unexpected number of inputs (%d != %d) in node #%d",
- numInputs, expectedSize, nodeIndex);
- return kTfLiteError;
- }
- return kTfLiteOk;
-}
-
-TfLiteStatus ValidateNumOutputs(TfLiteContext* tfLiteContext,
- TfLiteNode* tfLiteNode,
- const unsigned int expectedSize,
- int nodeIndex)
-{
- auto numOutputs = tfLiteNode->outputs->size;
- if (static_cast<unsigned int >(numOutputs) != expectedSize)
- {
- TF_LITE_MAYBE_KERNEL_LOG(
- tfLiteContext, "TfLiteArmnnDelegate: Unexpected number of outputs (%d != %d) in node #%d",
- numOutputs, expectedSize, nodeIndex);
- return kTfLiteError;
- }
- return kTfLiteOk;
-}
-
-bool IsDynamicTensor(const TfLiteTensor& tfLiteTensor)
-{
- auto tensorAllocationType = tfLiteTensor.allocation_type;
- if (tensorAllocationType == kTfLiteDynamic)
- {
- return true;
- }
- return false;
-}
-
-bool IsValid(const TfLiteTensor* tfLiteTensor)
-{
- return tfLiteTensor == nullptr ? false : true;
-}
-
-bool IsValid(TfLiteContext* tfLiteContext, const TfLiteTensor& tfLiteTensor, int32_t operatorCode, int32_t nodeIndex)
-{
- if(!IsValid(&tfLiteTensor))
- {
- std::cout << "..Is Not Valid" << std::endl;
- TF_LITE_MAYBE_KERNEL_LOG(
- tfLiteContext,
- "TfLiteArmnnDelegate: Invalid TfLite tensor in operator #%d node #%d: ",
- operatorCode, nodeIndex);
- return false;
- }
- if (IsDynamicTensor(tfLiteTensor))
- {
- std::cout << "..IsDynamicTensor" << std::endl;
- TF_LITE_MAYBE_KERNEL_LOG(
- tfLiteContext,
- "TfLiteArmnnDelegate: Dynamic tensors are not supported in operator #%d node #%d: ",
- operatorCode, nodeIndex);
- return false;
- }
- return true;
-}
-
uint32_t NonNegative(int32_t value, int nodeIndex)
{
if (value < 0)
{
throw armnn::Exception(
- "TfLiteArmnnDelegate: Non-negative value in node " + std::to_string(static_cast<int>(nodeIndex)));
+ "TfLiteArmnnDelegate: Non-negative value in node " + std::to_string(static_cast<int>(nodeIndex)));
}
else
{
@@ -154,41 +37,6 @@ uint32_t NonNegative(int32_t value, int nodeIndex)
}
}
-bool IsAffineQuantization(const TfLiteTensor& tfLiteTensor)
-{
- auto quantizationInfo = tfLiteTensor.quantization;
- if (quantizationInfo.type == kTfLiteAffineQuantization)
- {
- return true;
- }
- return false;
-}
-
-TfLiteStatus Connect(armnn::IConnectableLayer* layer,
- TfLiteNode* tfLiteNode,
- armnnDelegate::DelegateData& data)
-{
- ARMNN_ASSERT(static_cast<unsigned int>(tfLiteNode->outputs->size) == layer->GetNumOutputSlots());
-
- // Connect the input slots
- for (unsigned int inputIndex = 0; inputIndex < layer->GetNumInputSlots(); ++inputIndex)
- {
- if (data.m_OutputSlotForNode[tfLiteNode->inputs->data[inputIndex]] != nullptr)
- {
- data.m_OutputSlotForNode[tfLiteNode->inputs->data[inputIndex]]->Connect(layer->GetInputSlot(inputIndex));
- }
- }
-
- // Prepare output slots
- for (unsigned int outputIndex = 0; outputIndex < layer->GetNumOutputSlots(); ++outputIndex)
- {
- armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(outputIndex);
- data.m_OutputSlotForNode[static_cast<unsigned long>(tfLiteNode->outputs->data[outputIndex])] = &outputSlot;
- }
-
- return kTfLiteOk;
-}
-
void ExpandTensorRankToEqual(armnn::TensorInfo& inputInfo0,
armnn::TensorInfo& inputInfo1)
{
@@ -207,295 +55,6 @@ void ExpandTensorRankToEqual(armnn::TensorInfo& inputInfo0,
const armnn::TensorShape& newShape = armnnUtils::ExpandDimsToRank(smallInfo.GetShape(), biggerInputDimensions);
smallInfo.SetShape(newShape);
-
-}
-
-TfLiteStatus FusedActivation(TfLiteContext* tfLiteContext,
- TfLiteNode* tfLiteNode,
- TfLiteFusedActivation activationType,
- armnn::IConnectableLayer* prevLayer,
- unsigned int outputSlotIndex,
- armnnDelegate::DelegateData& data)
-{
-
- const armnn::TensorInfo& activationOutputInfo = prevLayer->GetOutputSlot(outputSlotIndex).GetTensorInfo();
-
- armnn::ActivationDescriptor activationDesc;
-
- switch (activationType)
- {
- case kTfLiteActNone:
- {
- // No Activation
- return kTfLiteOk;
- }
- case kTfLiteActRelu:
- {
- activationDesc.m_Function = armnn::ActivationFunction::ReLu;
- break;
- }
-// The name of kTfLiteActRelu1 changed after TF Lite v2.3
-#if defined(ARMNN_POST_TFLITE_2_3)
- case kTfLiteActReluN1To1:
-#else
- case kTfLiteActRelu1:
-#endif
- {
- activationDesc.m_Function = armnn::ActivationFunction::BoundedReLu;
- activationDesc.m_A = 1.0f;
- activationDesc.m_B = -1.0f;
- break;
- }
- case kTfLiteActRelu6:
- {
- activationDesc.m_Function = armnn::ActivationFunction::BoundedReLu;
- activationDesc.m_A = 6.0f;
- activationDesc.m_B = 0.0f;
- break;
- }
- case kTfLiteActSigmoid:
- {
- activationDesc.m_Function = armnn::ActivationFunction::Sigmoid;
- break;
- }
- case kTfLiteActTanh:
- {
- activationDesc.m_Function = armnn::ActivationFunction::TanH;
- activationDesc.m_A = 1.0f;
- activationDesc.m_B = 1.0f;
- break;
- }
- default:
- return kTfLiteError;
- }
-
- bool isSupported = false;
- armnn::BackendId setBackend;
- FORWARD_LAYER_SUPPORT_FUNC("ACTIVATION",
- tfLiteContext,
- IsActivationSupported,
- data.m_Backends,
- isSupported,
- setBackend,
- activationOutputInfo,
- activationOutputInfo,
- activationDesc);
- if (!isSupported)
- {
- return kTfLiteError;
- }
- armnn::IConnectableLayer* activationLayer = data.m_Network->AddActivationLayer(activationDesc);
- activationLayer->SetBackendId(setBackend);
-
- ARMNN_ASSERT(activationLayer != nullptr);
- activationLayer->GetOutputSlot(0).SetTensorInfo(activationOutputInfo);
-
- // Connect and prepare output slots
- for (unsigned int outputIndex = 0; outputIndex < activationLayer->GetNumOutputSlots(); ++outputIndex)
- {
- data.m_OutputSlotForNode[static_cast<unsigned long>(
- tfLiteNode->outputs->data[outputIndex])]->Connect(activationLayer->GetInputSlot(0));
- armnn::IOutputSlot& outputSlot = activationLayer->GetOutputSlot(outputIndex);
- data.m_OutputSlotForNode[static_cast<unsigned long>(
- tfLiteNode->outputs->data[outputIndex])] = &outputSlot;
- }
- return kTfLiteOk;
-}
-
-armnn::IConnectableLayer* AddReshapeLayer(TfLiteContext* tfLiteContext,
- TfLiteNode* tfLiteNode,
- armnn::IConnectableLayer* prevLayer,
- armnn::TensorInfo reshapedOutputTensorInfo,
- armnn::TensorInfo outputTensorInfo,
- armnnDelegate::DelegateData& data)
-{
- armnn::ReshapeDescriptor desc;
- desc.m_TargetShape = outputTensorInfo.GetShape();
-
- bool isSupported = false;
- armnn::BackendId setBackend;
- FORWARD_LAYER_SUPPORT_FUNC("RESHAPE",
- tfLiteContext,
- IsReshapeSupported,
- data.m_Backends,
- isSupported,
- setBackend,
- reshapedOutputTensorInfo,
- outputTensorInfo,
- desc);
-
- if (!isSupported)
- {
- return nullptr;
- }
-
- armnn::IConnectableLayer* reshapeLayer = data.m_Network->AddReshapeLayer(desc);
- reshapeLayer->SetBackendId(setBackend);
- ARMNN_ASSERT(reshapeLayer != nullptr);
-
- prevLayer->GetOutputSlot(0).SetTensorInfo(reshapedOutputTensorInfo);
- reshapeLayer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
-
- // Connect and prepare output slots
- for (unsigned int outputIndex = 0; outputIndex < reshapeLayer->GetNumOutputSlots(); ++outputIndex)
- {
- data.m_OutputSlotForNode[static_cast<unsigned long>(
- tfLiteNode->outputs->data[outputIndex])]->Connect(reshapeLayer->GetInputSlot(0));
- armnn::IOutputSlot& outputSlot = reshapeLayer->GetOutputSlot(outputIndex);
- data.m_OutputSlotForNode[static_cast<unsigned long>(
- tfLiteNode->outputs->data[outputIndex])] = &outputSlot;
- }
- return reshapeLayer;
-}
-
-armnn::DataType GetDataType(const TfLiteTensor& tfLiteTensor)
-{
- switch (tfLiteTensor.type)
- {
- case kTfLiteBool:
- return armnn::DataType::Boolean;
- case kTfLiteFloat32:
- return armnn::DataType::Float32;
- case kTfLiteFloat16:
- return armnn::DataType::Float16;
- case kTfLiteUInt8:
- return armnn::DataType::QAsymmU8;
- case kTfLiteInt8:
- {
- auto quantizationInfo = tfLiteTensor.quantization;
- if (quantizationInfo.type == kTfLiteAffineQuantization)
- {
- auto* quantization =
- reinterpret_cast<TfLiteAffineQuantization*>(tfLiteTensor.quantization.params);
- if (quantization->zero_point != nullptr && quantization->zero_point->size == 1)
- {
- return armnn::DataType::QAsymmS8;
- }
- else
- {
- return armnn::DataType::QSymmS8;
- }
- }
- else
- {
- return armnn::DataType::QAsymmS8;
- }
- }
- case kTfLiteInt16:
- return armnn::DataType::QSymmS16;
- case kTfLiteInt32:
- return armnn::DataType::Signed32;
- case kTfLiteInt64:
- return armnn::DataType::Signed64;
- default:
- throw armnn::Exception(&"TfLiteArmnnDelegate: Unsupported data type: " [ tfLiteTensor.type]);
- }
-}
-
-armnn::TensorInfo GetTensorInfoForTfLiteTensor(const TfLiteTensor& tfLiteTensor, bool isOutput = false)
-{
- armnn::DataType type = GetDataType(tfLiteTensor);
- armnn::TensorInfo ret;
- auto tensorDimensionSize = tfLiteTensor.dims->size;
- if (tensorDimensionSize == 0)
- {
- // If input tensor does not have a shape
- // assuming that it has 1D tensor
- if (!isOutput)
- {
- std::vector<unsigned int> safeShape = { 1 };
- bool dimensionsSpecificity[1] = { true };
- armnn::TensorShape tensorShape(armnn::numeric_cast<unsigned int>(safeShape.size()),
- safeShape.data(),
- dimensionsSpecificity);
- ret = armnn::TensorInfo(tensorShape, type);
- if(tflite::IsConstantTensor(&tfLiteTensor))
- {
- ret.SetConstant(true);
- }
- }
- else
- {
- armnn::TensorShape tensorShape(armnn::Dimensionality::NotSpecified);
- ret = armnn::TensorInfo(tensorShape, type);
- }
- }
- else
- {
- std::vector<unsigned int> tensorDims(static_cast<unsigned int>(tensorDimensionSize));
- bool dimensionsSpecificity[5] = { true, true, true, true, true };
- for (unsigned int i = 0; i < static_cast<unsigned int>(tensorDimensionSize); ++i) {
- auto dim = tfLiteTensor.dims->data[i];
- if (dim == 0)
- {
- dimensionsSpecificity[i] = false;
- }
- tensorDims[i] = static_cast<unsigned int>(dim);
- }
- armnn::TensorShape tensorShape(static_cast<unsigned int>(tensorDimensionSize),
- tensorDims.data(),
- dimensionsSpecificity);
-
- if(tflite::IsConstantTensor(&tfLiteTensor))
- {
- ret = armnn::TensorInfo(tensorShape, type);
- ret.SetConstant(true);
- }
- else
- {
- ret = armnn::TensorInfo(tensorShape, type);
- }
- }
-
- auto quantizationInfo = tfLiteTensor.quantization;
- if (quantizationInfo.type == kTfLiteAffineQuantization)
- {
- // get per-channel quantization parameters
- const auto* affineQuantization =
- reinterpret_cast<TfLiteAffineQuantization*>(tfLiteTensor.quantization.params);
- if (affineQuantization->scale->size > 1)
- {
- std::vector<float> quantizationScales;
- for (unsigned int i = 0; i < static_cast<unsigned int>(affineQuantization->scale->size); ++i)
- {
- quantizationScales.push_back(affineQuantization->scale->data[i]);
- }
- ret.SetQuantizationScales(quantizationScales);
- ret.SetQuantizationDim(armnn::numeric_cast<unsigned int>(affineQuantization->quantized_dimension));
- }
- else
- {
- ret.SetQuantizationScale(affineQuantization->scale->data[0]);
- ret.SetQuantizationOffset(affineQuantization->zero_point->data[0]);
- }
- }
- else
- {
- auto quantizationParameters = tfLiteTensor.params;
- ret.SetQuantizationScale(quantizationParameters.scale);
- ret.SetQuantizationOffset(quantizationParameters.zero_point);
- }
-
- return ret;
-}
-
-armnn::ConstTensor CreateConstTensor(const TfLiteTensor* tfLiteTensor,
- const armnn::TensorInfo& tensorInfo)
-{
- if (tfLiteTensor->allocation_type != kTfLiteMmapRo)
- {
- throw armnn::Exception(
- "TfLiteArmnnDelegate: Not constant allocation type: " + std::to_string(tfLiteTensor->allocation_type));
- }
-
- return armnn::ConstTensor(tensorInfo, tfLiteTensor->data.data);
-}
-
-armnn::ConstTensor* GetConstTensorForTfLiteTensor(const TfLiteTensor* tfLiteTensors, TfLiteNode* tfLiteNode, int index)
-{
- const TfLiteTensor &tfLiteTensor = tfLiteTensors[tfLiteNode->inputs->data[index]];
- armnn::TensorInfo tensorInfo = GetTensorInfoForTfLiteTensor(tfLiteTensor);
- return new armnn::ConstTensor(tensorInfo, tfLiteTensor.data.data);
}
void CalcPadding(uint32_t inputSize,
@@ -521,91 +80,6 @@ void CalcPadding(uint32_t inputSize,
}
}
-TfLiteStatus ConnectConstant(armnn::IConnectableLayer* layer,
- const armnn::TensorInfo& constTensorInfo,
- TfLiteContext* tfLiteContext,
- const TfLiteTensor& tfLiteTensor,
- armnnDelegate::DelegateData& data,
- unsigned int slotIndex)
-{
- IgnoreUnused(layer);
- bool isSupported = false;
- armnn::BackendId setBackend;
- FORWARD_LAYER_SUPPORT_FUNC("CONSTANT",
- tfLiteContext,
- IsConstantSupported,
- data.m_Backends,
- isSupported,
- setBackend,
- constTensorInfo);
- if (!isSupported)
- {
- return kTfLiteError;
- }
-
- auto constantInput = CreateConstTensor(&tfLiteTensor,
- constTensorInfo);
- armnn::IConnectableLayer* constantLayer = data.m_Network->AddConstantLayer(constantInput);
- constantLayer->SetBackendId(setBackend);
- armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
- outputSlot.SetTensorInfo(constTensorInfo);
-
- data.m_OutputSlotForNode[static_cast<unsigned long>(slotIndex)] = &outputSlot;
-
- return kTfLiteOk;
-}
-
-bool IsOptionalOperandPresent(TfLiteNode* tfLiteNode, const int operandIndex)
-{
- // If the inputs array has fewer than operandIndex entries or if the entry at operandIndex has a value of -1 or
- // less then the input is not present.
- if (tfLiteNode->inputs->size > operandIndex && tfLiteNode->inputs->data[operandIndex] >= 0)
- {
- return true;
- }
- return false;
-}
-
-TfLiteStatus ProcessInputs(armnn::IConnectableLayer* layer,
- armnnDelegate::DelegateData& delegateData,
- TfLiteContext* tfLiteContext,
- TfLiteNode* tfLiteNode)
-{
- const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
- // Process input tensors
- // If input tensor is a Constant tensor create a constant layer and connect it to the network
- for (unsigned int inputIndex = 0; inputIndex < layer->GetNumInputSlots(); ++inputIndex)
- {
- const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[inputIndex]];
- if (tflite::IsConstantTensor(&tfLiteInputTensor))
- {
- armnn::TensorInfo inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
- bool isSupported = false;
- armnn::BackendId setBackend;
- FORWARD_LAYER_SUPPORT_FUNC("CONSTANT",
- tfLiteContext,
- IsConstantSupported,
- delegateData.m_Backends,
- isSupported,
- setBackend,
- inputTensorInfo);
- if (!isSupported)
- {
- return kTfLiteError;
- }
- auto constantInput = CreateConstTensor(&tfLiteInputTensor,
- inputTensorInfo);
- armnn::IConnectableLayer* constantLayer = delegateData.m_Network->AddConstantLayer(constantInput);
- constantLayer->SetBackendId(setBackend);
- armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
- outputSlot.SetTensorInfo(inputTensorInfo);
-
- delegateData.m_OutputSlotForNode[tfLiteNode->inputs->data[inputIndex]] = &outputSlot;
- }
- }
- return kTfLiteOk;
-}
-
unsigned int ComputeWrappedIndex(int index, unsigned int numDimensions)
{
int numDims = armnn::numeric_cast<int>(numDimensions);
@@ -633,7 +107,6 @@ void UpdateConstantTensorOutputs(const armnn::TensorInfo& inputInfo, armnn::Tens
{
outputInfo.SetShape(inputInfo.GetShape());
}
- return;
}
} // namespace anonymous
diff --git a/delegate/opaque/src/OpaqueDelegateUtils.hpp b/delegate/opaque/src/OpaqueDelegateUtils.hpp
new file mode 100644
index 0000000000..d4ef9ca9c5
--- /dev/null
+++ b/delegate/opaque/src/OpaqueDelegateUtils.hpp
@@ -0,0 +1,658 @@
+//
+// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <armnn_delegate.hpp>
+#include <DelegateUtils.hpp>
+
+#include <armnn/ArmNN.hpp>
+#include <armnn/BackendHelper.hpp>
+#include <armnn/utility/Assert.hpp>
+#include <armnn/utility/NumericCast.hpp>
+
+#include <armnnUtils/Permute.hpp>
+#include <armnnUtils/TensorUtils.hpp>
+
+#include <tensorflow/lite/builtin_ops.h>
+#include <tensorflow/lite/c/builtin_op_data.h>
+#include <tensorflow/lite/c/common.h>
+#include <tensorflow/lite/c/c_api_opaque.h>
+#include <tensorflow/lite/minimal_logging.h>
+#include <tensorflow/lite/kernels/kernel_util.h>
+
+namespace
+{
+
+// Macro to call an Is<layer_name>Supported function and log caller name together with reason for lack of support
+#define FORWARD_LAYER_OPAQUE_SUPPORT_FUNC(opName, tfLiteContext, func, backends, supported, setBackend, ...) \
+try \
+{ \
+ for (auto&& backendId : backends) \
+ { \
+ auto layerSupportObject = armnn::GetILayerSupportByBackendId(backendId); \
+ if (layerSupportObject.IsBackendRegistered()) \
+ { \
+ std::string reasonIfUnsupported; \
+ supported = \
+ layerSupportObject.func(__VA_ARGS__, armnn::Optional<std::string&>(reasonIfUnsupported)); \
+ if (supported) \
+ { \
+ setBackend = backendId; \
+ break; \
+ } \
+ else \
+ { \
+ if (reasonIfUnsupported.size() > 0) \
+ { \
+ TFLITE_LOG_PROD(tflite::TFLITE_LOG_WARNING, \
+ "%s: not supported by armnn: %s", opName, reasonIfUnsupported.c_str()); \
+ } \
+ else \
+ { \
+ TFLITE_LOG_PROD(tflite::TFLITE_LOG_WARNING, \
+ "%s: not supported by armnn", opName); \
+ } \
+ } \
+ } \
+ else \
+ { \
+ TF_LITE_OPAQUE_KERNEL_LOG(tfLiteContext, "%s: backend not registered: %s", \
+ opName, backendId.Get().c_str()); \
+ } \
+ } \
+ if (!supported) \
+ { \
+ TF_LITE_OPAQUE_KERNEL_LOG(tfLiteContext, "%s: not supported by any specified backend", opName); \
+ } \
+} \
+catch (const armnn::InvalidArgumentException &e) \
+{ \
+ throw armnn::InvalidArgumentException(e, "Failed to check layer support", CHECK_LOCATION()); \
+}
+
+TfLiteStatus ValidateNumInputs(TfLiteOpaqueContext* tfLiteContext,
+ TfLiteOpaqueNode* tfLiteNode,
+ const unsigned int expectedSize,
+ int nodeIndex)
+{
+ int numInputs = TfLiteOpaqueNodeNumberOfInputs(tfLiteNode);
+ if (static_cast<unsigned int>(numInputs) != expectedSize)
+ {
+ TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+ tfLiteContext, "TfLiteArmnnOpaqueDelegate: Unexpected number of inputs (%d != %d) in node #%d",
+ numInputs, expectedSize, nodeIndex);
+ return kTfLiteError;
+ }
+ return kTfLiteOk;
+}
+
+TfLiteStatus ValidateNumOutputs(TfLiteOpaqueContext* tfLiteContext,
+ TfLiteOpaqueNode* tfLiteNode,
+ const unsigned int expectedSize,
+ int nodeIndex)
+{
+ auto numOutputs = TfLiteOpaqueNodeNumberOfOutputs(tfLiteNode);
+ if (static_cast<unsigned int>(numOutputs) != expectedSize)
+ {
+ TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+ tfLiteContext, "TfLiteArmnnOpaqueDelegate: Unexpected number of outputs (%d != %d) in node #%d",
+ numOutputs, expectedSize, nodeIndex);
+ return kTfLiteError;
+ }
+ return kTfLiteOk;
+}
+
+bool IsConstantTensor(const TfLiteOpaqueTensor* tfLiteTensor)
+{
+ auto tensorAllocationType = TfLiteOpaqueTensorGetAllocationType(tfLiteTensor);
+ if (tensorAllocationType == kTfLiteMmapRo)
+ {
+ return true;
+ }
+ return false;
+}
+
+bool IsDynamicTensor(const TfLiteOpaqueTensor& tfLiteTensor)
+{
+ auto tensorAllocationType = TfLiteOpaqueTensorGetAllocationType(&tfLiteTensor);
+ if (tensorAllocationType == kTfLiteDynamic)
+ {
+ return true;
+ }
+ return false;
+}
+
+bool IsValid(const TfLiteOpaqueTensor* tfLiteTensor)
+{
+ return tfLiteTensor == nullptr ? false : true;
+}
+
+bool IsValid(TfLiteOpaqueContext* tfLiteContext,
+ const TfLiteOpaqueTensor& tfLiteTensor,
+ int32_t operatorCode,
+ int32_t nodeIndex)
+{
+ if(!IsValid(&tfLiteTensor))
+ {
+ TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: Invalid TfLite tensor in operator #%d node #%d: ",
+ operatorCode, nodeIndex);
+ return false;
+ }
+ if (IsDynamicTensor(tfLiteTensor))
+ {
+ TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: Dynamic tensors are not supported in operator #%d node #%d: ",
+ operatorCode, nodeIndex);
+ return false;
+ }
+ return true;
+}
+
+bool IsAffineQuantization(const TfLiteOpaqueTensor& tfLiteTensor)
+{
+ auto quantizationInfo = TfLiteOpaqueTensorGetQuantization(&tfLiteTensor);
+ if (quantizationInfo.type == kTfLiteAffineQuantization)
+ {
+ return true;
+ }
+ return false;
+}
+
+// Load input indices into array if found and validate.
+// This replaces node->inputs->data.
+TfLiteStatus GetInputIndices(const int* inputIndices,
+ TfLiteOpaqueNode* tfLiteNode,
+ TfLiteOpaqueContext* tfLiteContext,
+ unsigned int numInputs)
+{
+ int actualNumInputs = 0;
+
+ TfLiteStatus status = TfLiteOpaqueNodeInputs(tfLiteNode, &inputIndices, &actualNumInputs);
+ if(status != kTfLiteOk)
+ {
+ TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+ tfLiteContext, "TfLiteArmnnOpaqueDelegate: Unable to gather input information from node.");
+ return kTfLiteError;
+ }
+
+ if (static_cast<unsigned int>(actualNumInputs) != numInputs)
+ {
+ TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+ tfLiteContext, "TfLiteArmnnOpaqueDelegate: Unexpected number of inputs (%d != %d) in node.",
+ actualNumInputs, numInputs);
+ return kTfLiteError;
+ }
+
+ return kTfLiteOk;
+}
+
+// Load output indices into array if found and validate.
+// This replaces node->outputs->data.
+TfLiteStatus GetOutputIndices(const int* outputIndices,
+ TfLiteOpaqueNode* tfLiteNode,
+ TfLiteOpaqueContext* tfLiteContext,
+ unsigned int numOutputs)
+{
+ int actualNumOutputs = 0;
+
+ TfLiteStatus status = TfLiteOpaqueNodeOutputs(tfLiteNode, &outputIndices, &actualNumOutputs);
+ if(status != kTfLiteOk)
+ {
+ TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+ tfLiteContext, "TfLiteArmnnOpaqueDelegate: Unable to gather output information from node.");
+ return kTfLiteError;
+ }
+
+ if (static_cast<unsigned int>(actualNumOutputs) != numOutputs)
+ {
+ TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+ tfLiteContext, "TfLiteArmnnOpaqueDelegate: Unexpected number of outputs (%d != %d) in node.",
+ actualNumOutputs, numOutputs);
+ return kTfLiteError;
+ }
+
+ return kTfLiteOk;
+}
+
+TfLiteStatus Connect(armnn::IConnectableLayer* layer,
+ TfLiteOpaqueContext* tfLiteContext,
+ TfLiteOpaqueNode* tfLiteNode,
+ armnnOpaqueDelegate::DelegateData& data)
+{
+ // Get array of indices, replaces node->inputs->data
+ const int* inputIndices = nullptr;
+ TfLiteStatus inputStatus = GetInputIndices(inputIndices, tfLiteNode, tfLiteContext, layer->GetNumInputSlots());
+ if(inputStatus != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
+
+ // Connect the input slots
+ for (unsigned int inputIndex = 0; inputIndex < layer->GetNumInputSlots(); ++inputIndex)
+ {
+ if (data.m_OutputSlotForNode[inputIndices[inputIndex]] != nullptr)
+ {
+ data.m_OutputSlotForNode[inputIndices[inputIndex]]->Connect(layer->GetInputSlot(inputIndex));
+ }
+ }
+
+ // Get array of indices, replaces node->outputs->data
+ const int* outputIndices = nullptr;
+ TfLiteStatus outputStatus = GetOutputIndices(outputIndices, tfLiteNode, tfLiteContext, layer->GetNumOutputSlots());
+ if(outputStatus != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
+
+ // Prepare output slots
+ for (unsigned int outputIndex = 0; outputIndex < layer->GetNumOutputSlots(); ++outputIndex)
+ {
+ armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(outputIndex);
+ data.m_OutputSlotForNode[static_cast<unsigned long>(outputIndices[outputIndex])] = &outputSlot;
+ }
+
+ return kTfLiteOk;
+}
+
+TfLiteStatus FusedActivation(TfLiteOpaqueContext* tfLiteContext,
+ TfLiteOpaqueNode* tfLiteNode,
+ TfLiteFusedActivation activationType,
+ armnn::IConnectableLayer* prevLayer,
+ unsigned int outputSlotIndex,
+ armnnOpaqueDelegate::DelegateData& data)
+{
+ const armnn::TensorInfo& activationOutputInfo = prevLayer->GetOutputSlot(outputSlotIndex).GetTensorInfo();
+
+ armnn::ActivationDescriptor activationDesc;
+
+ switch (activationType)
+ {
+ case kTfLiteActNone:
+ {
+ // No Activation
+ return kTfLiteOk;
+ }
+ case kTfLiteActRelu:
+ {
+ activationDesc.m_Function = armnn::ActivationFunction::ReLu;
+ break;
+ }
+ case kTfLiteActReluN1To1:
+ {
+ activationDesc.m_Function = armnn::ActivationFunction::BoundedReLu;
+ activationDesc.m_A = 1.0f;
+ activationDesc.m_B = -1.0f;
+ break;
+ }
+ case kTfLiteActRelu6:
+ {
+ activationDesc.m_Function = armnn::ActivationFunction::BoundedReLu;
+ activationDesc.m_A = 6.0f;
+ activationDesc.m_B = 0.0f;
+ break;
+ }
+ case kTfLiteActSigmoid:
+ {
+ activationDesc.m_Function = armnn::ActivationFunction::Sigmoid;
+ break;
+ }
+ case kTfLiteActTanh:
+ {
+ activationDesc.m_Function = armnn::ActivationFunction::TanH;
+ activationDesc.m_A = 1.0f;
+ activationDesc.m_B = 1.0f;
+ break;
+ }
+ default:
+ return kTfLiteError;
+ }
+
+ bool isSupported = false;
+ armnn::BackendId setBackend;
+ FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("ACTIVATION",
+ tfLiteContext,
+ IsActivationSupported,
+ data.m_Backends,
+ isSupported,
+ setBackend,
+ activationOutputInfo,
+ activationOutputInfo,
+ activationDesc);
+ if (!isSupported)
+ {
+ return kTfLiteError;
+ }
+ armnn::IConnectableLayer* activationLayer = data.m_Network->AddActivationLayer(activationDesc);
+ activationLayer->SetBackendId(setBackend);
+
+ ARMNN_ASSERT(activationLayer != nullptr);
+ activationLayer->GetOutputSlot(0).SetTensorInfo(activationOutputInfo);
+
+ // Get array of indices, replaces node->outputs->data
+ const int* outputIndices = nullptr;
+ TfLiteStatus status = GetOutputIndices(outputIndices,
+ tfLiteNode,
+ tfLiteContext,
+ activationLayer->GetNumOutputSlots());
+ if(status != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
+
+ // Connect and prepare output slots
+ for (unsigned int outputIndex = 0; outputIndex < activationLayer->GetNumOutputSlots(); ++outputIndex)
+ {
+ data.m_OutputSlotForNode[static_cast<unsigned long>(
+ outputIndices[outputIndex])]->Connect(activationLayer->GetInputSlot(0));
+
+ armnn::IOutputSlot& outputSlot = activationLayer->GetOutputSlot(outputIndex);
+ data.m_OutputSlotForNode[static_cast<unsigned long>(outputIndices[outputIndex])] = &outputSlot;
+ }
+ return kTfLiteOk;
+}
+
+armnn::IConnectableLayer* AddReshapeLayer(TfLiteOpaqueContext* tfLiteContext,
+ TfLiteOpaqueNode* tfLiteNode,
+ armnn::IConnectableLayer* prevLayer,
+ armnn::TensorInfo reshapedOutputTensorInfo,
+ armnn::TensorInfo outputTensorInfo,
+ armnnOpaqueDelegate::DelegateData& data)
+{
+ armnn::ReshapeDescriptor desc;
+ desc.m_TargetShape = outputTensorInfo.GetShape();
+
+ bool isSupported = false;
+ armnn::BackendId setBackend;
+ FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("RESHAPE",
+ tfLiteContext,
+ IsReshapeSupported,
+ data.m_Backends,
+ isSupported,
+ setBackend,
+ reshapedOutputTensorInfo,
+ outputTensorInfo,
+ desc);
+
+ if (!isSupported)
+ {
+ return nullptr;
+ }
+
+ armnn::IConnectableLayer* reshapeLayer = data.m_Network->AddReshapeLayer(desc);
+ reshapeLayer->SetBackendId(setBackend);
+ ARMNN_ASSERT(reshapeLayer != nullptr);
+
+ prevLayer->GetOutputSlot(0).SetTensorInfo(reshapedOutputTensorInfo);
+ reshapeLayer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
+
+ // Gather array of indices and it's length, replaces node->outputs->data[i]
+ const int* outputIndices = nullptr;
+ int numOutputs = 0;
+
+ TfLiteStatus status = TfLiteOpaqueNodeOutputs(tfLiteNode, &outputIndices, &numOutputs);
+ if(status != kTfLiteOk)
+ {
+ throw armnn::Exception("TfLiteArmnnOpaqueDelegate: Unable to gather output information from node.");
+ }
+
+ if (static_cast<unsigned int>(numOutputs) != reshapeLayer->GetNumOutputSlots())
+ {
+ throw armnn::Exception("TfLiteArmnnOpaqueDelegate: Unexpected number of outputs (" +
+ std::to_string(numOutputs) +
+ "!= " +
+ std::to_string(reshapeLayer->GetNumOutputSlots()) +
+ ") in node.");
+ }
+
+ // Connect and prepare output slots
+ for (unsigned int outputIndex = 0; outputIndex < reshapeLayer->GetNumOutputSlots(); ++outputIndex)
+ {
+ data.m_OutputSlotForNode[static_cast<unsigned long>(
+ outputIndices[outputIndex])]->Connect(reshapeLayer->GetInputSlot(0));
+
+ armnn::IOutputSlot& outputSlot = reshapeLayer->GetOutputSlot(outputIndex);
+ data.m_OutputSlotForNode[static_cast<unsigned long>(outputIndices[outputIndex])] = &outputSlot;
+ }
+ return reshapeLayer;
+}
+
+armnn::DataType GetDataType(const TfLiteOpaqueTensor* tfLiteTensor)
+{
+ switch (TfLiteOpaqueTensorType(tfLiteTensor))
+ {
+ case kTfLiteBool:
+ return armnn::DataType::Boolean;
+ case kTfLiteFloat32:
+ return armnn::DataType::Float32;
+ case kTfLiteFloat16:
+ return armnn::DataType::Float16;
+ case kTfLiteUInt8:
+ return armnn::DataType::QAsymmU8;
+ case kTfLiteInt8:
+ {
+ auto quantizationInfo = TfLiteOpaqueTensorGetQuantization(tfLiteTensor);
+ if (quantizationInfo.type == kTfLiteAffineQuantization)
+ {
+ auto* quantization =
+ reinterpret_cast<TfLiteAffineQuantization*>(quantizationInfo.params);
+
+ if (quantization->zero_point != nullptr && quantization->zero_point->size == 1)
+ {
+ return armnn::DataType::QAsymmS8;
+ }
+ else
+ {
+ return armnn::DataType::QSymmS8;
+ }
+ }
+ else
+ {
+ return armnn::DataType::QAsymmS8;
+ }
+ }
+ case kTfLiteInt16:
+ return armnn::DataType::QSymmS16;
+ case kTfLiteInt32:
+ return armnn::DataType::Signed32;
+ case kTfLiteInt64:
+ return armnn::DataType::Signed64;
+ default:
+ throw armnn::Exception(
+ &"TfLiteArmnnDelegate: Unsupported data type: " [ TfLiteOpaqueTensorType(tfLiteTensor) ]);
+ }
+}
+
+armnn::TensorInfo GetTensorInfoForTfLiteOpaqueTensor(const TfLiteOpaqueTensor* tfLiteTensor, bool isOutput = false)
+{
+ armnn::DataType type = GetDataType(tfLiteTensor);
+ armnn::TensorInfo ret;
+
+ auto tensorDimensionSize = TfLiteOpaqueTensorNumDims(tfLiteTensor);
+ if (tensorDimensionSize == 0)
+ {
+ // If input tensor does not have a shape
+ // assuming that it has 1D tensor
+ if (!isOutput)
+ {
+ std::vector<unsigned int> safeShape = { 1 };
+ bool dimensionsSpecificity[1] = { true };
+
+ armnn::TensorShape tensorShape(armnn::numeric_cast<unsigned int>(safeShape.size()),
+ safeShape.data(),
+ dimensionsSpecificity);
+ ret = armnn::TensorInfo(tensorShape, type);
+
+ if(IsConstantTensor(tfLiteTensor))
+ {
+ ret.SetConstant(true);
+ }
+ }
+ else
+ {
+ armnn::TensorShape tensorShape(armnn::Dimensionality::NotSpecified);
+ ret = armnn::TensorInfo(tensorShape, type);
+ }
+ }
+ else
+ {
+ std::vector<unsigned int> tensorDims(static_cast<unsigned int>(tensorDimensionSize));
+ bool dimensionsSpecificity[5] = { true, true, true, true, true };
+
+ for (int32_t i = 0; i < tensorDimensionSize; ++i)
+ {
+ int32_t dim = TfLiteOpaqueTensorDim(tfLiteTensor, i);
+
+ if (dim == 0)
+ {
+ dimensionsSpecificity[i] = false;
+ }
+ tensorDims[i] = static_cast<unsigned int>(dim);
+ }
+
+ armnn::TensorShape tensorShape(static_cast<unsigned int>(tensorDimensionSize),
+ tensorDims.data(),
+ dimensionsSpecificity);
+
+ if(IsConstantTensor(tfLiteTensor))
+ {
+ ret = armnn::TensorInfo(tensorShape, type);
+ ret.SetConstant(true);
+ }
+ else
+ {
+ ret = armnn::TensorInfo(tensorShape, type);
+ }
+ }
+
+ auto quantizationInfo = TfLiteOpaqueTensorGetQuantization(tfLiteTensor);
+ if (quantizationInfo.type == kTfLiteAffineQuantization)
+ {
+ // get per-channel quantization parameters
+ const auto* affineQuantization =
+ reinterpret_cast<TfLiteAffineQuantization*>(quantizationInfo.params);
+ if (affineQuantization->scale->size > 1)
+ {
+ std::vector<float> quantizationScales;
+ for (unsigned int i = 0; i < static_cast<unsigned int>(affineQuantization->scale->size); ++i)
+ {
+ quantizationScales.push_back(affineQuantization->scale->data[i]);
+ }
+ ret.SetQuantizationScales(quantizationScales);
+ ret.SetQuantizationDim(armnn::numeric_cast<unsigned int>(affineQuantization->quantized_dimension));
+ }
+ else
+ {
+ ret.SetQuantizationScale(affineQuantization->scale->data[0]);
+ ret.SetQuantizationOffset(affineQuantization->zero_point->data[0]);
+ }
+ }
+ else
+ {
+ auto quantizationParameters = TfLiteOpaqueTensorGetQuantizationParams(tfLiteTensor);
+ ret.SetQuantizationScale(quantizationParameters.scale);
+ ret.SetQuantizationOffset(quantizationParameters.zero_point);
+ }
+
+ return ret;
+}
+
+armnn::ConstTensor CreateConstTensor(const TfLiteOpaqueTensor* tfLiteTensor,
+ const armnn::TensorInfo& tensorInfo)
+{
+ auto allocType = TfLiteOpaqueTensorGetAllocationType(tfLiteTensor);
+ if (allocType != kTfLiteMmapRo)
+ {
+ throw armnn::Exception("TfLiteArmnnDelegate: Not constant allocation type: " + std::to_string(allocType));
+ }
+
+ return armnn::ConstTensor(tensorInfo, TfLiteOpaqueTensorData(tfLiteTensor));
+}
+
+armnn::ConstTensor* GetConstTensorForTfLiteTensor(const TfLiteOpaqueContext* tfLiteContext,
+ TfLiteOpaqueNode* tfLiteNode,
+ int index)
+{
+ const TfLiteOpaqueTensor* tfLiteTensor = TfLiteOpaqueNodeGetInput(tfLiteContext, tfLiteNode, index);
+ armnn::TensorInfo tensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteTensor);
+
+ return new armnn::ConstTensor(tensorInfo, TfLiteOpaqueTensorData(tfLiteTensor));
+}
+
+bool IsOptionalOperandPresent(TfLiteOpaqueNode* tfLiteNode, const int operandIndex)
+{
+ // Gather array of indices and it's length, replaces node->inputs->data[i] and node->inputs->size
+ const int* inputIndices = nullptr;
+ int numInputs = 0;
+
+ TfLiteStatus status = TfLiteOpaqueNodeInputs(tfLiteNode, &inputIndices, &numInputs);
+ if(status != kTfLiteOk)
+ {
+ throw armnn::Exception("TfLiteArmnnOpaqueDelegate: Unable to gather input information from node.");
+ }
+
+ // If the inputs array has fewer than operandIndex entries or if the entry at operandIndex has a value of -1 or
+ // less then the input is not present.
+ if (numInputs > operandIndex && inputIndices[operandIndex] >= 0)
+ {
+ return true;
+ }
+ return false;
+}
+
+TfLiteStatus ProcessInputs(armnn::IConnectableLayer* layer,
+ armnnOpaqueDelegate::DelegateData& delegateData,
+ TfLiteOpaqueContext* tfLiteContext,
+ TfLiteOpaqueNode* tfLiteNode)
+{
+ // Get array of indices, replaces node->inputs->data
+ const int* inputIndices = nullptr;
+ TfLiteStatus status = GetInputIndices(inputIndices, tfLiteNode, tfLiteContext, layer->GetNumInputSlots());
+ if(status != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
+
+ // Process input tensors
+ // If input tensor is a Constant tensor create a constant layer and connect it to the network
+ for (int32_t inputIndex = 0; inputIndex < static_cast<int32_t>(layer->GetNumInputSlots()); ++inputIndex)
+ {
+ const TfLiteOpaqueTensor* tfLiteInputTensor = TfLiteOpaqueNodeGetInput(tfLiteContext, tfLiteNode, inputIndex);
+
+ if (IsConstantTensor(tfLiteInputTensor))
+ {
+ armnn::TensorInfo inputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteInputTensor);
+
+ bool isSupported = false;
+ armnn::BackendId setBackend;
+ FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("CONSTANT",
+ tfLiteContext,
+ IsConstantSupported,
+ delegateData.m_Backends,
+ isSupported,
+ setBackend,
+ inputTensorInfo);
+ if (!isSupported)
+ {
+ return kTfLiteError;
+ }
+
+ auto constantInput = CreateConstTensor(tfLiteInputTensor, inputTensorInfo);
+
+ armnn::IConnectableLayer* constantLayer = delegateData.m_Network->AddConstantLayer(constantInput);
+ constantLayer->SetBackendId(setBackend);
+ armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
+ outputSlot.SetTensorInfo(inputTensorInfo);
+
+ delegateData.m_OutputSlotForNode[inputIndices[inputIndex]] = &outputSlot;
+ }
+ }
+ return kTfLiteOk;
+}
+
+} // namespace anonymous
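
To show how the new opaque utilities above hang together, here is a rough, hypothetical operator handler. VisitExampleOperator, its single-input/single-output shape, and the use of IsReshapeSupported as a stand-in support check are all invented for illustration; real handlers live alongside this header in delegate/opaque/src/ and are not part of this change.

    #include <OpaqueDelegateUtils.hpp>

    // Hypothetical sketch only: the call sequence a typical opaque operator handler
    // would make into OpaqueDelegateUtils.hpp. Not part of the commit above.
    TfLiteStatus VisitExampleOperator(armnnOpaqueDelegate::DelegateData& delegateData,
                                      TfLiteOpaqueContext* tfLiteContext,
                                      TfLiteOpaqueNode* tfLiteNode,
                                      int nodeIndex,
                                      int32_t operatorCode)
    {
        if (ValidateNumInputs(tfLiteContext, tfLiteNode, 1, nodeIndex) != kTfLiteOk ||
            ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex) != kTfLiteOk)
        {
            return kTfLiteError;
        }

        // Tensors come through the opaque accessors rather than context->tensors.
        const TfLiteOpaqueTensor* tfLiteInputTensor  = TfLiteOpaqueNodeGetInput(tfLiteContext, tfLiteNode, 0);
        const TfLiteOpaqueTensor* tfLiteOutputTensor = TfLiteOpaqueNodeGetOutput(tfLiteContext, tfLiteNode, 0);
        if (!IsValid(tfLiteContext, *tfLiteInputTensor, operatorCode, nodeIndex) ||
            !IsValid(tfLiteContext, *tfLiteOutputTensor, operatorCode, nodeIndex))
        {
            return kTfLiteError;
        }

        const armnn::TensorInfo& inputInfo  = GetTensorInfoForTfLiteOpaqueTensor(tfLiteInputTensor);
        const armnn::TensorInfo& outputInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteOutputTensor, true);

        // Ask the backends whether they support the layer (Reshape used as a stand-in).
        armnn::ReshapeDescriptor descriptor;
        descriptor.m_TargetShape = outputInfo.GetShape();
        bool isSupported = false;
        armnn::BackendId setBackend;
        FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("EXAMPLE",
                                          tfLiteContext,
                                          IsReshapeSupported,
                                          delegateData.m_Backends,
                                          isSupported,
                                          setBackend,
                                          inputInfo,
                                          outputInfo,
                                          descriptor);
        if (!isSupported)
        {
            return kTfLiteError;
        }

        armnn::IConnectableLayer* layer = delegateData.m_Network->AddReshapeLayer(descriptor);
        layer->SetBackendId(setBackend);
        layer->GetOutputSlot(0).SetTensorInfo(outputInfo);

        // Constant inputs become ConstantLayers, then the node is wired up by index.
        if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
        {
            return kTfLiteError;
        }
        return Connect(layer, tfLiteContext, tfLiteNode, delegateData);
    }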