author     Teresa Charlin <teresa.charlinreyes@arm.com>  2023-04-28 14:23:33 +0100
committer  TeresaARM <teresa.charlinreyes@arm.com>       2023-04-28 16:54:49 +0000
commit     4236296b3715ca3cfe83142f2e170f8f48a7b18d (patch)
tree       a48932cc56a9fc7c99cacfce2904ccd5e75cf29a
parent     36d94ef824c516f27fad8f17a96e2123565e6f9f (diff)
download   armnn-4236296b3715ca3cfe83142f2e170f8f48a7b18d.tar.gz
IVGCVSW-7611 IVGCVSW-7614 IVGCVSW-7615 IVGCVSW-7617 Softmax, SpaceToDepth, DepthToSpace and Transpose for opaque delegate
Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com>
Change-Id: Ie0c608f94a76956e9be75f555824cef865cab395
-rw-r--r--  delegate/CMakeLists.txt                 |   6
-rw-r--r--  delegate/opaque/CMakeLists.txt          |   3
-rw-r--r--  delegate/opaque/src/Softmax.hpp         | 157
-rw-r--r--  delegate/opaque/src/SpaceDepth.hpp      | 189
-rw-r--r--  delegate/opaque/src/Transpose.hpp       | 110
-rw-r--r--  delegate/opaque/src/armnn_delegate.cpp  |  42
6 files changed, 501 insertions(+), 6 deletions(-)
diff --git a/delegate/CMakeLists.txt b/delegate/CMakeLists.txt
index b0145ca919..acd1255b0f 100644
--- a/delegate/CMakeLists.txt
+++ b/delegate/CMakeLists.txt
@@ -308,9 +308,15 @@ if(BUILD_UNIT_TESTS)
test/QuantizationTestHelper.hpp
test/ShapeTest.cpp
test/ShapeTestHelper.hpp
+ test/SoftmaxTest.cpp
+ test/SoftmaxTestHelper.hpp
+ test/SpaceDepthTest.cpp
+ test/SpaceDepthTestHelper.hpp
test/TestUtils.hpp
test/TestUtils.cpp
test/TransposeConvolution2dTest.cpp
+ test/TransposeTest.cpp
+ test/TransposeTestHelper.hpp
test/UnpackTest.cpp
test/UnpackTestHelper.hpp)
diff --git a/delegate/opaque/CMakeLists.txt b/delegate/opaque/CMakeLists.txt
index c5eaa20872..ac205ee959 100644
--- a/delegate/opaque/CMakeLists.txt
+++ b/delegate/opaque/CMakeLists.txt
@@ -32,6 +32,9 @@ list(APPEND armnnOpaqueDelegateObject_sources
src/Shape.hpp
src/SharedFunctions.cpp
src/SharedFunctions.hpp
+ src/Softmax.hpp
+ src/SpaceDepth.hpp
+ src/Transpose.hpp
src/Unpack.hpp)
add_library(armnnOpaqueDelegateObject OBJECT ${armnnOpaqueDelegateObject_sources})
diff --git a/delegate/opaque/src/Softmax.hpp b/delegate/opaque/src/Softmax.hpp
index e16969768e..87927616ff 100644
--- a/delegate/opaque/src/Softmax.hpp
+++ b/delegate/opaque/src/Softmax.hpp
@@ -2,3 +2,160 @@
// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
+
+#pragma once
+
+#include <OpaqueDelegateUtils.hpp>
+
+namespace armnnOpaqueDelegate
+{
+TfLiteStatus ValidateSoftmaxOperator(DelegateData& delegateData,
+ TfLiteOpaqueContext* tfLiteContext,
+ const armnn::TensorInfo& inputInfo,
+ const armnn::TensorInfo& outputTensorInfo,
+ const armnn::SoftmaxDescriptor& descriptor)
+{
+ bool isSupported = false;
+ FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("SOFTMAX",
+ tfLiteContext,
+ IsSoftmaxSupported,
+ delegateData.m_Backends,
+ isSupported,
+ armnn::BackendId(),
+ inputInfo,
+ outputTensorInfo,
+ descriptor);
+ return isSupported ? kTfLiteOk : kTfLiteError;
+}
+
+TfLiteStatus ValidateLogSoftmaxOperator(DelegateData& delegateData,
+ TfLiteOpaqueContext* tfLiteContext,
+ const armnn::TensorInfo& inputInfo,
+ const armnn::TensorInfo& outputTensorInfo,
+ const armnn::LogSoftmaxDescriptor& descriptor)
+{
+ bool isSupported = false;
+ FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("LOG_SOFTMAX",
+ tfLiteContext,
+ IsLogSoftmaxSupported,
+ delegateData.m_Backends,
+ isSupported,
+ armnn::BackendId(),
+ inputInfo,
+ outputTensorInfo,
+ descriptor);
+ return isSupported ? kTfLiteOk : kTfLiteError;
+}
+
+TfLiteStatus VisitSoftmaxOperator(DelegateData& delegateData,
+ TfLiteOpaqueContext* tfLiteContext,
+ TfLiteOpaqueNode* tfLiteNode,
+ int nodeIndex,
+ int32_t tfliteSoftmaxOperatorCode)
+{
+ TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+ TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+
+ // Gather input indices and use to get input tensor.
+ const int* inputTensors;
+ int numInputs;
+ if (TfLiteOpaqueNodeInputs(tfLiteNode, &inputTensors, &numInputs) != kTfLiteOk)
+ {
+ TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnOpaqueDelegate: Unable to gather input tensor indices from node #%d: ",
+ nodeIndex);
+ return kTfLiteError;
+ }
+
+ const TfLiteOpaqueTensor* tfLiteInputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[0]);
+ if (!IsValid(tfLiteContext, tfLiteInputTensor, tfliteSoftmaxOperatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+
+ // Gather output indices and use to get output tensor.
+ const int* outputTensors;
+ int numOutputs = 0;
+ if (TfLiteOpaqueNodeOutputs(tfLiteNode, &outputTensors, &numOutputs) != kTfLiteOk)
+ {
+ TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnOpaqueDelegate: Unable to gather output tensor indices from node #%d: ",
+ nodeIndex);
+ return kTfLiteError;
+ }
+
+ const TfLiteOpaqueTensor* tfLiteOutputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, outputTensors[0]);
+ if (!IsValid(tfLiteContext, tfLiteOutputTensor, tfliteSoftmaxOperatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+
+ const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteInputTensor);
+ const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteOutputTensor, true);
+
+ if (!delegateData.m_Network)
+ {
+ switch(tfliteSoftmaxOperatorCode)
+ {
+ case kTfLiteBuiltinSoftmax:
+ {
+ armnn::SoftmaxDescriptor descriptor;
+ auto* nodeParams = reinterpret_cast<TfLiteSoftmaxParams*>(TfLiteOpaqueNodeGetBuiltinData(tfLiteNode));
+ descriptor.m_Beta = nodeParams->beta;
+ return ValidateSoftmaxOperator(delegateData,
+ tfLiteContext,
+ inputTensorInfo,
+ outputTensorInfo,
+ descriptor);
+ }
+ case kTfLiteBuiltinLogSoftmax:
+ {
+ armnn::LogSoftmaxDescriptor descriptor;
+ return ValidateLogSoftmaxOperator(delegateData,
+ tfLiteContext,
+ inputTensorInfo,
+ outputTensorInfo,
+ descriptor);
+ }
+ default:
+ return kTfLiteError;
+ }
+ }
+
+ armnn::IConnectableLayer* softmaxLayer = nullptr;
+ switch(tfliteSoftmaxOperatorCode)
+ {
+ case kTfLiteBuiltinSoftmax:
+ {
+ armnn::SoftmaxDescriptor descriptor;
+ auto* nodeParameters = reinterpret_cast<TfLiteSoftmaxParams*>(TfLiteOpaqueNodeGetBuiltinData(tfLiteNode));
+ descriptor.m_Beta = nodeParameters->beta;
+ softmaxLayer = delegateData.m_Network->AddSoftmaxLayer(descriptor);
+ break;
+ }
+ case kTfLiteBuiltinLogSoftmax:
+ {
+ armnn::LogSoftmaxDescriptor descriptor;
+ softmaxLayer = delegateData.m_Network->AddLogSoftmaxLayer(descriptor);
+ break;
+ }
+ default:
+ return kTfLiteError;
+ }
+
+ ARMNN_ASSERT(softmaxLayer != nullptr);
+ armnn::IOutputSlot& outputSlot = softmaxLayer->GetOutputSlot(0);
+ outputSlot.SetTensorInfo(outputTensorInfo);
+
+    // Try to connect the constant inputs if there are any
+    if (ProcessInputs(softmaxLayer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
+
+ // Connect
+ return Connect(softmaxLayer, tfLiteContext, tfLiteNode, delegateData);
+}
+} // namespace armnnOpaqueDelegate
\ No newline at end of file
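
For reference, the two builtins routed through VisitSoftmaxOperator differ only in their parameters: Softmax carries a beta scaling factor in TfLiteSoftmaxParams, while TfLite's LogSoftmax has no builtin parameters, so a default-constructed LogSoftmaxDescriptor is passed through. A minimal sketch of the underlying math in plain C++ (SoftmaxRef and LogSoftmaxRef are hypothetical names, not delegate code):

    // Reference semantics only; assumes C++17 and a non-empty input.
    #include <algorithm>
    #include <cassert>
    #include <cmath>
    #include <cstddef>
    #include <vector>

    // Softmax_i(x) = exp(beta * x_i) / sum_j exp(beta * x_j)
    std::vector<float> SoftmaxRef(const std::vector<float>& x, float beta)
    {
        assert(!x.empty());
        // Subtracting the max leaves the result unchanged but keeps exp() in range.
        const float maxVal = *std::max_element(x.begin(), x.end());
        std::vector<float> out(x.size());
        float sum = 0.0f;
        for (std::size_t i = 0; i < x.size(); ++i)
        {
            out[i] = std::exp(beta * (x[i] - maxVal)); // beta == TfLiteSoftmaxParams::beta
            sum += out[i];
        }
        for (float& v : out)
        {
            v /= sum;
        }
        return out;
    }

    // LogSoftmax_i(x) = x_i - max(x) - log(sum_j exp(x_j - max(x)))
    std::vector<float> LogSoftmaxRef(const std::vector<float>& x)
    {
        assert(!x.empty());
        const float maxVal = *std::max_element(x.begin(), x.end());
        float sum = 0.0f;
        for (float v : x)
        {
            sum += std::exp(v - maxVal);
        }
        const float logSum = std::log(sum);
        std::vector<float> out(x.size());
        for (std::size_t i = 0; i < x.size(); ++i)
        {
            out[i] = x[i] - maxVal - logSum;
        }
        return out;
    }
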
diff --git a/delegate/opaque/src/SpaceDepth.hpp b/delegate/opaque/src/SpaceDepth.hpp
index e16969768e..9cc61eb603 100644
--- a/delegate/opaque/src/SpaceDepth.hpp
+++ b/delegate/opaque/src/SpaceDepth.hpp
@@ -2,3 +2,192 @@
// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
+
+#pragma once
+
+#include <OpaqueDelegateUtils.hpp>
+
+namespace armnnOpaqueDelegate
+{
+
+TfLiteStatus VisitSpaceToDepthOperator(DelegateData& delegateData,
+ TfLiteOpaqueContext* tfLiteContext,
+ TfLiteOpaqueNode* tfLiteNode,
+ int nodeIndex,
+ int32_t tfLiteSpaceDepthOperatorCode)
+{
+ TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+ TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+
+ // Gather input indices and use to get input tensor.
+ const int* inputTensors;
+ int numInputs;
+ if (TfLiteOpaqueNodeInputs(tfLiteNode, &inputTensors, &numInputs) != kTfLiteOk)
+ {
+ TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnOpaqueDelegate: Unable to gather input tensor indices from node #%d: ",
+ nodeIndex);
+ return kTfLiteError;
+ }
+ const TfLiteOpaqueTensor* tfLiteInputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[0]);
+ if (!IsValid(tfLiteContext, tfLiteInputTensor, tfLiteSpaceDepthOperatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+
+ // Gather output indices and use to get output tensors.
+ const int* outputTensors;
+ int numOutputs;
+ if (TfLiteOpaqueNodeOutputs(tfLiteNode, &outputTensors, &numOutputs) != kTfLiteOk)
+ {
+ TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnOpaqueDelegate: Unable to gather output tensor indices from node #%d: ",
+ nodeIndex);
+ return kTfLiteError;
+ }
+
+ const TfLiteOpaqueTensor* tfLiteOutputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, outputTensors[0]);
+ if (!IsValid(tfLiteContext, tfLiteOutputTensor, tfLiteSpaceDepthOperatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+
+ const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteInputTensor);
+ const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteOutputTensor, true);
+
+ armnn::SpaceToDepthDescriptor descriptor;
+ auto* nodeParameters = reinterpret_cast<TfLiteSpaceToDepthParams*>(TfLiteOpaqueNodeGetBuiltinData(tfLiteNode));
+ descriptor.m_BlockSize = nodeParameters->block_size;
+
+ bool isSupported = false;
+ armnn::BackendId setBackend;
+ auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
+ {
+ FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("SPACE_TO_DEPTH",
+ tfLiteContext,
+ IsSpaceToDepthSupported,
+ delegateData.m_Backends,
+ isSupported,
+ setBackend,
+ inputTensorInfo,
+ outInfo,
+ descriptor);
+ };
+
+ if (!delegateData.m_Network)
+ {
+ validateFunc(outputTensorInfo, isSupported);
+ return isSupported ? kTfLiteOk : kTfLiteError;
+ }
+
+ // Add a SpaceToDepth layer
+    armnn::IConnectableLayer* layer = delegateData.m_Network->AddSpaceToDepthLayer(descriptor);
+    ARMNN_ASSERT(layer != nullptr);
+    layer->SetBackendId(setBackend);
+
+    // Try to connect the constant inputs if there are any
+    if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
+
+ armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
+ outputSlot.SetTensorInfo(outputTensorInfo);
+
+ // Connect
+ return Connect(layer, tfLiteContext, tfLiteNode, delegateData);
+}
+
+TfLiteStatus VisitDepthToSpaceOperator(DelegateData& delegateData,
+ TfLiteOpaqueContext* tfLiteContext,
+ TfLiteOpaqueNode* tfLiteNode,
+ int nodeIndex,
+ int32_t tfLiteDepthSpaceOperatorCode)
+{
+ TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+ TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+
+ // Gather input indices and use to get input tensor.
+ const int* inputTensors;
+ int numInputs;
+ if (TfLiteOpaqueNodeInputs(tfLiteNode, &inputTensors, &numInputs) != kTfLiteOk)
+ {
+ TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnOpaqueDelegate: Unable to gather input tensor indices from node #%d: ",
+ nodeIndex);
+ return kTfLiteError;
+ }
+ const TfLiteOpaqueTensor* tfLiteInputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[0]);
+ if (!IsValid(tfLiteContext, tfLiteInputTensor, tfLiteDepthSpaceOperatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+
+ // Gather output indices and use to get output tensors.
+ const int* outputTensors;
+ int numOutputs;
+ if (TfLiteOpaqueNodeOutputs(tfLiteNode, &outputTensors, &numOutputs) != kTfLiteOk)
+ {
+ TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnOpaqueDelegate: Unable to gather output tensor indices from node #%d: ",
+ nodeIndex);
+ return kTfLiteError;
+ }
+
+ const TfLiteOpaqueTensor* tfLiteOutputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, outputTensors[0]);
+ if (!IsValid(tfLiteContext, tfLiteOutputTensor, tfLiteDepthSpaceOperatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+
+ const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteInputTensor);
+ const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteOutputTensor, true);
+
+ armnn::DepthToSpaceDescriptor descriptor;
+    auto* nodeParameters = reinterpret_cast<TfLiteDepthToSpaceParams*>(TfLiteOpaqueNodeGetBuiltinData(tfLiteNode));
+ descriptor.m_BlockSize = nodeParameters->block_size;
+
+ bool isSupported = false;
+ armnn::BackendId setBackend;
+ auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
+ {
+ FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("DEPTH_TO_SPACE",
+ tfLiteContext,
+ IsDepthToSpaceSupported,
+ delegateData.m_Backends,
+ isSupported,
+ setBackend,
+ inputTensorInfo,
+ outInfo,
+ descriptor);
+ };
+
+ if (!delegateData.m_Network)
+ {
+ validateFunc(outputTensorInfo, isSupported);
+ return isSupported ? kTfLiteOk : kTfLiteError;
+ }
+
+ // Add a DepthToSpace layer
+    armnn::IConnectableLayer* layer = delegateData.m_Network->AddDepthToSpaceLayer(descriptor);
+    ARMNN_ASSERT(layer != nullptr);
+    layer->SetBackendId(setBackend);
+
+ armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
+ outputSlot.SetTensorInfo(outputTensorInfo);
+
+    // Try to connect the constant inputs if there are any
+    if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
+
+ // Connect
+ return Connect(layer, tfLiteContext, tfLiteNode, delegateData);
+}
+
+} // namespace armnnOpaqueDelegate
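
Both visitors need only a block size from the builtin data. As a shape sanity check (NHWC layout, which TfLite uses): SpaceToDepth with block size b maps [N, H, W, C] to [N, H/b, W/b, C*b*b], and DepthToSpace is the inverse mapping. A small hypothetical helper, not part of the delegate sources:

    #include <array>
    #include <cassert>
    #include <cstdint>

    using Shape = std::array<uint32_t, 4>; // { N, H, W, C }

    Shape SpaceToDepthShape(const Shape& in, uint32_t blockSize)
    {
        // Spatial dims must divide evenly by the block size.
        assert(blockSize > 1 && in[1] % blockSize == 0 && in[2] % blockSize == 0);
        return { in[0], in[1] / blockSize, in[2] / blockSize, in[3] * blockSize * blockSize };
    }

    Shape DepthToSpaceShape(const Shape& in, uint32_t blockSize)
    {
        // Channels must divide evenly by blockSize^2.
        assert(blockSize > 1 && in[3] % (blockSize * blockSize) == 0);
        return { in[0], in[1] * blockSize, in[2] * blockSize, in[3] / (blockSize * blockSize) };
    }

For example, SpaceToDepthShape({1, 4, 4, 1}, 2) yields {1, 2, 2, 4}, and DepthToSpaceShape({1, 2, 2, 4}, 2) restores {1, 4, 4, 1}.
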
diff --git a/delegate/opaque/src/Transpose.hpp b/delegate/opaque/src/Transpose.hpp
index e16969768e..2627c42f1f 100644
--- a/delegate/opaque/src/Transpose.hpp
+++ b/delegate/opaque/src/Transpose.hpp
@@ -2,3 +2,113 @@
// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
+
+#pragma once
+
+#include <OpaqueDelegateUtils.hpp>
+
+namespace armnnOpaqueDelegate
+{
+
+TfLiteStatus VisitTransposeOperator(DelegateData& delegateData,
+ TfLiteOpaqueContext* tfLiteContext,
+ TfLiteOpaqueNode* tfLiteNode,
+ int nodeIndex,
+ int32_t tfliteTransposeOperatorCode)
+{
+ TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 2, nodeIndex));
+ TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+
+ // Gather input indices and use to get input tensor.
+ const int* inputTensors;
+ int numInputs;
+ if (TfLiteOpaqueNodeInputs(tfLiteNode, &inputTensors, &numInputs) != kTfLiteOk)
+ {
+ TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnOpaqueDelegate: Unable to gather input tensor indices from node #%d: ",
+ nodeIndex);
+ return kTfLiteError;
+ }
+ const TfLiteOpaqueTensor* tfLiteInputTensor0 = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[0]);
+ if (!IsValid(tfLiteContext, tfLiteInputTensor0, tfliteTransposeOperatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+ const TfLiteOpaqueTensor* tfLiteInputTensor1 = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[1]);
+ if (!IsValid(tfLiteContext, tfLiteInputTensor1, tfliteTransposeOperatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+
+ // Gather output indices and use to get output tensors.
+ const int* outputTensors;
+ int numOutputs;
+ if (TfLiteOpaqueNodeOutputs(tfLiteNode, &outputTensors, &numOutputs) != kTfLiteOk)
+ {
+ TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnOpaqueDelegate: Unable to gather output tensor indices from node #%d: ",
+ nodeIndex);
+ return kTfLiteError;
+ }
+
+ const TfLiteOpaqueTensor* tfLiteOutputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, outputTensors[0]);
+ if (!IsValid(tfLiteContext, tfLiteOutputTensor, tfliteTransposeOperatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+
+ const armnn::TensorInfo& inputTensorInfo0 = GetTensorInfoForTfLiteOpaqueTensor(tfLiteInputTensor0);
+ const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteOutputTensor, true);
+
+    auto* permTensorDataPtr = static_cast<int32_t*>(TfLiteOpaqueTensorData(tfLiteInputTensor1));
+    auto numEl = static_cast<unsigned int>(TfLiteOpaqueTensorDim(tfLiteInputTensor1, 0));
+
+    ARMNN_ASSERT(numEl <= static_cast<unsigned int>(armnn::MaxNumOfTensorDimensions));
+    // Ensure the permutation tensor has a single dimension
+    ARMNN_ASSERT(TfLiteOpaqueTensorNumDims(tfLiteInputTensor1) == 1);
+
+ armnn::TransposeDescriptor descriptor(armnn::PermutationVector(
+ reinterpret_cast<const armnn::PermutationVector::ValueType *> (permTensorDataPtr),
+ static_cast<armnn::PermutationVector::SizeType>(numEl)));
+
+ bool isSupported = false;
+ armnn::BackendId setBackend;
+    auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
+ {
+ FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("TRANSPOSE",
+ tfLiteContext,
+ IsTransposeSupported,
+ delegateData.m_Backends,
+ isSupported,
+ setBackend,
+ inputTensorInfo0,
+                                          outInfo,
+ descriptor);
+ };
+
+ if (!delegateData.m_Network)
+ {
+ validateFunc(outputTensorInfo, isSupported);
+ return isSupported ? kTfLiteOk : kTfLiteError;
+ }
+
+    armnn::IConnectableLayer* transposeLayer = delegateData.m_Network->AddTransposeLayer(descriptor);
+    ARMNN_ASSERT(transposeLayer != nullptr);
+    transposeLayer->SetBackendId(setBackend);
+    // The permutation vector is supplied through the descriptor, so the layer has a single input slot
+    ARMNN_ASSERT(transposeLayer->GetNumInputSlots() == 1);
+
+ armnn::IOutputSlot& outputSlot = transposeLayer->GetOutputSlot(0);
+ outputSlot.SetTensorInfo(outputTensorInfo);
+
+    // Try to connect the constant inputs if there are any
+    if (ProcessInputs(transposeLayer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
+
+ return Connect(transposeLayer, tfLiteContext, tfLiteNode, delegateData);
+}
+} // namespace armnnOpaqueDelegate
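
Unlike the operators above, the permutation for Transpose arrives as a second, constant int32 input tensor rather than as builtin data, which is why VisitTransposeOperator reads it out of tfLiteInputTensor1 before validation. A condensed sketch of that re-packaging, assuming the Arm NN headers the delegate already uses (the example permutation values are hypothetical):

    #include <cstdint>
    #include <armnn/Descriptors.hpp>
    #include <armnn/Types.hpp>

    void BuildTransposeDescriptorSketch()
    {
        // Contents of the 1-D permutation tensor (example values).
        const int32_t permData[] = { 0, 2, 3, 1 };
        const auto numEl = static_cast<armnn::PermutationVector::SizeType>(4);

        // Same re-packaging as VisitTransposeOperator: the int32 TfLite data is
        // reinterpreted as the unsigned ValueType, which is safe only because a
        // valid permutation contains non-negative indices.
        armnn::TransposeDescriptor descriptor(
            armnn::PermutationVector(
                reinterpret_cast<const armnn::PermutationVector::ValueType*>(permData),
                numEl));
        (void)descriptor; // would be passed to AddTransposeLayer(descriptor)
    }
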
diff --git a/delegate/opaque/src/armnn_delegate.cpp b/delegate/opaque/src/armnn_delegate.cpp
index 8c3ddfaeea..9b1c3a1f46 100644
--- a/delegate/opaque/src/armnn_delegate.cpp
+++ b/delegate/opaque/src/armnn_delegate.cpp
@@ -738,12 +738,6 @@ TfLiteStatus ArmnnSubgraph::VisitNode(DelegateData& delegateData,
tfLiteNode,
nodeIndex,
kTfLiteBuiltinDiv);
- case kTfLiteBuiltinElu:
- return VisitActivationOperator(delegateData,
- tfLiteContext,
- tfLiteNode,
- nodeIndex,
- kTfLiteBuiltinElu);
case kTfLiteBuiltinEqual:
return VisitComparisonOperator(delegateData,
tfLiteContext,
@@ -751,6 +745,18 @@ TfLiteStatus ArmnnSubgraph::VisitNode(DelegateData& delegateData,
nodeIndex,
kTfLiteBuiltinEqual,
armnn::ComparisonOperation::Equal);
+ case kTfLiteBuiltinDepthToSpace:
+ return VisitDepthToSpaceOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinDepthToSpace);
+ case kTfLiteBuiltinElu:
+ return VisitActivationOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinElu);
case kTfLiteBuiltinExp:
return VisitElementwiseUnaryOperator(delegateData,
tfLiteContext,
@@ -874,6 +880,12 @@ TfLiteStatus ArmnnSubgraph::VisitNode(DelegateData& delegateData,
nodeIndex,
kTfLiteBuiltinLogicalOr,
armnn::LogicalBinaryOperation::LogicalOr);
+ case kTfLiteBuiltinLogSoftmax:
+ return VisitSoftmaxOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinLogSoftmax);
case kTfLiteBuiltinLstm:
return VisitLstmOperator(delegateData,
tfLiteContext,
@@ -992,12 +1004,24 @@ TfLiteStatus ArmnnSubgraph::VisitNode(DelegateData& delegateData,
nodeIndex,
kTfLiteBuiltinSin,
armnn::UnaryOperation::Sin);
+ case kTfLiteBuiltinSoftmax:
+ return VisitSoftmaxOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinSoftmax);
case kTfLiteBuiltinSpaceToBatchNd:
return VisitSpaceToBatchNdOperator(delegateData,
tfLiteContext,
tfLiteNode,
nodeIndex,
kTfLiteBuiltinSpaceToBatchNd);
+ case kTfLiteBuiltinSpaceToDepth:
+ return VisitSpaceToDepthOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinSpaceToDepth);
case kTfLiteBuiltinSub:
return VisitElementwiseBinaryOperator(delegateData,
tfLiteContext,
@@ -1017,6 +1041,12 @@ TfLiteStatus ArmnnSubgraph::VisitNode(DelegateData& delegateData,
tfLiteNode,
nodeIndex,
kTfLiteBuiltinTanh);
+ case kTfLiteBuiltinTranspose:
+ return VisitTransposeOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinTranspose);
case kTfLiteBuiltinTransposeConv:
return VisitConvolutionOperator(delegateData,
tfLiteContext,