author    John Mcloughlin <john.mcloughlin@arm.com>    2023-04-27 16:55:00 +0100
committer TeresaARM <teresa.charlinreyes@arm.com>       2023-04-27 17:42:51 +0000
commit    0422cf2f80f740751273fe6eed6cdfddb0c487f8 (patch)
tree      26dcaa07bf3a27082f1e12e5915156ebcc5f5a92
parent    3a9e7bac89a120fddaf3bcf312105258a840dab1 (diff)
download  armnn-0422cf2f80f740751273fe6eed6cdfddb0c487f8.tar.gz
IVGCVSW-7576 IVGCVSW-7609 Add BatchMatMul and Shape to Opaque Delegate
Signed-off-by: John Mcloughlin <john.mcloughlin@arm.com>
Change-Id: Id4b02b951ed81c69171f6af2d0d327175c6e5d3c
-rw-r--r--  delegate/CMakeLists.txt                |   4
-rw-r--r--  delegate/opaque/CMakeLists.txt         |   2
-rw-r--r--  delegate/opaque/src/ArgMinMax.hpp      |   2
-rw-r--r--  delegate/opaque/src/BatchMatMul.hpp    | 126
-rw-r--r--  delegate/opaque/src/Shape.hpp          | 110
-rw-r--r--  delegate/opaque/src/armnn_delegate.cpp |  12
6 files changed, 255 insertions(+), 1 deletion(-)
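The BatchMatMul visitor added below maps TF Lite's adj_x/adj_y flags onto BatchMatMulDescriptor::m_TransposeX/m_TransposeY; for the real-valued tensors handled here that amounts to transposing the operand's last two dimensions before the multiply, not taking a true adjoint. A minimal, self-contained illustration of that semantic (plain C++, independent of the delegate code; the helpers exist purely for the example):

#include <array>
#include <cstddef>
#include <iostream>

// Illustration only: what adj_x / m_TransposeX means for a single (non-batched) real-valued matrix pair.
template <std::size_t R, std::size_t C>
using Mat = std::array<std::array<float, C>, R>;

template <std::size_t R, std::size_t C>
Mat<C, R> Transpose(const Mat<R, C>& m)
{
    Mat<C, R> t{};
    for (std::size_t i = 0; i < R; ++i)
        for (std::size_t j = 0; j < C; ++j)
            t[j][i] = m[i][j];
    return t;
}

template <std::size_t M, std::size_t K, std::size_t N>
Mat<M, N> MatMul(const Mat<M, K>& a, const Mat<K, N>& b)
{
    Mat<M, N> out{};
    for (std::size_t i = 0; i < M; ++i)
        for (std::size_t j = 0; j < N; ++j)
            for (std::size_t k = 0; k < K; ++k)
                out[i][j] += a[i][k] * b[k][j];
    return out;
}

int main()
{
    Mat<3, 2> x{{{1, 2}, {3, 4}, {5, 6}}};   // LHS stored as [3, 2]
    Mat<3, 2> y{{{1, 0}, {0, 1}, {1, 1}}};   // RHS stored as [3, 2]

    // adj_x = true (m_TransposeX = true): the LHS is used as its [2, 3] transpose, so the product is [2, 2].
    Mat<2, 2> result = MatMul(Transpose(x), y);
    std::cout << result[0][0] << '\n';       // 1*1 + 3*0 + 5*1 = 6
    std::cout << result[0][1] << '\n';       // 1*0 + 3*1 + 5*1 = 8
}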
diff --git a/delegate/CMakeLists.txt b/delegate/CMakeLists.txt
index 4bb9bdd5de..4600f42301 100644
--- a/delegate/CMakeLists.txt
+++ b/delegate/CMakeLists.txt
@@ -263,6 +263,8 @@ if(BUILD_UNIT_TESTS)
test/ActivationTestHelper.hpp
test/ArgMinMaxTest.cpp
test/ArgMinMaxTestHelper.hpp
+ test/BatchMatMulTest.cpp
+ test/BatchMatMulTestHelper.hpp
test/BatchSpaceTest.cpp
test/BatchSpaceTestHelper.hpp
test/CastTest.cpp
@@ -289,6 +291,8 @@ if(BUILD_UNIT_TESTS)
test/NormalizationTestHelper.hpp
test/PreluTest.cpp
test/PreluTestHelper.hpp
+ test/ShapeTest.cpp
+ test/ShapeTestHelper.hpp
test/TestUtils.hpp
test/TestUtils.cpp
test/TransposeConvolution2dTest.cpp)
diff --git a/delegate/opaque/CMakeLists.txt b/delegate/opaque/CMakeLists.txt
index c019649ddf..dd16a70048 100644
--- a/delegate/opaque/CMakeLists.txt
+++ b/delegate/opaque/CMakeLists.txt
@@ -10,6 +10,7 @@ list(APPEND armnnOpaqueDelegateObject_sources
src/Activation.hpp
src/ArgMinMax.hpp
src/armnn_delegate.cpp
+ src/BatchMatMul.hpp
src/BatchSpace.hpp
src/Comparison.hpp
src/Control.hpp
@@ -22,6 +23,7 @@ list(APPEND armnnOpaqueDelegateObject_sources
src/Normalization.hpp
src/Prelu.hpp
src/Redefine.hpp
+ src/Shape.hpp
src/SharedFunctions.cpp
src/SharedFunctions.hpp)
diff --git a/delegate/opaque/src/ArgMinMax.hpp b/delegate/opaque/src/ArgMinMax.hpp
index 7dfd89f57b..e5499022c6 100644
--- a/delegate/opaque/src/ArgMinMax.hpp
+++ b/delegate/opaque/src/ArgMinMax.hpp
@@ -63,7 +63,7 @@ TfLiteStatus VisitArgMinMaxOperator(DelegateData& delegateData,
}
const TfLiteOpaqueTensor* tfLiteOutputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, outputTensors[0]);
- if (!IsValid(tfLiteContext, tfLiteInputTensor, argMinMaxOperatorCode, nodeIndex))
+ if (!IsValid(tfLiteContext, tfLiteOutputTensor, argMinMaxOperatorCode, nodeIndex))
{
return kTfLiteError;
}
diff --git a/delegate/opaque/src/BatchMatMul.hpp b/delegate/opaque/src/BatchMatMul.hpp
index e16969768e..5da6e5ac6a 100644
--- a/delegate/opaque/src/BatchMatMul.hpp
+++ b/delegate/opaque/src/BatchMatMul.hpp
@@ -2,3 +2,129 @@
// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
+
+#pragma once
+
+#include <OpaqueDelegateUtils.hpp>
+
+namespace armnnOpaqueDelegate
+{
+TfLiteStatus VisitBatchMatMulOperator(DelegateData& delegateData,
+ TfLiteOpaqueContext* tfLiteContext,
+ TfLiteOpaqueNode* tfLiteNode,
+ int nodeIndex,
+ int32_t operatorCode)
+{
+ TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 2, nodeIndex));
+ TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+
+ // Gather input indices and use to get input tensor.
+ auto numInputs = TfLiteOpaqueNodeNumberOfInputs(tfLiteNode);
+ const int* inputTensors;
+ if (TfLiteOpaqueNodeInputs(tfLiteNode, &inputTensors, &numInputs) != kTfLiteOk)
+ {
+ TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnOpaqueDelegate: Unable to gather input tensor indices from node #%d: ",
+ nodeIndex);
+ return kTfLiteError;
+ }
+
+ const TfLiteOpaqueTensor* kTfLiteLHSInputTensor =
+ TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[0]);
+ const TfLiteOpaqueTensor* kTfLiteRHSInputTensor =
+ TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[1]);
+
+ if (!IsValid(tfLiteContext, kTfLiteLHSInputTensor, operatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+ if (!IsValid(tfLiteContext, kTfLiteRHSInputTensor, operatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+
+ if (IsDynamicTensor(kTfLiteLHSInputTensor) || IsDynamicTensor(kTfLiteRHSInputTensor))
+ {
+ TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnOpaqueDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
+ operatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+
+ // Gather output indices and use to get output tensors.
+ int numOutputs = 0;
+ const int* outputTensors;
+ if (TfLiteOpaqueNodeOutputs(tfLiteNode, &outputTensors, &numOutputs) != kTfLiteOk)
+ {
+ TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnOpaqueDelegate: Unable to gather output tensor indices from node #%d: ",
+ nodeIndex);
+ return kTfLiteError;
+ }
+
+ const TfLiteOpaqueTensor* kTfLiteOutputTensor =
+ TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, outputTensors[0]);
+ if (IsDynamicTensor(kTfLiteOutputTensor))
+ {
+ TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnOpaqueDelegate: Dynamic output tensors are not supported in operator #%d node #%d: ",
+ operatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+
+ const armnn::TensorInfo& armnnLHSInputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(kTfLiteLHSInputTensor);
+ const armnn::TensorInfo& armnnRHSInputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(kTfLiteRHSInputTensor);
+ const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(kTfLiteOutputTensor, true);
+
+ armnn::BatchMatMulDescriptor descriptor;
+ auto* params = reinterpret_cast<TfLiteBatchMatMulParams *>(TfLiteOpaqueNodeGetBuiltinData(tfLiteNode));
+
+ // The TensorFlow parameters are called adjoint, but they only apply transposes behind the scenes;
+ // no true adjoint is performed.
+ descriptor.m_TransposeX = params->adj_x;
+ descriptor.m_TransposeY = params->adj_y;
+
+ // Check if supported
+ bool isSupported = false;
+ armnn::BackendId setBackend;
+ auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
+ {
+ FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("BATCH_MATMUL",
+ tfLiteContext,
+ IsBatchMatMulSupported,
+ delegateData.m_Backends,
+ isSupported,
+ setBackend,
+ armnnLHSInputTensorInfo,
+ armnnRHSInputTensorInfo,
+ outputTensorInfo,
+ descriptor);
+ };
+
+ if (!delegateData.m_Network)
+ {
+ validateFunc(outputTensorInfo, isSupported);
+ return isSupported ? kTfLiteOk : kTfLiteError;
+ }
+
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddBatchMatMulLayer(descriptor);
+ ARMNN_ASSERT(layer != nullptr);
+ layer->SetBackendId(setBackend);
+
+ armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
+ outputSlot.SetTensorInfo(outputTensorInfo);
+
+ // Try to connect the Constant Inputs if there are any
+ if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
+
+ return Connect(layer, tfLiteContext, tfLiteNode, delegateData);
+}
+
+} // namespace armnnOpaqueDelegate
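The Shape visitor that follows accepts only kTfLiteInt32 or kTfLiteInt64 as the builtin's out_type and emits the input tensor's dimensions as a one-dimensional tensor. A minimal, self-contained illustration of that behaviour (not delegate code; the function exists purely for the example):

#include <cassert>
#include <cstdint>
#include <vector>

// What the SHAPE builtin computes: a 1-D tensor whose values are the input tensor's dimension sizes,
// emitted in the integer type selected by out_type (int32 here).
std::vector<std::int32_t> ShapeOf(const std::vector<std::int32_t>& inputDims)
{
    return inputDims;
}

int main()
{
    const std::vector<std::int32_t> dims{1, 3, 2, 3};                    // shape of some input tensor
    assert(ShapeOf(dims) == (std::vector<std::int32_t>{1, 3, 2, 3}));    // output tensor itself has shape [4]
}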
diff --git a/delegate/opaque/src/Shape.hpp b/delegate/opaque/src/Shape.hpp
index e16969768e..ecc545d7c5 100644
--- a/delegate/opaque/src/Shape.hpp
+++ b/delegate/opaque/src/Shape.hpp
@@ -2,3 +2,113 @@
// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
+
+#pragma once
+
+#include <OpaqueDelegateUtils.hpp>
+
+namespace armnnOpaqueDelegate
+{
+
+TfLiteStatus VisitShapeOperator(DelegateData& delegateData,
+ TfLiteOpaqueContext* tfLiteContext,
+ TfLiteOpaqueNode* tfLiteNode,
+ int nodeIndex,
+ int32_t operatorCode)
+{
+ TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+ TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+
+ // Gather input indices and use to get input tensor.
+ auto numInputs = TfLiteOpaqueNodeNumberOfInputs(tfLiteNode);
+ const int* inputTensors;
+ if (TfLiteOpaqueNodeInputs(tfLiteNode, &inputTensors, &numInputs) != kTfLiteOk)
+ {
+ TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnOpaqueDelegate: Unable to gather input tensor indices from node #%d: ",
+ nodeIndex);
+ return kTfLiteError;
+ }
+
+ const TfLiteOpaqueTensor* tfLiteInputTensor =
+ TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[0]);
+ if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+
+ // Gather output indices and use to get output tensors.
+ int numOutputs = 0;
+ const int* outputTensors;
+ if (TfLiteOpaqueNodeOutputs(tfLiteNode, &outputTensors, &numOutputs) != kTfLiteOk)
+ {
+ TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnOpaqueDelegate: Unable to gather output tensor indices from node #%d: ",
+ nodeIndex);
+ return kTfLiteError;
+ }
+
+ const TfLiteOpaqueTensor* tfLiteOutputTensor =
+ TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, outputTensors[0]);
+ if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+
+ const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteInputTensor);
+ const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteOutputTensor, true);
+
+ auto* shapeParameters = reinterpret_cast<TfLiteShapeParams*>(TfLiteOpaqueNodeGetBuiltinData(tfLiteNode));
+ if (shapeParameters->out_type != kTfLiteInt32 && shapeParameters->out_type != kTfLiteInt64)
+ {
+ TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnOpaqueDelegate: output_type data type is not supported in operator #%d node #%d: ",
+ operatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+
+ bool isSupported = false;
+ armnn::BackendId setBackend;
+ auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
+ {
+ FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("SHAPE",
+ tfLiteContext,
+ IsShapeSupported,
+ delegateData.m_Backends,
+ isSupported,
+ setBackend,
+ inputTensorInfo,
+ outInfo);
+ };
+
+ // If delegateData.m_Network is null we are in the validation pass: only report whether the backend
+ // supports the operator, without adding anything to the network.
+ // If it is supported, VisitShapeOperator will be called again to add the layer to the network, as seen below.
+ if (!delegateData.m_Network)
+ {
+ validateFunc(outputTensorInfo, isSupported);
+ return isSupported ? kTfLiteOk : kTfLiteError;
+ }
+
+ // Add a Shape layer
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddShapeLayer();
+ ARMNN_ASSERT(layer != nullptr);
+ layer->SetBackendId(setBackend);
+
+ armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
+ outputSlot.SetTensorInfo(outputTensorInfo);
+
+ // Try to connect the Constant Inputs if there are any
+ if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
+
+ // Connect
+ return Connect(layer, tfLiteContext, tfLiteNode, delegateData);
+}
+
+} // namespace armnnOpaqueDelegate
diff --git a/delegate/opaque/src/armnn_delegate.cpp b/delegate/opaque/src/armnn_delegate.cpp
index bffbca8111..3b647f3531 100644
--- a/delegate/opaque/src/armnn_delegate.cpp
+++ b/delegate/opaque/src/armnn_delegate.cpp
@@ -641,6 +641,12 @@ TfLiteStatus ArmnnSubgraph::VisitNode(DelegateData& delegateData,
tfLiteNode,
nodeIndex,
kTfLiteBuiltinArgMin);
+ case kTfLiteBuiltinBatchMatmul:
+ return VisitBatchMatMulOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinBatchMatmul);
case kTfLiteBuiltinBatchToSpaceNd:
return VisitBatchToSpaceNdOperator(delegateData,
tfLiteContext,
@@ -853,6 +859,12 @@ TfLiteStatus ArmnnSubgraph::VisitNode(DelegateData& delegateData,
nodeIndex,
kTfLiteBuiltinRsqrt,
armnn::UnaryOperation::Rsqrt);
+ case kTfLiteBuiltinShape:
+ return VisitShapeOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinShape);
case kTfLiteBuiltinSin:
return VisitElementwiseUnaryOperator(delegateData,
tfLiteContext,