author     Matthew Sloyan <matthew.sloyan@arm.com>    2023-04-26 11:42:46 +0100
committer  Matthew Sloyan <matthew.sloyan@arm.com>    2023-04-26 11:36:49 +0000
commit     2b04ec3b94da152281fbbc69f8539378589b1f56 (patch)
tree       13fd9f3a8ca44cf4f3a53ccf3f44960cfe627475
parent     f2dffdb00bdf3108ebda6aaa142249d208f0c507 (diff)
download   armnn-2b04ec3b94da152281fbbc69f8539378589b1f56.tar.gz
IVGCVSW-7579 IVGCVSW-7581 IVGCVSW-7583 Implement Comparison, Concat and Mean in Opaque Delegate
 * Removed the input slot check from the Connect function, as the number of TFLite and Arm NN inputs can differ.
 * Moved the SetupConcatViewOrigin function to DelegateUtils.hpp.
 * Simplified the validation checks in the VisitConvolution functions, as IsValid and IsDynamic were already being called.

Signed-off-by: Matthew Sloyan <matthew.sloyan@arm.com>
Change-Id: I858dbe4b643f9d350d9c38ea255ce5effbda4612
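
For context, a minimal standalone sketch (hypothetical shapes, not part of this patch) of the mismatch that motivated removing the Connect input slot check: TFLite's MEAN node carries two inputs (data and axis), while Arm NN folds the axis into the MeanDescriptor, leaving the layer with a single input slot.

#include <armnn/Descriptors.hpp>
#include <armnn/INetwork.hpp>

void MeanInputCountSketch()
{
    armnn::INetworkPtr network = armnn::INetwork::Create();

    // The TFLite MEAN node has two inputs: the data tensor and an axis tensor.
    // Arm NN stores the axis inside the descriptor instead.
    armnn::MeanDescriptor desc;
    desc.m_Axis     = { 1 };
    desc.m_KeepDims = true;

    armnn::IConnectableLayer* meanLayer = network->AddMeanLayer(desc);
    unsigned int numSlots = meanLayer->GetNumInputSlots(); // 1, not 2
    (void)numSlots;
}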
-rw-r--r--  delegate/CMakeLists.txt                      |   4
-rw-r--r--  delegate/classic/src/Control.hpp             |  37
-rw-r--r--  delegate/common/src/DelegateUtils.hpp        |  29
-rw-r--r--  delegate/opaque/src/Comparison.hpp           | 141
-rw-r--r--  delegate/opaque/src/Control.hpp              | 315
-rw-r--r--  delegate/opaque/src/Convolution.hpp          |  60
-rw-r--r--  delegate/opaque/src/OpaqueDelegateUtils.hpp  |  15
-rw-r--r--  delegate/opaque/src/armnn_delegate.cpp       |  48
8 files changed, 546 insertions(+), 103 deletions(-)
diff --git a/delegate/CMakeLists.txt b/delegate/CMakeLists.txt
index b81feb6b1e..ab61337dce 100644
--- a/delegate/CMakeLists.txt
+++ b/delegate/CMakeLists.txt
@@ -261,6 +261,10 @@ if(BUILD_UNIT_TESTS)
opaque/src/test/DelegateTestInterpreter.cpp
test/CastTest.cpp
test/CastTestHelper.hpp
+ test/ComparisonTest.cpp
+ test/ComparisonTestHelper.hpp
+ test/ControlTest.cpp
+ test/ControlTestHelper.hpp
test/Convolution2dTest.cpp
test/ConvolutionTestHelper.hpp
test/DepthwiseConvolution2dTest.cpp
diff --git a/delegate/classic/src/Control.hpp b/delegate/classic/src/Control.hpp
index a3ea6e92a7..e6779f360a 100644
--- a/delegate/classic/src/Control.hpp
+++ b/delegate/classic/src/Control.hpp
@@ -5,7 +5,7 @@
#pragma once
-#include <armnn/utility/IgnoreUnused.hpp>
+#include <DelegateUtils.hpp>
#include <tensorflow/lite/builtin_ops.h>
#include <tensorflow/lite/c/builtin_op_data.h>
@@ -21,35 +21,6 @@
namespace armnnDelegate
{
-void SetupConcatViewOrigin(const armnn::TensorInfo& inputTensorInfo,
- armnn::OriginsDescriptor& concatDescriptor,
- const unsigned int concatAxis,
- unsigned int inputIndex,
- unsigned int& mergeDimOrigin)
-{
- const uint32_t inputRank = concatDescriptor.GetNumDimensions();
-
- // double check dimensions of the tensors
- if (inputTensorInfo.GetNumDimensions() != inputRank)
- {
- throw armnn::ParseException("The number of dimensions for input tensors "
- "of the concatenation operator should be: " + std::to_string(inputRank));
- }
-
- for (unsigned int j = 0; j < concatAxis; ++j)
- {
- concatDescriptor.SetViewOriginCoord(inputIndex, j, 0);
- }
-
- concatDescriptor.SetViewOriginCoord(inputIndex, concatAxis, mergeDimOrigin);
- mergeDimOrigin += inputTensorInfo.GetShape()[concatAxis];
-
- for (unsigned int j = concatAxis + 1; j < inputRank; ++j)
- {
- concatDescriptor.SetViewOriginCoord(inputIndex, j, 0);
- }
-}
-
TfLiteStatus VisitConcatenationOperator(DelegateData& delegateData,
TfLiteContext* tfLiteContext,
TfLiteNode* tfLiteNode,
@@ -322,12 +293,6 @@ TfLiteStatus VisitControlOperator(DelegateData& delegateData,
int nodeIndex,
int32_t operatorCode)
{
- armnn::IgnoreUnused(delegateData,
- tfLiteContext,
- tfLiteNode,
- nodeIndex,
- operatorCode);
-
switch(operatorCode)
{
case kTfLiteBuiltinConcatenation:
diff --git a/delegate/common/src/DelegateUtils.hpp b/delegate/common/src/DelegateUtils.hpp
index b953699016..51c70f9ba1 100644
--- a/delegate/common/src/DelegateUtils.hpp
+++ b/delegate/common/src/DelegateUtils.hpp
@@ -109,4 +109,33 @@ void UpdateConstantTensorOutputs(const armnn::TensorInfo& inputInfo, armnn::Tens
}
}
+void SetupConcatViewOrigin(const armnn::TensorInfo& inputTensorInfo,
+ armnn::OriginsDescriptor& concatDescriptor,
+ const unsigned int concatAxis,
+ unsigned int inputIndex,
+ unsigned int& mergeDimOrigin)
+{
+ const uint32_t inputRank = concatDescriptor.GetNumDimensions();
+
+ // double check dimensions of the tensors
+ if (inputTensorInfo.GetNumDimensions() != inputRank)
+ {
+ throw armnn::ParseException("The number of dimensions for input tensors "
+ "of the concatenation operator should be: " + std::to_string(inputRank));
+ }
+
+ for (unsigned int j = 0; j < concatAxis; ++j)
+ {
+ concatDescriptor.SetViewOriginCoord(inputIndex, j, 0);
+ }
+
+ concatDescriptor.SetViewOriginCoord(inputIndex, concatAxis, mergeDimOrigin);
+ mergeDimOrigin += inputTensorInfo.GetShape()[concatAxis];
+
+ for (unsigned int j = concatAxis + 1; j < inputRank; ++j)
+ {
+ concatDescriptor.SetViewOriginCoord(inputIndex, j, 0);
+ }
+}
+
} // namespace anonymous
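
As a sanity check on the relocated helper, a minimal usage sketch (hypothetical shapes, assuming DelegateUtils.hpp is on the include path): concatenating two 2x3 tensors along axis 1 yields view origins (0, 0) and (0, 3), with mergeDimOrigin accumulating the axis extent.

#include <DelegateUtils.hpp>
#include <armnn/Descriptors.hpp>
#include <armnn/Tensor.hpp>

void ConcatViewOriginSketch()
{
    armnn::TensorInfo input0({ 2, 3 }, armnn::DataType::Float32);
    armnn::TensorInfo input1({ 2, 3 }, armnn::DataType::Float32);

    armnn::OriginsDescriptor concatDescriptor(/*numViews=*/2, /*numDimensions=*/2);
    concatDescriptor.SetConcatAxis(1);

    unsigned int mergeDimOrigin = 0;
    SetupConcatViewOrigin(input0, concatDescriptor, 1, 0, mergeDimOrigin); // view 0 at (0, 0)
    SetupConcatViewOrigin(input1, concatDescriptor, 1, 1, mergeDimOrigin); // view 1 at (0, 3)
    // mergeDimOrigin is now 6, the concatenated extent along axis 1.
}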
diff --git a/delegate/opaque/src/Comparison.hpp b/delegate/opaque/src/Comparison.hpp
index e16969768e..046be83094 100644
--- a/delegate/opaque/src/Comparison.hpp
+++ b/delegate/opaque/src/Comparison.hpp
@@ -2,3 +2,144 @@
// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
+
+#pragma once
+
+#include <OpaqueDelegateUtils.hpp>
+
+#include <tensorflow/lite/builtin_ops.h>
+#include <tensorflow/lite/c/builtin_op_data.h>
+#include <tensorflow/lite/c/common.h>
+#include <tensorflow/lite/minimal_logging.h>
+
+namespace armnnOpaqueDelegate
+{
+
+TfLiteStatus VisitComparisonOperator(DelegateData& delegateData,
+ TfLiteOpaqueContext* tfLiteContext,
+ TfLiteOpaqueNode* tfLiteNode,
+ int nodeIndex,
+ int32_t tfLiteComparisonOperatorCode)
+{
+ TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 2, nodeIndex));
+ TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+
+ // Gather input indices and use to get input tensor.
+ int numInputs = 0;
+ const int* inputTensors;
+ if (TfLiteOpaqueNodeInputs(tfLiteNode, &inputTensors, &numInputs) != kTfLiteOk)
+ {
+ TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnOpaqueDelegate: Unable to gather input tensor indices from node #%d: ",
+ nodeIndex);
+ return kTfLiteError;
+ }
+
+ // Use input indices to get input tensors.
+ const TfLiteOpaqueTensor* tfLiteInputTensor0 = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[0]);
+ if (!IsValid(tfLiteContext, tfLiteInputTensor0, tfLiteComparisonOperatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+
+ const TfLiteOpaqueTensor* tfLiteInputTensor1 = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[1]);
+ if (!IsValid(tfLiteContext, tfLiteInputTensor1, tfLiteComparisonOperatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+
+ // Gather output indices and use to get output tensors.
+ int numOutputs = 0;
+ const int* outputTensors;
+ if (TfLiteOpaqueNodeOutputs(tfLiteNode, &outputTensors, &numOutputs) != kTfLiteOk)
+ {
+ TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnOpaqueDelegate: Unable to gather output tensor indices from node #%d: ",
+ nodeIndex);
+ return kTfLiteError;
+ }
+
+ const TfLiteOpaqueTensor* tfLiteOutputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, outputTensors[0]);
+ if (!IsValid(tfLiteContext, tfLiteOutputTensor, tfLiteComparisonOperatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+
+ armnn::TensorInfo inputTensorInfo0 = GetTensorInfoForTfLiteOpaqueTensor(tfLiteInputTensor0);
+ armnn::TensorInfo inputTensorInfo1 = GetTensorInfoForTfLiteOpaqueTensor(tfLiteInputTensor1);
+ const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteOutputTensor, true);
+
+ // Check if we need to expand the dims of the input tensor infos.
+ // This is required for a few of the backends.
+ if(inputTensorInfo0.GetNumDimensions() != inputTensorInfo1.GetNumDimensions())
+ {
+ ExpandTensorRankToEqual(inputTensorInfo0, inputTensorInfo1);
+ }
+
+ armnn::ComparisonOperation comparisonOperation = armnn::ComparisonOperation::Equal;
+ switch(tfLiteComparisonOperatorCode)
+ {
+ case kTfLiteBuiltinEqual:
+ comparisonOperation = armnn::ComparisonOperation::Equal;
+ break;
+ case kTfLiteBuiltinGreater:
+ comparisonOperation = armnn::ComparisonOperation::Greater;
+ break;
+ case kTfLiteBuiltinGreaterEqual:
+ comparisonOperation = armnn::ComparisonOperation::GreaterOrEqual;
+ break;
+ case kTfLiteBuiltinLess:
+ comparisonOperation = armnn::ComparisonOperation::Less;
+ break;
+ case kTfLiteBuiltinLessEqual:
+ comparisonOperation = armnn::ComparisonOperation::LessOrEqual;
+ break;
+ case kTfLiteBuiltinNotEqual:
+ comparisonOperation = armnn::ComparisonOperation::NotEqual;
+ break;
+ default:
+ return kTfLiteError;
+ }
+
+ armnn::ComparisonDescriptor descriptor(comparisonOperation);
+ bool isSupported = false;
+ armnn::BackendId setBackend;
+ auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
+ {
+ FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("COMPARISON",
+ tfLiteContext,
+ IsComparisonSupported,
+ delegateData.m_Backends,
+ isSupported,
+ setBackend,
+ inputTensorInfo0,
+ inputTensorInfo1,
+ outputTensorInfo,
+ descriptor);
+ };
+
+ if (!delegateData.m_Network)
+ {
+ validateFunc(outputTensorInfo, isSupported);
+ return isSupported ? kTfLiteOk : kTfLiteError;
+ }
+
+ armnn::IConnectableLayer* comparisonLayer = delegateData.m_Network->AddComparisonLayer(descriptor);
+ ARMNN_ASSERT(comparisonLayer != nullptr);
+ comparisonLayer->SetBackendId(setBackend);
+
+ armnn::IOutputSlot& outputSlot = comparisonLayer->GetOutputSlot(0);
+ outputSlot.SetTensorInfo(outputTensorInfo);
+
+ // try to connect the Constant Inputs if there are any
+ if (ProcessInputs(comparisonLayer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
+
+ return Connect(comparisonLayer, tfLiteContext, tfLiteNode, delegateData);
+}
+
+} // namespace armnnOpaqueDelegate
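
For reference, a standalone sketch (not delegate code) of the Arm NN layer that VisitComparisonOperator builds for kTfLiteBuiltinGreater; Arm NN comparison layers produce Boolean outputs regardless of the input data type.

#include <armnn/Descriptors.hpp>
#include <armnn/INetwork.hpp>
#include <armnn/Tensor.hpp>

void GreaterLayerSketch()
{
    armnn::INetworkPtr network = armnn::INetwork::Create();

    armnn::ComparisonDescriptor descriptor(armnn::ComparisonOperation::Greater);
    armnn::IConnectableLayer* comparisonLayer = network->AddComparisonLayer(descriptor);

    // Comparison layers emit Boolean tensors whatever the input type.
    armnn::TensorInfo outputInfo({ 1, 4 }, armnn::DataType::Boolean);
    comparisonLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
}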
diff --git a/delegate/opaque/src/Control.hpp b/delegate/opaque/src/Control.hpp
index e16969768e..b3d589756b 100644
--- a/delegate/opaque/src/Control.hpp
+++ b/delegate/opaque/src/Control.hpp
@@ -2,3 +2,318 @@
// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
+
+#pragma once
+
+#include <DelegateUtils.hpp>
+
+#include <tensorflow/lite/builtin_ops.h>
+#include <tensorflow/lite/c/builtin_op_data.h>
+#include <tensorflow/lite/c/common.h>
+#include <tensorflow/lite/kernels/internal/tensor_ctypes.h>
+#include <tensorflow/lite/minimal_logging.h>
+
+#include <algorithm>
+#include <iterator>
+#include <set>
+#include <string>
+#include <vector>
+
+namespace armnnOpaqueDelegate
+{
+
+TfLiteStatus VisitConcatenationOperator(DelegateData& delegateData,
+ TfLiteOpaqueContext* tfLiteContext,
+ TfLiteOpaqueNode* tfLiteNode,
+ int nodeIndex,
+ int32_t tfLiteConcatOperatorCode)
+{
+ auto numInputs = TfLiteOpaqueNodeNumberOfInputs(tfLiteNode);
+ if (numInputs < 2)
+ {
+ TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnOpaqueDelegate: Minimum number of inputs (%d != %d) in node #%d",
+ 2, numInputs, nodeIndex);
+ return kTfLiteError;
+ }
+ TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+
+ // Gather input indices and use to get input tensor.
+ const int* inputTensors;
+ if (TfLiteOpaqueNodeInputs(tfLiteNode, &inputTensors, &numInputs) != kTfLiteOk)
+ {
+ TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnOpaqueDelegate: Unable to gather input tensor indices from node #%d: ",
+ nodeIndex);
+ return kTfLiteError;
+ }
+
+ std::vector<armnn::TensorInfo> inputTensorInfos;
+ for (int i = 0; i < numInputs; ++i)
+ {
+ const TfLiteOpaqueTensor* inputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[i]);
+ if (!IsValid(tfLiteContext, inputTensor, tfLiteConcatOperatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+
+ armnn::TensorInfo inputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(inputTensor);
+ inputTensorInfos.emplace_back(inputTensorInfo);
+ }
+
+ // Convert input tensors to const armnn::TensorInfo* type for FORWARD_LAYER_SUPPORT_FUNC.
+ std::vector<const armnn::TensorInfo*> inputConstTensorInfos;
+ std::transform(inputTensorInfos.begin(),
+ inputTensorInfos.end(),
+ std::back_inserter(inputConstTensorInfos),
+ [](armnn::TensorInfo& t) -> const armnn::TensorInfo* { return &t; });
+
+ // Gather output indices and use to get output tensors.
+ int numOutputs = 0;
+ const int* outputTensors;
+ if (TfLiteOpaqueNodeOutputs(tfLiteNode, &outputTensors, &numOutputs) != kTfLiteOk)
+ {
+ TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnOpaqueDelegate: Unable to gather output tensor indices from node #%d: ",
+ nodeIndex);
+ return kTfLiteError;
+ }
+
+ const TfLiteOpaqueTensor* tfLiteOutputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, outputTensors[0]);
+ if (!IsValid(tfLiteContext, tfLiteOutputTensor, tfLiteConcatOperatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+
+ // Setup OriginsDescriptor, axis and view origin
+ auto numConcatView = static_cast<unsigned int>(numInputs);
+ uint32_t inputRank = TfLiteOpaqueTensorNumDims(TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[0]));
+
+ auto* concatenationParameters =
+ reinterpret_cast<TfLiteConcatenationParams*>(TfLiteOpaqueNodeGetBuiltinData(tfLiteNode));
+
+ if (!concatenationParameters)
+ {
+ throw armnn::Exception("TfLiteArmnnOpaqueDelegate: Concat parameters are null in node #" + std::to_string(nodeIndex));
+ }
+
+ const auto concatDimInput = static_cast<unsigned int>(
+ (static_cast<int>(inputRank) + concatenationParameters->axis) % static_cast<int>(inputRank));
+
+ armnn::OriginsDescriptor concatDescriptor(static_cast<uint32_t>(numConcatView), inputRank);
+ concatDescriptor.SetConcatAxis(concatDimInput);
+
+ unsigned int mergeDimOrigin = 0;
+ for (unsigned int viewIndex = 0; viewIndex < numConcatView; ++viewIndex)
+ {
+ armnn::TensorInfo inputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(
+ TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[viewIndex]));
+
+ // Sets up concatDescriptor view origin
+ SetupConcatViewOrigin(inputTensorInfo, concatDescriptor, concatDimInput, viewIndex, mergeDimOrigin);
+ }
+
+ const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteOutputTensor, true);
+
+ // Verify we support the fused activation before attempting to create a layer
+ TfLiteFusedActivation activationType = concatenationParameters->activation;
+
+ TfLiteStatus activationStatus = ValidateFusedActivationOperator(delegateData, tfLiteContext, outputTensorInfo,
+ outputTensorInfo, activationType);
+ if(activationStatus != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
+
+ // Check if supported
+ bool isSupported = false;
+ armnn::BackendId setBackend;
+ auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
+ {
+ FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("CONCATENATION",
+ tfLiteContext,
+ IsConcatSupported,
+ delegateData.m_Backends,
+ isSupported,
+ setBackend,
+ inputConstTensorInfos,
+ outputTensorInfo,
+ concatDescriptor);
+ };
+
+ if (!delegateData.m_Network)
+ {
+ validateFunc(outputTensorInfo, isSupported);
+ return isSupported ? kTfLiteOk : kTfLiteError;
+ }
+
+ // Setup layer and connect.
+ armnn::IConnectableLayer* concatenationLayer = delegateData.m_Network->AddConcatLayer(concatDescriptor);
+ ARMNN_ASSERT(concatenationLayer != nullptr);
+ concatenationLayer->SetBackendId(setBackend);
+
+ // Connect the Constant Inputs
+ auto inputsTensorsProcess = ProcessInputs(concatenationLayer,
+ delegateData,
+ tfLiteContext,
+ tfLiteNode);
+ if (inputsTensorsProcess == kTfLiteError)
+ {
+ return inputsTensorsProcess;
+ }
+
+ armnn::IOutputSlot& outputSlot = concatenationLayer->GetOutputSlot(0);
+ outputSlot.SetTensorInfo(outputTensorInfo);
+ if(Connect(concatenationLayer, tfLiteContext, tfLiteNode, delegateData) != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
+
+ if (activationType == kTfLiteActNone)
+ {
+ // No Activation
+ return kTfLiteOk;
+ }
+
+ // Check and Create activation
+ return FusedActivation(tfLiteContext, tfLiteNode, activationType, concatenationLayer, 0, delegateData);
+}
+
+TfLiteStatus VisitMeanOperator(DelegateData& delegateData,
+ TfLiteOpaqueContext* tfLiteContext,
+ TfLiteOpaqueNode* tfLiteNode,
+ int nodeIndex,
+ int32_t tfLiteMeanOperatorCode)
+{
+ TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 2, nodeIndex));
+ TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+
+ // Gather input indices and use to get input tensor.
+ int numInputs = 0;
+ const int* inputTensors;
+ if (TfLiteOpaqueNodeInputs(tfLiteNode, &inputTensors, &numInputs) != kTfLiteOk)
+ {
+ TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnOpaqueDelegate: Unable to gather input tensor indices from node #%d: ",
+ nodeIndex);
+ return kTfLiteError;
+ }
+
+ const TfLiteOpaqueTensor* tfLiteInputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[0]);
+ if (!IsValid(tfLiteContext, tfLiteInputTensor, tfLiteMeanOperatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+
+ // Use input indices to get axis tensor.
+ const TfLiteOpaqueTensor* tfLiteAxisTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[1]);
+ if (!IsValid(tfLiteContext, tfLiteAxisTensor, tfLiteMeanOperatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+
+ // Gather output indices and use to get output tensors.
+ int numOutputs = 0;
+ const int* outputTensors;
+ if (TfLiteOpaqueNodeOutputs(tfLiteNode, &outputTensors, &numOutputs) != kTfLiteOk)
+ {
+ TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnOpaqueDelegate: Unable to gather output tensor indices from node #%d: ",
+ nodeIndex);
+ return kTfLiteError;
+ }
+
+ const TfLiteOpaqueTensor* tfLiteOutputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, outputTensors[0]);
+ if (!IsValid(tfLiteContext, tfLiteOutputTensor, tfLiteMeanOperatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+
+ const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteInputTensor);
+ const armnn::TensorInfo& axisTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteAxisTensor);
+ const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteOutputTensor, true);
+
+ auto* axisTensorData = static_cast<int32_t*>(TfLiteOpaqueTensorData(tfLiteAxisTensor));
+
+ std::vector<int32_t> axis;
+ // Add the axis data to a vector; it is then converted to unsigned int and assigned to the descriptor's axis.
+ for (unsigned int i = 0; i < axisTensorInfo.GetNumElements(); ++i)
+ {
+ axis.emplace_back(axisTensorData[i]);
+ }
+
+ // Convert the axis to unsigned int and remove duplicates.
+ unsigned int rank = inputTensorInfo.GetNumDimensions();
+ std::set<unsigned int> uniqueAxis;
+ std::transform(axis.begin(),
+ axis.end(),
+ std::inserter(uniqueAxis, uniqueAxis.begin()),
+ [rank](int i) -> unsigned int { return (i + rank) % rank; });
+
+ // Setup MeanDescriptor and assign axis and keepDims
+ armnn::MeanDescriptor desc;
+ desc.m_Axis.assign(uniqueAxis.begin(), uniqueAxis.end());
+ desc.m_KeepDims = inputTensorInfo.GetNumDimensions() == outputTensorInfo.GetNumDimensions();
+
+ // Check if supported
+ bool isSupported = false;
+ armnn::BackendId setBackend;
+ auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
+ {
+ FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("MEAN",
+ tfLiteContext,
+ IsMeanSupported,
+ delegateData.m_Backends,
+ isSupported,
+ setBackend,
+ inputTensorInfo,
+ outputTensorInfo,
+ desc);
+ };
+
+ if (!delegateData.m_Network)
+ {
+ validateFunc(outputTensorInfo, isSupported);
+ return isSupported ? kTfLiteOk : kTfLiteError;
+ }
+
+ // Setup layer and connect.
+ armnn::IConnectableLayer* meanLayer = delegateData.m_Network->AddMeanLayer(desc);
+ ARMNN_ASSERT(meanLayer != nullptr);
+ meanLayer->SetBackendId(setBackend);
+
+ armnn::IOutputSlot& outputSlot = meanLayer->GetOutputSlot(0);
+ outputSlot.SetTensorInfo(outputTensorInfo);
+
+ // try to connect the Constant Inputs if there are any
+ if (ProcessInputs(meanLayer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
+
+ return Connect(meanLayer, tfLiteContext, tfLiteNode, delegateData);
+}
+
+TfLiteStatus VisitControlOperator(DelegateData& delegateData,
+ TfLiteOpaqueContext* tfLiteContext,
+ TfLiteOpaqueNode* tfLiteNode,
+ int nodeIndex,
+ int32_t operatorCode)
+{
+ switch(operatorCode)
+ {
+ case kTfLiteBuiltinConcatenation:
+ return VisitConcatenationOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, operatorCode);
+ case kTfLiteBuiltinMean:
+ return VisitMeanOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, operatorCode);
+ default:
+ return kTfLiteError;
+ }
+}
+
+} // namespace armnnOpaqueDelegate
+
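
A minimal standalone sketch (hypothetical values) of the axis normalisation performed in VisitMeanOperator above: negative TFLite axes wrap into [0, rank) and the std::set discards duplicates before the result is assigned to the descriptor.

#include <algorithm>
#include <cstdint>
#include <iterator>
#include <set>
#include <vector>

std::set<unsigned int> NormaliseAxesSketch(const std::vector<int32_t>& axis, unsigned int rank)
{
    std::set<unsigned int> uniqueAxis;
    std::transform(axis.begin(), axis.end(),
                   std::inserter(uniqueAxis, uniqueAxis.begin()),
                   [rank](int i) -> unsigned int { return (i + rank) % rank; });
    return uniqueAxis;
}

// NormaliseAxesSketch({ -1, 3, 3 }, 4) returns { 3 }: -1 wraps to 3 and the
// duplicate is discarded, so MeanDescriptor::m_Axis ends up with one entry.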
diff --git a/delegate/opaque/src/Convolution.hpp b/delegate/opaque/src/Convolution.hpp
index 163290b542..50c57d1d1f 100644
--- a/delegate/opaque/src/Convolution.hpp
+++ b/delegate/opaque/src/Convolution.hpp
@@ -47,31 +47,11 @@ TfLiteStatus VisitConv2dOperator(DelegateData& delegateData,
{
return kTfLiteError;
}
- if (IsDynamicTensor(tfLiteInputTensor))
- {
- TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
- tfLiteContext,
- "TfLiteArmnnOpaqueDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
- operatorCode, nodeIndex);
- return kTfLiteError;
- }
// Use input indices to get filter tensor.
const TfLiteOpaqueTensor* tfLiteFilterTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[1]);
- if(!IsValid(tfLiteFilterTensor))
- {
- TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
- tfLiteContext,
- "TfLiteArmnnOpaqueDelegate: Invalid filter tensor in operator #%d node #%d: ",
- operatorCode, nodeIndex);
- return kTfLiteError;
- }
- if (IsDynamicTensor(tfLiteFilterTensor))
+ if (!IsValid(tfLiteContext, tfLiteFilterTensor, operatorCode, nodeIndex))
{
- TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
- tfLiteContext,
- "TfLiteArmnnOpaqueDelegate: Dynamic filter tensors are not supported in node #%d: ",
- nodeIndex);
return kTfLiteError;
}
@@ -92,14 +72,6 @@ TfLiteStatus VisitConv2dOperator(DelegateData& delegateData,
{
return kTfLiteError;
}
- if (IsDynamicTensor(tfLiteOutputTensor))
- {
- TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
- tfLiteContext,
- "TfLiteArmnnOpaqueDelegate: Dynamic output tensors are not supported in operator #%d node #%d: ",
- operatorCode, nodeIndex);
- return kTfLiteError;
- }
const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteInputTensor);
const armnn::TensorInfo& filterTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteFilterTensor);
@@ -281,31 +253,11 @@ TfLiteStatus VisitDepthwiseConv2dOperator(DelegateData& delegateData,
{
return kTfLiteError;
}
- if (IsDynamicTensor(tfLiteInputTensor))
- {
- TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
- tfLiteContext,
- "TfLiteArmnnOpaqueDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
- operatorCode, nodeIndex);
- return kTfLiteError;
- }
// Use input indices to get filter tensor.
const TfLiteOpaqueTensor* tfLiteFilterTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[1]);
- if(!IsValid(tfLiteFilterTensor))
- {
- TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
- tfLiteContext,
- "TfLiteArmnnOpaqueDelegate: Invalid filter tensor in operator #%d node #%d: ",
- operatorCode, nodeIndex);
- return kTfLiteError;
- }
- if (IsDynamicTensor(tfLiteFilterTensor))
+ if (!IsValid(tfLiteContext, tfLiteFilterTensor, operatorCode, nodeIndex))
{
- TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
- tfLiteContext,
- "TfLiteArmnnOpaqueDelegate: Dynamic filter tensors are not supported in node #%d: ",
- nodeIndex);
return kTfLiteError;
}
@@ -326,14 +278,6 @@ TfLiteStatus VisitDepthwiseConv2dOperator(DelegateData& delegateData,
{
return kTfLiteError;
}
- if (IsDynamicTensor(tfLiteOutputTensor))
- {
- TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
- tfLiteContext,
- "TfLiteArmnnOpaqueDelegate: Dynamic output tensors are not supported in operator #%d node #%d: ",
- operatorCode, nodeIndex);
- return kTfLiteError;
- }
const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteInputTensor);
const armnn::TensorInfo& filterTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteFilterTensor);
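
The convolution call sites above now rely on the four-argument IsValid overload covering both failure modes at once. A purely hypothetical stand-in (fake types, not the real TFLite opaque API) showing the shape of the consolidated check:

#include <cstdio>

// Hypothetical stand-ins for the opaque TFLite types, purely illustrative.
struct FakeOpaqueTensor { bool allocated; bool dynamic; };

bool IsValidSketch(const FakeOpaqueTensor* tensor, int operatorCode, int nodeIndex)
{
    // One call now covers both failure modes the old call sites tested separately.
    if (tensor == nullptr || !tensor->allocated)
    {
        std::printf("Invalid tensor in operator #%d node #%d\n", operatorCode, nodeIndex);
        return false;
    }
    if (tensor->dynamic)
    {
        std::printf("Dynamic tensors are not supported in operator #%d node #%d\n",
                    operatorCode, nodeIndex);
        return false;
    }
    return true;
}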
diff --git a/delegate/opaque/src/OpaqueDelegateUtils.hpp b/delegate/opaque/src/OpaqueDelegateUtils.hpp
index 688c683fa8..1fbfade038 100644
--- a/delegate/opaque/src/OpaqueDelegateUtils.hpp
+++ b/delegate/opaque/src/OpaqueDelegateUtils.hpp
@@ -139,7 +139,7 @@ bool IsValid(TfLiteOpaqueContext* tfLiteContext,
{
TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
tfLiteContext,
- "TfLiteArmnnDelegate: Invalid TfLite tensor in operator #%d node #%d: ",
+ "TfLiteArmnnOpaqueDelegate: Invalid TfLite tensor in operator #%d node #%d: ",
operatorCode, nodeIndex);
return false;
}
@@ -147,7 +147,7 @@ bool IsValid(TfLiteOpaqueContext* tfLiteContext,
{
TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
tfLiteContext,
- "TfLiteArmnnDelegate: Dynamic tensors are not supported in operator #%d node #%d: ",
+ "TfLiteArmnnOpaqueDelegate: Dynamic tensors are not supported in operator #%d node #%d: ",
operatorCode, nodeIndex);
return false;
}
@@ -179,13 +179,10 @@ TfLiteStatus Connect(armnn::IConnectableLayer* layer,
{
return kTfLiteError;
}
- // numInputs is set from TfLiteOpaqueNodeInputs.
- if(numInputs != static_cast<int>(layer->GetNumInputSlots()))
- {
- ARMNN_LOG(error) << "Layer: " << layer->GetName() << ": Expected number of input slots does not match actual "
- "number of input slots.";
- return kTfLiteError;
- }
+ // We can't validate the number of TFLite inputs against layer->GetNumInputSlots() as some operators differ.
+ // An example is Mean, where the number of TFLite inputs is 2 but the number of Arm NN inputs is 1,
+ // as we store the axis within the descriptor.
+
// Connect the input slots.
// For each input slot, get the index of the opaque tensor that was allocated for it.
for (unsigned int inputIndex = 0; inputIndex < layer->GetNumInputSlots(); ++inputIndex)
diff --git a/delegate/opaque/src/armnn_delegate.cpp b/delegate/opaque/src/armnn_delegate.cpp
index 2ef1e0069a..c305c4020c 100644
--- a/delegate/opaque/src/armnn_delegate.cpp
+++ b/delegate/opaque/src/armnn_delegate.cpp
@@ -628,6 +628,12 @@ TfLiteStatus ArmnnSubgraph::VisitNode(DelegateData& delegateData,
tfLiteNode,
nodeIndex,
kTfLiteBuiltinCast);
+ case kTfLiteBuiltinConcatenation:
+ return VisitControlOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinConcatenation);
case kTfLiteBuiltinConv2d:
return VisitConvolutionOperator(delegateData,
tfLiteContext,
@@ -640,6 +646,48 @@ TfLiteStatus ArmnnSubgraph::VisitNode(DelegateData& delegateData,
tfLiteNode,
nodeIndex,
kTfLiteBuiltinDepthwiseConv2d);
+ case kTfLiteBuiltinEqual:
+ return VisitComparisonOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinEqual);
+ case kTfLiteBuiltinGreater:
+ return VisitComparisonOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinGreater);
+ case kTfLiteBuiltinGreaterEqual:
+ return VisitComparisonOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinGreaterEqual);
+ case kTfLiteBuiltinLess:
+ return VisitComparisonOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinLess);
+ case kTfLiteBuiltinLessEqual:
+ return VisitComparisonOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinLessEqual);
+ case kTfLiteBuiltinMean:
+ return VisitControlOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinMean);
+ case kTfLiteBuiltinNotEqual:
+ return VisitComparisonOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinNotEqual);
default:
return kTfLiteError;
}