Diffstat (limited to 'delegate/src')
-rw-r--r--  delegate/src/Comparison.hpp                  23
-rw-r--r--  delegate/src/Control.hpp                      5
-rw-r--r--  delegate/src/Convolution.hpp                 16
-rw-r--r--  delegate/src/DelegateUtils.hpp               79
-rw-r--r--  delegate/src/ElementwiseBinary.hpp           15
-rw-r--r--  delegate/src/Gather.hpp                       6
-rw-r--r--  delegate/src/GatherNd.hpp                     6
-rw-r--r--  delegate/src/LogicalBinary.hpp               22
-rw-r--r--  delegate/src/test/ElementwiseBinaryTest.cpp   6
9 files changed, 58 insertions, 120 deletions
diff --git a/delegate/src/Comparison.hpp b/delegate/src/Comparison.hpp
index 80354e835d..688f90c597 100644
--- a/delegate/src/Comparison.hpp
+++ b/delegate/src/Comparison.hpp
@@ -57,10 +57,17 @@ TfLiteStatus VisitComparisonOperator(DelegateData& delegateData,
return kTfLiteError;
}
- const armnn::TensorInfo& inputTensorInfo0 = GetTensorInfoForTfLiteTensor(tfLiteInputTensor0);
- const armnn::TensorInfo& inputTensorInfo1 = GetTensorInfoForTfLiteTensor(tfLiteInputTensor1);
+ armnn::TensorInfo inputTensorInfo0 = GetTensorInfoForTfLiteTensor(tfLiteInputTensor0);
+ armnn::TensorInfo inputTensorInfo1 = GetTensorInfoForTfLiteTensor(tfLiteInputTensor1);
const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
+ // Check if we need to expand the dims of any of the input tensor infos.
+ // This is required for a few of the backends.
+ if(inputTensorInfo0.GetNumDimensions() != inputTensorInfo1.GetNumDimensions())
+ {
+ ExpandTensorRankToEqual(inputTensorInfo0, inputTensorInfo1);
+ }
+
armnn::ComparisonOperation comparisonOperation = armnn::ComparisonOperation::Equal;
switch(tfLiteComparisonOperatorCode)
{
@@ -122,17 +129,7 @@ TfLiteStatus VisitComparisonOperator(DelegateData& delegateData,
return kTfLiteError;
}
- auto reshapeLayer = BroadcastTensor(inputTensorInfo0,
- inputTensorInfo1,
- comparisonLayer,
- tfLiteContext,
- tfLiteNode,
- delegateData);
- if (!reshapeLayer)
- {
- return kTfLiteError;
- }
- return kTfLiteOk;
+ return Connect(comparisonLayer, tfLiteNode, delegateData);
}
} // namespace armnnDelegate
diff --git a/delegate/src/Control.hpp b/delegate/src/Control.hpp
index 17f23d81ad..a3ea6e92a7 100644
--- a/delegate/src/Control.hpp
+++ b/delegate/src/Control.hpp
@@ -172,7 +172,10 @@ TfLiteStatus VisitConcatenationOperator(DelegateData& delegateData,
armnn::IOutputSlot& outputSlot = concatenationLayer->GetOutputSlot(0);
outputSlot.SetTensorInfo(outputTensorInfo);
- Connect(concatenationLayer, tfLiteNode, delegateData);
+ if(Connect(concatenationLayer, tfLiteNode, delegateData) != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
if (activationType == kTfLiteActNone)
{
diff --git a/delegate/src/Convolution.hpp b/delegate/src/Convolution.hpp
index a8559e2548..31cb2ab9ac 100644
--- a/delegate/src/Convolution.hpp
+++ b/delegate/src/Convolution.hpp
@@ -222,7 +222,10 @@ TfLiteStatus VisitConv2dOperator(DelegateData& delegateData,
armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
outputSlot.SetTensorInfo(outputTensorInfo);
- Connect(layer, tfLiteNode, delegateData);
+ if(Connect(layer, tfLiteNode, delegateData) != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
if (!tfLiteNodeParameters)
{
@@ -408,7 +411,10 @@ TfLiteStatus VisitConv3dOperator(DelegateData& delegateData,
armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
outputSlot.SetTensorInfo(outputTensorInfo);
- Connect(layer, tfLiteNode, delegateData);
+ if(Connect(layer, tfLiteNode, delegateData) != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
if (!tfLiteNodeParameters)
{
@@ -624,7 +630,11 @@ TfLiteStatus VisitDepthwiseConv2dOperator(DelegateData& delegateData,
armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
outputSlot.SetTensorInfo(outputTensorInfo);
- Connect(layer, tfLiteNode, delegateData);
+ if(Connect(layer, tfLiteNode, delegateData) != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
+
if (!tfLiteNodeParameters)
{
// No Activation
diff --git a/delegate/src/DelegateUtils.hpp b/delegate/src/DelegateUtils.hpp
index 3e74225b15..1aa9029271 100644
--- a/delegate/src/DelegateUtils.hpp
+++ b/delegate/src/DelegateUtils.hpp
@@ -13,6 +13,7 @@
#include <armnn/utility/NumericCast.hpp>
#include <armnnUtils/Permute.hpp>
+#include <armnnUtils/TensorUtils.hpp>
#include <tensorflow/lite/builtin_ops.h>
#include <tensorflow/lite/c/builtin_op_data.h>
@@ -188,91 +189,25 @@ TfLiteStatus Connect(armnn::IConnectableLayer* layer,
return kTfLiteOk;
}
-armnn::IConnectableLayer* BroadcastTensor(const armnn::TensorInfo& inputInfo0,
- const armnn::TensorInfo& inputInfo1,
- armnn::IConnectableLayer* startLayer,
- TfLiteContext* tfLiteContext,
- TfLiteNode* tfLiteNode,
- armnnDelegate::DelegateData& delegateData)
+void ExpandTensorRankToEqual(armnn::TensorInfo& inputInfo0,
+ armnn::TensorInfo& inputInfo1)
{
unsigned int inputDimensions0 = inputInfo0.GetNumDimensions();
unsigned int inputDimensions1 = inputInfo1.GetNumDimensions();
if (inputDimensions0 == inputDimensions1)
{
- auto status = Connect(startLayer, tfLiteNode, delegateData);
- return status == kTfLiteOk ? startLayer : nullptr;
+ return;
}
unsigned int biggerInputDimensions = std::max(inputDimensions0, inputDimensions1);
- unsigned int dimDifference = static_cast<unsigned int>(std::abs(armnn::numeric_cast<int>(inputDimensions0) -
- armnn::numeric_cast<int>(inputDimensions1)));
bool input0IsSmaller = inputDimensions0 < inputDimensions1;
- const armnn::TensorInfo& smallInfo = input0IsSmaller ? inputInfo0 : inputInfo1;
- const armnn::TensorShape& smallShape = smallInfo.GetShape();
-
- std::vector<unsigned int> reshapedDimensions(biggerInputDimensions, 1);
- for (unsigned int i = dimDifference; i < biggerInputDimensions; ++i)
- {
- reshapedDimensions[i] = smallShape[i - dimDifference];
- }
-
- armnn::TensorInfo reshapedInfo = smallInfo;
- reshapedInfo.SetShape(armnn::TensorShape{ armnn::numeric_cast<unsigned int>(reshapedDimensions.size()),
- reshapedDimensions.data() });
-
- armnn::ReshapeDescriptor reshapeDescriptor;
- reshapeDescriptor.m_TargetShape = reshapedInfo.GetShape();
- bool isSupported = false;
- armnn::BackendId setBackend;
- FORWARD_LAYER_SUPPORT_FUNC("RESHAPE",
- tfLiteContext,
- IsReshapeSupported,
- delegateData.m_Backends,
- isSupported,
- setBackend,
- smallInfo,
- reshapedInfo,
- reshapeDescriptor);
- if (!isSupported)
- {
- return nullptr;
- }
+ armnn::TensorInfo& smallInfo = input0IsSmaller ? inputInfo0 : inputInfo1;
+ const armnn::TensorShape& newShape = armnnUtils::ExpandDimsToRank(smallInfo.GetShape(), biggerInputDimensions);
- ARMNN_ASSERT(delegateData.m_Network != nullptr);
- // Add Reshape layer
- armnn::IConnectableLayer* reshapeLayer = delegateData.m_Network->AddReshapeLayer(reshapeDescriptor);
- reshapeLayer->SetBackendId(setBackend);
- ARMNN_ASSERT(reshapeLayer != nullptr);
- reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);
+ smallInfo.SetShape(newShape);
- if (input0IsSmaller)
- {
- delegateData.m_OutputSlotForNode[static_cast<unsigned long>(tfLiteNode->inputs->data[0])]
- ->Connect(reshapeLayer->GetInputSlot(0));
- reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
- delegateData.m_OutputSlotForNode[static_cast<unsigned long>(tfLiteNode->inputs->data[1])]
- ->Connect(startLayer->GetInputSlot(1));
- }
- else
- {
- delegateData.m_OutputSlotForNode[static_cast<unsigned long>(tfLiteNode->inputs->data[1])]
- ->Connect(reshapeLayer->GetInputSlot(0));
- reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(1));
- delegateData.m_OutputSlotForNode[static_cast<unsigned long>(tfLiteNode->inputs->data[0])]
- ->Connect(startLayer->GetInputSlot(0));
- }
-
- // Prepare output slots
- for (unsigned int outputIndex = 0; outputIndex < startLayer->GetNumOutputSlots(); ++outputIndex)
- {
- armnn::IOutputSlot& outputSlot = startLayer->GetOutputSlot(outputIndex);
- delegateData.m_OutputSlotForNode
- [static_cast<unsigned long>(tfLiteNode->outputs->data[outputIndex])] = &outputSlot;
- }
-
- return reshapeLayer;
}
TfLiteStatus FusedActivation(TfLiteContext* tfLiteContext,
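
For reference, the rank equalisation introduced above (ExpandTensorRankToEqual, backed by armnnUtils::ExpandDimsToRank) pads the lower-rank input's shape with leading 1s until both inputs have the same rank, mirroring the shape that the removed BroadcastTensor path computed before inserting an explicit RESHAPE layer. A minimal standalone sketch of that padding, assuming a plain std::vector shape and a hypothetical helper name (ExpandShapeToRank) used only for illustration:

#include <algorithm>
#include <cstddef>
#include <vector>

// Pad 'shape' with leading 1s until it has 'targetRank' dimensions.
// A shape that already has targetRank (or more) dimensions is returned unchanged.
std::vector<unsigned int> ExpandShapeToRank(const std::vector<unsigned int>& shape,
                                            std::size_t targetRank)
{
    if (shape.size() >= targetRank)
    {
        return shape;
    }
    std::vector<unsigned int> expanded(targetRank, 1u);
    std::copy(shape.begin(), shape.end(),
              expanded.begin() + static_cast<std::ptrdiff_t>(targetRank - shape.size()));
    return expanded;
}

// Example: a [4] input broadcast against a [2, 3, 4] input becomes [1, 1, 4],
// so the backend can broadcast directly and no separate reshape layer is added to the graph.
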
diff --git a/delegate/src/ElementwiseBinary.hpp b/delegate/src/ElementwiseBinary.hpp
index 8096acfefb..52c6b2434b 100644
--- a/delegate/src/ElementwiseBinary.hpp
+++ b/delegate/src/ElementwiseBinary.hpp
@@ -254,6 +254,13 @@ TfLiteStatus VisitElementwiseBinaryOperator(DelegateData& delegateData,
const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
+ // Check if we need to expand the dims of the input tensor infos.
+ // This is required for a few of the backends.
+ if(inputTensorInfo0.GetNumDimensions() != inputTensorInfo1.GetNumDimensions())
+ {
+ ExpandTensorRankToEqual(inputTensorInfo0, inputTensorInfo1);
+ }
+
auto* tfLiteNodeParameters = reinterpret_cast<TfLiteAddParams*>(tfLiteNode->builtin_data);
TfLiteFusedActivation activationType = kTfLiteActNone;
if (tfLiteNodeParameters)
@@ -363,13 +370,7 @@ TfLiteStatus VisitElementwiseBinaryOperator(DelegateData& delegateData,
return inputsTensorsProcess;
}
- auto reshapeLayer = BroadcastTensor(inputTensorInfo0,
- inputTensorInfo1,
- elementwiseBinaryLayer,
- tfLiteContext,
- tfLiteNode,
- delegateData);
- if (!reshapeLayer)
+ if(Connect(elementwiseBinaryLayer, tfLiteNode, delegateData) != kTfLiteOk)
{
return kTfLiteError;
}
diff --git a/delegate/src/Gather.hpp b/delegate/src/Gather.hpp
index 9e98966471..9125997417 100644
--- a/delegate/src/Gather.hpp
+++ b/delegate/src/Gather.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020,2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020,2022-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -101,8 +101,6 @@ TfLiteStatus VisitGatherOperator(DelegateData& delegateData,
return inputsTensorsProcess;
}
- Connect(layer, tfLiteNode, delegateData);
-
- return kTfLiteOk;
+ return Connect(layer, tfLiteNode, delegateData);
}
} // namespace armnnDelegate
\ No newline at end of file
diff --git a/delegate/src/GatherNd.hpp b/delegate/src/GatherNd.hpp
index f2192f77c3..cf526e1995 100644
--- a/delegate/src/GatherNd.hpp
+++ b/delegate/src/GatherNd.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -77,8 +77,6 @@ TfLiteStatus VisitGatherNdOperator(DelegateData& delegateData,
return inputsTensorsProcess;
}
- Connect(layer, tfLiteNode, delegateData);
-
- return kTfLiteOk;
+ return Connect(layer, tfLiteNode, delegateData);
}
} // namespace armnnDelegate
\ No newline at end of file
diff --git a/delegate/src/LogicalBinary.hpp b/delegate/src/LogicalBinary.hpp
index b6a8f5d5f6..d71618ee9c 100644
--- a/delegate/src/LogicalBinary.hpp
+++ b/delegate/src/LogicalBinary.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -46,6 +46,13 @@ TfLiteStatus VisitLogicalBinaryOperator(DelegateData& delegateData,
armnn::TensorInfo inputTensorInfo1 = GetTensorInfoForTfLiteTensor(tfLiteInputTensor1);
const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
+ // Check if we need to expand the dims of any of the input tensor infos.
+ // This is required for a few of the backends.
+ if(inputTensorInfo0.GetNumDimensions() != inputTensorInfo1.GetNumDimensions())
+ {
+ ExpandTensorRankToEqual(inputTensorInfo0, inputTensorInfo1);
+ }
+
// Setup descriptor and assign operation
armnn::LogicalBinaryDescriptor desc;
desc.m_Operation = binaryOperation;
@@ -89,18 +96,7 @@ TfLiteStatus VisitLogicalBinaryOperator(DelegateData& delegateData,
return inputsTensorsProcess;
}
- // LogicalBinary operators support broadcasting
- auto reshapeLayer = BroadcastTensor(inputTensorInfo0,
- inputTensorInfo1,
- logicalBinaryLayer,
- tfLiteContext,
- tfLiteNode,
- delegateData);
- if (!reshapeLayer)
- {
- return kTfLiteError;
- }
- return kTfLiteOk;
+ return Connect(logicalBinaryLayer, tfLiteNode, delegateData);
}
} // namespace armnnDelegate
diff --git a/delegate/src/test/ElementwiseBinaryTest.cpp b/delegate/src/test/ElementwiseBinaryTest.cpp
index 9d03204263..8099efebff 100644
--- a/delegate/src/test/ElementwiseBinaryTest.cpp
+++ b/delegate/src/test/ElementwiseBinaryTest.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020-2021, 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -885,7 +885,7 @@ TEST_CASE ("ADD_Broadcast_CpuAcc_Test")
AddBroadcastTest(backends);
}
-TEST_CASE ("ADD_Actiation_CpuAcc_Test")
+TEST_CASE ("ADD_Activation_CpuAcc_Test")
{
std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
AddActivationTest(backends);
@@ -1017,7 +1017,7 @@ TEST_CASE ("ADD_Constant_Input_CpuRef_Test")
AddConstInputTest(backends);
}
-TEST_CASE ("ADD_Actiation_CpuRef_Test")
+TEST_CASE ("ADD_Activation_CpuRef_Test")
{
std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
AddActivationTest(backends);