aboutsummaryrefslogtreecommitdiff
path: root/delegate
diff options
context:
space:
mode:
Diffstat (limited to 'delegate')
-rw-r--r--delegate/CMakeLists.txt2
-rw-r--r--delegate/classic/src/ArgMinMax.hpp11
-rw-r--r--delegate/classic/src/BroadcastTo.hpp12
-rw-r--r--delegate/classic/src/Control.hpp12
-rw-r--r--delegate/classic/src/Convolution.hpp24
-rw-r--r--delegate/classic/src/ElementwiseBinary.hpp31
-rw-r--r--delegate/classic/src/Fill.hpp12
-rw-r--r--delegate/classic/src/FullyConnected.hpp12
-rw-r--r--delegate/classic/src/Pad.hpp15
-rw-r--r--delegate/classic/src/Redefine.hpp30
-rw-r--r--delegate/classic/src/Reduce.hpp12
-rw-r--r--delegate/classic/src/Shape.hpp12
-rw-r--r--delegate/classic/src/Split.hpp30
-rw-r--r--delegate/classic/src/StridedSlice.hpp73
-rw-r--r--delegate/classic/src/Unpack.hpp15
-rw-r--r--delegate/common/src/DelegateUtils.hpp15
-rw-r--r--delegate/common/src/test/DelegateUtilsTest.cpp54
-rw-r--r--delegate/opaque/src/ArgMinMax.hpp11
-rw-r--r--delegate/opaque/src/BroadcastTo.hpp12
-rw-r--r--delegate/opaque/src/Control.hpp12
-rw-r--r--delegate/opaque/src/Convolution.hpp22
-rw-r--r--delegate/opaque/src/ElementwiseBinary.hpp24
-rw-r--r--delegate/opaque/src/Fill.hpp12
-rw-r--r--delegate/opaque/src/FullyConnected.hpp12
-rw-r--r--delegate/opaque/src/Pad.hpp83
-rw-r--r--delegate/opaque/src/Redefine.hpp31
-rw-r--r--delegate/opaque/src/Reduce.hpp12
-rw-r--r--delegate/opaque/src/Shape.hpp12
-rw-r--r--delegate/opaque/src/Split.hpp32
-rw-r--r--delegate/opaque/src/StridedSlice.hpp73
-rw-r--r--delegate/opaque/src/Unpack.hpp13
-rw-r--r--delegate/opaque/src/armnn_delegate.cpp4
-rw-r--r--delegate/test/DelegateOptionsTest.cpp24
-rw-r--r--delegate/test/SplitTest.cpp34
-rw-r--r--delegate/test/SplitTestHelper.hpp17
-rw-r--r--delegate/test/UnpackTest.cpp40
36 files changed, 765 insertions, 87 deletions
diff --git a/delegate/CMakeLists.txt b/delegate/CMakeLists.txt
index ebde7c69ce..0b27baa968 100644
--- a/delegate/CMakeLists.txt
+++ b/delegate/CMakeLists.txt
@@ -3,7 +3,7 @@
# SPDX-License-Identifier: MIT
#
-cmake_minimum_required (VERSION 3.7.0)
+cmake_minimum_required (VERSION 3.22)
project(armnnDelegate)
set(CMAKE_CXX_STANDARD 17)
set(CMAKE_CXX_STANDARD_REQUIRED ON)
diff --git a/delegate/classic/src/ArgMinMax.hpp b/delegate/classic/src/ArgMinMax.hpp
index 3729b3bd83..0ae89be413 100644
--- a/delegate/classic/src/ArgMinMax.hpp
+++ b/delegate/classic/src/ArgMinMax.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -39,6 +39,15 @@ TfLiteStatus VisitArgMinMaxOperator(DelegateData& delegateData,
const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
+ if(outputTensorInfo.GetShape().GetDimensionality() == armnn::Dimensionality::NotSpecified)
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: NotSpecified Dimensionality is not supported in operator #%d node #%d: ",
+ argMinMaxOperatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+
// Get const axis value from model and set it to descriptor.
const TfLiteTensor& tfLiteAxisTensor = tfLiteTensors[tfLiteNode->inputs->data[1]];
if (!IsValid(tfLiteContext, tfLiteAxisTensor, argMinMaxOperatorCode, nodeIndex))
diff --git a/delegate/classic/src/BroadcastTo.hpp b/delegate/classic/src/BroadcastTo.hpp
index 92aed79982..2e2b3ab155 100644
--- a/delegate/classic/src/BroadcastTo.hpp
+++ b/delegate/classic/src/BroadcastTo.hpp
@@ -1,11 +1,12 @@
//
-// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
#include <armnn/utility/IgnoreUnused.hpp>
+#include <DelegateUtils.hpp>
#include <tensorflow/lite/builtin_ops.h>
#include <tensorflow/lite/c/builtin_op_data.h>
@@ -83,6 +84,15 @@ namespace armnnDelegate
const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor);
+ if (ZeroDimPresent({inputTensorInfo, outputTensorInfo}))
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: Zero dimension tensors are not supported in operator #%d node #%d: ",
+ broadcastToOperatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+
auto* shapeData = tflite::GetTensorData<int32_t>(&tfLiteShapeTensor);
auto shapeTensorNum = tfLiteShapeTensor.dims->data[0];
diff --git a/delegate/classic/src/Control.hpp b/delegate/classic/src/Control.hpp
index 0adf262c23..2f5181e7ea 100644
--- a/delegate/classic/src/Control.hpp
+++ b/delegate/classic/src/Control.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -229,6 +229,16 @@ TfLiteStatus VisitMeanOperator(DelegateData& delegateData,
const armnn::TensorInfo& axisTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteAxisTensor);
const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
+ // Check for unsupported 0-size dimensions in the tensor shapes
+ if(ZeroDimPresent({inputTensorInfo, axisTensorInfo, outputTensorInfo}))
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnOpaqueDelegate: Zero dimension tensors are not supported in operator #%d node #%d",
+ tfLiteMeanOperatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+
auto* axisTensorData = tflite::GetTensorData<int32_t>(&tfLiteAxisTensor);
std::vector<int32_t> axis;
diff --git a/delegate/classic/src/Convolution.hpp b/delegate/classic/src/Convolution.hpp
index 71ecd4c97a..fb19f62f32 100644
--- a/delegate/classic/src/Convolution.hpp
+++ b/delegate/classic/src/Convolution.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -645,6 +645,22 @@ TfLiteStatus VisitTransposeConv2dOperator(DelegateData& delegateData,
const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
const armnn::TensorInfo& filterTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteFilterTensor);
+ TfLiteFusedActivation activationType = kTfLiteActNone;
+ if (parameters->activation != kTfLiteActNone)
+ {
+ activationType = parameters->activation;
+
+ TfLiteStatus activationStatus = ValidateFusedActivationOperator(delegateData,
+ tfLiteContext,
+ outputTensorInfo,
+ outputTensorInfo,
+ activationType);
+ if(activationStatus != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
+ }
+
// TfLite uses NHWC tensors
const unsigned int inputHeight = inputTensorInfo.GetShape()[1];
const unsigned int inputWidth = inputTensorInfo.GetShape()[2];
@@ -779,6 +795,12 @@ TfLiteStatus VisitTransposeConv2dOperator(DelegateData& delegateData,
delegateData.m_OutputSlotForNode[static_cast<unsigned int>(tfLiteNode->outputs->data[outputIndex])] =
&outputSlot;
}
+
+ if (activationType != kTfLiteActNone)
+ {
+ return FusedActivation(tfLiteContext, tfLiteNode, activationType, layer, 0, delegateData, nodeIndex);
+ }
+
return kTfLiteOk;
}
diff --git a/delegate/classic/src/ElementwiseBinary.hpp b/delegate/classic/src/ElementwiseBinary.hpp
index 8309a79d38..4be8e295f8 100644
--- a/delegate/classic/src/ElementwiseBinary.hpp
+++ b/delegate/classic/src/ElementwiseBinary.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -254,9 +254,10 @@ std::pair<armnn::IConnectableLayer*, armnn::IConnectableLayer*> AddFloorDivLayer
const armnn::TensorInfo& outputTensorInfo,
int nodeIndex)
{
- auto divName = GetLayerName(armnn::BinaryOperation::Div, nodeIndex);
+ auto layerName = "FloorDiv:" + std::to_string(nodeIndex);
armnn::IConnectableLayer* divisionLayer = delegateData.m_Network->AddElementwiseBinaryLayer(
- armnn::BinaryOperation::Div, divName.c_str());
+ armnn::BinaryOperation::Div, layerName.c_str());
+
// if the output of the div is Signed32 the Floor layer is not required
if (armnn::DataType::Signed32 == outputTensorInfo.GetDataType())
{
@@ -264,7 +265,8 @@ std::pair<armnn::IConnectableLayer*, armnn::IConnectableLayer*> AddFloorDivLayer
}
armnn::IOutputSlot& outputSlot = divisionLayer->GetOutputSlot(0);
outputSlot.SetTensorInfo(outputTensorInfo);
- auto floorName = GetLayerName(armnn::LayerType::Floor, nodeIndex);
+
+ auto floorName = GetLayerName(armnn::BinaryOperation::Div, nodeIndex);
armnn::IConnectableLayer* floorLayer = delegateData.m_Network->AddFloorLayer(floorName.c_str());
outputSlot.Connect(floorLayer->GetInputSlot(0));
return std::make_pair(divisionLayer, floorLayer);
@@ -312,9 +314,28 @@ TfLiteStatus VisitElementwiseBinaryOperator(DelegateData& delegateData,
armnn::TensorInfo inputTensorInfo0 = GetTensorInfoForTfLiteTensor(tfLiteInputTensor0);
armnn::TensorInfo inputTensorInfo1 = GetTensorInfoForTfLiteTensor(tfLiteInputTensor1);
-
const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
+ // Check for unspecified dimensions in the output tensor
+ if (outputTensorInfo.GetShape().GetDimensionality() == armnn::Dimensionality::NotSpecified)
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: Shape dimensionality is not specified in operator #%d node #%d: ",
+ elementwiseBinaryOperatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+
+ // Check for unsupported 0-size dimensions in the tensor shapes
+ if(ZeroDimPresent({inputTensorInfo0, inputTensorInfo1, outputTensorInfo}))
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: Zero dimension tensors are not supported in operator #%d node #%d",
+ elementwiseBinaryOperatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+
// Check if we need to expand the dims of the input tensor infos.
// This is required for a few of the backends.
if(inputTensorInfo0.GetNumDimensions() != inputTensorInfo1.GetNumDimensions())
diff --git a/delegate/classic/src/Fill.hpp b/delegate/classic/src/Fill.hpp
index e0ba2f9b75..7a99dee7a1 100644
--- a/delegate/classic/src/Fill.hpp
+++ b/delegate/classic/src/Fill.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -52,8 +52,18 @@ TfLiteStatus VisitFillOperator(DelegateData& delegateData,
}
armnn::TensorInfo inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
+ armnn::TensorInfo fillTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteFillTensor);
const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
+ if(!fillTensorInfo.IsConstant())
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: FILL tensor must be a constant input in operator #%d node #%d: ",
+ tfLiteFillOperatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+
armnn::FillDescriptor descriptor;
switch (tfLiteFillTensor.type)
{
diff --git a/delegate/classic/src/FullyConnected.hpp b/delegate/classic/src/FullyConnected.hpp
index 2d4e987942..45ac30cf10 100644
--- a/delegate/classic/src/FullyConnected.hpp
+++ b/delegate/classic/src/FullyConnected.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -58,6 +58,16 @@ TfLiteStatus VisitFullyConnectedOperator(DelegateData& delegateData,
const armnn::TensorInfo& weightsTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteWeightsTensor);
const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
+ // Check for zero dimension in input and output tensors
+ if(ZeroDimPresent({inputTensorInfo, outputTensorInfo}))
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: Zero dimension tensors are not supported in operator #%d node #%d: ",
+ operatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+
// Check that we support fused activation before we attempt to create a layer
auto* tfLiteNodeParameters = reinterpret_cast<TfLiteFullyConnectedParams *>(tfLiteNode->builtin_data);
TfLiteFusedActivation activationType=kTfLiteActNone;
diff --git a/delegate/classic/src/Pad.hpp b/delegate/classic/src/Pad.hpp
index f8e8014d18..b762006bc1 100644
--- a/delegate/classic/src/Pad.hpp
+++ b/delegate/classic/src/Pad.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -78,6 +78,19 @@ TfLiteStatus VisitPadOperator(DelegateData& delegateData,
else if (tfLitePadOperatorCode == kTfLiteBuiltinPadv2)
{
const TfLiteTensor& tfLitepaddingValue = tfLiteTensors[tfLiteNode->inputs->data[2]];
+
+ // Fall back to TFLite if the padding value input is passed through a non-constant tensor,
+ // as the armnn delegate doesn't handle non-constant but non-network tensor input well
+ if(tfLitepaddingValue.allocation_type != kTfLiteMmapRo)
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: Unsupported padding input through non-const tensor "
+ "in operator #%d node #%d",
+ tfLitePadOperatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+
armnn::TensorInfo paddingValueTensorInfo = GetTensorInfoForTfLiteTensor(tfLitepaddingValue);
if (paddingValueTensorInfo.GetNumElements() != 1)
{
diff --git a/delegate/classic/src/Redefine.hpp b/delegate/classic/src/Redefine.hpp
index c3422a2fb5..a7760c71c4 100644
--- a/delegate/classic/src/Redefine.hpp
+++ b/delegate/classic/src/Redefine.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -116,6 +116,16 @@ TfLiteStatus VisitReshapeOperator(DelegateData& delegateData,
const armnn::TensorInfo& inputTensorInfo0 = GetTensorInfoForTfLiteTensor(tfLiteInputTensor0);
const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
+ // Check for unsupported 0-size dimensions in the input/output tensor shapes
+ if(ZeroDimPresent({inputTensorInfo0, outputTensorInfo}))
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnOpaqueDelegate: Zero dimension tensors are not supported in operator #%d node #%d",
+ operatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+
armnn::ReshapeDescriptor reshapeDesc;
std::vector<int32_t> targetShape;
@@ -264,18 +274,20 @@ TfLiteStatus VisitSqueezeOperator(DelegateData& delegateData,
const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
std::vector<uint32_t> squeezeDim;
+
// A single negative dim index is interpreted as a negative index in python
// Meaning the index will be the shape size plus the negative index value
- if (options->num_squeeze_dims == 1 && options->squeeze_dims[0] < 0)
- {
- int32_t dim = static_cast<int32_t>(inputTensorInfo.GetShape().GetNumDimensions()) + options->squeeze_dims[0];
- squeezeDim.push_back(static_cast<uint32_t>(dim));
- }
- else
+ for (int32_t i = 0; i < options->num_squeeze_dims; ++i)
{
- for (int32_t i = 0; i < options->num_squeeze_dims; ++i)
+ int32_t dim = options->squeeze_dims[i];
+ if(dim < 0)
+ {
+ dim += static_cast<int32_t>(inputTensorInfo.GetShape().GetNumDimensions());
+ squeezeDim.emplace_back(dim);
+ }
+ else
{
- squeezeDim.push_back(static_cast<uint32_t>(options->squeeze_dims[i]));
+ squeezeDim.emplace_back(static_cast<uint32_t>(options->squeeze_dims[i]));
}
}
diff --git a/delegate/classic/src/Reduce.hpp b/delegate/classic/src/Reduce.hpp
index 8731ef5235..ae0e1746a4 100644
--- a/delegate/classic/src/Reduce.hpp
+++ b/delegate/classic/src/Reduce.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -49,6 +49,16 @@ TfLiteStatus VisitReduceOperator(DelegateData& delegateData,
const armnn::TensorInfo& axisTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteAxisTensor);
auto* axisTensorData = tflite::GetTensorData<int32_t>(&tfLiteAxisTensor);
+ // Check for unsupported 0-size dimensions in the tensor shapes
+ if(ZeroDimPresent({inputTensorInfo, axisTensorInfo, outputTensorInfo}))
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnOpaqueDelegate: Zero dimension tensors are not supported in operator #%d node #%d",
+ reduceOperatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+
std::vector<int32_t> axis;
// Add axis data to vector to be converter to unsigned int and assigned to descriptor axis.
if (axisTensorData != nullptr)
diff --git a/delegate/classic/src/Shape.hpp b/delegate/classic/src/Shape.hpp
index 10800b843b..e728788d6f 100644
--- a/delegate/classic/src/Shape.hpp
+++ b/delegate/classic/src/Shape.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -41,6 +41,16 @@ TfLiteStatus VisitShapeOperator(DelegateData& delegateData,
const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
+ // Check for zero dimension in input and output tensors
+ if(ZeroDimPresent({inputTensorInfo, outputTensorInfo}))
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: Zero dimension tensors are not supported in operator #%d node #%d: ",
+ operatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+
auto* shapeParameters = reinterpret_cast<TfLiteShapeParams*>(tfLiteNode->builtin_data);
if (shapeParameters->out_type != kTfLiteInt32 && shapeParameters->out_type != kTfLiteInt64)
{
diff --git a/delegate/classic/src/Split.hpp b/delegate/classic/src/Split.hpp
index 57b7f8074e..d8b2fafa42 100644
--- a/delegate/classic/src/Split.hpp
+++ b/delegate/classic/src/Split.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020,2022-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020,2022-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -70,11 +70,23 @@ TfLiteStatus VisitSplitOperator(DelegateData& delegateData,
for (unsigned int i = 0; i < numSplits; ++i)
{
const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[i]];
+
+ const armnn::TensorInfo& outputTensorShape = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
+
+ if (ZeroDimPresent({outputTensorShape}))
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: Zero dimension tensors are not supported in operator #%d node #%d: ",
+ tfLiteSplitOperatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+
if (!IsValid(tfLiteContext, tfLiteOutputTensor, tfLiteSplitOperatorCode, nodeIndex))
{
return kTfLiteError;
}
- outputs.push_back(GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true));
+ outputs.push_back(outputTensorShape);
}
const std::vector<std::reference_wrapper<armnn::TensorInfo>> outputTensorInfos(outputs.begin(), outputs.end());
@@ -242,7 +254,19 @@ TfLiteStatus VisitSplitVOperator(DelegateData& delegateData,
{
return kTfLiteError;
}
- outputs.push_back(GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true));
+
+ const armnn::TensorInfo& outputTensorShape = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
+
+ if (ZeroDimPresent({outputTensorShape}))
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: Zero dimension tensors are not supported in operator #%d node #%d: ",
+ tfLiteSplitVOperatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+
+ outputs.push_back(outputTensorShape);
}
const std::vector<std::reference_wrapper<armnn::TensorInfo>> outputTensorInfos(outputs.begin(), outputs.end());
diff --git a/delegate/classic/src/StridedSlice.hpp b/delegate/classic/src/StridedSlice.hpp
index 43f96411b6..5f0a8edf7e 100644
--- a/delegate/classic/src/StridedSlice.hpp
+++ b/delegate/classic/src/StridedSlice.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -37,10 +37,33 @@ TfLiteStatus VisitStridedSliceOperator(DelegateData& delegateData,
{
return kTfLiteError;
}
+ // Checking for unsupported non-const non-network input tensors
+ // Index 0 is the input, index 1-3 should be constant
+ if(i > 0 && inputTensor->allocation_type != kTfLiteMmapRo)
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: Unsupported constant data input through non-const tensor "
+ "in operator #%d node #%d",
+ sliceOperatorCode, nodeIndex);
+ return kTfLiteError;
+ }
}
// We save the begin, end and strides tensors in our descriptor. Therefore we have to read those values from inputs
int inputRank = tfLiteInputs[0]->dims->size;
+
+ // Input tensors of rank greater than 4 are unsupported - delegate back to TFLite runtime
+ if(inputRank > 4)
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLitearmnnOpaqueDelegate: Tensors of rank greater than 4 are unsupported"
+ " in the StridedSlice operator. Operator: #%d node #%d: ",
+ sliceOperatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+
auto ReadInt32Input = [&](int inputIndex, std::vector<int32_t>& outputData) -> TfLiteStatus
{
if (tfLiteInputs[inputIndex]->type != kTfLiteInt32)
@@ -103,6 +126,44 @@ TfLiteStatus VisitStridedSliceOperator(DelegateData& delegateData,
descriptor.m_ShrinkAxisMask = stridedSliceParams->shrink_axis_mask;
descriptor.m_DataLayout = armnn::DataLayout::NHWC;
+ // Checking begin and end bounds with ShrinkAxisMask
+ for(unsigned int i = 0; i < inputRank; ++i)
+ {
+ if((descriptor.m_ShrinkAxisMask & (1 << i)) &&
+ (((descriptor.m_Begin[i] - descriptor.m_End[i]) > 1) ||
+ ((descriptor.m_Begin[i] - descriptor.m_End[i]) < -1)))
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLitearmnnDelegate: Invalid combination of ShrinkAxisMask, Begin- and End-Tensor values "
+ "in the StridedSlice operator. Operator: #%d node #%d: ",
+ sliceOperatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+ }
+
+ // Checking that NewAxisMask doesn't extend the output beyond the supported rank
+ if(inputRank >= 3 && (descriptor.m_NewAxisMask > 4 || descriptor.m_NewAxisMask == 3))
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLitearmnnOpaqueDelegate: Maximum output tensor rank is 4, the currently set NewAxisMask "
+ "results in an unsupported higher rank. Operator: #%d node #%d: ",
+ sliceOperatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+
+ // The variable 'offset' is documented in TFLite builtin_op_data.h:
+ // "If true, then the end tensor is an offset of the begin tensor."
+ if(stridedSliceParams->offset &&
+ descriptor.m_Begin.size() == descriptor.m_End.size())
+ {
+ for(unsigned int i = 0; i < descriptor.m_End.size(); ++i)
+ {
+ descriptor.m_End[i] += descriptor.m_Begin[i];
+ }
+ }
+
// Validate output
const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
if (!IsValid(tfLiteContext, tfLiteOutputTensor, sliceOperatorCode, nodeIndex))
@@ -113,6 +174,16 @@ TfLiteStatus VisitStridedSliceOperator(DelegateData& delegateData,
const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(*tfLiteInputs[0]);
const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor);
+ // Check for unsupported 0-size dimensions in the input/output tensor shapes
+ if(ZeroDimPresent({inputTensorInfo, outputTensorInfo}))
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnOpaqueDelegate: Zero dimension tensors are not supported in operator #%d node #%d",
+ sliceOperatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+
bool isSupported = false;
armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
diff --git a/delegate/classic/src/Unpack.hpp b/delegate/classic/src/Unpack.hpp
index b3336ec990..15484f807f 100644
--- a/delegate/classic/src/Unpack.hpp
+++ b/delegate/classic/src/Unpack.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -112,11 +112,22 @@ TfLiteStatus VisitUnpackOperator(DelegateData& delegateData,
for (unsigned int i = 0; i < unpackNum; ++i)
{
const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[i]];
+
if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
{
return kTfLiteError;
}
- outputs.push_back(GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true));
+
+ armnn::TensorInfo outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
+ armnn::TensorShape shape = outputTensorInfo.GetShape();
+ if (shape.GetDimensionality() == armnn::Dimensionality::NotSpecified)
+ {
+ shape.SetNumDimensions(1, true);
+ shape.SetDimensionSize(0, 1);
+ outputTensorInfo.SetShape(shape);
+ }
+
+ outputs.push_back(outputTensorInfo);
}
const std::vector<std::reference_wrapper<armnn::TensorInfo>> outputTensorInfos(outputs.begin(), outputs.end());
diff --git a/delegate/common/src/DelegateUtils.hpp b/delegate/common/src/DelegateUtils.hpp
index 96767ff78c..245fc9be90 100644
--- a/delegate/common/src/DelegateUtils.hpp
+++ b/delegate/common/src/DelegateUtils.hpp
@@ -300,4 +300,19 @@ armnn::TensorInfo OutputShapeOfSqueeze(std::vector<uint32_t> squeezeDims,
return outTensorInfo;
}
+bool ZeroDimPresent(std::initializer_list<armnn::TensorInfo> tensorInfoList)
+{
+ for (armnn::TensorInfo tensorInfo : tensorInfoList)
+ {
+ for (unsigned int i = 0; i < tensorInfo.GetNumDimensions(); ++i)
+ {
+ if (tensorInfo.GetShape()[i] == 0)
+ {
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
} // namespace anonymous
diff --git a/delegate/common/src/test/DelegateUtilsTest.cpp b/delegate/common/src/test/DelegateUtilsTest.cpp
new file mode 100644
index 0000000000..5ce470e289
--- /dev/null
+++ b/delegate/common/src/test/DelegateUtilsTest.cpp
@@ -0,0 +1,54 @@
+//
+// Copyright © 2024 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include <armnn/Tensor.hpp>
+#include <doctest/doctest.h>
+#include <common/src/DelegateUtils.hpp>
+
+namespace armnn
+{
+
+TEST_SUITE("DelegateUtils_Tests")
+{
+ TEST_CASE("Zero_Dim_In_Input_Test_True")
+ {
+ unsigned int inputDimSizes[] = {0, 1, 2, 3};
+ TensorInfo inputTensor = armnn::TensorInfo(4, inputDimSizes, DataType::Float32);
+
+ CHECK(ZeroDimPresent({inputTensor}) == true);
+ }
+
+ TEST_CASE("Zero_Dim_In_Input_Test_False")
+ {
+ unsigned int inputDimSizes[] = {1, 2, 3, 4};
+ TensorInfo inputTensor = armnn::TensorInfo(4, inputDimSizes, DataType::Float32);
+
+ CHECK(ZeroDimPresent({inputTensor}) == false);
+ }
+
+ TEST_CASE("Zero_Dim_In_Output_Test_True")
+ {
+ unsigned int inputDimSizes[] = {1, 2, 3, 4};
+ TensorInfo inputTensor = armnn::TensorInfo(4, inputDimSizes, DataType::Float32);
+
+ unsigned int outputDimSizes[] = {0, 1, 2, 3};
+ TensorInfo outputTensor = armnn::TensorInfo(4, outputDimSizes, DataType::Float32);
+
+ CHECK(ZeroDimPresent({inputTensor, outputTensor}) == true);
+ }
+
+ TEST_CASE("Zero_Dim_In_Output_Test_False")
+ {
+ unsigned int inputDimSizes[] = {1, 2, 3, 4};
+ TensorInfo inputTensor = armnn::TensorInfo(4, inputDimSizes, DataType::Float32);
+
+ unsigned int outputDimSizes[] = {1, 2, 3, 4};
+ TensorInfo outputTensor = armnn::TensorInfo(4, outputDimSizes, DataType::Float32);
+
+ CHECK(ZeroDimPresent({inputTensor, outputTensor}) == false);
+ }
+}
+
+} // namespace armnn \ No newline at end of file
diff --git a/delegate/opaque/src/ArgMinMax.hpp b/delegate/opaque/src/ArgMinMax.hpp
index 5ea7aa8655..f5b9e66ab2 100644
--- a/delegate/opaque/src/ArgMinMax.hpp
+++ b/delegate/opaque/src/ArgMinMax.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -71,6 +71,15 @@ TfLiteStatus VisitArgMinMaxOperator(DelegateData& delegateData,
const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteInputTensor);
const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteOutputTensor, true);
+ if(outputTensorInfo.GetShape().GetDimensionality() == armnn::Dimensionality::NotSpecified)
+ {
+ TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: NotSpecified Dimensionality is not supported in operator #%d node #%d: ",
+ argMinMaxOperatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+
// Get const axis value from model and set it to descriptor.
if (!IsValid(tfLiteContext, tfLiteAxisTensor, argMinMaxOperatorCode, nodeIndex))
{
diff --git a/delegate/opaque/src/BroadcastTo.hpp b/delegate/opaque/src/BroadcastTo.hpp
index 379587546f..8fcea9393c 100644
--- a/delegate/opaque/src/BroadcastTo.hpp
+++ b/delegate/opaque/src/BroadcastTo.hpp
@@ -1,11 +1,12 @@
//
-// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
#include <OpaqueDelegateUtils.hpp>
+#include <DelegateUtils.hpp>
namespace armnnOpaqueDelegate
{
@@ -102,6 +103,15 @@ namespace armnnOpaqueDelegate
const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteOutputTensor,
true);
+ if (ZeroDimPresent({inputTensorInfo, outputTensorInfo}))
+ {
+ TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnOpaqueDelegate: Zero dimension tensors are not supported in operator #%d node #%d: ",
+ broadcastToOperatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+
auto* shapeData = static_cast<int32_t*>(TfLiteOpaqueTensorData(tfLiteShapeTensor));
int32_t shapeTensorNum = TfLiteOpaqueTensorDim(tfLiteShapeTensor, 0);
diff --git a/delegate/opaque/src/Control.hpp b/delegate/opaque/src/Control.hpp
index 9aef8380af..b7862ddee5 100644
--- a/delegate/opaque/src/Control.hpp
+++ b/delegate/opaque/src/Control.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -230,6 +230,16 @@ TfLiteStatus VisitMeanOperator(DelegateData& delegateData,
const armnn::TensorInfo& axisTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteAxisTensor);
const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteOutputTensor, true);
+ // Check for unsupported 0-size dimensions in the tensor shapes
+ if(ZeroDimPresent({inputTensorInfo, axisTensorInfo, outputTensorInfo}))
+ {
+ TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnOpaqueDelegate: Zero dimension tensors are not supported in operator #%d node #%d",
+ tfLiteMeanOperatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+
auto* axisTensorData = static_cast<int32_t*>(TfLiteOpaqueTensorData(tfLiteAxisTensor));
std::vector<int32_t> axis;
diff --git a/delegate/opaque/src/Convolution.hpp b/delegate/opaque/src/Convolution.hpp
index e4393e7bb0..744108713c 100644
--- a/delegate/opaque/src/Convolution.hpp
+++ b/delegate/opaque/src/Convolution.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -742,6 +742,21 @@ TfLiteStatus VisitTransposeConv2dOperator(DelegateData& delegateData,
const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteOutputTensor, true);
const armnn::TensorInfo& filterTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteFilterTensor);
+ TfLiteFusedActivation activationType = kTfLiteActNone;
+ if (parameters->activation != kTfLiteActNone)
+ {
+ activationType = parameters->activation;
+ TfLiteStatus activationStatus = ValidateFusedActivationOperator(delegateData,
+ tfLiteContext,
+ outputTensorInfo,
+ outputTensorInfo,
+ activationType);
+ if(activationStatus != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
+ }
+
// TfLite uses NHWC tensors
const unsigned int inputHeight = inputTensorInfo.GetShape()[1];
const unsigned int inputWidth = inputTensorInfo.GetShape()[2];
@@ -876,6 +891,11 @@ TfLiteStatus VisitTransposeConv2dOperator(DelegateData& delegateData,
return kTfLiteError;
}
+ if (activationType != kTfLiteActNone)
+ {
+ return FusedActivation(tfLiteContext, tfLiteNode, activationType, layer, 0, delegateData, nodeIndex);
+ }
+
return kTfLiteOk;
}
diff --git a/delegate/opaque/src/ElementwiseBinary.hpp b/delegate/opaque/src/ElementwiseBinary.hpp
index 2a67802028..ab946fa3a2 100644
--- a/delegate/opaque/src/ElementwiseBinary.hpp
+++ b/delegate/opaque/src/ElementwiseBinary.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
@@ -247,7 +247,7 @@ std::pair<armnn::IConnectableLayer*, armnn::IConnectableLayer*> AddFloorDivLayer
const armnn::TensorInfo& outputTensorInfo,
int nodeIndex)
{
- auto layerName = GetName(armnn::BinaryOperation::Div, nodeIndex);
+ auto layerName = "FloorDiv:" + std::to_string(nodeIndex);
armnn::IConnectableLayer* divisionLayer = delegateData.m_Network->AddElementwiseBinaryLayer(
armnn::BinaryOperation::Div,
layerName.c_str());
@@ -259,7 +259,7 @@ std::pair<armnn::IConnectableLayer*, armnn::IConnectableLayer*> AddFloorDivLayer
}
armnn::IOutputSlot& outputSlot = divisionLayer->GetOutputSlot(0);
outputSlot.SetTensorInfo(outputTensorInfo);
- auto floorName = GetName(armnn::LayerType::Floor, nodeIndex);
+ auto floorName = GetName(armnn::BinaryOperation::Div, nodeIndex);
armnn::IConnectableLayer* floorLayer = delegateData.m_Network->AddFloorLayer(floorName.c_str());
outputSlot.Connect(floorLayer->GetInputSlot(0));
return std::make_pair(divisionLayer, floorLayer);
@@ -326,7 +326,25 @@ TfLiteStatus VisitElementwiseBinaryOperator(DelegateData& delegateData,
armnn::TensorInfo inputTensorInfo1 = GetTensorInfoForTfLiteOpaqueTensor(tfLiteInputTensor1);
const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteOutputTensor, true);
+ // Check for unspecified dimensions in the output tensor
+ if (outputTensorInfo.GetShape().GetDimensionality() == armnn::Dimensionality::NotSpecified)
+ {
+ TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnOpaqueDelegate: Shape dimensionality is not specified in operator #%d node #%d: ",
+ elementwiseBinaryOperatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+ // Check for unsupported 0-size dimensions in the tensor shapes
+ if(ZeroDimPresent({inputTensorInfo0, inputTensorInfo1, outputTensorInfo}))
+ {
+ TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnOpaqueDelegate: Zero dimension tensors are not supported in operator #%d node #%d",
+ elementwiseBinaryOperatorCode, nodeIndex);
+ return kTfLiteError;
+ }
// Check if we need to expand the dims of the input tensor infos.
// This is required for a few of the backends.
diff --git a/delegate/opaque/src/Fill.hpp b/delegate/opaque/src/Fill.hpp
index fe27255590..c5baef987f 100644
--- a/delegate/opaque/src/Fill.hpp
+++ b/delegate/opaque/src/Fill.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -48,6 +48,16 @@ namespace armnnOpaqueDelegate
const TfLiteOpaqueTensor* tfLiteFillTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext,
inputTensors[1]);
+
+ if(TfLiteOpaqueTensorGetAllocationType(tfLiteFillTensor) != kTfLiteMmapRo)
+ {
+ TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnOpaqueDelegate: FILL tensor must be constant - not supported in operator #%d node #%d: ",
+ tfLiteFillOperatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+
if (!IsValid(tfLiteContext, tfLiteFillTensor, tfLiteFillOperatorCode, nodeIndex))
{
return kTfLiteError;
diff --git a/delegate/opaque/src/FullyConnected.hpp b/delegate/opaque/src/FullyConnected.hpp
index 7be06683a5..b7ada030db 100644
--- a/delegate/opaque/src/FullyConnected.hpp
+++ b/delegate/opaque/src/FullyConnected.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -73,6 +73,16 @@ TfLiteStatus VisitFullyConnectedOperator(DelegateData& delegateData,
const armnn::TensorInfo& weightsTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteWeightsTensor);
const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteOutputTensor, true);
+ // Check for zero dimension in input and output tensors
+ if(ZeroDimPresent({inputTensorInfo, outputTensorInfo}))
+ {
+ TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnOpaqueDelegate: Zero dimension tensors are not supported in operator #%d node #%d: ",
+ operatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+
// Check that we support fused activation before we attempt to create a layer
auto* tfLiteNodeParameters =
reinterpret_cast<TfLiteFullyConnectedParams*>(TfLiteOpaqueNodeGetBuiltinData(tfLiteNode));
diff --git a/delegate/opaque/src/Pad.hpp b/delegate/opaque/src/Pad.hpp
index 4305224003..0b9b305675 100644
--- a/delegate/opaque/src/Pad.hpp
+++ b/delegate/opaque/src/Pad.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -16,6 +16,7 @@ TfLiteStatus VisitPadOperator(DelegateData& delegateData,
int nodeIndex,
int32_t tfLitePadOperatorCode)
{
+
switch(tfLitePadOperatorCode)
{
case kTfLiteBuiltinMirrorPad:
@@ -32,6 +33,7 @@ TfLiteStatus VisitPadOperator(DelegateData& delegateData,
// Inputs
int numInputs = 0;
const int* inputTensors;
+
if (TfLiteOpaqueNodeInputs(tfLiteNode, &inputTensors, &numInputs) != kTfLiteOk)
{
TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
@@ -44,12 +46,18 @@ TfLiteStatus VisitPadOperator(DelegateData& delegateData,
const TfLiteOpaqueTensor* tfLiteInputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[0]);
if (!IsValid(tfLiteContext, tfLiteInputTensor, tfLitePadOperatorCode, nodeIndex))
{
+ TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(tfLiteContext,
+ "TfLiteArmnnOpaqueDelegate: Invalid input tensor at node #%d: ",
+ nodeIndex);
return kTfLiteError;
}
const TfLiteOpaqueTensor* tfLitePaddingTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[1]);
if (!IsValid(tfLiteContext, tfLitePaddingTensor, tfLitePadOperatorCode, nodeIndex))
{
+ TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(tfLiteContext,
+ "TfLiteArmnnOpaqueDelegate: Invalid padding tensor at node #%d: ",
+ nodeIndex);
return kTfLiteError;
}
@@ -70,6 +78,8 @@ TfLiteStatus VisitPadOperator(DelegateData& delegateData,
const TfLiteOpaqueTensor* tfLiteOutputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, outputTensors[0]);
if (!IsValid(tfLiteContext, tfLiteOutputTensor, tfLitePadOperatorCode, nodeIndex))
{
+ TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(tfLiteContext,
+ "TfLiteArmnnOpaqueDelegate: Invalid output tensor at node #%d: ", nodeIndex);
return kTfLiteError;
}
@@ -77,25 +87,57 @@ TfLiteStatus VisitPadOperator(DelegateData& delegateData,
const armnn::TensorInfo& paddingTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLitePaddingTensor);
const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteOutputTensor, true);
- // Get the padding data from the input tensor
- auto* paddingData = static_cast<int32_t*>(TfLiteOpaqueTensorData(tfLitePaddingTensor));
+ armnn::PadDescriptor descriptor;
+ // Get the padding size from the input tensor 1
size_t step = 2;
- armnn::PadDescriptor descriptor;
- for (unsigned int i = 0; i < paddingTensorInfo.GetNumElements() / step; ++i)
+ if(paddingTensorInfo.GetDataType() == armnn::DataType::Signed64)
{
- descriptor.m_PadList.emplace_back(paddingData[i * step], paddingData[i * step + 1]);
+ auto* paddingData = static_cast<int64_t*>(TfLiteOpaqueTensorData(tfLitePaddingTensor));
+ for (uint16_t i = 0; i < paddingTensorInfo.GetNumElements() / step; ++i)
+ {
+ descriptor.m_PadList.emplace_back(paddingData[i * step], paddingData[i * step + 1]);
+ }
+ }
+ else
+ {
+ auto* paddingData = static_cast<int32_t*>(TfLiteOpaqueTensorData(tfLitePaddingTensor));
+ for (uint16_t i = 0; i < paddingTensorInfo.GetNumElements() / step; ++i)
+ {
+ descriptor.m_PadList.emplace_back(paddingData[i * step], paddingData[i * step + 1]);
+ }
}
+ // Get the padding value from input tensor 2
if (tfLitePadOperatorCode == kTfLiteBuiltinPad && inputTensorInfo.IsQuantized())
{
descriptor.m_PadValue = inputTensorInfo.GetQuantizationOffset();
}
else if (tfLitePadOperatorCode == kTfLiteBuiltinPadv2)
{
- const TfLiteOpaqueTensor* tfLitepaddingValue = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext,
+ const TfLiteOpaqueTensor* tfLitePaddingValue = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext,
inputTensors[2]);
- armnn::TensorInfo paddingValueTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLitepaddingValue);
+ if (!IsValid(tfLiteContext, tfLitePaddingValue, tfLitePadOperatorCode, nodeIndex))
+ {
+ TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(tfLiteContext,
+ "TfLiteArmnnOpaqueDelegate: Invalid padding value at node #%d: ",
+ nodeIndex);
+ return kTfLiteError;
+ }
+
+ // Fall back to TFLite if the padding value input is passed through a non-constant tensor,
+ // as the armnn delegate doesn't handle non-constant but non-network tensor input well
+ if(TfLiteOpaqueTensorGetAllocationType(tfLitePaddingValue) != kTfLiteMmapRo)
+ {
+ TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnOpaqueDelegate: Unsupported padding input through non-const tensor "
+ "in operator #%d node #%d",
+ tfLitePadOperatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+
+ armnn::TensorInfo paddingValueTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLitePaddingValue);
if (paddingValueTensorInfo.GetNumElements() != 1)
{
TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
@@ -104,17 +146,21 @@ TfLiteStatus VisitPadOperator(DelegateData& delegateData,
tfLitePadOperatorCode, nodeIndex);
return kTfLiteError;
}
+
// Get the padding value from the input tensor
- switch (TfLiteOpaqueTensorType(tfLitepaddingValue))
+ switch (TfLiteOpaqueTensorType(tfLitePaddingValue))
{
case kTfLiteFloat32:
- descriptor.m_PadValue = static_cast<float*>(TfLiteOpaqueTensorData(tfLitepaddingValue))[0];
+ descriptor.m_PadValue = *static_cast<float*>(TfLiteOpaqueTensorData(tfLitePaddingValue));
+ break;
+ case kTfLiteInt32:
+ descriptor.m_PadValue = *static_cast<int32_t*>(TfLiteOpaqueTensorData(tfLitePaddingValue));
break;
case kTfLiteUInt8:
- descriptor.m_PadValue = static_cast<uint8_t*>(TfLiteOpaqueTensorData(tfLitepaddingValue))[0];
+ descriptor.m_PadValue = *static_cast<uint8_t*>(TfLiteOpaqueTensorData(tfLitePaddingValue));
break;
case kTfLiteInt8:
- descriptor.m_PadValue = static_cast<int8_t*>(TfLiteOpaqueTensorData(tfLitepaddingValue))[0];
+ descriptor.m_PadValue = *static_cast<int8_t*>(TfLiteOpaqueTensorData(tfLitePaddingValue));
break;
default:
TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
@@ -166,9 +212,9 @@ TfLiteStatus VisitPadOperator(DelegateData& delegateData,
}
armnn::BackendId setBackend;
- if (!delegateData.m_Network)
+ bool isSupported = false;
+ auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo)
{
- bool isSupported = false;
FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("PAD",
tfLiteContext,
IsPadSupported,
@@ -178,7 +224,11 @@ TfLiteStatus VisitPadOperator(DelegateData& delegateData,
inputTensorInfo,
outputTensorInfo,
descriptor);
+ };
+ if (!delegateData.m_Network)
+ {
+ validateFunc(outputTensorInfo);
return isSupported ? kTfLiteOk : kTfLiteError;
}
@@ -190,6 +240,11 @@ TfLiteStatus VisitPadOperator(DelegateData& delegateData,
armnn::IOutputSlot& outputSlot = padLayer->GetOutputSlot(0);
outputSlot.SetTensorInfo(outputTensorInfo);
+ if (ProcessInputs(padLayer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
+
return Connect(padLayer, tfLiteContext, tfLiteNode, delegateData);
}
diff --git a/delegate/opaque/src/Redefine.hpp b/delegate/opaque/src/Redefine.hpp
index 6319ca7841..477449a3bc 100644
--- a/delegate/opaque/src/Redefine.hpp
+++ b/delegate/opaque/src/Redefine.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
@@ -147,6 +147,16 @@ TfLiteStatus VisitReshapeOperator(DelegateData& delegateData,
const armnn::TensorInfo& inputTensorInfo0 = GetTensorInfoForTfLiteOpaqueTensor(tfLiteInputTensor);
const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteOutputTensor, true);
+ // Check for unsupported 0-size dimensions in the input/output tensor shapes
+ if(ZeroDimPresent({inputTensorInfo0, outputTensorInfo}))
+ {
+ TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnOpaqueDelegate: Zero dimension tensors are not supported in operator #%d node #%d",
+ operatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+
armnn::ReshapeDescriptor reshapeDesc;
std::vector<int32_t> targetShape;
@@ -326,19 +336,22 @@ TfLiteStatus VisitSqueezeOperator(DelegateData& delegateData,
std::vector<uint32_t> squeezeDim;
// A single negative dim index is interpreted as a negative index in python
// Meaning the index will be the shape size plus the negative index value
- if (options->num_squeeze_dims == 1 && options->squeeze_dims[0] < 0)
- {
- int32_t dim = static_cast<int32_t>(inputTensorInfo.GetShape().GetNumDimensions()) + options->squeeze_dims[0];
- squeezeDim.push_back(static_cast<uint32_t>(dim));
- }
- else
+
+ for (int32_t i = 0; i < options->num_squeeze_dims; ++i)
{
- for (int32_t i = 0; i < options->num_squeeze_dims; ++i)
+ int32_t dim = options->squeeze_dims[i];
+ if(dim < 0)
+ {
+ dim += static_cast<int32_t>(inputTensorInfo.GetShape().GetNumDimensions());
+ squeezeDim.emplace_back(dim);
+ }
+ else
{
- squeezeDim.push_back(static_cast<uint32_t>(options->squeeze_dims[i]));
+ squeezeDim.emplace_back(static_cast<uint32_t>(options->squeeze_dims[i]));
}
}
+
armnn::TensorInfo outputTensorInfo = OutputShapeOfSqueeze(squeezeDim, inputTensorInfo);
armnn::ReshapeDescriptor reshapeDesc;
diff --git a/delegate/opaque/src/Reduce.hpp b/delegate/opaque/src/Reduce.hpp
index a7948ae98d..c13f82b9a6 100644
--- a/delegate/opaque/src/Reduce.hpp
+++ b/delegate/opaque/src/Reduce.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
@@ -70,6 +70,16 @@ TfLiteStatus VisitReduceOperator(DelegateData& delegateData,
const armnn::TensorInfo& axisTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteAxisTensor);
auto* axisTensorData = static_cast<int*>(TfLiteOpaqueTensorData(tfLiteAxisTensor));
+ // Check for unsupported 0-size dimensions in the tensor shapes
+ if(ZeroDimPresent({inputTensorInfo, axisTensorInfo, outputTensorInfo}))
+ {
+ TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnOpaqueDelegate: Zero dimension tensors are not supported in operator #%d node #%d",
+ reduceOperatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+
std::vector<int32_t> axis;
// Add axis data to vector to be converter to unsigned int and assigned to descriptor axis.
if (axisTensorData != nullptr)
diff --git a/delegate/opaque/src/Shape.hpp b/delegate/opaque/src/Shape.hpp
index 9f15a4f739..e3e4b3aaee 100644
--- a/delegate/opaque/src/Shape.hpp
+++ b/delegate/opaque/src/Shape.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -58,6 +58,16 @@ TfLiteStatus VisitShapeOperator(DelegateData& delegateData,
const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteInputTensor);
const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteOutputTensor, true);
+ // Check for zero dimension in input and output tensors
+ if(ZeroDimPresent({inputTensorInfo, outputTensorInfo}))
+ {
+ TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnOpaqueDelegate: Zero dimension tensors are not supported in operator #%d node #%d: ",
+ operatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+
auto* shapeParameters = reinterpret_cast<TfLiteShapeParams*>(TfLiteOpaqueNodeGetBuiltinData(tfLiteNode));
if (shapeParameters->out_type != kTfLiteInt32 && shapeParameters->out_type != kTfLiteInt64)
{
diff --git a/delegate/opaque/src/Split.hpp b/delegate/opaque/src/Split.hpp
index 199f46b126..0c0c930a14 100644
--- a/delegate/opaque/src/Split.hpp
+++ b/delegate/opaque/src/Split.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -100,7 +100,19 @@ TfLiteStatus VisitSplitOperator(DelegateData& delegateData,
{
return kTfLiteError;
}
- outputs.push_back(GetTensorInfoForTfLiteOpaqueTensor(tfLiteOutputTensor, true));
+
+ const armnn::TensorInfo& outputTensorShape = GetTensorInfoForTfLiteOpaqueTensor(tfLiteOutputTensor, true);
+
+ if (ZeroDimPresent({outputTensorShape}))
+ {
+ TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnOpaqueDelegate: Zero dimension tensors are not supported in operator #%d node #%d: ",
+ tfLiteSplitOperatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+
+ outputs.push_back(outputTensorShape);
}
const std::vector<std::reference_wrapper<armnn::TensorInfo>> outputTensorInfos(outputs.begin(), outputs.end());
@@ -294,16 +306,30 @@ TfLiteStatus VisitSplitVOperator(DelegateData& delegateData,
nodeIndex);
return kTfLiteError;
}
+
std::vector<armnn::TensorInfo> outputs;
for (int i = 0; i < numSplits; ++i)
{
const TfLiteOpaqueTensor* tfLiteOutputTensor =
TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, outputTensors[i]);
+
if (!IsValid(tfLiteContext, tfLiteOutputTensor, tfLiteSplitVOperatorCode, nodeIndex))
{
return kTfLiteError;
}
- outputs.push_back(GetTensorInfoForTfLiteOpaqueTensor(tfLiteOutputTensor, true));
+
+ const armnn::TensorInfo& outputTensorShape = GetTensorInfoForTfLiteOpaqueTensor(tfLiteOutputTensor, true);
+
+ if (ZeroDimPresent({outputTensorShape}))
+ {
+ TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnOpaqueDelegate: Zero dimension tensors are not supported in operator #%d node #%d: ",
+ tfLiteSplitVOperatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+
+ outputs.push_back(outputTensorShape);
}
const std::vector<std::reference_wrapper<armnn::TensorInfo>> outputTensorInfos(outputs.begin(), outputs.end());
diff --git a/delegate/opaque/src/StridedSlice.hpp b/delegate/opaque/src/StridedSlice.hpp
index 2e17e3292f..46e5eecbb1 100644
--- a/delegate/opaque/src/StridedSlice.hpp
+++ b/delegate/opaque/src/StridedSlice.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -42,12 +42,35 @@ TfLiteStatus VisitStridedSliceOperator(DelegateData& delegateData,
{
return kTfLiteError;
}
+ // Checking for unsupported non-const non-network input tensors
+ // Index 0 is the input, index 1-3 should be constant
+ if(i > 0 && TfLiteOpaqueTensorGetAllocationType(inputTensor) != kTfLiteMmapRo)
+ {
+ TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnOpaqueDelegate: Unsupported constant data input through non-const tensor "
+ "in operator #%d node #%d",
+ tfLiteStridedSliceOperatorCode, nodeIndex);
+ return kTfLiteError;
+ }
}
const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteInputTensors[0]);
// We save the begin, end and strides tensors in our descriptor. Therefore we have to read those values from inputs
unsigned int inputRank = inputTensorInfo.GetNumDimensions();
+
+ // Input tensors of rank greater than 4 are unsupported - delegate back to TFLite runtime
+ if(inputRank > 4)
+ {
+ TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+            "TfLiteArmnnOpaqueDelegate: Tensors of rank greater than 4 are unsupported"
+ " in the StridedSlice operator. Operator: #%d node #%d: ",
+ tfLiteStridedSliceOperatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+
auto ReadInt32Input = [&](int inputIndex, std::vector<int32_t>& outputData) -> TfLiteStatus
{
if (TfLiteOpaqueTensorType(tfLiteInputTensors[inputIndex]) != kTfLiteInt32)
@@ -110,6 +133,44 @@ TfLiteStatus VisitStridedSliceOperator(DelegateData& delegateData,
descriptor.m_ShrinkAxisMask = nodeParameters->shrink_axis_mask;
descriptor.m_DataLayout = armnn::DataLayout::NHWC;
+ // Checking begin and end bounds with ShrinkAxisMask
+ for(unsigned int i = 0; i < inputRank; ++i)
+ {
+ if((descriptor.m_ShrinkAxisMask & (1 << i)) &&
+ (((descriptor.m_Begin[i] - descriptor.m_End[i]) > 1) ||
+ ((descriptor.m_Begin[i] - descriptor.m_End[i]) < -1)))
+ {
+ TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+                "TfLiteArmnnOpaqueDelegate: Invalid combination of ShrinkAxisMask, Begin- and End-Tensor values "
+ "in the StridedSlice operator. Operator: #%d node #%d: ",
+ tfLiteStridedSliceOperatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+ }
+
+ // Checking that NewAxisMask doesn't extend the output beyond the supported rank
+ if(inputRank >= 3 && (descriptor.m_NewAxisMask > 4 || descriptor.m_NewAxisMask == 3))
+ {
+ TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+            "TfLiteArmnnOpaqueDelegate: Maximum output tensor rank is 4, the currently set NewAxisMask "
+ "results in an unsupported higher rank. Operator: #%d node #%d: ",
+ tfLiteStridedSliceOperatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+
+ // The variable 'offset' is documented in TFLite builtin_op_data.h:
+ // "If true, then the end tensor is an offset of the begin tensor."
+ if(nodeParameters->offset &&
+ descriptor.m_Begin.size() == descriptor.m_End.size())
+ {
+ for(unsigned int i = 0; i < descriptor.m_End.size(); ++i)
+ {
+ descriptor.m_End[i] += descriptor.m_Begin[i];
+ }
+ }
+
// Validate output
// Gather output indices and use to get output tensor.
const int* outputTensors;
@@ -131,6 +192,16 @@ TfLiteStatus VisitStridedSliceOperator(DelegateData& delegateData,
const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteOutputTensor);
+ // Check for unsupported 0-size dimensions in the input/output tensor shapes
+ if(ZeroDimPresent({inputTensorInfo, outputTensorInfo}))
+ {
+ TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnOpaqueDelegate: Zero dimension tensors are not supported in operator #%d node #%d",
+ tfLiteStridedSliceOperatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+
bool isSupported = false;
armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
diff --git a/delegate/opaque/src/Unpack.hpp b/delegate/opaque/src/Unpack.hpp
index 525529ff7b..29acfdff01 100644
--- a/delegate/opaque/src/Unpack.hpp
+++ b/delegate/opaque/src/Unpack.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -131,7 +131,16 @@ TfLiteStatus VisitUnpackOperator(DelegateData& delegateData,
return kTfLiteError;
}
- outputs.push_back(GetTensorInfoForTfLiteOpaqueTensor(tfLiteOutputTensor, true));
+ armnn::TensorInfo outputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteOutputTensor, true);
+ armnn::TensorShape shape = outputTensorInfo.GetShape();
+ if (shape.GetDimensionality() == armnn::Dimensionality::NotSpecified)
+ {
+ shape.SetNumDimensions(1, true);
+ shape.SetDimensionSize(0, 1);
+ outputTensorInfo.SetShape(shape);
+ }
+
+ outputs.push_back(outputTensorInfo);
}
const std::vector<std::reference_wrapper<armnn::TensorInfo>> outputTensorInfos(outputs.begin(), outputs.end());
diff --git a/delegate/opaque/src/armnn_delegate.cpp b/delegate/opaque/src/armnn_delegate.cpp
index 71ef114b80..f45420d74b 100644
--- a/delegate/opaque/src/armnn_delegate.cpp
+++ b/delegate/opaque/src/armnn_delegate.cpp
@@ -110,8 +110,8 @@ armnnDelegate::DelegateOptions ParseArmNNSettings(const tflite::TFLiteSettings*
// Build the key and value lists to pass into the constructor of the DelegateOptions
size_t num_options = options.size();
- std::unique_ptr<const char*> options_keys = std::unique_ptr<const char*>(new const char*[num_options + 1]);
- std::unique_ptr<const char*> options_values = std::unique_ptr<const char*>(new const char*[num_options + 1]);
+ std::unique_ptr<const char*[]> options_keys = std::unique_ptr<const char*[]>(new const char*[num_options + 1]);
+ std::unique_ptr<const char*[]> options_values = std::unique_ptr<const char*[]>(new const char*[num_options + 1]);
for (size_t i=0; i<num_options; ++i)
{
diff --git a/delegate/test/DelegateOptionsTest.cpp b/delegate/test/DelegateOptionsTest.cpp
index 7724f916d4..0c581ae7a0 100644
--- a/delegate/test/DelegateOptionsTest.cpp
+++ b/delegate/test/DelegateOptionsTest.cpp
@@ -148,10 +148,10 @@ TEST_CASE ("ArmnnDelegateStringParsingOptionDisableTfLiteRuntimeFallback")
// Create options_keys and options_values char array
size_t num_options = keys.size();
- std::unique_ptr<const char*> options_keys =
- std::unique_ptr<const char*>(new const char*[num_options + 1]);
- std::unique_ptr<const char*> options_values =
- std::unique_ptr<const char*>(new const char*[num_options + 1]);
+ std::unique_ptr<const char*[]> options_keys =
+ std::unique_ptr<const char*[]>(new const char*[num_options + 1]);
+ std::unique_ptr<const char*[]> options_values =
+ std::unique_ptr<const char*[]>(new const char*[num_options + 1]);
for (size_t i=0; i<num_options; ++i)
{
options_keys.get()[i] = keys[i].c_str();
@@ -183,10 +183,10 @@ TEST_CASE ("ArmnnDelegateStringParsingOptionEnableTfLiteRuntimeFallback")
// Create options_keys and options_values char array
size_t num_options = keys.size();
- std::unique_ptr<const char*> options_keys =
- std::unique_ptr<const char*>(new const char*[num_options + 1]);
- std::unique_ptr<const char*> options_values =
- std::unique_ptr<const char*>(new const char*[num_options + 1]);
+ std::unique_ptr<const char*[]> options_keys =
+ std::unique_ptr<const char*[]>(new const char*[num_options + 1]);
+ std::unique_ptr<const char*[]> options_values =
+ std::unique_ptr<const char*[]>(new const char*[num_options + 1]);
for (size_t i=0; i<num_options; ++i)
{
options_keys.get()[i] = keys[i].c_str();
@@ -299,10 +299,10 @@ void CreateFp16StringParsingTestRun(std::vector<std::string>& keys,
// Create options_keys and options_values char array
size_t num_options = keys.size();
- std::unique_ptr<const char*> options_keys =
- std::unique_ptr<const char*>(new const char*[num_options + 1]);
- std::unique_ptr<const char*> options_values =
- std::unique_ptr<const char*>(new const char*[num_options + 1]);
+ std::unique_ptr<const char*[]> options_keys =
+ std::unique_ptr<const char*[]>(new const char*[num_options + 1]);
+ std::unique_ptr<const char*[]> options_values =
+ std::unique_ptr<const char*[]>(new const char*[num_options + 1]);
for (size_t i=0; i<num_options; ++i)
{
options_keys.get()[i] = keys[i].c_str();
diff --git a/delegate/test/SplitTest.cpp b/delegate/test/SplitTest.cpp
index 73dd4a53ba..a1e7b3b94a 100644
--- a/delegate/test/SplitTest.cpp
+++ b/delegate/test/SplitTest.cpp
@@ -4,7 +4,7 @@
//
#include "SplitTestHelper.hpp"
-
+#include <common/src/DelegateUtils.hpp>
#include <doctest/doctest.h>
namespace armnnDelegate
@@ -161,6 +161,33 @@ void SplitVFp32Test()
numSplits);
}
+void SplitVFp32ZeroDimTest()
+{
+ uint32_t inputShape[] = { 1 };
+
+ uint32_t outputShape0[] = { 1 };
+ uint32_t outputShape1[] = { 1 };
+ uint32_t outputShape2[] = { 1 };
+ uint32_t outputShape3[] = { 1 };
+ uint32_t outputShape4[] = { 1 };
+ uint32_t outputShape5[] = { 1 };
+ uint32_t outputShape6[] = { 2 };
+ uint32_t outputShape7[] = { 0 };
+
+ armnn::TensorInfo inputTensor = armnn::TensorInfo(1, inputShape, armnn::DataType::Float32);
+ armnn::TensorInfo outputTensor0 = armnn::TensorInfo(1, outputShape0, armnn::DataType::Float32);
+ armnn::TensorInfo outputTensor1 = armnn::TensorInfo(1, outputShape1, armnn::DataType::Float32);
+ armnn::TensorInfo outputTensor2 = armnn::TensorInfo(1, outputShape2, armnn::DataType::Float32);
+ armnn::TensorInfo outputTensor3 = armnn::TensorInfo(1, outputShape3, armnn::DataType::Float32);
+ armnn::TensorInfo outputTensor4 = armnn::TensorInfo(1, outputShape4, armnn::DataType::Float32);
+ armnn::TensorInfo outputTensor5 = armnn::TensorInfo(1, outputShape5, armnn::DataType::Float32);
+ armnn::TensorInfo outputTensor6 = armnn::TensorInfo(1, outputShape6, armnn::DataType::Float32);
+ armnn::TensorInfo outputTensor7 = armnn::TensorInfo(1, outputShape7, armnn::DataType::Float32);
+
+ CHECK(ZeroDimPresent({inputTensor, outputTensor0, outputTensor1, outputTensor2, outputTensor3,
+ outputTensor4, outputTensor5, outputTensor6, outputTensor7}) == true);
+}
+
// SPLIT_V Test Suite
TEST_SUITE("SPLIT_VTests")
{
@@ -170,6 +197,11 @@ TEST_CASE ("SPLIT_V_Uint8_Test")
SplitVUint8Test();
}
+TEST_CASE ("SPLIT_V_Fp32_ZeroDim_Test")
+{
+ SplitVFp32ZeroDimTest();
+}
+
TEST_CASE ("SPLIT_V_Fp32_Test")
{
SplitVFp32Test();
diff --git a/delegate/test/SplitTestHelper.hpp b/delegate/test/SplitTestHelper.hpp
index d46721577e..bb0227d54d 100644
--- a/delegate/test/SplitTestHelper.hpp
+++ b/delegate/test/SplitTestHelper.hpp
@@ -233,7 +233,14 @@ std::vector<char> CreateSplitVTfLiteModel(tflite::TensorType tensorType,
flatbuffers::Offset<void> operatorBuiltinOptions = CreateSplitVOptions(flatBufferBuilder, numSplits).Union();
const std::vector<int> operatorInputs{ {0, 1, 2} };
- const std::vector<int> operatorOutputs{ {3, 4} };
+ std::vector<int> operatorOutputs;
+
+ for (uint32_t i = 0; i< outputTensorShapes.size(); ++i)
+ {
+ operatorOutputs.emplace_back(i+3);
+ }
+
+
flatbuffers::Offset <Operator> controlOperator =
CreateOperator(flatBufferBuilder,
0,
@@ -243,7 +250,13 @@ std::vector<char> CreateSplitVTfLiteModel(tflite::TensorType tensorType,
operatorBuiltinOptions);
const std::vector<int> subgraphInputs{ {0, 1, 2} };
- const std::vector<int> subgraphOutputs{ {3, 4} };
+ std::vector<int> subgraphOutputs;
+
+ for (uint32_t i = 0; i< outputTensorShapes.size(); ++i)
+ {
+ subgraphOutputs.emplace_back(i+3);
+ }
+
flatbuffers::Offset <SubGraph> subgraph =
CreateSubGraph(flatBufferBuilder,
flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
diff --git a/delegate/test/UnpackTest.cpp b/delegate/test/UnpackTest.cpp
index 64256bcfe0..52d71d6b36 100644
--- a/delegate/test/UnpackTest.cpp
+++ b/delegate/test/UnpackTest.cpp
@@ -42,6 +42,37 @@ void UnpackAxis0Num4Test(tflite::TensorType tensorType)
}
template <typename T>
+void UnpackAxis0Output0ShapeTest(tflite::TensorType tensorType)
+{
+ std::vector<int32_t> inputShape { 5 };
+ std::vector<int32_t> expectedOutputShape {};
+
+ std::vector<T> inputValues { 2, 4, 6, 8, 10 };
+
+ std::vector<T> expectedOutputValues0 { 2 };
+ std::vector<T> expectedOutputValues1 { 4 };
+ std::vector<T> expectedOutputValues2 { 6 };
+ std::vector<T> expectedOutputValues3 { 8 };
+ std::vector<T> expectedOutputValues4 { 10 };
+
+ std::vector<std::vector<T>> expectedOutputValues{ expectedOutputValues0,
+ expectedOutputValues1,
+ expectedOutputValues2,
+ expectedOutputValues3,
+ expectedOutputValues4
+ };
+
+ UnpackTest<T>(tflite::BuiltinOperator_UNPACK,
+ tensorType,
+ inputShape,
+ expectedOutputShape,
+ inputValues,
+ expectedOutputValues,
+ {},
+ 0);
+}
+
+template <typename T>
void UnpackAxis2Num6Test(tflite::TensorType tensorType)
{
std::vector<int32_t> inputShape { 4, 1, 6 };
@@ -90,6 +121,11 @@ TEST_CASE ("Unpack_Fp32_Axis2_Num6_Test")
UnpackAxis2Num6Test<float>(tflite::TensorType_FLOAT32);
}
+TEST_CASE ("Unpack_Fp32_Axis0_Output0Shape_Test")
+{
+    UnpackAxis0Output0ShapeTest<float>(tflite::TensorType_FLOAT32);
+}
+
// Uint8
TEST_CASE ("Unpack_Uint8_Axis0_Num4_Test")
{
@@ -100,6 +136,10 @@ TEST_CASE ("Unpack_Uint8_Axis2_Num6_Test")
{
UnpackAxis2Num6Test<uint8_t>(tflite::TensorType_UINT8);
}
+TEST_CASE ("Unpack_Uint8_Axis0_Output0Shape_Test")
+{
+ UnpackAxis0Output0ShapeTest<uint8_t>(tflite::TensorType_UINT8);
+}
}