Diffstat (limited to 'delegate/classic/src')
-rw-r--r--  delegate/classic/src/ArgMinMax.hpp            | 11
-rw-r--r--  delegate/classic/src/BatchSpace.hpp           | 12
-rw-r--r--  delegate/classic/src/ClassicDelegateUtils.hpp |  1
-rw-r--r--  delegate/classic/src/Convolution.hpp          | 24
-rw-r--r--  delegate/classic/src/GatherNd.hpp             | 12
-rw-r--r--  delegate/classic/src/Quantization.hpp         | 14
-rw-r--r--  delegate/classic/src/Split.hpp                | 30
-rw-r--r--  delegate/classic/src/StridedSlice.hpp         | 73
-rw-r--r--  delegate/classic/src/Unpack.hpp               | 15
9 files changed, 181 insertions(+), 11 deletions(-)
diff --git a/delegate/classic/src/ArgMinMax.hpp b/delegate/classic/src/ArgMinMax.hpp
index 3729b3bd83..0ae89be413 100644
--- a/delegate/classic/src/ArgMinMax.hpp
+++ b/delegate/classic/src/ArgMinMax.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -39,6 +39,15 @@ TfLiteStatus VisitArgMinMaxOperator(DelegateData& delegateData,
const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
+ if(outputTensorInfo.GetShape().GetDimensionality() == armnn::Dimensionality::NotSpecified)
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: NotSpecified Dimensionality is not supported in operator #%d node #%d: ",
+ argMinMaxOperatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+
// Get const axis value from model and set it to descriptor.
const TfLiteTensor& tfLiteAxisTensor = tfLiteTensors[tfLiteNode->inputs->data[1]];
if (!IsValid(tfLiteContext, tfLiteAxisTensor, argMinMaxOperatorCode, nodeIndex))
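For context on the new guard: armnn::TensorShape tracks an explicit Dimensionality state, and ArgMinMax now rejects outputs whose shape was never specified. A minimal standalone sketch of the three states, assuming only the public armnn::TensorShape API:

#include <armnn/Tensor.hpp>
#include <cassert>

void DimensionalityStates()
{
    armnn::TensorShape specified({2, 3});                             // fully-known shape
    armnn::TensorShape scalar(armnn::Dimensionality::Scalar);         // rank-0 tensor
    armnn::TensorShape unknown(armnn::Dimensionality::NotSpecified);  // shape unknown at parse time

    assert(specified.GetDimensionality() == armnn::Dimensionality::Specified);
    assert(scalar.GetDimensionality()    == armnn::Dimensionality::Scalar);
    assert(unknown.GetDimensionality()   == armnn::Dimensionality::NotSpecified);
}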
diff --git a/delegate/classic/src/BatchSpace.hpp b/delegate/classic/src/BatchSpace.hpp
index 07491cee0d..224d90d589 100644
--- a/delegate/classic/src/BatchSpace.hpp
+++ b/delegate/classic/src/BatchSpace.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -52,6 +52,16 @@ TfLiteStatus VisitBatchToSpaceNdOperator(DelegateData& delegateData,
const armnn::TensorInfo& cropsTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteCropsTensor);
const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
+ // Check for unsupported 0-size dimensions in the tensor shapes
+ if(ZeroDimPresent({inputTensorInfo, outputTensorInfo}))
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnOpaqueDelegate: Zero dimension tensors are not supported in operator #%d node #%d",
+ operatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+
std::vector<unsigned int> blockShape(blockShapeTensorInfo.GetNumElements());
::memcpy(blockShape.data(), tfLiteBlockShapeTensor.data.data, blockShapeTensorInfo.GetNumBytes());
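ZeroDimPresent comes from the delegate's shared utilities and its body is not part of this diff. A minimal sketch of what such a check plausibly does, matching the call sites above (the name and signature are inferred, not the real definition):

#include <armnn/Tensor.hpp>
#include <initializer_list>

bool ZeroDimPresentSketch(std::initializer_list<armnn::TensorInfo> tensorInfos)
{
    for (const armnn::TensorInfo& info : tensorInfos)
    {
        const armnn::TensorShape& shape = info.GetShape();
        if (shape.GetDimensionality() != armnn::Dimensionality::Specified)
        {
            continue;   // GetNumDimensions() would throw on unspecified shapes
        }
        for (unsigned int i = 0; i < shape.GetNumDimensions(); ++i)
        {
            if (shape[i] == 0)   // a 0-sized dimension means an empty tensor
            {
                return true;
            }
        }
    }
    return false;
}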
diff --git a/delegate/classic/src/ClassicDelegateUtils.hpp b/delegate/classic/src/ClassicDelegateUtils.hpp
index 78eb662d7b..117e590566 100644
--- a/delegate/classic/src/ClassicDelegateUtils.hpp
+++ b/delegate/classic/src/ClassicDelegateUtils.hpp
@@ -429,6 +429,7 @@ armnn::TensorInfo GetTensorInfoForTfLiteTensor(const TfLiteTensor& tfLiteTensor,
}
ret.SetQuantizationScales(quantizationScales);
ret.SetQuantizationDim(armnn::numeric_cast<unsigned int>(affineQuantization->quantized_dimension));
+ ret.SetQuantizationOffset(affineQuantization->zero_point->data[0]);
}
else
{
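The one-line addition above copies the first per-channel zero point into ArmNN's single offset field. For orientation, a hedged sketch of how TFLite exposes those per-axis parameters through TfLiteAffineQuantization (types from TFLite's common.h; the helper name is illustrative):

#include <tensorflow/lite/c/common.h>
#include <vector>

std::vector<float> ReadPerAxisScales(const TfLiteTensor& tensor)
{
    if (tensor.quantization.type != kTfLiteAffineQuantization)
    {
        return {};
    }
    auto* affine = static_cast<TfLiteAffineQuantization*>(tensor.quantization.params);
    // One scale per slice along affine->quantized_dimension.
    std::vector<float> scales(affine->scale->data,
                              affine->scale->data + affine->scale->size);
    // affine->zero_point also holds one entry per channel; the hunk above
    // forwards only entry 0 to TensorInfo::SetQuantizationOffset.
    return scales;
}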
diff --git a/delegate/classic/src/Convolution.hpp b/delegate/classic/src/Convolution.hpp
index 71ecd4c97a..fb19f62f32 100644
--- a/delegate/classic/src/Convolution.hpp
+++ b/delegate/classic/src/Convolution.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -645,6 +645,22 @@ TfLiteStatus VisitTransposeConv2dOperator(DelegateData& delegateData,
const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
const armnn::TensorInfo& filterTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteFilterTensor);
+ TfLiteFusedActivation activationType = kTfLiteActNone;
+ if (parameters->activation != kTfLiteActNone)
+ {
+ activationType = parameters->activation;
+
+ TfLiteStatus activationStatus = ValidateFusedActivationOperator(delegateData,
+ tfLiteContext,
+ outputTensorInfo,
+ outputTensorInfo,
+ activationType);
+ if(activationStatus != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
+ }
+
// TfLite uses NHWC tensors
const unsigned int inputHeight = inputTensorInfo.GetShape()[1];
const unsigned int inputWidth = inputTensorInfo.GetShape()[2];
@@ -779,6 +795,12 @@ TfLiteStatus VisitTransposeConv2dOperator(DelegateData& delegateData,
delegateData.m_OutputSlotForNode[static_cast<unsigned int>(tfLiteNode->outputs->data[outputIndex])] =
&outputSlot;
}
+
+ if (activationType != kTfLiteActNone)
+ {
+ return FusedActivation(tfLiteContext, tfLiteNode, activationType, layer, 0, delegateData, nodeIndex);
+ }
+
return kTfLiteOk;
}
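The two hunks above follow the delegate's usual fusion pattern: first check that the standalone activation would be supported, and only after the TransposeConv2d layer's output slots are wired append the activation layer. A hedged sketch of the TfLite-to-ArmNN mapping a FusedActivation-style helper typically performs (the real helper in ClassicDelegateUtils.hpp also splices the new layer into the graph):

#include <armnn/Descriptors.hpp>
#include <tensorflow/lite/c/builtin_op_data.h>

armnn::ActivationDescriptor MakeActivationDescriptor(TfLiteFusedActivation activation)
{
    armnn::ActivationDescriptor desc;
    switch (activation)
    {
        case kTfLiteActRelu:
            desc.m_Function = armnn::ActivationFunction::ReLu;
            break;
        case kTfLiteActRelu6:
            desc.m_Function = armnn::ActivationFunction::BoundedReLu;
            desc.m_A = 6.0f;    // upper bound
            desc.m_B = 0.0f;    // lower bound
            break;
        case kTfLiteActReluN1To1:
            desc.m_Function = armnn::ActivationFunction::BoundedReLu;
            desc.m_A = 1.0f;
            desc.m_B = -1.0f;
            break;
        case kTfLiteActTanh:
            desc.m_Function = armnn::ActivationFunction::TanH;
            desc.m_A = 1.0f;    // a * tanh(b * x)
            desc.m_B = 1.0f;
            break;
        case kTfLiteActSigmoid:
            desc.m_Function = armnn::ActivationFunction::Sigmoid;
            break;
        default:                // kTfLiteActNone and anything unmapped
            desc.m_Function = armnn::ActivationFunction::Linear;
            break;
    }
    return desc;
}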
diff --git a/delegate/classic/src/GatherNd.hpp b/delegate/classic/src/GatherNd.hpp
index a49b768873..27c8e92df0 100644
--- a/delegate/classic/src/GatherNd.hpp
+++ b/delegate/classic/src/GatherNd.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -47,6 +47,16 @@ TfLiteStatus VisitGatherNdOperator(DelegateData& delegateData,
const armnn::TensorInfo& indicesTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteIndicesTensor);
const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
+ // Check for unsupported 0-size dimensions in the tensor shapes
+ if(ZeroDimPresent({inputTensorInfo, outputTensorInfo}))
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: Zero dimension tensors are not supported in operator #%d node #%d",
+ operatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+
armnn::BackendId setBackend;
if (!delegateData.m_Network)
{
diff --git a/delegate/classic/src/Quantization.hpp b/delegate/classic/src/Quantization.hpp
index 7fcb9c7c44..d7ed9402cd 100644
--- a/delegate/classic/src/Quantization.hpp
+++ b/delegate/classic/src/Quantization.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -141,6 +141,18 @@ TfLiteStatus VisitQuantizeOperator(DelegateData& delegateData,
const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
+ if (outputTensorInfo.HasPerAxisQuantization())
+ {
+ auto outputTensorDataType = outputTensorInfo.GetDataType();
+ if (outputTensorDataType == armnn::DataType::QAsymmU8 || outputTensorDataType == armnn::DataType::QAsymmS8)
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: Per Axis Quantization is not supported in asymmetric Quantization Datatype.");
+ return kTfLiteError;
+ }
+ }
+
bool isSupported = false;
armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
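Rationale for the guard above: ArmNN's per-axis quantization stores one scale per channel but no per-channel offset, so it can only represent symmetric quantization; QAsymmU8 and QAsymmS8 imply a nonzero zero point and are therefore rejected. A minimal sketch of a per-axis TensorInfo the delegate can accept (symmetric data type, offsets implicitly zero):

#include <armnn/Tensor.hpp>
#include <vector>

armnn::TensorInfo MakePerAxisSymmetricInfo()
{
    // One scale per output channel, quantized along dimension 0.
    const std::vector<float> scales = {0.1f, 0.2f, 0.4f};
    armnn::TensorInfo info(armnn::TensorShape({3, 8}),
                           armnn::DataType::QSymmS8,   // symmetric: offset stays 0
                           scales,
                           /*quantizationDim=*/0);
    // info.HasPerAxisQuantization() is now true.
    return info;
}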
diff --git a/delegate/classic/src/Split.hpp b/delegate/classic/src/Split.hpp
index 57b7f8074e..d8b2fafa42 100644
--- a/delegate/classic/src/Split.hpp
+++ b/delegate/classic/src/Split.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020,2022-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020,2022-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -70,11 +70,23 @@ TfLiteStatus VisitSplitOperator(DelegateData& delegateData,
for (unsigned int i = 0; i < numSplits; ++i)
{
const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[i]];
+
+ const armnn::TensorInfo& outputTensorShape = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
+
+ if (ZeroDimPresent({outputTensorShape}))
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: Zero dimension tensors are not supported in operator #%d node #%d: ",
+ tfLiteSplitOperatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+
if (!IsValid(tfLiteContext, tfLiteOutputTensor, tfLiteSplitOperatorCode, nodeIndex))
{
return kTfLiteError;
}
- outputs.push_back(GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true));
+ outputs.push_back(outputTensorShape);
}
const std::vector<std::reference_wrapper<armnn::TensorInfo>> outputTensorInfos(outputs.begin(), outputs.end());
@@ -242,7 +254,19 @@ TfLiteStatus VisitSplitVOperator(DelegateData& delegateData,
{
return kTfLiteError;
}
- outputs.push_back(GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true));
+
+ const armnn::TensorInfo& outputTensorShape = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
+
+ if (ZeroDimPresent({outputTensorShape}))
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: Zero dimension tensors are not supported in operator #%d node #%d: ",
+ tfLiteSplitVOperatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+
+ outputs.push_back(outputTensorShape);
}
const std::vector<std::reference_wrapper<armnn::TensorInfo>> outputTensorInfos(outputs.begin(), outputs.end());
diff --git a/delegate/classic/src/StridedSlice.hpp b/delegate/classic/src/StridedSlice.hpp
index 43f96411b6..5f0a8edf7e 100644
--- a/delegate/classic/src/StridedSlice.hpp
+++ b/delegate/classic/src/StridedSlice.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -37,10 +37,33 @@ TfLiteStatus VisitStridedSliceOperator(DelegateData& delegateData,
{
return kTfLiteError;
}
+ // Checking for unsupported non-const non-network input tensors
+ // Index 0 is the input, index 1-3 should be constant
+ if(i > 0 && inputTensor->allocation_type != kTfLiteMmapRo)
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: Unsupported constant data input through non-const tensor "
+ "in operator #%d node #%d",
+ sliceOperatorCode, nodeIndex);
+ return kTfLiteError;
+ }
}
// We save the begin, end and strides tensors in our descriptor. Therefore we have to read those values from inputs
int inputRank = tfLiteInputs[0]->dims->size;
+
+ // Input tensors of rank greater than 4 are unsupported - delegate back to TFLite runtime
+ if(inputRank > 4)
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLitearmnnOpaqueDelegate: Tensors of rank greater than 4 are unsupported"
+ " in the StridedSlice operator. Operator: #%d node #%d: ",
+ sliceOperatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+
auto ReadInt32Input = [&](int inputIndex, std::vector<int32_t>& outputData) -> TfLiteStatus
{
if (tfLiteInputs[inputIndex]->type != kTfLiteInt32)
@@ -103,6 +126,44 @@ TfLiteStatus VisitStridedSliceOperator(DelegateData& delegateData,
descriptor.m_ShrinkAxisMask = stridedSliceParams->shrink_axis_mask;
descriptor.m_DataLayout = armnn::DataLayout::NHWC;
+ // Checking begin and end bounds with ShrinkAxisMask
+ for(int i = 0; i < inputRank; ++i)
+ {
+ if((descriptor.m_ShrinkAxisMask & (1 << i)) &&
+ (((descriptor.m_Begin[i] - descriptor.m_End[i]) > 1) ||
+ ((descriptor.m_Begin[i] - descriptor.m_End[i]) < -1)))
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLitearmnnDelegate: Invalid combination of ShrinkAxisMask, Begin- and End-Tensor values "
+ "in the StridedSlice operator. Operator: #%d node #%d: ",
+ sliceOperatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+ }
+
+ // Checking that NewAxisMask doesn't extend the output beyond the supported rank
+ if(inputRank >= 3 && (descriptor.m_NewAxisMask > 4 || descriptor.m_NewAxisMask == 3))
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLitearmnnOpaqueDelegate: Maximum output tensor rank is 4, the currently set NewAxisMask "
+ "results in an unsupported higher rank. Operator: #%d node #%d: ",
+ sliceOperatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+
+ // The variable 'offset' is documented in TFLite builtin_op_data.h:
+ // "If true, then the end tensor is an offset of the begin tensor."
+ if(stridedSliceParams->offset &&
+ descriptor.m_Begin.size() == descriptor.m_End.size())
+ {
+ for(unsigned int i = 0; i < descriptor.m_End.size(); ++i)
+ {
+ descriptor.m_End[i] += descriptor.m_Begin[i];
+ }
+ }
+
// Validate output
const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
if (!IsValid(tfLiteContext, tfLiteOutputTensor, sliceOperatorCode, nodeIndex))
@@ -113,6 +174,16 @@ TfLiteStatus VisitStridedSliceOperator(DelegateData& delegateData,
const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(*tfLiteInputs[0]);
const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor);
+ // Check for unsupported 0-size dimensions in the input/output tensor shapes
+ if(ZeroDimPresent({inputTensorInfo, outputTensorInfo}))
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnOpaqueDelegate: Zero dimension tensors are not supported in operator #%d node #%d",
+ sliceOperatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+
bool isSupported = false;
armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
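A worked illustration of the new offset handling above: when stridedSliceParams->offset is set, TFLite's end values are per-dimension extents relative to begin, and the loop rebases them to absolute indices before the descriptor reaches ArmNN. A self-contained example with begin = {1, 2} and relative ends {2, 3}:

#include <cstdio>
#include <vector>

int main()
{
    std::vector<int> begin = {1, 2};
    std::vector<int> end   = {2, 3};   // with offset == true these are sizes, not indices

    for (size_t i = 0; i < end.size(); ++i)
    {
        end[i] += begin[i];            // rebase to absolute end indices
    }
    std::printf("absolute end = {%d, %d}\n", end[0], end[1]);   // prints {3, 5}
    return 0;
}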
diff --git a/delegate/classic/src/Unpack.hpp b/delegate/classic/src/Unpack.hpp
index b3336ec990..15484f807f 100644
--- a/delegate/classic/src/Unpack.hpp
+++ b/delegate/classic/src/Unpack.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -112,11 +112,22 @@ TfLiteStatus VisitUnpackOperator(DelegateData& delegateData,
for (unsigned int i = 0; i < unpackNum; ++i)
{
const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[i]];
+
if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
{
return kTfLiteError;
}
- outputs.push_back(GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true));
+
+ armnn::TensorInfo outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
+ armnn::TensorShape shape = outputTensorInfo.GetShape();
+ if (shape.GetDimensionality() == armnn::Dimensionality::NotSpecified)
+ {
+ shape.SetNumDimensions(1, true);
+ shape.SetDimensionSize(0, 1);
+ outputTensorInfo.SetShape(shape);
+ }
+
+ outputs.push_back(outputTensorInfo);
}
const std::vector<std::reference_wrapper<armnn::TensorInfo>> outputTensorInfos(outputs.begin(), outputs.end());
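The Unpack change takes the opposite approach to the ArgMinMax guard earlier in this diff: instead of rejecting a NotSpecified output shape, it coerces the shape to a concrete one-element 1-D tensor so ArmNN can handle scalar outputs. A minimal standalone sketch of that fixup, using only the public armnn::TensorShape API:

#include <armnn/Tensor.hpp>

armnn::TensorShape CoerceScalarShape(armnn::TensorShape shape)
{
    if (shape.GetDimensionality() == armnn::Dimensionality::NotSpecified)
    {
        shape.SetNumDimensions(1, true);   // rank 1, dimensions marked as specified
        shape.SetDimensionSize(0, 1);      // a single element
    }
    return shape;
}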