-rw-r--r--delegate/classic/src/BroadcastTo.hpp12
-rw-r--r--delegate/common/src/DelegateUtils.hpp15
-rw-r--r--delegate/common/src/test/DelegateUtilsTest.cpp54
-rw-r--r--delegate/opaque/src/BroadcastTo.hpp12
-rw-r--r--src/backends/backendsCommon/WorkloadData.cpp2
-rw-r--r--src/backends/reference/workloads/BatchMatMulImpl.cpp12
-rw-r--r--src/backends/reference/workloads/Broadcast.cpp24
-rw-r--r--src/backends/tosaCommon/TosaMappings.cpp7
-rw-r--r--src/backends/tosaCommon/operatorMappings/ActivationOperator.hpp20
-rw-r--r--src/backends/tosaCommon/operatorMappings/CMakeLists.txt6
-rw-r--r--src/backends/tosaCommon/operatorMappings/LeakyReluOperator.cpp (renamed from src/backends/tosaCommon/operatorMappings/ActivationOperator.cpp)77
-rw-r--r--src/backends/tosaCommon/operatorMappings/LeakyReluOperator.hpp20
-rw-r--r--src/backends/tosaCommon/operatorMappings/ReluOperator.cpp156
-rw-r--r--src/backends/tosaCommon/operatorMappings/ReluOperator.hpp20
-rw-r--r--src/backends/tosaCommon/operatorMappings/TosaCommonOperators.hpp3
-rw-r--r--src/backends/tosaCommon/operatorMappings/TosaOperatorUtils.hpp7
-rw-r--r--src/backends/tosaReference/test/TosaRefEndToEndTests.cpp51
17 files changed, 387 insertions, 111 deletions
diff --git a/delegate/classic/src/BroadcastTo.hpp b/delegate/classic/src/BroadcastTo.hpp
index 92aed79982..2e2b3ab155 100644
--- a/delegate/classic/src/BroadcastTo.hpp
+++ b/delegate/classic/src/BroadcastTo.hpp
@@ -1,11 +1,12 @@
//
-// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
#include <armnn/utility/IgnoreUnused.hpp>
+#include <DelegateUtils.hpp>
#include <tensorflow/lite/builtin_ops.h>
#include <tensorflow/lite/c/builtin_op_data.h>
@@ -83,6 +84,15 @@ namespace armnnDelegate
const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor);
+ if (ZeroDimPresent({inputTensorInfo, outputTensorInfo}))
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: Zero dimension tensors are not supported in operator #%d node #%d: ",
+ broadcastToOperatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+
auto* shapeData = tflite::GetTensorData<int32_t>(&tfLiteShapeTensor);
auto shapeTensorNum = tfLiteShapeTensor.dims->data[0];
diff --git a/delegate/common/src/DelegateUtils.hpp b/delegate/common/src/DelegateUtils.hpp
index 96767ff78c..245fc9be90 100644
--- a/delegate/common/src/DelegateUtils.hpp
+++ b/delegate/common/src/DelegateUtils.hpp
@@ -300,4 +300,19 @@ armnn::TensorInfo OutputShapeOfSqueeze(std::vector<uint32_t> squeezeDims,
return outTensorInfo;
}
+bool ZeroDimPresent(std::initializer_list<armnn::TensorInfo> tensorInfoList)
+{
+ for (const armnn::TensorInfo& tensorInfo : tensorInfoList)
+ {
+ for (unsigned int i = 0; i < tensorInfo.GetNumDimensions(); ++i)
+ {
+ if (tensorInfo.GetShape()[i] == 0)
+ {
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
} // namespace anonymous
diff --git a/delegate/common/src/test/DelegateUtilsTest.cpp b/delegate/common/src/test/DelegateUtilsTest.cpp
new file mode 100644
index 0000000000..5ce470e289
--- /dev/null
+++ b/delegate/common/src/test/DelegateUtilsTest.cpp
@@ -0,0 +1,54 @@
+//
+// Copyright © 2024 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include <armnn/Tensor.hpp>
+#include <doctest/doctest.h>
+#include <common/src/DelegateUtils.hpp>
+
+namespace armnn
+{
+
+TEST_SUITE("DelegateUtils_Tests")
+{
+ TEST_CASE("Zero_Dim_In_Input_Test_True")
+ {
+ unsigned int inputDimSizes[] = {0, 1, 2, 3};
+ TensorInfo inputTensor = armnn::TensorInfo(4, inputDimSizes, DataType::Float32);
+
+ CHECK(ZeroDimPresent({inputTensor}) == true);
+ }
+
+ TEST_CASE("Zero_Dim_In_Input_Test_False")
+ {
+ unsigned int inputDimSizes[] = {1, 2, 3, 4};
+ TensorInfo inputTensor = armnn::TensorInfo(4, inputDimSizes, DataType::Float32);
+
+ CHECK(ZeroDimPresent({inputTensor}) == false);
+ }
+
+ TEST_CASE("Zero_Dim_In_Output_Test_True")
+ {
+ unsigned int inputDimSizes[] = {1, 2, 3, 4};
+ TensorInfo inputTensor = armnn::TensorInfo(4, inputDimSizes, DataType::Float32);
+
+ unsigned int outputDimSizes[] = {0, 1, 2, 3};
+ TensorInfo outputTensor = armnn::TensorInfo(4, outputDimSizes, DataType::Float32);
+
+ CHECK(ZeroDimPresent({inputTensor, outputTensor}) == true);
+ }
+
+ TEST_CASE("Zero_Dim_In_Output_Test_False")
+ {
+ unsigned int inputDimSizes[] = {1, 2, 3, 4};
+ TensorInfo inputTensor = armnn::TensorInfo(4, inputDimSizes, DataType::Float32);
+
+ unsigned int outputDimSizes[] = {1, 2, 3, 4};
+ TensorInfo outputTensor = armnn::TensorInfo(4, outputDimSizes, DataType::Float32);
+
+ CHECK(ZeroDimPresent({inputTensor, outputTensor}) == false);
+ }
+}
+
+} // namespace armnn
\ No newline at end of file
diff --git a/delegate/opaque/src/BroadcastTo.hpp b/delegate/opaque/src/BroadcastTo.hpp
index 379587546f..8fcea9393c 100644
--- a/delegate/opaque/src/BroadcastTo.hpp
+++ b/delegate/opaque/src/BroadcastTo.hpp
@@ -1,11 +1,12 @@
//
-// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
#include <OpaqueDelegateUtils.hpp>
+#include <DelegateUtils.hpp>
namespace armnnOpaqueDelegate
{
@@ -102,6 +103,15 @@ namespace armnnOpaqueDelegate
const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteOutputTensor,
true);
+ if (ZeroDimPresent({inputTensorInfo, outputTensorInfo}))
+ {
+ TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnOpaqueDelegate: Zero dimension tensors are not supported in operator #%d node #%d: ",
+ broadcastToOperatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+
auto* shapeData = static_cast<int32_t*>(TfLiteOpaqueTensorData(tfLiteShapeTensor));
int32_t shapeTensorNum = TfLiteOpaqueTensorDim(tfLiteShapeTensor, 0);
diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp
index 7055092be2..5334641803 100644
--- a/src/backends/backendsCommon/WorkloadData.cpp
+++ b/src/backends/backendsCommon/WorkloadData.cpp
@@ -4305,7 +4305,7 @@ void BatchMatMulQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) cons
auto axesXToMul = BatchMatMulDescriptor::GetAxesToMul(m_Parameters.m_DataLayoutX,
inputXInfoAfterParams.GetShape());
auto axesYToMul = BatchMatMulDescriptor::GetAxesToMul(m_Parameters.m_DataLayoutY,
- inputXInfoBeforeParams.GetShape());
+ inputYInfoBeforeParams.GetShape());
if(inputXInfoAfterParams.GetShape()[axesXToMul.second]
!= inputYInfoAfterParams.GetShape()[axesYToMul.first])
diff --git a/src/backends/reference/workloads/BatchMatMulImpl.cpp b/src/backends/reference/workloads/BatchMatMulImpl.cpp
index 8e169cbab8..e0c36c5db8 100644
--- a/src/backends/reference/workloads/BatchMatMulImpl.cpp
+++ b/src/backends/reference/workloads/BatchMatMulImpl.cpp
@@ -42,13 +42,16 @@ void BatchMatMul::ApplyBatchMatMul()
inputXInfo.GetShape());
auto axesYToMul = BatchMatMulDescriptor::GetAxesToMul(params.m_DataLayoutY,
inputYInfo.GetShape());
+
+ // The inputYRowSize (or inputXColSize) must be read using the original (unadjusted) axis value,
+ // because it indexes into the original, unadjusted tensor shape
+ unsigned int inputYRowSize = inputYInfo.GetShape()[axesYToMul.first];
+
AdjustAxesToMulForUnequalRanks(axesXToMul, axesYToMul);
unsigned int inputXColDim = axesXToMul.second;
unsigned int inputYRowDim = axesYToMul.first;
- unsigned int inputYRowSize = inputYInfo.GetShape()[inputYRowDim];
-
auto batchMatMulOperation = [&](const std::vector<unsigned int>& curIdx)
{
float sum = 0.0f;
@@ -437,10 +440,11 @@ unsigned int BatchMatMul::CalcFlatIdx(DataSlot type, const std::vector<unsigned
{
unsigned int result = idx[idx.size()-1];
unsigned int dimMultiplier = 1;
- unsigned int offset;
+ unsigned int offset = 0;
// -2 because final dim is already accounted for in the multiplier (last dim is just a multiplier of 1x)
- for(unsigned int i = static_cast<unsigned int>(idx.size()-2); static_cast<int>(i) >= 0; i--)
+ // Check offset against i so the flat-index calculation stops once every field of the input shape has been considered
+ for(unsigned int i = static_cast<unsigned int>(idx.size()-2); static_cast<int>(i) >= 0 && (i + 1) > offset; i--)
{
switch(type)
{
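For context on the CalcFlatIdx hunk above: the loop accumulates a row-major flat offset from the innermost dimension outwards, and the new (i + 1) > offset guard stops once every dimension of a lower-rank input has been consumed. A minimal standalone sketch of the same accumulation, using a hypothetical FlatIdx helper over plain vectors rather than ArmNN's DataSlot bookkeeping:

#include <vector>

// Hypothetical standalone analogue of the loop above: a row-major flat index
// for a tensor whose (possibly lower) rank trails the broadcast index `idx`.
unsigned int FlatIdx(const std::vector<unsigned int>& shape,
                     const std::vector<unsigned int>& idx)
{
    unsigned int result = idx[idx.size() - 1];
    unsigned int multiplier = 1;
    // Number of leading idx entries that fall outside this tensor's rank.
    const unsigned int offset = static_cast<unsigned int>(idx.size() - shape.size());
    // -2 because the final dim is already in `result` (multiplier of 1);
    // (i + 1) > offset stops once every dim of the input shape is consumed.
    for (unsigned int i = static_cast<unsigned int>(idx.size() - 2);
         static_cast<int>(i) >= 0 && (i + 1) > offset; i--)
    {
        multiplier *= shape[i - offset + 1];
        result += multiplier * idx[i];
    }
    return result;
}
// e.g. FlatIdx({3, 4}, {1, 2, 3}) == 2 * 4 + 3 == 11: the leading batch entry
// of idx is skipped because the rank-2 tensor is broadcast across it.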
diff --git a/src/backends/reference/workloads/Broadcast.cpp b/src/backends/reference/workloads/Broadcast.cpp
index 24af0fc4b1..f17ec6b311 100644
--- a/src/backends/reference/workloads/Broadcast.cpp
+++ b/src/backends/reference/workloads/Broadcast.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2019 Arm Ltd. All rights reserved.
+// Copyright © 2019,2024 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -38,13 +38,31 @@ BroadcastLoop::BroadcastLoop(const TensorShape& inShape, const TensorShape& outS
unsigned int sIn = 1;
unsigned int sOut = 1;
+ // Get the difference between the number of output and input dimensions
+ const unsigned int dimDifference = numDims - inShape.GetNumDimensions();
+
for (unsigned int j = numDims - 1, k = 0; k < numDims ; k++, j--)
{
+
m_DimData[j].m_DimSize = outShape[j];
- m_DimData[j].m_Stride1 = (inShape[j] > 1) ? sIn : 0;
+ // Pretend there are extra size-1 dimensions prepended to the input
+ if (dimDifference > 0 && j < dimDifference)
+ {
+ m_DimData[j].m_Stride1 = 0;
+ sIn *= 1;
+ }
+ else if (dimDifference > 0)
+ {
+ m_DimData[j].m_Stride1 = (inShape[j - dimDifference] > 1) ? sIn : 0;
+ sIn *= inShape[j - dimDifference];
+ }
+ else
+ {
+ m_DimData[j].m_Stride1 = (inShape[j] > 1) ? sIn : 0;
+ sIn *= inShape[j];
+ }
m_DimData[j].m_StrideOut = sOut;
- sIn *= inShape[j];
sOut *= outShape[j];
}
}
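To make the new stride logic concrete: a lower-rank input is broadcast as if size-1 dimensions were prepended, and any broadcast (or implicit) dimension gets stride 0 so the same input element is re-read across that axis. A minimal sketch under those assumptions, using a hypothetical BroadcastStrides helper over plain vectors instead of armnn::TensorShape:

#include <iostream>
#include <vector>

// Per-dimension input strides for broadcasting inShape into outShape,
// treating the input as if leading size-1 dims were prepended (stride 0),
// as the loop above does via dimDifference.
std::vector<unsigned int> BroadcastStrides(const std::vector<unsigned int>& inShape,
                                           const std::vector<unsigned int>& outShape)
{
    const unsigned int numDims = static_cast<unsigned int>(outShape.size());
    const unsigned int dimDifference = numDims - static_cast<unsigned int>(inShape.size());
    std::vector<unsigned int> strides(numDims, 0);
    unsigned int sIn = 1;
    for (unsigned int j = numDims; j-- > 0;)
    {
        if (j < dimDifference)
        {
            strides[j] = 0;                      // implicit leading size-1 dim
        }
        else
        {
            const unsigned int inDim = inShape[j - dimDifference];
            strides[j] = (inDim > 1) ? sIn : 0;  // stride 0 re-reads the same element
            sIn *= inDim;
        }
    }
    return strides;
}

int main()
{
    // inShape {3, 1} against outShape {2, 3, 4}: dim 0 is implicit (stride 0),
    // dim 1 advances by 1, dim 2 is broadcast (stride 0).
    for (unsigned int s : BroadcastStrides({3, 1}, {2, 3, 4}))
    {
        std::cout << s << ' ';                   // prints: 0 1 0
    }
    std::cout << '\n';
}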
diff --git a/src/backends/tosaCommon/TosaMappings.cpp b/src/backends/tosaCommon/TosaMappings.cpp
index 81391f8213..bc1376b9cc 100644
--- a/src/backends/tosaCommon/TosaMappings.cpp
+++ b/src/backends/tosaCommon/TosaMappings.cpp
@@ -28,7 +28,12 @@ TosaSerializationBasicBlock* GetTosaMapping(const Layer* layer,
auto activationDesc = PolymorphicDowncast<const ActivationDescriptor*>(&descriptor);
if (activationDesc->m_Function == ActivationFunction::LeakyReLu)
{
- return ConvertActivationToTosaOperator(layer, inputs, outputs, activationDesc);
+ return ConvertLeakyReluToTosaOperator(layer, inputs, outputs, activationDesc);
+ }
+ if (activationDesc->m_Function == ActivationFunction::ReLu ||
+ activationDesc->m_Function == ActivationFunction::BoundedReLu)
+ {
+ return ConvertReluToTosaOperator(layer, inputs, outputs, activationDesc);
}
else
{
diff --git a/src/backends/tosaCommon/operatorMappings/ActivationOperator.hpp b/src/backends/tosaCommon/operatorMappings/ActivationOperator.hpp
deleted file mode 100644
index 7519f0c155..0000000000
--- a/src/backends/tosaCommon/operatorMappings/ActivationOperator.hpp
+++ /dev/null
@@ -1,20 +0,0 @@
-//
-// Copyright © 2024 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include <Layer.hpp>
-
-#include <tosa_serialization_handler.h>
-
-#include "TosaOperatorUtils.hpp"
-
-using namespace armnn;
-using namespace tosa;
-
-TosaSerializationBasicBlock* ConvertActivationToTosaOperator(const Layer* layer,
- const std::vector<const TensorInfo*>& inputs,
- const std::vector<const TensorInfo*>& outputs,
- const ActivationDescriptor* activationDescriptor);
diff --git a/src/backends/tosaCommon/operatorMappings/CMakeLists.txt b/src/backends/tosaCommon/operatorMappings/CMakeLists.txt
index eba9011c56..bd86958de1 100644
--- a/src/backends/tosaCommon/operatorMappings/CMakeLists.txt
+++ b/src/backends/tosaCommon/operatorMappings/CMakeLists.txt
@@ -4,8 +4,10 @@
#
list(APPEND armnnTosaBackendOperators_sources
- ActivationOperator.hpp
- ActivationOperator.cpp
+ LeakyReluOperator.hpp
+ LeakyReluOperator.cpp
+ ReluOperator.hpp
+ ReluOperator.cpp
AvgPool2DIgnoreValueOperator.hpp
AvgPool2DIgnoreValueOperator.cpp
ConcatOperator.hpp
diff --git a/src/backends/tosaCommon/operatorMappings/ActivationOperator.cpp b/src/backends/tosaCommon/operatorMappings/LeakyReluOperator.cpp
index c13555da6a..4d330818f1 100644
--- a/src/backends/tosaCommon/operatorMappings/ActivationOperator.cpp
+++ b/src/backends/tosaCommon/operatorMappings/LeakyReluOperator.cpp
@@ -7,33 +7,33 @@
// SPDX-License-Identifier: Apache-2.0
//
-#include "ActivationOperator.hpp"
+#include "LeakyReluOperator.hpp"
#include "TosaRescaleOperatorUtils.hpp"
#include <layers/ActivationLayer.hpp>
// This function is paraphrased from:
// tensorflow/compiler/mlir/tosa/transforms/legalize_tfl.cc from function ConvertTFLLeakyReluOp
-TosaSerializationBasicBlock* ConvertActivationToTosaOperator(const Layer* layer,
- const std::vector<const TensorInfo*>& inputs,
- const std::vector<const TensorInfo*>& outputs,
- const ActivationDescriptor* activationDescriptor)
+TosaSerializationBasicBlock* ConvertLeakyReluToTosaOperator(const Layer* layer,
+ const std::vector<const TensorInfo*>& inputs,
+ const std::vector<const TensorInfo*>& outputs,
+ const ActivationDescriptor* activationDescriptor)
{
if (inputs.size() != 1)
{
- throw armnn::Exception("ConvertActivationToTosaOperator: 1 input tensors required.");
+ throw armnn::Exception("ConvertLeakyReluToTosaOperator: 1 input tensor required.");
}
if (outputs.size() != 1)
{
- throw armnn::Exception("ConvertActivationToTosaOperator: 1 output tensor required.");
+ throw armnn::Exception("ConvertLeakyReluToTosaOperator: 1 output tensor required.");
}
std::string inputName = std::string("input_");
std::string outputNameAlpha = std::string("intermediate1_") + GetUniqueTosaMappingID();
std::string outputNameMul = std::string("intermediate2_") + GetUniqueTosaMappingID();
std::string outputName = std::string("output0_");
- std::string blockName = std::string("Op_ACTIVATION_block_") + GetUniqueTosaMappingID();
+ std::string blockName = std::string("Op_LEAKY_RELU_block_") + GetUniqueTosaMappingID();
// If a layer is present then the block will be used for execution, so input and output names need to be determined
// using the previous and following layers so the graph is connected correctly. For validation this doesn't matter.
@@ -61,7 +61,6 @@ TosaSerializationBasicBlock* ConvertActivationToTosaOperator(const Layer* layer,
DType outputDType0 = ArmNNToDType(outputs[0]->GetDataType());
tensors.push_back(new TosaSerializationTensor(outputName, outputShape0, outputDType0, {}));
-#if TOSA_COMPAT_VERSION(0, 60, 0)
std::string outputNameMAXMIN= std::string("intermediate3_") + GetUniqueTosaMappingID();
if (inputDType0 == DType::DType_FP32 ||
@@ -211,64 +210,4 @@ TosaSerializationBasicBlock* ConvertActivationToTosaOperator(const Layer* layer,
{inputName}, // inputs
{outputName}); // outputs
}
-#else
- std::string outputNameZero = std::string("intermediate3_") + GetUniqueTosaMappingID();
- std::string outputNameGE = std::string("intermediate4_") + GetUniqueTosaMappingID();
-
- // const_zero
- TosaSerializationOperator* zeroOp = nullptr;
- TosaSerializationTensor* zeroTensor = nullptr;
- CreateConstTosaOperator<float>(outputNameZero,
- 0.0f,
- inputDType0,
- inputShape0,
- zeroOp,
- zeroTensor);
- tensors.push_back(zeroTensor);
-
- // const_alpha
- TosaSerializationOperator* alphaOp = nullptr;
- TosaSerializationTensor* alphaTensor = nullptr;
- CreateConstTosaOperator<float>(outputNameAlpha,
- activationDescriptor->m_A,
- inputDType0,
- inputShape0,
- alphaOp,
- alphaTensor);
- tensors.push_back(alphaTensor);
-
- // mul
- int32_t shift = 0;
- TosaMulAttribute mulAttribute(shift);
- TosaSerializationOperator* mulOp = new TosaSerializationOperator(Op_MUL,
- Attribute_MulAttribute,
- &mulAttribute,
- {inputName, outputNameAlpha},
- {outputNameMul});
- tensors.push_back(new TosaSerializationTensor(outputNameMul, inputShape0, inputDType0, {}));
-
- // greater_equal
- TosaSerializationOperator* geOp = new TosaSerializationOperator(Op_GREATER_EQUAL,
- Attribute_NONE,
- nullptr,
- {inputName, outputNameZero},
- {outputNameGE});
- tensors.push_back(new TosaSerializationTensor(outputNameGE, outputShape0, DType::DType_BOOL, {}));
-
- // select
- TosaSerializationOperator* selOp = new TosaSerializationOperator(Op_SELECT,
- Attribute_NONE,
- nullptr,
- {outputNameGE, inputName, outputNameMul},
- {outputName});
-
- // operatorInputNames/operatorOutputNames ends up being the same as
- // blockInputNames/blockOutputNames for one-to-one ArmNN to Tosa mappings
- return new TosaSerializationBasicBlock(blockName, // name
- mainName, // region name
- {zeroOp, alphaOp, mulOp, geOp, selOp}, // operators
- tensors, // tensors
- {inputName}, // inputs
- {outputName}); // outputs
-#endif
}
diff --git a/src/backends/tosaCommon/operatorMappings/LeakyReluOperator.hpp b/src/backends/tosaCommon/operatorMappings/LeakyReluOperator.hpp
new file mode 100644
index 0000000000..839bdeb183
--- /dev/null
+++ b/src/backends/tosaCommon/operatorMappings/LeakyReluOperator.hpp
@@ -0,0 +1,20 @@
+//
+// Copyright © 2024 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <Layer.hpp>
+
+#include <tosa_serialization_handler.h>
+
+#include "TosaOperatorUtils.hpp"
+
+using namespace armnn;
+using namespace tosa;
+
+TosaSerializationBasicBlock* ConvertLeakyReluToTosaOperator(const Layer* layer,
+ const std::vector<const TensorInfo*>& inputs,
+ const std::vector<const TensorInfo*>& outputs,
+ const ActivationDescriptor* activationDescriptor);
diff --git a/src/backends/tosaCommon/operatorMappings/ReluOperator.cpp b/src/backends/tosaCommon/operatorMappings/ReluOperator.cpp
new file mode 100644
index 0000000000..541b39cd8d
--- /dev/null
+++ b/src/backends/tosaCommon/operatorMappings/ReluOperator.cpp
@@ -0,0 +1,156 @@
+//
+// Copyright © 2024 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+//
+// Copyright © 2020 The TensorFlow Authors. All Rights Reserved.
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "LeakyReluOperator.hpp"
+#include "TosaRescaleOperatorUtils.hpp"
+
+#include <layers/ActivationLayer.hpp>
+
+// This function is paraphrased from:
+// tensorflow/compiler/mlir/tosa/transforms/legalize_tfl.cc from function ConvertTFLReluOp
+TosaSerializationBasicBlock* ConvertReluToTosaOperator(const Layer* layer,
+ const std::vector<const TensorInfo*>& inputs,
+ const std::vector<const TensorInfo*>& outputs,
+ const ActivationDescriptor* desc)
+{
+ if (inputs.size() != 1)
+ {
+ throw armnn::Exception("ConvertReluToTosaOperator: 1 input tensor required.");
+ }
+
+ if (outputs.size() != 1)
+ {
+ throw armnn::Exception("ConvertReluToTosaOperator: 1 output tensor required.");
+ }
+
+ std::string inputName = std::string("input_");
+ std::string outputName = std::string("output0_");
+ std::string blockName = "";
+
+ int32_t clamp_min = 0;
+ int32_t clamp_max = 0;
+ float float_max = 0.0f;
+ switch (desc->m_Function)
+ {
+ case ActivationFunction::ReLu:
+ {
+ clamp_max = std::numeric_limits<int32_t>::max();
+ float_max = std::numeric_limits<float>::max();
+ blockName = std::string("Op_RELU_block_") + GetUniqueTosaMappingID();
+ break;
+ }
+ case ActivationFunction::BoundedReLu:
+ {
+ clamp_max = static_cast<int32_t>(desc->m_A);
+ float_max = desc->m_A;
+ blockName = std::string("Op_BOUNDED_RELU_block_") + GetUniqueTosaMappingID();
+ break;
+ }
+ case ActivationFunction::LeakyReLu:
+ {
+ throw Exception("LeakyRelu TOSA mappings are performed in ConvertLeakyReluToTosaOperator().");
+ }
+ default:
+ {
+ throw Exception("Activation function is not supported in ConvertReluToTosaOperator().");
+ }
+ }
+
+ // If a layer is present then the block will be used for execution, so input and output names need to be determined
+ // using the previous and following layers so the graph is connected correctly. For validation this doesn't matter.
+ if (layer != nullptr)
+ {
+ inputName = GenerateUniqueInputName(layer->GetInputSlot(0));
+ outputName = GenerateUniqueOutputName(*layer);
+ }
+
+ std::vector<TosaSerializationTensor*> tensors;
+ std::vector<TosaSerializationOperator*> operators;
+
+ // Only add input tensors if the connected layer is an input layer,
+ // as intermediate and constant tensors are created separately
+ // and duplicate tensors are not allowed.
+ std::vector<int32_t> inputShape0;
+ DType inputDType0 = DType::DType_UNKNOWN;
+ if(inputName.find("input_") != std::string::npos)
+ {
+ inputShape0 = GetTosaTensorShape(inputs[0]->GetShape());
+ inputDType0 = ArmNNToDType(inputs[0]->GetDataType());
+ tensors.push_back(new TosaSerializationTensor(inputName, inputShape0, inputDType0, {}));
+ }
+
+ std::vector<int32_t> outputShape0 = GetTosaTensorShape(outputs[0]->GetShape());
+ DType outputDType0 = ArmNNToDType(outputs[0]->GetDataType());
+ tensors.push_back(new TosaSerializationTensor(outputName, outputShape0, outputDType0, {}));
+
+ std::string clampInputNameStr = inputName;
+ if (inputDType0 == tosa::DType::DType_INT8 || inputDType0 == tosa::DType::DType_INT16)
+ {
+ std::string outputNameRescale = std::string("intermediate0_") + GetUniqueTosaMappingID();
+ clampInputNameStr = outputNameRescale;
+
+ double scale = inputs[0]->GetQuantizationScale() / outputs[0]->GetQuantizationScale();
+ int32_t input_zp = inputs[0]->GetQuantizationOffset();
+ int32_t output_zp = outputs[0]->GetQuantizationOffset();
+
+ clamp_min = output_zp;
+
+ if (desc->m_Function == ActivationFunction::BoundedReLu)
+ {
+ clamp_max = static_cast<int32_t>(std::round(desc->m_A / outputs[0]->GetQuantizationScale())) + output_zp;
+ }
+
+ if (inputDType0 == tosa::DType::DType_INT8)
+ {
+ clamp_min =
+ clamp_min < std::numeric_limits<int8_t>::min() ? std::numeric_limits<int8_t>::min() : clamp_min;
+ clamp_max =
+ clamp_max > std::numeric_limits<int8_t>::max() ? std::numeric_limits<int8_t>::max() : clamp_max;
+ }
+ else
+ {
+ clamp_min =
+ clamp_min < std::numeric_limits<int16_t>::min() ? std::numeric_limits<int16_t>::min() : clamp_min;
+ clamp_max =
+ clamp_max > std::numeric_limits<int16_t>::max() ? std::numeric_limits<int16_t>::max() : clamp_max;
+ }
+
+ TosaSerializationOperator* rescaleOp = nullptr;
+ CreateRescaleTosaOperator(inputName,
+ outputNameRescale,
+ scale,
+ input_zp,
+ output_zp,
+ false,
+ true,
+ &rescaleOp);
+ operators.push_back(rescaleOp);
+ tensors.push_back(new TosaSerializationTensor(outputNameRescale,
+ inputShape0,
+ inputDType0,
+ {}));
+ }
+
+ TosaClampAttribute attribute(clamp_min, clamp_max, 0, float_max);
+ auto* clamp_op = new TosaSerializationOperator(Op_CLAMP,
+ Attribute_ClampAttribute,
+ &attribute,
+ {clampInputNameStr},
+ {outputName});
+ operators.push_back(clamp_op);
+
+ // operatorInputNames/operatorOutputNames ends up being the same as
+ // blockInputNames/blockOutputNames for one-to-one ArmNN to Tosa mappings
+ return new TosaSerializationBasicBlock(blockName, // name
+ mainName, // region name
+ operators, // operators
+ tensors, // tensors
+ {inputName}, // inputs
+ {outputName}); // outputs
+}
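A worked example of the quantized clamp-bound arithmetic above, with hypothetical numbers (output scale 0.1, zero point -20) rather than values taken from the patch:

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <iostream>
#include <limits>

int main()
{
    // Hypothetical BoundedReLu(6.0) on QAsymmS8 data, following the logic above.
    const float m_A = 6.0f;           // upper bound in real units
    const float outputScale = 0.1f;   // assumed quantization scale
    const int32_t output_zp = -20;    // assumed zero point

    // Real 0.0 maps to the zero point; real 6.0 maps to round(6.0 / 0.1) + zp.
    int32_t clamp_min = output_zp;                                        // -20
    int32_t clamp_max = static_cast<int32_t>(std::round(m_A / outputScale))
                        + output_zp;                                      // 60 - 20 = 40

    // Clip to the representable int8 range, as the operator mapping does.
    clamp_min = std::max<int32_t>(clamp_min, std::numeric_limits<int8_t>::min());
    clamp_max = std::min<int32_t>(clamp_max, std::numeric_limits<int8_t>::max());

    std::cout << clamp_min << ' ' << clamp_max << '\n';   // prints: -20 40
}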
diff --git a/src/backends/tosaCommon/operatorMappings/ReluOperator.hpp b/src/backends/tosaCommon/operatorMappings/ReluOperator.hpp
new file mode 100644
index 0000000000..9c8f999949
--- /dev/null
+++ b/src/backends/tosaCommon/operatorMappings/ReluOperator.hpp
@@ -0,0 +1,20 @@
+//
+// Copyright © 2024 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <Layer.hpp>
+
+#include <tosa_serialization_handler.h>
+
+#include "TosaOperatorUtils.hpp"
+
+using namespace armnn;
+using namespace tosa;
+
+TosaSerializationBasicBlock* ConvertReluToTosaOperator(const Layer* layer,
+ const std::vector<const TensorInfo*>& inputs,
+ const std::vector<const TensorInfo*>& outputs,
+ const ActivationDescriptor* activationDescriptor);
diff --git a/src/backends/tosaCommon/operatorMappings/TosaCommonOperators.hpp b/src/backends/tosaCommon/operatorMappings/TosaCommonOperators.hpp
index a1a90812cd..9d7ff1e4c9 100644
--- a/src/backends/tosaCommon/operatorMappings/TosaCommonOperators.hpp
+++ b/src/backends/tosaCommon/operatorMappings/TosaCommonOperators.hpp
@@ -5,7 +5,8 @@
#pragma once
-#include "ActivationOperator.hpp"
+#include "ReluOperator.hpp"
+#include "LeakyReluOperator.hpp"
#include "AvgPool2DIgnoreValueOperator.hpp"
#include "ConcatOperator.hpp"
#include "ConstantOperator.hpp"
diff --git a/src/backends/tosaCommon/operatorMappings/TosaOperatorUtils.hpp b/src/backends/tosaCommon/operatorMappings/TosaOperatorUtils.hpp
index f566504a40..18bdbe3318 100644
--- a/src/backends/tosaCommon/operatorMappings/TosaOperatorUtils.hpp
+++ b/src/backends/tosaCommon/operatorMappings/TosaOperatorUtils.hpp
@@ -502,10 +502,3 @@ inline void CreateConstTosaOperator(const std::string& outputName,
tensor = new TosaSerializationTensor(outputName, shape, dtype, uint8Data);
ARMNN_THROW_MSG_IF_FALSE(tensor, armnn::Exception, "CreateConstTosaOperator: failed to create tensor");
}
-
-// Macro to preserve usage of a code block as the TOSA library version advances. Parameters
-// specify the minimum version required by the code block.
-#define TOSA_COMPAT_VERSION(_major, _minor, _patch) \
- (TOSA_VERSION_MAJOR >= _major) || \
- (TOSA_VERSION_MINOR >= _minor) || \
- (TOSA_VERSION_PATCH >= _patch)
diff --git a/src/backends/tosaReference/test/TosaRefEndToEndTests.cpp b/src/backends/tosaReference/test/TosaRefEndToEndTests.cpp
index f5da79c04a..22fd782a1a 100644
--- a/src/backends/tosaReference/test/TosaRefEndToEndTests.cpp
+++ b/src/backends/tosaReference/test/TosaRefEndToEndTests.cpp
@@ -30,24 +30,73 @@ TEST_SUITE("TosaRefEndToEnd")
static std::vector<BackendId> tosaDefaultBackends = { "TosaRef" };
// Activation
-//LeakyRelu
+// LeakyRelu
TEST_CASE("TosaRefLeakyReluActivationFloat32")
{
ActivationEndToEndTest<DataType::Float32>(tosaDefaultBackends, ActivationFunction::LeakyReLu, 1.f, 0, 0.01f);
}
+
TEST_CASE("TosaRefLeakyReluActivationFloat16")
{
ActivationEndToEndTest<DataType::Float16>(tosaDefaultBackends, ActivationFunction::LeakyReLu, 0.3f, 5, 0.01f);
}
+
TEST_CASE("TosaRefLeakyReluActivationInt8")
{
ActivationEndToEndTest<DataType::QAsymmS8>(tosaDefaultBackends, ActivationFunction::LeakyReLu, 0.6f, 7, 0.01f);
}
+
TEST_CASE("TosaRefLeakyReluActivationInt16")
{
ActivationEndToEndTest<DataType::QSymmS16>(tosaDefaultBackends, ActivationFunction::LeakyReLu, 0.15f, 0, 0.01f);
}
+// Relu
+TEST_CASE("TosaRefReLuEndToEndTestQAsymmS8")
+{
+ ActivationEndToEndTest<armnn::DataType::QAsymmS8>(tosaDefaultBackends, ActivationFunction::ReLu);
+}
+
+TEST_CASE("TosaRefReLuEndToEndTestFloat32")
+{
+ ActivationEndToEndTest<armnn::DataType::Float32>(tosaDefaultBackends, ActivationFunction::ReLu);
+}
+
+TEST_CASE("TosaRefReLuEndToEndTestFloat16")
+{
+ ActivationEndToEndTest<armnn::DataType::Float16>(tosaDefaultBackends, ActivationFunction::ReLu);
+}
+
+TEST_CASE("TosaRefReLuEndToEndTestQSymmS16")
+{
+ ActivationEndToEndTest<armnn::DataType::QSymmS16>(tosaDefaultBackends, ActivationFunction::ReLu);
+}
+
+// BoundedRelu
+TEST_CASE("TosaRefBoundedReLuEndToEndTestFloat32")
+{
+ ActivationEndToEndTest<armnn::DataType::Float32>(
+ tosaDefaultBackends, ActivationFunction::BoundedReLu, 1.0f, 0, 6.0f, 0.0f);
+}
+
+TEST_CASE("TosaRefBoundedReLuEndToEndTestFloat16")
+{
+ ActivationEndToEndTest<armnn::DataType::Float16>(
+ tosaDefaultBackends, ActivationFunction::BoundedReLu, 1.0f, 0, 6.0f, 0.0f);
+}
+
+TEST_CASE("TosaRefBoundedReLuEndToEndTestQAsymmS8")
+{
+ ActivationEndToEndTest<armnn::DataType::QAsymmS8>(
+ tosaDefaultBackends, ActivationFunction::BoundedReLu, 1.0f, 0, 6.0f, 0.0f);
+}
+
+TEST_CASE("TosaRefBoundedReLuEndToEndTestQSymmS16")
+{
+ ActivationEndToEndTest<armnn::DataType::QSymmS16>(
+ tosaDefaultBackends, ActivationFunction::BoundedReLu, 1.0f, 0, 6.0f, 0.0f);
+}
+
// Addition
TEST_CASE("TosaRefAdditionEndtoEndTestFloat32")
{