author     Cathal Corbett <cathal.corbett@arm.com>  2024-06-27 14:53:50 +0100
committer  Cathal Corbett <cathal.corbett@arm.com>  2024-06-28 12:28:22 +0100
commit     66d0e7d5c6d3c271f72848a180616ebc2e3403cc (patch)
tree       dab3b4433d4e2145583b21084df415fcfd277340
parent     7177b72cdd36c0a5b0b52a5b3034b1c2b0f8a3ec (diff)
download   armnn-66d0e7d5c6d3c271f72848a180616ebc2e3403cc.tar.gz
IVGCVSW-8443 Implement Activation:ReLu in TosaRef
Signed-off-by: Cathal Corbett <cathal.corbett@arm.com>
Change-Id: I2a1f7be2b0b4558b0e0e8dbd2350ccf21b5babf8
-rw-r--r--  src/backends/tosaCommon/TosaMappings.cpp |   6
-rw-r--r--  src/backends/tosaCommon/operatorMappings/ActivationOperator.hpp |  20
-rw-r--r--  src/backends/tosaCommon/operatorMappings/CMakeLists.txt |   6
-rw-r--r--  src/backends/tosaCommon/operatorMappings/LeakyReluOperator.cpp (renamed from src/backends/tosaCommon/operatorMappings/ActivationOperator.cpp) |  77
-rw-r--r--  src/backends/tosaCommon/operatorMappings/LeakyReluOperator.hpp |  20
-rw-r--r--  src/backends/tosaCommon/operatorMappings/ReluOperator.cpp | 121
-rw-r--r--  src/backends/tosaCommon/operatorMappings/ReluOperator.hpp |  20
-rw-r--r--  src/backends/tosaCommon/operatorMappings/TosaCommonOperators.hpp |   3
-rw-r--r--  src/backends/tosaCommon/operatorMappings/TosaOperatorUtils.hpp |   7
-rw-r--r--  src/backends/tosaReference/test/TosaRefEndToEndTests.cpp |  21
10 files changed, 201 insertions, 100 deletions
diff --git a/src/backends/tosaCommon/TosaMappings.cpp b/src/backends/tosaCommon/TosaMappings.cpp
index 81391f8213..8608776471 100644
--- a/src/backends/tosaCommon/TosaMappings.cpp
+++ b/src/backends/tosaCommon/TosaMappings.cpp
@@ -28,7 +28,11 @@ TosaSerializationBasicBlock* GetTosaMapping(const Layer* layer,
auto activationDesc = PolymorphicDowncast<const ActivationDescriptor*>(&descriptor);
if (activationDesc->m_Function == ActivationFunction::LeakyReLu)
{
- return ConvertActivationToTosaOperator(layer, inputs, outputs, activationDesc);
+ return ConvertLeakyReluToTosaOperator(layer, inputs, outputs, activationDesc);
+ }
+ if (activationDesc->m_Function == ActivationFunction::ReLu)
+ {
+ return ConvertReluToTosaOperator(layer, inputs, outputs, activationDesc);
}
else
{
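
Note: as a quick reference for the dispatch above, the two activations that now have dedicated converters compute the usual element-wise functions. The sketch below is illustrative only (float case, not code from this patch); the converters emit equivalent TOSA graphs.

    #include <algorithm>

    // Illustrative scalar semantics only.
    inline float ReluRef(float x)                   { return std::max(x, 0.0f); }
    inline float LeakyReluRef(float x, float alpha) { return x >= 0.0f ? x : alpha * x; }
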
diff --git a/src/backends/tosaCommon/operatorMappings/ActivationOperator.hpp b/src/backends/tosaCommon/operatorMappings/ActivationOperator.hpp
deleted file mode 100644
index 7519f0c155..0000000000
--- a/src/backends/tosaCommon/operatorMappings/ActivationOperator.hpp
+++ /dev/null
@@ -1,20 +0,0 @@
-//
-// Copyright © 2024 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include <Layer.hpp>
-
-#include <tosa_serialization_handler.h>
-
-#include "TosaOperatorUtils.hpp"
-
-using namespace armnn;
-using namespace tosa;
-
-TosaSerializationBasicBlock* ConvertActivationToTosaOperator(const Layer* layer,
- const std::vector<const TensorInfo*>& inputs,
- const std::vector<const TensorInfo*>& outputs,
- const ActivationDescriptor* activationDescriptor);
diff --git a/src/backends/tosaCommon/operatorMappings/CMakeLists.txt b/src/backends/tosaCommon/operatorMappings/CMakeLists.txt
index eba9011c56..bd86958de1 100644
--- a/src/backends/tosaCommon/operatorMappings/CMakeLists.txt
+++ b/src/backends/tosaCommon/operatorMappings/CMakeLists.txt
@@ -4,8 +4,10 @@
#
list(APPEND armnnTosaBackendOperators_sources
- ActivationOperator.hpp
- ActivationOperator.cpp
+ LeakyReluOperator.hpp
+ LeakyReluOperator.cpp
+ ReluOperator.hpp
+ ReluOperator.cpp
AvgPool2DIgnoreValueOperator.hpp
AvgPool2DIgnoreValueOperator.cpp
ConcatOperator.hpp
diff --git a/src/backends/tosaCommon/operatorMappings/ActivationOperator.cpp b/src/backends/tosaCommon/operatorMappings/LeakyReluOperator.cpp
index c13555da6a..4d330818f1 100644
--- a/src/backends/tosaCommon/operatorMappings/ActivationOperator.cpp
+++ b/src/backends/tosaCommon/operatorMappings/LeakyReluOperator.cpp
@@ -7,33 +7,33 @@
// SPDX-License-Identifier: Apache-2.0
//
-#include "ActivationOperator.hpp"
+#include "LeakyReluOperator.hpp"
#include "TosaRescaleOperatorUtils.hpp"
#include <layers/ActivationLayer.hpp>
// This function is paraphrased from:
// tensorflow/compiler/mlir/tosa/transforms/legalize_tfl.cc from function ConvertTFLLeakyReluOp
-TosaSerializationBasicBlock* ConvertActivationToTosaOperator(const Layer* layer,
- const std::vector<const TensorInfo*>& inputs,
- const std::vector<const TensorInfo*>& outputs,
- const ActivationDescriptor* activationDescriptor)
+TosaSerializationBasicBlock* ConvertLeakyReluToTosaOperator(const Layer* layer,
+ const std::vector<const TensorInfo*>& inputs,
+ const std::vector<const TensorInfo*>& outputs,
+ const ActivationDescriptor* activationDescriptor)
{
if (inputs.size() != 1)
{
- throw armnn::Exception("ConvertActivationToTosaOperator: 1 input tensors required.");
+ throw armnn::Exception("ConvertLeakyReluToTosaOperator: 1 input tensors required.");
}
if (outputs.size() != 1)
{
- throw armnn::Exception("ConvertActivationToTosaOperator: 1 output tensor required.");
+ throw armnn::Exception("ConvertLeakyReluToTosaOperator: 1 output tensor required.");
}
std::string inputName = std::string("input_");
std::string outputNameAlpha = std::string("intermediate1_") + GetUniqueTosaMappingID();
std::string outputNameMul = std::string("intermediate2_") + GetUniqueTosaMappingID();
std::string outputName = std::string("output0_");
- std::string blockName = std::string("Op_ACTIVATION_block_") + GetUniqueTosaMappingID();
+ std::string blockName = std::string("Op_LEAKY_RELU_block_") + GetUniqueTosaMappingID();
// If a layer is present then the block will be used for execution, so input and output names need to be determined
// using the previous and following layers so the graph is connected correctly. For validation this doesn't matter.
@@ -61,7 +61,6 @@ TosaSerializationBasicBlock* ConvertActivationToTosaOperator(const Layer* layer,
DType outputDType0 = ArmNNToDType(outputs[0]->GetDataType());
tensors.push_back(new TosaSerializationTensor(outputName, outputShape0, outputDType0, {}));
-#if TOSA_COMPAT_VERSION(0, 60, 0)
std::string outputNameMAXMIN= std::string("intermediate3_") + GetUniqueTosaMappingID();
if (inputDType0 == DType::DType_FP32 ||
@@ -211,64 +210,4 @@ TosaSerializationBasicBlock* ConvertActivationToTosaOperator(const Layer* layer,
{inputName}, // inputs
{outputName}); // outputs
}
-#else
- std::string outputNameZero = std::string("intermediate3_") + GetUniqueTosaMappingID();
- std::string outputNameGE = std::string("intermediate4_") + GetUniqueTosaMappingID();
-
- // const_zero
- TosaSerializationOperator* zeroOp = nullptr;
- TosaSerializationTensor* zeroTensor = nullptr;
- CreateConstTosaOperator<float>(outputNameZero,
- 0.0f,
- inputDType0,
- inputShape0,
- zeroOp,
- zeroTensor);
- tensors.push_back(zeroTensor);
-
- // const_alpha
- TosaSerializationOperator* alphaOp = nullptr;
- TosaSerializationTensor* alphaTensor = nullptr;
- CreateConstTosaOperator<float>(outputNameAlpha,
- activationDescriptor->m_A,
- inputDType0,
- inputShape0,
- alphaOp,
- alphaTensor);
- tensors.push_back(alphaTensor);
-
- // mul
- int32_t shift = 0;
- TosaMulAttribute mulAttribute(shift);
- TosaSerializationOperator* mulOp = new TosaSerializationOperator(Op_MUL,
- Attribute_MulAttribute,
- &mulAttribute,
- {inputName, outputNameAlpha},
- {outputNameMul});
- tensors.push_back(new TosaSerializationTensor(outputNameMul, inputShape0, inputDType0, {}));
-
- // greater_equal
- TosaSerializationOperator* geOp = new TosaSerializationOperator(Op_GREATER_EQUAL,
- Attribute_NONE,
- nullptr,
- {inputName, outputNameZero},
- {outputNameGE});
- tensors.push_back(new TosaSerializationTensor(outputNameGE, outputShape0, DType::DType_BOOL, {}));
-
- // select
- TosaSerializationOperator* selOp = new TosaSerializationOperator(Op_SELECT,
- Attribute_NONE,
- nullptr,
- {outputNameGE, inputName, outputNameMul},
- {outputName});
-
- // operatorInputNames/operatorOutputNames ends up being the same as
- // blockInputNames/blockOutputNames for one-to-one ArmNN to Tosa mappings
- return new TosaSerializationBasicBlock(blockName, // name
- mainName, // region name
- {zeroOp, alphaOp, mulOp, geOp, selOp}, // operators
- tensors, // tensors
- {inputName}, // inputs
- {outputName}); // outputs
-#endif
}
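
Note: the block deleted above was the pre-0.60 fallback, which assembled LeakyReLU from CONST(0), CONST(alpha), MUL, GREATER_EQUAL and SELECT; only the TOSA 0.60+ path (the MAXMIN intermediate kept above) remains. A rough element-wise sketch of what the removed op sequence computed, illustrative only and not code from this patch:

    #include <cstddef>

    // select(x >= 0, x, alpha * x), spelled out op by op as in the removed graph.
    void LeakyReluSelectRef(const float* in, float* out, std::size_t count, float alpha)
    {
        for (std::size_t i = 0; i < count; ++i)
        {
            const bool  ge  = in[i] >= 0.0f;  // GREATER_EQUAL against the zero constant
            const float mul = alpha * in[i];  // MUL with the alpha constant
            out[i] = ge ? in[i] : mul;        // SELECT(ge, x, alpha * x)
        }
    }
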
diff --git a/src/backends/tosaCommon/operatorMappings/LeakyReluOperator.hpp b/src/backends/tosaCommon/operatorMappings/LeakyReluOperator.hpp
new file mode 100644
index 0000000000..839bdeb183
--- /dev/null
+++ b/src/backends/tosaCommon/operatorMappings/LeakyReluOperator.hpp
@@ -0,0 +1,20 @@
+//
+// Copyright © 2024 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <Layer.hpp>
+
+#include <tosa_serialization_handler.h>
+
+#include "TosaOperatorUtils.hpp"
+
+using namespace armnn;
+using namespace tosa;
+
+TosaSerializationBasicBlock* ConvertLeakyReluToTosaOperator(const Layer* layer,
+ const std::vector<const TensorInfo*>& inputs,
+ const std::vector<const TensorInfo*>& outputs,
+ const ActivationDescriptor* activationDescriptor);
diff --git a/src/backends/tosaCommon/operatorMappings/ReluOperator.cpp b/src/backends/tosaCommon/operatorMappings/ReluOperator.cpp
new file mode 100644
index 0000000000..bd1a59670e
--- /dev/null
+++ b/src/backends/tosaCommon/operatorMappings/ReluOperator.cpp
@@ -0,0 +1,121 @@
+//
+// Copyright © 2024 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+//
+// Copyright © 2020 The TensorFlow Authors. All Rights Reserved.
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "LeakyReluOperator.hpp"
+#include "TosaRescaleOperatorUtils.hpp"
+
+#include <layers/ActivationLayer.hpp>
+
+// This function is paraphrased from:
+// tensorflow/compiler/mlir/tosa/transforms/legalize_tfl.cc from function ConvertTFLReluOp
+TosaSerializationBasicBlock* ConvertReluToTosaOperator(const Layer* layer,
+ const std::vector<const TensorInfo*>& inputs,
+ const std::vector<const TensorInfo*>& outputs,
+ const ActivationDescriptor*)
+{
+ if (inputs.size() != 1)
+ {
+ throw armnn::Exception("ConvertReluToTosaOperator: 1 input tensors required.");
+ }
+
+ if (outputs.size() != 1)
+ {
+ throw armnn::Exception("ConvertReluToTosaOperator: 1 output tensor required.");
+ }
+
+ std::string inputName = std::string("input_");
+ std::string outputName = std::string("output0_");
+ std::string blockName = std::string("Op_RELU_block_") + GetUniqueTosaMappingID();
+
+ // If a layer is present then the block will be used for execution, so input and output names need to be determined
+ // using the previous and following layers so the graph is connected correctly. For validation this doesn't matter.
+ if (layer != nullptr)
+ {
+ inputName = GenerateUniqueInputName(layer->GetInputSlot(0));
+ outputName = GenerateUniqueOutputName(*layer);
+ }
+
+ std::vector<TosaSerializationTensor*> tensors;
+ std::vector<TosaSerializationOperator*> operators;
+
+ // Only add input tensors if connected layer is an input layer.
+ // As intermediate or constant tensors will be created separately.
+ // There also can't be duplicate tensor.
+ std::vector<int32_t> inputShape0;
+ DType inputDType0 = DType::DType_UNKNOWN;
+ if(inputName.find("input_") != std::string::npos)
+ {
+ inputShape0 = GetTosaTensorShape(inputs[0]->GetShape());
+ inputDType0 = ArmNNToDType(inputs[0]->GetDataType());
+ tensors.push_back(new TosaSerializationTensor(inputName, inputShape0, inputDType0, {}));
+ }
+
+ std::vector<int32_t> outputShape0 = GetTosaTensorShape(outputs[0]->GetShape());
+ DType outputDType0 = ArmNNToDType(outputs[0]->GetDataType());
+ tensors.push_back(new TosaSerializationTensor(outputName, outputShape0, outputDType0, {}));
+
+ int32_t clamp_min = 0;
+ int32_t clamp_max = std::numeric_limits<int32_t>::max();
+ std::string clampInputNameStr = inputName;
+ if (inputDType0 == tosa::DType::DType_INT8 || inputDType0 == tosa::DType::DType_INT16)
+ {
+ std::string outputNameRescale = std::string("intermediate0_") + GetUniqueTosaMappingID();
+ clampInputNameStr = outputNameRescale;
+
+ double scale = inputs[0]->GetQuantizationScale() / outputs[0]->GetQuantizationScale();
+ int32_t input_zp = inputs[0]->GetQuantizationOffset();
+ int32_t output_zp = outputs[0]->GetQuantizationOffset();
+
+ clamp_min = outputs[0]->GetQuantizationOffset();
+ if (inputDType0 == tosa::DType::DType_INT8)
+ {
+ clamp_min =
+ clamp_min < std::numeric_limits<int8_t>::min() ? std::numeric_limits<int8_t>::min() : clamp_min;
+ clamp_max = std::numeric_limits<int8_t>::max();
+ }
+ else
+ {
+ clamp_min =
+ clamp_min < std::numeric_limits<int16_t>::min() ? std::numeric_limits<int16_t>::min() : clamp_min;
+ clamp_max = std::numeric_limits<int16_t>::max();
+ }
+
+ TosaSerializationOperator* rescaleOp = nullptr;
+ CreateRescaleTosaOperator(inputName,
+ outputNameRescale,
+ scale,
+ input_zp,
+ output_zp,
+ false,
+ true,
+ &rescaleOp);
+ operators.push_back(rescaleOp);
+ tensors.push_back(new TosaSerializationTensor(outputNameRescale,
+ inputShape0,
+ inputDType0,
+ {}));
+ }
+
+ TosaClampAttribute attribute(clamp_min, clamp_max, 0, std::numeric_limits<float>::max());
+ auto* clamp_op = new TosaSerializationOperator(Op_CLAMP,
+ Attribute_ClampAttribute,
+ &attribute,
+ {clampInputNameStr},
+ {outputName});
+ operators.push_back(clamp_op);
+
+ // operatorInputNames/operatorOutputNames ends up being the same as
+ // blockInputNames/blockOutputNames for one-to-one ArmNN to Tosa mappings
+ return new TosaSerializationBasicBlock(blockName, // name
+ mainName, // region name
+ operators, // operators
+ tensors, // tensors
+ {inputName}, // inputs
+ {outputName}); // outputs
+}
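
Note: the converter above lowers ReLU to a single CLAMP for float types; for QAsymmS8/QSymmS16 it first inserts a RESCALE (scale = input scale / output scale, with the input and output zero points) so that the clamp floor can simply be the output zero point. A per-element sketch of that quantised path, illustrative only and not code from this patch:

    #include <algorithm>
    #include <cmath>
    #include <cstdint>

    // RESCALE followed by CLAMP, as built above, viewed element-wise.
    int32_t QuantizedReluRef(int32_t x, double inScale, int32_t inZp,
                             double outScale, int32_t outZp, int32_t typeMax)
    {
        // RESCALE: move x into the output quantisation space.
        const int32_t rescaled =
            static_cast<int32_t>(std::lround((x - inZp) * (inScale / outScale))) + outZp;
        // CLAMP: [output zero point, numeric max of the output type].
        return std::min(std::max(rescaled, outZp), typeMax);
    }
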
diff --git a/src/backends/tosaCommon/operatorMappings/ReluOperator.hpp b/src/backends/tosaCommon/operatorMappings/ReluOperator.hpp
new file mode 100644
index 0000000000..9c8f999949
--- /dev/null
+++ b/src/backends/tosaCommon/operatorMappings/ReluOperator.hpp
@@ -0,0 +1,20 @@
+//
+// Copyright © 2024 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <Layer.hpp>
+
+#include <tosa_serialization_handler.h>
+
+#include "TosaOperatorUtils.hpp"
+
+using namespace armnn;
+using namespace tosa;
+
+TosaSerializationBasicBlock* ConvertReluToTosaOperator(const Layer* layer,
+ const std::vector<const TensorInfo*>& inputs,
+ const std::vector<const TensorInfo*>& outputs,
+ const ActivationDescriptor* activationDescriptor);
diff --git a/src/backends/tosaCommon/operatorMappings/TosaCommonOperators.hpp b/src/backends/tosaCommon/operatorMappings/TosaCommonOperators.hpp
index a1a90812cd..9d7ff1e4c9 100644
--- a/src/backends/tosaCommon/operatorMappings/TosaCommonOperators.hpp
+++ b/src/backends/tosaCommon/operatorMappings/TosaCommonOperators.hpp
@@ -5,7 +5,8 @@
#pragma once
-#include "ActivationOperator.hpp"
+#include "ReluOperator.hpp"
+#include "LeakyReluOperator.hpp"
#include "AvgPool2DIgnoreValueOperator.hpp"
#include "ConcatOperator.hpp"
#include "ConstantOperator.hpp"
diff --git a/src/backends/tosaCommon/operatorMappings/TosaOperatorUtils.hpp b/src/backends/tosaCommon/operatorMappings/TosaOperatorUtils.hpp
index f566504a40..18bdbe3318 100644
--- a/src/backends/tosaCommon/operatorMappings/TosaOperatorUtils.hpp
+++ b/src/backends/tosaCommon/operatorMappings/TosaOperatorUtils.hpp
@@ -502,10 +502,3 @@ inline void CreateConstTosaOperator(const std::string& outputName,
tensor = new TosaSerializationTensor(outputName, shape, dtype, uint8Data);
ARMNN_THROW_MSG_IF_FALSE(tensor, armnn::Exception, "CreateConstTosaOperator: failed to created tensor");
}
-
-// Macro to preserve usage of a code block as the TOSA library version advances. Parameters
-// specify the minimum version required by the code block.
-#define TOSA_COMPAT_VERSION(_major, _minor, _patch) \
- (TOSA_VERSION_MAJOR >= _major) || \
- (TOSA_VERSION_MINOR >= _minor) || \
- (TOSA_VERSION_PATCH >= _patch)
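
Note: the removed TOSA_COMPAT_VERSION macro OR'd the three component comparisons, so it was not a true lexicographic "at least version X.Y.Z" test; with its only use in ActivationOperator.cpp gone it is dropped rather than fixed. If such a guard is ever needed again, a hypothetical lexicographic form (not part of this patch) could look like:

    // Hypothetical sketch only: "TOSA serialization version >= _major._minor._patch".
    #define TOSA_VERSION_AT_LEAST(_major, _minor, _patch)                      \
        ((TOSA_VERSION_MAJOR > (_major)) ||                                    \
         (TOSA_VERSION_MAJOR == (_major) && TOSA_VERSION_MINOR > (_minor)) ||  \
         (TOSA_VERSION_MAJOR == (_major) && TOSA_VERSION_MINOR == (_minor) &&  \
          TOSA_VERSION_PATCH >= (_patch)))
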
diff --git a/src/backends/tosaReference/test/TosaRefEndToEndTests.cpp b/src/backends/tosaReference/test/TosaRefEndToEndTests.cpp
index f5da79c04a..09a3d44c02 100644
--- a/src/backends/tosaReference/test/TosaRefEndToEndTests.cpp
+++ b/src/backends/tosaReference/test/TosaRefEndToEndTests.cpp
@@ -48,6 +48,27 @@ TEST_CASE("TosaRefLeakyReluActivationInt16")
ActivationEndToEndTest<DataType::QSymmS16>(tosaDefaultBackends, ActivationFunction::LeakyReLu, 0.15f, 0, 0.01f);
}
+//Relu
+TEST_CASE("TosaRefReLuEndToEndTestQAsymmS8")
+{
+ ActivationEndToEndTest<armnn::DataType::QAsymmS8>(tosaDefaultBackends, ActivationFunction::ReLu);
+}
+
+TEST_CASE("TosaRefReLuEndToEndTestFloat32")
+{
+ ActivationEndToEndTest<armnn::DataType::Float32>(tosaDefaultBackends, ActivationFunction::ReLu);
+}
+
+TEST_CASE("TosaRefReLuEndToEndTestFloat16")
+{
+ ActivationEndToEndTest<armnn::DataType::Float16>(tosaDefaultBackends, ActivationFunction::ReLu);
+}
+
+TEST_CASE("TosaRefReLuEndToEndTestQSymmS16")
+{
+ ActivationEndToEndTest<armnn::DataType::QSymmS16>(tosaDefaultBackends, ActivationFunction::ReLu);
+}
+
// Addition
TEST_CASE("TosaRefAdditionEndtoEndTestFloat32")
{