aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--src/backends/backendsCommon/WorkloadData.cpp2
-rw-r--r--src/backends/backendsCommon/test/DepthwiseConvolution2dEndToEndTests.hpp3
-rw-r--r--src/backends/reference/workloads/BatchMatMulImpl.cpp12
-rw-r--r--src/backends/tosaCommon/TosaMappings.cpp26
-rw-r--r--src/backends/tosaCommon/operatorMappings/ActivationOperator.hpp20
-rw-r--r--src/backends/tosaCommon/operatorMappings/CMakeLists.txt8
-rw-r--r--src/backends/tosaCommon/operatorMappings/ConstantOperator.cpp29
-rw-r--r--src/backends/tosaCommon/operatorMappings/ConstantOperator.hpp5
-rw-r--r--src/backends/tosaCommon/operatorMappings/DepthwiseConv2dOperator.cpp182
-rw-r--r--src/backends/tosaCommon/operatorMappings/DepthwiseConv2dOperator.hpp17
-rw-r--r--src/backends/tosaCommon/operatorMappings/LeakyReluOperator.cpp (renamed from src/backends/tosaCommon/operatorMappings/ActivationOperator.cpp)77
-rw-r--r--src/backends/tosaCommon/operatorMappings/LeakyReluOperator.hpp20
-rw-r--r--src/backends/tosaCommon/operatorMappings/ReluOperator.cpp156
-rw-r--r--src/backends/tosaCommon/operatorMappings/ReluOperator.hpp20
-rw-r--r--src/backends/tosaCommon/operatorMappings/TosaCommonOperators.hpp4
-rw-r--r--src/backends/tosaCommon/operatorMappings/TosaOperatorUtils.hpp7
-rw-r--r--src/backends/tosaCommon/operatorMappings/TosaRescaleOperatorUtils.hpp3
-rw-r--r--src/backends/tosaReference/TosaRefLayerSupport.cpp13
-rw-r--r--src/backends/tosaReference/test/TosaRefEndToEndTests.cpp59
19 files changed, 549 insertions, 114 deletions
diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp
index 7055092be2..5334641803 100644
--- a/src/backends/backendsCommon/WorkloadData.cpp
+++ b/src/backends/backendsCommon/WorkloadData.cpp
@@ -4305,7 +4305,7 @@ void BatchMatMulQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) cons
auto axesXToMul = BatchMatMulDescriptor::GetAxesToMul(m_Parameters.m_DataLayoutX,
inputXInfoAfterParams.GetShape());
auto axesYToMul = BatchMatMulDescriptor::GetAxesToMul(m_Parameters.m_DataLayoutY,
- inputXInfoBeforeParams.GetShape());
+ inputYInfoBeforeParams.GetShape());
if(inputXInfoAfterParams.GetShape()[axesXToMul.second]
!= inputYInfoAfterParams.GetShape()[axesYToMul.first])
diff --git a/src/backends/backendsCommon/test/DepthwiseConvolution2dEndToEndTests.hpp b/src/backends/backendsCommon/test/DepthwiseConvolution2dEndToEndTests.hpp
index a2c369b692..778b4823c3 100644
--- a/src/backends/backendsCommon/test/DepthwiseConvolution2dEndToEndTests.hpp
+++ b/src/backends/backendsCommon/test/DepthwiseConvolution2dEndToEndTests.hpp
@@ -45,7 +45,7 @@ armnn::INetworkPtr CreateDepthwiseConvolution2dNetwork(const armnn::DepthwiseCon
} // anonymous namespace
-template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType>
+template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType = ArmnnType>
void DepthwiseConvolution2dEndToEnd(const std::vector<armnn::BackendId>& backends,
armnn::DataLayout dataLayout)
{
@@ -168,6 +168,7 @@ void DepthwiseConvolution2dEndToEnd(const std::vector<armnn::BackendId>& backend
{
PermuteTensorNhwcToNchw(inputInfo, inputData);
PermuteTensorNhwcToNchw(outputInfo, expectedOutputData);
+ PermuteTensorNhwcToNchw(weightsInfo, weightsData);
}
// Quantize data
diff --git a/src/backends/reference/workloads/BatchMatMulImpl.cpp b/src/backends/reference/workloads/BatchMatMulImpl.cpp
index 8e169cbab8..e0c36c5db8 100644
--- a/src/backends/reference/workloads/BatchMatMulImpl.cpp
+++ b/src/backends/reference/workloads/BatchMatMulImpl.cpp
@@ -42,13 +42,16 @@ void BatchMatMul::ApplyBatchMatMul()
inputXInfo.GetShape());
auto axesYToMul = BatchMatMulDescriptor::GetAxesToMul(params.m_DataLayoutY,
inputYInfo.GetShape());
+
+    // The inputYRowSize (or inputXColSize) needs to be obtained using the original (unadjusted) axis value,
+ // because it's obtained from the original tensor shape
+ unsigned int inputYRowSize = inputYInfo.GetShape()[axesYToMul.first];
+
AdjustAxesToMulForUnequalRanks(axesXToMul, axesYToMul);
unsigned int inputXColDim = axesXToMul.second;
unsigned int inputYRowDim = axesYToMul.first;
- unsigned int inputYRowSize = inputYInfo.GetShape()[inputYRowDim];
-
auto batchMatMulOperation = [&](const std::vector<unsigned int>& curIdx)
{
float sum = 0.0f;
@@ -437,10 +440,11 @@ unsigned int BatchMatMul::CalcFlatIdx(DataSlot type, const std::vector<unsigned
{
unsigned int result = idx[idx.size()-1];
unsigned int dimMultiplier = 1;
- unsigned int offset;
+ unsigned int offset = 0;
// -2 because final dim is already accounted for in the multiplier (last dim is just a multiplier of 1x)
- for(unsigned int i = static_cast<unsigned int>(idx.size()-2); static_cast<int>(i) >= 0; i--)
+    // Check offset in relation to i, to stop calculating flat index once all input shape fields are considered
+ for(unsigned int i = static_cast<unsigned int>(idx.size()-2); static_cast<int>(i) >= 0 && (i + 1) > offset; i--)
{
switch(type)
{
diff --git a/src/backends/tosaCommon/TosaMappings.cpp b/src/backends/tosaCommon/TosaMappings.cpp
index 0e44d54aab..bc1376b9cc 100644
--- a/src/backends/tosaCommon/TosaMappings.cpp
+++ b/src/backends/tosaCommon/TosaMappings.cpp
@@ -28,7 +28,12 @@ TosaSerializationBasicBlock* GetTosaMapping(const Layer* layer,
auto activationDesc = PolymorphicDowncast<const ActivationDescriptor*>(&descriptor);
if (activationDesc->m_Function == ActivationFunction::LeakyReLu)
{
- return ConvertActivationToTosaOperator(layer, inputs, outputs, activationDesc);
+ return ConvertLeakyReluToTosaOperator(layer, inputs, outputs, activationDesc);
+ }
+ if (activationDesc->m_Function == ActivationFunction::ReLu ||
+ activationDesc->m_Function == ActivationFunction::BoundedReLu)
+ {
+ return ConvertReluToTosaOperator(layer, inputs, outputs, activationDesc);
}
else
{
@@ -58,13 +63,30 @@ TosaSerializationBasicBlock* GetTosaMapping(const Layer* layer,
}
case LayerType::Constant:
{
- return ConvertConstantToTosaOperator(layer, outputs);
+ bool isDepthwiseConv2dWeights = false;
+ if(layer)
+ {
+ // The difference in layout of weights in Tensorflow/ArmNN and the layout
+ // described in TOSA means we must permute the weights from [1, H, W, C * M] to [H, W, C, M].
+ unsigned int slotIdx = layer->GetOutputSlot().GetConnection(0)->GetSlotIndex();
+ LayerType type = layer->GetOutputSlot().GetConnection(0)->GetOwningLayer().GetType();
+ if(type == LayerType::DepthwiseConvolution2d && slotIdx == 1)
+ {
+ isDepthwiseConv2dWeights = true;
+ }
+ }
+ return ConvertConstantToTosaOperator(layer, outputs, isDepthwiseConv2dWeights);
}
case LayerType::Convolution2d:
{
auto conv2dDesc = PolymorphicDowncast<const Convolution2dDescriptor*>(&descriptor);
return ConvertConv2dToTosaOperator(layer, inputs, outputs, conv2dDesc);
}
+ case LayerType::DepthwiseConvolution2d:
+ {
+ auto conv2dDesc = PolymorphicDowncast<const DepthwiseConvolution2dDescriptor*>(&descriptor);
+ return ConvertDepthwiseConv2dToTosaOperator(layer, inputs, outputs, conv2dDesc);
+ }
case LayerType::Pooling2d:
{
auto poolDesc = PolymorphicDowncast<const Pooling2dDescriptor*>(&descriptor);
diff --git a/src/backends/tosaCommon/operatorMappings/ActivationOperator.hpp b/src/backends/tosaCommon/operatorMappings/ActivationOperator.hpp
deleted file mode 100644
index 7519f0c155..0000000000
--- a/src/backends/tosaCommon/operatorMappings/ActivationOperator.hpp
+++ /dev/null
@@ -1,20 +0,0 @@
-//
-// Copyright © 2024 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include <Layer.hpp>
-
-#include <tosa_serialization_handler.h>
-
-#include "TosaOperatorUtils.hpp"
-
-using namespace armnn;
-using namespace tosa;
-
-TosaSerializationBasicBlock* ConvertActivationToTosaOperator(const Layer* layer,
- const std::vector<const TensorInfo*>& inputs,
- const std::vector<const TensorInfo*>& outputs,
- const ActivationDescriptor* activationDescriptor);
diff --git a/src/backends/tosaCommon/operatorMappings/CMakeLists.txt b/src/backends/tosaCommon/operatorMappings/CMakeLists.txt
index 58a64574d6..bd86958de1 100644
--- a/src/backends/tosaCommon/operatorMappings/CMakeLists.txt
+++ b/src/backends/tosaCommon/operatorMappings/CMakeLists.txt
@@ -4,8 +4,10 @@
#
list(APPEND armnnTosaBackendOperators_sources
- ActivationOperator.hpp
- ActivationOperator.cpp
+ LeakyReluOperator.hpp
+ LeakyReluOperator.cpp
+ ReluOperator.hpp
+ ReluOperator.cpp
AvgPool2DIgnoreValueOperator.hpp
AvgPool2DIgnoreValueOperator.cpp
ConcatOperator.hpp
@@ -14,6 +16,8 @@ list(APPEND armnnTosaBackendOperators_sources
ConstantOperator.cpp
Conv2dOperator.hpp
Conv2dOperator.cpp
+ DepthwiseConv2dOperator.hpp
+ DepthwiseConv2dOperator.cpp
ElementwiseBinaryOperator.hpp
ElementwiseBinaryOperator.cpp
ElementwiseUnaryOperator.cpp
diff --git a/src/backends/tosaCommon/operatorMappings/ConstantOperator.cpp b/src/backends/tosaCommon/operatorMappings/ConstantOperator.cpp
index c7cd7d7969..f5920fe45e 100644
--- a/src/backends/tosaCommon/operatorMappings/ConstantOperator.cpp
+++ b/src/backends/tosaCommon/operatorMappings/ConstantOperator.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022, 2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -8,7 +8,8 @@
#include <layers/ConstantLayer.hpp>
TosaSerializationBasicBlock* ConvertConstantToTosaOperator(const Layer* layer,
- const std::vector<const TensorInfo*>& outputs)
+ const std::vector<const TensorInfo*>& outputs,
+ bool isDepthwiseConv2dWeights = false)
{
std::string outputName = std::string("constant_");
std::string blockName = std::string("Op_CONST_block_") + GetUniqueTosaMappingID();
@@ -30,7 +31,29 @@ TosaSerializationBasicBlock* ConvertConstantToTosaOperator(const Layer* layer,
auto* op = new TosaSerializationOperator(Op_CONST, Attribute_NONE, nullptr, {}, {outputName});
- std::vector<int32_t> outputShape0 = GetTosaTensorShape(outputs[0]->GetShape());
+ std::vector<int32_t> outputShape0;
+
+ if(isDepthwiseConv2dWeights)
+ {
+ // Constant weights are connected to a depthwise conv2d layer. From this get the depthwise conv2d input shape.
+ TensorShape inputShape =
+ layer->GetOutputSlot().GetConnection(0)->GetOwningLayer().GetInputSlot(0).GetTensorInfo().GetShape();
+
+ unsigned int multiplier = outputs[0]->GetShape()[3]/inputShape[3];
+
+ // TOSA requires depthwise conv2d kernel to be converted from [1, H, W, C * M] to layout [H, W, C, M]
+ outputShape0 = {
+ static_cast<int32_t>(outputs[0]->GetShape()[1]),
+ static_cast<int32_t>(outputs[0]->GetShape()[2]),
+ static_cast<int32_t>(inputShape[3]),
+ static_cast<int32_t>(multiplier)
+ };
+ }
+ else
+ {
+ outputShape0 = GetTosaTensorShape(outputs[0]->GetShape());
+ }
+
DType outputDType0 = ArmNNToDType(outputs[0]->GetDataType());
// Setup output tensor with constant tensor data if available.
diff --git a/src/backends/tosaCommon/operatorMappings/ConstantOperator.hpp b/src/backends/tosaCommon/operatorMappings/ConstantOperator.hpp
index 598e041232..934d24db83 100644
--- a/src/backends/tosaCommon/operatorMappings/ConstantOperator.hpp
+++ b/src/backends/tosaCommon/operatorMappings/ConstantOperator.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -11,5 +11,6 @@ using namespace armnn;
using namespace tosa;
TosaSerializationBasicBlock* ConvertConstantToTosaOperator(const Layer* layer,
- const std::vector<const TensorInfo*>& outputs);
+ const std::vector<const TensorInfo*>& outputs,
+ bool isDepthwiseConv2dWeights);
diff --git a/src/backends/tosaCommon/operatorMappings/DepthwiseConv2dOperator.cpp b/src/backends/tosaCommon/operatorMappings/DepthwiseConv2dOperator.cpp
new file mode 100644
index 0000000000..4fc64ee492
--- /dev/null
+++ b/src/backends/tosaCommon/operatorMappings/DepthwiseConv2dOperator.cpp
@@ -0,0 +1,182 @@
+//
+// Copyright © 2024 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "DepthwiseConv2dOperator.hpp"
+#include "TosaRescaleOperatorUtils.hpp"
+#include <ResolveType.hpp>
+
+TosaSerializationBasicBlock* ConvertDepthwiseConv2dToTosaOperator(
+ const Layer* layer,
+ const std::vector<const TensorInfo*>& inputs,
+ const std::vector<const TensorInfo*>& outputs,
+ const DepthwiseConvolution2dDescriptor* conv2dDescriptor)
+{
+ std::vector<std::string> inputNames;
+ std::string outputName = std::string("output0_");
+ std::string blockName = std::string("Op_DEPTHWISE_CONV2D_block_") + GetUniqueTosaMappingID();
+
+ DType inputDType0 = ArmNNToDType(inputs[0]->GetDataType());
+ DType outputDType0 = ArmNNToDType(outputs[0]->GetDataType());
+
+ // Set input names for validation purposes only.
+ if(layer == nullptr)
+ {
+ inputNames.emplace_back("input_0");
+ inputNames.emplace_back("input_1");
+ if(conv2dDescriptor->m_BiasEnabled)
+ {
+ inputNames.emplace_back("input_2");
+ }
+ }
+ // If a layer is present then the block will be used for execution, so input and output names need to be
+ // determined using the previous and following layers so the graph is connected correctly.
+ // For validation this doesn't matter.
+ else
+ {
+ // Get the layer connected to the input slot and determine unique tensor names.
+ for (uint32_t i = 0; i < inputs.size(); ++i)
+ {
+ std::string inputName = GenerateUniqueInputName(layer->GetInputSlot(i));
+ inputNames.push_back(inputName);
+ }
+
+ // Determine unique output tensor name.
+ outputName = GenerateUniqueOutputName(*layer);
+ }
+
+ std::vector<TosaSerializationTensor*> tensors;
+ std::vector<TosaSerializationOperator*> operators;
+
+ // Setup input Tensor
+ // Only add tensor if connected layer is an input layer.
+ // As intermediate or constant tensors will be created separately.
+ // There also can't be duplicate tensors.
+ if(inputNames[0].find("input_") != std::string::npos)
+ {
+ std::vector<int32_t> inputShape0 = GetTosaTensorShape(inputs[0]->GetShape());
+ tensors.push_back(new TosaSerializationTensor(inputNames[0], inputShape0, inputDType0, {}));
+ }
+
+ // Only add input tensors if weights and bias are not constant or if running validation.
+ // Constant tensors will be created in the ConvertConstantToTosaOperator function.
+ if(!inputs[1]->IsConstant() || layer == nullptr)
+ {
+ std::vector<int32_t> inputShape0 = GetTosaTensorShape(inputs[0]->GetShape());
+ std::vector<int32_t> inputShape1 = GetTosaTensorShape(inputs[1]->GetShape());
+
+ int32_t multiplier = inputShape1[3]/inputShape0[3];
+
+    // TOSA requires depthwise conv2d kernel to be converted from [1, H, W, C * M] to [H, W, C, M]
+ std::vector<int32_t> inputShapeHWCM = {
+ inputShape1[1], inputShape1[2], inputShape0[3], multiplier
+ };
+
+ DType inputDType1 = ArmNNToDType(inputs[1]->GetDataType());
+
+ tensors.push_back(new TosaSerializationTensor(inputNames[1], inputShapeHWCM, inputDType1, {}));
+ }
+
+ if(conv2dDescriptor->m_BiasEnabled)
+ {
+ if(!inputs[2]->IsConstant() || layer == nullptr)
+ {
+ std::vector<int32_t> inputShape2 = GetTosaTensorShape(inputs[2]->GetShape());
+ DType inputDType2 = ArmNNToDType(inputs[2]->GetDataType());
+
+ tensors.push_back(new TosaSerializationTensor(inputNames[2], inputShape2, inputDType2, {}));
+ }
+ }
+ else
+ {
+ // If bias is disabled, create a constant bias of 0 as three inputs are required.
+ std::string constantName = std::string("constant_") + GetUniqueTosaMappingID();
+
+ operators.push_back(new TosaSerializationOperator(Op_CONST, Attribute_NONE, nullptr, {}, {constantName}));
+
+ // The size of the bias must match the channels dimension, so get the correct index.
+ unsigned int index = (conv2dDescriptor->m_DataLayout == DataLayout::NHWC) ? 3 : 1;
+
+ const DType dType = (inputDType0 == DType_INT8) ? DType_INT32 : outputDType0;
+ std::vector<float> data(outputs[0]->GetShape()[index], 0);
+
+ std::vector<uint8_t> uint8Data;
+ TosaSerializationHandler::ConvertF32toU8(data, uint8Data);
+
+ tensors.push_back(new TosaSerializationTensor(constantName,
+ {static_cast<int32_t>(outputs[0]->GetShape()[index])},
+ dType,
+ uint8Data));
+ inputNames.emplace_back(constantName);
+ }
+
+ // Setup Output Tensor
+ std::vector<int32_t> outputShape0 = {GetTosaTensorShape(outputs[0]->GetShape())};
+ std::string outputConv2dName;
+ bool isInputInt8 = (inputDType0 == DType_INT8);
+ if (isInputInt8)
+ {
+ outputConv2dName = std::string("intermediate0_") + GetUniqueTosaMappingID();
+ tensors.push_back(new TosaSerializationTensor(outputConv2dName, outputShape0, DType_INT32, {}));
+ }
+ else
+ {
+ tensors.push_back(new TosaSerializationTensor(outputName, outputShape0, outputDType0, {}));
+ }
+
+ // Set up CONV2D operator
+ std::vector<int> pad = {static_cast<int>(conv2dDescriptor->m_PadTop),
+ static_cast<int>(conv2dDescriptor->m_PadBottom),
+ static_cast<int>(conv2dDescriptor->m_PadLeft),
+ static_cast<int>(conv2dDescriptor->m_PadRight)};
+ std::vector<int> stride = {static_cast<int>(conv2dDescriptor->m_StrideY),
+ static_cast<int>(conv2dDescriptor->m_StrideX)};
+ std::vector<int> dilation = {static_cast<int>(conv2dDescriptor->m_DilationY),
+ static_cast<int>(conv2dDescriptor->m_DilationX)};
+ TosaConvAttribute attribute(pad, stride, dilation,
+ inputs[0]->GetQuantizationOffset(), // input_zp
+ inputs[1]->GetQuantizationOffset(), // weight_zp
+ false); // local_bound
+
+ std::string& convOutStr = isInputInt8 ? outputConv2dName : outputName;
+ auto* conv2d_op = new TosaSerializationOperator(Op_DEPTHWISE_CONV2D,
+ Attribute_ConvAttribute,
+ &attribute,
+ inputNames,
+ {convOutStr});
+ operators.push_back(conv2d_op);
+
+ if (isInputInt8)
+ {
+ int32_t output_zp = outputs[0]->GetQuantizationOffset();
+ double output_scale = outputs[0]->GetQuantizationScales()[0];
+ double input_scale = inputs[0]->GetQuantizationScales()[0];
+ const std::vector<float>& weight_scales = inputs[1]->GetQuantizationScales();
+
+ TosaSerializationOperator* rescaleOp = nullptr;
+ CreateRescaleTosaOperatorPerChannel(outputConv2dName,
+ outputName,
+ 0,
+ output_zp,
+ true,
+ true,
+ input_scale,
+ output_scale,
+ weight_scales,
+ &rescaleOp);
+ operators.push_back(rescaleOp);
+ tensors.push_back(new TosaSerializationTensor(outputName,
+ outputShape0,
+ DType_INT8, {}));
+ }
+
+ // operatorInputNames/operatorOutputNames ends up being the same as
+ // blockInputNames/blockOutputNames for one-to-one ArmNN to TOSA mappings
+ return new TosaSerializationBasicBlock(blockName, // name
+ mainName, // region name
+ operators, // operators
+ tensors, // tensors
+ inputNames, // inputs
+ {outputName}); // outputs
+} \ No newline at end of file
diff --git a/src/backends/tosaCommon/operatorMappings/DepthwiseConv2dOperator.hpp b/src/backends/tosaCommon/operatorMappings/DepthwiseConv2dOperator.hpp
new file mode 100644
index 0000000000..2282330b31
--- /dev/null
+++ b/src/backends/tosaCommon/operatorMappings/DepthwiseConv2dOperator.hpp
@@ -0,0 +1,17 @@
+//
+// Copyright © 2024 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "TosaOperatorUtils.hpp"
+
+using namespace armnn;
+using namespace tosa;
+
+TosaSerializationBasicBlock* ConvertDepthwiseConv2dToTosaOperator(
+ const Layer* layer,
+ const std::vector<const TensorInfo*>& inputs,
+ const std::vector<const TensorInfo*>& outputs,
+ const DepthwiseConvolution2dDescriptor* conv2dDescriptor);
diff --git a/src/backends/tosaCommon/operatorMappings/ActivationOperator.cpp b/src/backends/tosaCommon/operatorMappings/LeakyReluOperator.cpp
index c13555da6a..4d330818f1 100644
--- a/src/backends/tosaCommon/operatorMappings/ActivationOperator.cpp
+++ b/src/backends/tosaCommon/operatorMappings/LeakyReluOperator.cpp
@@ -7,33 +7,33 @@
// SPDX-License-Identifier: Apache-2.0
//
-#include "ActivationOperator.hpp"
+#include "LeakyReluOperator.hpp"
#include "TosaRescaleOperatorUtils.hpp"
#include <layers/ActivationLayer.hpp>
// This function is paraphrased from:
// tensorflow/compiler/mlir/tosa/transforms/legalize_tfl.cc from function ConvertTFLLeakyReluOp
-TosaSerializationBasicBlock* ConvertActivationToTosaOperator(const Layer* layer,
- const std::vector<const TensorInfo*>& inputs,
- const std::vector<const TensorInfo*>& outputs,
- const ActivationDescriptor* activationDescriptor)
+TosaSerializationBasicBlock* ConvertLeakyReluToTosaOperator(const Layer* layer,
+ const std::vector<const TensorInfo*>& inputs,
+ const std::vector<const TensorInfo*>& outputs,
+ const ActivationDescriptor* activationDescriptor)
{
if (inputs.size() != 1)
{
- throw armnn::Exception("ConvertActivationToTosaOperator: 1 input tensors required.");
+ throw armnn::Exception("ConvertLeakyReluToTosaOperator: 1 input tensors required.");
}
if (outputs.size() != 1)
{
- throw armnn::Exception("ConvertActivationToTosaOperator: 1 output tensor required.");
+ throw armnn::Exception("ConvertLeakyReluToTosaOperator: 1 output tensor required.");
}
std::string inputName = std::string("input_");
std::string outputNameAlpha = std::string("intermediate1_") + GetUniqueTosaMappingID();
std::string outputNameMul = std::string("intermediate2_") + GetUniqueTosaMappingID();
std::string outputName = std::string("output0_");
- std::string blockName = std::string("Op_ACTIVATION_block_") + GetUniqueTosaMappingID();
+ std::string blockName = std::string("Op_LEAKY_RELU_block_") + GetUniqueTosaMappingID();
// If a layer is present then the block will be used for execution, so input and output names need to be determined
// using the previous and following layers so the graph is connected correctly. For validation this doesn't matter.
@@ -61,7 +61,6 @@ TosaSerializationBasicBlock* ConvertActivationToTosaOperator(const Layer* layer,
DType outputDType0 = ArmNNToDType(outputs[0]->GetDataType());
tensors.push_back(new TosaSerializationTensor(outputName, outputShape0, outputDType0, {}));
-#if TOSA_COMPAT_VERSION(0, 60, 0)
std::string outputNameMAXMIN= std::string("intermediate3_") + GetUniqueTosaMappingID();
if (inputDType0 == DType::DType_FP32 ||
@@ -211,64 +210,4 @@ TosaSerializationBasicBlock* ConvertActivationToTosaOperator(const Layer* layer,
{inputName}, // inputs
{outputName}); // outputs
}
-#else
- std::string outputNameZero = std::string("intermediate3_") + GetUniqueTosaMappingID();
- std::string outputNameGE = std::string("intermediate4_") + GetUniqueTosaMappingID();
-
- // const_zero
- TosaSerializationOperator* zeroOp = nullptr;
- TosaSerializationTensor* zeroTensor = nullptr;
- CreateConstTosaOperator<float>(outputNameZero,
- 0.0f,
- inputDType0,
- inputShape0,
- zeroOp,
- zeroTensor);
- tensors.push_back(zeroTensor);
-
- // const_alpha
- TosaSerializationOperator* alphaOp = nullptr;
- TosaSerializationTensor* alphaTensor = nullptr;
- CreateConstTosaOperator<float>(outputNameAlpha,
- activationDescriptor->m_A,
- inputDType0,
- inputShape0,
- alphaOp,
- alphaTensor);
- tensors.push_back(alphaTensor);
-
- // mul
- int32_t shift = 0;
- TosaMulAttribute mulAttribute(shift);
- TosaSerializationOperator* mulOp = new TosaSerializationOperator(Op_MUL,
- Attribute_MulAttribute,
- &mulAttribute,
- {inputName, outputNameAlpha},
- {outputNameMul});
- tensors.push_back(new TosaSerializationTensor(outputNameMul, inputShape0, inputDType0, {}));
-
- // greater_equal
- TosaSerializationOperator* geOp = new TosaSerializationOperator(Op_GREATER_EQUAL,
- Attribute_NONE,
- nullptr,
- {inputName, outputNameZero},
- {outputNameGE});
- tensors.push_back(new TosaSerializationTensor(outputNameGE, outputShape0, DType::DType_BOOL, {}));
-
- // select
- TosaSerializationOperator* selOp = new TosaSerializationOperator(Op_SELECT,
- Attribute_NONE,
- nullptr,
- {outputNameGE, inputName, outputNameMul},
- {outputName});
-
- // operatorInputNames/operatorOutputNames ends up being the same as
- // blockInputNames/blockOutputNames for one-to-one ArmNN to Tosa mappings
- return new TosaSerializationBasicBlock(blockName, // name
- mainName, // region name
- {zeroOp, alphaOp, mulOp, geOp, selOp}, // operators
- tensors, // tensors
- {inputName}, // inputs
- {outputName}); // outputs
-#endif
}
diff --git a/src/backends/tosaCommon/operatorMappings/LeakyReluOperator.hpp b/src/backends/tosaCommon/operatorMappings/LeakyReluOperator.hpp
new file mode 100644
index 0000000000..839bdeb183
--- /dev/null
+++ b/src/backends/tosaCommon/operatorMappings/LeakyReluOperator.hpp
@@ -0,0 +1,20 @@
+//
+// Copyright © 2024 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <Layer.hpp>
+
+#include <tosa_serialization_handler.h>
+
+#include "TosaOperatorUtils.hpp"
+
+using namespace armnn;
+using namespace tosa;
+
+TosaSerializationBasicBlock* ConvertLeakyReluToTosaOperator(const Layer* layer,
+ const std::vector<const TensorInfo*>& inputs,
+ const std::vector<const TensorInfo*>& outputs,
+ const ActivationDescriptor* activationDescriptor);
diff --git a/src/backends/tosaCommon/operatorMappings/ReluOperator.cpp b/src/backends/tosaCommon/operatorMappings/ReluOperator.cpp
new file mode 100644
index 0000000000..541b39cd8d
--- /dev/null
+++ b/src/backends/tosaCommon/operatorMappings/ReluOperator.cpp
@@ -0,0 +1,156 @@
+//
+// Copyright © 2024 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+//
+// Copyright © 2020 The TensorFlow Authors. All Rights Reserved.
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "LeakyReluOperator.hpp"
+#include "TosaRescaleOperatorUtils.hpp"
+
+#include <layers/ActivationLayer.hpp>
+
+// This function is paraphrased from:
+// tensorflow/compiler/mlir/tosa/transforms/legalize_tfl.cc from function ConvertTFLReluOp
+TosaSerializationBasicBlock* ConvertReluToTosaOperator(const Layer* layer,
+ const std::vector<const TensorInfo*>& inputs,
+ const std::vector<const TensorInfo*>& outputs,
+ const ActivationDescriptor* desc)
+{
+ if (inputs.size() != 1)
+ {
+ throw armnn::Exception("ConvertReluToTosaOperator: 1 input tensors required.");
+ }
+
+ if (outputs.size() != 1)
+ {
+ throw armnn::Exception("ConvertReluToTosaOperator: 1 output tensor required.");
+ }
+
+ std::string inputName = std::string("input_");
+ std::string outputName = std::string("output0_");
+ std::string blockName = "";
+
+ int32_t clamp_min = 0;
+ int32_t clamp_max = 0;
+ float float_max = 0.0f;
+ switch (desc->m_Function)
+ {
+ case ActivationFunction::ReLu:
+ {
+ clamp_max = std::numeric_limits<int32_t>::max();
+ float_max = std::numeric_limits<float>::max();
+ blockName = std::string("Op_RELU_block_") + GetUniqueTosaMappingID();
+ break;
+ }
+ case ActivationFunction::BoundedReLu:
+ {
+ clamp_max = static_cast<int32_t>(desc->m_A);
+ float_max = desc->m_A;
+ blockName = std::string("Op_BOUNDED_RELU_block_") + GetUniqueTosaMappingID();
+ break;
+ }
+ case ActivationFunction::LeakyReLu:
+ {
+ throw Exception("LeakyRelu TOSA mappings are performed in ConvertLeakyReluToTosaOperator().");
+ }
+ default:
+ {
+ throw Exception("Activation function is not supported in ConvertReluToTosaOperator().");
+ }
+ }
+
+ // If a layer is present then the block will be used for execution, so input and output names need to be determined
+ // using the previous and following layers so the graph is connected correctly. For validation this doesn't matter.
+ if (layer != nullptr)
+ {
+ inputName = GenerateUniqueInputName(layer->GetInputSlot(0));
+ outputName = GenerateUniqueOutputName(*layer);
+ }
+
+ std::vector<TosaSerializationTensor*> tensors;
+ std::vector<TosaSerializationOperator*> operators;
+
+ // Only add input tensors if connected layer is an input layer.
+ // As intermediate or constant tensors will be created separately.
+    // There also can't be duplicate tensors.
+ std::vector<int32_t> inputShape0;
+ DType inputDType0 = DType::DType_UNKNOWN;
+ if(inputName.find("input_") != std::string::npos)
+ {
+ inputShape0 = GetTosaTensorShape(inputs[0]->GetShape());
+ inputDType0 = ArmNNToDType(inputs[0]->GetDataType());
+ tensors.push_back(new TosaSerializationTensor(inputName, inputShape0, inputDType0, {}));
+ }
+
+ std::vector<int32_t> outputShape0 = GetTosaTensorShape(outputs[0]->GetShape());
+ DType outputDType0 = ArmNNToDType(outputs[0]->GetDataType());
+ tensors.push_back(new TosaSerializationTensor(outputName, outputShape0, outputDType0, {}));
+
+ std::string clampInputNameStr = inputName;
+ if (inputDType0 == tosa::DType::DType_INT8 || inputDType0 == tosa::DType::DType_INT16)
+ {
+ std::string outputNameRescale = std::string("intermediate0_") + GetUniqueTosaMappingID();
+ clampInputNameStr = outputNameRescale;
+
+ double scale = inputs[0]->GetQuantizationScale() / outputs[0]->GetQuantizationScale();
+ int32_t input_zp = inputs[0]->GetQuantizationOffset();
+ int32_t output_zp = outputs[0]->GetQuantizationOffset();
+
+ clamp_min = output_zp;
+
+ if (desc->m_Function == ActivationFunction::BoundedReLu)
+ {
+ clamp_max = static_cast<int32_t>(std::round(desc->m_A / outputs[0]->GetQuantizationScale())) + output_zp;
+ }
+
+ if (inputDType0 == tosa::DType::DType_INT8)
+ {
+ clamp_min =
+ clamp_min < std::numeric_limits<int8_t>::min() ? std::numeric_limits<int8_t>::min() : clamp_min;
+ clamp_max =
+ clamp_max > std::numeric_limits<int8_t>::max() ? std::numeric_limits<int8_t>::max() : clamp_max;
+ }
+ else
+ {
+ clamp_min =
+ clamp_min < std::numeric_limits<int16_t>::min() ? std::numeric_limits<int16_t>::min() : clamp_min;
+ clamp_max =
+ clamp_max > std::numeric_limits<int16_t>::max() ? std::numeric_limits<int16_t>::max() : clamp_max;
+ }
+
+ TosaSerializationOperator* rescaleOp = nullptr;
+ CreateRescaleTosaOperator(inputName,
+ outputNameRescale,
+ scale,
+ input_zp,
+ output_zp,
+ false,
+ true,
+ &rescaleOp);
+ operators.push_back(rescaleOp);
+ tensors.push_back(new TosaSerializationTensor(outputNameRescale,
+ inputShape0,
+ inputDType0,
+ {}));
+ }
+
+ TosaClampAttribute attribute(clamp_min, clamp_max, 0, float_max);
+ auto* clamp_op = new TosaSerializationOperator(Op_CLAMP,
+ Attribute_ClampAttribute,
+ &attribute,
+ {clampInputNameStr},
+ {outputName});
+ operators.push_back(clamp_op);
+
+ // operatorInputNames/operatorOutputNames ends up being the same as
+ // blockInputNames/blockOutputNames for one-to-one ArmNN to Tosa mappings
+ return new TosaSerializationBasicBlock(blockName, // name
+ mainName, // region name
+ operators, // operators
+ tensors, // tensors
+ {inputName}, // inputs
+ {outputName}); // outputs
+}
diff --git a/src/backends/tosaCommon/operatorMappings/ReluOperator.hpp b/src/backends/tosaCommon/operatorMappings/ReluOperator.hpp
new file mode 100644
index 0000000000..9c8f999949
--- /dev/null
+++ b/src/backends/tosaCommon/operatorMappings/ReluOperator.hpp
@@ -0,0 +1,20 @@
+//
+// Copyright © 2024 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <Layer.hpp>
+
+#include <tosa_serialization_handler.h>
+
+#include "TosaOperatorUtils.hpp"
+
+using namespace armnn;
+using namespace tosa;
+
+TosaSerializationBasicBlock* ConvertReluToTosaOperator(const Layer* layer,
+ const std::vector<const TensorInfo*>& inputs,
+ const std::vector<const TensorInfo*>& outputs,
+ const ActivationDescriptor* activationDescriptor);
diff --git a/src/backends/tosaCommon/operatorMappings/TosaCommonOperators.hpp b/src/backends/tosaCommon/operatorMappings/TosaCommonOperators.hpp
index fd0574f30d..9d7ff1e4c9 100644
--- a/src/backends/tosaCommon/operatorMappings/TosaCommonOperators.hpp
+++ b/src/backends/tosaCommon/operatorMappings/TosaCommonOperators.hpp
@@ -5,11 +5,13 @@
#pragma once
-#include "ActivationOperator.hpp"
+#include "ReluOperator.hpp"
+#include "LeakyReluOperator.hpp"
#include "AvgPool2DIgnoreValueOperator.hpp"
#include "ConcatOperator.hpp"
#include "ConstantOperator.hpp"
#include "Conv2dOperator.hpp"
+#include "DepthwiseConv2dOperator.hpp"
#include "ElementwiseBinaryOperator.hpp"
#include "ElementwiseUnaryOperator.hpp"
#include "Pooling2DOperator.hpp"
diff --git a/src/backends/tosaCommon/operatorMappings/TosaOperatorUtils.hpp b/src/backends/tosaCommon/operatorMappings/TosaOperatorUtils.hpp
index f566504a40..18bdbe3318 100644
--- a/src/backends/tosaCommon/operatorMappings/TosaOperatorUtils.hpp
+++ b/src/backends/tosaCommon/operatorMappings/TosaOperatorUtils.hpp
@@ -502,10 +502,3 @@ inline void CreateConstTosaOperator(const std::string& outputName,
tensor = new TosaSerializationTensor(outputName, shape, dtype, uint8Data);
ARMNN_THROW_MSG_IF_FALSE(tensor, armnn::Exception, "CreateConstTosaOperator: failed to created tensor");
}
-
-// Macro to preserve usage of a code block as the TOSA library version advances. Parameters
-// specify the minimum version required by the code block.
-#define TOSA_COMPAT_VERSION(_major, _minor, _patch) \
- (TOSA_VERSION_MAJOR >= _major) || \
- (TOSA_VERSION_MINOR >= _minor) || \
- (TOSA_VERSION_PATCH >= _patch)
diff --git a/src/backends/tosaCommon/operatorMappings/TosaRescaleOperatorUtils.hpp b/src/backends/tosaCommon/operatorMappings/TosaRescaleOperatorUtils.hpp
index c37d6519bb..942872e5ed 100644
--- a/src/backends/tosaCommon/operatorMappings/TosaRescaleOperatorUtils.hpp
+++ b/src/backends/tosaCommon/operatorMappings/TosaRescaleOperatorUtils.hpp
@@ -208,8 +208,9 @@ inline void CreateRescaleTosaOperatorPerChannel(const std::string& inputName,
op_tensor_shifts.push_back(shift);
}
+ bool per_channel = weight_scales.size() == 1 ? false : true;
CreateRescaleTosaOperator(inputName, outputName, op_tensor_multipliers, op_tensor_shifts,
- input_zp, output_zp, double_round, scale32, true, op);
+ input_zp, output_zp, double_round, scale32, per_channel, op);
}
inline void CreateFromInt32RescaleTosaOperator(const std::string& inputName,
diff --git a/src/backends/tosaReference/TosaRefLayerSupport.cpp b/src/backends/tosaReference/TosaRefLayerSupport.cpp
index dac06676bf..38fd01b93c 100644
--- a/src/backends/tosaReference/TosaRefLayerSupport.cpp
+++ b/src/backends/tosaReference/TosaRefLayerSupport.cpp
@@ -73,6 +73,19 @@ bool TosaRefLayerSupport::IsLayerSupported(const LayerType& type,
}
break;
}
+ case LayerType::DepthwiseConvolution2d:
+ {
+ inputInfos.push_back(&infos[0]); // input
+ outputInfos.push_back(&infos[1]); // output
+ inputInfos.push_back(&infos[2]); // weights
+
+ auto conv2dDesc = PolymorphicDowncast<const DepthwiseConvolution2dDescriptor*>(&descriptor);
+ if(conv2dDesc->m_BiasEnabled)
+ {
+ inputInfos.push_back(&infos[3]); // bias
+ }
+ break;
+ }
case LayerType::ElementwiseUnary:
case LayerType::Pooling2d:
case LayerType::Quantize:
diff --git a/src/backends/tosaReference/test/TosaRefEndToEndTests.cpp b/src/backends/tosaReference/test/TosaRefEndToEndTests.cpp
index f86edd52f4..22fd782a1a 100644
--- a/src/backends/tosaReference/test/TosaRefEndToEndTests.cpp
+++ b/src/backends/tosaReference/test/TosaRefEndToEndTests.cpp
@@ -9,6 +9,7 @@
#include "backendsCommon/test/AdditionEndToEndTestImpl.hpp"
#include "backendsCommon/test/Convolution2dEndToEndTestImpl.hpp"
#include "backendsCommon/test/ConcatEndToEndTestImpl.hpp"
+#include "backendsCommon/test/DepthwiseConvolution2dEndToEndTests.hpp"
#include "backendsCommon/test/ElementwiseBinaryEndToEndTestImpl.hpp"
#include "backendsCommon/test/ElementwiseUnaryEndToEndTestImpl.hpp"
#include "backendsCommon/test/MultiplicationEndToEndTestImpl.hpp"
@@ -29,24 +30,73 @@ TEST_SUITE("TosaRefEndToEnd")
static std::vector<BackendId> tosaDefaultBackends = { "TosaRef" };
// Activation
-//LeakyRelu
+// LeakyRelu
TEST_CASE("TosaRefLeakyReluActivationFloat32")
{
ActivationEndToEndTest<DataType::Float32>(tosaDefaultBackends, ActivationFunction::LeakyReLu, 1.f, 0, 0.01f);
}
+
TEST_CASE("TosaRefLeakyReluActivationFloat16")
{
ActivationEndToEndTest<DataType::Float16>(tosaDefaultBackends, ActivationFunction::LeakyReLu, 0.3f, 5, 0.01f);
}
+
TEST_CASE("TosaRefLeakyReluActivationInt8")
{
ActivationEndToEndTest<DataType::QAsymmS8>(tosaDefaultBackends, ActivationFunction::LeakyReLu, 0.6f, 7, 0.01f);
}
+
TEST_CASE("TosaRefLeakyReluActivationInt16")
{
ActivationEndToEndTest<DataType::QSymmS16>(tosaDefaultBackends, ActivationFunction::LeakyReLu, 0.15f, 0, 0.01f);
}
+// Relu
+TEST_CASE("TosaRefReLuEndToEndTestQAsymmS8")
+{
+ ActivationEndToEndTest<armnn::DataType::QAsymmS8>(tosaDefaultBackends, ActivationFunction::ReLu);
+}
+
+TEST_CASE("TosaRefReLuEndToEndTestFloat32")
+{
+ ActivationEndToEndTest<armnn::DataType::Float32>(tosaDefaultBackends, ActivationFunction::ReLu);
+}
+
+TEST_CASE("TosaRefReLuEndToEndTestFloat16")
+{
+ ActivationEndToEndTest<armnn::DataType::Float16>(tosaDefaultBackends, ActivationFunction::ReLu);
+}
+
+TEST_CASE("TosaRefReLuEndToEndTestQSymmS16")
+{
+ ActivationEndToEndTest<armnn::DataType::QSymmS16>(tosaDefaultBackends, ActivationFunction::ReLu);
+}
+
+// BoundedRelu
+TEST_CASE("TosaRefBoundedReLuEndToEndTestFloat32")
+{
+ ActivationEndToEndTest<armnn::DataType::Float32>(
+ tosaDefaultBackends, ActivationFunction::BoundedReLu, 1.0f, 0, 6.0f, 0.0f);
+}
+
+TEST_CASE("TosaRefBoundedReLuEndToEndTestFloat16")
+{
+ ActivationEndToEndTest<armnn::DataType::Float16>(
+ tosaDefaultBackends, ActivationFunction::BoundedReLu, 1.0f, 0, 6.0f, 0.0f);
+}
+
+TEST_CASE("TosaRefBoundedReLuEndToEndTestQAsymmS8")
+{
+ ActivationEndToEndTest<armnn::DataType::QAsymmS8>(
+ tosaDefaultBackends, ActivationFunction::BoundedReLu, 1.0f, 0, 6.0f, 0.0f);
+}
+
+TEST_CASE("TosaRefBoundedReLuEndToEndTestQSymmS16")
+{
+ ActivationEndToEndTest<armnn::DataType::QSymmS16>(
+ tosaDefaultBackends, ActivationFunction::BoundedReLu, 1.0f, 0, 6.0f, 0.0f);
+}
+
// Addition
TEST_CASE("TosaRefAdditionEndtoEndTestFloat32")
{
@@ -129,6 +179,13 @@ TEST_CASE("TosaRefConv2dWithoutBiasEndtoEndTestInt8")
armnn::DataType::Signed32>(tosaDefaultBackends, armnn::DataLayout::NHWC, false);
}
+// DepthwiseConv2d
+TEST_CASE("TosaRefDepthwiseConv2dEndtoEndTestInt8")
+{
+ DepthwiseConvolution2dEndToEnd<armnn::DataType::QSymmS8,
+ armnn::DataType::Signed32>(tosaDefaultBackends, armnn::DataLayout::NHWC);
+}
+
// Elementwise Binary
//Add
TEST_CASE("TosaRefAddEndtoEndTestInt32")