author    Matthew Sloyan <matthew.sloyan@arm.com>  2022-12-08 13:38:23 +0000
committer Cathal Corbett <cathal.corbett@arm.com>  2022-12-15 10:00:12 +0000
commit    fc9d5e7d1e0c1a4d7fed4ebc363832e03c3e2543 (patch)
tree      5c1b7efd146256c2de9781ae5a60e22cf2d6142a /src/backends/tosaCommon/operatorMappings
parent    67fd526f8f7c79803d514a6045454049104eced9 (diff)
IVGCVSW-7204 Add TransposeConv2d support to TOSA Reference Backend
Signed-off-by: Matthew Sloyan <matthew.sloyan@arm.com>
Change-Id: I9bfd597afd41468f304edfbe5d7141378ce60d4f
Diffstat (limited to 'src/backends/tosaCommon/operatorMappings')
 src/backends/tosaCommon/operatorMappings/CMakeLists.txt              |   2 +
 src/backends/tosaCommon/operatorMappings/TosaCommonOperators.hpp     |   3 +-
 src/backends/tosaCommon/operatorMappings/TosaOperatorUtils.hpp       |  17 +
 src/backends/tosaCommon/operatorMappings/TransposeConv2dOperator.cpp | 161 +
 src/backends/tosaCommon/operatorMappings/TransposeConv2dOperator.hpp |  20 +
 5 files changed, 202 insertions(+), 1 deletion(-)
diff --git a/src/backends/tosaCommon/operatorMappings/CMakeLists.txt b/src/backends/tosaCommon/operatorMappings/CMakeLists.txt
index cb1d68e625..90c1a4f958 100644
--- a/src/backends/tosaCommon/operatorMappings/CMakeLists.txt
+++ b/src/backends/tosaCommon/operatorMappings/CMakeLists.txt
@@ -19,6 +19,8 @@ list(APPEND armnnTosaBackendOperators_sources
SliceOperator.hpp
SliceOperator.cpp
TosaOperatorUtils.hpp
+ TransposeConv2dOperator.hpp
+ TransposeConv2dOperator.cpp
)
add_library(armnnTosaBackendOperators OBJECT ${armnnTosaBackendOperators_sources})
diff --git a/src/backends/tosaCommon/operatorMappings/TosaCommonOperators.hpp b/src/backends/tosaCommon/operatorMappings/TosaCommonOperators.hpp
index a3597f0461..1a9d6be9c0 100644
--- a/src/backends/tosaCommon/operatorMappings/TosaCommonOperators.hpp
+++ b/src/backends/tosaCommon/operatorMappings/TosaCommonOperators.hpp
@@ -11,4 +11,5 @@
#include "AvgPool2DIgnoreValueOperator.hpp"
#include "Pooling2DOperator.hpp"
#include "ReshapeOperator.hpp"
-#include "SliceOperator.hpp" \ No newline at end of file
+#include "SliceOperator.hpp"
+#include "TransposeConv2dOperator.hpp" \ No newline at end of file
diff --git a/src/backends/tosaCommon/operatorMappings/TosaOperatorUtils.hpp b/src/backends/tosaCommon/operatorMappings/TosaOperatorUtils.hpp
index 288966badd..be2f53e413 100644
--- a/src/backends/tosaCommon/operatorMappings/TosaOperatorUtils.hpp
+++ b/src/backends/tosaCommon/operatorMappings/TosaOperatorUtils.hpp
@@ -75,6 +75,23 @@ inline std::string GenerateUniqueName(const Layer& layer, uint32_t layerSlot)
}
}
+// Function that generates a unique output name using the layer type, input slot and layer guid.
+inline std::string GenerateUniqueOutputName(const Layer& layer, uint32_t layerSlot)
+{
+ Layer& connectedLayer = layer.GetOutputSlot().GetConnection(0)->GetOwningLayer();
+
+ // If the layer connected to the output slot is an Output layer, use that layer
+ // and its id; otherwise use the current layer and id.
+ if(connectedLayer.GetType() == LayerType::Output)
+ {
+ return GenerateUniqueName(connectedLayer, layerSlot);
+ }
+ else
+ {
+ return GenerateUniqueName(layer, layerSlot);
+ }
+}
+
// Function to return unique int as a string to ensure uniqueness between all input, output and block names.
static int uniqueTosaMappingID = 0;
inline std::string GetUniqueTosaMappingID()
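
For context, the decision made by the new GenerateUniqueOutputName helper can be sketched in isolation. The stand-in types and the name format below are only illustrative assumptions; the real format produced by GenerateUniqueName is defined elsewhere in ArmNN and is not part of this diff.

#include <cstdint>
#include <iostream>
#include <string>

// Stand-ins for armnn::Layer and armnn::LayerType, for illustration only.
enum class FakeLayerType { TransposeConvolution2d, Output };
struct FakeLayer { FakeLayerType type; uint32_t guid; };

// Assumed name format "<kind><slot>_<guid>"; the real helper composes the
// layer type, slot and guid, but its exact output is not shown in this diff.
std::string MakeName(const FakeLayer& layer, uint32_t slot)
{
    std::string kind = (layer.type == FakeLayerType::Output) ? "output" : "intermediate";
    return kind + std::to_string(slot) + "_" + std::to_string(layer.guid);
}

// Mirrors the new helper: if the consumer of the output slot is an Output
// layer, name the tensor after that layer; otherwise after the current layer.
std::string MakeOutputName(const FakeLayer& current, const FakeLayer& consumer, uint32_t slot)
{
    return (consumer.type == FakeLayerType::Output) ? MakeName(consumer, slot)
                                                    : MakeName(current, slot);
}

int main()
{
    FakeLayer conv{FakeLayerType::TransposeConvolution2d, 42};
    FakeLayer graphOutput{FakeLayerType::Output, 43};
    std::cout << MakeOutputName(conv, graphOutput, 0) << "\n"; // prints "output0_43"
}
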
diff --git a/src/backends/tosaCommon/operatorMappings/TransposeConv2dOperator.cpp b/src/backends/tosaCommon/operatorMappings/TransposeConv2dOperator.cpp
new file mode 100644
index 0000000000..a0d58e2fa8
--- /dev/null
+++ b/src/backends/tosaCommon/operatorMappings/TransposeConv2dOperator.cpp
@@ -0,0 +1,161 @@
+//
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "TransposeConv2dOperator.hpp"
+
+#include "layers/TransposeConvolution2dLayer.hpp"
+
+TosaSerializationBasicBlock* ConvertTransposeConv2dToTosaOperator(const Layer* layer,
+ const std::vector<const TensorInfo*>& inputs,
+ const std::vector<const TensorInfo*>& outputs,
+ const TransposeConvolution2dDescriptor* descriptor)
+{
+ std::string input0Name = std::string("input0_");
+ std::string input1Name = std::string("constant_") + GetUniqueTosaMappingID();
+ std::string input2Name = std::string("constant_") + GetUniqueTosaMappingID();
+ std::string outputName = std::string("output0_");
+ std::string blockName = std::string("Op_TRANSPOSE_CONV2D_block_") + GetUniqueTosaMappingID();
+
+ // If a layer is present then the block will be used for execution, so the input and output names
+ // must be determined from the previous and following layers so that the graph is connected correctly.
+ // For validation this doesn't matter.
+ if(layer != nullptr)
+ {
+ Layer& connectedInputLayer = layer->GetInputSlot(0).GetConnectedOutputSlot()->GetOwningLayer();
+ input0Name = GenerateUniqueName(connectedInputLayer, 0);
+
+ outputName = GenerateUniqueOutputName(*layer, 0);
+ }
+
+ std::vector<TosaSerializationTensor*> tensors;
+ std::vector<TosaSerializationOperator*> operators;
+
+ // Set up the input tensor.
+ // Only add the tensor if the connected layer is an input layer;
+ // intermediate and constant tensors are created separately,
+ // and duplicate tensors are not allowed.
+ if(input0Name.find("input0_") != std::string::npos)
+ {
+ std::vector<int32_t> inputShape0 = GetTosaTensorShape(inputs[0]->GetShape());
+ DType inputDType0 = ArmNNToDType(inputs[0]->GetDataType());
+
+ tensors.push_back(new TosaSerializationTensor(input0Name, inputShape0, inputDType0, {}));
+ }
+
+ // Set up the weights tensor; the constant data will be copied during SetConstantTensorData.
+ operators.push_back(new TosaSerializationOperator(Op_CONST, Attribute_NONE, nullptr, {}, {input1Name}));
+
+ // During validation the TensorInfo can be retrieved from the inputs.
+ // During execution, it is only available through the layer so use m_Weight.
+ if(layer == nullptr)
+ {
+ std::vector<int32_t> inputShape1 = GetTosaTensorShape(inputs[1]->GetShape());
+ DType inputDType1 = ArmNNToDType(inputs[1]->GetDataType());
+
+ tensors.push_back(new TosaSerializationTensor(input1Name, inputShape1, inputDType1, {}));
+ }
+ else
+ {
+ auto transposeConv2dLayer = PolymorphicDowncast<const TransposeConvolution2dLayer*>(layer);
+
+ std::vector<int32_t> inputShape1 = GetTosaTensorShape(
+ transposeConv2dLayer->m_Weight->GetTensorInfo().GetShape());
+ DType inputDType1 = ArmNNToDType(transposeConv2dLayer->m_Weight->GetTensorInfo().GetDataType());
+
+ std::vector<uint8_t> uint8Data = ConvertConstantTensorDataToBuffer(transposeConv2dLayer->m_Weight);
+ tensors.push_back(new TosaSerializationTensor(input1Name, inputShape1, inputDType1, uint8Data));
+ }
+
+ // Set up the bias operator and tensor; the constant data will be copied during SetConstantTensorData.
+ operators.push_back(new TosaSerializationOperator(Op_CONST, Attribute_NONE, nullptr, {}, {input2Name}));
+
+ // During validation the TensorInfo can be retrieved from the inputs.
+ // During execution, it is only available through the layer so use m_Bias.
+ if(layer == nullptr && descriptor->m_BiasEnabled)
+ {
+ std::vector<int32_t> inputShape2 = GetTosaTensorShape(inputs[2]->GetShape());
+ DType inputDType2 = ArmNNToDType(inputs[2]->GetDataType());
+
+ tensors.push_back(new TosaSerializationTensor(input2Name, inputShape2, inputDType2, {}));
+ }
+ else if(descriptor->m_BiasEnabled)
+ {
+ auto transposeConv2dLayer = PolymorphicDowncast<const TransposeConvolution2dLayer*>(layer);
+
+ std::vector<int32_t> inputShape2 = GetTosaTensorShape(
+ transposeConv2dLayer->m_Bias->GetTensorInfo().GetShape());
+ DType inputDType2 = ArmNNToDType(transposeConv2dLayer->m_Bias->GetTensorInfo().GetDataType());
+
+ std::vector<uint8_t> uint8Data = ConvertConstantTensorDataToBuffer(transposeConv2dLayer->m_Bias);
+ tensors.push_back(new TosaSerializationTensor(input2Name, inputShape2, inputDType2, uint8Data));
+ }
+ else
+ {
+ // If bias is disabled, create a constant bias tensor of zeros, as three inputs are required.
+ // The size of the bias must match the channels dimension, so get the correct shape index.
+ unsigned int index = (descriptor->m_DataLayout == DataLayout::NHWC) ? 3 : 1;
+
+ std::vector<uint8_t> uint8Data;
+ std::vector<float> data(outputs[0]->GetShape()[index], 0.0f);
+
+ TosaSerializationHandler::ConvertF32toU8(data, uint8Data);
+
+ tensors.push_back(new TosaSerializationTensor(input2Name,
+ {static_cast<int32_t>(outputs[0]->GetShape()[index])},
+ DType_FP32,
+ uint8Data));
+ }
+
+ // Set up the output tensor.
+ std::vector<int32_t> outputShape0 = GetTosaTensorShape(outputs[0]->GetShape());
+ DType outputDType0 = ArmNNToDType(outputs[0]->GetDataType());
+
+ tensors.push_back(new TosaSerializationTensor(outputName, outputShape0, outputDType0, {}));
+
+ // Set up the TRANSPOSE_CONV2D operator.
+ // The TOSA Reference Model adds its padding to the output shape, whereas Arm NN
+ // subtracts its padding from the input shape, so negative padding values are used
+ // here to offset the difference.
+ std::vector<int> pad = {-static_cast<int>(descriptor->m_PadTop),
+ -static_cast<int>(descriptor->m_PadBottom),
+ -static_cast<int>(descriptor->m_PadLeft),
+ -static_cast<int>(descriptor->m_PadRight)};
+ std::vector<int> stride = {static_cast<int>(descriptor->m_StrideY),
+ static_cast<int>(descriptor->m_StrideX)};
+
+ std::vector<int> outputShape;
+ // If available, use the shape from the descriptor; otherwise use the output tensor shape.
+ if (descriptor->m_OutputShape.size() == 4)
+ {
+ for (uint32_t i = 0; i < descriptor->m_OutputShape.size(); ++i)
+ {
+ outputShape.push_back(static_cast<int>(descriptor->m_OutputShape[i]));
+ }
+ }
+ else
+ {
+ for (uint32_t i = 0; i < outputs[0]->GetNumDimensions(); ++i)
+ {
+ outputShape.push_back(static_cast<int>(outputs[0]->GetShape()[i]));
+ }
+ }
+
+ TosaTransposeConvAttribute attribute(pad, stride, outputShape, 0, 0, ArmNNToDType(inputs[0]->GetDataType()));
+
+ auto* op = new TosaSerializationOperator(Op_TRANSPOSE_CONV2D,
+ Attribute_TransposeConvAttribute,
+ &attribute,
+ {input0Name, input1Name, input2Name},
+ {outputName});
+ operators.push_back(op);
+
+ // operatorInputNames/operatorOutputNames end up being the same as
+ // blockInputNames/blockOutputNames for one-to-one ArmNN to TOSA mappings.
+ return new TosaSerializationBasicBlock(blockName, // name
+ operators, // operators
+ tensors, // tensors
+ {input0Name, input1Name, input2Name}, // inputs
+ {outputName}); // outputs
+} \ No newline at end of file
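
The padding comment in the new TRANSPOSE_CONV2D setup can be sanity-checked with the usual transpose-convolution output-shape relations. The formulas and example numbers below are illustrative assumptions and are not taken from this commit.

#include <cassert>

int main()
{
    const int inH = 5, kernelH = 3, strideY = 2;
    const int padTop = 1, padBottom = 1;                       // Arm NN descriptor padding

    // Arm NN: padding shrinks the output.
    const int armnnOutH = strideY * (inH - 1) + kernelH - (padTop + padBottom);

    // TOSA Reference Model: out_pad is added to the output, so the negated
    // Arm NN padding reproduces the same shape.
    const int outPadTop = -padTop, outPadBottom = -padBottom;  // values passed in 'pad' above
    const int tosaOutH = (inH - 1) * strideY + kernelH + outPadTop + outPadBottom;

    assert(armnnOutH == tosaOutH);                             // both give 9 for this example
    return 0;
}
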
diff --git a/src/backends/tosaCommon/operatorMappings/TransposeConv2dOperator.hpp b/src/backends/tosaCommon/operatorMappings/TransposeConv2dOperator.hpp
new file mode 100644
index 0000000000..eb911a1195
--- /dev/null
+++ b/src/backends/tosaCommon/operatorMappings/TransposeConv2dOperator.hpp
@@ -0,0 +1,20 @@
+//
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "TosaOperatorUtils.hpp"
+
+#include <Layer.hpp>
+
+#include <tosa_serialization_handler.h>
+
+using namespace armnn;
+using namespace tosa;
+
+TosaSerializationBasicBlock* ConvertTransposeConv2dToTosaOperator(const Layer* layer,
+ const std::vector<const TensorInfo*>& inputs,
+ const std::vector<const TensorInfo*>& outputs,
+ const TransposeConvolution2dDescriptor* descriptor);