From c9ea45adefdde2890e9aa191a5b31563a3dd35ea Mon Sep 17 00:00:00 2001
From: Mike Kelly
Date: Fri, 28 Feb 2020 18:11:58 +0000
Subject: IVGCVSW-4375 Add support for Transpose

 * Added TransposeLayer
 * Added CL, Neon and Ref Workloads
 * Added Transpose utilities
 * Added Serializer and Deserializer support
 * Added Quantizer support

Signed-off-by: Mike Kelly
Change-Id: I04c755ba7cb5b1edf72b3c9f3c0314878032e3c7
---
 src/armnn/InternalTypes.cpp | 1 +
 src/armnn/InternalTypes.hpp | 3 +-
 src/armnn/LayersFwd.hpp | 2 +
 src/armnn/Network.cpp | 6 +
 src/armnn/Network.hpp | 3 +
 src/armnn/QuantizerVisitor.cpp | 9 +
 src/armnn/QuantizerVisitor.hpp | 4 +
 src/armnn/SerializeLayerParameters.cpp | 20 ++
 src/armnn/SerializeLayerParameters.hpp | 5 +
 src/armnn/layers/TransposeLayer.cpp | 62 ++++++
 src/armnn/layers/TransposeLayer.hpp | 70 ++++++
 src/armnnDeserializer/Deserializer.cpp | 27 +++
 src/armnnDeserializer/Deserializer.hpp | 1 +
 .../test/DeserializeTranspose.cpp | 137 ++++++++++++
 src/armnnSerializer/ArmnnSchema.fbs | 15 +-
 src/armnnSerializer/Serializer.cpp | 27 +++
 src/armnnSerializer/Serializer.hpp | 4 +
 src/armnnSerializer/test/SerializerTests.cpp | 28 +++
 src/armnnUtils/Transpose.cpp | 126 +++++++++++
 src/backends/aclCommon/ArmComputeTensorUtils.cpp | 27 +++
 src/backends/aclCommon/ArmComputeTensorUtils.hpp | 3 +
 src/backends/backendsCommon/LayerSupportBase.cpp | 8 +
 src/backends/backendsCommon/LayerSupportBase.hpp | 6 +
 src/backends/backendsCommon/WorkloadData.cpp | 29 +++
 src/backends/backendsCommon/WorkloadData.hpp | 5 +
 src/backends/backendsCommon/WorkloadFactory.cpp | 21 +-
 src/backends/backendsCommon/WorkloadFactory.hpp | 3 +
 .../backendsCommon/WorkloadFactoryBase.hpp | 4 +
 .../test/IsLayerSupportedTestImpl.hpp | 2 +
 src/backends/backendsCommon/test/LayerTests.hpp | 1 +
 .../test/layerTests/TransposeTestImpl.hpp | 240 +++++++++++++++++++++
 src/backends/cl/ClLayerSupport.cpp | 9 +
 src/backends/cl/ClLayerSupport.hpp | 6 +
 src/backends/cl/ClWorkloadFactory.cpp | 6 +
 src/backends/cl/ClWorkloadFactory.hpp | 3 +
 src/backends/cl/backend.mk | 3 +-
 src/backends/cl/test/ClLayerTests.cpp | 14 ++
 src/backends/cl/workloads/CMakeLists.txt | 2 +
 src/backends/cl/workloads/ClTransposeWorkload.cpp | 49 +++++
 src/backends/cl/workloads/ClTransposeWorkload.hpp | 40 ++++
 src/backends/cl/workloads/ClWorkloads.hpp | 1 +
 src/backends/neon/NeonLayerSupport.cpp | 9 +
 src/backends/neon/NeonLayerSupport.hpp | 5 +
 src/backends/neon/NeonWorkloadFactory.cpp | 6 +
 src/backends/neon/NeonWorkloadFactory.hpp | 3 +
 src/backends/neon/backend.mk | 3 +-
 src/backends/neon/test/NeonLayerTests.cpp | 14 ++
 src/backends/neon/workloads/CMakeLists.txt | 2 +
 .../neon/workloads/NeonTransposeWorkload.cpp | 48 +++++
 .../neon/workloads/NeonTransposeWorkload.hpp | 39 ++++
 src/backends/neon/workloads/NeonWorkloads.hpp | 1 +
 src/backends/reference/RefLayerSupport.cpp | 32 ++-
 src/backends/reference/RefLayerSupport.hpp | 6 +
 src/backends/reference/RefWorkloadFactory.cpp | 11 +
 src/backends/reference/RefWorkloadFactory.hpp | 3 +
 src/backends/reference/backend.mk | 1 +
 src/backends/reference/test/RefLayerTests.cpp | 14 ++
 src/backends/reference/workloads/CMakeLists.txt | 2 +
 .../reference/workloads/RefTransposeWorkload.cpp | 35 +++
 .../reference/workloads/RefTransposeWorkload.hpp | 35 +++
 src/backends/reference/workloads/RefWorkloads.hpp | 1 +
 61 files changed, 1294 insertions(+), 8 deletions(-)
 create mode 100644 src/armnn/layers/TransposeLayer.cpp
 create mode 100644 src/armnn/layers/TransposeLayer.hpp
 create mode 100644
src/armnnDeserializer/test/DeserializeTranspose.cpp create mode 100644 src/armnnUtils/Transpose.cpp create mode 100644 src/backends/backendsCommon/test/layerTests/TransposeTestImpl.hpp create mode 100644 src/backends/cl/workloads/ClTransposeWorkload.cpp create mode 100644 src/backends/cl/workloads/ClTransposeWorkload.hpp create mode 100644 src/backends/neon/workloads/NeonTransposeWorkload.cpp create mode 100644 src/backends/neon/workloads/NeonTransposeWorkload.hpp create mode 100644 src/backends/reference/workloads/RefTransposeWorkload.cpp create mode 100644 src/backends/reference/workloads/RefTransposeWorkload.hpp (limited to 'src') diff --git a/src/armnn/InternalTypes.cpp b/src/armnn/InternalTypes.cpp index 10e7f501b7..c032e44cd3 100644 --- a/src/armnn/InternalTypes.cpp +++ b/src/armnn/InternalTypes.cpp @@ -70,6 +70,7 @@ char const* GetLayerTypeAsCString(LayerType type) case LayerType::Subtraction: return "Subtraction"; case LayerType::Switch: return "Switch"; case LayerType::TransposeConvolution2d: return "TransposeConvolution2d"; + case LayerType::Transpose: return "Transpose"; default: BOOST_ASSERT_MSG(false, "Unknown layer type"); return "Unknown"; diff --git a/src/armnn/InternalTypes.hpp b/src/armnn/InternalTypes.hpp index 2d7be3cac6..351f12c510 100644 --- a/src/armnn/InternalTypes.hpp +++ b/src/armnn/InternalTypes.hpp @@ -69,9 +69,10 @@ enum class LayerType StridedSlice, Subtraction, Switch, + TransposeConvolution2d, // Last layer goes here. LastLayer, - TransposeConvolution2d = LastLayer + Transpose = LastLayer }; const char* GetLayerTypeAsCString(LayerType type); diff --git a/src/armnn/LayersFwd.hpp b/src/armnn/LayersFwd.hpp index 2d486f48a6..f3ce7e61fa 100644 --- a/src/armnn/LayersFwd.hpp +++ b/src/armnn/LayersFwd.hpp @@ -62,6 +62,7 @@ #include "layers/SubtractionLayer.hpp" #include "layers/SwitchLayer.hpp" #include "layers/TransposeConvolution2dLayer.hpp" +#include "layers/TransposeLayer.hpp" namespace armnn { @@ -145,6 +146,7 @@ DECLARE_LAYER(StandIn) DECLARE_LAYER(StridedSlice) DECLARE_LAYER(Subtraction) DECLARE_LAYER(Switch) +DECLARE_LAYER(Transpose) DECLARE_LAYER(TransposeConvolution2d) } diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp index 7edc6240a1..b405a77829 100644 --- a/src/armnn/Network.cpp +++ b/src/armnn/Network.cpp @@ -1526,6 +1526,12 @@ IConnectableLayer* Network::AddTransposeConvolution2dLayer(const TransposeConvol return layer; } +IConnectableLayer* Network::AddTransposeLayer(const TransposeDescriptor& transposeDescriptor, + const char* name) +{ + return m_Graph->AddLayer(transposeDescriptor, name); +} + IConnectableLayer* Network::AddStackLayer(const StackDescriptor& stackDescriptor, const char* name) { diff --git a/src/armnn/Network.hpp b/src/armnn/Network.hpp index 23a8e47093..5da681306c 100644 --- a/src/armnn/Network.hpp +++ b/src/armnn/Network.hpp @@ -226,6 +226,9 @@ public: const Optional& biases, const char* name = nullptr) override; + IConnectableLayer* AddTransposeLayer(const TransposeDescriptor& transposeDescriptor, + const char* name = nullptr) override; + IConnectableLayer* AddStackLayer(const StackDescriptor& stackDescriptor, const char* name = nullptr) override; diff --git a/src/armnn/QuantizerVisitor.cpp b/src/armnn/QuantizerVisitor.cpp index 51818ebddd..8e7c45f47f 100644 --- a/src/armnn/QuantizerVisitor.cpp +++ b/src/armnn/QuantizerVisitor.cpp @@ -561,4 +561,13 @@ void QuantizerVisitor::VisitTransposeConvolution2dLayer(const IConnectableLayer* SetQuantizedInputConnections(layer, newLayer); } +void 
QuantizerVisitor::VisitTransposeLayer(const IConnectableLayer* layer, + const TransposeDescriptor& transposeDescriptor, + const char* name) +{ + IConnectableLayer* newLayer = m_QuantizedNetwork->AddTransposeLayer(transposeDescriptor, name); + RecordLayer(layer, newLayer); + SetQuantizedInputConnections(layer, newLayer); +} + } //namespace armnn diff --git a/src/armnn/QuantizerVisitor.hpp b/src/armnn/QuantizerVisitor.hpp index 4013033697..29500ab0c8 100644 --- a/src/armnn/QuantizerVisitor.hpp +++ b/src/armnn/QuantizerVisitor.hpp @@ -187,6 +187,10 @@ public: const Optional& biases, const char* name = nullptr) override; + void VisitTransposeLayer(const IConnectableLayer* layer, + const TransposeDescriptor& descriptor, + const char* name = nullptr) override; + /// Extract the quantized network INetworkPtr RetrieveFinalNetwork() { return std::move(m_QuantizedNetwork); } diff --git a/src/armnn/SerializeLayerParameters.cpp b/src/armnn/SerializeLayerParameters.cpp index 544e389ae9..76b92f3f9d 100644 --- a/src/armnn/SerializeLayerParameters.cpp +++ b/src/armnn/SerializeLayerParameters.cpp @@ -491,4 +491,24 @@ void StringifyLayerParameters::Serialize( fn("DataLayout", GetDataLayoutName(desc.m_DataLayout)); } +void StringifyLayerParameters::Serialize(ParameterStringifyFunction& fn, + const TransposeDescriptor& desc) +{ + std::stringstream ss; + ss << "["; + bool addComma = false; + for (auto it : desc.m_DimMappings) + { + if (addComma) + { + ss << ","; + } + ss << it; + addComma = true; + } + ss << "]"; + + fn("DimMappings",ss.str()); +} + } // namespace armnn \ No newline at end of file diff --git a/src/armnn/SerializeLayerParameters.hpp b/src/armnn/SerializeLayerParameters.hpp index 76ca0a52dd..ae921c4b07 100644 --- a/src/armnn/SerializeLayerParameters.hpp +++ b/src/armnn/SerializeLayerParameters.hpp @@ -160,4 +160,9 @@ template <> struct StringifyLayerParameters static void Serialize(ParameterStringifyFunction& fn, const TransposeConvolution2dDescriptor& desc); }; +template <> struct StringifyLayerParameters +{ + static void Serialize(ParameterStringifyFunction& fn, const TransposeDescriptor& desc); +}; + } // namespace armnn \ No newline at end of file diff --git a/src/armnn/layers/TransposeLayer.cpp b/src/armnn/layers/TransposeLayer.cpp new file mode 100644 index 0000000000..3c22b545b9 --- /dev/null +++ b/src/armnn/layers/TransposeLayer.cpp @@ -0,0 +1,62 @@ +// +// Copyright © 2020 Arm Ltd. All rights reserved. 
+// SPDX-License-Identifier: MIT +// + +#include "TransposeLayer.hpp" + +#include "LayerCloneBase.hpp" + +#include + +#include + +#include +#include + +namespace armnn +{ + +TransposeLayer::TransposeLayer(const TransposeDescriptor& param, const char* name) + : LayerWithParameters(1, 1, LayerType::Transpose, param, name) +{ +} + +std::unique_ptr TransposeLayer::CreateWorkload(const IWorkloadFactory& factory) const +{ + TransposeQueueDescriptor descriptor; + return factory.CreateTranspose(descriptor, PrepInfoAndDesc(descriptor)); +} + +TransposeLayer* TransposeLayer::Clone(Graph& graph) const +{ + return CloneBase(graph, m_Param, GetName()); +} + +std::vector TransposeLayer::InferOutputShapes(const std::vector& inputShapes) const +{ + BOOST_ASSERT(inputShapes.size() == 1); + const TensorShape& inShape = inputShapes[0]; + return std::vector ({armnnUtils::TransposeTensorShape(inShape, m_Param.m_DimMappings)}); +} + +void TransposeLayer::ValidateTensorShapesFromInputs() +{ + VerifyLayerConnections(1, CHECK_LOCATION()); + + auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() }); + + BOOST_ASSERT(inferredShapes.size() == 1); + + ConditionalThrowIfNotEqual( + "TransposeLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.", + GetOutputSlot(0).GetTensorInfo().GetShape(), + inferredShapes[0]); +} + +void TransposeLayer::Accept(ILayerVisitor& visitor) const +{ + visitor.VisitTransposeLayer(this, GetParameters(), GetName()); +} + +} // namespace armnn diff --git a/src/armnn/layers/TransposeLayer.hpp b/src/armnn/layers/TransposeLayer.hpp new file mode 100644 index 0000000000..4906bc9412 --- /dev/null +++ b/src/armnn/layers/TransposeLayer.hpp @@ -0,0 +1,70 @@ +// +// Copyright © 2020 Arm Ltd. All rights reserved. +// SPDX-License-Identifier: MIT +// +#pragma once + +#include "LayerWithParameters.hpp" + +namespace armnn +{ + +/// This layer represents a transpose operation. +class TransposeLayer : public LayerWithParameters +{ +public: + /// Makes a workload for the Transpose type. + /// @param [in] factory The workload factory which will create the workload. + /// @return A pointer to the created workload, or nullptr if not created. + virtual std::unique_ptr CreateWorkload(const IWorkloadFactory& factory) const override; + + /// Creates a dynamically-allocated copy of this layer. + /// @param [in] graph The graph into which this layer is being cloned. + TransposeLayer* Clone(Graph& graph) const override; + + /// Check if the input tensor shape(s) + /// will lead to a valid configuration of @ref TransposeLayer. + void ValidateTensorShapesFromInputs() override; + + /// Infers the output shapes from given input shapes and the permutation vector. + /// @param [in] inputShapes The input shapes layer has. + /// @return A vector to the inferred output shape. + std::vector InferOutputShapes(const std::vector& inputShapes) const override; + + /// @return a permutation vector describing the permutation for the dimensions of the input tensor. + const PermutationVector& GetPermutation() const + { + return m_Param.m_DimMappings; + } + + /// Indicates if the other layer received is inverse of this one. + /// @param [in] other The other layer to be compared with. + /// @return true if other layer is inverse of this false otherwise. 
+ bool IsInverse(const Layer& other) const + { + return (other.GetType() == LayerType::Transpose) && + GetPermutation().IsInverse(boost::polymorphic_downcast(&other)->GetPermutation()); + } + + /// Indicates if the other layer received is equal to this one. + /// @param [in] other The other layer to be compare with. + /// @return true if other layer is equal to this false otherwise. + bool IsEqual(const Layer& other) const + { + return (other.GetType() == LayerType::Transpose) && + GetPermutation().IsEqual(boost::polymorphic_downcast(&other)->GetPermutation()); + } + + void Accept(ILayerVisitor& visitor) const override; + +protected: + /// Constructor to create a TransposeLayer. + /// @param [in] param TransposeDescriptor to configure the permute operation. + /// @param [in] name Optional name for the layer. + TransposeLayer(const TransposeDescriptor& param, const char* name); + + /// Default destructor + ~TransposeLayer() = default; +}; + +} // namespace diff --git a/src/armnnDeserializer/Deserializer.cpp b/src/armnnDeserializer/Deserializer.cpp index 61a38f9cf3..0d81649115 100644 --- a/src/armnnDeserializer/Deserializer.cpp +++ b/src/armnnDeserializer/Deserializer.cpp @@ -12,6 +12,7 @@ #include #include +#include #include #include @@ -241,6 +242,7 @@ m_ParserFunctions(Layer_MAX+1, &Deserializer::ParseUnsupportedLayer) m_ParserFunctions[Layer_SubtractionLayer] = &Deserializer::ParseSubtraction; m_ParserFunctions[Layer_SwitchLayer] = &Deserializer::ParseSwitch; m_ParserFunctions[Layer_TransposeConvolution2dLayer] = &Deserializer::ParseTransposeConvolution2d; + m_ParserFunctions[Layer_TransposeLayer] = &Deserializer::ParseTranspose; } Deserializer::LayerBaseRawPtr Deserializer::GetBaseLayer(const GraphPtr& graphPtr, unsigned int layerIndex) @@ -357,6 +359,8 @@ Deserializer::LayerBaseRawPtr Deserializer::GetBaseLayer(const GraphPtr& graphPt return graphPtr->layers()->Get(layerIndex)->layer_as_SwitchLayer()->base(); case Layer::Layer_TransposeConvolution2dLayer: return graphPtr->layers()->Get(layerIndex)->layer_as_TransposeConvolution2dLayer()->base(); + case Layer::Layer_TransposeLayer: + return graphPtr->layers()->Get(layerIndex)->layer_as_TransposeLayer()->base(); case Layer::Layer_NONE: default: throw ParseException(boost::str( @@ -2721,6 +2725,29 @@ void Deserializer::ParsePrelu(GraphPtr graph, unsigned int layerIndex) RegisterOutputSlots(graph, layerIndex, layer); } +void Deserializer::ParseTranspose(GraphPtr graph, unsigned int layerIndex) +{ + CHECK_LAYERS(graph, 0, layerIndex); + + auto dimsMapping = graph->layers()->Get(layerIndex)->layer_as_TransposeLayer()->descriptor()->dimMappings(); + + auto inputs = GetInputs(graph, layerIndex); + CHECK_VALID_SIZE(inputs.size(), 1); + + auto outputs = GetOutputs(graph, layerIndex); + CHECK_VALID_SIZE(outputs.size(), 1); + auto outputInfo = ToTensorInfo(outputs[0]); + + auto layerName = GetLayerName(graph, layerIndex); + const armnn::TransposeDescriptor descriptor(armnn::PermutationVector(dimsMapping->data(), dimsMapping->Length())); + + IConnectableLayer* layer = m_Network->AddTransposeLayer(descriptor, layerName.c_str()); + layer->GetOutputSlot(0).SetTensorInfo(outputInfo); + + RegisterInputSlots(graph, layerIndex, layer); + RegisterOutputSlots(graph, layerIndex, layer); +} + void Deserializer::ParseTransposeConvolution2d(GraphPtr graph, unsigned int layerIndex) { CHECK_LAYERS(graph, 0, layerIndex); diff --git a/src/armnnDeserializer/Deserializer.hpp b/src/armnnDeserializer/Deserializer.hpp index 8e8fe1aca8..f7e47cc8c2 100644 --- 
a/src/armnnDeserializer/Deserializer.hpp +++ b/src/armnnDeserializer/Deserializer.hpp @@ -128,6 +128,7 @@ private: void ParseStridedSlice(GraphPtr graph, unsigned int layerIndex); void ParseSubtraction(GraphPtr graph, unsigned int layerIndex); void ParseSwitch(GraphPtr graph, unsigned int layerIndex); + void ParseTranspose(GraphPtr graph, unsigned int layerIndex); void ParseTransposeConvolution2d(GraphPtr graph, unsigned int layerIndex); void RegisterInputSlots(GraphPtr graph, uint32_t layerIndex, diff --git a/src/armnnDeserializer/test/DeserializeTranspose.cpp b/src/armnnDeserializer/test/DeserializeTranspose.cpp new file mode 100644 index 0000000000..bf0f043a4a --- /dev/null +++ b/src/armnnDeserializer/test/DeserializeTranspose.cpp @@ -0,0 +1,137 @@ +// +// Copyright © 2020 Arm Ltd. All rights reserved. +// SPDX-License-Identifier: MIT +// + +#include +#include "ParserFlatbuffersSerializeFixture.hpp" +#include "../Deserializer.hpp" + +#include + +BOOST_AUTO_TEST_SUITE(Deserializer) + +struct TransposeFixture : public ParserFlatbuffersSerializeFixture +{ + explicit TransposeFixture(const std::string &inputShape, + const std::string &dimMappings, + const std::string &outputShape, + const std::string &dataType) + { + m_JsonString = R"( + { + inputIds: [0], + outputIds: [2], + layers: [ + { + layer_type: "InputLayer", + layer: { + base: { + layerBindingId: 0, + base: { + index: 0, + layerName: "InputLayer", + layerType: "Input", + inputSlots: [{ + index: 0, + connection: {sourceLayerIndex:0, outputSlotIndex:0 }, + }], + outputSlots: [{ + index: 0, + tensorInfo: { + dimensions: )" + inputShape + R"(, + dataType: )" + dataType + R"( + } + }] + } + } + } + }, + { + layer_type: "TransposeLayer", + layer: { + base: { + index: 1, + layerName: "TransposeLayer", + layerType: "Transpose", + inputSlots: [{ + index: 0, + connection: {sourceLayerIndex:0, outputSlotIndex:0 }, + }], + outputSlots: [{ + index: 0, + tensorInfo: { + dimensions: )" + outputShape + R"(, + dataType: )" + dataType + R"( + } + }] + }, + descriptor: { + dimMappings: )" + dimMappings + R"(, + } + } + }, + { + layer_type: "OutputLayer", + layer: { + base:{ + layerBindingId: 2, + base: { + index: 2, + layerName: "OutputLayer", + layerType: "Output", + inputSlots: [{ + index: 0, + connection: {sourceLayerIndex:1, outputSlotIndex:0 }, + }], + outputSlots: [{ + index: 0, + tensorInfo: { + dimensions: )" + outputShape + R"(, + dataType: )" + dataType + R"( + }, + }], + } + } + }, + } + ] + } + )"; + SetupSingleInputSingleOutput("InputLayer", "OutputLayer"); + } +}; + +struct SimpleTranspose2DFixture : TransposeFixture +{ + SimpleTranspose2DFixture() : TransposeFixture("[ 2, 3 ]", + "[ 1, 0 ]", + "[ 3, 2 ]", + "QuantisedAsymm8") {} +}; + +BOOST_FIXTURE_TEST_CASE(SimpleTranspose2DQuantisedAsymm8, SimpleTranspose2DFixture) +{ + RunTest<2, armnn::DataType::QAsymmU8>(0, + { 1, 2, 3, 4, 5, 6 }, + { 1, 4, 2, 5, 3, 6 }); +} + +struct SimpleTranspose4DFixture : TransposeFixture +{ + SimpleTranspose4DFixture() : TransposeFixture("[ 1, 2, 3, 4 ]", + "[ 3, 2, 1, 0 ]", + "[ 4, 3, 2, 1 ]", + "QuantisedAsymm8") {} +}; + +BOOST_FIXTURE_TEST_CASE(SimpleTranspose4DQuantisedAsymm8, SimpleTranspose4DFixture) +{ + RunTest<4, armnn::DataType::QAsymmU8>(0, + { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, + 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 }, + { 1, 13, 5, 17, 9, 21, 2, 14, 6, 18, 10, 22, + 3, 15, 7, 19, 11, 23, 4, 16, 8, 20, 12, 24 }); +} + +BOOST_AUTO_TEST_SUITE_END() diff --git a/src/armnnSerializer/ArmnnSchema.fbs 
b/src/armnnSerializer/ArmnnSchema.fbs index 0697517a0f..d175d41f3f 100644 --- a/src/armnnSerializer/ArmnnSchema.fbs +++ b/src/armnnSerializer/ArmnnSchema.fbs @@ -149,7 +149,8 @@ enum LayerType : uint { LogSoftmax = 51, Comparison = 52, StandIn = 53, - ElementwiseUnary = 54 + ElementwiseUnary = 54, + Transpose = 55 } // Base layer table to be used as part of other layers @@ -732,6 +733,15 @@ table TransposeConvolution2dDescriptor { dataLayout:DataLayout = NCHW; } +table TransposeLayer { + base:LayerBase; + descriptor:TransposeDescriptor; +} + +table TransposeDescriptor { + dimMappings:[uint]; +} + table ResizeLayer { base:LayerBase; descriptor:ResizeDescriptor; @@ -820,7 +830,8 @@ union Layer { LogSoftmaxLayer, ComparisonLayer, StandInLayer, - ElementwiseUnaryLayer + ElementwiseUnaryLayer, + TransposeLayer } table AnyLayer { diff --git a/src/armnnSerializer/Serializer.cpp b/src/armnnSerializer/Serializer.cpp index 3c01842c95..a3fdcf8123 100644 --- a/src/armnnSerializer/Serializer.cpp +++ b/src/armnnSerializer/Serializer.cpp @@ -1301,6 +1301,33 @@ void SerializerVisitor::VisitTransposeConvolution2dLayer( CreateAnyLayer(fbLayer.o, serializer::Layer::Layer_TransposeConvolution2dLayer); } +void SerializerVisitor::VisitTransposeLayer(const armnn::IConnectableLayer* layer, + const armnn::TransposeDescriptor& descriptor, + const char* name) +{ + boost::ignore_unused(name); + + // Create FlatBuffer BaseLayer + auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Transpose); + + std::vector dimMappings; + for (unsigned int i=0; i& biases, const char* = nullptr) override; + void VisitTransposeLayer(const armnn::IConnectableLayer* layer, + const armnn::TransposeDescriptor& descriptor, + const char* name = nullptr) override; + private: /// Creates the Input Slots and Output Slots and LayerBase for the layer. 
diff --git a/src/armnnSerializer/test/SerializerTests.cpp b/src/armnnSerializer/test/SerializerTests.cpp index 47804fe328..8c9c92b634 100644 --- a/src/armnnSerializer/test/SerializerTests.cpp +++ b/src/armnnSerializer/test/SerializerTests.cpp @@ -2501,6 +2501,34 @@ BOOST_AUTO_TEST_CASE(SerializeSwitch) deserializedNetwork->Accept(verifier); } +BOOST_AUTO_TEST_CASE(SerializeTranspose) +{ + DECLARE_LAYER_VERIFIER_CLASS_WITH_DESCRIPTOR(Transpose) + + const std::string layerName("transpose"); + const armnn::TensorInfo inputTensorInfo({4, 3, 2, 1}, armnn::DataType::Float32); + const armnn::TensorInfo outputTensorInfo({1, 2, 3, 4}, armnn::DataType::Float32); + + armnn::TransposeDescriptor descriptor(armnn::PermutationVector({3, 2, 1, 0})); + + armnn::INetworkPtr network = armnn::INetwork::Create(); + armnn::IConnectableLayer* const inputLayer = network->AddInputLayer(0); + armnn::IConnectableLayer* const transposeLayer = network->AddTransposeLayer(descriptor, layerName.c_str()); + armnn::IConnectableLayer* const outputLayer = network->AddOutputLayer(0); + + inputLayer->GetOutputSlot(0).Connect(transposeLayer->GetInputSlot(0)); + transposeLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0)); + + inputLayer->GetOutputSlot(0).SetTensorInfo(inputTensorInfo); + transposeLayer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo); + + armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network)); + BOOST_CHECK(deserializedNetwork); + + TransposeLayerVerifier verifier(layerName, {inputTensorInfo}, {outputTensorInfo}, descriptor); + deserializedNetwork->Accept(verifier); +} + BOOST_AUTO_TEST_CASE(SerializeTransposeConvolution2d) { using Descriptor = armnn::TransposeConvolution2dDescriptor; diff --git a/src/armnnUtils/Transpose.cpp b/src/armnnUtils/Transpose.cpp new file mode 100644 index 0000000000..3f3837c725 --- /dev/null +++ b/src/armnnUtils/Transpose.cpp @@ -0,0 +1,126 @@ +// +// Copyright © 2020 Arm Ltd. All rights reserved. 
+// SPDX-License-Identifier: MIT +// + +#include + +#include + +#include "Half.hpp" + +#include +#include + +namespace +{ + +class TransposeLoop +{ +public: + using size_type = unsigned int; + + TransposeLoop(const armnn::TensorShape& srcShape, const armnn::PermutationVector& mappings) + : m_SrcShape(srcShape) + { + assert(srcShape.GetNumDimensions() == mappings.GetSize()); + + const size_type numDims = srcShape.GetNumDimensions(); + + size_type srcStride = 1U; + size_type dstStride = 1U; + + for (size_type i = numDims - 1U, k = 0U; k < numDims; ++k, --i) + { + m_SrcStrides[i] = srcStride; + m_DstStrides[mappings[i]] = dstStride; + + srcStride *= srcShape[i]; + dstStride *= srcShape[mappings[i]]; + } + } + + void Unroll(const void* srcData, void* dstData, size_t dataTypeSize) + { + assert(srcData); + assert(dstData); + assert(dataTypeSize > 0); + + const unsigned char* srcDataPtr = reinterpret_cast(srcData); + unsigned char* dstDataPtr = reinterpret_cast(dstData); + + const unsigned char* const srcEndPtr = srcDataPtr + m_SrcShape.GetNumElements() * dataTypeSize; + unsigned char* const dstEndPtr = dstDataPtr + m_SrcShape.GetNumElements() * dataTypeSize; + + Unroll(0, srcDataPtr, dstDataPtr, srcEndPtr, dstEndPtr, dataTypeSize); + } + +private: + void Unroll(size_type dimension, + const unsigned char* srcData, unsigned char* dstData, + const unsigned char* srcEnd, unsigned char* dstEnd, + size_t dataTypeSize) + { + assert(srcData); + assert(dstData); + assert(srcEnd); + assert(dstEnd); + assert(srcData < srcEnd); + assert(dstData < dstEnd); + assert(dataTypeSize > 0); + + if (dimension >= m_SrcShape.GetNumDimensions()) + { + ::memcpy(dstData, srcData, dataTypeSize); + } + else + { + for (size_type i = 0; i < m_SrcShape[dimension]; i++) + { + Unroll(dimension + 1, srcData, dstData, srcEnd, dstEnd, dataTypeSize); + + srcData += m_SrcStrides[dimension] * dataTypeSize; + dstData += m_DstStrides[dimension] * dataTypeSize; + } + } + } + + armnn::TensorShape m_SrcShape; + std::array m_SrcStrides; + std::array m_DstStrides; +}; + +} // namespace + +namespace armnnUtils +{ + +armnn::TensorShape TransposeTensorShape(const armnn::TensorShape& srcShape, const armnn::PermutationVector& mappings) +{ + assert(srcShape.GetNumDimensions() == mappings.GetSize()); + + const unsigned int numDims = mappings.GetSize(); + unsigned int outDims[armnn::MaxNumOfTensorDimensions]; + + for (unsigned int i = 0U; i < numDims; ++i) + { + outDims[i] = srcShape[mappings[i]]; + } + armnn::TensorShape permutedShape(numDims, outDims); + return permutedShape; +} + +armnn::TensorInfo TransposeTensorShape(const armnn::TensorInfo& info, const armnn::PermutationVector& mappings) +{ + armnn::TensorInfo outInfo(info); + outInfo.SetShape(TransposeTensorShape(info.GetShape(), mappings)); + return outInfo; +} + +void Transpose(const armnn::TensorShape& srcShape, const armnn::PermutationVector& mappings, + const void* src, void* dst, size_t dataTypeSize) +{ + TransposeLoop(srcShape, mappings).Unroll(src, dst, dataTypeSize); +} + +} // namespace armnnUtils diff --git a/src/backends/aclCommon/ArmComputeTensorUtils.cpp b/src/backends/aclCommon/ArmComputeTensorUtils.cpp index 49fef5bf17..84091e8fb3 100644 --- a/src/backends/aclCommon/ArmComputeTensorUtils.cpp +++ b/src/backends/aclCommon/ArmComputeTensorUtils.cpp @@ -214,7 +214,34 @@ arm_compute::PermutationVector BuildArmComputePermutationVector(const armnn::Per { aclPerm.set(i - start, perm[i] - start); } + return aclPerm; +} + +arm_compute::PermutationVector 
BuildArmComputeTransposeVector(const armnn::PermutationVector& perm) +{ + arm_compute::PermutationVector aclPerm; + std::map permuteMappings; + for (unsigned int i = 0; i < perm.GetSize(); ++i) + { + permuteMappings[perm[i]] = i; + } + + std::vector permuteVector; + for (unsigned int i = 0; i < perm.GetSize(); ++i) + { + permuteVector.push_back(permuteMappings.at(i)); + } + unsigned int start = 0; + while ((start < perm.GetSize()) && (start == permuteVector[start])) + { + ++start; + } + + for (unsigned int i = start; i < perm.GetSize(); ++i) + { + aclPerm.set(i - start, permuteVector[i] - start); + } return aclPerm; } diff --git a/src/backends/aclCommon/ArmComputeTensorUtils.hpp b/src/backends/aclCommon/ArmComputeTensorUtils.hpp index b4ff0f72ff..9b236e1eed 100644 --- a/src/backends/aclCommon/ArmComputeTensorUtils.hpp +++ b/src/backends/aclCommon/ArmComputeTensorUtils.hpp @@ -60,6 +60,9 @@ arm_compute::NormalizationLayerInfo BuildArmComputeNormalizationLayerInfo(const /// Utility function used to setup an arm_compute::PermutationVector object from an armnn::PermutationVector. arm_compute::PermutationVector BuildArmComputePermutationVector(const armnn::PermutationVector& vector); +/// Utility function used to setup an arm_compute::PermutationVector object from an armnn::PermutationVector. +arm_compute::PermutationVector BuildArmComputeTransposeVector(const armnn::PermutationVector& vector); + /// Utility function used to setup an arm_compute::Size2D object from width and height values. arm_compute::Size2D BuildArmComputeSize2D(const unsigned int width, const unsigned int height); diff --git a/src/backends/backendsCommon/LayerSupportBase.cpp b/src/backends/backendsCommon/LayerSupportBase.cpp index 449b8098d6..127913447c 100644 --- a/src/backends/backendsCommon/LayerSupportBase.cpp +++ b/src/backends/backendsCommon/LayerSupportBase.cpp @@ -579,4 +579,12 @@ bool LayerSupportBase::IsTransposeConvolution2dSupported(const TensorInfo& /*inp return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported); } +bool LayerSupportBase::IsTransposeSupported(const TensorInfo& /*input*/, + const TensorInfo& /*output*/, + const TransposeDescriptor& /*descriptor*/, + Optional reasonIfUnsupported) const +{ + return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported); +} + } // namespace armnn diff --git a/src/backends/backendsCommon/LayerSupportBase.hpp b/src/backends/backendsCommon/LayerSupportBase.hpp index 459ac03b6f..888bef5f89 100644 --- a/src/backends/backendsCommon/LayerSupportBase.hpp +++ b/src/backends/backendsCommon/LayerSupportBase.hpp @@ -353,6 +353,12 @@ public: const TensorInfo& weights, const Optional& biases, Optional reasonIfUnsupported = EmptyOptional()) const override; + + bool IsTransposeSupported(const TensorInfo& input, + const TensorInfo& output, + const TransposeDescriptor& descriptor, + Optional reasonIfUnsupported = EmptyOptional()) const override; + }; } // namespace armnn diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp index 410469e97d..9b7a2429d6 100644 --- a/src/backends/backendsCommon/WorkloadData.cpp +++ b/src/backends/backendsCommon/WorkloadData.cpp @@ -2680,6 +2680,35 @@ void TransposeConvolution2dQueueDescriptor::Validate(const WorkloadInfo& workloa ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output"); } +void TransposeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const +{ + const std::string 
descriptorName{"TransposeQueueDescriptor"}; + + ValidateNumInputs(workloadInfo, descriptorName, 1); + ValidateNumOutputs(workloadInfo, descriptorName, 1); + + const PermutationVector& mapping = m_Parameters.m_DimMappings; + + const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0]; + const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0]; + + ValidateTensorNumDimensions(inputTensorInfo, descriptorName, mapping.GetSize(), "input"); + ValidateTensorNumDimensions(outputTensorInfo, descriptorName, mapping.GetSize(), "output"); + + for (unsigned int i = 0u; i < mapping.GetSize(); ++i) + { + if (inputTensorInfo.GetShape()[mapping[i]] != outputTensorInfo.GetShape()[i]) + { + throw InvalidArgumentException(descriptorName + ": src dimension " + to_string(mapping[i]) + + " (=" + to_string(inputTensorInfo.GetShape()[mapping[i]]) + ") " + + "must match dst dimension " + to_string(i) + + " (=" + to_string(outputTensorInfo.GetShape()[i]) + ")"); + } + } + + ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output"); +} + void QuantizedLstmQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const { const std::string descriptorName{"QuantizedLstmQueueDescriptor"}; diff --git a/src/backends/backendsCommon/WorkloadData.hpp b/src/backends/backendsCommon/WorkloadData.hpp index 46681e9def..06289fa039 100644 --- a/src/backends/backendsCommon/WorkloadData.hpp +++ b/src/backends/backendsCommon/WorkloadData.hpp @@ -504,6 +504,11 @@ struct TransposeConvolution2dQueueDescriptor : QueueDescriptorWithParameters +{ + void Validate(const WorkloadInfo& workloadInfo) const; +}; + struct QuantizedLstmQueueDescriptor : QueueDescriptor { QuantizedLstmQueueDescriptor() diff --git a/src/backends/backendsCommon/WorkloadFactory.cpp b/src/backends/backendsCommon/WorkloadFactory.cpp index 23ff70a52e..6ac76ecea6 100644 --- a/src/backends/backendsCommon/WorkloadFactory.cpp +++ b/src/backends/backendsCommon/WorkloadFactory.cpp @@ -1023,6 +1023,17 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId, reason); break; } + case LayerType::Transpose: + { + auto cLayer = boost::polymorphic_downcast(&layer); + const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); + const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); + result = layerSupportObject->IsTransposeSupported(OverrideDataType(input, dataType), + OverrideDataType(output, dataType), + cLayer->GetParameters(), + reason); + break; + } case LayerType::TransposeConvolution2d: { auto cLayer = boost::polymorphic_downcast(&layer); @@ -1315,7 +1326,7 @@ std::unique_ptr IWorkloadFactory::CreatePad(const PadQueueDescriptor& } std::unique_ptr IWorkloadFactory::CreatePermute(const PermuteQueueDescriptor& /*descriptor*/, - const WorkloadInfo&/**/ /*info*/) const + const WorkloadInfo& /*info*/) const { return std::unique_ptr(); } @@ -1379,7 +1390,7 @@ std::unique_ptr IWorkloadFactory::CreateSlice(const SliceQueueDescrip { return std::unique_ptr(); } -/**/ + std::unique_ptr IWorkloadFactory::CreateSoftmax(const SoftmaxQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const { @@ -1428,6 +1439,12 @@ std::unique_ptr IWorkloadFactory::CreateSwitch(const SwitchQueueDescr return std::unique_ptr(); } +std::unique_ptr IWorkloadFactory::CreateTranspose(const TransposeQueueDescriptor& /*descriptor*/, + const WorkloadInfo& /*info*/) const +{ + return std::unique_ptr(); +} + std::unique_ptr IWorkloadFactory::CreateTransposeConvolution2d( const 
TransposeConvolution2dQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const diff --git a/src/backends/backendsCommon/WorkloadFactory.hpp b/src/backends/backendsCommon/WorkloadFactory.hpp index e1cdff6abe..dae58b6d93 100644 --- a/src/backends/backendsCommon/WorkloadFactory.hpp +++ b/src/backends/backendsCommon/WorkloadFactory.hpp @@ -235,6 +235,9 @@ public: virtual std::unique_ptr CreateSwitch(const SwitchQueueDescriptor& descriptor, const WorkloadInfo& Info) const; + virtual std::unique_ptr CreateTranspose(const TransposeQueueDescriptor& descriptor, + const WorkloadInfo& info) const; + virtual std::unique_ptr CreateTransposeConvolution2d( const TransposeConvolution2dQueueDescriptor& descriptor, const WorkloadInfo& info) const; diff --git a/src/backends/backendsCommon/WorkloadFactoryBase.hpp b/src/backends/backendsCommon/WorkloadFactoryBase.hpp index 9602cc3b6c..960dbd3413 100644 --- a/src/backends/backendsCommon/WorkloadFactoryBase.hpp +++ b/src/backends/backendsCommon/WorkloadFactoryBase.hpp @@ -266,6 +266,10 @@ public: const WorkloadInfo& /*info*/) const override { return nullptr; } + std::unique_ptr CreateTranspose(const TransposeQueueDescriptor& /*descriptor*/, + const WorkloadInfo& /*info*/) const override + { return nullptr; } + std::unique_ptr CreateTransposeConvolution2d(const TransposeConvolution2dQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const override { return nullptr; } diff --git a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp index 1a899aa935..395a63d6e6 100644 --- a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp +++ b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp @@ -537,6 +537,8 @@ DECLARE_LAYER_POLICY_1_PARAM(Subtraction) DECLARE_LAYER_POLICY_1_PARAM(Switch) +DECLARE_LAYER_POLICY_2_PARAM(Transpose) + DECLARE_LAYER_POLICY_2_PARAM(TransposeConvolution2d) diff --git a/src/backends/backendsCommon/test/LayerTests.hpp b/src/backends/backendsCommon/test/LayerTests.hpp index eba7944cc3..62a66df166 100644 --- a/src/backends/backendsCommon/test/LayerTests.hpp +++ b/src/backends/backendsCommon/test/LayerTests.hpp @@ -53,3 +53,4 @@ #include #include #include +#include diff --git a/src/backends/backendsCommon/test/layerTests/TransposeTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/TransposeTestImpl.hpp new file mode 100644 index 0000000000..3949dcc142 --- /dev/null +++ b/src/backends/backendsCommon/test/layerTests/TransposeTestImpl.hpp @@ -0,0 +1,240 @@ +// +// Copyright © 2020 Arm Ltd. All rights reserved. 
+// SPDX-License-Identifier: MIT +// + +#pragma once + +#include + + +#include +#include + +#include + +#include + +template +LayerTestResult SimpleTransposeTestImpl( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + armnn::TransposeDescriptor descriptor, + armnn::TensorInfo inputTensorInfo, + armnn::TensorInfo outputTensorInfo, + const std::vector& inputData, + const std::vector& outputExpectedData) +{ + boost::ignore_unused(memoryManager); + auto input = MakeTensor(inputTensorInfo, inputData); + + LayerTestResult ret(outputTensorInfo); + ret.outputExpected = MakeTensor(outputTensorInfo, outputExpectedData); + + std::unique_ptr inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo); + std::unique_ptr outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); + + armnn::TransposeQueueDescriptor data; + data.m_Parameters = descriptor; + armnn::WorkloadInfo info; + AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get()); + AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get()); + + std::unique_ptr workload = workloadFactory.CreateTranspose(data, info); + + inputHandle->Allocate(); + outputHandle->Allocate(); + + CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]); + + workload->Execute(); + + CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get()); + + return ret; +} + +template> +LayerTestResult SimpleTransposeTest( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) +{ + armnn::TensorInfo inputTensorInfo; + armnn::TensorInfo outputTensorInfo; + + unsigned int inputShape[] = { 1, 2, 2, 2 }; + unsigned int outputShape[] = { 1, 2, 2, 2 }; + + armnn::TransposeDescriptor descriptor; + descriptor.m_DimMappings = {0U, 2U, 3U, 1U}; + + inputTensorInfo = armnn::TensorInfo(4, inputShape, ArmnnType); + outputTensorInfo = armnn::TensorInfo(4, outputShape, ArmnnType); + + // Set quantization parameters if the requested type is a quantized type. + if(armnn::IsQuantizedType()) + { + inputTensorInfo.SetQuantizationScale(0.5f); + inputTensorInfo.SetQuantizationOffset(5); + outputTensorInfo.SetQuantizationScale(0.5f); + outputTensorInfo.SetQuantizationOffset(5); + } + + std::vector input = std::vector( + { + 1, 2, + 3, 4, + 5, 6, + 7, 8 + }); + + std::vector outputExpected = std::vector( + { + 1, 5, 2, 6, + 3, 7, 4, 8 + }); + + return SimpleTransposeTestImpl(workloadFactory, memoryManager, + descriptor, inputTensorInfo, + outputTensorInfo, input, outputExpected); +} + +template> +LayerTestResult TransposeValueSet1Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) +{ + armnn::TensorInfo inputTensorInfo; + armnn::TensorInfo outputTensorInfo; + + unsigned int inputShape[] = { 1, 2, 2, 3 }; + unsigned int outputShape[] = { 1, 3, 2, 2 }; + + armnn::TransposeDescriptor descriptor; + descriptor.m_DimMappings = {0U, 3U, 1U, 2U}; + + inputTensorInfo = armnn::TensorInfo(4, inputShape, ArmnnType); + outputTensorInfo = armnn::TensorInfo(4, outputShape, ArmnnType); + + // Set quantization parameters if the requested type is a quantized type. 
+ if(armnn::IsQuantizedType()) + { + inputTensorInfo.SetQuantizationScale(0.5f); + inputTensorInfo.SetQuantizationOffset(5); + outputTensorInfo.SetQuantizationScale(0.5f); + outputTensorInfo.SetQuantizationOffset(5); + } + + std::vector input = std::vector( + { + 1, 2, 3, + 11, 12, 13, + 21, 22, 23, + 31, 32, 33 + }); + + std::vector outputExpected = std::vector( + { + 1, 11, 21, 31, + 2, 12, 22, 32, + 3, 13, 23, 33 + }); + + return SimpleTransposeTestImpl(workloadFactory, memoryManager, + descriptor, inputTensorInfo, + outputTensorInfo, input, outputExpected); +} + +template> +LayerTestResult TransposeValueSet2Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) +{ + armnn::TensorInfo inputTensorInfo; + armnn::TensorInfo outputTensorInfo; + + unsigned int inputShape[] = { 1, 3, 2, 2 }; + unsigned int outputShape[] = { 1, 2, 2, 3 }; + + armnn::TransposeDescriptor descriptor; + descriptor.m_DimMappings = {0U, 2U, 3U, 1U}; + + inputTensorInfo = armnn::TensorInfo(4, inputShape, ArmnnType); + outputTensorInfo = armnn::TensorInfo(4, outputShape, ArmnnType); + + // Set quantization parameters if the requested type is a quantized type. + if(armnn::IsQuantizedType()) + { + inputTensorInfo.SetQuantizationScale(0.5f); + inputTensorInfo.SetQuantizationOffset(5); + outputTensorInfo.SetQuantizationScale(0.5f); + outputTensorInfo.SetQuantizationOffset(5); + } + + std::vector input = std::vector( + { + 1, 11, 21, 31, + 2, 12, 22, 32, + 3, 13, 23, 33 + }); + + std::vector outputExpected = std::vector( + { + 1, 2, 3, + 11, 12, 13, + 21, 22, 23, + 31, 32, 33, + }); + + return SimpleTransposeTestImpl(workloadFactory, memoryManager, + descriptor, inputTensorInfo, + outputTensorInfo, input, outputExpected); +} + +template> +LayerTestResult TransposeValueSet3Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) +{ + armnn::TensorInfo inputTensorInfo; + armnn::TensorInfo outputTensorInfo; + + unsigned int inputShape[] = { 1, 2, 3, 3 }; + unsigned int outputShape[] = { 1, 3, 2, 3 }; + + armnn::TransposeDescriptor descriptor; + descriptor.m_DimMappings = {0U, 3U, 1U, 2U}; + + inputTensorInfo = armnn::TensorInfo(4, inputShape, ArmnnType); + outputTensorInfo = armnn::TensorInfo(4, outputShape, ArmnnType); + + // Set quantization parameters if the requested type is a quantized type. 
+ if(armnn::IsQuantizedType()) + { + inputTensorInfo.SetQuantizationScale(0.5f); + inputTensorInfo.SetQuantizationOffset(5); + outputTensorInfo.SetQuantizationScale(0.5f); + outputTensorInfo.SetQuantizationOffset(5); + } + + std::vector input = std::vector( + { + 1, 2, 3, + 11, 12, 13, + 21, 22, 23, + 31, 32, 33, + 41, 42, 43, + 51, 52, 53 + }); + + std::vector outputExpected = std::vector( + { + 1, 11, 21, 31, 41, 51, + 2, 12, 22, 32, 42, 52, + 3, 13, 23, 33, 43, 53 + }); + + return SimpleTransposeTestImpl(workloadFactory, memoryManager, + descriptor, inputTensorInfo, + outputTensorInfo, input, outputExpected); +} diff --git a/src/backends/cl/ClLayerSupport.cpp b/src/backends/cl/ClLayerSupport.cpp index e8548e4b5a..d3ac98655a 100644 --- a/src/backends/cl/ClLayerSupport.cpp +++ b/src/backends/cl/ClLayerSupport.cpp @@ -60,6 +60,7 @@ #include "workloads/ClStridedSliceWorkload.hpp" #include "workloads/ClSubtractionWorkload.hpp" #include "workloads/ClTransposeConvolution2dWorkload.hpp" +#include "workloads/ClTransposeWorkload.hpp" #endif using namespace boost; @@ -819,4 +820,12 @@ bool ClLayerSupport::IsTransposeConvolution2dSupported(const TensorInfo& input, biases); } +bool ClLayerSupport::IsTransposeSupported(const TensorInfo& input, + const TensorInfo& output, + const TransposeDescriptor& descriptor, + Optional reasonIfUnsupported) const +{ + FORWARD_WORKLOAD_VALIDATE_FUNC(ClTransposeWorkloadValidate, reasonIfUnsupported, input, output, descriptor); +} + } // namespace armnn diff --git a/src/backends/cl/ClLayerSupport.hpp b/src/backends/cl/ClLayerSupport.hpp index 819d086cb4..60899d0596 100644 --- a/src/backends/cl/ClLayerSupport.hpp +++ b/src/backends/cl/ClLayerSupport.hpp @@ -286,6 +286,12 @@ public: const TensorInfo& weights, const Optional& biases, Optional reasonIfUnsupported = EmptyOptional()) const override; + + bool IsTransposeSupported(const TensorInfo& input, + const TensorInfo& output, + const TransposeDescriptor& descriptor, + Optional reasonIfUnsupported = EmptyOptional()) const override; + }; } // namespace armnn diff --git a/src/backends/cl/ClWorkloadFactory.cpp b/src/backends/cl/ClWorkloadFactory.cpp index 4bb2e2a8ce..21c26296af 100644 --- a/src/backends/cl/ClWorkloadFactory.cpp +++ b/src/backends/cl/ClWorkloadFactory.cpp @@ -534,6 +534,12 @@ std::unique_ptr ClWorkloadFactory::CreateSubtraction(const Subtractio return MakeWorkload(descriptor, info); } +std::unique_ptr ClWorkloadFactory::CreateTranspose(const TransposeQueueDescriptor& descriptor, + const WorkloadInfo& info) const +{ + return MakeWorkload(descriptor, info); +} + std::unique_ptr ClWorkloadFactory::CreateTransposeConvolution2d( const TransposeConvolution2dQueueDescriptor& descriptor, const WorkloadInfo& info) const diff --git a/src/backends/cl/ClWorkloadFactory.hpp b/src/backends/cl/ClWorkloadFactory.hpp index 980be9192e..a7168010f2 100644 --- a/src/backends/cl/ClWorkloadFactory.hpp +++ b/src/backends/cl/ClWorkloadFactory.hpp @@ -210,6 +210,9 @@ public: std::unique_ptr CreateSubtraction(const SubtractionQueueDescriptor& descriptor, const WorkloadInfo& info) const override; + std::unique_ptr CreateTranspose(const TransposeQueueDescriptor& descriptor, + const WorkloadInfo& info) const override; + std::unique_ptr CreateTransposeConvolution2d(const TransposeConvolution2dQueueDescriptor& descriptor, const WorkloadInfo& info) const override; diff --git a/src/backends/cl/backend.mk b/src/backends/cl/backend.mk index 4182b94136..e326add9e9 100644 --- a/src/backends/cl/backend.mk +++ b/src/backends/cl/backend.mk @@ 
-67,7 +67,8 @@ BACKEND_SOURCES := \ workloads/ClStackWorkload.cpp \ workloads/ClStridedSliceWorkload.cpp \ workloads/ClSubtractionWorkload.cpp \ - workloads/ClTransposeConvolution2dWorkload.cpp + workloads/ClTransposeConvolution2dWorkload.cpp \ + workloads/ClTransposeWorkload.cpp else # ARMNN_COMPUTE_CL_ENABLED == 0 diff --git a/src/backends/cl/test/ClLayerTests.cpp b/src/backends/cl/test/ClLayerTests.cpp index cfec81a4c3..d8b0fd156b 100644 --- a/src/backends/cl/test/ClLayerTests.cpp +++ b/src/backends/cl/test/ClLayerTests.cpp @@ -770,6 +770,20 @@ ARMNN_AUTO_TEST_CASE(DequantizeSimpleInt16, DequantizeSimpleInt16Test) ARMNN_AUTO_TEST_CASE(DequantizeSimpleUint8ToFp16, DequantizeSimpleUint8ToFp16Test) ARMNN_AUTO_TEST_CASE(DequantizeSimpleInt16ToFp16, DequantizeSimpleInt16ToFp16Test) +// Transpose +ARMNN_AUTO_TEST_CASE(SimpleTransposeFloat32, SimpleTransposeTest) +ARMNN_AUTO_TEST_CASE(TransposeFloat32ValueSet1Test, TransposeValueSet1Test) +ARMNN_AUTO_TEST_CASE(TransposeFloat32ValueSet2Test, TransposeValueSet2Test) +ARMNN_AUTO_TEST_CASE(TransposeFloat32ValueSet3Test, TransposeValueSet3Test) +ARMNN_AUTO_TEST_CASE(SimpleTransposeQASymm8, SimpleTransposeTest) +ARMNN_AUTO_TEST_CASE(TransposeQASymm8ValueSet1Test, TransposeValueSet1Test) +ARMNN_AUTO_TEST_CASE(TransposeQASymm8ValueSet2Test, TransposeValueSet2Test) +ARMNN_AUTO_TEST_CASE(TransposeQASymm8ValueSet3Test, TransposeValueSet3Test) +ARMNN_AUTO_TEST_CASE(SimpleTransposeQSymm16, SimpleTransposeTest) +ARMNN_AUTO_TEST_CASE(TransposeQSymm16ValueSet1Test, TransposeValueSet1Test) +ARMNN_AUTO_TEST_CASE(TransposeQSymm16ValueSet2Test, TransposeValueSet2Test) +ARMNN_AUTO_TEST_CASE(TransposeQSymm16ValueSet3Test, TransposeValueSet3Test) + // TransposeConvolution2d ARMNN_AUTO_TEST_CASE(SimpleTransposeConvolution2dFloatNchw, SimpleTransposeConvolution2dTest, diff --git a/src/backends/cl/workloads/CMakeLists.txt b/src/backends/cl/workloads/CMakeLists.txt index de62ca9496..17d69b1ed5 100644 --- a/src/backends/cl/workloads/CMakeLists.txt +++ b/src/backends/cl/workloads/CMakeLists.txt @@ -96,6 +96,8 @@ list(APPEND armnnClBackendWorkloads_sources ClSubtractionWorkload.hpp ClTransposeConvolution2dWorkload.cpp ClTransposeConvolution2dWorkload.hpp + ClTransposeWorkload.cpp + ClTransposeWorkload.hpp ClWorkloads.hpp ClWorkloadUtils.hpp ) diff --git a/src/backends/cl/workloads/ClTransposeWorkload.cpp b/src/backends/cl/workloads/ClTransposeWorkload.cpp new file mode 100644 index 0000000000..b276b229f6 --- /dev/null +++ b/src/backends/cl/workloads/ClTransposeWorkload.cpp @@ -0,0 +1,49 @@ +// +// Copyright © 2020 Arm Ltd. All rights reserved. 
+// SPDX-License-Identifier: MIT +// + +#include "ClTransposeWorkload.hpp" +#include +#include + +#include + +#include "ClWorkloadUtils.hpp" + +namespace armnn +{ + +arm_compute::Status ClTransposeWorkloadValidate(const TensorInfo& input, + const TensorInfo& output, + const TransposeDescriptor& descriptor) +{ + const arm_compute::TensorInfo aclInputInfo = armcomputetensorutils::BuildArmComputeTensorInfo(input); + const arm_compute::TensorInfo aclOutputInfo = armcomputetensorutils::BuildArmComputeTensorInfo(output); + const armnn::PermutationVector& mappings = descriptor.m_DimMappings; + + return arm_compute::CLPermute::validate(&aclInputInfo, &aclOutputInfo, + armcomputetensorutils::BuildArmComputeTransposeVector(mappings)); +} + +ClTransposeWorkload::ClTransposeWorkload(const TransposeQueueDescriptor& descriptor, + const WorkloadInfo& info) + : BaseWorkload(descriptor, info) +{ + m_Data.ValidateInputsOutputs(GetName(), 1, 1); + + const arm_compute::ICLTensor& input = static_cast(m_Data.m_Inputs[0])->GetTensor(); + arm_compute::ICLTensor& output = static_cast(m_Data.m_Outputs[0])->GetTensor(); + const armnn::PermutationVector& mappings = m_Data.m_Parameters.m_DimMappings; + // Run the layer. + m_PermuteFunction.configure(&input, &output, + armcomputetensorutils::BuildArmComputeTransposeVector(mappings)); +} + +void ClTransposeWorkload::Execute() const +{ + ARMNN_SCOPED_PROFILING_EVENT_CL(GetName() + "_Execute"); + RunClFunction(m_PermuteFunction, CHECK_LOCATION()); +} + +} // namespace armnn diff --git a/src/backends/cl/workloads/ClTransposeWorkload.hpp b/src/backends/cl/workloads/ClTransposeWorkload.hpp new file mode 100644 index 0000000000..c1bed93b97 --- /dev/null +++ b/src/backends/cl/workloads/ClTransposeWorkload.hpp @@ -0,0 +1,40 @@ +// +// Copyright © 2020 Arm Ltd. All rights reserved. 
+// SPDX-License-Identifier: MIT +// + +#pragma once + +#include +#include + +#include +#include + +#include + +namespace armnn +{ + +arm_compute::Status ClTransposeWorkloadValidate(const TensorInfo& input, + const TensorInfo& output, + const TransposeDescriptor& descriptor); + +class ClTransposeWorkload : public BaseWorkload +{ +public: + static const std::string& GetName() + { + static const std::string name = std::string("ClTransposeWorkload"); + return name; + } + + ClTransposeWorkload(const TransposeQueueDescriptor& descriptor, const WorkloadInfo& info); + void Execute() const override; + +private: + using BaseWorkload::m_Data; + mutable arm_compute::CLPermute m_PermuteFunction; +}; + +} // namespace armnn diff --git a/src/backends/cl/workloads/ClWorkloads.hpp b/src/backends/cl/workloads/ClWorkloads.hpp index 014dc3f99e..ec193d5e3e 100644 --- a/src/backends/cl/workloads/ClWorkloads.hpp +++ b/src/backends/cl/workloads/ClWorkloads.hpp @@ -49,3 +49,4 @@ #include "ClConvertFp16ToFp32Workload.hpp" #include "ClConvertFp32ToFp16Workload.hpp" #include "ClTransposeConvolution2dWorkload.hpp" +#include "ClTransposeWorkload.hpp" diff --git a/src/backends/neon/NeonLayerSupport.cpp b/src/backends/neon/NeonLayerSupport.cpp index 3c161d553a..7e58dabe93 100644 --- a/src/backends/neon/NeonLayerSupport.cpp +++ b/src/backends/neon/NeonLayerSupport.cpp @@ -59,6 +59,7 @@ #include "workloads/NeonStridedSliceWorkload.hpp" #include "workloads/NeonSubtractionWorkload.hpp" #include "workloads/NeonTransposeConvolution2dWorkload.hpp" +#include "workloads/NeonTransposeWorkload.hpp" #endif using namespace boost; @@ -803,4 +804,12 @@ bool NeonLayerSupport::IsTransposeConvolution2dSupported(const TensorInfo& input biases); } +bool NeonLayerSupport::IsTransposeSupported(const TensorInfo& input, + const TensorInfo& output, + const TransposeDescriptor& descriptor, + Optional reasonIfUnsupported) const +{ + FORWARD_WORKLOAD_VALIDATE_FUNC(NeonTransposeWorkloadValidate, reasonIfUnsupported, input, output, descriptor); +} + } // namespace armnn diff --git a/src/backends/neon/NeonLayerSupport.hpp b/src/backends/neon/NeonLayerSupport.hpp index 9cb64eac2b..f45db35ffe 100644 --- a/src/backends/neon/NeonLayerSupport.hpp +++ b/src/backends/neon/NeonLayerSupport.hpp @@ -288,6 +288,11 @@ public: const Optional& biases, Optional reasonIfUnsupported = EmptyOptional()) const override; + bool IsTransposeSupported(const TensorInfo& input, + const TensorInfo& output, + const TransposeDescriptor& descriptor, + Optional reasonIfUnsupported = EmptyOptional()) const override; + }; // class NeonLayerSupport } // namespace armnn diff --git a/src/backends/neon/NeonWorkloadFactory.cpp b/src/backends/neon/NeonWorkloadFactory.cpp index c3e0dc8cc1..dc3ee846fb 100644 --- a/src/backends/neon/NeonWorkloadFactory.cpp +++ b/src/backends/neon/NeonWorkloadFactory.cpp @@ -503,6 +503,12 @@ std::unique_ptr NeonWorkloadFactory::CreateSubtraction( return std::make_unique(descriptor, info); } +std::unique_ptr NeonWorkloadFactory::CreateTranspose(const TransposeQueueDescriptor& descriptor, + const WorkloadInfo& info) const +{ + return std::make_unique(descriptor, info); +} + std::unique_ptr NeonWorkloadFactory::CreateTransposeConvolution2d( const TransposeConvolution2dQueueDescriptor &descriptor, const WorkloadInfo &info) const diff --git a/src/backends/neon/NeonWorkloadFactory.hpp b/src/backends/neon/NeonWorkloadFactory.hpp index 95271e200d..bc4107dbb0 100644 --- a/src/backends/neon/NeonWorkloadFactory.hpp +++ b/src/backends/neon/NeonWorkloadFactory.hpp @@ 
-213,6 +213,9 @@ public: std::unique_ptr CreateSubtraction(const SubtractionQueueDescriptor& descriptor, const WorkloadInfo& info) const override; + std::unique_ptr CreateTranspose(const TransposeQueueDescriptor& descriptor, + const WorkloadInfo& info) const override; + std::unique_ptr CreateTransposeConvolution2d(const TransposeConvolution2dQueueDescriptor& descriptor, const WorkloadInfo& info) const override; diff --git a/src/backends/neon/backend.mk b/src/backends/neon/backend.mk index 1c572e61f5..d9a5405983 100644 --- a/src/backends/neon/backend.mk +++ b/src/backends/neon/backend.mk @@ -67,7 +67,8 @@ BACKEND_SOURCES := \ workloads/NeonStackWorkload.cpp \ workloads/NeonStridedSliceWorkload.cpp \ workloads/NeonSubtractionWorkload.cpp \ - workloads/NeonTransposeConvolution2dWorkload.cpp + workloads/NeonTransposeConvolution2dWorkload.cpp \ + workloads/NeonTransposeWorkload.cpp else diff --git a/src/backends/neon/test/NeonLayerTests.cpp b/src/backends/neon/test/NeonLayerTests.cpp index 18658a34a1..482bc25e34 100644 --- a/src/backends/neon/test/NeonLayerTests.cpp +++ b/src/backends/neon/test/NeonLayerTests.cpp @@ -891,6 +891,20 @@ ARMNN_AUTO_TEST_CASE(StackOutput4DAxis3, StackOutput4DAxis3Float32Test) ARMNN_AUTO_TEST_CASE(StackOutput3DInputs3, StackOutput3DInputs3Float32Test) ARMNN_AUTO_TEST_CASE(StackOutput5D, StackOutput5DFloat32Test) +// Transpose +ARMNN_AUTO_TEST_CASE(SimpleTransposeFloat32, SimpleTransposeTest) +ARMNN_AUTO_TEST_CASE(TransposeFloat32ValueSet1Test, TransposeValueSet1Test) +ARMNN_AUTO_TEST_CASE(TransposeFloat32ValueSet2Test, TransposeValueSet2Test) +ARMNN_AUTO_TEST_CASE(TransposeFloat32ValueSet3Test, TransposeValueSet3Test) +ARMNN_AUTO_TEST_CASE(SimpleTransposeQASymm8, SimpleTransposeTest) +ARMNN_AUTO_TEST_CASE(TransposeQASymm8ValueSet1Test, TransposeValueSet1Test) +ARMNN_AUTO_TEST_CASE(TransposeQASymm8ValueSet2Test, TransposeValueSet2Test) +ARMNN_AUTO_TEST_CASE(TransposeQASymm8ValueSet3Test, TransposeValueSet3Test) +ARMNN_AUTO_TEST_CASE(SimpleTransposeQSymm16, SimpleTransposeTest) +ARMNN_AUTO_TEST_CASE(TransposeQSymm16ValueSet1Test, TransposeValueSet1Test) +ARMNN_AUTO_TEST_CASE(TransposeQSymm16ValueSet2Test, TransposeValueSet2Test) +ARMNN_AUTO_TEST_CASE(TransposeQSymm16ValueSet3Test, TransposeValueSet3Test) + // TransposeConvolution2d ARMNN_AUTO_TEST_CASE(SimpleTransposeConvolution2dFloatNchw, SimpleTransposeConvolution2dTest, diff --git a/src/backends/neon/workloads/CMakeLists.txt b/src/backends/neon/workloads/CMakeLists.txt index 02ffedcf09..a932f8b852 100644 --- a/src/backends/neon/workloads/CMakeLists.txt +++ b/src/backends/neon/workloads/CMakeLists.txt @@ -98,6 +98,8 @@ list(APPEND armnnNeonBackendWorkloads_sources NeonSubtractionWorkload.hpp NeonTransposeConvolution2dWorkload.cpp NeonTransposeConvolution2dWorkload.hpp + NeonTransposeWorkload.cpp + NeonTransposeWorkload.hpp NeonWorkloads.hpp NeonWorkloadUtils.hpp ) diff --git a/src/backends/neon/workloads/NeonTransposeWorkload.cpp b/src/backends/neon/workloads/NeonTransposeWorkload.cpp new file mode 100644 index 0000000000..c11f2df2d2 --- /dev/null +++ b/src/backends/neon/workloads/NeonTransposeWorkload.cpp @@ -0,0 +1,48 @@ +// +// Copyright © 2020 Arm Ltd. All rights reserved. 
+// SPDX-License-Identifier: MIT
+//
+
+#include "NeonTransposeWorkload.hpp"
+#include <neon/NeonTensorHandle.hpp>
+#include <aclCommon/ArmComputeTensorUtils.hpp>
+
+#include <arm_compute/core/Error.h>
+
+namespace armnn
+{
+
+arm_compute::Status NeonTransposeWorkloadValidate(const TensorInfo& input,
+                                                  const TensorInfo& output,
+                                                  const TransposeDescriptor& descriptor)
+{
+    const arm_compute::TensorInfo aclInputInfo = armcomputetensorutils::BuildArmComputeTensorInfo(input);
+    const arm_compute::TensorInfo aclOutputInfo = armcomputetensorutils::BuildArmComputeTensorInfo(output);
+    const armnn::PermutationVector& mappings = descriptor.m_DimMappings;
+
+    return arm_compute::NEPermute::validate(&aclInputInfo, &aclOutputInfo,
+                                            armcomputetensorutils::BuildArmComputeTransposeVector(mappings));
+}
+
+NeonTransposeWorkload::NeonTransposeWorkload(const TransposeQueueDescriptor& descriptor,
+                                             const WorkloadInfo& info)
+    : BaseWorkload<TransposeQueueDescriptor>(descriptor, info)
+{
+    m_Data.ValidateInputsOutputs(GetName(), 1, 1);
+
+    const arm_compute::ITensor& input = static_cast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+    arm_compute::ITensor& output = static_cast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+    const armnn::PermutationVector& mappings = m_Data.m_Parameters.m_DimMappings;
+
+    // Run the layer.
+    m_PermuteFunction.configure(&input, &output,
+                                armcomputetensorutils::BuildArmComputeTransposeVector(mappings));
+}
+
+void NeonTransposeWorkload::Execute() const
+{
+    ARMNN_SCOPED_PROFILING_EVENT_NEON(GetName() + "_Execute");
+    m_PermuteFunction.run();
+}
+
+} // namespace armnn
diff --git a/src/backends/neon/workloads/NeonTransposeWorkload.hpp b/src/backends/neon/workloads/NeonTransposeWorkload.hpp
new file mode 100644
index 0000000000..aab7b705e3
--- /dev/null
+++ b/src/backends/neon/workloads/NeonTransposeWorkload.hpp
@@ -0,0 +1,39 @@
+//
+// Copyright © 2020 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <backendsCommon/Workload.hpp>
+#include <backendsCommon/WorkloadData.hpp>
+#include <neon/workloads/NeonWorkloadUtils.hpp>
+
+#include <armnn/TypesUtils.hpp>
+#include <arm_compute/runtime/NEON/functions/NEPermute.h>
+
+#include <string>
+
+namespace armnn
+{
+arm_compute::Status NeonTransposeWorkloadValidate(const TensorInfo& input, const TensorInfo& output,
+                                                  const TransposeDescriptor& descriptor);
+
+class NeonTransposeWorkload : public BaseWorkload<TransposeQueueDescriptor>
+{
+public:
+    static const std::string& GetName()
+    {
+        static const std::string name = std::string("NeonTransposeWorkload");
+        return name;
+    }
+
+    NeonTransposeWorkload(const TransposeQueueDescriptor& descriptor, const WorkloadInfo& info);
+    void Execute() const override;
+
+private:
+    using BaseWorkload<TransposeQueueDescriptor>::m_Data;
+    mutable arm_compute::NEPermute m_PermuteFunction;
+};
+
+} // namespace armnn
diff --git a/src/backends/neon/workloads/NeonWorkloads.hpp b/src/backends/neon/workloads/NeonWorkloads.hpp
index b08483c443..52cd76f14b 100644
--- a/src/backends/neon/workloads/NeonWorkloads.hpp
+++ b/src/backends/neon/workloads/NeonWorkloads.hpp
@@ -50,3 +50,4 @@
 #include "NeonStridedSliceWorkload.hpp"
 #include "NeonSubtractionWorkload.hpp"
 #include "NeonTransposeConvolution2dWorkload.hpp"
+#include "NeonTransposeWorkload.hpp"
diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp
index 8f1f170c5c..25334c3b52 100644
--- a/src/backends/reference/RefLayerSupport.cpp
+++ b/src/backends/reference/RefLayerSupport.cpp
@@ -1388,9 +1388,10 @@ bool RefLayerSupport::IsPermuteSupported(const TensorInfo& input,
     bool supported = true;
 
     // Define supported output and inputs types.
-    std::array<DataType,3> supportedTypes =
+    std::array<DataType,4> supportedTypes =
     {
         DataType::Float32,
+        DataType::Float16,
         DataType::QAsymmU8,
         DataType::QSymmS16
     };
@@ -1912,4 +1913,33 @@ bool RefLayerSupport::IsTransposeConvolution2dSupported(const TensorInfo& input,
     return supported;
 }
 
+bool RefLayerSupport::IsTransposeSupported(const TensorInfo& input,
+                                           const TensorInfo& output,
+                                           const TransposeDescriptor& descriptor,
+                                           Optional<std::string&> reasonIfUnsupported) const
+{
+    ignore_unused(descriptor);
+    bool supported = true;
+
+    // Define supported output and inputs types.
+    std::array<DataType, 4> supportedTypes =
+    {
+        DataType::Float32,
+        DataType::Float16,
+        DataType::QAsymmU8,
+        DataType::QSymmS16
+    };
+
+    supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
+                                  "Reference transpose: input is not a supported type.");
+
+    supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
+                                  "Reference transpose: output is not a supported type.");
+
+    supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
+                                  "Reference transpose: input and output types are mismatched.");
+
+    return supported;
+}
+
 } // namespace armnn
diff --git a/src/backends/reference/RefLayerSupport.hpp b/src/backends/reference/RefLayerSupport.hpp
index 1551a55694..27f3f81489 100644
--- a/src/backends/reference/RefLayerSupport.hpp
+++ b/src/backends/reference/RefLayerSupport.hpp
@@ -318,6 +318,12 @@ public:
                                          const TensorInfo& weights,
                                          const Optional<TensorInfo>& biases,
                                          Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+    bool IsTransposeSupported(const TensorInfo& input,
+                              const TensorInfo& output,
+                              const TransposeDescriptor& descriptor,
+                              Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
 };
 
 } // namespace armnn
diff --git a/src/backends/reference/RefWorkloadFactory.cpp b/src/backends/reference/RefWorkloadFactory.cpp
index 02dbbabf9f..2a415bfbf0 100644
--- a/src/backends/reference/RefWorkloadFactory.cpp
+++ b/src/backends/reference/RefWorkloadFactory.cpp
@@ -561,6 +561,17 @@ std::unique_ptr<IWorkload> RefWorkloadFactory::CreateSubtraction(const Subtracti
     return std::make_unique<RefSubtractionWorkload>(descriptor, info);
 }
 
+std::unique_ptr<IWorkload> RefWorkloadFactory::CreateTranspose(const TransposeQueueDescriptor& descriptor,
+                                                               const WorkloadInfo& info) const
+{
+    if (IsQSymmS16(info))
+    {
+        return std::make_unique<RefTransposeQSymm16Workload>(descriptor, info);
+    }
+    return MakeWorkloadHelper<RefTransposeFloat16Workload, RefTransposeFloat32Workload, RefTransposeQAsymm8Workload,
+        NullWorkload, NullWorkload, NullWorkload>(descriptor, info);
+}
+
 std::unique_ptr<IWorkload> RefWorkloadFactory::CreateTransposeConvolution2d(
     const TransposeConvolution2dQueueDescriptor& descriptor,
     const WorkloadInfo& info) const
diff --git a/src/backends/reference/RefWorkloadFactory.hpp b/src/backends/reference/RefWorkloadFactory.hpp
index b5b9b0faf0..030ce6f03d 100644
--- a/src/backends/reference/RefWorkloadFactory.hpp
+++ b/src/backends/reference/RefWorkloadFactory.hpp
@@ -236,6 +236,9 @@ public:
     std::unique_ptr<IWorkload> CreateSubtraction(const SubtractionQueueDescriptor& descriptor,
                                                  const WorkloadInfo& info) const override;
 
+    std::unique_ptr<IWorkload> CreateTranspose(const TransposeQueueDescriptor& descriptor,
+                                               const WorkloadInfo& info) const override;
+
     std::unique_ptr<IWorkload> CreateTransposeConvolution2d(const TransposeConvolution2dQueueDescriptor& descriptor,
                                                             const WorkloadInfo& info) const override;
 
diff --git a/src/backends/reference/backend.mk b/src/backends/reference/backend.mk
index 1987bd59fa..010d54871a 100644
--- a/src/backends/reference/backend.mk
+++ b/src/backends/reference/backend.mk
@@ -85,6 +85,7 @@ BACKEND_SOURCES := \
         workloads/RefStridedSliceWorkload.cpp \
         workloads/RefSplitterWorkload.cpp \
         workloads/RefTransposeConvolution2dWorkload.cpp \
+        workloads/RefTransposeWorkload.cpp \
         workloads/Resize.cpp \
         workloads/Slice.cpp \
         workloads/SpaceToBatchNd.cpp \
diff --git a/src/backends/reference/test/RefLayerTests.cpp b/src/backends/reference/test/RefLayerTests.cpp
index d5c67ef6c7..ed2b995bd5 100644
--- a/src/backends/reference/test/RefLayerTests.cpp
+++ b/src/backends/reference/test/RefLayerTests.cpp
@@ -1460,6 +1460,20 @@ ARMNN_AUTO_TEST_CASE(Slice3dInt16, Slice3dInt16Test)
 ARMNN_AUTO_TEST_CASE(Slice2dInt16, Slice2dInt16Test)
 ARMNN_AUTO_TEST_CASE(Slice1dInt16, Slice1dInt16Test)
 
+// Transpose
+ARMNN_AUTO_TEST_CASE(SimpleTransposeFloat32, SimpleTransposeTest<armnn::DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(TransposeFloat32ValueSet1Test, TransposeValueSet1Test<armnn::DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(TransposeFloat32ValueSet2Test, TransposeValueSet2Test<armnn::DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(TransposeFloat32ValueSet3Test, TransposeValueSet3Test<armnn::DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(SimpleTransposeQASymm8, SimpleTransposeTest<armnn::DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(TransposeQASymm8ValueSet1Test, TransposeValueSet1Test<armnn::DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(TransposeQASymm8ValueSet2Test, TransposeValueSet2Test<armnn::DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(TransposeQASymm8ValueSet3Test, TransposeValueSet3Test<armnn::DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(SimpleTransposeQSymm16, SimpleTransposeTest<armnn::DataType::QSymmS16>)
+ARMNN_AUTO_TEST_CASE(TransposeQSymm16ValueSet1Test, TransposeValueSet1Test<armnn::DataType::QSymmS16>)
+ARMNN_AUTO_TEST_CASE(TransposeQSymm16ValueSet2Test, TransposeValueSet2Test<armnn::DataType::QSymmS16>)
+ARMNN_AUTO_TEST_CASE(TransposeQSymm16ValueSet3Test, TransposeValueSet3Test<armnn::DataType::QSymmS16>)
+
 // TransposeConvolution2d
 ARMNN_AUTO_TEST_CASE(SimpleTransposeConvolution2dFloatNchw,
                      SimpleTransposeConvolution2dTest<armnn::DataType::Float32, armnn::DataType::Float32>,
diff --git a/src/backends/reference/workloads/CMakeLists.txt b/src/backends/reference/workloads/CMakeLists.txt
index 6795204d59..b2d8938745 100644
--- a/src/backends/reference/workloads/CMakeLists.txt
+++ b/src/backends/reference/workloads/CMakeLists.txt
@@ -141,6 +141,8 @@
     RefStridedSliceWorkload.hpp
     RefTransposeConvolution2dWorkload.cpp
     RefTransposeConvolution2dWorkload.hpp
+    RefTransposeWorkload.cpp
+    RefTransposeWorkload.hpp
     RefWorkloads.hpp
     RefWorkloadUtils.hpp
     Resize.cpp
diff --git a/src/backends/reference/workloads/RefTransposeWorkload.cpp b/src/backends/reference/workloads/RefTransposeWorkload.cpp
new file mode 100644
index 0000000000..6bdfb2111d
--- /dev/null
+++ b/src/backends/reference/workloads/RefTransposeWorkload.cpp
@@ -0,0 +1,35 @@
+//
+// Copyright © 2020 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "RefTransposeWorkload.hpp"
+#include "RefWorkloadUtils.hpp"
+
+#include <armnnUtils/Transpose.hpp>
+
+#include <ResolveType.hpp>
+
+namespace armnn
+{
+
+template <armnn::DataType DataType>
+void RefTransposeWorkload<DataType>::Execute() const
+{
+    using T = ResolveType<DataType>;
+
+    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, GetName() + "_Execute");
+
+    const ITensorHandle* src = m_Data.m_Inputs[0];
+    ITensorHandle* dst = m_Data.m_Outputs[0];
+    const PermutationVector& mappings = m_Data.m_Parameters.m_DimMappings;
+
+    armnnUtils::Transpose(GetTensorInfo(src).GetShape(), mappings, src->Map(), dst->Map(), sizeof(T));
+}
+
+template class RefTransposeWorkload<DataType::Float16>;
+template class RefTransposeWorkload<DataType::Float32>;
+template class RefTransposeWorkload<DataType::QAsymmU8>;
+template class RefTransposeWorkload<DataType::QSymmS16>;
+
+} //namespace armnn
diff --git a/src/backends/reference/workloads/RefTransposeWorkload.hpp b/src/backends/reference/workloads/RefTransposeWorkload.hpp
new file mode 100644
index 0000000000..4b1c3d303b
--- /dev/null
+++ b/src/backends/reference/workloads/RefTransposeWorkload.hpp
@@ -0,0 +1,35 @@
+//
+// Copyright © 2020 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <backendsCommon/Workload.hpp>
+
+#include <armnn/TypesUtils.hpp>
+
+namespace armnn
+{
+
+template <armnn::DataType DataType>
+class RefTransposeWorkload : public TypedWorkload<TransposeQueueDescriptor, DataType>
+{
+public:
+    static const std::string& GetName()
+    {
+        static const std::string name = std::string("RefTranspose") + GetDataTypeName(DataType) + "Workload";
+        return name;
+    }
+
+    using TypedWorkload<TransposeQueueDescriptor, DataType>::m_Data;
+    using TypedWorkload<TransposeQueueDescriptor, DataType>::TypedWorkload;
+    void Execute() const override;
+};
+
+using RefTransposeFloat16Workload = RefTransposeWorkload<DataType::Float16>;
+using RefTransposeFloat32Workload = RefTransposeWorkload<DataType::Float32>;
+using RefTransposeQAsymm8Workload = RefTransposeWorkload<DataType::QAsymmU8>;
+using RefTransposeQSymm16Workload = RefTransposeWorkload<DataType::QSymmS16>;
+
+} //namespace armnn
\ No newline at end of file
diff --git a/src/backends/reference/workloads/RefWorkloads.hpp b/src/backends/reference/workloads/RefWorkloads.hpp
index 7034b67aa5..a0558ff06e 100644
--- a/src/backends/reference/workloads/RefWorkloads.hpp
+++ b/src/backends/reference/workloads/RefWorkloads.hpp
@@ -58,6 +58,7 @@
 #include "RefStridedSliceWorkload.hpp"
 #include "RefSpaceToDepthWorkload.hpp"
 #include "RefTransposeConvolution2dWorkload.hpp"
+#include "RefTransposeWorkload.hpp"
 #include "RefWorkloadUtils.hpp"
 #include "Resize.hpp"
 #include "Softmax.hpp"
-- 
cgit v1.2.1
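For reference, below is a minimal sketch of how the TransposeDescriptor and the reference backend's IsTransposeSupported check added in this patch might be exercised. It is not part of the patch: the include paths, the TransposeDescriptor constructor taking a PermutationVector, and direct use of the internal RefLayerSupport header are assumptions inferred from the surrounding code, not verified public API.

    // Sketch only: header locations and the TransposeDescriptor(PermutationVector)
    // constructor are assumed from the patch context, not verified.
    #include <armnn/Descriptors.hpp>          // TransposeDescriptor, PermutationVector (assumed)
    #include <armnn/Optional.hpp>             // Optional<std::string&>
    #include <armnn/Tensor.hpp>               // TensorInfo
    #include <reference/RefLayerSupport.hpp>  // internal backend header (assumed path)

    #include <iostream>
    #include <string>

    int main()
    {
        using namespace armnn;

        // Swap the last two dimensions of a 4D tensor; a two-dimension swap reads the
        // same under either permutation convention, so the example stays unambiguous.
        PermutationVector mappings({ 0, 1, 3, 2 });
        TransposeDescriptor descriptor(mappings);

        TensorInfo input({ 1, 2, 3, 4 }, DataType::Float32);
        TensorInfo output({ 1, 2, 4, 3 }, DataType::Float32);

        RefLayerSupport layerSupport;
        std::string reason;
        Optional<std::string&> reasonIfUnsupported(reason);

        // Mirrors the checks in RefLayerSupport::IsTransposeSupported above.
        bool supported = layerSupport.IsTransposeSupported(input, output, descriptor, reasonIfUnsupported);
        std::cout << (supported ? std::string("Transpose supported") : "Not supported: " + reason) << std::endl;
        return supported ? 0 : 1;
    }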