From 639fb0437d1a5a8a6ea737fed5a16b554dfffead Mon Sep 17 00:00:00 2001
From: Aron Virginas-Tar
Date: Thu, 20 Jun 2019 14:28:19 +0100
Subject: IVGCVSW-3319 Add frontend support for TransposeConvolution2d Layer

Signed-off-by: Aron Virginas-Tar
Change-Id: Ic06f63f1eff255e697facf319e2ac4c83d782e7c
---
 Android.mk                                         |   1 +
 CMakeLists.txt                                     |   2 +
 include/armnn/Descriptors.hpp                      |  32 +++++
 include/armnn/DescriptorsFwd.hpp                   |   1 +
 include/armnn/ILayerSupport.hpp                    |   9 ++
 include/armnn/ILayerVisitor.hpp                    |  15 ++-
 include/armnn/INetwork.hpp                         |  11 ++
 include/armnn/LayerSupport.hpp                     |  10 ++
 include/armnn/LayerVisitorBase.hpp                 |   9 +-
 src/armnn/InternalTypes.hpp                        |   3 +-
 src/armnn/LayersFwd.hpp                            |   2 +
 src/armnn/Network.cpp                              |  22 ++++
 src/armnn/Network.hpp                              |   5 +
 src/armnn/layers/TransposeConvolution2dLayer.cpp   | 130 +++++++++++++++++++++
 src/armnn/layers/TransposeConvolution2dLayer.hpp   |  59 ++++++++++
 src/armnnSerializer/Serializer.cpp                 |  10 ++
 src/armnnSerializer/Serializer.hpp                 |   7 ++
 src/backends/backendsCommon/LayerSupportBase.cpp   |  10 ++
 src/backends/backendsCommon/LayerSupportBase.hpp   |   8 ++
 src/backends/backendsCommon/WorkloadData.cpp       |  41 +++++++
 src/backends/backendsCommon/WorkloadData.hpp       |  13 +++
 src/backends/backendsCommon/WorkloadFactory.cpp    |  37 ++++++
 src/backends/backendsCommon/WorkloadFactory.hpp    |   6 +-
 .../test/IsLayerSupportedTestImpl.hpp              |   8 ++
 24 files changed, 447 insertions(+), 4 deletions(-)
 create mode 100644 src/armnn/layers/TransposeConvolution2dLayer.cpp
 create mode 100644 src/armnn/layers/TransposeConvolution2dLayer.hpp

diff --git a/Android.mk b/Android.mk
index 7f3080b512..b08a4aed53 100644
--- a/Android.mk
+++ b/Android.mk
@@ -135,6 +135,7 @@ LOCAL_SRC_FILES := \
         src/armnn/layers/StridedSliceLayer.cpp \
         src/armnn/layers/SubtractionLayer.cpp \
         src/armnn/layers/SwitchLayer.cpp \
+        src/armnn/layers/TransposeConvolution2dLayer.cpp \
        src/armnn/Descriptors.cpp \
        src/armnn/Exceptions.cpp \
        src/armnn/Graph.cpp \
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 2971b0d73e..f07295f5b2 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -318,6 +318,8 @@ list(APPEND armnn_sources
     src/armnn/layers/SubtractionLayer.hpp
     src/armnn/layers/SwitchLayer.cpp
     src/armnn/layers/SwitchLayer.hpp
+    src/armnn/layers/TransposeConvolution2dLayer.cpp
+    src/armnn/layers/TransposeConvolution2dLayer.hpp
    src/armnn/BackendSettings.hpp
    src/armnn/CompatibleTypes.hpp
    src/armnn/Descriptors.cpp
diff --git a/include/armnn/Descriptors.hpp b/include/armnn/Descriptors.hpp
index dd1991d569..2fda8c1d06 100644
--- a/include/armnn/Descriptors.hpp
+++ b/include/armnn/Descriptors.hpp
@@ -684,4 +684,36 @@ struct PreCompiledDescriptor
     unsigned int m_NumOutputSlots;
 };
 
+/// A TransposeConvolution2dDescriptor for the TransposeConvolution2dLayer.
+struct TransposeConvolution2dDescriptor
+{
+    TransposeConvolution2dDescriptor() :
+        m_PadLeft(0),
+        m_PadRight(0),
+        m_PadTop(0),
+        m_PadBottom(0),
+        m_StrideX(0),
+        m_StrideY(0),
+        m_BiasEnabled(false),
+        m_DataLayout(DataLayout::NCHW)
+    {}
+
+    /// Padding left value in the width dimension.
+    uint32_t   m_PadLeft;
+    /// Padding right value in the width dimension.
+    uint32_t   m_PadRight;
+    /// Padding top value in the height dimension.
+    uint32_t   m_PadTop;
+    /// Padding bottom value in the height dimension.
+    uint32_t   m_PadBottom;
+    /// Stride value when proceeding through input for the width dimension.
+    uint32_t   m_StrideX;
+    /// Stride value when proceeding through input for the height dimension.
+    uint32_t   m_StrideY;
+    /// Enable/disable bias.
+    bool       m_BiasEnabled;
+    /// The data layout to be used (NCHW, NHWC).
+    DataLayout m_DataLayout;
+};
+
 } // namespace armnn
\ No newline at end of file
diff --git a/include/armnn/DescriptorsFwd.hpp b/include/armnn/DescriptorsFwd.hpp
index 1c75c253ed..b814d48699 100644
--- a/include/armnn/DescriptorsFwd.hpp
+++ b/include/armnn/DescriptorsFwd.hpp
@@ -30,6 +30,7 @@ struct SoftmaxDescriptor;
 struct SpaceToBatchNdDescriptor;
 struct SpaceToDepthDescriptor;
 struct StridedSliceDescriptor;
+struct TransposeConvolution2dDescriptor;
 struct ViewsDescriptor;
 
 // MergerDescriptor is deprecated use ConcatDescriptor instead
diff --git a/include/armnn/ILayerSupport.hpp b/include/armnn/ILayerSupport.hpp
index 324a9f5a2d..eb581d3aaa 100644
--- a/include/armnn/ILayerSupport.hpp
+++ b/include/armnn/ILayerSupport.hpp
@@ -295,6 +295,15 @@ public:
                                    const TensorInfo& output0,
                                    const TensorInfo& output1,
                                    Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+
+    virtual bool IsTransposeConvolution2dSupported(
+        const TensorInfo& input,
+        const TensorInfo& output,
+        const TransposeConvolution2dDescriptor& descriptor,
+        const TensorInfo& weights,
+        const Optional<TensorInfo>& biases,
+        Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+
 }; // class ILayerSupport
 
 using ILayerSupportSharedPtr = std::shared_ptr<ILayerSupport>;
diff --git a/include/armnn/ILayerVisitor.hpp b/include/armnn/ILayerVisitor.hpp
index 9519c8b0c1..c98760c7cd 100644
--- a/include/armnn/ILayerVisitor.hpp
+++ b/include/armnn/ILayerVisitor.hpp
@@ -363,7 +363,7 @@ public:
     /// Function a strided slice layer should call back to when its Accept(ILayerVisitor&) function is invoked.
     /// @param layer - pointer to the layer which is calling back to this visit function.
-    /// @param StridedSliceDescriptor - Parameters for the strided slice operation.
+    /// @param stridedSliceDescriptor - Parameters for the strided slice operation.
     /// @param name - Optional name for the layer.
     virtual void VisitStridedSliceLayer(const IConnectableLayer* layer,
                                         const StridedSliceDescriptor& stridedSliceDescriptor,
@@ -381,6 +381,19 @@ public:
     virtual void VisitSwitchLayer(const IConnectableLayer* layer,
                                   const char* name = nullptr) = 0;
 
+    /// Function that a 2D transpose convolution layer should call back to when its Accept(ILayerVisitor&)
+    /// function is invoked.
+    /// @param layer - pointer to the layer which is calling back to this visit function.
+    /// @param descriptor - Description of the 2D transpose convolution layer.
+    /// @param weights - Tensor for the weights data.
+    /// @param biases - Optional tensor for the bias data.
+    /// @param name - Optional name for the layer.
+    virtual void VisitTransposeConvolution2dLayer(const IConnectableLayer* layer,
+                                                  const TransposeConvolution2dDescriptor& descriptor,
+                                                  const ConstTensor& weights,
+                                                  const Optional<ConstTensor>& biases,
+                                                  const char* name = nullptr) = 0;
+
     virtual void StartVisit() {}
     virtual void FinishVisit() {}
 
diff --git a/include/armnn/INetwork.hpp b/include/armnn/INetwork.hpp
index cacca33caf..af67764fc9 100644
--- a/include/armnn/INetwork.hpp
+++ b/include/armnn/INetwork.hpp
@@ -432,6 +432,17 @@ public:
     /// @return - Interface for configuring the layer.
     virtual IConnectableLayer* AddPreluLayer(const char* name = nullptr) = 0;
 
+    /// Adds a 2D transpose convolution layer to the network.
+    /// @param descriptor - Description of the 2D transpose convolution layer.
+    /// @param weights - Tensor for the weights data.
+    /// @param biases - Optional tensor for the bias data.
+    /// @param name - Optional name for the layer.
+    /// @return - Interface for configuring the layer.
+    virtual IConnectableLayer* AddTransposeConvolution2dLayer(const TransposeConvolution2dDescriptor& descriptor,
+                                                              const ConstTensor& weights,
+                                                              const Optional<ConstTensor>& biases,
+                                                              const char* name = nullptr) = 0;
+
     virtual void Accept(ILayerVisitor& visitor) const = 0;
 
 protected:
diff --git a/include/armnn/LayerSupport.hpp b/include/armnn/LayerSupport.hpp
index 673193f330..f0dca7709e 100644
--- a/include/armnn/LayerSupport.hpp
+++ b/include/armnn/LayerSupport.hpp
@@ -381,4 +381,14 @@ bool IsSwitchSupported(const BackendId& backend,
                        const TensorInfo& output1,
                        char* reasonIfUnsupported = nullptr,
                        size_t reasonIfUnsupportedMaxLength = 1024);
+
+/// Deprecated in favor of IBackend and ILayerSupport interfaces
+bool IsTransposeConvolution2dSupported(const BackendId& backend,
+                                       const TensorInfo& input,
+                                       const TensorInfo& output,
+                                       const TransposeConvolution2dDescriptor& descriptor,
+                                       const TensorInfo& weights,
+                                       const Optional<TensorInfo>& biases,
+                                       char* reasonIfUnsupported = nullptr,
+                                       size_t reasonIfUnsupportedMaxLength = 1024);
 }
diff --git a/include/armnn/LayerVisitorBase.hpp b/include/armnn/LayerVisitorBase.hpp
index 48fc2bbb0b..8406efe068 100644
--- a/include/armnn/LayerVisitorBase.hpp
+++ b/include/armnn/LayerVisitorBase.hpp
@@ -193,6 +193,13 @@ public:
     void VisitSwitchLayer(const IConnectableLayer*,
                           const char*) override { DefaultPolicy::Apply(__func__); }
+
+    void VisitTransposeConvolution2dLayer(const IConnectableLayer*,
+                                          const TransposeConvolution2dDescriptor&,
+                                          const ConstTensor&,
+                                          const Optional<ConstTensor>&,
+                                          const char*) override { DefaultPolicy::Apply(__func__); }
+
 };
 
-} //namespace armnn
+} // namespace armnn
diff --git a/src/armnn/InternalTypes.hpp b/src/armnn/InternalTypes.hpp
index a1434eae5e..dc3dc17c02 100644
--- a/src/armnn/InternalTypes.hpp
+++ b/src/armnn/InternalTypes.hpp
@@ -60,9 +60,10 @@ enum class LayerType
     Splitter,
     StridedSlice,
     Subtraction,
+    Switch,
     // Last layer goes here.
     LastLayer,
-    Switch = LastLayer
+    TransposeConvolution2d = LastLayer
 };
 
 const char* GetLayerTypeAsCString(LayerType type);
diff --git a/src/armnn/LayersFwd.hpp b/src/armnn/LayersFwd.hpp
index a801431f84..9837cd349d 100644
--- a/src/armnn/LayersFwd.hpp
+++ b/src/armnn/LayersFwd.hpp
@@ -53,6 +53,7 @@
 #include "layers/StridedSliceLayer.hpp"
 #include "layers/SubtractionLayer.hpp"
 #include "layers/SwitchLayer.hpp"
+#include "layers/TransposeConvolution2dLayer.hpp"
 
 namespace armnn
 {
@@ -128,5 +129,6 @@ DECLARE_LAYER(Splitter)
 DECLARE_LAYER(StridedSlice)
 DECLARE_LAYER(Subtraction)
 DECLARE_LAYER(Switch)
+DECLARE_LAYER(TransposeConvolution2d)
 
 }
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index 75b63e49f6..9436fc6f9c 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -1008,6 +1008,28 @@ IConnectableLayer* Network::AddPreluLayer(const char* name)
     return m_Graph->AddLayer<PreluLayer>(name);
 }
 
+IConnectableLayer* Network::AddTransposeConvolution2dLayer(const TransposeConvolution2dDescriptor& descriptor,
+                                                           const ConstTensor& weights,
+                                                           const Optional<ConstTensor>& biases,
+                                                           const char* name)
+{
+    if (descriptor.m_BiasEnabled && !biases.has_value())
+    {
+        throw InvalidArgumentException("AddTransposeConvolution2dLayer: Biases cannot be empty");
+    }
+
+    const auto layer = m_Graph->AddLayer<TransposeConvolution2dLayer>(descriptor, name);
+
+    layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(weights);
+
+    if (descriptor.m_BiasEnabled)
+    {
+        layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(biases.value());
+    }
+
+    return layer;
+}
+
 void Network::Accept(ILayerVisitor& visitor) const
 {
     for (auto layer : GetGraph())
diff --git a/src/armnn/Network.hpp b/src/armnn/Network.hpp
index e1379d0014..b90e3c2f8d 100644
--- a/src/armnn/Network.hpp
+++ b/src/armnn/Network.hpp
@@ -187,6 +187,11 @@ public:
 
     IConnectableLayer* AddPreluLayer(const char* name = nullptr) override;
 
+    IConnectableLayer* AddTransposeConvolution2dLayer(const TransposeConvolution2dDescriptor& descriptor,
+                                                      const ConstTensor& weights,
+                                                      const Optional<ConstTensor>& biases,
+                                                      const char* name = nullptr) override;
+
     void Accept(ILayerVisitor& visitor) const override;
 
 private:
diff --git a/src/armnn/layers/TransposeConvolution2dLayer.cpp b/src/armnn/layers/TransposeConvolution2dLayer.cpp
new file mode 100644
index 0000000000..69f598d288
--- /dev/null
+++ b/src/armnn/layers/TransposeConvolution2dLayer.cpp
@@ -0,0 +1,130 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#include "TransposeConvolution2dLayer.hpp"
+
+#include "LayerCloneBase.hpp"
+
+#include <armnn/TypesUtils.hpp>
+
+#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/WorkloadFactory.hpp>
+
+#include <DataLayoutIndexed.hpp>
+
+using namespace armnnUtils;
+
+namespace armnn
+{
+
+TransposeConvolution2dLayer::TransposeConvolution2dLayer(const TransposeConvolution2dDescriptor& param,
+                                                         const char* name)
+    : LayerWithParameters(1, 1, LayerType::TransposeConvolution2d, param, name)
+{
+}
+
+std::unique_ptr<IWorkload> TransposeConvolution2dLayer::CreateWorkload(const Graph& graph,
+                                                                       const IWorkloadFactory& factory) const
+{
+    BOOST_ASSERT_MSG(m_Weight != nullptr, "TransposeConvolution2dLayer: Weights data should not be null.");
+
+    TransposeConvolution2dQueueDescriptor descriptor;
+    descriptor.m_Weight = m_Weight.get();
+
+    if (m_Param.m_BiasEnabled)
+    {
+        BOOST_ASSERT_MSG(m_Bias != nullptr, "TransposeConvolution2dLayer: Bias data should not be null.");
+        descriptor.m_Bias = m_Bias.get();
+    }
+
+    return factory.CreateTransposeConvolution2d(descriptor, PrepInfoAndDesc(descriptor, graph));
+}
+
+TransposeConvolution2dLayer* TransposeConvolution2dLayer::Clone(Graph& graph) const
+{
+    auto layer = CloneBase<TransposeConvolution2dLayer>(graph, m_Param, GetName());
+
+    layer->m_Weight = m_Weight ? std::make_unique<ScopedCpuTensorHandle>(*m_Weight) : nullptr;
+
+    if (layer->m_Param.m_BiasEnabled)
+    {
+        layer->m_Bias = m_Bias ? std::make_unique<ScopedCpuTensorHandle>(*m_Bias) : nullptr;
+    }
+
+    return std::move(layer);
+}
+
+std::vector<TensorShape> TransposeConvolution2dLayer::InferOutputShapes(
+    const std::vector<TensorShape>& inputShapes) const
+{
+    BOOST_ASSERT(inputShapes.size() == 2);
+    const TensorShape& inputShape  = inputShapes[0];
+    const TensorShape& kernelShape = inputShapes[1];
+
+    BOOST_ASSERT_MSG(inputShape.GetNumDimensions() == 4, "Transpose convolutions will always have 4D input");
+
+    DataLayoutIndexed dataLayoutIndex(m_Param.m_DataLayout);
+
+    unsigned int inBatchSize = inputShape[0];
+    unsigned int inWidth     = inputShape[dataLayoutIndex.GetWidthIndex()];
+    unsigned int inHeight    = inputShape[dataLayoutIndex.GetHeightIndex()];
+    unsigned int inChannels  = inputShape[dataLayoutIndex.GetChannelsIndex()];
+
+    unsigned int kernelWidth  = kernelShape[dataLayoutIndex.GetWidthIndex()];
+    unsigned int kernelHeight = kernelShape[dataLayoutIndex.GetHeightIndex()];
+
+    unsigned int totalPaddingX = m_Param.m_PadLeft + m_Param.m_PadRight;
+    unsigned int totalPaddingY = m_Param.m_PadTop + m_Param.m_PadBottom;
+
+    unsigned int outWidth  = m_Param.m_StrideX * (inWidth + 1) - totalPaddingX + kernelWidth;
+    unsigned int outHeight = m_Param.m_StrideY * (inHeight + 1) - totalPaddingY + kernelHeight;
+
+    unsigned int outChannels  = inChannels;
+    unsigned int outBatchSize = inBatchSize;
+
+    TensorShape tensorShape = m_Param.m_DataLayout == armnn::DataLayout::NHWC ?
+                              TensorShape( { outBatchSize, outHeight, outWidth, outChannels } ) :
+                              TensorShape( { outBatchSize, outChannels, outHeight, outWidth });
+
+    return std::vector<TensorShape>({ tensorShape });
+}
+
+void TransposeConvolution2dLayer::ValidateTensorShapesFromInputs()
+{
+    VerifyLayerConnections(1, CHECK_LOCATION());
+
+    BOOST_ASSERT_MSG(m_Weight != nullptr, "TransposeConvolution2dLayer: Weight data cannot be null.");
+
+    auto inferredShapes = InferOutputShapes({
+        GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape(),
+        m_Weight->GetTensorInfo().GetShape() });
+
+    BOOST_ASSERT(inferredShapes.size() == 1);
+
+    ConditionalThrowIfNotEqual<LayerValidationException>(
+        "TransposeConvolution2dLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
+        GetOutputSlot(0).GetTensorInfo().GetShape(),
+        inferredShapes[0]);
+}
+
+Layer::ConstantTensors TransposeConvolution2dLayer::GetConstantTensorsByRef()
+{
+    return {m_Weight, m_Bias};
+}
+
+void TransposeConvolution2dLayer::Accept(ILayerVisitor& visitor) const
+{
+    ConstTensor weightsTensor(m_Weight->GetTensorInfo(), m_Weight->Map(true));
+    Optional<ConstTensor> optionalBiasTensor = EmptyOptional();
+
+    if (GetParameters().m_BiasEnabled)
+    {
+        ConstTensor biasTensor(m_Bias->GetTensorInfo(), m_Bias->Map(true));
+        optionalBiasTensor = Optional<ConstTensor>(biasTensor);
+    }
+
+    visitor.VisitTransposeConvolution2dLayer(this, GetParameters(), weightsTensor, optionalBiasTensor, GetName());
+}
+
+} // namespace armnn
diff --git a/src/armnn/layers/TransposeConvolution2dLayer.hpp b/src/armnn/layers/TransposeConvolution2dLayer.hpp
new file mode 100644
index 0000000000..4dc4644a3c
--- /dev/null
+++ b/src/armnn/layers/TransposeConvolution2dLayer.hpp
@@ -0,0 +1,59 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include "LayerWithParameters.hpp"
+
+namespace armnn
+{
+
+class ScopedCpuTensorHandle;
+
+/// This layer represents a 2D transpose convolution operation.
+class TransposeConvolution2dLayer : public LayerWithParameters<TransposeConvolution2dDescriptor>
+{
+public:
+    /// A unique pointer to store weight values.
+    std::unique_ptr<ScopedCpuTensorHandle> m_Weight;
+    /// A unique pointer to store bias values.
+    std::unique_ptr<ScopedCpuTensorHandle> m_Bias;
+
+    /// Makes a workload for the TransposeConvolution2d type.
+    /// @param [in] graph The graph where this layer can be found.
+    /// @param [in] factory The workload factory which will create the workload.
+    /// @return A pointer to the created workload, or nullptr if not created.
+    virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
+                                                      const IWorkloadFactory& factory) const override;
+
+    /// Creates a dynamically-allocated copy of this layer.
+    /// @param [in] graph The graph into which this layer is being cloned.
+    TransposeConvolution2dLayer* Clone(Graph& graph) const override;
+
+    /// Check if the input tensor shape(s)
+    /// will lead to a valid configuration of @ref TransposeConvolution2dLayer.
+    void ValidateTensorShapesFromInputs() override;
+
+    /// Infers the output shapes from given input shapes and layer properties.
+    /// @param [in] inputShapes The input shapes the layer has.
+    /// @return A vector of the inferred output shape.
+    std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
+
+    void Accept(ILayerVisitor& visitor) const override;
+
+protected:
+    /// Constructor to create a TransposeConvolution2dLayer.
+    /// @param [in] param TransposeConvolution2dDescriptor to configure the 2D transpose convolution operation.
+    /// @param [in] name Optional name for the layer.
+    TransposeConvolution2dLayer(const TransposeConvolution2dDescriptor& param, const char* name);
+
+    /// Default destructor
+    ~TransposeConvolution2dLayer() = default;
+
+    /// Retrieve the handles to the constant values stored by the layer.
+    /// @return A vector of the constant tensors stored by this layer.
+    ConstantTensors GetConstantTensorsByRef() override;
+};
+
+} // namespace armnn
diff --git a/src/armnnSerializer/Serializer.cpp b/src/armnnSerializer/Serializer.cpp
index 81231e4eba..dabe977179 100644
--- a/src/armnnSerializer/Serializer.cpp
+++ b/src/armnnSerializer/Serializer.cpp
@@ -943,6 +943,16 @@ void SerializerVisitor::VisitSwitchLayer(const armnn::IConnectableLayer* layer,
     CreateAnyLayer(fbSwitchLayer.o, serializer::Layer::Layer_SwitchLayer);
 }
 
+void SerializerVisitor::VisitTransposeConvolution2dLayer(
+    const armnn::IConnectableLayer* layer,
+    const armnn::TransposeConvolution2dDescriptor& descriptor,
+    const armnn::ConstTensor& weights,
+    const armnn::Optional<armnn::ConstTensor>& biases,
+    const char* name)
+{
+    throw UnimplementedException("SerializerVisitor::VisitTransposeConvolution2dLayer is not implemented");
+}
+
 fb::Offset<serializer::LayerBase> SerializerVisitor::CreateLayerBase(const IConnectableLayer* layer,
                                                                      const serializer::LayerType layerType)
 {
diff --git a/src/armnnSerializer/Serializer.hpp b/src/armnnSerializer/Serializer.hpp
index aae879993e..31f7d05198 100644
--- a/src/armnnSerializer/Serializer.hpp
+++ b/src/armnnSerializer/Serializer.hpp
@@ -206,6 +206,13 @@ public:
     void VisitSwitchLayer(const armnn::IConnectableLayer* layer,
                           const char* name = nullptr) override;
 
+    void VisitTransposeConvolution2dLayer(const armnn::IConnectableLayer* layer,
+                                          const armnn::TransposeConvolution2dDescriptor& descriptor,
+                                          const armnn::ConstTensor& weights,
+                                          const armnn::Optional<armnn::ConstTensor>& biases,
+                                          const char* = nullptr) override;
+
 private:
 
     /// Creates the Input Slots and Output Slots and LayerBase for the layer.
diff --git a/src/backends/backendsCommon/LayerSupportBase.cpp b/src/backends/backendsCommon/LayerSupportBase.cpp
index 12e4ee81ae..2eb0e4161e 100644
--- a/src/backends/backendsCommon/LayerSupportBase.cpp
+++ b/src/backends/backendsCommon/LayerSupportBase.cpp
@@ -448,4 +448,14 @@ bool LayerSupportBase::IsSwitchSupported(const TensorInfo& input0,
     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
+bool LayerSupportBase::IsTransposeConvolution2dSupported(const TensorInfo& input,
+                                                         const TensorInfo& output,
+                                                         const TransposeConvolution2dDescriptor& descriptor,
+                                                         const TensorInfo& weights,
+                                                         const Optional<TensorInfo>& biases,
+                                                         Optional<std::string&> reasonIfUnsupported) const
+{
+    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
+}
+
 } // namespace armnn
diff --git a/src/backends/backendsCommon/LayerSupportBase.hpp b/src/backends/backendsCommon/LayerSupportBase.hpp
index d035dfcd62..52ba5b216f 100644
--- a/src/backends/backendsCommon/LayerSupportBase.hpp
+++ b/src/backends/backendsCommon/LayerSupportBase.hpp
@@ -282,6 +282,14 @@ public:
                            const TensorInfo& output0,
                            const TensorInfo& output1,
                            Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+    bool IsTransposeConvolution2dSupported(
+        const TensorInfo& input,
+        const TensorInfo& output,
+        const TransposeConvolution2dDescriptor& descriptor,
+        const TensorInfo& weights,
+        const Optional<TensorInfo>& biases,
+        Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 };
 
 } // namespace armnn
diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp
index adba86c79a..5ca492888f 100644
--- a/src/backends/backendsCommon/WorkloadData.cpp
+++ b/src/backends/backendsCommon/WorkloadData.cpp
@@ -1800,4 +1800,45 @@ void PreluQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
                                        "alpha");
 }
 
+void TransposeConvolution2dQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
+{
+    const std::string descriptorName{"TransposeConvolution2dQueueDescriptor"};
+
+    ValidateNumInputs(workloadInfo, descriptorName, 1);
+    ValidateNumOutputs(workloadInfo, descriptorName, 1);
+
+    ValidateTensorNumDimensions(workloadInfo.m_InputTensorInfos[0], descriptorName, 4, "input");
+    ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], descriptorName, 4, "output");
+
+    ValidatePointer(m_Weight, descriptorName, "weight");
+    ValidateTensorNumDimensions(m_Weight->GetTensorInfo(), descriptorName, 4, "weight");
+
+    ValidateTensorDataType(m_Weight->GetTensorInfo(),
+                           workloadInfo.m_InputTensorInfos[0].GetDataType(),
+                           descriptorName,
+                           "weight");
+
+    if (m_Parameters.m_BiasEnabled)
+    {
+        ValidateTensorNumDimensions(m_Bias->GetTensorInfo(), descriptorName, 1, "bias");
+
+        ValidateTensorDataType(m_Bias->GetTensorInfo(),
+                               GetBiasDataType(workloadInfo.m_InputTensorInfos[0].GetDataType()),
+                               descriptorName, "bias");
+
+        ValidateBiasTensorQuantization(m_Bias->GetTensorInfo(),
+                                       workloadInfo.m_InputTensorInfos[0],
+                                       m_Weight->GetTensorInfo(),
+                                       descriptorName);
+    }
+
+    ValidateTensorQuantizationMultiplier(workloadInfo.m_InputTensorInfos[0],
+                                         m_Weight->GetTensorInfo(),
+                                         workloadInfo.m_OutputTensorInfos[0],
+                                         descriptorName,
+                                         "input",
+                                         "weights",
+                                         "output");
+}
+
 } //namespace armnn
diff --git a/src/backends/backendsCommon/WorkloadData.hpp b/src/backends/backendsCommon/WorkloadData.hpp
index 6a51bc3144..744758385b 100644
--- a/src/backends/backendsCommon/WorkloadData.hpp
+++ b/src/backends/backendsCommon/WorkloadData.hpp
@@ -445,4 +445,17 @@ struct PreluQueueDescriptor : QueueDescriptor
     void Validate(const WorkloadInfo& workloadInfo) const;
 };
 
+struct TransposeConvolution2dQueueDescriptor : QueueDescriptorWithParameters<TransposeConvolution2dDescriptor>
+{
+    TransposeConvolution2dQueueDescriptor() :
+        m_Weight(nullptr),
+        m_Bias(nullptr)
+    {}
+
+    const ConstCpuTensorHandle* m_Weight;
+    const ConstCpuTensorHandle* m_Bias;
+
+    void Validate(const WorkloadInfo& workloadInfo) const;
+};
+
 } //namespace armnn
diff --git a/src/backends/backendsCommon/WorkloadFactory.cpp b/src/backends/backendsCommon/WorkloadFactory.cpp
index cca39198e1..2fba3b7059 100644
--- a/src/backends/backendsCommon/WorkloadFactory.cpp
+++ b/src/backends/backendsCommon/WorkloadFactory.cpp
@@ -796,6 +796,36 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId,
                                                           reason);
             break;
         }
+        case LayerType::TransposeConvolution2d:
+        {
+            auto cLayer = boost::polymorphic_downcast<const TransposeConvolution2dLayer*>(&layer);
+
+            const TensorInfo input  = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
+                                                       dataType);
+            const TensorInfo output = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
+
+            const TransposeConvolution2dDescriptor& descriptor = cLayer->GetParameters();
+
+            Optional<TensorInfo> biases;
+            if (descriptor.m_BiasEnabled)
+            {
+                BOOST_ASSERT(cLayer->m_Bias.get() != nullptr);
+                biases = OverrideDataType(cLayer->m_Bias->GetTensorInfo(),
+                                          GetBiasTypeFromWeightsType(dataType));
+            }
+
+            BOOST_ASSERT(cLayer->m_Weight.get() != nullptr);
+            const TensorInfo weights = OverrideDataType(cLayer->m_Weight->GetTensorInfo(), dataType);
+
+            result = layerSupportObject->IsTransposeConvolution2dSupported(input,
+                                                                           output,
+                                                                           descriptor,
+                                                                           weights,
+                                                                           biases,
+                                                                           reason);
+
+            break;
+        }
         default:
         {
             BOOST_ASSERT_MSG(false, "WorkloadFactory did not recognise type of layer.");
@@ -1098,4 +1128,11 @@ std::unique_ptr<IWorkload> IWorkloadFactory::CreateSwitch(const SwitchQueueDescr
     return std::unique_ptr<IWorkload>();
 }
 
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateTransposeConvolution2d(
+    const TransposeConvolution2dQueueDescriptor& descriptor,
+    const WorkloadInfo& info) const
+{
+    return std::unique_ptr<IWorkload>();
+}
+
 } // namepsace armnn
\ No newline at end of file
diff --git a/src/backends/backendsCommon/WorkloadFactory.hpp b/src/backends/backendsCommon/WorkloadFactory.hpp
index c9fbe71f96..978d3a3a98 100644
--- a/src/backends/backendsCommon/WorkloadFactory.hpp
+++ b/src/backends/backendsCommon/WorkloadFactory.hpp
@@ -190,6 +190,10 @@ public:
 
     virtual std::unique_ptr<IWorkload> CreateSwitch(const SwitchQueueDescriptor& descriptor,
                                                     const WorkloadInfo& Info) const;
+
+    virtual std::unique_ptr<IWorkload> CreateTransposeConvolution2d(
+        const TransposeConvolution2dQueueDescriptor& descriptor,
+        const WorkloadInfo& info) const;
 };
 
-} //namespace armnn
+} // namespace armnn
diff --git a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
index 111cf8f3e3..7c9d0f52c3 100644
--- a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
+++ b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
@@ -212,6 +212,12 @@ struct DummyLayer
 {
 };
 
+template<>
+struct DummyLayer<armnn::TransposeConvolution2dLayer>
+    : public DummyConvolutionLayer<armnn::TransposeConvolution2dLayer>
+{
+};
+
 template<typename LstmLayerType>
 struct DummyLstmLayer
 {
@@ -408,6 +414,8 @@ DECLARE_LAYER_POLICY_1_PARAM(Subtraction)
 
 DECLARE_LAYER_POLICY_1_PARAM(Switch)
 
+DECLARE_LAYER_POLICY_2_PARAM(TransposeConvolution2d)
+
 // Generic implementation to get the number of input slots for a given layer type;
 template<armnn::LayerType Type>
-- 
cgit v1.2.1
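
A minimal usage sketch of the frontend API added by this commit (not part of the patch itself). The descriptor values, tensor shapes, weight data, and layer names below are invented for illustration, and the network is only constructed, not optimized or executed, since this change adds frontend support only.

// Illustrative only -- not taken from commit 639fb04; values and names are made up.
#include <armnn/ArmNN.hpp>

#include <vector>

int main()
{
    using namespace armnn;

    INetworkPtr network = INetwork::Create();

    IConnectableLayer* const input = network->AddInputLayer(0, "input");

    // Descriptor introduced by this patch: stride 2, no padding, bias disabled, NCHW layout.
    TransposeConvolution2dDescriptor descriptor;
    descriptor.m_StrideX     = 2;
    descriptor.m_StrideY     = 2;
    descriptor.m_BiasEnabled = false;
    descriptor.m_DataLayout  = DataLayout::NCHW;

    // 1x1x3x3 weights, all ones (hypothetical values).
    const std::vector<float> weightsData(9, 1.0f);
    const TensorInfo weightsInfo({ 1, 1, 3, 3 }, DataType::Float32);
    const ConstTensor weights(weightsInfo, weightsData.data());

    // New frontend call added by this commit; EmptyOptional() because bias is disabled.
    IConnectableLayer* const transposeConv =
        network->AddTransposeConvolution2dLayer(descriptor, weights, EmptyOptional(), "transposeConv");

    IConnectableLayer* const output = network->AddOutputLayer(0, "output");

    input->GetOutputSlot(0).Connect(transposeConv->GetInputSlot(0));
    transposeConv->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    // 1x1x3x3 input; 1x1x7x7 output per the usual transpose-convolution size
    // formula (inSize - 1) * stride + kernelSize - totalPadding = (3 - 1) * 2 + 3 - 0 = 7.
    input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 3, 3 }, DataType::Float32));
    transposeConv->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 7, 7 }, DataType::Float32));

    return 0;
}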