author     Mike Kelly <mike.kelly@arm.com>        2020-02-28 18:11:58 +0000
committer  mike.kelly <mike.kelly@arm.com>        2020-03-02 16:44:09 +0000
commit     c9ea45adefdde2890e9aa191a5b31563a3dd35ea (patch)
tree       2ea65c972d24cc2d823ea39eb105d4062db54934 /src/armnn
parent     510f6183d289b176702a18f020449c68be6f1075 (diff)
IVGCVSW-4375 Add support for Transpose
* Added TransposeLayer
* Added CL, Neon and Ref Workloads
* Added Transpose utilities
* Added Serializer and Deserializer support
* Added Quantizer support

Signed-off-by: Mike Kelly <mike.kelly@arm.com>
Change-Id: I04c755ba7cb5b1edf72b3c9f3c0314878032e3c7
Diffstat (limited to 'src/armnn')
-rw-r--r--  src/armnn/InternalTypes.cpp             |  1
-rw-r--r--  src/armnn/InternalTypes.hpp             |  3
-rw-r--r--  src/armnn/LayersFwd.hpp                 |  2
-rw-r--r--  src/armnn/Network.cpp                   |  6
-rw-r--r--  src/armnn/Network.hpp                   |  3
-rw-r--r--  src/armnn/QuantizerVisitor.cpp          |  9
-rw-r--r--  src/armnn/QuantizerVisitor.hpp          |  4
-rw-r--r--  src/armnn/SerializeLayerParameters.cpp  | 20
-rw-r--r--  src/armnn/SerializeLayerParameters.hpp  |  5
-rw-r--r--  src/armnn/layers/TransposeLayer.cpp     | 62
-rw-r--r--  src/armnn/layers/TransposeLayer.hpp     | 70
11 files changed, 184 insertions(+), 1 deletion(-)
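
The new functionality surfaces through the public INetwork interface as AddTransposeLayer. The following is a minimal usage sketch and not part of this commit: the binding ids, tensor shapes, layer name and the NCHW-to-NHWC permutation are illustrative assumptions, and the mapping convention assumed (matching armnnUtils::TransposeTensorShape) is that output dimension i takes input dimension m_DimMappings[i].

// Minimal usage sketch (not part of this commit): wiring the new Transpose
// layer into a network via the public INetwork API.
#include <armnn/ArmNN.hpp>

int main()
{
    using namespace armnn;

    INetworkPtr network = INetwork::Create();

    // Assumed convention: output dimension i takes input dimension
    // m_DimMappings[i], so { 0, 2, 3, 1 } reorders NCHW to NHWC.
    TransposeDescriptor descriptor(PermutationVector({ 0, 2, 3, 1 }));

    IConnectableLayer* input     = network->AddInputLayer(0);
    IConnectableLayer* transpose = network->AddTransposeLayer(descriptor, "transpose");
    IConnectableLayer* output    = network->AddOutputLayer(0);

    input->GetOutputSlot(0).Connect(transpose->GetInputSlot(0));
    transpose->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    // Shapes assumed for illustration only.
    input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 3, 224, 224 }, DataType::Float32));
    transpose->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 224, 224, 3 }, DataType::Float32));

    return 0;
}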
diff --git a/src/armnn/InternalTypes.cpp b/src/armnn/InternalTypes.cpp
index 10e7f501b7..c032e44cd3 100644
--- a/src/armnn/InternalTypes.cpp
+++ b/src/armnn/InternalTypes.cpp
@@ -70,6 +70,7 @@ char const* GetLayerTypeAsCString(LayerType type)
case LayerType::Subtraction: return "Subtraction";
case LayerType::Switch: return "Switch";
case LayerType::TransposeConvolution2d: return "TransposeConvolution2d";
+ case LayerType::Transpose: return "Transpose";
default:
BOOST_ASSERT_MSG(false, "Unknown layer type");
return "Unknown";
diff --git a/src/armnn/InternalTypes.hpp b/src/armnn/InternalTypes.hpp
index 2d7be3cac6..351f12c510 100644
--- a/src/armnn/InternalTypes.hpp
+++ b/src/armnn/InternalTypes.hpp
@@ -69,9 +69,10 @@ enum class LayerType
StridedSlice,
Subtraction,
Switch,
+ TransposeConvolution2d,
// Last layer goes here.
LastLayer,
- TransposeConvolution2d = LastLayer
+ Transpose = LastLayer
};
const char* GetLayerTypeAsCString(LayerType type);
diff --git a/src/armnn/LayersFwd.hpp b/src/armnn/LayersFwd.hpp
index 2d486f48a6..f3ce7e61fa 100644
--- a/src/armnn/LayersFwd.hpp
+++ b/src/armnn/LayersFwd.hpp
@@ -62,6 +62,7 @@
#include "layers/SubtractionLayer.hpp"
#include "layers/SwitchLayer.hpp"
#include "layers/TransposeConvolution2dLayer.hpp"
+#include "layers/TransposeLayer.hpp"
namespace armnn
{
@@ -145,6 +146,7 @@ DECLARE_LAYER(StandIn)
DECLARE_LAYER(StridedSlice)
DECLARE_LAYER(Subtraction)
DECLARE_LAYER(Switch)
+DECLARE_LAYER(Transpose)
DECLARE_LAYER(TransposeConvolution2d)
}
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index 7edc6240a1..b405a77829 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -1526,6 +1526,12 @@ IConnectableLayer* Network::AddTransposeConvolution2dLayer(const TransposeConvol
return layer;
}
+IConnectableLayer* Network::AddTransposeLayer(const TransposeDescriptor& transposeDescriptor,
+ const char* name)
+{
+ return m_Graph->AddLayer<TransposeLayer>(transposeDescriptor, name);
+}
+
IConnectableLayer* Network::AddStackLayer(const StackDescriptor& stackDescriptor,
const char* name)
{
diff --git a/src/armnn/Network.hpp b/src/armnn/Network.hpp
index 23a8e47093..5da681306c 100644
--- a/src/armnn/Network.hpp
+++ b/src/armnn/Network.hpp
@@ -226,6 +226,9 @@ public:
const Optional<ConstTensor>& biases,
const char* name = nullptr) override;
+ IConnectableLayer* AddTransposeLayer(const TransposeDescriptor& transposeDescriptor,
+ const char* name = nullptr) override;
+
IConnectableLayer* AddStackLayer(const StackDescriptor& stackDescriptor,
const char* name = nullptr) override;
diff --git a/src/armnn/QuantizerVisitor.cpp b/src/armnn/QuantizerVisitor.cpp
index 51818ebddd..8e7c45f47f 100644
--- a/src/armnn/QuantizerVisitor.cpp
+++ b/src/armnn/QuantizerVisitor.cpp
@@ -561,4 +561,13 @@ void QuantizerVisitor::VisitTransposeConvolution2dLayer(const IConnectableLayer*
SetQuantizedInputConnections(layer, newLayer);
}
+void QuantizerVisitor::VisitTransposeLayer(const IConnectableLayer* layer,
+ const TransposeDescriptor& transposeDescriptor,
+ const char* name)
+{
+ IConnectableLayer* newLayer = m_QuantizedNetwork->AddTransposeLayer(transposeDescriptor, name);
+ RecordLayer(layer, newLayer);
+ SetQuantizedInputConnections(layer, newLayer);
+}
+
} //namespace armnn
diff --git a/src/armnn/QuantizerVisitor.hpp b/src/armnn/QuantizerVisitor.hpp
index 4013033697..29500ab0c8 100644
--- a/src/armnn/QuantizerVisitor.hpp
+++ b/src/armnn/QuantizerVisitor.hpp
@@ -187,6 +187,10 @@ public:
const Optional<ConstTensor>& biases,
const char* name = nullptr) override;
+ void VisitTransposeLayer(const IConnectableLayer* layer,
+ const TransposeDescriptor& descriptor,
+ const char* name = nullptr) override;
+
/// Extract the quantized network
INetworkPtr RetrieveFinalNetwork() { return std::move(m_QuantizedNetwork); }
diff --git a/src/armnn/SerializeLayerParameters.cpp b/src/armnn/SerializeLayerParameters.cpp
index 544e389ae9..76b92f3f9d 100644
--- a/src/armnn/SerializeLayerParameters.cpp
+++ b/src/armnn/SerializeLayerParameters.cpp
@@ -491,4 +491,24 @@ void StringifyLayerParameters<TransposeConvolution2dDescriptor>::Serialize(
fn("DataLayout", GetDataLayoutName(desc.m_DataLayout));
}
+void StringifyLayerParameters<TransposeDescriptor>::Serialize(ParameterStringifyFunction& fn,
+ const TransposeDescriptor& desc)
+{
+ std::stringstream ss;
+ ss << "[";
+ bool addComma = false;
+ for (auto it : desc.m_DimMappings)
+ {
+ if (addComma)
+ {
+ ss << ",";
+ }
+ ss << it;
+ addComma = true;
+ }
+ ss << "]";
+
+ fn("DimMappings",ss.str());
+}
+
} // namespace armnn
\ No newline at end of file
diff --git a/src/armnn/SerializeLayerParameters.hpp b/src/armnn/SerializeLayerParameters.hpp
index 76ca0a52dd..ae921c4b07 100644
--- a/src/armnn/SerializeLayerParameters.hpp
+++ b/src/armnn/SerializeLayerParameters.hpp
@@ -160,4 +160,9 @@ template <> struct StringifyLayerParameters<TransposeConvolution2dDescriptor>
static void Serialize(ParameterStringifyFunction& fn, const TransposeConvolution2dDescriptor& desc);
};
+template <> struct StringifyLayerParameters<TransposeDescriptor>
+{
+ static void Serialize(ParameterStringifyFunction& fn, const TransposeDescriptor& desc);
+};
+
} // namespace armnn
\ No newline at end of file
diff --git a/src/armnn/layers/TransposeLayer.cpp b/src/armnn/layers/TransposeLayer.cpp
new file mode 100644
index 0000000000..3c22b545b9
--- /dev/null
+++ b/src/armnn/layers/TransposeLayer.cpp
@@ -0,0 +1,62 @@
+//
+// Copyright © 2020 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "TransposeLayer.hpp"
+
+#include "LayerCloneBase.hpp"
+
+#include <armnn/TypesUtils.hpp>
+
+#include <armnnUtils/Transpose.hpp>
+
+#include <backendsCommon/WorkloadData.hpp>
+#include <backendsCommon/WorkloadFactory.hpp>
+
+namespace armnn
+{
+
+TransposeLayer::TransposeLayer(const TransposeDescriptor& param, const char* name)
+ : LayerWithParameters(1, 1, LayerType::Transpose, param, name)
+{
+}
+
+std::unique_ptr<IWorkload> TransposeLayer::CreateWorkload(const IWorkloadFactory& factory) const
+{
+ TransposeQueueDescriptor descriptor;
+ return factory.CreateTranspose(descriptor, PrepInfoAndDesc(descriptor));
+}
+
+TransposeLayer* TransposeLayer::Clone(Graph& graph) const
+{
+ return CloneBase<TransposeLayer>(graph, m_Param, GetName());
+}
+
+std::vector<TensorShape> TransposeLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
+{
+ BOOST_ASSERT(inputShapes.size() == 1);
+ const TensorShape& inShape = inputShapes[0];
+ return std::vector<TensorShape> ({armnnUtils::TransposeTensorShape(inShape, m_Param.m_DimMappings)});
+}
+
+void TransposeLayer::ValidateTensorShapesFromInputs()
+{
+ VerifyLayerConnections(1, CHECK_LOCATION());
+
+ auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
+
+ BOOST_ASSERT(inferredShapes.size() == 1);
+
+ ConditionalThrowIfNotEqual<LayerValidationException>(
+ "TransposeLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
+ GetOutputSlot(0).GetTensorInfo().GetShape(),
+ inferredShapes[0]);
+}
+
+void TransposeLayer::Accept(ILayerVisitor& visitor) const
+{
+ visitor.VisitTransposeLayer(this, GetParameters(), GetName());
+}
+
+} // namespace armnn
diff --git a/src/armnn/layers/TransposeLayer.hpp b/src/armnn/layers/TransposeLayer.hpp
new file mode 100644
index 0000000000..4906bc9412
--- /dev/null
+++ b/src/armnn/layers/TransposeLayer.hpp
@@ -0,0 +1,70 @@
+//
+// Copyright © 2020 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include "LayerWithParameters.hpp"
+
+namespace armnn
+{
+
+/// This layer represents a transpose operation.
+class TransposeLayer : public LayerWithParameters<TransposeDescriptor>
+{
+public:
+ /// Makes a workload for the Transpose type.
+ /// @param [in] factory The workload factory which will create the workload.
+ /// @return A pointer to the created workload, or nullptr if not created.
+ virtual std::unique_ptr<IWorkload> CreateWorkload(const IWorkloadFactory& factory) const override;
+
+ /// Creates a dynamically-allocated copy of this layer.
+ /// @param [in] graph The graph into which this layer is being cloned.
+ TransposeLayer* Clone(Graph& graph) const override;
+
+ /// Check if the input tensor shape(s)
+ /// will lead to a valid configuration of @ref TransposeLayer.
+ void ValidateTensorShapesFromInputs() override;
+
+ /// Infers the output shapes from given input shapes and the permutation vector.
+ /// @param [in] inputShapes The input shapes layer has.
+ /// @return A vector to the inferred output shape.
+ std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
+
+ /// @return a permutation vector describing the permutation for the dimensions of the input tensor.
+ const PermutationVector& GetPermutation() const
+ {
+ return m_Param.m_DimMappings;
+ }
+
+ /// Indicates if the other layer received is inverse of this one.
+ /// @param [in] other The other layer to be compared with.
+ /// @return true if other layer is inverse of this false otherwise.
+ bool IsInverse(const Layer& other) const
+ {
+ return (other.GetType() == LayerType::Transpose) &&
+ GetPermutation().IsInverse(boost::polymorphic_downcast<const TransposeLayer*>(&other)->GetPermutation());
+ }
+
+ /// Indicates if the other layer received is equal to this one.
+ /// @param [in] other The other layer to be compare with.
+ /// @return true if other layer is equal to this false otherwise.
+ bool IsEqual(const Layer& other) const
+ {
+ return (other.GetType() == LayerType::Transpose) &&
+ GetPermutation().IsEqual(boost::polymorphic_downcast<const TransposeLayer*>(&other)->GetPermutation());
+ }
+
+ void Accept(ILayerVisitor& visitor) const override;
+
+protected:
+ /// Constructor to create a TransposeLayer.
+ /// @param [in] param TransposeDescriptor to configure the permute operation.
+ /// @param [in] name Optional name for the layer.
+ TransposeLayer(const TransposeDescriptor& param, const char* name);
+
+ /// Default destructor
+ ~TransposeLayer() = default;
+};
+
+} // namespace
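
The IsInverse and IsEqual helpers in the new header compare permutation vectors so that, for example, an optimizer can spot a pair of back-to-back transposes that cancel out. Below is a standalone sketch of the property IsInverse checks; it is plain C++ rather than ArmNN code, and the shape, permutation and inverse used are assumptions for illustration.

// Sketch (not from this commit): applying a permutation and then its inverse
// restores the original dimension order, which is what IsInverse detects.
#include <array>
#include <cstddef>
#include <iostream>

int main()
{
    // Assumed convention: output dimension i takes input dimension perm[i].
    std::array<std::size_t, 4> shape   = { 1, 3, 224, 224 };   // NCHW
    std::array<std::size_t, 4> perm    = { 0, 2, 3, 1 };       // NCHW -> NHWC
    std::array<std::size_t, 4> inverse = { 0, 3, 1, 2 };       // NHWC -> NCHW

    auto apply = [](const std::array<std::size_t, 4>& s, const std::array<std::size_t, 4>& p)
    {
        std::array<std::size_t, 4> out{};
        for (std::size_t i = 0; i < s.size(); ++i)
        {
            out[i] = s[p[i]];
        }
        return out;
    };

    auto nhwc = apply(shape, perm);     // { 1, 224, 224, 3 }
    auto back = apply(nhwc, inverse);   // { 1, 3, 224, 224 } again

    std::cout << (back == shape ? "inverse pair" : "not inverse") << std::endl;
    return 0;
}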