path: root/src/armnn
author    Teresa Charlin <teresa.charlinreyes@arm.com>  2023-07-13 17:16:45 +0100
committer TeresaARM <teresa.charlinreyes@arm.com>       2023-07-25 09:54:47 +0000
commit    79a06a59bafadf736ca53c4240e87f9bbb657260 (patch)
tree      b94b2063766b40b79bed8cb81f571ea05cfa9efb /src/armnn
parent    a638f101bcb51008932f922fe0a5cef28633bc66 (diff)
download  armnn-79a06a59bafadf736ca53c4240e87f9bbb657260.tar.gz
IVGCVSW-7883 Front end and reference implementation for TILE
Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com>
Signed-off-by: Cian McGriskin <cian.mcgriskin@arm.com>
Change-Id: I0afb2403fee11c5c1e58ea65e2525e99594d8f2d
Diffstat (limited to 'src/armnn')
-rw-r--r--  src/armnn/BackendHelper.cpp      15
-rw-r--r--  src/armnn/LayersFwd.hpp           2
-rw-r--r--  src/armnn/Network.cpp            11
-rw-r--r--  src/armnn/Network.hpp             3
-rw-r--r--  src/armnn/layers/TileLayer.cpp   71
-rw-r--r--  src/armnn/layers/TileLayer.hpp   45
6 files changed, 147 insertions, 0 deletions
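Editorial note, not part of the commit: a minimal sketch of how the new front end could be exercised once this change is in place. The AddTileLayer signature and the TileDescriptor::m_Multiples member are taken from the hunks below; the assumption that TileDescriptor is declared in armnn/Descriptors.hpp, and the layer/tensor names, are illustrative.

    #include <armnn/Descriptors.hpp>
    #include <armnn/INetwork.hpp>
    #include <armnn/Tensor.hpp>
    #include <armnn/Types.hpp>

    armnn::INetworkPtr BuildTileNetwork()
    {
        using namespace armnn;

        INetworkPtr network = INetwork::Create();

        // Repeat the input twice along each of its two dimensions: [2, 3] -> [4, 6].
        TileDescriptor descriptor;
        descriptor.m_Multiples = { 2, 2 };

        IConnectableLayer* input  = network->AddInputLayer(0, "input");
        IConnectableLayer* tile   = network->AddTileLayer(descriptor, "tile");
        IConnectableLayer* output = network->AddOutputLayer(0, "output");

        input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 2, 3 }, DataType::Float32));
        tile->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 4, 6 }, DataType::Float32));

        input->GetOutputSlot(0).Connect(tile->GetInputSlot(0));
        tile->GetOutputSlot(0).Connect(output->GetInputSlot(0));

        return network;
    }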
diff --git a/src/armnn/BackendHelper.cpp b/src/armnn/BackendHelper.cpp
index 18184fbfb2..0a55a08087 100644
--- a/src/armnn/BackendHelper.cpp
+++ b/src/armnn/BackendHelper.cpp
@@ -1409,6 +1409,21 @@ bool LayerSupportHandle::IsSwitchSupported(const TensorInfo& input0,
reasonIfUnsupported);
}
+bool LayerSupportHandle::IsTileSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const armnn::TileDescriptor &descriptor,
+ Optional<std::string&> reasonIfUnsupported)
+{
+ TensorInfos infos{input, output};
+
+ return m_LayerSupport->IsLayerSupported(LayerType::Tile,
+ infos,
+ descriptor,
+ EmptyOptional(),
+ EmptyOptional(),
+ reasonIfUnsupported);
+}
+
bool LayerSupportHandle::IsTransposeConvolution2dSupported(
const TensorInfo& input,
const TensorInfo& output,
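Editorial note, not part of the commit: the capability query added above is normally reached through GetILayerSupportByBackendId() from armnn/BackendHelper.hpp. A hedged sketch, assuming the reference backend is registered under the id "CpuRef":

    #include <armnn/BackendHelper.hpp>
    #include <armnn/Descriptors.hpp>
    #include <armnn/Tensor.hpp>
    #include <string>

    bool TileSupportedOnCpuRef()
    {
        using namespace armnn;

        // Shapes mirror the Tile semantics: each output dim = input dim * multiple.
        TensorInfo input ({ 2, 3 }, DataType::Float32);
        TensorInfo output({ 4, 6 }, DataType::Float32);

        TileDescriptor descriptor;
        descriptor.m_Multiples = { 2, 2 };

        LayerSupportHandle handle = GetILayerSupportByBackendId(BackendId("CpuRef"));

        std::string reason;
        return handle.IsTileSupported(input, output, descriptor, Optional<std::string&>(reason));
    }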
diff --git a/src/armnn/LayersFwd.hpp b/src/armnn/LayersFwd.hpp
index d3ce6f2a67..743b8d7205 100644
--- a/src/armnn/LayersFwd.hpp
+++ b/src/armnn/LayersFwd.hpp
@@ -76,6 +76,7 @@
#include "layers/StridedSliceLayer.hpp"
#include "layers/SubtractionLayer.hpp"
#include "layers/SwitchLayer.hpp"
+#include "layers/TileLayer.hpp"
#include "layers/TransposeConvolution2dLayer.hpp"
#include "layers/TransposeLayer.hpp"
#include "layers/UnidirectionalSequenceLstmLayer.hpp"
@@ -178,6 +179,7 @@ DECLARE_LAYER(StandIn)
DECLARE_LAYER(StridedSlice)
DECLARE_LAYER(Subtraction)
DECLARE_LAYER(Switch)
+DECLARE_LAYER(Tile)
DECLARE_LAYER(Transpose)
DECLARE_LAYER(TransposeConvolution2d)
DECLARE_LAYER(UnidirectionalSequenceLstm)
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index 27e91ae39c..22d2c78c65 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -644,6 +644,12 @@ IConnectableLayer* INetwork::AddReverseV2Layer(const char *name)
return pNetworkImpl->AddReverseV2Layer(name);
}
+IConnectableLayer* INetwork::AddTileLayer(const TileDescriptor &descriptor,
+ const char *name)
+{
+ return pNetworkImpl->AddTileLayer(descriptor, name);
+}
+
void INetwork::ExecuteStrategy(IStrategy& strategy) const
{
return pNetworkImpl->ExecuteStrategy(strategy);
@@ -2934,6 +2940,11 @@ IConnectableLayer* NetworkImpl::AddReverseV2Layer(const char *name)
return m_Graph->AddLayer<ReverseV2Layer>(name);
}
+IConnectableLayer* NetworkImpl::AddTileLayer(const TileDescriptor &desc, const char *name)
+{
+ return m_Graph->AddLayer<TileLayer>(desc, name);
+}
+
IConnectableLayer* NetworkImpl::AddPrecompiledLayer(const PreCompiledDescriptor& preCompiledDescriptor,
CompiledBlobPtr compiledBlobPtr,
const Optional<BackendId>& backend,
diff --git a/src/armnn/Network.hpp b/src/armnn/Network.hpp
index ae287f32d1..a84a0e9ba4 100644
--- a/src/armnn/Network.hpp
+++ b/src/armnn/Network.hpp
@@ -220,6 +220,9 @@ public:
IConnectableLayer* AddSwitchLayer(const char* name = nullptr);
+ IConnectableLayer* AddTileLayer(const TileDescriptor& tileDescriptor,
+ const char* name = nullptr);
+
IConnectableLayer* AddTransposeConvolution2dLayer(const TransposeConvolution2dDescriptor& descriptor,
const ConstTensor& weights,
const Optional<ConstTensor>& biases,
diff --git a/src/armnn/layers/TileLayer.cpp b/src/armnn/layers/TileLayer.cpp
new file mode 100644
index 0000000000..3c313905fe
--- /dev/null
+++ b/src/armnn/layers/TileLayer.cpp
@@ -0,0 +1,71 @@
+//
+// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "TileLayer.hpp"
+
+#include <armnn/backends/WorkloadFactory.hpp>
+#include "layers/LayerCloneBase.hpp"
+
+namespace armnn
+{
+TileLayer::TileLayer(const TileDescriptor &param, const char *name)
+ : LayerWithParameters(1, 1, LayerType::Tile, param, name)
+{}
+
+std::unique_ptr<IWorkload> TileLayer::CreateWorkload(const IWorkloadFactory &factory) const
+{
+ TileQueueDescriptor descriptor;
+ SetAdditionalInfo(descriptor);
+
+ return factory.CreateWorkload(LayerType::Tile, descriptor, PrepInfoAndDesc(descriptor));
+}
+
+TileLayer* TileLayer::Clone(armnn::Graph &graph) const
+{
+ auto layer = CloneBase<TileLayer>(graph, m_Param, GetName());
+
+ return std::move(layer);
+}
+
+std::vector<TensorShape> TileLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
+{
+ ARMNN_ASSERT(inputShapes.size() == 1);
+ const TensorShape& inputShape = inputShapes[0];
+ const std::vector<uint32_t> multipleShape = m_Param.m_Multiples;
+
+ std::vector<unsigned int> dimSizes;
+
+ // Check input shape and multiples have same length and multiply them together to get output shape
+ if(inputShape.GetNumDimensions() == multipleShape.size())
+ {
+ for(uint32_t i = 0; i < inputShape.GetNumDimensions(); ++i)
+ {
+ dimSizes.push_back(inputShape[i] * multipleShape[i]);
+ }
+ }
+ else
+ {
+ throw LayerValidationException("TileLayer: input rank and length of multiples are different.");
+ }
+
+ return std::vector<TensorShape>({TensorShape(inputShape.GetNumDimensions(), dimSizes.data())});
+}
+
+void TileLayer::ValidateTensorShapesFromInputs()
+{
+ VerifyLayerConnections(1, CHECK_LOCATION());
+
+ const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
+
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
+
+ auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetTensorInfo().GetShape() });
+
+ ARMNN_ASSERT(inferredShapes.size() == 1);
+
+ ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "TileLayer");
+}
+
+}
\ No newline at end of file
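Editorial note, not part of the commit: a standalone sketch of the shape arithmetic implemented in InferOutputShapes above; it is not armnn code and the names are illustrative. Each output dimension is the matching input dimension multiplied by its entry in m_Multiples, so a [2, 3] input tiled by multiples [2, 2] yields a [4, 6] output.

    #include <cstddef>
    #include <cstdint>
    #include <stdexcept>
    #include <vector>

    // Illustrative only: mirrors the Tile shape rule without the armnn types.
    std::vector<uint32_t> InferTileOutputShape(const std::vector<uint32_t>& inputShape,
                                               const std::vector<uint32_t>& multiples)
    {
        if (inputShape.size() != multiples.size())
        {
            throw std::invalid_argument("input rank and length of multiples are different");
        }

        std::vector<uint32_t> outputShape(inputShape.size());
        for (std::size_t i = 0; i < inputShape.size(); ++i)
        {
            outputShape[i] = inputShape[i] * multiples[i];   // e.g. {2, 3} * {2, 2} -> {4, 6}
        }
        return outputShape;
    }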
diff --git a/src/armnn/layers/TileLayer.hpp b/src/armnn/layers/TileLayer.hpp
new file mode 100644
index 0000000000..632cdb426b
--- /dev/null
+++ b/src/armnn/layers/TileLayer.hpp
@@ -0,0 +1,45 @@
+//
+// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "LayerWithParameters.hpp"
+
+namespace armnn
+{
+
+class TileLayer : public LayerWithParameters<TileDescriptor>
+{
+public:
+ /// Makes a workload for the Tile type.
+ /// @param [in] factory The workload factory which will create the workload.
+ /// @return A pointer to the created workload, or nullptr if not created.
+ virtual std::unique_ptr<IWorkload> CreateWorkload(const IWorkloadFactory& factory) const override;
+
+ /// Creates a dynamically-allocated copy of this layer.
+ /// @param [in] graph The graph into which this layer is being cloned.
+ TileLayer* Clone(Graph& graph) const override;
+
+ /// Infers the output shapes from given input shapes and layer properties.
+ /// @param [in] inputShapes The input shapes the layer has.
+ /// @return A vector containing the inferred output shape.
+ std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
+
+ /// Check if the input tensor shape(s)
+ /// will lead to a valid configuration of @ref TileLayer.
+ /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
+ void ValidateTensorShapesFromInputs() override;
+
+protected:
+ /// Constructor to create a TileLayer.
+ /// @param [in] param TileDescriptor to configure the tiling operation.
+ /// @param [in] name Optional name for the layer.
+ TileLayer(const TileDescriptor& param, const char* name);
+
+ /// Default destructor.
+ ~TileLayer() = default;
+};
+
+} // namespace armnn