aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorTeresa Charlin <teresa.charlinreyes@arm.com>2023-07-13 17:16:45 +0100
committerTeresaARM <teresa.charlinreyes@arm.com>2023-07-25 09:54:47 +0000
commit79a06a59bafadf736ca53c4240e87f9bbb657260 (patch)
treeb94b2063766b40b79bed8cb81f571ea05cfa9efb
parenta638f101bcb51008932f922fe0a5cef28633bc66 (diff)
downloadarmnn-79a06a59bafadf736ca53c4240e87f9bbb657260.tar.gz
IVGCVSW-7883 Front end and reference implementation for TILE
Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com>
Signed-off-by: Cian McGriskin <cian.mcgriskin@arm.com>
Change-Id: I0afb2403fee11c5c1e58ea65e2525e99594d8f2d
-rw-r--r--Android.mk1
-rw-r--r--CMakeLists.txt2
-rw-r--r--docs/02_operator_list.dox44
-rw-r--r--include/armnn/BackendHelper.hpp5
-rw-r--r--include/armnn/Descriptors.hpp19
-rw-r--r--include/armnn/DescriptorsFwd.hpp1
-rw-r--r--include/armnn/INetwork.hpp27
-rw-r--r--include/armnn/Types.hpp3
-rw-r--r--include/armnn/backends/WorkloadData.hpp5
-rw-r--r--src/armnn/BackendHelper.cpp15
-rw-r--r--src/armnn/LayersFwd.hpp2
-rw-r--r--src/armnn/Network.cpp11
-rw-r--r--src/armnn/Network.hpp3
-rw-r--r--src/armnn/layers/TileLayer.cpp71
-rw-r--r--src/armnn/layers/TileLayer.hpp45
-rw-r--r--src/backends/backendsCommon/WorkloadData.cpp47
-rw-r--r--src/backends/backendsCommon/WorkloadFactory.cpp13
-rw-r--r--src/backends/backendsCommon/common.mk1
-rw-r--r--src/backends/backendsCommon/test/CMakeLists.txt2
-rw-r--r--src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp8
-rw-r--r--src/backends/backendsCommon/test/LayerTests.hpp1
-rw-r--r--src/backends/backendsCommon/test/layerTests/TileTestImpl.cpp434
-rw-r--r--src/backends/backendsCommon/test/layerTests/TileTestImpl.hpp31
-rw-r--r--src/backends/reference/RefLayerSupport.cpp34
-rw-r--r--src/backends/reference/RefLayerSupport.hpp5
-rw-r--r--src/backends/reference/RefWorkloadFactory.cpp5
-rw-r--r--src/backends/reference/backend.mk2
-rw-r--r--src/backends/reference/test/RefLayerTests.cpp38
-rw-r--r--src/backends/reference/workloads/CMakeLists.txt4
-rw-r--r--src/backends/reference/workloads/RefTileWorkload.cpp47
-rw-r--r--src/backends/reference/workloads/RefTileWorkload.hpp30
-rw-r--r--src/backends/reference/workloads/RefWorkloads.hpp1
-rw-r--r--src/backends/reference/workloads/Tile.cpp102
-rw-r--r--src/backends/reference/workloads/Tile.hpp21
34 files changed, 1058 insertions, 22 deletions
diff --git a/Android.mk b/Android.mk
index bf3c195505..c32afbeb34 100644
--- a/Android.mk
+++ b/Android.mk
@@ -276,6 +276,7 @@ LOCAL_SRC_FILES := \
src/armnn/layers/StridedSliceLayer.cpp \
src/armnn/layers/SubtractionLayer.cpp \
src/armnn/layers/SwitchLayer.cpp \
+ src/armnn/layers/TileLayer.cpp \
src/armnn/layers/TransposeConvolution2dLayer.cpp \
src/armnn/layers/TransposeLayer.cpp \
src/armnn/layers/UnidirectionalSequenceLstmLayer.cpp \
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 291154a491..bf598aa200 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -328,6 +328,8 @@ list(APPEND armnn_sources
src/armnn/layers/SubtractionLayer.hpp
src/armnn/layers/SwitchLayer.cpp
src/armnn/layers/SwitchLayer.hpp
+ src/armnn/layers/TileLayer.cpp
+ src/armnn/layers/TileLayer.hpp
src/armnn/layers/TransposeConvolution2dLayer.cpp
src/armnn/layers/TransposeConvolution2dLayer.hpp
src/armnn/layers/TransposeLayer.hpp
diff --git a/docs/02_operator_list.dox b/docs/02_operator_list.dox
index 8fa6178aff..02b72ca66c 100644
--- a/docs/02_operator_list.dox
+++ b/docs/02_operator_list.dox
@@ -3311,6 +3311,50 @@ where N = batches, C = channels, H = height, W = width
<tr><td>FLOAT32
</table>
<tr>
+ <td rowspan="3">TileLayer
+ <td rowspan="3" style="width:200px;"> Layer to construct a tensor by repeating in tiles a given tensor.
+ <td rowspan="3">
+ <ul>
+ <li>ANEURALNETWORKS_TILE
+ </ul>
+ <td>CpuRef
+ <td>
+ <ul>
+ <li>All
+ </ul>
+ <td>
+ <table>
+ <tr><th>
+ <tr><td>FLOAT16
+ <tr><td>FLOAT32
+ <tr><td>QASYMMS8
+ <tr><td>QASYMMU8
+ <tr><td>QSYMMS16
+ <tr><td>SIGNED32
+ </table>
+<tr>
+ <td>CpuAcc
+ <td>
+ <ul>
+ <li>None
+ </ul>
+ <td>
+ <table>
+ <tr><th>
+ <tr><td>None
+ </table>
+<tr>
+ <td>GpuAcc
+ <td>
+ <ul>
+ <li>None
+ </ul>
+ <td>
+ <table>
+ <tr><th>
+ <tr><td>None
+ </table>
+<tr>
<td rowspan="3">TransposeConvolution2dLayer
<td rowspan="3" style="width:200px;"> Layer to perform 2D transpose convolution (deconvolution) operation.
<td rowspan="3">
diff --git a/include/armnn/BackendHelper.hpp b/include/armnn/BackendHelper.hpp
index 6181ba5c40..b550f9f68e 100644
--- a/include/armnn/BackendHelper.hpp
+++ b/include/armnn/BackendHelper.hpp
@@ -422,6 +422,11 @@ public:
const TensorInfo& output1,
Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+ bool IsTileSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const TileDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
bool IsTransposeConvolution2dSupported(
const TensorInfo& input,
const TensorInfo& output,
diff --git a/include/armnn/Descriptors.hpp b/include/armnn/Descriptors.hpp
index 9ff894f1b0..3a571a66e8 100644
--- a/include/armnn/Descriptors.hpp
+++ b/include/armnn/Descriptors.hpp
@@ -1620,4 +1620,23 @@ struct BatchMatMulDescriptor : BaseDescriptor
const TensorShape& tensorShape);
};
+/// A TileDescriptor for the TileLayer.
+struct TileDescriptor : BaseDescriptor
+{
+ /// Default constructor: no repetition factors set.
+ TileDescriptor()
+ : m_Multiples()
+ {}
+
+ /// @param multiples Per-dimension repetition factors; length must match the input tensor rank.
+ explicit TileDescriptor(std::vector<uint32_t> multiples)
+ : m_Multiples(std::move(multiples))
+ {}
+
+ bool operator ==(const TileDescriptor& rhs) const
+ {
+ return m_Multiples == rhs.m_Multiples;
+ }
+
+ /// The vector to multiply the input shape by
+ std::vector<uint32_t> m_Multiples;
+};
+
} // namespace armnn
diff --git a/include/armnn/DescriptorsFwd.hpp b/include/armnn/DescriptorsFwd.hpp
index 2c25a49f00..be1a3f6782 100644
--- a/include/armnn/DescriptorsFwd.hpp
+++ b/include/armnn/DescriptorsFwd.hpp
@@ -49,6 +49,7 @@ struct SpaceToDepthDescriptor;
struct StackDescriptor;
struct StandInDescriptor;
struct StridedSliceDescriptor;
+struct TileDescriptor;
struct TransposeConvolution2dDescriptor;
struct TransposeDescriptor;
struct ViewsDescriptor;
diff --git a/include/armnn/INetwork.hpp b/include/armnn/INetwork.hpp
index e20dd1c348..830e0bac66 100644
--- a/include/armnn/INetwork.hpp
+++ b/include/armnn/INetwork.hpp
@@ -740,7 +740,7 @@ public:
/// @param name - Optional name for the layer.
/// @return - Interface for configuring the layer.
IConnectableLayer* AddGatherLayer(const GatherDescriptor& descriptor,
- const char* name = nullptr);
+ const char* name = nullptr);
/// Add GatherNd layer to the network.
/// @param name - Optional name for the layer.
@@ -764,23 +764,23 @@ public:
/// @param name - Optional name for the layer.
/// @return - Interface for configuring the layer.
IConnectableLayer* AddTransposeConvolution2dLayer(const TransposeConvolution2dDescriptor& descriptor,
- const ConstTensor& weights,
- const Optional<ConstTensor>& biases,
- const char* name = nullptr);
+ const ConstTensor& weights,
+ const Optional<ConstTensor>& biases,
+ const char* name = nullptr);
/// Adds a transpose layer to the network.
/// @param transposeDescriptor - TransposeDescriptor to configure the transpose.
/// @param name - Optional name for the layer.
/// @return - Interface for configuring the layer.
IConnectableLayer* AddTransposeLayer(const TransposeDescriptor& transposeDescriptor,
- const char* name = nullptr);
+ const char* name = nullptr);
/// Adds a stack layer to the network.
/// @param descriptor - Description of the stack layer.
/// @param name - Optional name for the layer.
/// @return - Interface for configuring the layer.
IConnectableLayer* AddStackLayer(const StackDescriptor& descriptor,
- const char* name = nullptr);
+ const char* name = nullptr);
/// Add a stand-in layer for a type unknown to the Arm NN framework.
/// Note: Due to the nature of this layer, no validation can be performed by the framework.
@@ -789,14 +789,14 @@ public:
/// @descriptor - Descriptor for the StandIn layer.
/// @return - Interface for configuring the layer.
IConnectableLayer* AddStandInLayer(const StandInDescriptor& descriptor,
- const char* name = nullptr);
+ const char* name = nullptr);
/// Add a QuantizedLstm layer to the network
/// @param params - The weights and biases for the Quantized LSTM cell
/// @param name - Optional name for the layer
/// @return - Interface for configuring the layer.
IConnectableLayer* AddQuantizedLstmLayer(const QuantizedLstmInputParams& params,
- const char* name = nullptr);
+ const char* name = nullptr);
/// Add a QLstm layer to the network
/// @param descriptor - Parameters for the QLstm operation
@@ -804,8 +804,8 @@ public:
/// @param name - Optional name for the layer
/// @return - Interface for configuring the layer.
IConnectableLayer* AddQLstmLayer(const QLstmDescriptor& descriptor,
- const LstmInputParams& params,
- const char* name = nullptr);
+ const LstmInputParams& params,
+ const char* name = nullptr);
/// Adds a Logical Binary layer to the network.
/// @param descriptor - Description of the Logical Binary layer.
@@ -842,6 +842,13 @@ public:
/// @return - Interface for configuring the layer
IConnectableLayer* AddReverseV2Layer(const char* name = nullptr);
+ /// Add a Tile layer to the network
+ /// @param descriptor - Parameters for the Tile operation
+ /// @param name - Optional name for the layer
+ /// @return - Interface for configuring the layer
+ IConnectableLayer* AddTileLayer(const TileDescriptor& descriptor,
+ const char* name = nullptr);
+
void ExecuteStrategy(IStrategy& strategy) const;
protected:
diff --git a/include/armnn/Types.hpp b/include/armnn/Types.hpp
index f05f05b2a0..bf4458ee7f 100644
--- a/include/armnn/Types.hpp
+++ b/include/armnn/Types.hpp
@@ -474,6 +474,7 @@ using InferenceTimingPair = std::pair<HighResolutionClock, HighResolutionClock>;
X(BatchMatMul) \
X(ElementwiseBinary) \
X(ReverseV2) \
+ X(Tile) \
// New layers should be added at last position to minimize instability.
@@ -485,7 +486,7 @@ enum class LayerType
LIST_OF_LAYER_TYPE
#undef X
FirstLayer = Activation,
- LastLayer = ElementwiseBinary
+ LastLayer = Tile
};
const char* GetLayerTypeAsCString(LayerType type);
diff --git a/include/armnn/backends/WorkloadData.hpp b/include/armnn/backends/WorkloadData.hpp
index e7d5e0e689..21a597df8a 100644
--- a/include/armnn/backends/WorkloadData.hpp
+++ b/include/armnn/backends/WorkloadData.hpp
@@ -755,4 +755,9 @@ struct ReverseV2QueueDescriptor : QueueDescriptor
void Validate(const WorkloadInfo& workloadInfo) const;
};
+/// Queue descriptor for the Tile workload; carries the TileDescriptor parameters.
+struct TileQueueDescriptor : QueueDescriptorWithParameters<TileDescriptor>
+{
+ void Validate(const WorkloadInfo& workloadInfo) const;
+};
+
} // namespace armnn
diff --git a/src/armnn/BackendHelper.cpp b/src/armnn/BackendHelper.cpp
index 18184fbfb2..0a55a08087 100644
--- a/src/armnn/BackendHelper.cpp
+++ b/src/armnn/BackendHelper.cpp
@@ -1409,6 +1409,21 @@ bool LayerSupportHandle::IsSwitchSupported(const TensorInfo& input0,
reasonIfUnsupported);
}
+/// Queries backend support for Tile via the generic IsLayerSupported entry point.
+bool LayerSupportHandle::IsTileSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const TileDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported)
+{
+ TensorInfos infos{input, output};
+
+ return m_LayerSupport->IsLayerSupported(LayerType::Tile,
+ infos,
+ descriptor,
+ EmptyOptional(),
+ EmptyOptional(),
+ reasonIfUnsupported);
+}
+
bool LayerSupportHandle::IsTransposeConvolution2dSupported(
const TensorInfo& input,
const TensorInfo& output,
diff --git a/src/armnn/LayersFwd.hpp b/src/armnn/LayersFwd.hpp
index d3ce6f2a67..743b8d7205 100644
--- a/src/armnn/LayersFwd.hpp
+++ b/src/armnn/LayersFwd.hpp
@@ -76,6 +76,7 @@
#include "layers/StridedSliceLayer.hpp"
#include "layers/SubtractionLayer.hpp"
#include "layers/SwitchLayer.hpp"
+#include "layers/TileLayer.hpp"
#include "layers/TransposeConvolution2dLayer.hpp"
#include "layers/TransposeLayer.hpp"
#include "layers/UnidirectionalSequenceLstmLayer.hpp"
@@ -178,6 +179,7 @@ DECLARE_LAYER(StandIn)
DECLARE_LAYER(StridedSlice)
DECLARE_LAYER(Subtraction)
DECLARE_LAYER(Switch)
+DECLARE_LAYER(Tile)
DECLARE_LAYER(Transpose)
DECLARE_LAYER(TransposeConvolution2d)
DECLARE_LAYER(UnidirectionalSequenceLstm)
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index 27e91ae39c..22d2c78c65 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -644,6 +644,12 @@ IConnectableLayer* INetwork::AddReverseV2Layer(const char *name)
return pNetworkImpl->AddReverseV2Layer(name);
}
+/// Adds a Tile layer; forwards to the NetworkImpl implementation.
+IConnectableLayer* INetwork::AddTileLayer(const TileDescriptor& descriptor,
+ const char* name)
+{
+ return pNetworkImpl->AddTileLayer(descriptor, name);
+}
+
void INetwork::ExecuteStrategy(IStrategy& strategy) const
{
return pNetworkImpl->ExecuteStrategy(strategy);
@@ -2934,6 +2940,11 @@ IConnectableLayer* NetworkImpl::AddReverseV2Layer(const char *name)
return m_Graph->AddLayer<ReverseV2Layer>(name);
}
+/// Creates the TileLayer node in the graph.
+IConnectableLayer* NetworkImpl::AddTileLayer(const TileDescriptor& desc, const char* name)
+{
+ return m_Graph->AddLayer<TileLayer>(desc, name);
+}
+
IConnectableLayer* NetworkImpl::AddPrecompiledLayer(const PreCompiledDescriptor& preCompiledDescriptor,
CompiledBlobPtr compiledBlobPtr,
const Optional<BackendId>& backend,
diff --git a/src/armnn/Network.hpp b/src/armnn/Network.hpp
index ae287f32d1..a84a0e9ba4 100644
--- a/src/armnn/Network.hpp
+++ b/src/armnn/Network.hpp
@@ -220,6 +220,9 @@ public:
IConnectableLayer* AddSwitchLayer(const char* name = nullptr);
+ IConnectableLayer* AddTileLayer(const TileDescriptor& tileDescriptor,
+ const char* name = nullptr);
+
IConnectableLayer* AddTransposeConvolution2dLayer(const TransposeConvolution2dDescriptor& descriptor,
const ConstTensor& weights,
const Optional<ConstTensor>& biases,
diff --git a/src/armnn/layers/TileLayer.cpp b/src/armnn/layers/TileLayer.cpp
new file mode 100644
index 0000000000..3c313905fe
--- /dev/null
+++ b/src/armnn/layers/TileLayer.cpp
@@ -0,0 +1,71 @@
+//
+// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "TileLayer.hpp"
+
+#include <armnn/backends/WorkloadFactory.hpp>
+#include "layers/LayerCloneBase.hpp"
+
+namespace armnn
+{
+/// Constructor: a Tile layer has exactly one input and one output slot.
+TileLayer::TileLayer(const TileDescriptor &param, const char *name)
+ : LayerWithParameters(1, 1, LayerType::Tile, param, name)
+{}
+
+/// Builds the backend workload for this layer.
+/// PrepInfoAndDesc() fills the descriptor with this layer's parameters and tensor infos.
+std::unique_ptr<IWorkload> TileLayer::CreateWorkload(const IWorkloadFactory &factory) const
+{
+ TileQueueDescriptor descriptor;
+ SetAdditionalInfo(descriptor);
+
+ return factory.CreateWorkload(LayerType::Tile, descriptor, PrepInfoAndDesc(descriptor));
+}
+
+/// Creates a copy of this layer in the given graph.
+TileLayer* TileLayer::Clone(armnn::Graph &graph) const
+{
+ // CloneBase returns a raw pointer owned by the graph; std::move on it is a no-op.
+ return CloneBase<TileLayer>(graph, m_Param, GetName());
+}
+
+/// Infers the output shape: each input dimension is multiplied by its tile factor.
+/// @throws LayerValidationException if the multiples length differs from the input rank.
+std::vector<TensorShape> TileLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
+{
+ ARMNN_ASSERT(inputShapes.size() == 1);
+ const TensorShape& inputShape = inputShapes[0];
+ const std::vector<uint32_t> multipleShape = m_Param.m_Multiples;
+
+ std::vector<unsigned int> dimSizes;
+
+ // Check input shape and multiples have same length and multiply them together to get output shape
+ if(inputShape.GetNumDimensions() == multipleShape.size())
+ {
+ for(uint32_t i = 0; i < inputShape.GetNumDimensions(); ++i)
+ {
+ // Append in order: inserting at begin() would reverse the dimensions.
+ dimSizes.push_back(inputShape[i] * multipleShape[i]);
+ }
+ }
+ else
+ {
+ throw LayerValidationException("TileLayer: input rank and length of multiples are different.");
+ }
+
+ // TensorShape(numDimensions, sizes): the output rank equals the input rank,
+ // so pass GetNumDimensions() here, not the element count.
+ return std::vector<TensorShape>({TensorShape(inputShape.GetNumDimensions(), dimSizes.data())});
+}
+
+/// Validates (or overwrites, depending on the shape-inference method) the output
+/// tensor shape against the shape inferred from the connected input slot.
+void TileLayer::ValidateTensorShapesFromInputs()
+{
+ VerifyLayerConnections(1, CHECK_LOCATION());
+
+ const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
+
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
+
+ auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetTensorInfo().GetShape() });
+
+ ARMNN_ASSERT(inferredShapes.size() == 1);
+
+ ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "TileLayer");
+}
+
+} \ No newline at end of file
diff --git a/src/armnn/layers/TileLayer.hpp b/src/armnn/layers/TileLayer.hpp
new file mode 100644
index 0000000000..632cdb426b
--- /dev/null
+++ b/src/armnn/layers/TileLayer.hpp
@@ -0,0 +1,45 @@
+//
+// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "LayerWithParameters.hpp"
+
+namespace armnn
+{
+
+class TileLayer : public LayerWithParameters<TileDescriptor>
+{
+public:
+ /// Makes a workload for the Tile type.
+ /// @param [in] factory The workload factory which will create the workload.
+ /// @return A pointer to the created workload, or nullptr if not created.
+ virtual std::unique_ptr<IWorkload> CreateWorkload(const IWorkloadFactory& factory) const override;
+
+ /// Creates a dynamically-allocated copy of this layer.
+ /// @param [in] graph The graph into which this layer is being cloned.
+ TileLayer* Clone(Graph& graph) const override;
+
+ /// Infers the output shapes from given input shapes and layer properties.
+ /// @param [in] inputShapes The input shapes layer has.
+ /// @return A vector to the inferred output shape.
+ std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
+
+ /// Check if the input tensor tile(s)
+ /// will lead to a valid configuration of @ref TileLayer.
+ void ValidateTensorShapesFromInputs() override;
+
+protected:
+ /// Constructor to create a TileLayer.
+ /// @param [in] param Descriptor holding the per-dimension tile multiples.
+ /// @param [in] name Optional name for the layer.
+ TileLayer(const TileDescriptor& param, const char* name);
+
+ /// Default destructor.
+ ~TileLayer() = default;
+};
+
+} // namespace armnn
diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp
index bd3c7c2760..7efca9de50 100644
--- a/src/backends/backendsCommon/WorkloadData.cpp
+++ b/src/backends/backendsCommon/WorkloadData.cpp
@@ -207,9 +207,9 @@ void ValidateBiasTensorQuantization(const TensorInfo& biasTensor,
//---------------------------------------------------------------
void ValidateTensors(const std::vector<ITensorHandle*>& vec,
- unsigned int numExpected,
- const std::string& descName,
- const std::string& varName)
+ unsigned int numExpected,
+ const std::string& descName,
+ const std::string& varName)
{
if (vec.empty() && numExpected > 0)
{
@@ -433,9 +433,9 @@ void QueueDescriptor::ValidateTensorNumDimensions(const TensorInfo& tensor,
//---------------------------------------------------------------
void QueueDescriptor::ValidateTensorNumDimNumElem(const TensorInfo& tensorInfo,
- unsigned int numDimension,
- unsigned int numElements,
- std::string const& tensorName) const
+ unsigned int numDimension,
+ unsigned int numElements,
+ std::string const& tensorName) const
{
const std::string functionName{"ValidateTensorNumDimNumElem"};
ValidateTensorNumDimensions(tensorInfo, functionName, numDimension, tensorName);
@@ -1614,7 +1614,8 @@ void ResizeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
}
}
-void ReverseV2QueueDescriptor::Validate(const WorkloadInfo &workloadInfo) const {
+void ReverseV2QueueDescriptor::Validate(const WorkloadInfo &workloadInfo) const
+{
const std::string descriptorName{"ReverseV2QueueDescriptor"};
// Backend restriction
@@ -2948,7 +2949,6 @@ void ShapeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
DataType::Float32,
DataType::QAsymmS8,
DataType::QAsymmU8,
- DataType::QAsymmS8,
DataType::QSymmS8,
DataType::QSymmS16,
DataType::Signed32
@@ -4378,5 +4378,36 @@ void BatchMatMulQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) cons
}
}
+/// Validates a Tile workload: exactly one input and one output, a supported
+/// data type, and a multiples vector whose length equals the input tensor rank.
+void TileQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
+{
+ // Hold the name by value: binding a const& to a temporary std::string relies
+ // on lifetime extension and is flagged by static analysis.
+ const std::string descriptorName{"TileQueueDescriptor"};
+
+ ValidateNumInputs(workloadInfo, descriptorName, 1);
+ ValidateNumOutputs(workloadInfo, descriptorName, 1);
+
+ const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
+ const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
+
+ std::vector<DataType> supportedTypes =
+ {
+ DataType::Float32,
+ DataType::Float16,
+ DataType::QAsymmS8,
+ DataType::QAsymmU8,
+ DataType::QSymmS8,
+ DataType::QSymmS16,
+ DataType::Signed32
+ };
+
+ // Multiples length must be the same as the number of dimensions in input.
+ if (m_Parameters.m_Multiples.size() != inputTensorInfo.GetNumDimensions())
+ {
+ throw InvalidArgumentException(descriptorName +
+ ": Multiples length is not same as the number of dimensions in Input.");
+ }
+
+ ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
+ ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
+}
} // namespace armnn \ No newline at end of file
diff --git a/src/backends/backendsCommon/WorkloadFactory.cpp b/src/backends/backendsCommon/WorkloadFactory.cpp
index ee797b632c..9ab3ca5b77 100644
--- a/src/backends/backendsCommon/WorkloadFactory.cpp
+++ b/src/backends/backendsCommon/WorkloadFactory.cpp
@@ -1352,6 +1352,19 @@ bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId,
reason);
break;
}
+ case LayerType::Tile:
+ {
+ auto cLayer = PolymorphicDowncast<const TileLayer*>(&layer);
+ const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
+ const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
+
+ result = layerSupportObject.IsTileSupported(OverrideDataType(input, dataType),
+ OverrideDataType(output, dataType),
+ cLayer->GetParameters(),
+ reason);
+
+ break;
+ }
case LayerType::Transpose:
{
auto cLayer = PolymorphicDowncast<const TransposeLayer*>(&layer);
diff --git a/src/backends/backendsCommon/common.mk b/src/backends/backendsCommon/common.mk
index 1e0467deba..2c41285615 100644
--- a/src/backends/backendsCommon/common.mk
+++ b/src/backends/backendsCommon/common.mk
@@ -106,6 +106,7 @@ COMMON_TEST_SOURCES := \
test/layerTests/StackTestImpl.cpp \
test/layerTests/StridedSliceTestImpl.cpp \
test/layerTests/SubtractionTestImpl.cpp \
+ test/layerTests/TileTestImpl.cpp \
test/layerTests/TransposeConvolution2dTestImpl.cpp \
test/layerTests/UnidirectionalSequenceLstmTestImpl.cpp \
memoryOptimizerStrategyLibrary/test/ConstMemoryStrategyTests.cpp \
diff --git a/src/backends/backendsCommon/test/CMakeLists.txt b/src/backends/backendsCommon/test/CMakeLists.txt
index 8d6891a68d..bbd1324010 100644
--- a/src/backends/backendsCommon/test/CMakeLists.txt
+++ b/src/backends/backendsCommon/test/CMakeLists.txt
@@ -199,6 +199,8 @@ list(APPEND armnnBackendsCommonUnitTests_sources
layerTests/StridedSliceTestImpl.hpp
layerTests/SubtractionTestImpl.cpp
layerTests/SubtractionTestImpl.hpp
+ layerTests/TileTestImpl.cpp
+ layerTests/TileTestImpl.hpp
layerTests/TransposeConvolution2dTestImpl.cpp
layerTests/TransposeConvolution2dTestImpl.hpp
layerTests/UnidirectionalSequenceLstmTestImpl.cpp
diff --git a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
index 182fab97be..ff02e06859 100644
--- a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
+++ b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
@@ -740,10 +740,14 @@ ARMNN_NO_DEPRECATE_WARN_END
DECLARE_LAYER_POLICY_1_PARAM(Rank)
+DECLARE_LAYER_POLICY_2_PARAM(Reduce)
+
DECLARE_LAYER_POLICY_2_PARAM(Resize)
DECLARE_LAYER_POLICY_2_PARAM(Reshape)
+DECLARE_LAYER_POLICY_1_PARAM(ReverseV2)
+
DECLARE_LAYER_POLICY_1_PARAM(Shape)
DECLARE_LAYER_POLICY_2_PARAM(Slice)
@@ -766,10 +770,10 @@ ARMNN_NO_DEPRECATE_WARN_BEGIN
DECLARE_LAYER_POLICY_1_PARAM(Subtraction)
ARMNN_NO_DEPRECATE_WARN_END
-DECLARE_LAYER_POLICY_2_PARAM(Reduce)
-
DECLARE_LAYER_POLICY_1_PARAM(Switch)
+DECLARE_LAYER_POLICY_2_PARAM(Tile)
+
DECLARE_LAYER_POLICY_2_PARAM(Transpose)
DECLARE_LAYER_POLICY_2_PARAM(TransposeConvolution2d)
diff --git a/src/backends/backendsCommon/test/LayerTests.hpp b/src/backends/backendsCommon/test/LayerTests.hpp
index 755a665ba6..7182cb2d47 100644
--- a/src/backends/backendsCommon/test/LayerTests.hpp
+++ b/src/backends/backendsCommon/test/LayerTests.hpp
@@ -74,6 +74,7 @@
#include <backendsCommon/test/layerTests/StackTestImpl.hpp>
#include <backendsCommon/test/layerTests/StridedSliceTestImpl.hpp>
#include <backendsCommon/test/layerTests/SubtractionTestImpl.hpp>
+#include <backendsCommon/test/layerTests/TileTestImpl.hpp>
#include <backendsCommon/test/layerTests/TransposeConvolution2dTestImpl.hpp>
#include <backendsCommon/test/layerTests/TransposeTestImpl.hpp>
#include <backendsCommon/test/layerTests/UnidirectionalSequenceLstmTestImpl.hpp>
diff --git a/src/backends/backendsCommon/test/layerTests/TileTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/TileTestImpl.cpp
new file mode 100644
index 0000000000..0b13bba425
--- /dev/null
+++ b/src/backends/backendsCommon/test/layerTests/TileTestImpl.cpp
@@ -0,0 +1,434 @@
+//
+// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "TileTestImpl.hpp"
+#include <vector>
+
+#include <armnn/backends/IBackendInternal.hpp>
+#include <armnn/backends/Workload.hpp>
+#include <armnn/backends/WorkloadData.hpp>
+#include <armnn/backends/WorkloadFactory.hpp>
+
+#include <armnnTestUtils/WorkloadTestUtils.hpp>
+#include <armnnTestUtils/TensorCopyUtils.hpp>
+
+#include <armnn/BackendHelper.hpp>
+
+#include <armnnUtils/QuantizeHelper.hpp>
+
+namespace
+{
+// Generic driver: builds a Tile workload from the descriptor and tensor infos,
+// runs it, and packages actual vs expected output for comparison.
+// The descriptor is taken by value and moved into the queue descriptor.
+template<typename T, std::size_t NumDims>
+LayerTestResult<T, NumDims> TileTestImpl(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
+ armnn::TileDescriptor descriptor,
+ armnn::TensorInfo& inputInfo,
+ armnn::TensorInfo& outputInfo,
+ std::vector<T>& inputData,
+ std::vector<T>& expectedOutputData)
+{
+ // Note: the original draft also created an unused LayerTestResult and an
+ // unused LayerSupportHandle; both were dead code and have been removed.
+ std::vector<T> outputActual(outputInfo.GetNumElements());
+
+ armnn::TileQueueDescriptor queueDescriptor;
+ queueDescriptor.m_Parameters = std::move(descriptor);
+ armnn::WorkloadInfo workloadInfo;
+
+ std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputInfo);
+ std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputInfo);
+
+ AddInputToWorkload(queueDescriptor, workloadInfo, inputInfo, inputHandle.get());
+ AddOutputToWorkload(queueDescriptor, workloadInfo, outputInfo, outputHandle.get());
+
+ auto workload = workloadFactory.CreateWorkload(armnn::LayerType::Tile, queueDescriptor, workloadInfo);
+
+ inputHandle->Allocate();
+ outputHandle->Allocate();
+
+ CopyDataToITensorHandle(inputHandle.get(), inputData.data());
+
+ workload->PostAllocationConfigure();
+ ExecuteWorkload(*workload, memoryManager);
+
+ CopyDataFromITensorHandle(outputActual.data(), outputHandle.get());
+ return LayerTestResult<T, NumDims>(outputActual,
+ expectedOutputData,
+ outputHandle->GetShape(),
+ outputInfo.GetShape());
+}
+} // anonymous namespace
+
+// Tiles a 1-D tensor of 3 elements by a factor of 3 -> 9 elements.
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<T, 1> Tile1dTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ auto descriptor = armnn::TileDescriptor( std::vector<uint32_t>{ 3 } );
+
+ float qScale = 1.0f;
+ int32_t qOffset = 0;
+
+ armnn::TensorShape inputShape = { 3 };
+ armnn::TensorShape outputShape = { 9 };
+
+ armnn::TensorInfo inputInfo(inputShape, ArmnnType);
+ armnn::TensorInfo outputInfo(outputShape, ArmnnType);
+
+ std::vector<T> input = armnnUtils::QuantizedVector<T>(
+ {
+ 0.f, 1.f, 2.f
+ }, qScale, qOffset);
+
+ std::vector<T> expectedOutput = armnnUtils::QuantizedVector<T>(
+ {
+ 0.f, 1.f, 2.f,
+ 0.f, 1.f, 2.f,
+ 0.f, 1.f, 2.f
+ }, qScale, qOffset);
+
+ return TileTestImpl<T, 1>(workloadFactory,
+ memoryManager,
+ tensorHandleFactory,
+ descriptor,
+ inputInfo,
+ outputInfo,
+ input,
+ expectedOutput);
+}
+
+// Tiles a 2x3 tensor by { 2, 2 } -> 4x6.
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<T, 2> Tile2dTest(armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ auto descriptor = armnn::TileDescriptor(std::vector<uint32_t>{ 2, 2 });
+
+ float qScale = 1.0f;
+ int32_t qOffset = 0;
+
+ armnn::TensorShape inputShape = { 2, 3 };
+ armnn::TensorShape outputShape = { 4, 6 };
+
+ armnn::TensorInfo inputInfo(inputShape, ArmnnType);
+ armnn::TensorInfo outputInfo(outputShape, ArmnnType);
+
+ std::vector<T> input = armnnUtils::QuantizedVector<T>(
+ {
+ 0.f, 1.f, 2.f,
+ 3.f, 4.f, 5.f
+ }, qScale, qOffset);
+
+
+ std::vector<T> expectedOutput = armnnUtils::QuantizedVector<T>(
+ {
+ 0.f, 1.f, 2.f, 0.f, 1.f, 2.f,
+ 3.f, 4.f, 5.f, 3.f, 4.f, 5.f,
+
+ 0.f, 1.f, 2.f, 0.f, 1.f, 2.f,
+ 3.f, 4.f, 5.f, 3.f, 4.f, 5.f
+ }, qScale, qOffset);
+
+ return TileTestImpl<T, 2>(workloadFactory,
+ memoryManager,
+ tensorHandleFactory,
+ descriptor,
+ inputInfo,
+ outputInfo,
+ input,
+ expectedOutput);
+}
+
+// Tiles a 2x2x3 tensor by { 1, 2, 1 } -> 2x4x3 (middle dimension repeated).
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<T, 3> Tile3dTest(armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ auto descriptor = armnn::TileDescriptor(std::vector<uint32_t>{ 1, 2, 1 });
+
+ float qScale = 1.0f;
+ int32_t qOffset = 0;
+
+ armnn::TensorShape inputShape = { 2, 2, 3 };
+ armnn::TensorShape outputShape = { 2, 4, 3 };
+
+ armnn::TensorInfo inputInfo(inputShape, ArmnnType);
+ armnn::TensorInfo outputInfo(outputShape, ArmnnType);
+ std::vector<T> input = armnnUtils::QuantizedVector<T>(
+ {
+ 0.f, 1.f, 2.f,
+ 3.f, 4.f, 5.f,
+
+ 6.f, 7.f, 8.f,
+ 9.f, 10.f, 11.f
+ }, qScale, qOffset);
+
+
+ std::vector<T> expectedOutput = armnnUtils::QuantizedVector<T>(
+ {
+ 0.f, 1.f, 2.f,
+ 3.f, 4.f, 5.f,
+ 0.f, 1.f, 2.f,
+ 3.f, 4.f, 5.f,
+
+ 6.f, 7.f, 8.f,
+ 9.f, 10.f, 11.f,
+ 6.f, 7.f, 8.f,
+ 9.f, 10.f, 11.f
+ }, qScale, qOffset);
+
+ return TileTestImpl<T, 3>(workloadFactory,
+ memoryManager,
+ tensorHandleFactory,
+ descriptor,
+ inputInfo,
+ outputInfo,
+ input,
+ expectedOutput);
+}
+
+// Tiles a 2x2x2x3 tensor by { 2, 1, 1, 1 } -> 4x2x2x3 (batch dimension doubled).
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<T, 4> Tile4dTest(armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ auto descriptor = armnn::TileDescriptor(std::vector<uint32_t>{ 2, 1, 1, 1 });
+
+ float qScale = 1.0f;
+ int32_t qOffset = 0;
+
+ armnn::TensorShape inputShape = { 2, 2, 2, 3};
+ armnn::TensorShape outputShape = { 4, 2, 2, 3};
+
+ armnn::TensorInfo inputInfo(inputShape, ArmnnType);
+ armnn::TensorInfo outputInfo(outputShape, ArmnnType);
+
+ std::vector<T> input = armnnUtils::QuantizedVector<T>(
+ {
+ 0.f, 1.f, 2.f,
+ 3.f, 4.f, 5.f,
+
+ 6.f, 7.f, 8.f,
+ 9.f, 10.f, 11.f,
+
+ 0.f, 1.f, 2.f,
+ 3.f, 4.f, 5.f,
+
+ 6.f, 7.f, 8.f,
+ 9.f, 10.f, 11.f
+ }, qScale, qOffset);
+
+
+ std::vector<T> expectedOutput = armnnUtils::QuantizedVector<T>(
+ {
+ 0.f, 1.f, 2.f,
+ 3.f, 4.f, 5.f,
+
+ 6.f, 7.f, 8.f,
+ 9.f, 10.f, 11.f,
+
+ 0.f, 1.f, 2.f,
+ 3.f, 4.f, 5.f,
+
+ 6.f, 7.f, 8.f,
+ 9.f, 10.f, 11.f,
+
+ 0.f, 1.f, 2.f,
+ 3.f, 4.f, 5.f,
+
+ 6.f, 7.f, 8.f,
+ 9.f, 10.f, 11.f,
+
+ 0.f, 1.f, 2.f,
+ 3.f, 4.f, 5.f,
+
+ 6.f, 7.f, 8.f,
+ 9.f, 10.f, 11.f
+ }, qScale, qOffset);
+
+ return TileTestImpl<T, 4>(workloadFactory,
+ memoryManager,
+ tensorHandleFactory,
+ descriptor,
+ inputInfo,
+ outputInfo,
+ input,
+ expectedOutput);
+}
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 1> // Explicit instantiations for every data type exercised by the Tile tests.
+Tile1dTest<armnn::DataType::Float32>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 2>
+Tile2dTest<armnn::DataType::Float32>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 3>
+Tile3dTest<armnn::DataType::Float32>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
+Tile4dTest<armnn::DataType::Float32>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float16>, 1>
+Tile1dTest<armnn::DataType::Float16>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float16>, 2>
+Tile2dTest<armnn::DataType::Float16>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float16>, 3>
+Tile3dTest<armnn::DataType::Float16>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float16>, 4>
+Tile4dTest<armnn::DataType::Float16>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 1>
+Tile1dTest<armnn::DataType::QAsymmS8>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 2>
+Tile2dTest<armnn::DataType::QAsymmS8>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 3>
+Tile3dTest<armnn::DataType::QAsymmS8>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 4>
+Tile4dTest<armnn::DataType::QAsymmS8>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 1>
+Tile1dTest<armnn::DataType::QAsymmU8>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 2>
+Tile2dTest<armnn::DataType::QAsymmU8>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 3>
+Tile3dTest<armnn::DataType::QAsymmU8>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
+Tile4dTest<armnn::DataType::QAsymmU8>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS8>, 1>
+Tile1dTest<armnn::DataType::QSymmS8>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS8>, 2>
+Tile2dTest<armnn::DataType::QSymmS8>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS8>, 3>
+Tile3dTest<armnn::DataType::QSymmS8>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS8>, 4>
+Tile4dTest<armnn::DataType::QSymmS8>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 1>
+Tile1dTest<armnn::DataType::QSymmS16>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 2>
+Tile2dTest<armnn::DataType::QSymmS16>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 3>
+Tile3dTest<armnn::DataType::QSymmS16>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
+Tile4dTest<armnn::DataType::QSymmS16>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Signed32>, 1>
+Tile1dTest<armnn::DataType::Signed32>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Signed32>, 2>
+Tile2dTest<armnn::DataType::Signed32>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Signed32>, 3>
+Tile3dTest<armnn::DataType::Signed32>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Signed32>, 4>
+Tile4dTest<armnn::DataType::Signed32>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
diff --git a/src/backends/backendsCommon/test/layerTests/TileTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/TileTestImpl.hpp
new file mode 100644
index 0000000000..3699bda8ef
--- /dev/null
+++ b/src/backends/backendsCommon/test/layerTests/TileTestImpl.hpp
@@ -0,0 +1,35 @@
+//
+// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <armnnTestUtils/LayerTestResult.hpp>
+#include <armnn/backends/IBackendInternal.hpp>
+#include <armnn/backends/WorkloadFactory.hpp>
+#include "ResolveType.hpp"
+
+// Runs the Tile workload on a 4D input and returns actual vs expected results.
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 4> Tile4dTest(armnn::IWorkloadFactory& workloadFactory,
+                                 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+                                 const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+// Runs the Tile workload on a 3D input and returns actual vs expected results.
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 3> Tile3dTest(armnn::IWorkloadFactory& workloadFactory,
+                                 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+                                 const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+// Runs the Tile workload on a 2D input and returns actual vs expected results.
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 2> Tile2dTest(armnn::IWorkloadFactory& workloadFactory,
+                                 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+                                 const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+// Runs the Tile workload on a 1D input and returns actual vs expected results.
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 1> Tile1dTest(armnn::IWorkloadFactory& workloadFactory,
+                                 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+                                 const armnn::ITensorHandleFactory& tensorHandleFactory);
diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp
index e94478f088..9d396e5db9 100644
--- a/src/backends/reference/RefLayerSupport.cpp
+++ b/src/backends/reference/RefLayerSupport.cpp
@@ -402,6 +402,11 @@ bool RefLayerSupport::IsLayerSupported(const LayerType& type,
reasonIfUnsupported);
case LayerType::Subtraction:
return IsSubtractionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
+ case LayerType::Tile:
+ return IsTileSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const TileDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
case LayerType::Transpose:
return IsTransposeSupported(infos[0],
infos[1],
@@ -2693,6 +2698,35 @@ bool RefLayerSupport::IsPreluSupported(const TensorInfo& input,
return supported;
}
+bool RefLayerSupport::IsTileSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const TileDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ IgnoreUnused(descriptor); // No constraints on the multiples are checked at this level.
+
+ bool supported = true;
+
+ std::array<DataType, 7> supportedTypes
+ {
+ DataType::Float32,
+ DataType::Float16,
+ DataType::QAsymmS8,
+ DataType::QAsymmU8,
+ DataType::QSymmS8,
+ DataType::QSymmS16,
+ DataType::Signed32
+ };
+
+ supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
+ "Tile: input type not supported.");
+
+ supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
+ "Tile: output type not supported.");
+
+ return supported;
+}
+
bool RefLayerSupport::IsTransposeConvolution2dSupported(const TensorInfo& input,
const TensorInfo& output,
const TransposeConvolution2dDescriptor& descriptor,
diff --git a/src/backends/reference/RefLayerSupport.hpp b/src/backends/reference/RefLayerSupport.hpp
index 21d59e27fc..42b5814380 100644
--- a/src/backends/reference/RefLayerSupport.hpp
+++ b/src/backends/reference/RefLayerSupport.hpp
@@ -354,6 +354,11 @@ public:
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ bool IsTileSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const TileDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const;
+
bool IsTransposeConvolution2dSupported(
const TensorInfo& input,
const TensorInfo& output,
diff --git a/src/backends/reference/RefWorkloadFactory.cpp b/src/backends/reference/RefWorkloadFactory.cpp
index 7d5f742126..86a584452d 100644
--- a/src/backends/reference/RefWorkloadFactory.cpp
+++ b/src/backends/reference/RefWorkloadFactory.cpp
@@ -618,6 +618,11 @@ std::unique_ptr<IWorkload> RefWorkloadFactory::CreateWorkload(LayerType type,
return std::make_unique<RefSubtractionWorkload<float>>(*subtractionQueueDescriptor, info);
}
}
+ case LayerType::Tile:
+ {
+ auto tileQueueDescriptor = PolymorphicDowncast<const TileQueueDescriptor*>(&descriptor);
+ return std::make_unique<RefTileWorkload>(*tileQueueDescriptor, info);
+ }
case LayerType::Transpose:
{
auto transposeQueueDescriptor = PolymorphicDowncast<const TransposeQueueDescriptor*>(&descriptor);
diff --git a/src/backends/reference/backend.mk b/src/backends/reference/backend.mk
index dfafa0ac39..7f047af930 100644
--- a/src/backends/reference/backend.mk
+++ b/src/backends/reference/backend.mk
@@ -102,6 +102,7 @@ BACKEND_SOURCES := \
workloads/RefStackWorkload.cpp \
workloads/RefStridedSliceWorkload.cpp \
workloads/RefSplitterWorkload.cpp \
+ workloads/RefTileWorkload.cpp \
workloads/RefTransposeConvolution2dWorkload.cpp \
workloads/RefTransposeWorkload.cpp \
workloads/RefUnidirectionalSequenceLstmWorkload.cpp \
@@ -115,6 +116,7 @@ BACKEND_SOURCES := \
workloads/StringMapping.cpp \
workloads/Softmax.cpp \
workloads/Splitter.cpp \
+ workloads/Tile.cpp \
workloads/TransposeConvolution2d.cpp
else
diff --git a/src/backends/reference/test/RefLayerTests.cpp b/src/backends/reference/test/RefLayerTests.cpp
index a68775e8e9..1f42397458 100644
--- a/src/backends/reference/test/RefLayerTests.cpp
+++ b/src/backends/reference/test/RefLayerTests.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017,2022, 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017,2022-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -1596,6 +1596,42 @@ ARMNN_AUTO_TEST_CASE_WITH_THF(ReverseV2Simple2Dim2AxisQAsymmS8, ReverseV2SimpleT
ARMNN_AUTO_TEST_CASE_WITH_THF(ReverseV2Simple2Dim2AxisQAsymmU8, ReverseV2SimpleTest2Dim2Axis<DataType::QAsymmU8>)
ARMNN_AUTO_TEST_CASE_WITH_THF(ReverseV2Simple2Dim2AxisQSymmS16, ReverseV2SimpleTest2Dim2Axis<DataType::QSymmS16>)
+// Tile
+ARMNN_AUTO_TEST_CASE_WITH_THF(Tile1dTestFloat32, Tile1dTest<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Tile2dTestFloat32, Tile2dTest<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Tile3dTestFloat32, Tile3dTest<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Tile4dTestFloat32, Tile4dTest<DataType::Float32>)
+
+ARMNN_AUTO_TEST_CASE_WITH_THF(Tile1dTestFloat16, Tile1dTest<DataType::Float16>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Tile2dTestFloat16, Tile2dTest<DataType::Float16>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Tile3dTestFloat16, Tile3dTest<DataType::Float16>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Tile4dTestFloat16, Tile4dTest<DataType::Float16>)
+
+ARMNN_AUTO_TEST_CASE_WITH_THF(Tile1dTestQAsymmS8, Tile1dTest<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Tile2dTestQAsymmS8, Tile2dTest<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Tile3dTestQAsymmS8, Tile3dTest<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Tile4dTestQAsymmS8, Tile4dTest<DataType::QAsymmS8>)
+
+ARMNN_AUTO_TEST_CASE_WITH_THF(Tile1dTestQAsymmU8, Tile1dTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Tile2dTestQAsymmU8, Tile2dTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Tile3dTestQAsymmU8, Tile3dTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Tile4dTestQAsymmU8, Tile4dTest<DataType::QAsymmU8>)
+
+ARMNN_AUTO_TEST_CASE_WITH_THF(Tile1dTestQSymmS8, Tile1dTest<DataType::QSymmS8>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Tile2dTestQSymmS8, Tile2dTest<DataType::QSymmS8>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Tile3dTestQSymmS8, Tile3dTest<DataType::QSymmS8>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Tile4dTestQSymmS8, Tile4dTest<DataType::QSymmS8>)
+
+ARMNN_AUTO_TEST_CASE_WITH_THF(Tile1dTestQSymmS16, Tile1dTest<DataType::QSymmS16>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Tile2dTestQSymmS16, Tile2dTest<DataType::QSymmS16>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Tile3dTestQSymmS16, Tile3dTest<DataType::QSymmS16>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Tile4dTestQSymmS16, Tile4dTest<DataType::QSymmS16>)
+
+ARMNN_AUTO_TEST_CASE_WITH_THF(Tile1dTestSigned32, Tile1dTest<DataType::Signed32>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Tile2dTestSigned32, Tile2dTest<DataType::Signed32>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Tile3dTestSigned32, Tile3dTest<DataType::Signed32>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Tile4dTestSigned32, Tile4dTest<DataType::Signed32>)
+
// Fake Quantization
ARMNN_AUTO_TEST_CASE_WITH_THF(FakeQuantization, FakeQuantizationTest)
diff --git a/src/backends/reference/workloads/CMakeLists.txt b/src/backends/reference/workloads/CMakeLists.txt
index 28f6d2f371..9372568133 100644
--- a/src/backends/reference/workloads/CMakeLists.txt
+++ b/src/backends/reference/workloads/CMakeLists.txt
@@ -180,6 +180,8 @@ list(APPEND armnnRefBackendWorkloads_sources
RefStackWorkload.hpp
RefStridedSliceWorkload.cpp
RefStridedSliceWorkload.hpp
+ RefTileWorkload.cpp
+ RefTileWorkload.hpp
RefTransposeConvolution2dWorkload.cpp
RefTransposeConvolution2dWorkload.hpp
RefTransposeWorkload.cpp
@@ -209,6 +211,8 @@ list(APPEND armnnRefBackendWorkloads_sources
StridedSlice.cpp
StringMapping.cpp
StringMapping.hpp
+ Tile.cpp
+ Tile.hpp
TensorBufferArrayView.hpp
TransposeConvolution2d.cpp
TransposeConvolution2d.hpp
diff --git a/src/backends/reference/workloads/RefTileWorkload.cpp b/src/backends/reference/workloads/RefTileWorkload.cpp
new file mode 100644
index 0000000000..9fa8c8c3d3
--- /dev/null
+++ b/src/backends/reference/workloads/RefTileWorkload.cpp
@@ -0,0 +1,48 @@
+//
+// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "RefTileWorkload.hpp"
+#include "RefWorkloadUtils.hpp"
+#include "Tile.hpp"
+#include "Profiling.hpp"
+
+namespace armnn
+{
+
+    RefTileWorkload::RefTileWorkload(const TileQueueDescriptor& descriptor, const WorkloadInfo& info)
+        : RefBaseWorkload(descriptor, info)
+    {}
+
+    void RefTileWorkload::Execute() const
+    {
+        Execute(m_Data.m_Inputs, m_Data.m_Outputs);
+    }
+
+    void RefTileWorkload::ExecuteAsync(ExecutionData& executionData)
+    {
+        WorkingMemDescriptor* workingMemDescriptor = static_cast<WorkingMemDescriptor*>(executionData.m_Data);
+        Execute(workingMemDescriptor->m_Inputs, workingMemDescriptor->m_Outputs);
+    }
+
+    // Decodes the input as floats, tiles it per the descriptor's multiples, and encodes the result.
+    void RefTileWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
+    {
+        ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefTileWorkload_Execute");
+
+        const TensorInfo& inputInfo = GetTensorInfo(inputs[0]);
+
+        // Reuse the TensorInfo fetched above instead of querying the handle a second time.
+        std::unique_ptr<Decoder<float>> inputDecoder  = MakeDecoder<float>(inputInfo, inputs[0]->Map());
+
+        std::unique_ptr<Encoder<float>> outputEncoder = MakeEncoder<float>(GetTensorInfo(outputs[0]),
+                                                                           outputs[0]->Map());
+
+        Tile(m_Data.m_Parameters,
+             inputInfo,
+             *inputDecoder,
+             *outputEncoder);
+    }
+
+} // namespace armnn
diff --git a/src/backends/reference/workloads/RefTileWorkload.hpp b/src/backends/reference/workloads/RefTileWorkload.hpp
new file mode 100644
index 0000000000..2fb8eab05e
--- /dev/null
+++ b/src/backends/reference/workloads/RefTileWorkload.hpp
@@ -0,0 +1,31 @@
+//
+// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "RefBaseWorkload.hpp"
+#include <armnn/backends/WorkloadData.hpp>
+
+#include "Tile.hpp"
+
+namespace armnn
+{
+
+    /// Reference (CpuRef) workload executing the TILE operation.
+    class RefTileWorkload : public RefBaseWorkload<TileQueueDescriptor>
+    {
+    public:
+        explicit RefTileWorkload(const TileQueueDescriptor& descriptor,
+                                 const WorkloadInfo& info);
+
+        void Execute() const override;
+        void ExecuteAsync(ExecutionData& executionData) override;
+
+    private:
+        // Shared implementation used by both the synchronous and asynchronous paths.
+        void Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const;
+    };
+
+} // namespace armnn
diff --git a/src/backends/reference/workloads/RefWorkloads.hpp b/src/backends/reference/workloads/RefWorkloads.hpp
index e15a7ca047..a36eae501c 100644
--- a/src/backends/reference/workloads/RefWorkloads.hpp
+++ b/src/backends/reference/workloads/RefWorkloads.hpp
@@ -62,6 +62,7 @@
#include "RefStackWorkload.hpp"
#include "RefStridedSliceWorkload.hpp"
#include "RefSpaceToDepthWorkload.hpp"
+#include "RefTileWorkload.hpp"
#include "RefTransposeConvolution2dWorkload.hpp"
#include "RefTransposeWorkload.hpp"
#include "RefUnidirectionalSequenceLstmWorkload.hpp"
diff --git a/src/backends/reference/workloads/Tile.cpp b/src/backends/reference/workloads/Tile.cpp
new file mode 100644
index 0000000000..148c51de2e
--- /dev/null
+++ b/src/backends/reference/workloads/Tile.cpp
@@ -0,0 +1,105 @@
+//
+// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "Tile.hpp"
+#include "Encoders.hpp"
+#include <algorithm>
+#include <functional>
+#include <numeric>
+#include <armnn/backends/WorkloadData.hpp>
+#include <armnn/Logging.hpp>
+
+namespace armnn
+{
+
+// Converts a flattened index into a multi-dimensional coordinate.
+std::vector<uint32_t> IndexToCoordinates(const std::vector<uint32_t>& shape, uint32_t index)
+{
+    std::vector<uint32_t> coordinates;
+    // Walk dimensions from innermost (last) to outermost; the unsigned counter
+    // wraps past zero, which makes (i < shape.size()) fail and ends the loop.
+    for (std::size_t i = shape.size() - 1; i < shape.size(); --i)
+    {
+        // The coordinate in this dimension is the index modulo the dimension size.
+        coordinates.insert(coordinates.begin(), index % shape[i]);
+        // Strip this dimension off the index before moving to the next one.
+        index = index / shape[i];
+    }
+    return coordinates;
+}
+
+// Converts a multi-dimensional coordinate to a flattened row-major index.
+uint32_t CoordinatesToIndex(const TensorShape& shape, const std::vector<uint32_t>& coordinates)
+{
+    uint32_t index = 0;
+    uint32_t base = 1;
+    uint32_t rank = shape.GetNumDimensions();
+    // Accumulate row-major strides from the innermost dimension outwards.
+    for (uint32_t i = rank; i > 0; --i)
+    {
+        index = index + coordinates[i - 1] * base;
+        base = base * shape[i - 1];
+    }
+    return index;
+}
+
+void Tile(const TileDescriptor& params,
+          const TensorInfo& inputInfo,
+          Decoder<float>& inputDecoder,
+          Encoder<float>& outputEncoder)
+{
+    // Input and output always have the same rank.
+    uint32_t rank = inputInfo.GetNumDimensions();
+
+    TensorShape inputShape = inputInfo.GetShape();
+
+    std::vector<uint32_t> outputShape(rank);
+    for (uint32_t i = 0; i < rank; ++i)
+    {
+        outputShape[i] = inputShape[i] * params.m_Multiples[i];
+    }
+
+    // If every multiple is 1 the output equals the input, so copy it straight through.
+    if (std::all_of(params.m_Multiples.begin(), params.m_Multiples.end(),
+                    [](uint32_t multiple) { return multiple == 1; }))
+    {
+        for (uint32_t idx = 0; idx < inputInfo.GetNumElements(); ++idx)
+        {
+            float inputValue = inputDecoder.Get();
+            ++inputDecoder;
+            outputEncoder.Set(inputValue);
+            ++outputEncoder;
+        }
+        return;
+    }
+
+    std::vector<float> inputData = inputDecoder.DecodeTensor(inputInfo.GetShape());
+    // Output element count = input element count * product of all multiples.
+    const auto outputNumElements = inputData.size() * std::accumulate(params.m_Multiples.begin(),
+                                                                      params.m_Multiples.end(),
+                                                                      uint32_t{1},
+                                                                      std::multiplies<>());
+
+    for (uint32_t outputIndex = 0; outputIndex < outputNumElements; ++outputIndex)
+    {
+        std::vector<uint32_t> outputCoords = IndexToCoordinates(outputShape, outputIndex);
+
+        // Fold each output coordinate back into the input via modulus to find the source element.
+        std::vector<uint32_t> inputCoordinates;
+        inputCoordinates.reserve(rank);
+        for (uint32_t i = 0; i < rank; ++i)
+        {
+            inputCoordinates.push_back(outputCoords[i] % inputShape[i]);
+        }
+
+        uint32_t inputIndex = CoordinatesToIndex(inputShape, inputCoordinates);
+
+        // Position the encoder at the output index, then write the gathered value.
+        outputEncoder[outputIndex];
+        outputEncoder.Set(inputData[inputIndex]);
+    }
+}
+
+} // namespace armnn
diff --git a/src/backends/reference/workloads/Tile.hpp b/src/backends/reference/workloads/Tile.hpp
new file mode 100644
index 0000000000..4e7ffc9040
--- /dev/null
+++ b/src/backends/reference/workloads/Tile.hpp
@@ -0,0 +1,27 @@
+//
+// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <armnn/Tensor.hpp>
+#include <armnn/Descriptors.hpp>
+#include "Encoders.hpp"
+#include "Decoders.hpp"
+
+namespace armnn
+{
+
+/// Reference implementation of the TILE operation: repeats the input tensor
+/// along each dimension according to params.m_Multiples.
+/// @param params        Descriptor holding the per-dimension repeat counts.
+/// @param inputInfo     Shape and type information for the input tensor.
+/// @param inputDecoder  Decoder used to read input values as floats.
+/// @param outputEncoder Encoder used to write the tiled output values.
+void Tile(const TileDescriptor& params,
+          const TensorInfo& inputInfo,
+          Decoder<float>& inputDecoder,
+          Encoder<float>& outputEncoder);
+
+} // namespace armnn