Diffstat (limited to 'src/backends/reference')
-rw-r--r--  src/backends/reference/RefLayerSupport.cpp            |  34
-rw-r--r--  src/backends/reference/RefLayerSupport.hpp            |   5
-rw-r--r--  src/backends/reference/RefWorkloadFactory.cpp         |   5
-rw-r--r--  src/backends/reference/backend.mk                     |   2
-rw-r--r--  src/backends/reference/test/RefLayerTests.cpp         |  38
-rw-r--r--  src/backends/reference/workloads/CMakeLists.txt       |   4
-rw-r--r--  src/backends/reference/workloads/RefTileWorkload.cpp  |  47
-rw-r--r--  src/backends/reference/workloads/RefTileWorkload.hpp  |  30
-rw-r--r--  src/backends/reference/workloads/RefWorkloads.hpp     |   1
-rw-r--r--  src/backends/reference/workloads/Tile.cpp             | 102
-rw-r--r--  src/backends/reference/workloads/Tile.hpp              |  21
11 files changed, 288 insertions(+), 1 deletion(-)
diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp
index e94478f088..9d396e5db9 100644
--- a/src/backends/reference/RefLayerSupport.cpp
+++ b/src/backends/reference/RefLayerSupport.cpp
@@ -402,6 +402,11 @@ bool RefLayerSupport::IsLayerSupported(const LayerType& type,
reasonIfUnsupported);
case LayerType::Subtraction:
return IsSubtractionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
+ case LayerType::Tile:
+ return IsTileSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const TileDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
case LayerType::Transpose:
return IsTransposeSupported(infos[0],
infos[1],
@@ -2693,6 +2698,35 @@ bool RefLayerSupport::IsPreluSupported(const TensorInfo& input,
return supported;
}
+bool RefLayerSupport::IsTileSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const TileDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ IgnoreUnused(descriptor);
+
+ bool supported = true;
+
+ std::array<DataType, 7> supportedTypes
+ {
+ DataType::Float32,
+ DataType::Float16,
+ DataType::QAsymmS8,
+ DataType::QAsymmU8,
+ DataType::QSymmS8,
+ DataType::QSymmS16,
+ DataType::Signed32
+ };
+
+ supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
+ "Tile: input type not supported.");
+
+ supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
+ "Tile: output type not supported");
+
+ return supported;
+}
+
bool RefLayerSupport::IsTransposeConvolution2dSupported(const TensorInfo& input,
const TensorInfo& output,
const TransposeConvolution2dDescriptor& descriptor,
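Note: the new support check validates only the input and output data types; the descriptor is ignored. A minimal sketch of probing the reference backend for Tile support, assuming the backend-internal RefLayerSupport.hpp is reachable on the include path (shapes and multiples below are illustrative):

    // Query whether CpuRef can execute a Tile layer with the given tensors.
    #include <string>
    #include <armnn/Descriptors.hpp>
    #include <armnn/Optional.hpp>
    #include <armnn/Tensor.hpp>
    #include "RefLayerSupport.hpp"

    bool TileIsSupportedOnCpuRef()
    {
        armnn::TensorInfo input({2, 2}, armnn::DataType::Float32);
        armnn::TensorInfo output({4, 2}, armnn::DataType::Float32);

        armnn::TileDescriptor descriptor;
        descriptor.m_Multiples = {2, 1}; // repeat the first dimension twice

        std::string reason;
        armnn::RefLayerSupport layerSupport;
        return layerSupport.IsTileSupported(input, output, descriptor,
                                            armnn::Optional<std::string&>(reason));
    }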
diff --git a/src/backends/reference/RefLayerSupport.hpp b/src/backends/reference/RefLayerSupport.hpp
index 21d59e27fc..42b5814380 100644
--- a/src/backends/reference/RefLayerSupport.hpp
+++ b/src/backends/reference/RefLayerSupport.hpp
@@ -354,6 +354,11 @@ public:
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ bool IsTileSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const TileDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const;
+
bool IsTransposeConvolution2dSupported(
const TensorInfo& input,
const TensorInfo& output,
diff --git a/src/backends/reference/RefWorkloadFactory.cpp b/src/backends/reference/RefWorkloadFactory.cpp
index 7d5f742126..86a584452d 100644
--- a/src/backends/reference/RefWorkloadFactory.cpp
+++ b/src/backends/reference/RefWorkloadFactory.cpp
@@ -618,6 +618,11 @@ std::unique_ptr<IWorkload> RefWorkloadFactory::CreateWorkload(LayerType type,
return std::make_unique<RefSubtractionWorkload<float>>(*subtractionQueueDescriptor, info);
}
}
+ case LayerType::Tile:
+ {
+ auto tileQueueDescriptor = PolymorphicDowncast<const TileQueueDescriptor*>(&descriptor);
+ return std::make_unique<RefTileWorkload>(*tileQueueDescriptor, info);
+ }
case LayerType::Transpose:
{
auto transposeQueueDescriptor = PolymorphicDowncast<const TransposeQueueDescriptor*>(&descriptor);
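Note: with the factory case added, any Tile layer in a network optimized for CpuRef is executed by RefTileWorkload. A hedged end-to-end sketch, assuming INetwork::AddTileLayer is available in the release that ships this change (shapes and multiples are illustrative):

    // Build a one-layer network whose Tile dispatches to the new workload.
    #include <armnn/ArmNN.hpp>

    void BuildTileNetwork()
    {
        using namespace armnn;

        INetworkPtr network = INetwork::Create();

        TileDescriptor descriptor;
        descriptor.m_Multiples = {1, 3}; // repeat the last dimension three times

        IConnectableLayer* input  = network->AddInputLayer(0);
        IConnectableLayer* tile   = network->AddTileLayer(descriptor, "tile");
        IConnectableLayer* output = network->AddOutputLayer(0);

        input->GetOutputSlot(0).Connect(tile->GetInputSlot(0));
        tile->GetOutputSlot(0).Connect(output->GetInputSlot(0));

        input->GetOutputSlot(0).SetTensorInfo(TensorInfo({2, 2}, DataType::Float32));
        tile->GetOutputSlot(0).SetTensorInfo(TensorInfo({2, 6}, DataType::Float32));
        // Optimizing this network for {"CpuRef"} selects RefTileWorkload.
    }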
diff --git a/src/backends/reference/backend.mk b/src/backends/reference/backend.mk
index dfafa0ac39..7f047af930 100644
--- a/src/backends/reference/backend.mk
+++ b/src/backends/reference/backend.mk
@@ -102,6 +102,7 @@ BACKEND_SOURCES := \
workloads/RefStackWorkload.cpp \
workloads/RefStridedSliceWorkload.cpp \
workloads/RefSplitterWorkload.cpp \
+ workloads/RefTileWorkload.cpp \
workloads/RefTransposeConvolution2dWorkload.cpp \
workloads/RefTransposeWorkload.cpp \
workloads/RefUnidirectionalSequenceLstmWorkload.cpp \
@@ -115,6 +116,7 @@ BACKEND_SOURCES := \
workloads/StringMapping.cpp \
workloads/Softmax.cpp \
workloads/Splitter.cpp \
+ workloads/Tile.cpp \
workloads/TransposeConvolution2d.cpp
else
diff --git a/src/backends/reference/test/RefLayerTests.cpp b/src/backends/reference/test/RefLayerTests.cpp
index a68775e8e9..1f42397458 100644
--- a/src/backends/reference/test/RefLayerTests.cpp
+++ b/src/backends/reference/test/RefLayerTests.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017,2022, 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017,2022-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -1596,6 +1596,42 @@ ARMNN_AUTO_TEST_CASE_WITH_THF(ReverseV2Simple2Dim2AxisQAsymmS8, ReverseV2SimpleT
ARMNN_AUTO_TEST_CASE_WITH_THF(ReverseV2Simple2Dim2AxisQAsymmU8, ReverseV2SimpleTest2Dim2Axis<DataType::QAsymmU8>)
ARMNN_AUTO_TEST_CASE_WITH_THF(ReverseV2Simple2Dim2AxisQSymmS16, ReverseV2SimpleTest2Dim2Axis<DataType::QSymmS16>)
+// Tile
+ARMNN_AUTO_TEST_CASE_WITH_THF(Tile1dTestFloat32, Tile1dTest<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Tile2dTestFloat32, Tile2dTest<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Tile3dTestFloat32, Tile3dTest<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Tile4dTestFloat32, Tile4dTest<DataType::Float32>)
+
+ARMNN_AUTO_TEST_CASE_WITH_THF(Tile1dTestFloat16, Tile1dTest<DataType::Float16>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Tile2dTestFloat16, Tile2dTest<DataType::Float16>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Tile3dTestFloat16, Tile3dTest<DataType::Float16>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Tile4dTestFloat16, Tile4dTest<DataType::Float16>)
+
+ARMNN_AUTO_TEST_CASE_WITH_THF(Tile1dTestQAsymmS8, Tile1dTest<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Tile2dTestQAsymmS8, Tile2dTest<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Tile3dTestQAsymmS8, Tile3dTest<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Tile4dTestQAsymmS8, Tile4dTest<DataType::QAsymmS8>)
+
+ARMNN_AUTO_TEST_CASE_WITH_THF(Tile1dTestQAsymmU8, Tile1dTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Tile2dTestQAsymmU8, Tile2dTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Tile3dTestQAsymmU8, Tile3dTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Tile4dTestQAsymmU8, Tile4dTest<DataType::QAsymmU8>)
+
+ARMNN_AUTO_TEST_CASE_WITH_THF(Tile1dTestQSymmS8, Tile1dTest<DataType::QSymmS8>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Tile2dTestQSymmS8, Tile2dTest<DataType::QSymmS8>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Tile3dTestQSymmS8, Tile3dTest<DataType::QSymmS8>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Tile4dTestQSymmS8, Tile4dTest<DataType::QSymmS8>)
+
+ARMNN_AUTO_TEST_CASE_WITH_THF(Tile1dTestQSymmS16, Tile1dTest<DataType::QSymmS16>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Tile2dTestQSymmS16, Tile2dTest<DataType::QSymmS16>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Tile3dTestQSymmS16, Tile3dTest<DataType::QSymmS16>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Tile4dTestQSymmS16, Tile4dTest<DataType::QSymmS16>)
+
+ARMNN_AUTO_TEST_CASE_WITH_THF(Tile1dTestSigned32, Tile1dTest<DataType::Signed32>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Tile2dTestSigned32, Tile2dTest<DataType::Signed32>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Tile3dTestSigned32, Tile3dTest<DataType::Signed32>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Tile4dTestSigned32, Tile4dTest<DataType::Signed32>)
+
// Fake Quantization
ARMNN_AUTO_TEST_CASE_WITH_THF(FakeQuantization, FakeQuantizationTest)
diff --git a/src/backends/reference/workloads/CMakeLists.txt b/src/backends/reference/workloads/CMakeLists.txt
index 28f6d2f371..9372568133 100644
--- a/src/backends/reference/workloads/CMakeLists.txt
+++ b/src/backends/reference/workloads/CMakeLists.txt
@@ -180,6 +180,8 @@ list(APPEND armnnRefBackendWorkloads_sources
RefStackWorkload.hpp
RefStridedSliceWorkload.cpp
RefStridedSliceWorkload.hpp
+ RefTileWorkload.cpp
+ RefTileWorkload.hpp
RefTransposeConvolution2dWorkload.cpp
RefTransposeConvolution2dWorkload.hpp
RefTransposeWorkload.cpp
@@ -209,6 +211,8 @@ list(APPEND armnnRefBackendWorkloads_sources
StridedSlice.cpp
StringMapping.cpp
StringMapping.hpp
+ Tile.cpp
+ Tile.hpp
TensorBufferArrayView.hpp
TransposeConvolution2d.cpp
TransposeConvolution2d.hpp
diff --git a/src/backends/reference/workloads/RefTileWorkload.cpp b/src/backends/reference/workloads/RefTileWorkload.cpp
new file mode 100644
index 0000000000..9fa8c8c3d3
--- /dev/null
+++ b/src/backends/reference/workloads/RefTileWorkload.cpp
@@ -0,0 +1,47 @@
+//
+// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "RefTileWorkload.hpp"
+#include "RefWorkloadUtils.hpp"
+#include "Tile.hpp"
+#include "Profiling.hpp"
+
+namespace armnn
+{
+
+ RefTileWorkload::RefTileWorkload(const TileQueueDescriptor& descriptor, const WorkloadInfo& info)
+ : RefBaseWorkload(descriptor, info)
+ {}
+
+ void RefTileWorkload::Execute() const
+ {
+ Execute(m_Data.m_Inputs, m_Data.m_Outputs);
+ }
+
+ void RefTileWorkload::ExecuteAsync(ExecutionData& executionData)
+ {
+ WorkingMemDescriptor* workingMemDescriptor = static_cast<WorkingMemDescriptor*>(executionData.m_Data);
+ Execute(workingMemDescriptor->m_Inputs, workingMemDescriptor->m_Outputs);
+ }
+
+ void RefTileWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
+ {
+ ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefTileWorkload_Execute");
+
+ const TensorInfo& inputInfo = GetTensorInfo(inputs[0]);
+
+ std::unique_ptr<Decoder<float>> inputDecoder = MakeDecoder<float>(GetTensorInfo(inputs[0]),
+ inputs[0]->Map());
+
+ std::unique_ptr<Encoder<float>> outputEncoder = MakeEncoder<float>(GetTensorInfo(outputs[0]),
+ outputs[0]->Map());
+
+ Tile(m_Data.m_Parameters,
+ inputInfo,
+ *inputDecoder,
+ *outputEncoder);
+ }
+
+} // namespace armnn
\ No newline at end of file
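Note: the workload itself contains no type-specific code; MakeDecoder/MakeEncoder lift every supported data type to float and back, so a single float Tile kernel covers all seven types accepted by IsTileSupported. A simplified stand-in for that pattern, not the real Decoder/Encoder classes:

    // Sketch of the decode-to-float / encode-from-float idea for QAsymmU8.
    #include <cmath>
    #include <cstddef>
    #include <cstdint>

    struct QAsymmU8Decoder
    {
        const uint8_t* data; float scale; int32_t zeroPoint;
        float Get(std::size_t i) const
        {
            return scale * (static_cast<int32_t>(data[i]) - zeroPoint);
        }
    };

    struct QAsymmU8Encoder
    {
        uint8_t* data; float scale; int32_t zeroPoint;
        void Set(std::size_t i, float value)
        {
            // Clamping omitted for brevity.
            data[i] = static_cast<uint8_t>(std::lround(value / scale) + zeroPoint);
        }
    };

    // Any float kernel (here a plain copy) now works on quantized tensors.
    void CopyThroughFloat(const QAsymmU8Decoder& in, QAsymmU8Encoder& out, std::size_t n)
    {
        for (std::size_t i = 0; i < n; ++i) { out.Set(i, in.Get(i)); }
    }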
diff --git a/src/backends/reference/workloads/RefTileWorkload.hpp b/src/backends/reference/workloads/RefTileWorkload.hpp
new file mode 100644
index 0000000000..2fb8eab05e
--- /dev/null
+++ b/src/backends/reference/workloads/RefTileWorkload.hpp
@@ -0,0 +1,30 @@
+//
+// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "RefBaseWorkload.hpp"
+#include <armnn/backends/WorkloadData.hpp>
+
+#include "Tile.hpp"
+
+namespace armnn
+{
+
+ class RefTileWorkload : public RefBaseWorkload<TileQueueDescriptor>
+ {
+ public:
+ explicit RefTileWorkload(const TileQueueDescriptor& descriptor,
+ const WorkloadInfo& info);
+
+ void Execute() const override;
+ void ExecuteAsync(ExecutionData& executionData) override;
+
+ private:
+ void Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const;
+
+ };
+
+} // namespace armnn
\ No newline at end of file
diff --git a/src/backends/reference/workloads/RefWorkloads.hpp b/src/backends/reference/workloads/RefWorkloads.hpp
index e15a7ca047..a36eae501c 100644
--- a/src/backends/reference/workloads/RefWorkloads.hpp
+++ b/src/backends/reference/workloads/RefWorkloads.hpp
@@ -62,6 +62,7 @@
#include "RefStackWorkload.hpp"
#include "RefStridedSliceWorkload.hpp"
#include "RefSpaceToDepthWorkload.hpp"
+#include "RefTileWorkload.hpp"
#include "RefTransposeConvolution2dWorkload.hpp"
#include "RefTransposeWorkload.hpp"
#include "RefUnidirectionalSequenceLstmWorkload.hpp"
diff --git a/src/backends/reference/workloads/Tile.cpp b/src/backends/reference/workloads/Tile.cpp
new file mode 100644
index 0000000000..148c51de2e
--- /dev/null
+++ b/src/backends/reference/workloads/Tile.cpp
@@ -0,0 +1,102 @@
+//
+// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "Tile.hpp"
+#include "Encoders.hpp"
+#include <numeric>
+#include <armnn/backends/WorkloadData.hpp>
+#include <armnn/Logging.hpp>
+
+namespace armnn
+{
+
+// Converts a flattened index into a multi-dimensional coordinate.
+std::vector<uint32_t> IndexToCoordinates(std::vector<uint32_t>& shape, uint32_t index)
+{
+ std::vector<uint32_t> coordinates;
+ // Iterate through the dimensions from the last to the first; the unsigned
+ // counter wraps around past zero, which ends the loop once i >= shape.size().
+ for (std::size_t i = shape.size() - 1; i < shape.size(); --i)
+ {
+ // The coordinate for this dimension is the remaining index modulo the
+ // size of this dimension
+ coordinates.insert(coordinates.begin(), index % shape[i]);
+ // Carry the quotient into the next (more significant) dimension
+ index = index/shape[i];
+ }
+ return coordinates;
+}
+
+// Converts a multi-dimensional coordinate into a flattened index.
+uint32_t CoordinatesToIndex(TensorShape& shape, std::vector<uint32_t>& coordinates)
+{
+ uint32_t index = 0;
+ uint32_t base = 1;
+ uint32_t rank = shape.GetNumDimensions();
+ for (uint32_t i = rank; i > 0; --i)
+ {
+ index = index + coordinates[i - 1] * base;
+ base = base * shape[i - 1];
+ }
+ return index;
+}
+
+void Tile(const TileDescriptor& params,
+ const TensorInfo& inputInfo,
+ Decoder<float>& inputDecoder,
+ Encoder<float>& outputEncoder)
+{
+ // Input and output will always have the same rank
+ uint32_t rank = inputInfo.GetNumDimensions();
+
+ TensorShape inputShape = inputInfo.GetShape();
+
+ std::vector<uint32_t> outputShape(rank);
+ for (uint32_t i = 0; i < rank; ++i)
+ {
+ outputShape[i] = inputShape[i] * params.m_Multiples[i];
+ }
+
+ // If every multiple is 1, the output equals the input, so copy it straight through.
+ // std::adjacent_find with not_equal_to returns end() only when all elements are
+ // equal; combined with checking the first element, this tests "all multiples == 1".
+ if ( std::adjacent_find( params.m_Multiples.begin(), params.m_Multiples.end(),
+ std::not_equal_to<>() ) == params.m_Multiples.end() && params.m_Multiples[0] == 1)
+ {
+ for (uint32_t idx = 0; idx < inputInfo.GetNumElements(); ++idx)
+ {
+ float inputValue = inputDecoder.Get();
+ ++inputDecoder;
+ outputEncoder.Set(inputValue);
+ ++outputEncoder;
+ }
+ return;
+ }
+
+ std::vector<float> inputData = inputDecoder.DecodeTensor(inputInfo.GetShape());
+ std::vector<float> outputData;
+ auto outputNumElements = inputData.size() * static_cast<uint32_t>(std::accumulate(begin(params.m_Multiples),
+ end(params.m_Multiples),
+ 1,
+ std::multiplies<>()));
+ outputData.reserve(outputNumElements);
+
+ for (uint32_t outputIndex = 0; outputIndex < outputNumElements; ++outputIndex)
+ {
+ std::vector<uint32_t> outputCoords = IndexToCoordinates(outputShape, outputIndex);
+
+ // Converting output coordinates to input coordinates using modulus
+ std::vector<uint32_t> inputCoordinates;
+ inputCoordinates.reserve(rank);
+ for (uint32_t i = 0; i < rank; ++i)
+ {
+ inputCoordinates.push_back(outputCoords[i] % inputShape[i]);
+ }
+
+ uint32_t inputIndex = CoordinatesToIndex(inputShape, inputCoordinates);
+
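+ // operator[] repositions the encoder at outputIndex; Set then writes the value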
+ outputEncoder[outputIndex];
+ outputEncoder.Set(inputData[inputIndex]);
+ }
+}
+
+} // namespace armnn
\ No newline at end of file
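Note: the kernel reduces to a coordinate round trip — flatten each output index into coordinates, wrap every coordinate into the input shape with a modulus, and flatten back. A self-contained illustration of that arithmetic for a 2x2 input tiled with multiples {2, 2} (values are illustrative):

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    int main()
    {
        std::vector<uint32_t> inputShape = {2, 2};
        std::vector<uint32_t> multiples  = {2, 2};
        std::vector<uint32_t> outputShape = {inputShape[0] * multiples[0],
                                             inputShape[1] * multiples[1]};
        std::vector<float> input = {1.f, 2.f, 3.f, 4.f}; // row-major 2x2

        std::vector<float> output(outputShape[0] * outputShape[1]);
        for (uint32_t outIdx = 0; outIdx < output.size(); ++outIdx)
        {
            // Flattened index -> output coordinates (row-major).
            uint32_t row = outIdx / outputShape[1];
            uint32_t col = outIdx % outputShape[1];
            // Wrap into the input with a modulus, then flatten back.
            output[outIdx] = input[(row % inputShape[0]) * inputShape[1]
                                   + (col % inputShape[1])];
        }
        // Prints four copies of the input:
        // 1 2 1 2 / 3 4 3 4 / 1 2 1 2 / 3 4 3 4
        for (uint32_t r = 0; r < outputShape[0]; ++r)
        {
            for (uint32_t c = 0; c < outputShape[1]; ++c)
            {
                std::printf("%g ", output[r * outputShape[1] + c]);
            }
            std::printf("\n");
        }
        return 0;
    }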
diff --git a/src/backends/reference/workloads/Tile.hpp b/src/backends/reference/workloads/Tile.hpp
new file mode 100644
index 0000000000..4e7ffc9040
--- /dev/null
+++ b/src/backends/reference/workloads/Tile.hpp
@@ -0,0 +1,21 @@
+//
+// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <armnn/Tensor.hpp>
+#include "Encoders.hpp"
+#include "Decoders.hpp"
+#include "armnn/Descriptors.hpp"
+
+namespace armnn
+{
+
+void Tile(const TileDescriptor& params,
+ const TensorInfo& inputInfo,
+ Decoder<float>& inputDecoder,
+ Encoder<float>& outputEncoder);
+
+} // namespace armnn
\ No newline at end of file