path: root/src/backends/reference
author     Tianle Cheng <tianle.cheng@arm.com>   2023-06-28 13:20:47 +0100
committer  Tianle Cheng <tianle.cheng@arm.com>   2023-07-04 10:36:43 +0000
commit     988354de127528bdebb98fd25661fbf2f39f17dd (patch)
tree       c06f5250bdd0182055ac9e84e20d6e338518ad08 /src/backends/reference
parent     9414936e62ed8cd18cc33c0390bb605a782556c6 (diff)
download   armnn-988354de127528bdebb98fd25661fbf2f39f17dd.tar.gz
IVGCVSW-7831: Front end and Reference Implementation for REVERSE_V2
* Descriptors added for ReverseV2
* Layer definition added
* Input validation added
* Reference workload implementation for ReverseV2 added
* Reference layer unit tests made for ReverseV2
* CompareTensors method updated to support comparison between empty tensors
* CMake and other build files updated

Signed-off-by: Tianle Cheng <tianle.cheng@arm.com>
Change-Id: I805738454421309fda77c44218a8df171d68dc18
Diffstat (limited to 'src/backends/reference')
-rw-r--r--  src/backends/reference/RefLayerSupport.cpp                 |  35
-rw-r--r--  src/backends/reference/RefLayerSupport.hpp                 |   5
-rw-r--r--  src/backends/reference/RefWorkloadFactory.cpp              |   5
-rw-r--r--  src/backends/reference/backend.mk                          |   4
-rw-r--r--  src/backends/reference/test/RefLayerTests.cpp              |  31
-rw-r--r--  src/backends/reference/workloads/CMakeLists.txt            |   4
-rw-r--r--  src/backends/reference/workloads/RefReverseV2Workload.cpp  |  48
-rw-r--r--  src/backends/reference/workloads/RefReverseV2Workload.hpp  |  30
-rw-r--r--  src/backends/reference/workloads/RefWorkloads.hpp          |   1
-rw-r--r--  src/backends/reference/workloads/ReverseV2Impl.cpp         | 133
-rw-r--r--  src/backends/reference/workloads/ReverseV2Impl.hpp         |  21
11 files changed, 316 insertions(+), 1 deletion(-)
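
For orientation before the diff itself: REVERSE_V2 follows the TensorFlow Lite semantics, mirroring the tensor along every axis listed in the descriptor, while an empty axis list (or an empty tensor) simply copies input to output. A minimal standalone sketch of that behaviour on a 2x3 row-major tensor reversed along axis 1 (plain C++, illustrative only, not part of the patch):

#include <array>
#include <cstdio>

int main()
{
    const unsigned int rows = 2;
    const unsigned int cols = 3;
    std::array<float, 6> input  = { 1, 2, 3,
                                    4, 5, 6 };
    std::array<float, 6> output = {};

    for (unsigned int r = 0; r < rows; ++r)
    {
        for (unsigned int c = 0; c < cols; ++c)
        {
            // Axis 1 is reversed, so column c maps to column (cols - c - 1).
            output[r * cols + (cols - c - 1)] = input[r * cols + c];
        }
    }

    for (unsigned int r = 0; r < rows; ++r)
    {
        std::printf("%g %g %g\n", output[r * cols], output[r * cols + 1], output[r * cols + 2]);
    }
    // Prints:
    // 3 2 1
    // 6 5 4
    return 0;
}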
diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp
index 81e5c837a5..1d5fab1adc 100644
--- a/src/backends/reference/RefLayerSupport.cpp
+++ b/src/backends/reference/RefLayerSupport.cpp
@@ -341,6 +341,11 @@ bool RefLayerSupport::IsLayerSupported(const LayerType& type,
infos[1],
*(PolymorphicDowncast<const ResizeDescriptor*>(&descriptor)),
reasonIfUnsupported);
+ case LayerType::ReverseV2:
+ return IsReverseV2Supported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const ReverseV2Descriptor*>(&descriptor)),
+ reasonIfUnsupported);
case LayerType::Reduce:
return IsReduceSupported(infos[0],
infos[1],
@@ -2356,6 +2361,36 @@ bool RefLayerSupport::IsResizeSupported(const TensorInfo& input,
return supported;
}
+bool RefLayerSupport::IsReverseV2Supported(const TensorInfo& input,
+ const TensorInfo& output,
+ const ReverseV2Descriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ IgnoreUnused(descriptor);
+ bool supported = true;
+ // ReverseV2 is data type agnostic so it can support all the types in the Reference backend
+ std::array<DataType,6> supportedTypes =
+ {
+ DataType::BFloat16,
+ DataType::Float32,
+ DataType::Float16,
+ DataType::QAsymmS8,
+ DataType::QAsymmU8,
+ DataType::QSymmS16
+ };
+
+ supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
+ "Reference ReverseV2: input type not supported");
+
+ supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
+ "Reference ReverseV2: output type not supported");
+
+ supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
+ "Reference ReverseV2: input and output types not matching");
+
+ return supported;
+}
+
bool RefLayerSupport::IsShapeSupported(const TensorInfo& input,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported) const
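
A hypothetical caller-side fragment for the check added above (assumes it is built inside the armnn source tree with the reference backend include paths; names beyond those visible in the hunks are illustrative): it queries IsReverseV2Supported and reads back the reason string populated by CheckSupportRule.

#include <armnn/Descriptors.hpp>
#include <armnn/Optional.hpp>
#include <armnn/Tensor.hpp>
#include <string>

#include "RefLayerSupport.hpp"   // src/backends/reference

bool IsReverseV2SupportedOnCpuRef(std::string& reason)
{
    armnn::RefLayerSupport layerSupport;

    armnn::TensorInfo input({ 2, 3 }, armnn::DataType::Float32);
    armnn::TensorInfo output({ 2, 3 }, armnn::DataType::Float32);
    armnn::ReverseV2Descriptor descriptor;   // descriptor type introduced by this patch

    // On failure, 'reason' holds e.g. "Reference ReverseV2: input type not supported".
    return layerSupport.IsReverseV2Supported(input, output, descriptor,
                                             armnn::Optional<std::string&>(reason));
}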
diff --git a/src/backends/reference/RefLayerSupport.hpp b/src/backends/reference/RefLayerSupport.hpp
index 8e1f68ebfc..0afb9c2c94 100644
--- a/src/backends/reference/RefLayerSupport.hpp
+++ b/src/backends/reference/RefLayerSupport.hpp
@@ -299,6 +299,11 @@ public:
const ResizeDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ bool IsReverseV2Supported(const TensorInfo& input,
+ const TensorInfo& output,
+ const ReverseV2Descriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const;
+
bool IsShapeSupported(const TensorInfo& input,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
diff --git a/src/backends/reference/RefWorkloadFactory.cpp b/src/backends/reference/RefWorkloadFactory.cpp
index 10f623eef3..7d5f742126 100644
--- a/src/backends/reference/RefWorkloadFactory.cpp
+++ b/src/backends/reference/RefWorkloadFactory.cpp
@@ -560,6 +560,11 @@ std::unique_ptr<IWorkload> RefWorkloadFactory::CreateWorkload(LayerType type,
auto resizeQueueDescriptor = PolymorphicDowncast<const ResizeQueueDescriptor*>(&descriptor);
return std::make_unique<RefResizeWorkload>(*resizeQueueDescriptor, info);
}
+ case LayerType::ReverseV2:
+ {
+ auto reverseV2QueueDescriptor = PolymorphicDowncast<const ReverseV2QueueDescriptor*>(&descriptor);
+ return std::make_unique<RefReverseV2Workload>(*reverseV2QueueDescriptor, info);
+ }
case LayerType::Shape:
{
auto shapeQueueDescriptor = PolymorphicDowncast<const ShapeQueueDescriptor*>(&descriptor);
diff --git a/src/backends/reference/backend.mk b/src/backends/reference/backend.mk
index c23984c3e9..dfafa0ac39 100644
--- a/src/backends/reference/backend.mk
+++ b/src/backends/reference/backend.mk
@@ -1,5 +1,5 @@
#
-# Copyright © 2017 ARM Ltd. All rights reserved.
+# Copyright © 2017-2023 ARM Ltd and Contributors. All rights reserved.
# SPDX-License-Identifier: MIT
#
@@ -94,6 +94,7 @@ BACKEND_SOURCES := \
workloads/RefReduceWorkload.cpp \
workloads/RefReshapeWorkload.cpp \
workloads/RefResizeWorkload.cpp \
+ workloads/RefReverseV2Workload.cpp \
workloads/RefSliceWorkload.cpp \
workloads/RefSoftmaxWorkload.cpp \
workloads/RefSpaceToBatchNdWorkload.cpp \
@@ -105,6 +106,7 @@ BACKEND_SOURCES := \
workloads/RefTransposeWorkload.cpp \
workloads/RefUnidirectionalSequenceLstmWorkload.cpp \
workloads/Resize.cpp \
+ workloads/ReverseV2Impl.cpp \
workloads/Slice.cpp \
workloads/SpaceToBatchNd.cpp \
workloads/SpaceToDepth.cpp \
diff --git a/src/backends/reference/test/RefLayerTests.cpp b/src/backends/reference/test/RefLayerTests.cpp
index 6e697723e9..a68775e8e9 100644
--- a/src/backends/reference/test/RefLayerTests.cpp
+++ b/src/backends/reference/test/RefLayerTests.cpp
@@ -1565,6 +1565,37 @@ ARMNN_AUTO_TEST_CASE_WITH_THF(AlignCornersResizeNearestNeighbourUint16Nchw,
AlignCornersResizeNearestNeighbourTest<DataType::QSymmS16>,
DataLayout::NCHW)
+// ReverseV2
+ARMNN_AUTO_TEST_CASE_WITH_THF(ReverseV2SimpleEmptyAxisFloat32, ReverseV2SimpleTestEmptyAxis<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(ReverseV2SimpleEmptyTensorFloat32, ReverseV2SimpleTestEmptyTensor<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(ReverseV2Simple1DimFloat32, ReverseV2SimpleTest1Dim<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(ReverseV2Simple2Dim1AxisFloat32, ReverseV2SimpleTest2Dim1Axis<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(ReverseV2Simple2Dim2AxisFloat32, ReverseV2SimpleTest2Dim2Axis<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(ReverseV2Simple3Dim1AxisFloat32, ReverseV2SimpleTest3Dim1Axis<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(ReverseV2Simple3Dim2AxisFloat32, ReverseV2SimpleTest3Dim2Axis<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(ReverseV2Simple3Dim3AxisFloat32, ReverseV2SimpleTest3Dim3Axis<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(ReverseV2Simple4Dim1AxisFloat32, ReverseV2SimpleTest4Dim1Axis<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(ReverseV2Simple4Dim2AxisFloat32, ReverseV2SimpleTest4Dim2Axis<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(ReverseV2Simple4Dim3AxisFloat32, ReverseV2SimpleTest4Dim3Axis<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(ReverseV2Simple4Dim4AxisFloat32, ReverseV2SimpleTest4Dim4Axis<DataType::Float32>)
+
+ARMNN_AUTO_TEST_CASE_WITH_THF(ReverseV2EvenRowOddCol2DimFloat32, ReverseV2EvenRowOddColTest2Dim<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(ReverseV2EvenRowOddCol3DimFloat32, ReverseV2EvenRowOddColTest3Dim<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(ReverseV2EvenRowEvenCol2DimFloat32, ReverseV2EvenRowEvenColTest2Dim<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(ReverseV2EvenRowEvenCol3DimFloat32, ReverseV2EvenRowEvenColTest3Dim<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(ReverseV2OddRowOddCol2DimFloat32, ReverseV2OddRowOddColTest2Dim<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(ReverseV2OddRowOddCol3DimFloat32, ReverseV2OddRowOddColTest3Dim<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(ReverseV2OddRowEvenCol2DimFloat32, ReverseV2OddRowEvenColTest2Dim<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(ReverseV2OddRowEvenCol3DimFloat32, ReverseV2OddRowEvenColTest3Dim<DataType::Float32>)
+
+ARMNN_AUTO_TEST_CASE_WITH_THF(ReverseV2NegAxis2Dim1AxisFloat32, ReverseV2NegAxisTest2Dim1Axis<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(ReverseV2NegAxis3Dim2AxisFloat32, ReverseV2NegAxisTest3Dim2Axis<DataType::Float32>)
+
+ARMNN_AUTO_TEST_CASE_WITH_THF(ReverseV2Simple2Dim2AxisFloat16, ReverseV2SimpleTest2Dim2Axis<DataType::Float16>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(ReverseV2Simple2Dim2AxisQAsymmS8, ReverseV2SimpleTest2Dim2Axis<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(ReverseV2Simple2Dim2AxisQAsymmU8, ReverseV2SimpleTest2Dim2Axis<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(ReverseV2Simple2Dim2AxisQSymmS16, ReverseV2SimpleTest2Dim2Axis<DataType::QSymmS16>)
+
// Fake Quantization
ARMNN_AUTO_TEST_CASE_WITH_THF(FakeQuantization, FakeQuantizationTest)
diff --git a/src/backends/reference/workloads/CMakeLists.txt b/src/backends/reference/workloads/CMakeLists.txt
index 3592f2293d..28f6d2f371 100644
--- a/src/backends/reference/workloads/CMakeLists.txt
+++ b/src/backends/reference/workloads/CMakeLists.txt
@@ -66,6 +66,8 @@ list(APPEND armnnRefBackendWorkloads_sources
PreluImpl.hpp
Reduce.cpp
Reduce.hpp
+ ReverseV2Impl.cpp
+ ReverseV2Impl.hpp
RefActivationWorkload.cpp
RefActivationWorkload.hpp
RefArgMinMaxWorkload.cpp
@@ -161,6 +163,8 @@ list(APPEND armnnRefBackendWorkloads_sources
RefReshapeWorkload.hpp
RefResizeWorkload.cpp
RefResizeWorkload.hpp
+ RefReverseV2Workload.cpp
+ RefReverseV2Workload.hpp
RefShapeWorkload.hpp
RefSliceWorkload.cpp
RefSliceWorkload.hpp
diff --git a/src/backends/reference/workloads/RefReverseV2Workload.cpp b/src/backends/reference/workloads/RefReverseV2Workload.cpp
new file mode 100644
index 0000000000..cd2d9f930b
--- /dev/null
+++ b/src/backends/reference/workloads/RefReverseV2Workload.cpp
@@ -0,0 +1,48 @@
+//
+// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "RefReverseV2Workload.hpp"
+
+#include "ReverseV2Impl.hpp"
+#include "RefWorkloadUtils.hpp"
+#include "Profiling.hpp"
+
+namespace armnn
+{
+
+ RefReverseV2Workload::RefReverseV2Workload(const ReverseV2QueueDescriptor& descriptor, const WorkloadInfo& info)
+ : RefBaseWorkload(descriptor, info)
+ {}
+
+ void RefReverseV2Workload::Execute() const
+ {
+ Execute(m_Data.m_Inputs, m_Data.m_Outputs);
+ }
+
+ void RefReverseV2Workload::ExecuteAsync(ExecutionData& executionData)
+ {
+ WorkingMemDescriptor* workingMemDescriptor = static_cast<WorkingMemDescriptor*>(executionData.m_Data);
+ Execute(workingMemDescriptor->m_Inputs, workingMemDescriptor->m_Outputs);
+ }
+
+ void RefReverseV2Workload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
+ {
+ ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefReverseV2Workload_Execute");
+
+ const TensorInfo& inputInfo = GetTensorInfo(inputs[0]);
+
+ std::unique_ptr<Decoder<float>> inputDecoder = MakeDecoder<float>(GetTensorInfo(inputs[0]),
+ inputs[0]->Map());
+
+ std::unique_ptr<Encoder<float>> outputEncoder = MakeEncoder<float>(GetTensorInfo(outputs[0]),
+ outputs[0]->Map());
+
+ ReverseV2(m_Data.m_Parameters,
+ inputInfo,
+ *inputDecoder,
+ *outputEncoder);
+ }
+
+} // namespace armnn
\ No newline at end of file
diff --git a/src/backends/reference/workloads/RefReverseV2Workload.hpp b/src/backends/reference/workloads/RefReverseV2Workload.hpp
new file mode 100644
index 0000000000..89e7c9ea38
--- /dev/null
+++ b/src/backends/reference/workloads/RefReverseV2Workload.hpp
@@ -0,0 +1,30 @@
+//
+// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "RefBaseWorkload.hpp"
+#include <armnn/backends/WorkloadData.hpp>
+
+#include "ReverseV2Impl.hpp"
+
+namespace armnn
+{
+
+ class RefReverseV2Workload : public RefBaseWorkload<ReverseV2QueueDescriptor>
+ {
+ public:
+ explicit RefReverseV2Workload(const ReverseV2QueueDescriptor& descriptor,
+ const WorkloadInfo& info);
+
+ void Execute() const override;
+ void ExecuteAsync(ExecutionData& executionData) override;
+
+ private:
+ void Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const;
+
+ };
+
+} // namespace armnn
\ No newline at end of file
diff --git a/src/backends/reference/workloads/RefWorkloads.hpp b/src/backends/reference/workloads/RefWorkloads.hpp
index dba880bafc..e15a7ca047 100644
--- a/src/backends/reference/workloads/RefWorkloads.hpp
+++ b/src/backends/reference/workloads/RefWorkloads.hpp
@@ -53,6 +53,7 @@
#include "RefReduceWorkload.hpp"
#include "RefReshapeWorkload.hpp"
#include "RefResizeWorkload.hpp"
+#include "RefReverseV2Workload.hpp"
#include "RefShapeWorkload.hpp"
#include "RefSliceWorkload.hpp"
#include "RefSplitterWorkload.hpp"
diff --git a/src/backends/reference/workloads/ReverseV2Impl.cpp b/src/backends/reference/workloads/ReverseV2Impl.cpp
new file mode 100644
index 0000000000..f6d5fd74d1
--- /dev/null
+++ b/src/backends/reference/workloads/ReverseV2Impl.cpp
@@ -0,0 +1,133 @@
+//
+// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "ReverseV2Impl.hpp"
+
+#include <armnn/backends/WorkloadData.hpp>
+#include <armnn/Logging.hpp>
+#include <armnnUtils/Permute.hpp>
+
+namespace armnn
+{
+
+// Get multi-dimensional index for input tensor
+std::vector<unsigned int> ReverseGetMultIdx(const unsigned int idx,
+ unsigned int inputRank,
+ std::vector<unsigned int>& elementNumInner)
+{
+ std::vector<unsigned int> indexList(inputRank);
+
+ unsigned int mIdx = idx;
+
+ for (unsigned int iDim = 0; iDim < inputRank; ++iDim)
+ {
+ indexList[iDim] = static_cast<unsigned int>(mIdx / elementNumInner[iDim]);
+ mIdx %= elementNumInner[iDim];
+ }
+
+ return indexList;
+}
+
+// Get flattened index for output encoder
+unsigned int ReverseGetFlatIdx(const std::vector<unsigned int>& idxList,
+ unsigned int inputRank,
+ std::vector<unsigned int>& elementNumInner)
+{
+ unsigned int idx = 0;
+
+ for (unsigned int iDim = 0; iDim < inputRank; ++iDim)
+ {
+ idx += idxList[iDim] * elementNumInner[iDim];
+ }
+
+ return idx;
+}
+
+// Relocate the coordinate to the reversed tensor
+unsigned int ReverseRelocateIdx(unsigned int idx,
+ unsigned int inputRank,
+ std::vector<bool>& axisFlag,
+ std::vector<unsigned int>& dimSize,
+ std::vector<unsigned int>& elementNumInner)
+{
+ // Get the multidimensional index list for input
+ auto inputIdxList = ReverseGetMultIdx(idx, inputRank, elementNumInner);
+
+ std::vector<unsigned int> outputIdxList(inputRank);
+
+ // Relocate the input index to the output one
+ for (unsigned int iDim = 0; iDim < inputRank; ++iDim)
+ {
+ if (axisFlag[iDim])
+ {
+ outputIdxList[iDim] = dimSize[iDim] - inputIdxList[iDim] - 1;
+ }
+ else
+ {
+ outputIdxList[iDim] = inputIdxList[iDim];
+ }
+ }
+
+ // Get the 1-dimensional flattened index for output
+ unsigned int outputIdx = ReverseGetFlatIdx(outputIdxList, inputRank, elementNumInner);
+ return outputIdx;
+}
+
+void ReverseV2(const ReverseV2Descriptor& params,
+ const TensorInfo& inputInfo,
+ Decoder<float>& inputDecoder,
+ Encoder<float>& outputEncoder)
+{
+ // Empty axis and empty tensor case: copy input to output
+ if (params.m_Axis.empty() || inputInfo.GetNumElements() == 0)
+ {
+ for (unsigned idx = 0; idx < inputInfo.GetNumElements(); idx++)
+ {
+ float inputValue = inputDecoder.Get();
+ inputDecoder += 1;
+ outputEncoder.Set(inputValue);
+ outputEncoder += 1;
+ }
+ return;
+ }
+
+ unsigned int inputRank = static_cast<unsigned int>(inputInfo.GetNumDimensions());
+
+ std::vector<bool> axisFlag(inputRank, false);
+ std::vector<unsigned int> dimSize(inputRank, 0);
+
+ // Make sure the axes are positive
+ for (int32_t axisElement: params.m_Axis)
+ {
+ axisElement = axisElement < 0 ? axisElement + static_cast<int32_t>(inputRank) : axisElement;
+ axisFlag[static_cast<uint32_t>(axisElement)] = true;
+ }
+
+ const TensorShape &inputShape = inputInfo.GetShape();
+
+ unsigned int elementNum = inputInfo.GetNumElements();
+ unsigned int baseDimSize = 1;
+
+ std::vector<unsigned int> elementNumInner;
+
+ // Get the number of element within the specific dimension
+ for (unsigned int iDim = 0; iDim < inputRank; ++iDim) {
+ dimSize[iDim] = inputShape[iDim];
+ baseDimSize *= dimSize[iDim];
+ elementNumInner.push_back(static_cast<unsigned int>(elementNum / baseDimSize));
+ }
+
+ // Iterate through all elements
+ for (unsigned int idx = 0; idx < elementNum; ++idx)
+ {
+ float inputValue = inputDecoder.Get();
+ inputDecoder += 1;
+ auto outputIdx = ReverseRelocateIdx(idx, inputRank, axisFlag, dimSize, elementNumInner);
+ outputEncoder[outputIdx]; // operator[] repositions the encoder at the relocated output index
+ outputEncoder.Set(inputValue);
+ }
+}
+
+} // namespace armnn
\ No newline at end of file
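
To make the index relocation above concrete: each flat input index is decomposed into per-dimension coordinates using the inner element counts (row-major strides), the coordinates on flagged axes are mirrored, and the result is flattened back. A self-contained sketch with a worked shape {2, 3} / axis 1 example (hypothetical helper names, assuming row-major layout as in the implementation):

#include <cstdio>
#include <vector>

unsigned int RelocateIndex(unsigned int idx,
                           const std::vector<unsigned int>& dimSize,
                           const std::vector<bool>& axisFlag,
                           const std::vector<unsigned int>& innerCount)
{
    unsigned int out = 0;
    for (size_t d = 0; d < dimSize.size(); ++d)
    {
        unsigned int coord = (idx / innerCount[d]) % dimSize[d]; // multi-dimensional index
        if (axisFlag[d])
        {
            coord = dimSize[d] - coord - 1;                      // mirror on reversed axes
        }
        out += coord * innerCount[d];                            // flatten back
    }
    return out;
}

int main()
{
    // Shape {2, 3}, reversing axis 1; inner element counts are {3, 1}.
    std::vector<unsigned int> dimSize    = { 2, 3 };
    std::vector<bool>         axisFlag   = { false, true };
    std::vector<unsigned int> innerCount = { 3, 1 };

    for (unsigned int idx = 0; idx < 6; ++idx)
    {
        std::printf("%u -> %u\n", idx, RelocateIndex(idx, dimSize, axisFlag, innerCount));
    }
    // Prints 0->2, 1->1, 2->0, 3->5, 4->4, 5->3: each row's columns are mirrored.
    return 0;
}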
diff --git a/src/backends/reference/workloads/ReverseV2Impl.hpp b/src/backends/reference/workloads/ReverseV2Impl.hpp
new file mode 100644
index 0000000000..bc1fe1d432
--- /dev/null
+++ b/src/backends/reference/workloads/ReverseV2Impl.hpp
@@ -0,0 +1,21 @@
+//
+// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "Encoders.hpp"
+#include "Decoders.hpp"
+
+#include <armnn/backends/WorkloadData.hpp>
+
+namespace armnn
+{
+
+void ReverseV2(const ReverseV2Descriptor& params,
+ const TensorInfo& inputInfo,
+ Decoder<float>& inputDecoder,
+ Encoder<float>& outputEncoder);
+
+} // namespace armnn
\ No newline at end of file
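
Finally, a hypothetical fragment driving the new helper directly through the Decoder/Encoder layer (assumes the armnn reference workload headers from this patch and that m_Axis is the std::vector<int32_t> introduced with the descriptor); it also exercises the negative-axis normalisation:

#include <armnn/Descriptors.hpp>
#include <armnn/Tensor.hpp>
#include <vector>

#include "Decoders.hpp"
#include "Encoders.hpp"
#include "ReverseV2Impl.hpp"

void RunReverseV2Example()
{
    armnn::TensorInfo info({ 2, 3 }, armnn::DataType::Float32);

    std::vector<float> input  = { 1, 2, 3, 4, 5, 6 };
    std::vector<float> output(info.GetNumElements());

    armnn::ReverseV2Descriptor descriptor;
    descriptor.m_Axis = { -1 };   // -1 normalises to axis 1: reverse the columns

    auto decoder = armnn::MakeDecoder<float>(info, input.data());
    auto encoder = armnn::MakeEncoder<float>(info, output.data());

    armnn::ReverseV2(descriptor, info, *decoder, *encoder);
    // output is now { 3, 2, 1, 6, 5, 4 }
}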