From 988354de127528bdebb98fd25661fbf2f39f17dd Mon Sep 17 00:00:00 2001 From: Tianle Cheng Date: Wed, 28 Jun 2023 13:20:47 +0100 Subject: IVGCVSW-7831: Front end and Reference Implementation for REVERSE_V2 * Descriptors added for ReverseV2 * Layer definition added * Input validation added * Reference workload implementation for ReverseV2 added * Reference layer unit tests made for ReverseV2 * CompareTensors method updated to support comparison between empty tensors * CMake and other build files updated Signed-off-by: Tianle Cheng Change-Id: I805738454421309fda77c44218a8df171d68dc18 --- src/backends/reference/RefLayerSupport.cpp | 35 ++++++ src/backends/reference/RefLayerSupport.hpp | 5 + src/backends/reference/RefWorkloadFactory.cpp | 5 + src/backends/reference/backend.mk | 4 +- src/backends/reference/test/RefLayerTests.cpp | 31 +++++ src/backends/reference/workloads/CMakeLists.txt | 4 + .../reference/workloads/RefReverseV2Workload.cpp | 48 ++++++++ .../reference/workloads/RefReverseV2Workload.hpp | 30 +++++ src/backends/reference/workloads/RefWorkloads.hpp | 1 + src/backends/reference/workloads/ReverseV2Impl.cpp | 133 +++++++++++++++++++++ src/backends/reference/workloads/ReverseV2Impl.hpp | 21 ++++ 11 files changed, 316 insertions(+), 1 deletion(-) create mode 100644 src/backends/reference/workloads/RefReverseV2Workload.cpp create mode 100644 src/backends/reference/workloads/RefReverseV2Workload.hpp create mode 100644 src/backends/reference/workloads/ReverseV2Impl.cpp create mode 100644 src/backends/reference/workloads/ReverseV2Impl.hpp (limited to 'src/backends/reference') diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp index 81e5c837a5..1d5fab1adc 100644 --- a/src/backends/reference/RefLayerSupport.cpp +++ b/src/backends/reference/RefLayerSupport.cpp @@ -341,6 +341,11 @@ bool RefLayerSupport::IsLayerSupported(const LayerType& type, infos[1], *(PolymorphicDowncast(&descriptor)), reasonIfUnsupported); + case 
LayerType::ReverseV2: + return IsReverseV2Supported(infos[0], + infos[1], + *(PolymorphicDowncast(&descriptor)), + reasonIfUnsupported); case LayerType::Reduce: return IsReduceSupported(infos[0], infos[1], @@ -2356,6 +2361,36 @@ bool RefLayerSupport::IsResizeSupported(const TensorInfo& input, return supported; } +bool RefLayerSupport::IsReverseV2Supported(const TensorInfo& input, + const TensorInfo& output, + const ReverseV2Descriptor& descriptor, + Optional reasonIfUnsupported) const +{ + IgnoreUnused(descriptor); + bool supported = true; + // ReverseV2 is data type agnostic so it can support all the types in the Reference backend + std::array supportedTypes = + { + DataType::BFloat16, + DataType::Float32, + DataType::Float16, + DataType::QAsymmS8, + DataType::QAsymmU8, + DataType::QSymmS16 + }; + + supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported, + "Reference ReverseV2: input type not supported"); + + supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported, + "Reference ReverseV2: output type not supported"); + + supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported, + "Reference ReverseV2: input and output types not matching"); + + return supported; +} + bool RefLayerSupport::IsShapeSupported(const TensorInfo& input, const TensorInfo& output, Optional reasonIfUnsupported) const diff --git a/src/backends/reference/RefLayerSupport.hpp b/src/backends/reference/RefLayerSupport.hpp index 8e1f68ebfc..0afb9c2c94 100644 --- a/src/backends/reference/RefLayerSupport.hpp +++ b/src/backends/reference/RefLayerSupport.hpp @@ -299,6 +299,11 @@ public: const ResizeDescriptor& descriptor, Optional reasonIfUnsupported = EmptyOptional()) const override; + bool IsReverseV2Supported(const TensorInfo& input, + const TensorInfo& output, + const ReverseV2Descriptor& descriptor, + Optional reasonIfUnsupported = EmptyOptional()) const; + bool IsShapeSupported(const TensorInfo& input, const 
TensorInfo& output, Optional reasonIfUnsupported = EmptyOptional()) const override; diff --git a/src/backends/reference/RefWorkloadFactory.cpp b/src/backends/reference/RefWorkloadFactory.cpp index 10f623eef3..7d5f742126 100644 --- a/src/backends/reference/RefWorkloadFactory.cpp +++ b/src/backends/reference/RefWorkloadFactory.cpp @@ -560,6 +560,11 @@ std::unique_ptr RefWorkloadFactory::CreateWorkload(LayerType type, auto resizeQueueDescriptor = PolymorphicDowncast(&descriptor); return std::make_unique(*resizeQueueDescriptor, info); } + case LayerType::ReverseV2: + { + auto reverseV2QueueDescriptor = PolymorphicDowncast(&descriptor); + return std::make_unique(*reverseV2QueueDescriptor, info); + } case LayerType::Shape: { auto shapeQueueDescriptor = PolymorphicDowncast(&descriptor); diff --git a/src/backends/reference/backend.mk b/src/backends/reference/backend.mk index c23984c3e9..dfafa0ac39 100644 --- a/src/backends/reference/backend.mk +++ b/src/backends/reference/backend.mk @@ -1,5 +1,5 @@ # -# Copyright © 2017 ARM Ltd. All rights reserved. +# Copyright © 2017-2023 ARM Ltd and Contributors. All rights reserved. 
# SPDX-License-Identifier: MIT # @@ -94,6 +94,7 @@ BACKEND_SOURCES := \ workloads/RefReduceWorkload.cpp \ workloads/RefReshapeWorkload.cpp \ workloads/RefResizeWorkload.cpp \ + workloads/RefReverseV2Workload.cpp \ workloads/RefSliceWorkload.cpp \ workloads/RefSoftmaxWorkload.cpp \ workloads/RefSpaceToBatchNdWorkload.cpp \ @@ -105,6 +106,7 @@ BACKEND_SOURCES := \ workloads/RefTransposeWorkload.cpp \ workloads/RefUnidirectionalSequenceLstmWorkload.cpp \ workloads/Resize.cpp \ + workloads/ReverseV2Impl.cpp \ workloads/Slice.cpp \ workloads/SpaceToBatchNd.cpp \ workloads/SpaceToDepth.cpp \ diff --git a/src/backends/reference/test/RefLayerTests.cpp b/src/backends/reference/test/RefLayerTests.cpp index 6e697723e9..a68775e8e9 100644 --- a/src/backends/reference/test/RefLayerTests.cpp +++ b/src/backends/reference/test/RefLayerTests.cpp @@ -1565,6 +1565,37 @@ ARMNN_AUTO_TEST_CASE_WITH_THF(AlignCornersResizeNearestNeighbourUint16Nchw, AlignCornersResizeNearestNeighbourTest, DataLayout::NCHW) +// ReverseV2 +ARMNN_AUTO_TEST_CASE_WITH_THF(ReverseV2SimpleEmptyAxisFloat32, ReverseV2SimpleTestEmptyAxis) +ARMNN_AUTO_TEST_CASE_WITH_THF(ReverseV2SimpleEmptyTensorFloat32, ReverseV2SimpleTestEmptyTensor ) +ARMNN_AUTO_TEST_CASE_WITH_THF(ReverseV2Simple1DimFloat32, ReverseV2SimpleTest1Dim) +ARMNN_AUTO_TEST_CASE_WITH_THF(ReverseV2Simple2Dim1AxisFloat32, ReverseV2SimpleTest2Dim1Axis) +ARMNN_AUTO_TEST_CASE_WITH_THF(ReverseV2Simple2Dim2AxisFloat32, ReverseV2SimpleTest2Dim2Axis) +ARMNN_AUTO_TEST_CASE_WITH_THF(ReverseV2Simple3Dim1AxisFloat32, ReverseV2SimpleTest3Dim1Axis) +ARMNN_AUTO_TEST_CASE_WITH_THF(ReverseV2Simple3Dim2AxisFloat32, ReverseV2SimpleTest3Dim2Axis) +ARMNN_AUTO_TEST_CASE_WITH_THF(ReverseV2Simple3Dim3AxisFloat32, ReverseV2SimpleTest3Dim3Axis) +ARMNN_AUTO_TEST_CASE_WITH_THF(ReverseV2Simple4Dim1AxisFloat32, ReverseV2SimpleTest4Dim1Axis) +ARMNN_AUTO_TEST_CASE_WITH_THF(ReverseV2Simple4Dim2AxisFloat32, ReverseV2SimpleTest4Dim2Axis) 
+ARMNN_AUTO_TEST_CASE_WITH_THF(ReverseV2Simple4Dim3AxisFloat32, ReverseV2SimpleTest4Dim3Axis) +ARMNN_AUTO_TEST_CASE_WITH_THF(ReverseV2Simple4Dim4AxisFloat32, ReverseV2SimpleTest4Dim4Axis) + +ARMNN_AUTO_TEST_CASE_WITH_THF(ReverseV2EvenRowOddCol2DimFloat32, ReverseV2EvenRowOddColTest2Dim) +ARMNN_AUTO_TEST_CASE_WITH_THF(ReverseV2EvenRowOddCol3DimFloat32, ReverseV2EvenRowOddColTest3Dim) +ARMNN_AUTO_TEST_CASE_WITH_THF(ReverseV2EvenRowEvenCol2DimFloat32, ReverseV2EvenRowEvenColTest2Dim) +ARMNN_AUTO_TEST_CASE_WITH_THF(ReverseV2EvenRowEvenCol3DimFloat32, ReverseV2EvenRowEvenColTest3Dim) +ARMNN_AUTO_TEST_CASE_WITH_THF(ReverseV2OddRowOddCol2DimFloat32, ReverseV2OddRowOddColTest2Dim) +ARMNN_AUTO_TEST_CASE_WITH_THF(ReverseV2OddRowOddCol3DimFloat32, ReverseV2OddRowOddColTest3Dim) +ARMNN_AUTO_TEST_CASE_WITH_THF(ReverseV2OddRowEvenCol2DimFloat32, ReverseV2OddRowEvenColTest2Dim) +ARMNN_AUTO_TEST_CASE_WITH_THF(ReverseV2OddRowEvenCol3DimFloat32, ReverseV2OddRowEvenColTest3Dim) + +ARMNN_AUTO_TEST_CASE_WITH_THF(ReverseV2NegAxis2Dim1AxisFloat32, ReverseV2NegAxisTest2Dim1Axis) +ARMNN_AUTO_TEST_CASE_WITH_THF(ReverseV2NegAxis3Dim2AxisFloat32, ReverseV2NegAxisTest3Dim2Axis) + +ARMNN_AUTO_TEST_CASE_WITH_THF(ReverseV2Simple2Dim2AxisFloat16, ReverseV2SimpleTest2Dim2Axis) +ARMNN_AUTO_TEST_CASE_WITH_THF(ReverseV2Simple2Dim2AxisQAsymmS8, ReverseV2SimpleTest2Dim2Axis) +ARMNN_AUTO_TEST_CASE_WITH_THF(ReverseV2Simple2Dim2AxisQAsymmU8, ReverseV2SimpleTest2Dim2Axis) +ARMNN_AUTO_TEST_CASE_WITH_THF(ReverseV2Simple2Dim2AxisQSymmS16, ReverseV2SimpleTest2Dim2Axis) + // Fake Quantization ARMNN_AUTO_TEST_CASE_WITH_THF(FakeQuantization, FakeQuantizationTest) diff --git a/src/backends/reference/workloads/CMakeLists.txt b/src/backends/reference/workloads/CMakeLists.txt index 3592f2293d..28f6d2f371 100644 --- a/src/backends/reference/workloads/CMakeLists.txt +++ b/src/backends/reference/workloads/CMakeLists.txt @@ -66,6 +66,8 @@ list(APPEND armnnRefBackendWorkloads_sources PreluImpl.hpp Reduce.cpp Reduce.hpp + 
ReverseV2Impl.cpp + ReverseV2Impl.hpp + RefActivationWorkload.cpp RefActivationWorkload.hpp RefArgMinMaxWorkload.cpp @@ -161,6 +163,8 @@ list(APPEND armnnRefBackendWorkloads_sources RefReshapeWorkload.hpp RefResizeWorkload.cpp RefResizeWorkload.hpp + RefReverseV2Workload.cpp + RefReverseV2Workload.hpp RefShapeWorkload.hpp RefSliceWorkload.cpp RefSliceWorkload.hpp diff --git a/src/backends/reference/workloads/RefReverseV2Workload.cpp b/src/backends/reference/workloads/RefReverseV2Workload.cpp new file mode 100644 index 0000000000..cd2d9f930b --- /dev/null +++ b/src/backends/reference/workloads/RefReverseV2Workload.cpp @@ -0,0 +1,48 @@ +// +// Copyright © 2023 Arm Ltd and Contributors. All rights reserved. +// SPDX-License-Identifier: MIT +// + +#include "RefReverseV2Workload.hpp" + +#include "ReverseV2Impl.hpp" +#include "RefWorkloadUtils.hpp" +#include "Profiling.hpp" + +namespace armnn +{ + + RefReverseV2Workload::RefReverseV2Workload(const ReverseV2QueueDescriptor& descriptor, const WorkloadInfo& info) + : RefBaseWorkload(descriptor, info) + {} + + void RefReverseV2Workload::Execute() const + { + Execute(m_Data.m_Inputs, m_Data.m_Outputs); + } + + void RefReverseV2Workload::ExecuteAsync(ExecutionData& executionData) + { + WorkingMemDescriptor* workingMemDescriptor = static_cast<WorkingMemDescriptor*>(executionData.m_Data); + Execute(workingMemDescriptor->m_Inputs, workingMemDescriptor->m_Outputs); + } + + void RefReverseV2Workload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const + { + ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefReverseV2Workload_Execute"); + + const TensorInfo& inputInfo = GetTensorInfo(inputs[0]); + + std::unique_ptr<Decoder<float>> inputDecoder = MakeDecoder<float>(GetTensorInfo(inputs[0]), + inputs[0]->Map()); + + std::unique_ptr<Encoder<float>> outputEncoder = MakeEncoder<float>(GetTensorInfo(outputs[0]), + outputs[0]->Map()); + + ReverseV2(m_Data.m_Parameters, + inputInfo, + *inputDecoder, + *outputEncoder); + } + +} // namespace armnn \ No newline at end of file diff --git 
a/src/backends/reference/workloads/RefReverseV2Workload.hpp b/src/backends/reference/workloads/RefReverseV2Workload.hpp new file mode 100644 index 0000000000..89e7c9ea38 --- /dev/null +++ b/src/backends/reference/workloads/RefReverseV2Workload.hpp @@ -0,0 +1,30 @@ +// +// Copyright © 2023 Arm Ltd and Contributors. All rights reserved. +// SPDX-License-Identifier: MIT +// + +#pragma once + +#include "RefBaseWorkload.hpp" +#include <armnn/backends/WorkloadData.hpp> + +#include "ReverseV2Impl.hpp" + +namespace armnn +{ + + class RefReverseV2Workload : public RefBaseWorkload<ReverseV2QueueDescriptor> + { + public: + explicit RefReverseV2Workload(const ReverseV2QueueDescriptor& descriptor, + const WorkloadInfo& info); + + void Execute() const override; + void ExecuteAsync(ExecutionData& executionData) override; + + private: + void Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const; + + }; + +} // namespace armnn \ No newline at end of file diff --git a/src/backends/reference/workloads/RefWorkloads.hpp b/src/backends/reference/workloads/RefWorkloads.hpp index dba880bafc..e15a7ca047 100644 --- a/src/backends/reference/workloads/RefWorkloads.hpp +++ b/src/backends/reference/workloads/RefWorkloads.hpp @@ -53,6 +53,7 @@ #include "RefReduceWorkload.hpp" #include "RefReshapeWorkload.hpp" #include "RefResizeWorkload.hpp" +#include "RefReverseV2Workload.hpp" #include "RefShapeWorkload.hpp" #include "RefSliceWorkload.hpp" #include "RefSplitterWorkload.hpp" diff --git a/src/backends/reference/workloads/ReverseV2Impl.cpp b/src/backends/reference/workloads/ReverseV2Impl.cpp new file mode 100644 index 0000000000..f6d5fd74d1 --- /dev/null +++ b/src/backends/reference/workloads/ReverseV2Impl.cpp @@ -0,0 +1,133 @@ +// +// Copyright © 2023 Arm Ltd and Contributors. All rights reserved. 
+// SPDX-License-Identifier: MIT +// + +#include "ReverseV2Impl.hpp" + +#include <armnn/backends/WorkloadData.hpp> +#include <armnn/Tensor.hpp> +#include <vector> + +namespace armnn +{ + +// Get multi-dimensional index for input tensor +std::vector<unsigned int> ReverseGetMultIdx(const unsigned int idx, + unsigned int inputRank, + std::vector<unsigned int>& elementNumInner) +{ + std::vector<unsigned int> indexList(inputRank); + + unsigned int mIdx = idx; + + for (unsigned int iDim = 0; iDim < inputRank; ++iDim) + { + indexList[iDim] = static_cast<unsigned int>(mIdx / elementNumInner[iDim]); + mIdx %= elementNumInner[iDim]; + } + + return indexList; +} + +// Get flattened index for output encoder +unsigned int ReverseGetFlatIdx(const std::vector<unsigned int>& idxList, + unsigned int inputRank, + std::vector<unsigned int>& elementNumInner) +{ + unsigned int idx = 0; + + for (unsigned int iDim = 0; iDim < inputRank; ++iDim) + { + idx += idxList[iDim] * elementNumInner[iDim]; + } + + return idx; +} + +// Relocate the coordinate to the reversed tensor +unsigned int ReverseRelocateIdx(unsigned int idx, + unsigned int inputRank, + std::vector<bool>& axisFlag, + std::vector<unsigned int>& dimSize, + std::vector<unsigned int>& elementNumInner) +{ + // Get the multidimensional index list for input + auto inputIdxList = ReverseGetMultIdx(idx, inputRank, elementNumInner); + + std::vector<unsigned int> outputIdxList(inputRank); + + // Relocate the input index to the output one + for (unsigned int iDim = 0; iDim < inputRank; ++iDim) + { + if (axisFlag[iDim]) + { + outputIdxList[iDim] = dimSize[iDim] - inputIdxList[iDim] - 1; + } + else + { + outputIdxList[iDim] = inputIdxList[iDim]; + } + } + + // Get the 1-dimensional flattened index for output + unsigned int outputIdx = ReverseGetFlatIdx(outputIdxList, inputRank, elementNumInner); + return outputIdx; +} + +void ReverseV2(const ReverseV2Descriptor& params, + const TensorInfo& inputInfo, + Decoder<float>& inputDecoder, + Encoder<float>& outputEncoder) +{ + // Empty axis and empty tensor case: copy input to output + if (params.m_Axis.empty() || inputInfo.GetNumElements() == 0) + { + for (unsigned idx = 0; idx < 
inputInfo.GetNumElements(); idx++) + { + float inputValue = inputDecoder.Get(); + inputDecoder += 1; + outputEncoder.Set(inputValue); + outputEncoder += 1; + } + return; + } + + unsigned int inputRank = static_cast<unsigned int>(inputInfo.GetNumDimensions()); + + std::vector<bool> axisFlag(inputRank, false); + std::vector<unsigned int> dimSize(inputRank, 0); + + // Make sure the axes are positive + for (int32_t axisElement: params.m_Axis) + { + axisElement = axisElement < 0 ? axisElement + static_cast<int32_t>(inputRank) : axisElement; + axisFlag[static_cast<uint32_t>(axisElement)] = true; + } + + const TensorShape &inputShape = inputInfo.GetShape(); + + unsigned int elementNum = inputInfo.GetNumElements(); + unsigned int baseDimSize = 1; + + std::vector<unsigned int> elementNumInner; + + // Get the number of element within the specific dimension + for (unsigned int iDim = 0; iDim < inputRank; ++iDim) { + dimSize[iDim] = inputShape[iDim]; + baseDimSize *= dimSize[iDim]; + elementNumInner.push_back(static_cast<unsigned int>(elementNum / baseDimSize)); + } + + // Iterate through all elements + for (unsigned int idx = 0; idx < elementNum; ++idx) + { + float inputValue = inputDecoder.Get(); + inputDecoder += 1; + auto outputIdx = ReverseRelocateIdx(idx, inputRank, axisFlag, dimSize, elementNumInner); + outputEncoder[outputIdx]; + outputEncoder.Set(inputValue); + } +} + +} // namespace armnn \ No newline at end of file diff --git a/src/backends/reference/workloads/ReverseV2Impl.hpp b/src/backends/reference/workloads/ReverseV2Impl.hpp new file mode 100644 index 0000000000..bc1fe1d432 --- /dev/null +++ b/src/backends/reference/workloads/ReverseV2Impl.hpp @@ -0,0 +1,21 @@ +// +// Copyright © 2023 Arm Ltd and Contributors. All rights reserved. 
+// SPDX-License-Identifier: MIT +// + +#pragma once + +#include "Encoders.hpp" +#include "Decoders.hpp" + +#include <armnn/backends/WorkloadData.hpp> + +namespace armnn +{ + +void ReverseV2(const ReverseV2Descriptor& params, + const TensorInfo& inputInfo, + Decoder<float>& inputDecoder, + Encoder<float>& outputEncoder); + +} // namespace armnn \ No newline at end of file -- cgit v1.2.1