From 988354de127528bdebb98fd25661fbf2f39f17dd Mon Sep 17 00:00:00 2001 From: Tianle Cheng Date: Wed, 28 Jun 2023 13:20:47 +0100 Subject: IVGCVSW-7831: Front end and Reference Implementation for REVERSE_V2 * Descriptors added for ReverseV2 * Layer definition added * Input validation added * Reference workload implementation for ReverseV2 added * Reference layer unit tests made for ReverseV2 * CompareTensors method updated to support comparison between empty tensors * CMake and other build files updated Signed-off-by: Tianle Cheng Change-Id: I805738454421309fda77c44218a8df171d68dc18 --- Android.mk | 1 + CMakeLists.txt | 2 + docs/02_operator_list.dox | 42 + include/armnn/BackendHelper.hpp | 5 + include/armnn/Descriptors.hpp | 24 + include/armnn/DescriptorsFwd.hpp | 1 + include/armnn/INetwork.hpp | 7 + include/armnn/Types.hpp | 1 + include/armnn/backends/WorkloadData.hpp | 7 +- include/armnnTestUtils/TensorHelpers.hpp | 9 +- src/armnn/BackendHelper.cpp | 17 +- src/armnn/LayersFwd.hpp | 2 + src/armnn/Network.cpp | 11 + src/armnn/Network.hpp | 3 + src/armnn/layers/ReverseV2Layer.cpp | 50 + src/armnn/layers/ReverseV2Layer.hpp | 49 + src/backends/backendsCommon/WorkloadData.cpp | 66 ++ src/backends/backendsCommon/WorkloadFactory.cpp | 11 + src/backends/backendsCommon/common.mk | 1 + src/backends/backendsCommon/test/CMakeLists.txt | 2 + .../test/IsLayerSupportedTestImpl.hpp | 2 + src/backends/backendsCommon/test/LayerTests.hpp | 1 + .../test/layerTests/ReverseV2TestImpl.cpp | 1084 ++++++++++++++++++++ .../test/layerTests/ReverseV2TestImpl.hpp | 156 +++ src/backends/reference/RefLayerSupport.cpp | 35 + src/backends/reference/RefLayerSupport.hpp | 5 + src/backends/reference/RefWorkloadFactory.cpp | 5 + src/backends/reference/backend.mk | 4 +- src/backends/reference/test/RefLayerTests.cpp | 31 + src/backends/reference/workloads/CMakeLists.txt | 4 + .../reference/workloads/RefReverseV2Workload.cpp | 48 + .../reference/workloads/RefReverseV2Workload.hpp | 30 + src/backends/reference/workloads/RefWorkloads.hpp | 1 + src/backends/reference/workloads/ReverseV2Impl.cpp | 133 +++ src/backends/reference/workloads/ReverseV2Impl.hpp | 21 + 35 files changed, 1867 insertions(+), 4 deletions(-) create mode 100644 src/armnn/layers/ReverseV2Layer.cpp create mode 100644 src/armnn/layers/ReverseV2Layer.hpp create mode 100644 src/backends/backendsCommon/test/layerTests/ReverseV2TestImpl.cpp create mode 100644 src/backends/backendsCommon/test/layerTests/ReverseV2TestImpl.hpp create mode 100644 src/backends/reference/workloads/RefReverseV2Workload.cpp create mode 100644 src/backends/reference/workloads/RefReverseV2Workload.hpp create mode 100644 src/backends/reference/workloads/ReverseV2Impl.cpp create mode 100644 src/backends/reference/workloads/ReverseV2Impl.hpp diff --git a/Android.mk b/Android.mk index dd8f262c1e..bf3c195505 100644 --- a/Android.mk +++ b/Android.mk @@ -264,6 +264,7 @@ LOCAL_SRC_FILES := \ src/armnn/layers/ReduceLayer.cpp \ src/armnn/layers/ReshapeLayer.cpp \ src/armnn/layers/ResizeLayer.cpp \ + src/armnn/layers/ReverseV2Layer.cpp \ src/armnn/layers/ShapeLayer.cpp \ src/armnn/layers/SliceLayer.cpp \ src/armnn/layers/SoftmaxLayer.cpp \ diff --git a/CMakeLists.txt b/CMakeLists.txt index 60fbaaa685..148e77254d 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -304,6 +304,8 @@ list(APPEND armnn_sources src/armnn/layers/ReshapeLayer.cpp src/armnn/layers/ResizeLayer.hpp src/armnn/layers/ResizeLayer.cpp + src/armnn/layers/ReverseV2Layer.hpp + src/armnn/layers/ReverseV2Layer.cpp 
src/armnn/layers/ShapeLayer.cpp src/armnn/layers/ShapeLayer.hpp src/armnn/layers/SliceLayer.cpp diff --git a/docs/02_operator_list.dox b/docs/02_operator_list.dox index ccdb2c10da..d329901af9 100644 --- a/docs/02_operator_list.dox +++ b/docs/02_operator_list.dox @@ -2781,6 +2781,48 @@ where N = batches, C = channels, H = height, W = width FLOAT16 FLOAT32 + + ReverseV2Layer + Layer to perform ReverseV2 operations. + + + CpuRef + + + + +
+
BFLOAT16 +
FLOAT16 +
FLOAT32 +
QASYMMS8 +
QASYMMU8 +
QSYMMS16 +
+ + CpuAcc + + + + + + GpuAcc + + + + GpuAcc diff --git a/include/armnn/BackendHelper.hpp b/include/armnn/BackendHelper.hpp index ddf2308da2..6f804cbbed 100644 --- a/include/armnn/BackendHelper.hpp +++ b/include/armnn/BackendHelper.hpp @@ -360,6 +360,11 @@ public: const ResizeDescriptor& descriptor, Optional reasonIfUnsupported = EmptyOptional()); + bool IsReverseV2Supported(const TensorInfo& input, + const TensorInfo& output, + const ReverseV2Descriptor& descriptor, + Optional reasonIfUnsupported = EmptyOptional()); + bool IsShapeSupported(const TensorInfo& input, const TensorInfo& output, Optional reasonIfUnsupported = EmptyOptional()); diff --git a/include/armnn/Descriptors.hpp b/include/armnn/Descriptors.hpp index 9ff894f1b0..27ca50123f 100644 --- a/include/armnn/Descriptors.hpp +++ b/include/armnn/Descriptors.hpp @@ -1620,4 +1620,28 @@ struct BatchMatMulDescriptor : BaseDescriptor const TensorShape& tensorShape); }; +struct ReverseV2Descriptor : BaseDescriptor +{ + ReverseV2Descriptor() + : m_Axis() + , m_MaxDimension(4) + {} + + ReverseV2Descriptor(std::vector axis) + : m_Axis(axis) + , m_MaxDimension(4) + {} + + bool operator ==(const ReverseV2Descriptor& rhs) const + { + return m_Axis == rhs.m_Axis; + } + + /// The indices of the dimensions to reverse + std::vector m_Axis; + /// The max dimension supported in the lower levels of code + uint32_t m_MaxDimension; + +}; + } // namespace armnn diff --git a/include/armnn/DescriptorsFwd.hpp b/include/armnn/DescriptorsFwd.hpp index 2c25a49f00..4e9621d020 100644 --- a/include/armnn/DescriptorsFwd.hpp +++ b/include/armnn/DescriptorsFwd.hpp @@ -42,6 +42,7 @@ struct QLstmDescriptor; struct ReshapeDescriptor; struct ResizeDescriptor; struct ReduceDescriptor; +struct ReverseV2Descriptor; struct SliceDescriptor; struct SoftmaxDescriptor; struct SpaceToBatchNdDescriptor; diff --git a/include/armnn/INetwork.hpp b/include/armnn/INetwork.hpp index 819f5cb1a3..9b8c3b0b7b 100644 --- a/include/armnn/INetwork.hpp +++ b/include/armnn/INetwork.hpp @@ -825,6 +825,13 @@ public: IConnectableLayer* AddBatchMatMulLayer(const BatchMatMulDescriptor& descriptor, const char* name = nullptr); + /// Add a ReverseV2 layer to the network + /// @param descriptor - Parameters for the ReverseV2 operation + /// @param name - Optional name for the layer + /// @return - Interface for configuring the layer + IConnectableLayer* AddReverseV2Layer(const ReverseV2Descriptor& descriptor, + const char* name = nullptr); + void ExecuteStrategy(IStrategy& strategy) const; protected: diff --git a/include/armnn/Types.hpp b/include/armnn/Types.hpp index 117a679973..f05f05b2a0 100644 --- a/include/armnn/Types.hpp +++ b/include/armnn/Types.hpp @@ -473,6 +473,7 @@ using InferenceTimingPair = std::pair; X(GatherNd) \ X(BatchMatMul) \ X(ElementwiseBinary) \ + X(ReverseV2) \ // New layers should be added at last position to minimize instability. diff --git a/include/armnn/backends/WorkloadData.hpp b/include/armnn/backends/WorkloadData.hpp index 2abd26740a..fe59fca795 100644 --- a/include/armnn/backends/WorkloadData.hpp +++ b/include/armnn/backends/WorkloadData.hpp @@ -1,5 +1,5 @@ // -// Copyright © 2021-2022 Arm Ltd and Contributors. All rights reserved. +// Copyright © 2021-2023 Arm Ltd and Contributors. All rights reserved. 
// SPDX-License-Identifier: MIT // #pragma once @@ -750,4 +750,9 @@ struct BatchMatMulQueueDescriptor : QueueDescriptorWithParameters +{ + void Validate(const WorkloadInfo& workloadInfo) const; +}; + } // namespace armnn diff --git a/include/armnnTestUtils/TensorHelpers.hpp b/include/armnnTestUtils/TensorHelpers.hpp index ca17e621c3..fa9c97032c 100644 --- a/include/armnnTestUtils/TensorHelpers.hpp +++ b/include/armnnTestUtils/TensorHelpers.hpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. +// Copyright © 2017, 2021-2023 Arm Ltd and Contributors. All rights reserved. // SPDX-License-Identifier: MIT // #pragma once @@ -88,6 +88,13 @@ armnn::PredicateResult CompareTensors(const std::vector& actualData, return res; } + // Support for comparison between empty tensors + if (actualData.size() == 0 && expectedData.size() == 0) + { + armnn::PredicateResult comparisonResult(true); + return comparisonResult; + } + if (actualShape.GetNumDimensions() != expectedShape.GetNumDimensions()) { armnn::PredicateResult res(false); diff --git a/src/armnn/BackendHelper.cpp b/src/armnn/BackendHelper.cpp index 580c52c568..404d278efc 100644 --- a/src/armnn/BackendHelper.cpp +++ b/src/armnn/BackendHelper.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017,2022 Arm Ltd and Contributors. All rights reserved. +// Copyright © 2017,2022-2023 Arm Ltd and Contributors. All rights reserved. // SPDX-License-Identifier: MIT // @@ -1211,6 +1211,21 @@ bool LayerSupportHandle::IsResizeSupported(const TensorInfo& input, reasonIfUnsupported); } +bool LayerSupportHandle::IsReverseV2Supported(const armnn::TensorInfo &input, + const armnn::TensorInfo &output, + const armnn::ReverseV2Descriptor &descriptor, + Optional reasonIfUnsupported) +{ + TensorInfos infos{input, output}; + + return m_LayerSupport->IsLayerSupported(LayerType::ReverseV2, + infos, + descriptor, + EmptyOptional(), + EmptyOptional(), + reasonIfUnsupported); +} + bool LayerSupportHandle::IsShapeSupported(const TensorInfo& input, const TensorInfo& output, Optional reasonIfUnsupported) diff --git a/src/armnn/LayersFwd.hpp b/src/armnn/LayersFwd.hpp index f634272316..d3ce6f2a67 100644 --- a/src/armnn/LayersFwd.hpp +++ b/src/armnn/LayersFwd.hpp @@ -64,6 +64,7 @@ #include "layers/ReduceLayer.hpp" #include "layers/ReshapeLayer.hpp" #include "layers/ResizeLayer.hpp" +#include "layers/ReverseV2Layer.hpp" #include "layers/ShapeLayer.hpp" #include "layers/SliceLayer.hpp" #include "layers/SoftmaxLayer.hpp" @@ -165,6 +166,7 @@ DECLARE_LAYER(Rank) DECLARE_LAYER(Reduce) DECLARE_LAYER(Reshape) DECLARE_LAYER(Resize) +DECLARE_LAYER(ReverseV2) DECLARE_LAYER(Shape) DECLARE_LAYER(Slice) DECLARE_LAYER(Softmax) diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp index b768ea888f..2abaf44587 100644 --- a/src/armnn/Network.cpp +++ b/src/armnn/Network.cpp @@ -639,6 +639,12 @@ IConnectableLayer* INetwork::AddBatchMatMulLayer(const BatchMatMulDescriptor &de return pNetworkImpl->AddBatchMatMulLayer(descriptor, name); } +IConnectableLayer* INetwork::AddReverseV2Layer(const ReverseV2Descriptor &descriptor, + const char *name) +{ + return pNetworkImpl->AddReverseV2Layer(descriptor, name); +} + void INetwork::ExecuteStrategy(IStrategy& strategy) const { return pNetworkImpl->ExecuteStrategy(strategy); @@ -2902,6 +2908,11 @@ IConnectableLayer* NetworkImpl::AddBatchMatMulLayer(const BatchMatMulDescriptor& return m_Graph->AddLayer(desc, name); } +IConnectableLayer* NetworkImpl::AddReverseV2Layer(const ReverseV2Descriptor &desc, const char *name) +{ + return 
m_Graph->AddLayer(desc, name); +} + IConnectableLayer* NetworkImpl::AddPrecompiledLayer(const PreCompiledDescriptor& preCompiledDescriptor, CompiledBlobPtr compiledBlobPtr, const Optional& backend, diff --git a/src/armnn/Network.hpp b/src/armnn/Network.hpp index eced4587b9..fc3ae42aa9 100644 --- a/src/armnn/Network.hpp +++ b/src/armnn/Network.hpp @@ -188,6 +188,9 @@ public: IConnectableLayer* AddReshapeLayer(const ReshapeDescriptor& reshapeDescriptor, const char* name = nullptr); + IConnectableLayer* AddReverseV2Layer(const ReverseV2Descriptor& ReverseV2Descriptor, + const char* name = nullptr); + IConnectableLayer* AddShapeLayer(const char* name = nullptr); IConnectableLayer* AddSliceLayer(const SliceDescriptor& sliceDescriptor, const char* name = nullptr); diff --git a/src/armnn/layers/ReverseV2Layer.cpp b/src/armnn/layers/ReverseV2Layer.cpp new file mode 100644 index 0000000000..29f8b1b781 --- /dev/null +++ b/src/armnn/layers/ReverseV2Layer.cpp @@ -0,0 +1,50 @@ +// +// Copyright © 2023 Arm Ltd and Contributors. All rights reserved. +// SPDX-License-Identifier: MIT +// + +#include "ReverseV2Layer.hpp" + +#include +#include "layers/LayerCloneBase.hpp" + +namespace armnn +{ +ReverseV2Layer::ReverseV2Layer(const armnn::ReverseV2Descriptor ¶m, const char *name) + : LayerWithParameters(1, 1, LayerType::ReverseV2, param, name) +{} + +std::unique_ptr ReverseV2Layer::CreateWorkload(const armnn::IWorkloadFactory &factory) const +{ + ReverseV2QueueDescriptor descriptor; + SetAdditionalInfo(descriptor); + + return factory.CreateWorkload(LayerType::ReverseV2, descriptor, PrepInfoAndDesc(descriptor)); +} + +ReverseV2Layer* ReverseV2Layer::Clone(armnn::Graph &graph) const +{ + auto layer = CloneBase(graph, m_Param, GetName()); + + return std::move(layer); +} + +/// Use the default Layer::InferOutputShape method + +void ReverseV2Layer::ValidateTensorShapesFromInputs() +{ + VerifyLayerConnections(1, CHECK_LOCATION()); + + const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape(); + + VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod); + + auto inferredShapes = InferOutputShapes({ + GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() }); + + ARMNN_ASSERT(inferredShapes.size() == 1); + + ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "ReverseV2Layer"); +} + +} \ No newline at end of file diff --git a/src/armnn/layers/ReverseV2Layer.hpp b/src/armnn/layers/ReverseV2Layer.hpp new file mode 100644 index 0000000000..046670e9de --- /dev/null +++ b/src/armnn/layers/ReverseV2Layer.hpp @@ -0,0 +1,49 @@ +// +// Copyright © 2023 Arm Ltd and Contributors. All rights reserved. +// SPDX-License-Identifier: MIT +// + +#pragma once + +#include "LayerWithParameters.hpp" + +namespace armnn +{ + + /// This layer represents a ReverseV2 operation. + class ReverseV2Layer : public LayerWithParameters + { + public: + /// Makes a workload for the ReverseV2 type. + /// @param [in] graph The graph where this layer can be found. + /// @param [in] factory The workload factory which will create the workload. + /// @return A pointer to the created workload, or nullptr if not created. + virtual std::unique_ptrCreateWorkload(const IWorkloadFactory& factory) const override; + + /// Creates a dynamically-allocated copy of this layer. + /// @param [in] graph The graph into which this layer is being cloned. 
+ ReverseV2Layer* Clone(Graph& graph) const override; + + /// By default returns inputShapes if the number of inputs are equal to number of outputs, + /// otherwise infers the output shapes from given input shapes and layer properties. + /// @param [in] inputShapes The vector of input shapes for ReverseV2. + /// @return A vector to the inferred output shape. + + /// Use the default Layer::InferOutputShape method + // std::vector InferOutputShapes(const std::vector& inputShapes) const override; + + /// Check if the input tensor shape(s) + /// will lead to a valid configuration of @ref ReverseV2Layer. + void ValidateTensorShapesFromInputs() override; + + protected: + /// Constructor to create a ReverseV2Layer. + /// @param [in] param ReverseV2Descriptor to configure the ReverseV2 operation. + /// @param [in] name Optional name for the layer. + ReverseV2Layer(const ReverseV2Descriptor& param, const char* name); + + /// Default destructor + ~ReverseV2Layer() = default; + }; + +} // namespace armnn diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp index d4ae08d874..6cde89c2e1 100644 --- a/src/backends/backendsCommon/WorkloadData.cpp +++ b/src/backends/backendsCommon/WorkloadData.cpp @@ -1640,6 +1640,72 @@ void ResizeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const } } +void ReverseV2QueueDescriptor::Validate(const WorkloadInfo &workloadInfo) const { + const std::string descriptorName{"ReverseV2QueueDescriptor"}; + + ValidateNumInputs(workloadInfo, descriptorName, 1); + ValidateNumOutputs(workloadInfo, descriptorName, 1); + + const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0]; + const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0]; + + auto inputTensorNumDimensions = inputTensorInfo.GetNumDimensions(); + if (inputTensorNumDimensions > m_Parameters.m_MaxDimension) + { + throw InvalidArgumentException(descriptorName + + ": Input tensors with rank greater than " + + std::to_string(m_Parameters.m_MaxDimension) + " are not supported."); + } + + std::vector supportedTypes = + { + DataType::BFloat16, + DataType::Float16, + DataType::Float32, + DataType::QAsymmS8, + DataType::QAsymmU8, + DataType::QSymmS16 + }; + + ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName); + ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output"); + ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output"); + + if (m_Parameters.m_Axis.size() > inputTensorNumDimensions) + { + throw InvalidArgumentException(descriptorName + ": More axes specified than is on the input tensor."); + } + if (m_Parameters.m_Axis.size() > m_Parameters.m_MaxDimension) + { + throw InvalidArgumentException(descriptorName + + ": More than " + std::to_string(m_Parameters.m_MaxDimension) + " axes cannot be specified."); + } + + if (! 
m_Parameters.m_Axis.empty()) + { + // First check that we have unique axis values + auto checkAxis = m_Parameters.m_Axis; + std::sort(checkAxis.begin(), checkAxis.end()); + auto lastUnique = std::unique(checkAxis.begin(), checkAxis.end()); + if (lastUnique != checkAxis.end()) + { + throw InvalidArgumentException(descriptorName + ": Axes values must be unique."); + } + + // Next check that the axes values are in range: [-rank, rank] + const auto minmax = + std::minmax_element(std::begin(m_Parameters.m_Axis), std::end(m_Parameters.m_Axis)); + if (((*minmax.first) < int32_t(-inputTensorNumDimensions)) || + ((*minmax.second) >= int32_t (inputTensorNumDimensions))) + { + throw InvalidArgumentException(descriptorName + + ": Axes values must in range [-" + std::to_string(inputTensorNumDimensions) + "," + + std::to_string(inputTensorNumDimensions) + "]."); + } + } +} + + void FakeQuantizationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const { const std::string descriptorName{"FakeQuantizationQueueDescriptor"}; diff --git a/src/backends/backendsCommon/WorkloadFactory.cpp b/src/backends/backendsCommon/WorkloadFactory.cpp index 7042af1127..37f9382d6e 100644 --- a/src/backends/backendsCommon/WorkloadFactory.cpp +++ b/src/backends/backendsCommon/WorkloadFactory.cpp @@ -1119,6 +1119,17 @@ bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId, reason); break; } + case LayerType::ReverseV2: + { + auto cLayer = PolymorphicDowncast(&layer); + const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); + const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); + result = layerSupportObject.IsReverseV2Supported(OverrideDataType(input, dataType), + OverrideDataType(output, dataType), + cLayer->GetParameters(), + reason); + break; + } case LayerType::Shape: { const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo(); diff --git a/src/backends/backendsCommon/common.mk b/src/backends/backendsCommon/common.mk index c868cbe87a..1e0467deba 100644 --- a/src/backends/backendsCommon/common.mk +++ b/src/backends/backendsCommon/common.mk @@ -91,6 +91,7 @@ COMMON_TEST_SOURCES := \ test/layerTests/ReduceSumTestImpl.cpp \ test/layerTests/ReshapeTestImpl.cpp \ test/layerTests/ResizeTestImpl.cpp \ + test/layerTests/ReverseV2TestImpl.cpp \ test/layerTests/RsqrtTestImpl.cpp \ test/layerTests/SliceTestImpl.cpp \ test/layerTests/SquaredDifferenceTestImpl.cpp \ diff --git a/src/backends/backendsCommon/test/CMakeLists.txt b/src/backends/backendsCommon/test/CMakeLists.txt index aba9c72e6c..0139044432 100644 --- a/src/backends/backendsCommon/test/CMakeLists.txt +++ b/src/backends/backendsCommon/test/CMakeLists.txt @@ -170,6 +170,8 @@ list(APPEND armnnBackendsCommonUnitTests_sources layerTests/ReshapeTestImpl.hpp layerTests/ResizeTestImpl.cpp layerTests/ResizeTestImpl.hpp + layerTests/ReverseV2TestImpl.cpp + layerTests/ReverseV2TestImpl.hpp layerTests/RsqrtTestImpl.cpp layerTests/RsqrtTestImpl.hpp layerTests/SinTestImpl.cpp diff --git a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp index 182fab97be..f7a852f440 100644 --- a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp +++ b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp @@ -744,6 +744,8 @@ DECLARE_LAYER_POLICY_2_PARAM(Resize) DECLARE_LAYER_POLICY_2_PARAM(Reshape) +DECLARE_LAYER_POLICY_2_PARAM(ReverseV2) + DECLARE_LAYER_POLICY_1_PARAM(Shape) DECLARE_LAYER_POLICY_2_PARAM(Slice) diff --git 
a/src/backends/backendsCommon/test/LayerTests.hpp b/src/backends/backendsCommon/test/LayerTests.hpp index b6ddb31419..755a665ba6 100644 --- a/src/backends/backendsCommon/test/LayerTests.hpp +++ b/src/backends/backendsCommon/test/LayerTests.hpp @@ -60,6 +60,7 @@ #include #include #include +#include #include #include #include diff --git a/src/backends/backendsCommon/test/layerTests/ReverseV2TestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ReverseV2TestImpl.cpp new file mode 100644 index 0000000000..586b831e45 --- /dev/null +++ b/src/backends/backendsCommon/test/layerTests/ReverseV2TestImpl.cpp @@ -0,0 +1,1084 @@ +// +// Copyright © 2023 Arm Ltd and Contributors. All rights reserved. +// SPDX-License-Identifier: MIT +// + +#include "ReverseV2TestImpl.hpp" + +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +namespace +{ + template + LayerTestResult ReverseV2TestImpl( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory, + armnn::ReverseV2Descriptor descriptor, + const std::vector& input, + const std::vector& outputExpected, + const armnn::TensorInfo& inputInfo, + const armnn::TensorInfo& outputInfo) + { + LayerTestResult result(outputInfo); + std::vector outputActual(outputInfo.GetNumElements()); + + std::unique_ptr inputHandle = tensorHandleFactory.CreateTensorHandle(inputInfo); + std::unique_ptr outputHandle = tensorHandleFactory.CreateTensorHandle(outputInfo); + + armnn::ReverseV2QueueDescriptor queueDescriptor; + queueDescriptor.m_Parameters = std::move(descriptor); + armnn::WorkloadInfo workloadInfo; + + AddInputToWorkload(queueDescriptor, workloadInfo, inputInfo, inputHandle.get()); + AddOutputToWorkload(queueDescriptor, workloadInfo, outputInfo, outputHandle.get()); + + // Don't execute if ReverseV2 is not supported, as an exception will be raised. + const armnn::BackendId& backend = workloadFactory.GetBackendId(); + std::string reasonIfUnsupported; + armnn::LayerSupportHandle handle = armnn::GetILayerSupportByBackendId(backend); + result.m_Supported = handle.IsReverseV2Supported(inputInfo, + outputInfo, + queueDescriptor.m_Parameters, + reasonIfUnsupported); + if (!result.m_Supported) + { + return result; + } + + auto workload = workloadFactory.CreateWorkload(armnn::LayerType::ReverseV2, queueDescriptor, workloadInfo); + + inputHandle->Allocate(); + outputHandle->Allocate(); + + CopyDataToITensorHandle(inputHandle.get(), input.data()); + + workload->PostAllocationConfigure(); + ExecuteWorkload(*workload, memoryManager); + + CopyDataFromITensorHandle(outputActual.data(), outputHandle.get()); + + return LayerTestResult(outputActual, + outputExpected, + outputHandle->GetShape(), + outputInfo.GetShape()); + } +} + +template +LayerTestResult ReverseV2SimpleTestEmptyAxis( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory) +{ + // Simple test with default descriptor. 
No axes set so output is the same as input + auto descriptor = armnn::ReverseV2Descriptor(); + + float qScale = 1.0f; + int32_t qOffset = 0; + + armnn::TensorInfo inputInfo({2,2}, ArmnnType, qScale, qOffset); + armnn::TensorInfo outputInfo({2,2}, ArmnnType, qScale, qOffset); + + std::vector input = armnnUtils::QuantizedVector({ + 1, 2, + 3, 4 + }, qScale, qOffset); + + std::vector outputExpected = armnnUtils::QuantizedVector({ + 1, 2, + 3, 4 + }, qScale, qOffset); + + return ReverseV2TestImpl(workloadFactory, + memoryManager, + tensorHandleFactory, + descriptor, + input, + outputExpected, + inputInfo, + outputInfo); +} + +template +LayerTestResult ReverseV2SimpleTestEmptyTensor( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory) +{ + // Simple test with default descriptor. Empty tensor set so output is the same as input + auto descriptor = armnn::ReverseV2Descriptor(); + + float qScale = 1.0f; + int32_t qOffset = 0; + + armnn::TensorInfo inputInfo({0}, ArmnnType, qScale, qOffset); + armnn::TensorInfo outputInfo({0}, ArmnnType, qScale, qOffset); + + std::vector input = armnnUtils::QuantizedVector({}, qScale, qOffset); + + std::vector outputExpected = armnnUtils::QuantizedVector({}, qScale, qOffset); + + return ReverseV2TestImpl(workloadFactory, + memoryManager, + tensorHandleFactory, + descriptor, + input, + outputExpected, + inputInfo, + outputInfo); +} + +template +LayerTestResult ReverseV2SimpleTest1Dim( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory) +{ + auto descriptor = armnn::ReverseV2Descriptor(std::vector {0}); + + float qScale = 1.0f; + int32_t qOffset = 0; + + armnn::TensorInfo inputInfo({4}, ArmnnType, qScale, qOffset); + armnn::TensorInfo outputInfo({4}, ArmnnType, qScale, qOffset); + + std::vector input = armnnUtils::QuantizedVector({ + 1, 2, + 3, 4 + }, qScale, qOffset); + + std::vector outputExpected = armnnUtils::QuantizedVector({ + 4, 3, + 2, 1 + }, qScale, qOffset); + + return ReverseV2TestImpl(workloadFactory, + memoryManager, + tensorHandleFactory, + descriptor, + input, + outputExpected, + inputInfo, + outputInfo); +} + +template +LayerTestResult ReverseV2SimpleTest2Dim1Axis( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory) +{ + auto descriptor = armnn::ReverseV2Descriptor(std::vector {1}); + + float qScale = 1.0f; + int32_t qOffset = 0; + + armnn::TensorInfo inputInfo({2,2}, ArmnnType, qScale, qOffset); + armnn::TensorInfo outputInfo({2,2}, ArmnnType, qScale, qOffset); + + std::vector input = armnnUtils::QuantizedVector({ + 1, 2, + 3, 4 + }, qScale, qOffset); + + std::vector outputExpected = armnnUtils::QuantizedVector({ + 2, 1, + 4, 3 + }, qScale, qOffset); + + return ReverseV2TestImpl(workloadFactory, + memoryManager, + tensorHandleFactory, + descriptor, + input, + outputExpected, + inputInfo, + outputInfo); +} + +template +LayerTestResult ReverseV2SimpleTest2Dim2Axis( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory) +{ + auto descriptor = armnn::ReverseV2Descriptor(std::vector {1, 0}); + + float qScale = 1.0f; + int32_t qOffset = 0; + + armnn::TensorInfo inputInfo({2,2}, 
ArmnnType, qScale, qOffset); + armnn::TensorInfo outputInfo({2,2}, ArmnnType, qScale, qOffset); + + std::vector input = armnnUtils::QuantizedVector({ + 1, 2, + 3, 4 + }, qScale, qOffset); + + std::vector outputExpected = armnnUtils::QuantizedVector({ + 4, 3, + 2, 1 + }, qScale, qOffset); + + return ReverseV2TestImpl(workloadFactory, + memoryManager, + tensorHandleFactory, + descriptor, + input, + outputExpected, + inputInfo, + outputInfo); +} + +template +LayerTestResult ReverseV2SimpleTest3Dim1Axis( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory) +{ + auto descriptor = armnn::ReverseV2Descriptor(std::vector {1}); + + float qScale = 1.0f; + int32_t qOffset = 0; + + armnn::TensorInfo inputInfo({2, 3, 4}, ArmnnType, qScale, qOffset); + armnn::TensorInfo outputInfo({2, 3, 4}, ArmnnType, qScale, qOffset); + + std::vector input = armnnUtils::QuantizedVector({ + 1, 2, 3, 4, + 5, 6, 7, 8, + 9, 10, 11, 12, + 13, 14, 15, 16, + 17, 18, 19, 20, + 21, 22, 23, 24 + }, qScale, qOffset); + + std::vector outputExpected = armnnUtils::QuantizedVector({ + 9, 10, 11, 12, + 5, 6, 7, 8, + 1, 2, 3, 4, + 21, 22, 23, 24, + 17, 18, 19, 20, + 13, 14, 15, 16 + }, qScale, qOffset); + + return ReverseV2TestImpl(workloadFactory, + memoryManager, + tensorHandleFactory, + descriptor, + input, + outputExpected, + inputInfo, + outputInfo); +} + +template +LayerTestResult ReverseV2SimpleTest3Dim2Axis( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory) +{ + auto descriptor = armnn::ReverseV2Descriptor(std::vector {0, 1}); + + float qScale = 1.0f; + int32_t qOffset = 0; + + armnn::TensorInfo inputInfo({2, 3, 4}, ArmnnType, qScale, qOffset); + armnn::TensorInfo outputInfo({2, 3, 4}, ArmnnType, qScale, qOffset); + + std::vector input = armnnUtils::QuantizedVector({ + 1, 2, 3, 4, + 5, 6, 7, 8, + 9, 10, 11, 12, + 13, 14, 15, 16, + 17, 18, 19, 20, + 21, 22, 23, 24 + }, qScale, qOffset); + + std::vector outputExpected = armnnUtils::QuantizedVector({ + 21, 22, 23, 24, + 17, 18, 19, 20, + 13, 14, 15, 16, + 9, 10, 11, 12, + 5, 6, 7, 8, + 1, 2, 3, 4 + }, qScale, qOffset); + + return ReverseV2TestImpl(workloadFactory, + memoryManager, + tensorHandleFactory, + descriptor, + input, + outputExpected, + inputInfo, + outputInfo); +} + +template +LayerTestResult ReverseV2SimpleTest3Dim3Axis( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory) +{ + // Simple test with default descriptor. 
No axes set so output is + // the same as input + auto descriptor = armnn::ReverseV2Descriptor(std::vector {1, 0, 2}); + + float qScale = 1.0f; + int32_t qOffset = 0; + + armnn::TensorInfo inputInfo({2, 3, 4}, ArmnnType, qScale, qOffset); + armnn::TensorInfo outputInfo({2, 3, 4}, ArmnnType, qScale, qOffset); + + std::vector input = armnnUtils::QuantizedVector({ + 1, 2, 3, 4, + 5, 6, 7, 8, + 9, 10, 11, 12, + 13, 14, 15, 16, + 17, 18, 19, 20, + 21, 22, 23, 24 + }, qScale, qOffset); + + std::vector outputExpected = armnnUtils::QuantizedVector({ + 24, 23, 22, 21, + 20, 19, 18, 17, + 16, 15, 14, 13, + 12, 11, 10, 9, + 8, 7, 6, 5, + 4, 3, 2, 1 + }, qScale, qOffset); + + return ReverseV2TestImpl(workloadFactory, + memoryManager, + tensorHandleFactory, + descriptor, + input, + outputExpected, + inputInfo, + outputInfo); +} + +template +LayerTestResult ReverseV2SimpleTest4Dim1Axis( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory) +{ + // Simple test with default descriptor. No axes set so output is + // the same as input + auto descriptor = armnn::ReverseV2Descriptor(std::vector {0}); + + float qScale = 1.0f; + int32_t qOffset = 0; + + armnn::TensorInfo inputInfo({2, 2, 2, 3}, ArmnnType, qScale, qOffset); + armnn::TensorInfo outputInfo({2, 2, 2, 3}, ArmnnType, qScale, qOffset); + + std::vector input = armnnUtils::QuantizedVector({ + 1, 2, 3, + 4, 5, 6, + 7, 8, 9, + 10, 11, 12, + 13, 14, 15, + 16, 17, 18, + 19, 20, 21, + 22, 23, 24 + }, qScale, qOffset); + + std::vector outputExpected = armnnUtils::QuantizedVector({ + 13, 14, 15, + 16, 17, 18, + 19, 20, 21, + 22, 23, 24, + 1, 2, 3, + 4, 5, 6, + 7, 8, 9, + 10, 11, 12 + }, qScale, qOffset); + + return ReverseV2TestImpl(workloadFactory, + memoryManager, + tensorHandleFactory, + descriptor, + input, + outputExpected, + inputInfo, + outputInfo); +} + +template +LayerTestResult ReverseV2SimpleTest4Dim2Axis( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory) +{ + // Simple test with default descriptor. No axes set so output is + // the same as input + auto descriptor = armnn::ReverseV2Descriptor(std::vector {0, 1}); + + float qScale = 1.0f; + int32_t qOffset = 0; + + armnn::TensorInfo inputInfo({2, 2, 2, 3}, ArmnnType, qScale, qOffset); + armnn::TensorInfo outputInfo({2, 2, 2, 3}, ArmnnType, qScale, qOffset); + + std::vector input = armnnUtils::QuantizedVector({ + 1, 2, 3, + 4, 5, 6, + 7, 8, 9, + 10, 11, 12, + 13, 14, 15, + 16, 17, 18, + 19, 20, 21, + 22, 23, 24 + }, qScale, qOffset); + + std::vector outputExpected = armnnUtils::QuantizedVector({ + 19, 20, 21, + 22, 23, 24, + 13, 14, 15, + 16, 17, 18, + 7, 8, 9, + 10, 11, 12, + 1, 2, 3, + 4, 5, 6 + }, qScale, qOffset); + + return ReverseV2TestImpl(workloadFactory, + memoryManager, + tensorHandleFactory, + descriptor, + input, + outputExpected, + inputInfo, + outputInfo); +} + +template +LayerTestResult ReverseV2SimpleTest4Dim3Axis( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory) +{ + // Simple test with default descriptor. 
No axes set so output is + // the same as input + auto descriptor = armnn::ReverseV2Descriptor(std::vector {0, 1, 2}); + + float qScale = 1.0f; + int32_t qOffset = 0; + + armnn::TensorInfo inputInfo({2, 2, 2, 3}, ArmnnType, qScale, qOffset); + armnn::TensorInfo outputInfo({2, 2, 2, 3}, ArmnnType, qScale, qOffset); + + std::vector input = armnnUtils::QuantizedVector({ + 1, 2, 3, + 4, 5, 6, + 7, 8, 9, + 10, 11, 12, + 13, 14, 15, + 16, 17, 18, + 19, 20, 21, + 22, 23, 24 + }, qScale, qOffset); + + std::vector outputExpected = armnnUtils::QuantizedVector({ + 22, 23, 24, + 19, 20, 21, + 16, 17, 18, + 13, 14, 15, + 10, 11, 12, + 7, 8, 9, + 4, 5, 6, + 1, 2, 3 + }, qScale, qOffset); + + return ReverseV2TestImpl(workloadFactory, + memoryManager, + tensorHandleFactory, + descriptor, + input, + outputExpected, + inputInfo, + outputInfo); +} + +template +LayerTestResult ReverseV2SimpleTest4Dim4Axis( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory) +{ + // Simple test with default descriptor. No axes set so output is + // the same as input + auto descriptor = armnn::ReverseV2Descriptor(std::vector {0, 1, 2, 3}); + + float qScale = 1.0f; + int32_t qOffset = 0; + + armnn::TensorInfo inputInfo({2, 2, 2, 3}, ArmnnType, qScale, qOffset); + armnn::TensorInfo outputInfo({2, 2, 2, 3}, ArmnnType, qScale, qOffset); + + std::vector input = armnnUtils::QuantizedVector({ + 1, 2, 3, + 4, 5, 6, + 7, 8, 9, + 10, 11, 12, + 13, 14, 15, + 16, 17, 18, + 19, 20, 21, + 22, 23, 24 + }, qScale, qOffset); + + std::vector outputExpected = armnnUtils::QuantizedVector({ + 24, 23, 22, + 21, 20, 19, + 18, 17, 16, + 15, 14, 13, + 12, 11, 10, + 9, 8, 7, + 6, 5, 4, + 3, 2, 1 + }, qScale, qOffset); + + return ReverseV2TestImpl(workloadFactory, + memoryManager, + tensorHandleFactory, + descriptor, + input, + outputExpected, + inputInfo, + outputInfo); +} + +template +LayerTestResult ReverseV2EvenRowOddColTest2Dim( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory) +{ + // Simple test with default descriptor. No axes set so output is the same as input + auto descriptor = armnn::ReverseV2Descriptor(std::vector {1}); + + float qScale = 1.0f; + int32_t qOffset = 0; + + armnn::TensorInfo inputInfo({2, 3}, ArmnnType, qScale, qOffset); + armnn::TensorInfo outputInfo({2, 3}, ArmnnType, qScale, qOffset); + + std::vector input = armnnUtils::QuantizedVector({ + 1, 2, 3, + 4, 5, 6 + }, qScale, qOffset); + + std::vector outputExpected = armnnUtils::QuantizedVector({ + 3, 2, 1, + 6, 5, 4 + }, qScale, qOffset); + + return ReverseV2TestImpl(workloadFactory, + memoryManager, + tensorHandleFactory, + descriptor, + input, + outputExpected, + inputInfo, + outputInfo); +} + +template +LayerTestResult ReverseV2EvenRowOddColTest3Dim( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory) +{ + // Simple test with default descriptor. 
No axes set so output is + // the same as input + auto descriptor = armnn::ReverseV2Descriptor(std::vector {1}); + + float qScale = 1.0f; + int32_t qOffset = 0; + + armnn::TensorInfo inputInfo({2, 3, 1}, ArmnnType, qScale, qOffset); + armnn::TensorInfo outputInfo({2, 3, 1}, ArmnnType, qScale, qOffset); + + std::vector input = armnnUtils::QuantizedVector({ + 1, 2, 3, + 4, 5, 6 + }, qScale, qOffset); + + std::vector outputExpected = armnnUtils::QuantizedVector({ + 3, 2, 1, + 6, 5, 4 + }, qScale, qOffset); + + return ReverseV2TestImpl(workloadFactory, + memoryManager, + tensorHandleFactory, + descriptor, + input, + outputExpected, + inputInfo, + outputInfo); +} + +template +LayerTestResult ReverseV2EvenRowEvenColTest2Dim( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory) +{ + // Simple test with default descriptor. No axes set so output is + // the same as input + auto descriptor = armnn::ReverseV2Descriptor(std::vector {1}); + + float qScale = 1.0f; + int32_t qOffset = 0; + + armnn::TensorInfo inputInfo({2, 4}, ArmnnType, qScale, qOffset); + armnn::TensorInfo outputInfo({2, 4}, ArmnnType, qScale, qOffset); + + std::vector input = armnnUtils::QuantizedVector({ + 1, 2, 3, 4, + 5, 6, 7, 8 + }, qScale, qOffset); + + std::vector outputExpected = armnnUtils::QuantizedVector({ + 4, 3, 2, 1, + 8, 7, 6, 5 + }, qScale, qOffset); + + return ReverseV2TestImpl(workloadFactory, + memoryManager, + tensorHandleFactory, + descriptor, + input, + outputExpected, + inputInfo, + outputInfo); +} + +template +LayerTestResult ReverseV2EvenRowEvenColTest3Dim( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory) +{ + // Simple test with default descriptor. No axes set so output is + // the same as input + auto descriptor = armnn::ReverseV2Descriptor(std::vector {1}); + + float qScale = 1.0f; + int32_t qOffset = 0; + + armnn::TensorInfo inputInfo({2, 4, 1}, ArmnnType, qScale, qOffset); + armnn::TensorInfo outputInfo({2, 4, 1}, ArmnnType, qScale, qOffset); + + std::vector input = armnnUtils::QuantizedVector({ + 1, 2, 3, 4, + 5, 6, 7, 8 + }, qScale, qOffset); + + std::vector outputExpected = armnnUtils::QuantizedVector({ + 4, 3, 2, 1, + 8, 7, 6, 5 + }, qScale, qOffset); + + return ReverseV2TestImpl(workloadFactory, + memoryManager, + tensorHandleFactory, + descriptor, + input, + outputExpected, + inputInfo, + outputInfo); +} + +template +LayerTestResult ReverseV2OddRowOddColTest2Dim( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory) +{ + // Simple test with default descriptor. 
No axes set so output is + // the same as input + auto descriptor = armnn::ReverseV2Descriptor(std::vector {1}); + + float qScale = 1.0f; + int32_t qOffset = 0; + + armnn::TensorInfo inputInfo({3, 3}, ArmnnType, qScale, qOffset); + armnn::TensorInfo outputInfo({3, 3}, ArmnnType, qScale, qOffset); + + std::vector input = armnnUtils::QuantizedVector({ + 1, 2, 3, + 4, 5, 6, + 7, 8, 9 + }, qScale, qOffset); + + std::vector outputExpected = armnnUtils::QuantizedVector({ + 3, 2, 1, + 6, 5, 4, + 9, 8, 7 + }, qScale, qOffset); + + return ReverseV2TestImpl(workloadFactory, + memoryManager, + tensorHandleFactory, + descriptor, + input, + outputExpected, + inputInfo, + outputInfo); +} + +template +LayerTestResult ReverseV2OddRowOddColTest3Dim( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory) +{ + // Simple test with default descriptor. No axes set so output is + // the same as input + auto descriptor = armnn::ReverseV2Descriptor(std::vector {1}); + + float qScale = 1.0f; + int32_t qOffset = 0; + + armnn::TensorInfo inputInfo({3, 3, 1}, ArmnnType, qScale, qOffset); + armnn::TensorInfo outputInfo({3, 3, 1}, ArmnnType, qScale, qOffset); + + std::vector input = armnnUtils::QuantizedVector({ + 1, 2, 3, + 4, 5, 6, + 7, 8, 9 + }, qScale, qOffset); + + std::vector outputExpected = armnnUtils::QuantizedVector({ + 3, 2, 1, + 6, 5, 4, + 9, 8, 7 + }, qScale, qOffset); + + return ReverseV2TestImpl(workloadFactory, + memoryManager, + tensorHandleFactory, + descriptor, + input, + outputExpected, + inputInfo, + outputInfo); +} + +template +LayerTestResult ReverseV2OddRowEvenColTest2Dim( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory) +{ + // Simple test with default descriptor. No axes set so output is + // the same as input + auto descriptor = armnn::ReverseV2Descriptor(std::vector {1}); + + float qScale = 1.0f; + int32_t qOffset = 0; + + armnn::TensorInfo inputInfo({3, 4}, ArmnnType, qScale, qOffset); + armnn::TensorInfo outputInfo({3, 4}, ArmnnType, qScale, qOffset); + + std::vector input = armnnUtils::QuantizedVector({ + 1, 2, 3, 4, + 5, 6, 7, 8, + 9, 10, 11, 12 + }, qScale, qOffset); + + std::vector outputExpected = armnnUtils::QuantizedVector({ + 4, 3, 2, 1, + 8, 7, 6, 5, + 12, 11, 10, 9 + }, qScale, qOffset); + + return ReverseV2TestImpl(workloadFactory, + memoryManager, + tensorHandleFactory, + descriptor, + input, + outputExpected, + inputInfo, + outputInfo); +} + +template +LayerTestResult ReverseV2OddRowEvenColTest3Dim( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory) +{ + // Simple test with default descriptor. 
No axes set so output is + // the same as input + auto descriptor = armnn::ReverseV2Descriptor(std::vector {1}); + + float qScale = 1.0f; + int32_t qOffset = 0; + + armnn::TensorInfo inputInfo({3, 4, 1}, ArmnnType, qScale, qOffset); + armnn::TensorInfo outputInfo({3, 4, 1}, ArmnnType, qScale, qOffset); + + std::vector input = armnnUtils::QuantizedVector({ + 1, 2, 3, 4, + 5, 6, 7, 8, + 9, 10, 11, 12 + }, qScale, qOffset); + + std::vector outputExpected = armnnUtils::QuantizedVector({ + 4, 3, 2, 1, + 8, 7, 6, 5, + 12, 11, 10, 9 + }, qScale, qOffset); + + return ReverseV2TestImpl(workloadFactory, + memoryManager, + tensorHandleFactory, + descriptor, + input, + outputExpected, + inputInfo, + outputInfo); +} + +template +LayerTestResult ReverseV2NegAxisTest2Dim1Axis( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory) +{ + // Simple test with default descriptor. No axes set so output is + // the same as input + auto descriptor = armnn::ReverseV2Descriptor(std::vector {-1}); + + float qScale = 1.0f; + int32_t qOffset = 0; + + armnn::TensorInfo inputInfo({2, 4}, ArmnnType, qScale, qOffset); + armnn::TensorInfo outputInfo({2, 4}, ArmnnType, qScale, qOffset); + + std::vector input = armnnUtils::QuantizedVector({ + 1, 2, 3, 4, + 5, 6, 7, 8, + }, qScale, qOffset); + + std::vector outputExpected = armnnUtils::QuantizedVector({ + 4, 3, 2, 1, + 8, 7, 6, 5 + }, qScale, qOffset); + + return ReverseV2TestImpl(workloadFactory, + memoryManager, + tensorHandleFactory, + descriptor, + input, + outputExpected, + inputInfo, + outputInfo); +} + +template +LayerTestResult ReverseV2NegAxisTest3Dim2Axis( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory) +{ + // Simple test with default descriptor. 
No axes set so output is + // the same as input + auto descriptor = armnn::ReverseV2Descriptor(std::vector {1, -1}); + + float qScale = 1.0f; + int32_t qOffset = 0; + + armnn::TensorInfo inputInfo({2, 4, 1}, ArmnnType, qScale, qOffset); + armnn::TensorInfo outputInfo({2, 4, 1}, ArmnnType, qScale, qOffset); + + std::vector input = armnnUtils::QuantizedVector({ + 1, 2, 3, 4, + 5, 6, 7, 8, + }, qScale, qOffset); + + std::vector outputExpected = armnnUtils::QuantizedVector({ + 4, 3, 2, 1, + 8, 7, 6, 5 + }, qScale, qOffset); + + return ReverseV2TestImpl(workloadFactory, + memoryManager, + tensorHandleFactory, + descriptor, + input, + outputExpected, + inputInfo, + outputInfo); +} + +template LayerTestResult, 2> +ReverseV2SimpleTestEmptyAxis( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template LayerTestResult, 2> +ReverseV2SimpleTestEmptyTensor( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template LayerTestResult, 2> +ReverseV2SimpleTest1Dim( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template LayerTestResult, 2> +ReverseV2SimpleTest2Dim1Axis( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template LayerTestResult, 2> +ReverseV2SimpleTest2Dim2Axis( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template LayerTestResult, 2> +ReverseV2SimpleTest3Dim1Axis( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template LayerTestResult, 2> +ReverseV2SimpleTest3Dim2Axis( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template LayerTestResult, 2> +ReverseV2SimpleTest3Dim3Axis( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template LayerTestResult, 2> +ReverseV2SimpleTest4Dim1Axis( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template LayerTestResult, 2> +ReverseV2SimpleTest4Dim2Axis( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template LayerTestResult, 2> +ReverseV2SimpleTest4Dim3Axis( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template LayerTestResult, 2> +ReverseV2SimpleTest4Dim4Axis( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + 
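+// Even and odd row/column, negative-axis and additional data type instantiations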
+template LayerTestResult, 2> +ReverseV2EvenRowOddColTest2Dim( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template LayerTestResult, 2> +ReverseV2EvenRowOddColTest3Dim( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template LayerTestResult, 2> +ReverseV2EvenRowEvenColTest2Dim( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template LayerTestResult, 2> +ReverseV2EvenRowEvenColTest3Dim( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template LayerTestResult, 2> +ReverseV2OddRowOddColTest2Dim( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template LayerTestResult, 2> +ReverseV2OddRowOddColTest3Dim( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template LayerTestResult, 2> +ReverseV2OddRowEvenColTest2Dim( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template LayerTestResult, 2> +ReverseV2OddRowEvenColTest3Dim( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template LayerTestResult, 2> +ReverseV2NegAxisTest2Dim1Axis( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template LayerTestResult, 2> +ReverseV2NegAxisTest3Dim2Axis( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template LayerTestResult, 2> +ReverseV2SimpleTest2Dim2Axis( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template LayerTestResult, 2> +ReverseV2SimpleTest2Dim2Axis( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template LayerTestResult, 2> +ReverseV2SimpleTest2Dim2Axis( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template LayerTestResult, 2> +ReverseV2SimpleTest2Dim2Axis( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); \ No newline at end of file diff --git a/src/backends/backendsCommon/test/layerTests/ReverseV2TestImpl.hpp b/src/backends/backendsCommon/test/layerTests/ReverseV2TestImpl.hpp new file mode 
100644 index 0000000000..a40e7734a0 --- /dev/null +++ b/src/backends/backendsCommon/test/layerTests/ReverseV2TestImpl.hpp @@ -0,0 +1,156 @@ +// +// Copyright © 2023 Arm Ltd and Contributors. All rights reserved. +// SPDX-License-Identifier: MIT +// + +#pragma once + +#include + +#include + +#include + +// Empty test cases + +template> +LayerTestResult ReverseV2SimpleTestEmptyAxis( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template> +LayerTestResult ReverseV2SimpleTestEmptyTensor( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +// Multidimensional input tensor and multi-axis tests +// These test cases are conducted with even-row-even-column input tensors + +template> +LayerTestResult ReverseV2SimpleTest1Dim( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template> +LayerTestResult ReverseV2SimpleTest2Dim1Axis( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template> +LayerTestResult ReverseV2SimpleTest2Dim2Axis( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template> +LayerTestResult ReverseV2SimpleTest3Dim1Axis( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template> +LayerTestResult ReverseV2SimpleTest3Dim2Axis( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template> +LayerTestResult ReverseV2SimpleTest3Dim3Axis( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template> +LayerTestResult ReverseV2SimpleTest4Dim1Axis( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template> +LayerTestResult ReverseV2SimpleTest4Dim2Axis( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template> +LayerTestResult ReverseV2SimpleTest4Dim3Axis( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template> +LayerTestResult ReverseV2SimpleTest4Dim4Axis( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +// Even and odd row number tests +// These tests are conducted with 1-axis input param + +template> +LayerTestResult ReverseV2EvenRowOddColTest2Dim( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, 
+ const armnn::ITensorHandleFactory& tensorHandleFactory); + +template> +LayerTestResult ReverseV2EvenRowOddColTest3Dim( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template> +LayerTestResult ReverseV2EvenRowEvenColTest2Dim( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template> +LayerTestResult ReverseV2EvenRowEvenColTest3Dim( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template> +LayerTestResult ReverseV2OddRowOddColTest2Dim( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template> +LayerTestResult ReverseV2OddRowOddColTest3Dim( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template> +LayerTestResult ReverseV2OddRowEvenColTest2Dim( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template> +LayerTestResult ReverseV2OddRowEvenColTest3Dim( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +// Negative axis input tests with even-row-even-column input + +// one negative axis +template> +LayerTestResult ReverseV2NegAxisTest2Dim1Axis( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +// one negative and one positive axis +template> +LayerTestResult ReverseV2NegAxisTest3Dim2Axis( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); \ No newline at end of file diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp index 81e5c837a5..1d5fab1adc 100644 --- a/src/backends/reference/RefLayerSupport.cpp +++ b/src/backends/reference/RefLayerSupport.cpp @@ -341,6 +341,11 @@ bool RefLayerSupport::IsLayerSupported(const LayerType& type, infos[1], *(PolymorphicDowncast(&descriptor)), reasonIfUnsupported); + case LayerType::ReverseV2: + return IsReverseV2Supported(infos[0], + infos[1], + *(PolymorphicDowncast(&descriptor)), + reasonIfUnsupported); case LayerType::Reduce: return IsReduceSupported(infos[0], infos[1], @@ -2356,6 +2361,36 @@ bool RefLayerSupport::IsResizeSupported(const TensorInfo& input, return supported; } +bool RefLayerSupport::IsReverseV2Supported(const TensorInfo& input, + const TensorInfo& output, + const ReverseV2Descriptor& descriptor, + Optional reasonIfUnsupported) const +{ + IgnoreUnused(descriptor); + bool supported = true; + // ReverseV2 is data type agnostic so it can support all the types in the Reference backend + std::array supportedTypes = + { + DataType::BFloat16, + DataType::Float32, + DataType::Float16, + DataType::QAsymmS8, + DataType::QAsymmU8, + DataType::QSymmS16 + }; 
+
+    supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
+                                  "Reference ReverseV2: input type not supported");
+
+    supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
+                                  "Reference ReverseV2: output type not supported");
+
+    supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
+                                  "Reference ReverseV2: input and output types not matching");
+
+    return supported;
+}
+
 bool RefLayerSupport::IsShapeSupported(const TensorInfo& input,
                                        const TensorInfo& output,
                                        Optional<std::string&> reasonIfUnsupported) const
diff --git a/src/backends/reference/RefLayerSupport.hpp b/src/backends/reference/RefLayerSupport.hpp
index 8e1f68ebfc..0afb9c2c94 100644
--- a/src/backends/reference/RefLayerSupport.hpp
+++ b/src/backends/reference/RefLayerSupport.hpp
@@ -299,6 +299,11 @@
                            const ResizeDescriptor& descriptor,
                            Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    bool IsReverseV2Supported(const TensorInfo& input,
+                              const TensorInfo& output,
+                              const ReverseV2Descriptor& descriptor,
+                              Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const;
+
     bool IsShapeSupported(const TensorInfo& input,
                           const TensorInfo& output,
                           Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
diff --git a/src/backends/reference/RefWorkloadFactory.cpp b/src/backends/reference/RefWorkloadFactory.cpp
index 10f623eef3..7d5f742126 100644
--- a/src/backends/reference/RefWorkloadFactory.cpp
+++ b/src/backends/reference/RefWorkloadFactory.cpp
@@ -560,6 +560,11 @@ std::unique_ptr<IWorkload> RefWorkloadFactory::CreateWorkload(LayerType type,
         auto resizeQueueDescriptor = PolymorphicDowncast<const ResizeQueueDescriptor*>(&descriptor);
         return std::make_unique<RefResizeWorkload>(*resizeQueueDescriptor, info);
     }
+    case LayerType::ReverseV2:
+    {
+        auto reverseV2QueueDescriptor = PolymorphicDowncast<const ReverseV2QueueDescriptor*>(&descriptor);
+        return std::make_unique<RefReverseV2Workload>(*reverseV2QueueDescriptor, info);
+    }
     case LayerType::Shape:
     {
         auto shapeQueueDescriptor = PolymorphicDowncast<const ShapeQueueDescriptor*>(&descriptor);
diff --git a/src/backends/reference/backend.mk b/src/backends/reference/backend.mk
index c23984c3e9..dfafa0ac39 100644
--- a/src/backends/reference/backend.mk
+++ b/src/backends/reference/backend.mk
@@ -1,5 +1,5 @@
 #
-# Copyright © 2017 ARM Ltd. All rights reserved.
+# Copyright © 2017-2023 ARM Ltd and Contributors. All rights reserved.
 # SPDX-License-Identifier: MIT
 #
 
@@ -94,6 +94,7 @@ BACKEND_SOURCES := \
         workloads/RefReduceWorkload.cpp \
         workloads/RefReshapeWorkload.cpp \
         workloads/RefResizeWorkload.cpp \
+        workloads/RefReverseV2Workload.cpp \
         workloads/RefSliceWorkload.cpp \
         workloads/RefSoftmaxWorkload.cpp \
        workloads/RefSpaceToBatchNdWorkload.cpp \
@@ -105,6 +106,7 @@ BACKEND_SOURCES := \
         workloads/RefTransposeWorkload.cpp \
         workloads/RefUnidirectionalSequenceLstmWorkload.cpp \
         workloads/Resize.cpp \
+        workloads/ReverseV2Impl.cpp \
         workloads/Slice.cpp \
         workloads/SpaceToBatchNd.cpp \
         workloads/SpaceToDepth.cpp \
diff --git a/src/backends/reference/test/RefLayerTests.cpp b/src/backends/reference/test/RefLayerTests.cpp
index 6e697723e9..a68775e8e9 100644
--- a/src/backends/reference/test/RefLayerTests.cpp
+++ b/src/backends/reference/test/RefLayerTests.cpp
@@ -1565,6 +1565,37 @@ ARMNN_AUTO_TEST_CASE_WITH_THF(AlignCornersResizeNearestNeighbourUint16Nchw,
                               AlignCornersResizeNearestNeighbourTest<DataType::QSymmS16>,
                               DataLayout::NCHW)
 
+// ReverseV2
+ARMNN_AUTO_TEST_CASE_WITH_THF(ReverseV2SimpleEmptyAxisFloat32, ReverseV2SimpleTestEmptyAxis<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(ReverseV2SimpleEmptyTensorFloat32, ReverseV2SimpleTestEmptyTensor<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(ReverseV2Simple1DimFloat32, ReverseV2SimpleTest1Dim<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(ReverseV2Simple2Dim1AxisFloat32, ReverseV2SimpleTest2Dim1Axis<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(ReverseV2Simple2Dim2AxisFloat32, ReverseV2SimpleTest2Dim2Axis<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(ReverseV2Simple3Dim1AxisFloat32, ReverseV2SimpleTest3Dim1Axis<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(ReverseV2Simple3Dim2AxisFloat32, ReverseV2SimpleTest3Dim2Axis<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(ReverseV2Simple3Dim3AxisFloat32, ReverseV2SimpleTest3Dim3Axis<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(ReverseV2Simple4Dim1AxisFloat32, ReverseV2SimpleTest4Dim1Axis<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(ReverseV2Simple4Dim2AxisFloat32, ReverseV2SimpleTest4Dim2Axis<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(ReverseV2Simple4Dim3AxisFloat32, ReverseV2SimpleTest4Dim3Axis<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(ReverseV2Simple4Dim4AxisFloat32, ReverseV2SimpleTest4Dim4Axis<DataType::Float32>)
+
+ARMNN_AUTO_TEST_CASE_WITH_THF(ReverseV2EvenRowOddCol2DimFloat32, ReverseV2EvenRowOddColTest2Dim<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(ReverseV2EvenRowOddCol3DimFloat32, ReverseV2EvenRowOddColTest3Dim<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(ReverseV2EvenRowEvenCol2DimFloat32, ReverseV2EvenRowEvenColTest2Dim<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(ReverseV2EvenRowEvenCol3DimFloat32, ReverseV2EvenRowEvenColTest3Dim<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(ReverseV2OddRowOddCol2DimFloat32, ReverseV2OddRowOddColTest2Dim<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(ReverseV2OddRowOddCol3DimFloat32, ReverseV2OddRowOddColTest3Dim<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(ReverseV2OddRowEvenCol2DimFloat32, ReverseV2OddRowEvenColTest2Dim<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(ReverseV2OddRowEvenCol3DimFloat32, ReverseV2OddRowEvenColTest3Dim<DataType::Float32>)
+
+ARMNN_AUTO_TEST_CASE_WITH_THF(ReverseV2NegAxis2Dim1AxisFloat32, ReverseV2NegAxisTest2Dim1Axis<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(ReverseV2NegAxis3Dim2AxisFloat32, ReverseV2NegAxisTest3Dim2Axis<DataType::Float32>)
+
+ARMNN_AUTO_TEST_CASE_WITH_THF(ReverseV2Simple2Dim2AxisFloat16, ReverseV2SimpleTest2Dim2Axis<DataType::Float16>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(ReverseV2Simple2Dim2AxisQAsymmS8, ReverseV2SimpleTest2Dim2Axis<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(ReverseV2Simple2Dim2AxisQAsymmU8, ReverseV2SimpleTest2Dim2Axis<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(ReverseV2Simple2Dim2AxisQSymmS16, ReverseV2SimpleTest2Dim2Axis<DataType::QSymmS16>)
+
 // Fake Quantization
 ARMNN_AUTO_TEST_CASE_WITH_THF(FakeQuantization, FakeQuantizationTest)
diff --git a/src/backends/reference/workloads/CMakeLists.txt b/src/backends/reference/workloads/CMakeLists.txt
index 3592f2293d..28f6d2f371 100644
--- a/src/backends/reference/workloads/CMakeLists.txt
+++ b/src/backends/reference/workloads/CMakeLists.txt
@@ -66,6 +66,8 @@ list(APPEND armnnRefBackendWorkloads_sources
     PreluImpl.hpp
     Reduce.cpp
     Reduce.hpp
+    ReverseV2Impl.cpp
+    ReverseV2Impl.hpp
     RefActivationWorkload.cpp
     RefActivationWorkload.hpp
     RefArgMinMaxWorkload.cpp
@@ -161,6 +163,8 @@ list(APPEND armnnRefBackendWorkloads_sources
     RefReshapeWorkload.hpp
     RefResizeWorkload.cpp
     RefResizeWorkload.hpp
+    RefReverseV2Workload.cpp
+    RefReverseV2Workload.hpp
     RefShapeWorkload.hpp
     RefSliceWorkload.cpp
     RefSliceWorkload.hpp
diff --git a/src/backends/reference/workloads/RefReverseV2Workload.cpp b/src/backends/reference/workloads/RefReverseV2Workload.cpp
new file mode 100644
index 0000000000..cd2d9f930b
--- /dev/null
+++ b/src/backends/reference/workloads/RefReverseV2Workload.cpp
@@ -0,0 +1,48 @@
+//
+// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "RefReverseV2Workload.hpp"
+
+#include "ReverseV2Impl.hpp"
+#include "RefWorkloadUtils.hpp"
+#include "Profiling.hpp"
+
+namespace armnn
+{
+
+    RefReverseV2Workload::RefReverseV2Workload(const ReverseV2QueueDescriptor& descriptor, const WorkloadInfo& info)
+        : RefBaseWorkload(descriptor, info)
+    {}
+
+    void RefReverseV2Workload::Execute() const
+    {
+        Execute(m_Data.m_Inputs, m_Data.m_Outputs);
+    }
+
+    void RefReverseV2Workload::ExecuteAsync(ExecutionData& executionData)
+    {
+        WorkingMemDescriptor* workingMemDescriptor = static_cast<WorkingMemDescriptor*>(executionData.m_Data);
+        Execute(workingMemDescriptor->m_Inputs, workingMemDescriptor->m_Outputs);
+    }
+
+    void RefReverseV2Workload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
+    {
+        ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefReverseV2Workload_Execute");
+
+        const TensorInfo& inputInfo = GetTensorInfo(inputs[0]);
+
+        std::unique_ptr<Decoder<float>> inputDecoder = MakeDecoder<float>(GetTensorInfo(inputs[0]),
+                                                                          inputs[0]->Map());
+
+        std::unique_ptr<Encoder<float>> outputEncoder = MakeEncoder<float>(GetTensorInfo(outputs[0]),
+                                                                           outputs[0]->Map());
+
+        ReverseV2(m_Data.m_Parameters,
+                  inputInfo,
+                  *inputDecoder,
+                  *outputEncoder);
+    }
+
+} // namespace armnn
\ No newline at end of file
diff --git a/src/backends/reference/workloads/RefReverseV2Workload.hpp b/src/backends/reference/workloads/RefReverseV2Workload.hpp
new file mode 100644
index 0000000000..89e7c9ea38
--- /dev/null
+++ b/src/backends/reference/workloads/RefReverseV2Workload.hpp
@@ -0,0 +1,30 @@
+//
+// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "RefBaseWorkload.hpp"
+#include <armnn/backends/WorkloadData.hpp>
+
+#include "ReverseV2Impl.hpp"
+
+namespace armnn
+{
+
+    class RefReverseV2Workload : public RefBaseWorkload<ReverseV2QueueDescriptor>
+    {
+    public:
+        explicit RefReverseV2Workload(const ReverseV2QueueDescriptor& descriptor,
+                                      const WorkloadInfo& info);
+
+        void Execute() const override;
+        void ExecuteAsync(ExecutionData& executionData) override;
+
+    private:
+        void Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const;
+
+    };
+
+} // namespace armnn
\ No newline at end of file
diff --git a/src/backends/reference/workloads/RefWorkloads.hpp b/src/backends/reference/workloads/RefWorkloads.hpp
index dba880bafc..e15a7ca047 100644
--- a/src/backends/reference/workloads/RefWorkloads.hpp
+++ b/src/backends/reference/workloads/RefWorkloads.hpp
@@ -53,6 +53,7 @@
 #include "RefReduceWorkload.hpp"
 #include "RefReshapeWorkload.hpp"
 #include "RefResizeWorkload.hpp"
+#include "RefReverseV2Workload.hpp"
 #include "RefShapeWorkload.hpp"
 #include "RefSliceWorkload.hpp"
 #include "RefSplitterWorkload.hpp"
diff --git a/src/backends/reference/workloads/ReverseV2Impl.cpp b/src/backends/reference/workloads/ReverseV2Impl.cpp
new file mode 100644
index 0000000000..f6d5fd74d1
--- /dev/null
+++ b/src/backends/reference/workloads/ReverseV2Impl.cpp
@@ -0,0 +1,133 @@
+//
+// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "ReverseV2Impl.hpp"
+
+#include <armnn/backends/WorkloadData.hpp>
+#include <armnn/Descriptors.hpp>
+#include <armnn/Tensor.hpp>
+
+namespace armnn
+{
+
+// Get multi-dimensional index for input tensor
+std::vector<unsigned int> ReverseGetMultIdx(const unsigned int idx,
+                                            unsigned int inputRank,
+                                            std::vector<unsigned int>& elementNumInner)
+{
+    std::vector<unsigned int> indexList(inputRank);
+
+    unsigned int mIdx = idx;
+
+    for (unsigned int iDim = 0; iDim < inputRank; ++iDim)
+    {
+        indexList[iDim] = static_cast<unsigned int>(mIdx / elementNumInner[iDim]);
+        mIdx %= elementNumInner[iDim];
+    }
+
+    return indexList;
+}
+
+// Get flattened index for output encoder
+unsigned int ReverseGetFlatIdx(const std::vector<unsigned int>& idxList,
+                               unsigned int inputRank,
+                               std::vector<unsigned int>& elementNumInner)
+{
+    unsigned int idx = 0;
+
+    for (unsigned int iDim = 0; iDim < inputRank; ++iDim)
+    {
+        idx += idxList[iDim] * elementNumInner[iDim];
+    }
+
+    return idx;
+}
+
+// Relocate the coordinate to the reversed tensor
+unsigned int ReverseRelocateIdx(unsigned int idx,
+                                unsigned int inputRank,
+                                std::vector<bool>& axisFlag,
+                                std::vector<unsigned int>& dimSize,
+                                std::vector<unsigned int>& elementNumInner)
+{
+    // Get the multidimensional index list for input
+    auto inputIdxList = ReverseGetMultIdx(idx, inputRank, elementNumInner);
+
+    std::vector<unsigned int> outputIdxList(inputRank);
+
+    // Relocate the input index to the output one
+    for (unsigned int iDim = 0; iDim < inputRank; ++iDim)
+    {
+        if (axisFlag[iDim])
+        {
+            outputIdxList[iDim] = dimSize[iDim] - inputIdxList[iDim] - 1;
+        }
+        else
+        {
+            outputIdxList[iDim] = inputIdxList[iDim];
+        }
+    }
+
+    // Get the 1-dimensional flattened index for output
+    unsigned int outputIdx = ReverseGetFlatIdx(outputIdxList, inputRank, elementNumInner);
+    return outputIdx;
+}
+
+void ReverseV2(const ReverseV2Descriptor& params,
+               const TensorInfo& inputInfo,
+               Decoder<float>& inputDecoder,
+               Encoder<float>& outputEncoder)
+{
+    // Empty axis and empty tensor case: copy input to output
+    if (params.m_Axis.empty() || inputInfo.GetNumElements() == 0)
+    {
+        for (unsigned int idx = 0; idx < inputInfo.GetNumElements(); idx++)
+        {
+            float inputValue = inputDecoder.Get();
+            inputDecoder += 1;
+            outputEncoder.Set(inputValue);
+            outputEncoder += 1;
+        }
+        return;
+    }
+
+    unsigned int inputRank = static_cast<unsigned int>(inputInfo.GetNumDimensions());
+
+    std::vector<bool> axisFlag(inputRank, false);
+    std::vector<unsigned int> dimSize(inputRank, 0);
+
+    // Make sure the axes are positive
+    for (int32_t axisElement : params.m_Axis)
+    {
+        axisElement = axisElement < 0 ? axisElement + static_cast<int32_t>(inputRank) : axisElement;
+        axisFlag[static_cast<uint32_t>(axisElement)] = true;
+    }
+
+    const TensorShape& inputShape = inputInfo.GetShape();
+
+    unsigned int elementNum = inputInfo.GetNumElements();
+    unsigned int baseDimSize = 1;
+
+    std::vector<unsigned int> elementNumInner;
+
+    // Get the number of elements within the specific dimension
+    for (unsigned int iDim = 0; iDim < inputRank; ++iDim)
+    {
+        dimSize[iDim] = inputShape[iDim];
+        baseDimSize *= dimSize[iDim];
+        elementNumInner.push_back(static_cast<unsigned int>(elementNum / baseDimSize));
+    }
+
+    // Iterate through all elements
+    for (unsigned int idx = 0; idx < elementNum; ++idx)
+    {
+        float inputValue = inputDecoder.Get();
+        inputDecoder += 1;
+        auto outputIdx = ReverseRelocateIdx(idx, inputRank, axisFlag, dimSize, elementNumInner);
+        // Encoder::operator[] repositions the encoder at the flattened output index before Set()
+        outputEncoder[outputIdx];
+        outputEncoder.Set(inputValue);
+    }
+}
+
+} // namespace armnn
\ No newline at end of file
diff --git a/src/backends/reference/workloads/ReverseV2Impl.hpp b/src/backends/reference/workloads/ReverseV2Impl.hpp
new file mode 100644
index 0000000000..bc1fe1d432
--- /dev/null
+++ b/src/backends/reference/workloads/ReverseV2Impl.hpp
@@ -0,0 +1,21 @@
+//
+// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "Encoders.hpp"
+#include "Decoders.hpp"
+
+#include <armnn/backends/WorkloadData.hpp>
+
+namespace armnn
+{
+
+void ReverseV2(const ReverseV2Descriptor& params,
+               const TensorInfo& inputInfo,
+               Decoder<float>& inputDecoder,
+               Encoder<float>& outputEncoder);
+
+} // namespace armnn
\ No newline at end of file
-- 
cgit v1.2.1
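Editor's note (not part of the patch): the ReverseV2 reference implementation above reverses a tensor by decomposing each flattened input index into per-dimension coordinates, mirroring the coordinates of the reversed axes, and re-flattening the result. The standalone C++ sketch below walks the same arithmetic on a plain std::vector so the index relocation can be followed without the Decoder/Encoder machinery; the function name ReverseAlongAxes is hypothetical and the code is an illustrative sketch, not Arm NN API.

// Illustrative sketch only: reverse a row-major rank-N tensor along selected axes.
#include <cstddef>
#include <vector>

std::vector<float> ReverseAlongAxes(const std::vector<float>& input,
                                    const std::vector<unsigned int>& shape,
                                    const std::vector<bool>& reverseAxis)
{
    const unsigned int rank = static_cast<unsigned int>(shape.size());

    // elementNumInner[d] = number of elements covered by one step of dimension d
    // (same quantity the patch derives from elementNum / baseDimSize).
    std::vector<unsigned int> elementNumInner(rank);
    unsigned int stride = 1;
    for (unsigned int d = rank; d-- > 0;)
    {
        elementNumInner[d] = stride;
        stride *= shape[d];
    }

    std::vector<float> output(input.size());
    for (unsigned int idx = 0; idx < input.size(); ++idx)
    {
        // Decompose the flat index, mirror the reversed dimensions, re-flatten.
        unsigned int remaining = idx;
        unsigned int outIdx = 0;
        for (unsigned int d = 0; d < rank; ++d)
        {
            unsigned int coord = remaining / elementNumInner[d];
            remaining %= elementNumInner[d];
            if (reverseAxis[d])
            {
                coord = shape[d] - coord - 1;
            }
            outIdx += coord * elementNumInner[d];
        }
        output[outIdx] = input[idx];
    }
    return output;
}

// Example: a 2x3 tensor {1,2,3,4,5,6} reversed along axis 1 yields {3,2,1,6,5,4},
// matching what the ReverseV2 reference workload produces for axis = {1} (or {-1}).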