From 282881877522d3e94752dfc0839de9bfa0aa5a81 Mon Sep 17 00:00:00 2001 From: Tianle Cheng Date: Fri, 23 Feb 2024 17:56:54 +0000 Subject: IVGCVSW-8229 & IVGCVSW-8237 ScatterNd: Front end and reference implementation (scatter_nd, scatter_nd_add, and scatter_nd_update, scatter_nd_sub, scatter_nd_min, scatter_nd_max, scatter_nd_mul) * Front end support for ScatterNd added. * Reference implementation for ScatterNd added. * Unit tests added. Signed-off-by: Tianle Cheng Change-Id: I30da9056d9b03ca9b5fb8d09987341128badbcf4 --- Android.mk | 3 +- CMakeLists.txt | 2 + docs/02_operator_list.dox | 59 +- include/armnn/BackendHelper.hpp | 9 +- include/armnn/Descriptors.hpp | 55 +- include/armnn/DescriptorsFwd.hpp | 3 +- include/armnn/INetwork.hpp | 9 +- include/armnn/Types.hpp | 18 +- include/armnn/backends/WorkloadData.hpp | 7 +- src/armnn/BackendHelper.cpp | 19 +- src/armnn/LayersFwd.hpp | 4 +- src/armnn/Network.cpp | 13 +- src/armnn/Network.hpp | 5 +- src/armnn/layers/ScatterNdLayer.cpp | 94 ++ src/armnn/layers/ScatterNdLayer.hpp | 44 + src/backends/backendsCommon/WorkloadData.cpp | 42 + src/backends/backendsCommon/WorkloadFactory.cpp | 17 + src/backends/backendsCommon/common.mk | 3 +- src/backends/backendsCommon/test/CMakeLists.txt | 2 + .../test/IsLayerSupportedTestImpl.hpp | 4 +- src/backends/backendsCommon/test/LayerTests.hpp | 3 +- .../test/layerTests/ScatterNdTestImpl.cpp | 1479 ++++++++++++++++++++ .../test/layerTests/ScatterNdTestImpl.hpp | 156 +++ src/backends/reference/RefLayerSupport.cpp | 65 + src/backends/reference/RefLayerSupport.hpp | 9 +- src/backends/reference/RefWorkloadFactory.cpp | 7 +- src/backends/reference/backend.mk | 4 +- src/backends/reference/test/RefLayerTests.cpp | 54 +- src/backends/reference/workloads/CMakeLists.txt | 6 +- .../reference/workloads/RefScatterNdWorkload.cpp | 100 ++ .../reference/workloads/RefScatterNdWorkload.hpp | 30 + src/backends/reference/workloads/RefWorkloads.hpp | 3 +- src/backends/reference/workloads/ScatterNd.cpp | 336 
+++++ src/backends/reference/workloads/ScatterNd.hpp | 34 + 34 files changed, 2674 insertions(+), 24 deletions(-) create mode 100644 src/armnn/layers/ScatterNdLayer.cpp create mode 100644 src/armnn/layers/ScatterNdLayer.hpp create mode 100644 src/backends/backendsCommon/test/layerTests/ScatterNdTestImpl.cpp create mode 100644 src/backends/backendsCommon/test/layerTests/ScatterNdTestImpl.hpp create mode 100644 src/backends/reference/workloads/RefScatterNdWorkload.cpp create mode 100644 src/backends/reference/workloads/RefScatterNdWorkload.hpp create mode 100644 src/backends/reference/workloads/ScatterNd.cpp create mode 100644 src/backends/reference/workloads/ScatterNd.hpp diff --git a/Android.mk b/Android.mk index 93dcb5c61a..379cce4255 100644 --- a/Android.mk +++ b/Android.mk @@ -1,5 +1,5 @@ # -# Copyright © 2017-2023 ARM Ltd and Contributors. All rights reserved. +# Copyright © 2017-2024 ARM Ltd and Contributors. All rights reserved. # SPDX-License-Identifier: MIT # @@ -267,6 +267,7 @@ LOCAL_SRC_FILES := \ src/armnn/layers/ReshapeLayer.cpp \ src/armnn/layers/ResizeLayer.cpp \ src/armnn/layers/ReverseV2Layer.cpp \ + src/armnn/layers/ScatterNdLayer.cpp \ src/armnn/layers/ShapeLayer.cpp \ src/armnn/layers/SliceLayer.cpp \ src/armnn/layers/SoftmaxLayer.cpp \ diff --git a/CMakeLists.txt b/CMakeLists.txt index e8067350ae..8bf7a40d21 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -310,6 +310,8 @@ list(APPEND armnn_sources src/armnn/layers/ResizeLayer.cpp src/armnn/layers/ReverseV2Layer.hpp src/armnn/layers/ReverseV2Layer.cpp + src/armnn/layers/ScatterNdLayer.cpp + src/armnn/layers/ScatterNdLayer.hpp src/armnn/layers/ShapeLayer.cpp src/armnn/layers/ShapeLayer.hpp src/armnn/layers/SliceLayer.cpp diff --git a/docs/02_operator_list.dox b/docs/02_operator_list.dox index fa386f21bd..6b2442d28f 100644 --- a/docs/02_operator_list.dox +++ b/docs/02_operator_list.dox @@ -1,4 +1,4 @@ -/// Copyright (c) 2021, 2023 ARM Limited and Contributors. All rights reserved. 
+/// Copyright (c) 2021, 2023-2024 ARM Limited and Contributors. All rights reserved. /// /// SPDX-License-Identifier: MIT /// @@ -2930,6 +2930,63 @@ where N = batches, C = channels, H = height, W = width FLOAT16 FLOAT32 + + ScatterLayer + Layer to scatter updates according to individual values at the specified indices. + + + CpuRef + + + + +
+
FLOAT16 +
FLOAT32 +
QASYMMS8 +
QASYMMU8 +
QSYMMS8 +
QSYMMS16 +
SIGNED32 +
+ + CpuAcc + + + + +
+
FLOAT16 +
FLOAT32 +
QASYMMS8 +
QASYMMU8 +
QSYMMS8 +
QSYMMS16 +
SIGNED32 +
+ + GpuAcc + + + + +
+
FLOAT16 +
FLOAT32 +
QASYMMS8 +
QASYMMU8 +
QSYMMS8 +
QSYMMS16 +
SIGNED32 +
ShapeLayer Layer to return the shape of the input tensor. diff --git a/include/armnn/BackendHelper.hpp b/include/armnn/BackendHelper.hpp index b61f010b0f..a6b81eaa01 100644 --- a/include/armnn/BackendHelper.hpp +++ b/include/armnn/BackendHelper.hpp @@ -1,5 +1,5 @@ // -// Copyright © 2017-2019,2021-2023 Arm Ltd and Contributors. All rights reserved. +// Copyright © 2017-2019,2021-2024 Arm Ltd and Contributors. All rights reserved. // SPDX-License-Identifier: MIT // @@ -380,6 +380,13 @@ public: const TensorInfo& output, Optional reasonIfUnsupported = EmptyOptional()); + bool IsScatterNdSupported(const TensorInfo& input, + const TensorInfo& indices, + const TensorInfo& updates, + const TensorInfo& output, + const ScatterNdDescriptor& descriptor, + Optional reasonIfUnsupported = EmptyOptional()); + bool IsSliceSupported(const TensorInfo& input, const TensorInfo& output, const SliceDescriptor& descriptor, diff --git a/include/armnn/Descriptors.hpp b/include/armnn/Descriptors.hpp index bf40b35ae9..7230bc2c1d 100644 --- a/include/armnn/Descriptors.hpp +++ b/include/armnn/Descriptors.hpp @@ -1,5 +1,5 @@ // -// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved. +// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved. // SPDX-License-Identifier: MIT // #pragma once @@ -1675,4 +1675,57 @@ struct BroadcastToDescriptor : BaseDescriptor TensorShape m_BroadcastToShape; }; +/// A ScatterNdDescriptor for the ScatterNdLayer. 
+struct ScatterNdDescriptor : BaseDescriptor +{ + // default constructor + ScatterNdDescriptor() + : m_Function(ScatterNdFunction::Update) + , m_InputEnabled(true) + , m_Axis(0) + , m_AxisEnabled(false) + {} + + // constructor for operators except for ScatterElement operator + ScatterNdDescriptor(ScatterNdFunction function, + bool inputEnabled) + : m_Function(function) + , m_InputEnabled(inputEnabled) + , m_Axis(0) + , m_AxisEnabled(false) + + {} + + // constructor for ScatterElement operator + ScatterNdDescriptor(ScatterNdFunction function, + bool inputEnabled, + int32_t axis) + : m_Function(function) + , m_InputEnabled(inputEnabled) + , m_Axis(axis) + , m_AxisEnabled(true) + + {} + + bool operator ==(const ScatterNdDescriptor &rhs) const + { + return ((m_Function == rhs.m_Function) && + (m_InputEnabled == rhs.m_InputEnabled) && + (m_AxisEnabled == rhs.m_AxisEnabled) && + (m_Axis == rhs.m_Axis)); + } + + /// Specify if the function is update, add, sub, max or min. + ScatterNdFunction m_Function; + + /// Flag to show if input tensor is accepted. + bool m_InputEnabled; + + /// Extra attribute for ScatterElement, will be set to 0 by default, we do not support axis != 0 + int32_t m_Axis; + + /// Flag for ScatterElement, will be set to false by default, we do not support m_AxisEnable = true for now. + bool m_AxisEnabled; +}; + } // namespace armnn diff --git a/include/armnn/DescriptorsFwd.hpp b/include/armnn/DescriptorsFwd.hpp index 4b0b70c2d3..3518a41c42 100644 --- a/include/armnn/DescriptorsFwd.hpp +++ b/include/armnn/DescriptorsFwd.hpp @@ -1,5 +1,5 @@ // -// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved. +// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved. 
// SPDX-License-Identifier: MIT // @@ -44,6 +44,7 @@ struct QLstmDescriptor; struct ReshapeDescriptor; struct ResizeDescriptor; struct ReduceDescriptor; +struct ScatterNdDescriptor; struct SliceDescriptor; struct SoftmaxDescriptor; struct SpaceToBatchNdDescriptor; diff --git a/include/armnn/INetwork.hpp b/include/armnn/INetwork.hpp index 64fdab6bd0..84f3e0cb64 100644 --- a/include/armnn/INetwork.hpp +++ b/include/armnn/INetwork.hpp @@ -1,5 +1,5 @@ // -// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved. +// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved. // SPDX-License-Identifier: MIT // #pragma once @@ -864,6 +864,13 @@ public: IConnectableLayer* AddBroadcastToLayer(const BroadcastToDescriptor& descriptor, const char* name = nullptr); + /// Add a ScatterNd layer to the network + /// @param descriptor - Parameters for the ScatterNd operation + /// @param name - Optional name for the layer + /// @return - Interface for configuring the layer + IConnectableLayer* AddScatterNdLayer(const ScatterNdDescriptor& descriptor, + const char* name = nullptr); + void ExecuteStrategy(IStrategy& strategy) const; protected: diff --git a/include/armnn/Types.hpp b/include/armnn/Types.hpp index d87e7f7147..bbe1ecccbd 100644 --- a/include/armnn/Types.hpp +++ b/include/armnn/Types.hpp @@ -1,5 +1,5 @@ // -// Copyright © 2018-2023 Arm Ltd and Contributors. All rights reserved. +// Copyright © 2018-2024 Arm Ltd and Contributors. All rights reserved. // SPDX-License-Identifier: MIT // #pragma once @@ -482,8 +482,8 @@ using InferenceTimingPair = std::pair; X(ReverseV2) \ X(Tile) \ X(Fused) \ - X(BroadcastTo) \ - + X(BroadcastTo) \ + X(ScatterNd) \ // New layers should be added at last position to minimize instability. 
/// When adding a new layer, adapt also the LastLayer enum value in the @@ -494,7 +494,17 @@ enum class LayerType LIST_OF_LAYER_TYPE #undef X FirstLayer = Activation, - LastLayer = BroadcastTo + LastLayer = ScatterNd +}; + +enum class ScatterNdFunction +{ + Update = 0, + Add = 1, + Sub = 2, + Max = 3, + Min = 4, + Mul = 5 }; const char* GetLayerTypeAsCString(LayerType type); diff --git a/include/armnn/backends/WorkloadData.hpp b/include/armnn/backends/WorkloadData.hpp index a90a1abd65..a93d986e4d 100644 --- a/include/armnn/backends/WorkloadData.hpp +++ b/include/armnn/backends/WorkloadData.hpp @@ -1,5 +1,5 @@ // -// Copyright © 2021-2023 Arm Ltd and Contributors. All rights reserved. +// Copyright © 2021-2024 Arm Ltd and Contributors. All rights reserved. // SPDX-License-Identifier: MIT // #pragma once @@ -770,4 +770,9 @@ struct BroadcastToQueueDescriptor : QueueDescriptorWithParameters +{ + void Validate(const WorkloadInfo& workloadInfo) const; +}; + } // namespace armnn diff --git a/src/armnn/BackendHelper.cpp b/src/armnn/BackendHelper.cpp index 56938d021e..37ad5da2fe 100644 --- a/src/armnn/BackendHelper.cpp +++ b/src/armnn/BackendHelper.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017,2022-2023 Arm Ltd and Contributors. All rights reserved. +// Copyright © 2017,2022-2024 Arm Ltd and Contributors. All rights reserved. 
// SPDX-License-Identifier: MIT // @@ -1276,6 +1276,23 @@ bool LayerSupportHandle::IsReverseV2Supported(const armnn::TensorInfo &input0, reasonIfUnsupported); } +bool LayerSupportHandle::IsScatterNdSupported(const TensorInfo& input, + const TensorInfo& indices, + const TensorInfo& updates, + const TensorInfo& output, + const armnn::ScatterNdDescriptor &descriptor, + Optional reasonIfUnsupported) +{ + TensorInfos infos{input, indices, updates, output}; + + return m_LayerSupport->IsLayerSupported(LayerType::ScatterNd, + infos, + descriptor, + EmptyOptional(), + EmptyOptional(), + reasonIfUnsupported); +} + bool LayerSupportHandle::IsShapeSupported(const TensorInfo& input, const TensorInfo& output, Optional reasonIfUnsupported) diff --git a/src/armnn/LayersFwd.hpp b/src/armnn/LayersFwd.hpp index 325bfc3875..6ba409de50 100644 --- a/src/armnn/LayersFwd.hpp +++ b/src/armnn/LayersFwd.hpp @@ -1,5 +1,5 @@ // -// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved. +// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved. // SPDX-License-Identifier: MIT // #pragma once @@ -67,6 +67,7 @@ #include "layers/ReshapeLayer.hpp" #include "layers/ResizeLayer.hpp" #include "layers/ReverseV2Layer.hpp" +#include "layers/ScatterNdLayer.hpp" #include "layers/ShapeLayer.hpp" #include "layers/SliceLayer.hpp" #include "layers/SoftmaxLayer.hpp" @@ -172,6 +173,7 @@ DECLARE_LAYER(Reduce) DECLARE_LAYER(Reshape) DECLARE_LAYER(Resize) DECLARE_LAYER(ReverseV2) +DECLARE_LAYER(ScatterNd) DECLARE_LAYER(Shape) DECLARE_LAYER(Slice) DECLARE_LAYER(Softmax) diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp index 2582403247..60df27d7fc 100644 --- a/src/armnn/Network.cpp +++ b/src/armnn/Network.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved. +// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved. 
// SPDX-License-Identifier: MIT // @@ -663,6 +663,12 @@ IConnectableLayer* INetwork::AddBroadcastToLayer(const BroadcastToDescriptor& de return pNetworkImpl->AddBroadcastToLayer(descriptor, name); } +IConnectableLayer* INetwork::AddScatterNdLayer(const ScatterNdDescriptor &descriptor, + const char *name) +{ + return pNetworkImpl->AddScatterNdLayer(descriptor, name); +} + void INetwork::ExecuteStrategy(IStrategy& strategy) const { return pNetworkImpl->ExecuteStrategy(strategy); @@ -3085,6 +3091,11 @@ IConnectableLayer* NetworkImpl::AddBroadcastToLayer(const BroadcastToDescriptor return m_Graph->AddLayer(desc, name); } +IConnectableLayer* NetworkImpl::AddScatterNdLayer(const ScatterNdDescriptor &desc, const char *name) +{ + return m_Graph->AddLayer(desc, name); +} + void NetworkImpl::ExecuteStrategy(IStrategy& strategy) const { for (auto layer : GetGraph()) diff --git a/src/armnn/Network.hpp b/src/armnn/Network.hpp index 6ffdfb37a8..195f97e692 100644 --- a/src/armnn/Network.hpp +++ b/src/armnn/Network.hpp @@ -1,5 +1,5 @@ // -// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved. +// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved. // SPDX-License-Identifier: MIT // #pragma once @@ -196,6 +196,9 @@ public: IConnectableLayer* AddReverseV2Layer(const char* name = nullptr); + IConnectableLayer* AddScatterNdLayer(const ScatterNdDescriptor& scatterDescriptor, + const char* name = nullptr); + IConnectableLayer* AddShapeLayer(const char* name = nullptr); IConnectableLayer* AddSliceLayer(const SliceDescriptor& sliceDescriptor, const char* name = nullptr); diff --git a/src/armnn/layers/ScatterNdLayer.cpp b/src/armnn/layers/ScatterNdLayer.cpp new file mode 100644 index 0000000000..a0b270fba5 --- /dev/null +++ b/src/armnn/layers/ScatterNdLayer.cpp @@ -0,0 +1,94 @@ +// +// Copyright © 2024 Arm Ltd and Contributors. All rights reserved. 
+// SPDX-License-Identifier: MIT +// + +#include "ScatterNdLayer.hpp" +#include "LayerCloneBase.hpp" + +#include +#include +#include + +namespace armnn +{ + +ScatterNdLayer::ScatterNdLayer(const ScatterNdDescriptor ¶m, const char* name) + : LayerWithParameters(3, 1, LayerType::ScatterNd, param, name) +{ +} + +std::unique_ptr ScatterNdLayer::CreateWorkload(const armnn::IWorkloadFactory& factory) const +{ + ScatterNdQueueDescriptor descriptor; + SetAdditionalInfo(descriptor); + + return factory.CreateWorkload(LayerType::ScatterNd, descriptor, PrepInfoAndDesc(descriptor)); +} + +ScatterNdLayer* ScatterNdLayer::Clone(Graph& graph) const +{ + auto layer = CloneBase(graph, m_Param, GetName()); + + return std::move(layer); +} + +std::vector ScatterNdLayer::InferOutputShapes(const std::vector& inputShapes) const +{ + const auto inputDims = inputShapes[0].GetNumDimensions(); + + std::vector dimSizes(inputDims); + + for (unsigned i = 0; i < inputDims; ++i) + { + dimSizes[i] = inputShapes[0][i]; + } + + TensorShape outputShape({ inputDims, dimSizes.data() }); + + return std::vector({ outputShape }); +} + +void ScatterNdLayer::ValidateTensorShapesFromInputs() +{ + VerifyLayerConnections(3, CHECK_LOCATION()); + + const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape(); + + VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod); + + if (m_Param.m_InputEnabled) + { + std::vector inferredShapes = InferOutputShapes( + {GetInputSlot(0).GetTensorInfo().GetShape(), + GetInputSlot(1).GetTensorInfo().GetShape(), + GetInputSlot(2).GetTensorInfo().GetShape()}); + + if (inferredShapes.size() != 1) { + throw armnn::LayerValidationException("inferredShape has " + + std::to_string(inferredShapes.size()) + + " elements - should only have 1."); + } + + ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "ScatterLayer"); + } + else + { + // No input tensor, only shape provided via input slot + // In this case, we cannot validate the output 
shape from the input shape, but we can + // validate that the dimensions of shape and output tensor matched + unsigned int shapeDims = GetInputSlot(0).GetTensorInfo().GetNumDimensions(); + unsigned int outputDims = GetOutputSlot(0).GetTensorInfo().GetNumDimensions(); + + if (shapeDims != outputDims) + { + throw armnn::LayerValidationException("shape dimension " + + std::to_string(shapeDims) + + " and output dimension " + + std::to_string(outputDims) + + " are not matched."); + } + } +} + +} // namespace armnn diff --git a/src/armnn/layers/ScatterNdLayer.hpp b/src/armnn/layers/ScatterNdLayer.hpp new file mode 100644 index 0000000000..adad66758a --- /dev/null +++ b/src/armnn/layers/ScatterNdLayer.hpp @@ -0,0 +1,44 @@ +// +// Copyright © 2024 Arm Ltd and Contributors. All rights reserved. +// SPDX-License-Identifier: MIT +// + +#pragma once + +#include "LayerWithParameters.hpp" + +namespace armnn +{ + +/// This layer represents a ScatterNd operator. +class ScatterNdLayer : public LayerWithParameters +{ +public: + /// Makes a workload for the ScatterNd type. + /// @param [in] factory The workload factory which will create the workload. + /// @return A pointer to the created workload, or nullptr if not created. + virtual std::unique_ptr CreateWorkload(const IWorkloadFactory& factory) const override; + + /// Creates a dynamically-allocated copy of this layer. + /// @param [in] graph The graph into which this layer is being cloned. + ScatterNdLayer* Clone(Graph& graph) const override; + + /// Infers the output shapes from given input shapes and layer properties. + /// @param [in] inputShapes The input shapes layer has. + /// @return A vector to the inferred output shape. + std::vector InferOutputShapes(const std::vector& inputShapes) const override; + + /// Check if the input tensor shape(s) + /// will lead to a valid configuration of @ref ScatterNdLayer. + void ValidateTensorShapesFromInputs() override; + +protected: + /// Constructor to create a ScatterNdLayer. 
+ /// @param [in] name Optional name for the layer. + ScatterNdLayer(const ScatterNdDescriptor& param, const char* name); + + /// Default destructor + ~ScatterNdLayer() = default; +}; + +} // namespace armnn diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp index 0ddb4291f1..de985ec28d 100644 --- a/src/backends/backendsCommon/WorkloadData.cpp +++ b/src/backends/backendsCommon/WorkloadData.cpp @@ -4443,4 +4443,46 @@ void BroadcastToQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) cons ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName); } +void ScatterNdQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const +{ + const std::string& descriptorName{"ScatterQueueDescriptor"}; + + ValidateNumInputs(workloadInfo, descriptorName, 3); + ValidateNumOutputs(workloadInfo, descriptorName, 1); + + const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0]; + const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1]; + const TensorInfo& inputTensorInfo2 = workloadInfo.m_InputTensorInfos[2]; + const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0]; + + std::vector supportedTypes = + { + DataType::Float32, + DataType::Float16, + DataType::QAsymmS8, + DataType::QAsymmU8, + DataType::QSymmS8, + DataType::QSymmS16, + DataType::Signed32 + }; + + std::vector indicesSupportedTypes = + { + DataType::Signed32 + }; + + if (m_Parameters.m_InputEnabled) + { + ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName); + } + else + { + ValidateDataTypes(inputTensorInfo0, indicesSupportedTypes, descriptorName); + } + + ValidateDataTypes(inputTensorInfo1, indicesSupportedTypes, descriptorName); + ValidateDataTypes(inputTensorInfo2, supportedTypes, descriptorName); + ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName); +} + } // namespace armnn \ No newline at end of file diff --git a/src/backends/backendsCommon/WorkloadFactory.cpp 
b/src/backends/backendsCommon/WorkloadFactory.cpp index 1f8d4dae1d..b81e9c8e73 100644 --- a/src/backends/backendsCommon/WorkloadFactory.cpp +++ b/src/backends/backendsCommon/WorkloadFactory.cpp @@ -1168,6 +1168,23 @@ bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId, reason); break; } + case LayerType::ScatterNd: + { + auto cLayer = PolymorphicDowncast(&layer); + const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo(); + const TensorInfo& indices = layer.GetInputSlot(1).GetTensorInfo(); + const TensorInfo& updates = layer.GetInputSlot(2).GetTensorInfo(); + const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); + + result = layerSupportObject.IsScatterNdSupported(OverrideDataType(input, dataType), + OverrideDataType(indices, dataType), + OverrideDataType(updates, dataType), + OverrideDataType(output, dataType), + cLayer->GetParameters(), + reason); + + break; + } case LayerType::Shape: { const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo(); diff --git a/src/backends/backendsCommon/common.mk b/src/backends/backendsCommon/common.mk index 4876f02ce0..508d5329ba 100644 --- a/src/backends/backendsCommon/common.mk +++ b/src/backends/backendsCommon/common.mk @@ -1,5 +1,5 @@ # -# Copyright © 2017-2023 ARM Ltd and Contributors. All rights reserved. +# Copyright © 2017-2024 ARM Ltd and Contributors. All rights reserved. 
# SPDX-License-Identifier: MIT # @@ -94,6 +94,7 @@ COMMON_TEST_SOURCES := \ test/layerTests/ResizeTestImpl.cpp \ test/layerTests/ReverseV2TestImpl.cpp \ test/layerTests/RsqrtTestImpl.cpp \ + test/layerTests/ScatterNdTestImpl.cpp \ test/layerTests/SliceTestImpl.cpp \ test/layerTests/SquaredDifferenceTestImpl.cpp \ test/layerTests/QuantizeTestImpl.cpp \ diff --git a/src/backends/backendsCommon/test/CMakeLists.txt b/src/backends/backendsCommon/test/CMakeLists.txt index 7de150dd1d..66bf6a084e 100644 --- a/src/backends/backendsCommon/test/CMakeLists.txt +++ b/src/backends/backendsCommon/test/CMakeLists.txt @@ -181,6 +181,8 @@ list(APPEND armnnBackendsCommonUnitTests_sources layerTests/ReverseV2TestImpl.hpp layerTests/RsqrtTestImpl.cpp layerTests/RsqrtTestImpl.hpp + layerTests/ScatterNdTestImpl.cpp + layerTests/ScatterNdTestImpl.hpp layerTests/SinTestImpl.cpp layerTests/SinTestImpl.hpp layerTests/ShapeTestImpl.cpp diff --git a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp index 9f472e9f28..f9de60d89c 100644 --- a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp +++ b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp @@ -1,5 +1,5 @@ // -// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved. +// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved. // SPDX-License-Identifier: MIT // #pragma once @@ -752,6 +752,8 @@ DECLARE_LAYER_POLICY_2_PARAM(Reshape) DECLARE_LAYER_POLICY_1_PARAM(ReverseV2) +DECLARE_LAYER_POLICY_2_PARAM(ScatterNd) + DECLARE_LAYER_POLICY_1_PARAM(Shape) DECLARE_LAYER_POLICY_2_PARAM(Slice) diff --git a/src/backends/backendsCommon/test/LayerTests.hpp b/src/backends/backendsCommon/test/LayerTests.hpp index 015d25ef3e..3268a07810 100644 --- a/src/backends/backendsCommon/test/LayerTests.hpp +++ b/src/backends/backendsCommon/test/LayerTests.hpp @@ -1,5 +1,5 @@ // -// Copyright © 2017, 2023 Arm Ltd and Contributors. 
All rights reserved. +// Copyright © 2017, 2023-2024 Arm Ltd and Contributors. All rights reserved. // SPDX-License-Identifier: MIT // @@ -64,6 +64,7 @@ #include #include #include +#include #include #include #include diff --git a/src/backends/backendsCommon/test/layerTests/ScatterNdTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ScatterNdTestImpl.cpp new file mode 100644 index 0000000000..44203b062f --- /dev/null +++ b/src/backends/backendsCommon/test/layerTests/ScatterNdTestImpl.cpp @@ -0,0 +1,1479 @@ +// +// Copyright © 2024 Arm Ltd and Contributors. All rights reserved. +// SPDX-License-Identifier: MIT +// + +#include "ScatterNdTestImpl.hpp" + +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +namespace +{ +template +LayerTestResult ScatterNdTestImpl( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory, + const std::vector& input, + const std::vector& indices, + const std::vector& updates, + const std::vector& outputExpected, + const armnn::TensorInfo& inputInfo, + const armnn::TensorInfo& indicesInfo, + const armnn::TensorInfo& updatesInfo, + const armnn::TensorInfo& outputInfo, + const armnn::ScatterNdDescriptor &descriptor) +{ + LayerTestResult result(outputInfo); + std::vector outputActual(outputInfo.GetNumElements()); + + std::unique_ptr inputHandle = tensorHandleFactory.CreateTensorHandle(inputInfo); + std::unique_ptr indicesHandle = tensorHandleFactory.CreateTensorHandle(indicesInfo); + std::unique_ptr updatesHandle = tensorHandleFactory.CreateTensorHandle(updatesInfo); + std::unique_ptr outputHandle = tensorHandleFactory.CreateTensorHandle(outputInfo); + + armnn::ScatterNdQueueDescriptor queueDescriptor; + queueDescriptor.m_Parameters = descriptor; + + armnn::WorkloadInfo workloadInfo; + + AddInputToWorkload(queueDescriptor, workloadInfo, inputInfo, inputHandle.get()); + 
AddInputToWorkload(queueDescriptor, workloadInfo, indicesInfo, indicesHandle.get()); + AddInputToWorkload(queueDescriptor, workloadInfo, updatesInfo, updatesHandle.get()); + AddOutputToWorkload(queueDescriptor, workloadInfo, outputInfo, outputHandle.get()); + + // Don't execute if ScatterNd is not supported, as an exception will be raised. + const armnn::BackendId& backend = workloadFactory.GetBackendId(); + std::string reasonIfUnsupported; + armnn::LayerSupportHandle handle = armnn::GetILayerSupportByBackendId(backend); + result.m_Supported = handle.IsScatterNdSupported(inputInfo, + indicesInfo, + updatesInfo, + outputInfo, + descriptor, + reasonIfUnsupported); + if (!result.m_Supported) + { + return result; + } + + auto workload = workloadFactory.CreateWorkload(armnn::LayerType::ScatterNd, queueDescriptor, workloadInfo); + + inputHandle->Allocate(); + indicesHandle->Allocate(); + updatesHandle->Allocate(); + outputHandle->Allocate(); + + if (input.data() != nullptr) + { + CopyDataToITensorHandle(inputHandle.get(), input.data()); + } + if (indices.data() != nullptr) + { + CopyDataToITensorHandle(indicesHandle.get(), indices.data()); + } + if (updates.data() != nullptr) + { + CopyDataToITensorHandle(updatesHandle.get(), updates.data()); + } + + workload->PostAllocationConfigure(); + ExecuteWorkload(*workload, memoryManager); + + if (outputActual.data() != nullptr) + { + CopyDataFromITensorHandle(outputActual.data(), outputHandle.get()); + } + + return LayerTestResult(outputActual, + outputExpected, + outputHandle->GetShape(), + outputInfo.GetShape()); + +} +} + +template +LayerTestResult ScatterNd1DimUpdateWithInput( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory) +{ + float qScale = 1.0f; + int32_t qOffset = 0; + + armnn::TensorInfo inputInfo({5}, ArmnnType, qScale, qOffset); + armnn::TensorInfo indicesInfo({3, 1}, 
armnn::DataType::Signed32, qScale, qOffset); + armnn::TensorInfo updatesInfo({3}, ArmnnType, qScale, qOffset); + armnn::TensorInfo outputInfo({5}, ArmnnType, qScale, qOffset); + + std::vector input = armnnUtils::QuantizedVector({ 0, 0, 0, 0, 0 }, qScale, qOffset); + + std::vector indices = armnnUtils::QuantizedVector({0, 1, 2}, qScale, qOffset); + + std::vector updates = armnnUtils::QuantizedVector({ 1, 2, 3 }, qScale, qOffset); + + std::vector outputExpected = armnnUtils::QuantizedVector({ 1, 2, 3, 0, 0 }, qScale, qOffset); + + armnn::ScatterNdDescriptor descriptor(armnn::ScatterNdFunction::Update, true); + + return ScatterNdTestImpl(workloadFactory, + memoryManager, + tensorHandleFactory, + input, + indices, + updates, + outputExpected, + inputInfo, + indicesInfo, + updatesInfo, + outputInfo, + descriptor); +} + +template +LayerTestResult ScatterNd1DimUpdateNoInput( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory) +{ + float qScale = 1.0f; + int32_t qOffset = 0; + + armnn::TensorInfo shapeInfo({1}, armnn::DataType::Signed32, qScale, qOffset); + armnn::TensorInfo indicesInfo({3, 1}, armnn::DataType::Signed32, qScale, qOffset); + armnn::TensorInfo updatesInfo({3}, ArmnnType, qScale, qOffset); + armnn::TensorInfo outputInfo({5}, ArmnnType, qScale, qOffset); + + std::vector shape = armnnUtils::QuantizedVector({ 5 }, qScale, qOffset); + + std::vector indices = armnnUtils::QuantizedVector({ 0, 1, 2 }, qScale, qOffset); + + std::vector updates = armnnUtils::QuantizedVector({ 1, 2, 3 }, qScale, qOffset); + + std::vector outputExpected = armnnUtils::QuantizedVector({ 1, 2, 3, 0, 0 }, qScale, qOffset); + + armnn::ScatterNdDescriptor descriptor(armnn::ScatterNdFunction::Update, false); + + return ScatterNdTestImpl(workloadFactory, + memoryManager, + tensorHandleFactory, + shape, + indices, + updates, + outputExpected, + shapeInfo, + indicesInfo, + 
updatesInfo, + outputInfo, + descriptor); +} + +template +LayerTestResult ScatterNd2DimUpdateWithInput( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory) +{ + float qScale = 1.0f; + int32_t qOffset = 0; + + armnn::TensorInfo inputInfo({3, 3}, ArmnnType, qScale, qOffset); + armnn::TensorInfo indicesInfo({3, 2}, armnn::DataType::Signed32, qScale, qOffset); + armnn::TensorInfo updatesInfo({3}, ArmnnType, qScale, qOffset); + armnn::TensorInfo outputInfo({3, 3}, ArmnnType, qScale, qOffset); + + std::vector input = armnnUtils::QuantizedVector({ 1, 1, 1, + 1, 1, 1, + 1, 1, 1 }, qScale, qOffset); + + std::vector indices = armnnUtils::QuantizedVector({ 0, 0, + 1, 1, + 2, 2 }, qScale, qOffset); + + std::vector updates = armnnUtils::QuantizedVector({ 1, 2, 3 }, qScale, qOffset); + + std::vector outputExpected = armnnUtils::QuantizedVector({ 1, 1, 1, + 1, 2, 1, + 1, 1, 3 }, qScale, qOffset); + + armnn::ScatterNdDescriptor descriptor(armnn::ScatterNdFunction::Update, true); + + return ScatterNdTestImpl(workloadFactory, + memoryManager, + tensorHandleFactory, + input, + indices, + updates, + outputExpected, + inputInfo, + indicesInfo, + updatesInfo, + outputInfo, + descriptor); +} + +template +LayerTestResult ScatterNd2DimUpdateNoInput( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory) +{ + float qScale = 1.0f; + int32_t qOffset = 0; + + armnn::TensorInfo shapeInfo({2}, armnn::DataType::Signed32, qScale, qOffset); + armnn::TensorInfo indicesInfo({3, 2}, armnn::DataType::Signed32, qScale, qOffset); + armnn::TensorInfo updatesInfo({3}, ArmnnType, qScale, qOffset); + armnn::TensorInfo outputInfo({3, 3}, ArmnnType, qScale, qOffset); + + std::vector shape = armnnUtils::QuantizedVector({ 3, 3 }, qScale, qOffset); + + std::vector indices = 
armnnUtils::QuantizedVector({ 0, 0, + 1, 1, + 2, 2 }, qScale, qOffset); + + std::vector updates = armnnUtils::QuantizedVector({ 1, 2, 3 }, qScale, qOffset); + + std::vector outputExpected = armnnUtils::QuantizedVector({ 1, 0, 0, + 0, 2, 0, + 0, 0, 3 }, qScale, qOffset); + + armnn::ScatterNdDescriptor descriptor(armnn::ScatterNdFunction::Update, false); + + return ScatterNdTestImpl(workloadFactory, + memoryManager, + tensorHandleFactory, + shape, + indices, + updates, + outputExpected, + shapeInfo, + indicesInfo, + updatesInfo, + outputInfo, + descriptor); +} + +template +LayerTestResult ScatterNd2Dim1Outter1InnerUpdateWithInput( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory) +{ + float qScale = 1.0f; + int32_t qOffset = 0; + + armnn::TensorInfo inputInfo({3, 3}, ArmnnType, qScale, qOffset); + armnn::TensorInfo indicesInfo({2, 1}, armnn::DataType::Signed32, qScale, qOffset); + armnn::TensorInfo updatesInfo({2, 3}, ArmnnType, qScale, qOffset); + armnn::TensorInfo outputInfo({3, 3}, ArmnnType, qScale, qOffset); + + std::vector input = armnnUtils::QuantizedVector({ 1, 1, 1, + 1, 1, 1, + 1, 1, 1 }, qScale, qOffset); + + std::vector indices = armnnUtils::QuantizedVector({ 0, 1 }, qScale, qOffset); + + std::vector updates = armnnUtils::QuantizedVector({ 1, 1, 1, + 2, 2, 2 }, qScale, qOffset); + + std::vector outputExpected = armnnUtils::QuantizedVector({ 1, 1, 1, + 2, 2, 2, + 1, 1, 1 }, qScale, qOffset); + + armnn::ScatterNdDescriptor descriptor(armnn::ScatterNdFunction::Update, true); + + return ScatterNdTestImpl(workloadFactory, + memoryManager, + tensorHandleFactory, + input, + indices, + updates, + outputExpected, + inputInfo, + indicesInfo, + updatesInfo, + outputInfo, + descriptor); +} + +template +LayerTestResult ScatterNd2Dim1Outter1InnerUpdateNoInput( + armnn::IWorkloadFactory& workloadFactory, + const 
armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory) +{ + float qScale = 1.0f; + int32_t qOffset = 0; + + armnn::TensorInfo shapeInfo({2}, armnn::DataType::Signed32, qScale, qOffset); + armnn::TensorInfo indicesInfo({2, 1}, armnn::DataType::Signed32, qScale, qOffset); + armnn::TensorInfo updatesInfo({2, 3}, ArmnnType, qScale, qOffset); + armnn::TensorInfo outputInfo({3, 3}, ArmnnType, qScale, qOffset); + + std::vector shape = armnnUtils::QuantizedVector({ 3, 3 }, qScale, qOffset); + + std::vector indices = armnnUtils::QuantizedVector({ 0, 1 }, qScale, qOffset); + + std::vector updates = armnnUtils::QuantizedVector({ 1, 1, 1, + 1, 1, 1 }, qScale, qOffset); + + std::vector outputExpected = armnnUtils::QuantizedVector({ 1, 1, 1, + 1, 1, 1, + 0, 0, 0 }, qScale, qOffset); + + armnn::ScatterNdDescriptor descriptor(armnn::ScatterNdFunction::Update, false); + + return ScatterNdTestImpl(workloadFactory, + memoryManager, + tensorHandleFactory, + shape, + indices, + updates, + outputExpected, + shapeInfo, + indicesInfo, + updatesInfo, + outputInfo, + descriptor); +} + +template +LayerTestResult ScatterNd3DimUpdateWithInput( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory) +{ + float qScale = 1.0f; + int32_t qOffset = 0; + + armnn::TensorInfo inputInfo({3, 3, 3}, ArmnnType, qScale, qOffset); + armnn::TensorInfo indicesInfo({3, 3}, armnn::DataType::Signed32, qScale, qOffset); + armnn::TensorInfo updatesInfo({3}, ArmnnType, qScale, qOffset); + armnn::TensorInfo outputInfo({3, 3, 3}, ArmnnType, qScale, qOffset); + + std::vector input = armnnUtils::QuantizedVector({ 0, 0, 0, + 0, 0, 0, + 0, 0, 0, + + 0, 0, 0, + 0, 0, 0, + 0, 0, 0, + + 0, 0, 0, + 0, 0, 0, + 0, 0, 0 }, qScale, qOffset); + + std::vector indices = armnnUtils::QuantizedVector({ 0, 0, 0, + 1, 1, 1, + 2, 2, 2 }, qScale, 
qOffset); + + std::vector updates = armnnUtils::QuantizedVector({ 1, 2, 3 }, qScale, qOffset); + + std::vector outputExpected = armnnUtils::QuantizedVector({ 1, 0, 0, + 0, 0, 0, + 0, 0, 0, + + 0, 0, 0, + 0, 2, 0, + 0, 0, 0, + + 0, 0, 0, + 0, 0, 0, + 0, 0, 3 }, qScale, qOffset); + + armnn::ScatterNdDescriptor descriptor(armnn::ScatterNdFunction::Update, true); + + return ScatterNdTestImpl(workloadFactory, + memoryManager, + tensorHandleFactory, + input, + indices, + updates, + outputExpected, + inputInfo, + indicesInfo, + updatesInfo, + outputInfo, + descriptor); +} + +template +LayerTestResult ScatterNd3DimUpdateNoInput( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory) +{ + float qScale = 1.0f; + int32_t qOffset = 0; + + armnn::TensorInfo shapeInfo({3}, armnn::DataType::Signed32, qScale, qOffset); + armnn::TensorInfo indicesInfo({3, 3}, armnn::DataType::Signed32, qScale, qOffset); + armnn::TensorInfo updatesInfo({3}, ArmnnType, qScale, qOffset); + armnn::TensorInfo outputInfo({3, 3, 3}, ArmnnType, qScale, qOffset); + + std::vector shape = armnnUtils::QuantizedVector({ 3, 3, 3 }, qScale, qOffset); + + std::vector indices = armnnUtils::QuantizedVector({ 0, 0, 0, + 1, 1, 1, + 2, 2, 2 }, qScale, qOffset); + + std::vector updates = armnnUtils::QuantizedVector({ 1, 2, 3 }, qScale, qOffset); + + std::vector outputExpected = armnnUtils::QuantizedVector({ 1, 0, 0, + 0, 0, 0, + 0, 0, 0, + + 0, 0, 0, + 0, 2, 0, + 0, 0, 0, + + 0, 0, 0, + 0, 0, 0, + 0, 0, 3 }, qScale, qOffset); + + armnn::ScatterNdDescriptor descriptor(armnn::ScatterNdFunction::Update, false); + + return ScatterNdTestImpl(workloadFactory, + memoryManager, + tensorHandleFactory, + shape, + indices, + updates, + outputExpected, + shapeInfo, + indicesInfo, + updatesInfo, + outputInfo, + descriptor); +} + +template +LayerTestResult ScatterNd3Dim1Outter2InnerUpdateWithInput( + 
armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory) +{ + float qScale = 1.0f; + int32_t qOffset = 0; + + armnn::TensorInfo inputInfo({3, 3, 3}, ArmnnType, qScale, qOffset); + armnn::TensorInfo indicesInfo({2, 1}, armnn::DataType::Signed32, qScale, qOffset); + armnn::TensorInfo updatesInfo({2, 3, 3}, ArmnnType, qScale, qOffset); + armnn::TensorInfo outputInfo({3, 3, 3}, ArmnnType, qScale, qOffset); + + std::vector input = armnnUtils::QuantizedVector({ 0, 0, 0, + 0, 0, 0, + 0, 0, 0, + + 0, 0, 0, + 0, 0, 0, + 0, 0, 0, + + 0, 0, 0, + 0, 0, 0, + 0, 0, 0 }, qScale, qOffset); + + std::vector indices = armnnUtils::QuantizedVector({ 0, 1 }, qScale, qOffset); + + std::vector updates = armnnUtils::QuantizedVector({ 1, 1, 1, + 1, 1, 1, + 1, 1, 1, + + 2, 2, 2, + 2, 2, 2, + 2, 2, 2 }, qScale, qOffset); + + std::vector outputExpected = armnnUtils::QuantizedVector({ 1, 1, 1, + 1, 1, 1, + 1, 1, 1, + + 2, 2, 2, + 2, 2, 2, + 2, 2, 2, + + 0, 0, 0, + 0, 0, 0, + 0, 0, 0 }, qScale, qOffset); + + armnn::ScatterNdDescriptor descriptor(armnn::ScatterNdFunction::Update, true); + + return ScatterNdTestImpl(workloadFactory, + memoryManager, + tensorHandleFactory, + input, + indices, + updates, + outputExpected, + inputInfo, + indicesInfo, + updatesInfo, + outputInfo, + descriptor); +} + +template +LayerTestResult ScatterNd3Dim1Outter2InnerUpdateNoInput( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory) +{ + float qScale = 1.0f; + int32_t qOffset = 0; + + armnn::TensorInfo shapeInfo({3}, armnn::DataType::Signed32, qScale, qOffset); + armnn::TensorInfo indicesInfo({2, 1}, armnn::DataType::Signed32, qScale, qOffset); + armnn::TensorInfo updatesInfo({2, 3, 3}, ArmnnType, qScale, qOffset); + armnn::TensorInfo outputInfo({3, 3, 3}, ArmnnType, qScale, 
qOffset); + + std::vector shape = armnnUtils::QuantizedVector({ 3, 3, 3 }, qScale, qOffset); + + std::vector indices = armnnUtils::QuantizedVector({ 0, 1 }, qScale, qOffset); + + std::vector updates = armnnUtils::QuantizedVector({ 1, 1, 1, + 1, 1, 1, + 1, 1, 1, + + 2, 2, 2, + 2, 2, 2, + 2, 2, 2 }, qScale, qOffset); + + std::vector outputExpected = armnnUtils::QuantizedVector({ 1, 1, 1, + 1, 1, 1, + 1, 1, 1, + + 2, 2, 2, + 2, 2, 2, + 2, 2, 2, + + 0, 0, 0, + 0, 0, 0, + 0, 0, 0 }, qScale, qOffset); + + armnn::ScatterNdDescriptor descriptor(armnn::ScatterNdFunction::Update, false); + + return ScatterNdTestImpl(workloadFactory, + memoryManager, + tensorHandleFactory, + shape, + indices, + updates, + outputExpected, + shapeInfo, + indicesInfo, + updatesInfo, + outputInfo, + descriptor); +} + +template +LayerTestResult ScatterNd3Dim2Outter1InnerUpdateWithInput( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory) +{ + float qScale = 1.0f; + int32_t qOffset = 0; + + armnn::TensorInfo inputInfo({3, 3, 3}, ArmnnType, qScale, qOffset); + armnn::TensorInfo indicesInfo({2, 2}, armnn::DataType::Signed32, qScale, qOffset); + armnn::TensorInfo updatesInfo({2, 3}, ArmnnType, qScale, qOffset); + armnn::TensorInfo outputInfo({3, 3, 3}, ArmnnType, qScale, qOffset); + + std::vector input = armnnUtils::QuantizedVector({ 0, 0, 0, + 0, 0, 0, + 0, 0, 0, + + 0, 0, 0, + 0, 0, 0, + 0, 0, 0, + + 0, 0, 0, + 0, 0, 0, + 0, 0, 0 }, qScale, qOffset); + + std::vector indices = armnnUtils::QuantizedVector({ 0, 0, + 1, 1, }, qScale, qOffset); + + std::vector updates = armnnUtils::QuantizedVector({ 1, 1, 1, + 2, 2, 2 }, qScale, qOffset); + + std::vector outputExpected = armnnUtils::QuantizedVector({ 1, 1, 1, + 0, 0, 0, + 0, 0, 0, + + 0, 0, 0, + 2, 2, 2, + 0, 0, 0, + + 0, 0, 0, + 0, 0, 0, + 0, 0, 0 }, qScale, qOffset); + + armnn::ScatterNdDescriptor 
descriptor(armnn::ScatterNdFunction::Update, true); + + return ScatterNdTestImpl(workloadFactory, + memoryManager, + tensorHandleFactory, + input, + indices, + updates, + outputExpected, + inputInfo, + indicesInfo, + updatesInfo, + outputInfo, + descriptor); +} + +template +LayerTestResult ScatterNd3Dim2Outter1InnerUpdateNoInput( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory) +{ + float qScale = 1.0f; + int32_t qOffset = 0; + + armnn::TensorInfo shapeInfo({3}, armnn::DataType::Signed32, qScale, qOffset); + armnn::TensorInfo indicesInfo({2, 2}, armnn::DataType::Signed32, qScale, qOffset); + armnn::TensorInfo updatesInfo({2, 3}, ArmnnType, qScale, qOffset); + armnn::TensorInfo outputInfo({3, 3, 3}, ArmnnType, qScale, qOffset); + + std::vector shape = armnnUtils::QuantizedVector({ 3, 3, 3 }, qScale, qOffset); + + std::vector indices = armnnUtils::QuantizedVector({ 0, 0, + 1, 1, }, qScale, qOffset); + + std::vector updates = armnnUtils::QuantizedVector({ 1, 1, 1, + 2, 2, 2 }, qScale, qOffset); + + std::vector outputExpected = armnnUtils::QuantizedVector({ 1, 1, 1, + 0, 0, 0, + 0, 0, 0, + + 0, 0, 0, + 2, 2, 2, + 0, 0, 0, + + 0, 0, 0, + 0, 0, 0, + 0, 0, 0 }, qScale, qOffset); + + armnn::ScatterNdDescriptor descriptor(armnn::ScatterNdFunction::Update, false); + + return ScatterNdTestImpl(workloadFactory, + memoryManager, + tensorHandleFactory, + shape, + indices, + updates, + outputExpected, + shapeInfo, + indicesInfo, + updatesInfo, + outputInfo, + descriptor); +} + +template +LayerTestResult ScatterNd4DimUpdateWithInput( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory) +{ + float qScale = 1.0f; + int32_t qOffset = 0; + + armnn::TensorInfo inputInfo({2, 3, 3, 3}, ArmnnType, qScale, qOffset); + armnn::TensorInfo 
indicesInfo({3, 4}, armnn::DataType::Signed32, qScale, qOffset); + armnn::TensorInfo updatesInfo({3}, ArmnnType, qScale, qOffset); + armnn::TensorInfo outputInfo({2, 3, 3, 3}, ArmnnType, qScale, qOffset); + + std::vector input = armnnUtils::QuantizedVector({ 0, 0, 0, + 0, 0, 0, + 0, 0, 0, + + 0, 0, 0, + 0, 0, 0, + 0, 0, 0, + + 0, 0, 0, + 0, 0, 0, + 0, 0, 0, + + 0, 0, 0, + 0, 0, 0, + 0, 0, 0, + + 0, 0, 0, + 0, 0, 0, + 0, 0, 0, + + 0, 0, 0, + 0, 0, 0, + 0, 0, 0 }, qScale, qOffset); + + std::vector indices = armnnUtils::QuantizedVector({ 0, 0, 0, 0, + 0, 1, 1, 1, + 1, 1, 1, 1 }, qScale, qOffset); + + std::vector updates = armnnUtils::QuantizedVector({ 1, 2, 3 }, qScale, qOffset); + + std::vector outputExpected = armnnUtils::QuantizedVector({ 1, 0, 0, + 0, 0, 0, + 0, 0, 0, + + 0, 0, 0, + 0, 2, 0, + 0, 0, 0, + + 0, 0, 0, + 0, 0, 0, + 0, 0, 0, + + 0, 0, 0, + 0, 0, 0, + 0, 0, 0, + + 0, 0, 0, + 0, 3, 0, + 0, 0, 0, + + 0, 0, 0, + 0, 0, 0, + 0, 0, 0 }, qScale, qOffset); + + armnn::ScatterNdDescriptor descriptor(armnn::ScatterNdFunction::Update, true); + + return ScatterNdTestImpl(workloadFactory, + memoryManager, + tensorHandleFactory, + input, + indices, + updates, + outputExpected, + inputInfo, + indicesInfo, + updatesInfo, + outputInfo, + descriptor); +} + +template +LayerTestResult ScatterNd4DimUpdateNoInput( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory) +{ + float qScale = 1.0f; + int32_t qOffset = 0; + + armnn::TensorInfo shapeInfo({4}, armnn::DataType::Signed32, qScale, qOffset); + armnn::TensorInfo indicesInfo({3, 4}, armnn::DataType::Signed32, qScale, qOffset); + armnn::TensorInfo updatesInfo({3}, ArmnnType, qScale, qOffset); + armnn::TensorInfo outputInfo({2, 3, 3, 3}, ArmnnType, qScale, qOffset); + + std::vector shape = armnnUtils::QuantizedVector({ 2, 3, 3, 3 }, qScale, qOffset); + + std::vector indices = armnnUtils::QuantizedVector({ 
0, 0, 0, 0, + 0, 1, 1, 1, + 1, 1, 1, 1 }, qScale, qOffset); + + std::vector updates = armnnUtils::QuantizedVector({ 1, 2, 3 }, qScale, qOffset); + + std::vector outputExpected = armnnUtils::QuantizedVector({ 1, 0, 0, + 0, 0, 0, + 0, 0, 0, + + 0, 0, 0, + 0, 2, 0, + 0, 0, 0, + + 0, 0, 0, + 0, 0, 0, + 0, 0, 0, + + 0, 0, 0, + 0, 0, 0, + 0, 0, 0, + + 0, 0, 0, + 0, 3, 0, + 0, 0, 0, + + 0, 0, 0, + 0, 0, 0, + 0, 0, 0 }, qScale, qOffset); + + armnn::ScatterNdDescriptor descriptor(armnn::ScatterNdFunction::Update, false); + + return ScatterNdTestImpl(workloadFactory, + memoryManager, + tensorHandleFactory, + shape, + indices, + updates, + outputExpected, + shapeInfo, + indicesInfo, + updatesInfo, + outputInfo, + descriptor); +} + +template +LayerTestResult ScatterNd2DimAddWithInput( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory) +{ + float qScale = 1.0f; + int32_t qOffset = 0; + + armnn::TensorInfo inputInfo({3, 3}, ArmnnType, qScale, qOffset); + armnn::TensorInfo indicesInfo({3, 2}, armnn::DataType::Signed32, qScale, qOffset); + armnn::TensorInfo updatesInfo({3}, ArmnnType, qScale, qOffset); + armnn::TensorInfo outputInfo({3, 3}, ArmnnType, qScale, qOffset); + + std::vector input = armnnUtils::QuantizedVector({ 1, 1, 1, + 1, 1, 1, + 1, 1, 1 }, qScale, qOffset); + + std::vector indices = armnnUtils::QuantizedVector({ 0, 0, + 1, 1, + 2, 2 }, qScale, qOffset); + + std::vector updates = armnnUtils::QuantizedVector({ 1, 2, 3 }, qScale, qOffset); + + std::vector outputExpected = armnnUtils::QuantizedVector({ 2, 1, 1, + 1, 3, 1, + 1, 1, 4 }, qScale, qOffset); + + armnn::ScatterNdDescriptor descriptor(armnn::ScatterNdFunction::Add, true); + + return ScatterNdTestImpl(workloadFactory, + memoryManager, + tensorHandleFactory, + input, + indices, + updates, + outputExpected, + inputInfo, + indicesInfo, + updatesInfo, + outputInfo, + descriptor); +} + +template 
+LayerTestResult ScatterNd2DimAddNoInput( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory) +{ + float qScale = 1.0f; + int32_t qOffset = 0; + + armnn::TensorInfo shapeInfo({2}, armnn::DataType::Signed32, qScale, qOffset); + armnn::TensorInfo indicesInfo({3, 2}, armnn::DataType::Signed32, qScale, qOffset); + armnn::TensorInfo updatesInfo({3}, ArmnnType, qScale, qOffset); + armnn::TensorInfo outputInfo({3, 3}, ArmnnType, qScale, qOffset); + + std::vector shape = armnnUtils::QuantizedVector({ 3, 3 }, qScale, qOffset); + + std::vector indices = armnnUtils::QuantizedVector({ 0, 0, + 1, 1, + 2, 2 }, qScale, qOffset); + + std::vector updates = armnnUtils::QuantizedVector({ 1, 2, 3 }, qScale, qOffset); + + std::vector outputExpected = armnnUtils::QuantizedVector({ 1, 0, 0, + 0, 2, 0, + 0, 0, 3 }, qScale, qOffset); + + armnn::ScatterNdDescriptor descriptor(armnn::ScatterNdFunction::Add, false); + + return ScatterNdTestImpl(workloadFactory, + memoryManager, + tensorHandleFactory, + shape, + indices, + updates, + outputExpected, + shapeInfo, + indicesInfo, + updatesInfo, + outputInfo, + descriptor); +} + +template +LayerTestResult ScatterNd2DimSubWithInput( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory) +{ + float qScale = 1.0f; + int32_t qOffset = 0; + + armnn::TensorInfo inputInfo({3, 3}, ArmnnType, qScale, qOffset); + armnn::TensorInfo indicesInfo({3, 2}, armnn::DataType::Signed32, qScale, qOffset); + armnn::TensorInfo updatesInfo({3}, ArmnnType, qScale, qOffset); + armnn::TensorInfo outputInfo({3, 3}, ArmnnType, qScale, qOffset); + + std::vector input = armnnUtils::QuantizedVector({ 1, 1, 1, + 1, 1, 1, + 1, 1, 1 }, qScale, qOffset); + + std::vector indices = armnnUtils::QuantizedVector({ 0, 0, + 1, 1, + 2, 2 }, qScale, 
qOffset); + + std::vector updates = armnnUtils::QuantizedVector({ 1, 2, 3 }, qScale, qOffset); + + std::vector outputExpected = armnnUtils::QuantizedVector({ 0, 1, 1, + 1, -1, 1, + 1, 1, -2 }, qScale, qOffset); + + armnn::ScatterNdDescriptor descriptor(armnn::ScatterNdFunction::Sub, true); + + return ScatterNdTestImpl(workloadFactory, + memoryManager, + tensorHandleFactory, + input, + indices, + updates, + outputExpected, + inputInfo, + indicesInfo, + updatesInfo, + outputInfo, + descriptor); +} + +template +LayerTestResult ScatterNd2DimSubNoInput( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory) +{ + float qScale = 1.0f; + int32_t qOffset = 0; + + armnn::TensorInfo shapeInfo({2}, armnn::DataType::Signed32, qScale, qOffset); + armnn::TensorInfo indicesInfo({3, 2}, armnn::DataType::Signed32, qScale, qOffset); + armnn::TensorInfo updatesInfo({3}, ArmnnType, qScale, qOffset); + armnn::TensorInfo outputInfo({3, 3}, ArmnnType, qScale, qOffset); + + std::vector shape = armnnUtils::QuantizedVector({ 3, 3 }, qScale, qOffset); + + std::vector indices = armnnUtils::QuantizedVector({ 0, 0, + 1, 1, + 2, 2 }, qScale, qOffset); + + std::vector updates = armnnUtils::QuantizedVector({ 1, 2, 3 }, qScale, qOffset); + + std::vector outputExpected = armnnUtils::QuantizedVector({ -1, 0, 0, + 0, -2, 0, + 0, 0, -3 }, qScale, qOffset); + + armnn::ScatterNdDescriptor descriptor(armnn::ScatterNdFunction::Sub, false); + + return ScatterNdTestImpl(workloadFactory, + memoryManager, + tensorHandleFactory, + shape, + indices, + updates, + outputExpected, + shapeInfo, + indicesInfo, + updatesInfo, + outputInfo, + descriptor); +} + +template +LayerTestResult ScatterNd2DimMaxWithInput( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory) +{ + float qScale 
= 1.0f; + int32_t qOffset = 0; + + armnn::TensorInfo inputInfo({3, 3}, ArmnnType, qScale, qOffset); + armnn::TensorInfo indicesInfo({3, 2}, armnn::DataType::Signed32, qScale, qOffset); + armnn::TensorInfo updatesInfo({3}, ArmnnType, qScale, qOffset); + armnn::TensorInfo outputInfo({3, 3}, ArmnnType, qScale, qOffset); + + std::vector input = armnnUtils::QuantizedVector({ 1, 1, 1, + 1, 1, 1, + 1, 1, 1 }, qScale, qOffset); + + std::vector indices = armnnUtils::QuantizedVector({ 0, 0, + 1, 1, + 2, 2 }, qScale, qOffset); + + std::vector updates = armnnUtils::QuantizedVector({ 0, 1, 2 }, qScale, qOffset); + + std::vector outputExpected = armnnUtils::QuantizedVector({ 1, 1, 1, + 1, 1, 1, + 1, 1, 2 }, qScale, qOffset); + + armnn::ScatterNdDescriptor descriptor(armnn::ScatterNdFunction::Max, true); + + return ScatterNdTestImpl(workloadFactory, + memoryManager, + tensorHandleFactory, + input, + indices, + updates, + outputExpected, + inputInfo, + indicesInfo, + updatesInfo, + outputInfo, + descriptor); +} + +template +LayerTestResult ScatterNd2DimMaxNoInput( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory) +{ + float qScale = 1.0f; + int32_t qOffset = 0; + + armnn::TensorInfo shapeInfo({2}, armnn::DataType::Signed32, qScale, qOffset); + armnn::TensorInfo indicesInfo({3, 2}, armnn::DataType::Signed32, qScale, qOffset); + armnn::TensorInfo updatesInfo({3}, ArmnnType, qScale, qOffset); + armnn::TensorInfo outputInfo({3, 3}, ArmnnType, qScale, qOffset); + + std::vector shape = armnnUtils::QuantizedVector({ 3, 3 }, qScale, qOffset); + + std::vector indices = armnnUtils::QuantizedVector({ 0, 0, + 1, 1, + 2, 2 }, qScale, qOffset); + + std::vector updates = armnnUtils::QuantizedVector({ -1, 0, 1 }, qScale, qOffset); + + std::vector outputExpected = armnnUtils::QuantizedVector({ 0, 0, 0, + 0, 0, 0, + 0, 0, 1 }, qScale, qOffset); + + armnn::ScatterNdDescriptor 
descriptor(armnn::ScatterNdFunction::Max, false); + + return ScatterNdTestImpl(workloadFactory, + memoryManager, + tensorHandleFactory, + shape, + indices, + updates, + outputExpected, + shapeInfo, + indicesInfo, + updatesInfo, + outputInfo, + descriptor); +} + +template +LayerTestResult ScatterNd2DimMinWithInput( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory) +{ + float qScale = 1.0f; + int32_t qOffset = 0; + + armnn::TensorInfo inputInfo({3, 3}, ArmnnType, qScale, qOffset); + armnn::TensorInfo indicesInfo({3, 2}, armnn::DataType::Signed32, qScale, qOffset); + armnn::TensorInfo updatesInfo({3}, ArmnnType, qScale, qOffset); + armnn::TensorInfo outputInfo({3, 3}, ArmnnType, qScale, qOffset); + + std::vector input = armnnUtils::QuantizedVector({ 1, 1, 1, + 1, 1, 1, + 1, 1, 1 }, qScale, qOffset); + + std::vector indices = armnnUtils::QuantizedVector({ 0, 0, + 1, 1, + 2, 2 }, qScale, qOffset); + + std::vector updates = armnnUtils::QuantizedVector({ 0, 1, 2 }, qScale, qOffset); + + std::vector outputExpected = armnnUtils::QuantizedVector({ 0, 1, 1, + 1, 1, 1, + 1, 1, 1 }, qScale, qOffset); + + armnn::ScatterNdDescriptor descriptor(armnn::ScatterNdFunction::Min, true); + + return ScatterNdTestImpl(workloadFactory, + memoryManager, + tensorHandleFactory, + input, + indices, + updates, + outputExpected, + inputInfo, + indicesInfo, + updatesInfo, + outputInfo, + descriptor); +} + +template +LayerTestResult ScatterNd2DimMinNoInput( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory) +{ + float qScale = 1.0f; + int32_t qOffset = 0; + + armnn::TensorInfo shapeInfo({2}, armnn::DataType::Signed32, qScale, qOffset); + armnn::TensorInfo indicesInfo({3, 2}, armnn::DataType::Signed32, qScale, qOffset); + armnn::TensorInfo 
updatesInfo({3}, ArmnnType, qScale, qOffset); + armnn::TensorInfo outputInfo({3, 3}, ArmnnType, qScale, qOffset); + + std::vector shape = armnnUtils::QuantizedVector({ 3, 3 }, qScale, qOffset); + + std::vector indices = armnnUtils::QuantizedVector({ 0, 0, + 1, 1, + 2, 2 }, qScale, qOffset); + + std::vector updates = armnnUtils::QuantizedVector({ -1, 0, 1 }, qScale, qOffset); + + std::vector outputExpected = armnnUtils::QuantizedVector({ -1, 0, 0, + 0, 0, 0, + 0, 0, 0 }, qScale, qOffset); + + armnn::ScatterNdDescriptor descriptor(armnn::ScatterNdFunction::Min, false); + + return ScatterNdTestImpl(workloadFactory, + memoryManager, + tensorHandleFactory, + shape, + indices, + updates, + outputExpected, + shapeInfo, + indicesInfo, + updatesInfo, + outputInfo, + descriptor); +} + +template +LayerTestResult ScatterNd2DimMulWithInput( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory) +{ + float qScale = 1.0f; + int32_t qOffset = 0; + + armnn::TensorInfo inputInfo({3, 3}, ArmnnType, qScale, qOffset); + armnn::TensorInfo indicesInfo({3, 2}, armnn::DataType::Signed32, qScale, qOffset); + armnn::TensorInfo updatesInfo({3}, ArmnnType, qScale, qOffset); + armnn::TensorInfo outputInfo({3, 3}, ArmnnType, qScale, qOffset); + + std::vector input = armnnUtils::QuantizedVector({ 1, 1, 1, + 1, 1, 1, + 1, 1, 1 }, qScale, qOffset); + + std::vector indices = armnnUtils::QuantizedVector({ 0, 0, + 1, 1, + 2, 2 }, qScale, qOffset); + + std::vector updates = armnnUtils::QuantizedVector({ 1, 2, 3 }, qScale, qOffset); + + std::vector outputExpected = armnnUtils::QuantizedVector({ 1, 1, 1, + 1, 2, 1, + 1, 1, 3 }, qScale, qOffset); + + armnn::ScatterNdDescriptor descriptor(armnn::ScatterNdFunction::Mul, true); + + return ScatterNdTestImpl(workloadFactory, + memoryManager, + tensorHandleFactory, + input, + indices, + updates, + outputExpected, + inputInfo, + indicesInfo, + 
updatesInfo, + outputInfo, + descriptor); +} + +template +LayerTestResult ScatterNd2DimMulNoInput( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory) +{ + float qScale = 1.0f; + int32_t qOffset = 0; + + armnn::TensorInfo shapeInfo({2}, armnn::DataType::Signed32, qScale, qOffset); + armnn::TensorInfo indicesInfo({3, 2}, armnn::DataType::Signed32, qScale, qOffset); + armnn::TensorInfo updatesInfo({3}, ArmnnType, qScale, qOffset); + armnn::TensorInfo outputInfo({3, 3}, ArmnnType, qScale, qOffset); + + std::vector shape = armnnUtils::QuantizedVector({ 3, 3 }, qScale, qOffset); + + std::vector indices = armnnUtils::QuantizedVector({ 0, 0, + 1, 1, + 2, 2 }, qScale, qOffset); + + std::vector updates = armnnUtils::QuantizedVector({ 1, 2, 3 }, qScale, qOffset); + + std::vector outputExpected = armnnUtils::QuantizedVector({ 0, 0, 0, + 0, 0, 0, + 0, 0, 0 }, qScale, qOffset); + + armnn::ScatterNdDescriptor descriptor(armnn::ScatterNdFunction::Mul, false); + + return ScatterNdTestImpl(workloadFactory, + memoryManager, + tensorHandleFactory, + shape, + indices, + updates, + outputExpected, + shapeInfo, + indicesInfo, + updatesInfo, + outputInfo, + descriptor); +} + +template LayerTestResult, 1> +ScatterNd1DimUpdateWithInput( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template LayerTestResult, 1> +ScatterNd1DimUpdateNoInput( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template LayerTestResult, 2> +ScatterNd2DimUpdateWithInput( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& 
tensorHandleFactory); + +template LayerTestResult, 2> +ScatterNd2DimUpdateNoInput( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template LayerTestResult, 2> +ScatterNd2Dim1Outter1InnerUpdateWithInput( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template LayerTestResult, 2> +ScatterNd2Dim1Outter1InnerUpdateNoInput( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template LayerTestResult, 3> +ScatterNd3DimUpdateWithInput( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template LayerTestResult, 3> +ScatterNd3DimUpdateNoInput( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template LayerTestResult, 3> +ScatterNd3Dim1Outter2InnerUpdateWithInput( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template LayerTestResult, 3> +ScatterNd3Dim1Outter2InnerUpdateNoInput( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template LayerTestResult, 3> +ScatterNd3Dim2Outter1InnerUpdateWithInput( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + 
+template LayerTestResult, 3> +ScatterNd3Dim2Outter1InnerUpdateNoInput( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template LayerTestResult, 4> +ScatterNd4DimUpdateWithInput( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template LayerTestResult, 4> +ScatterNd4DimUpdateNoInput( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template LayerTestResult, 2> +ScatterNd2DimAddWithInput( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template LayerTestResult, 2> +ScatterNd2DimAddNoInput( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template LayerTestResult, 2> +ScatterNd2DimSubWithInput( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template LayerTestResult, 2> +ScatterNd2DimSubNoInput( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template LayerTestResult, 2> +ScatterNd2DimMaxWithInput( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template LayerTestResult, 2> +ScatterNd2DimMaxNoInput( + armnn::IWorkloadFactory& 
workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template LayerTestResult, 2> +ScatterNd2DimMinWithInput( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template LayerTestResult, 2> +ScatterNd2DimMinNoInput( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template LayerTestResult, 2> +ScatterNd2DimMulWithInput( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template LayerTestResult, 2> +ScatterNd2DimMulNoInput( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template LayerTestResult, 3> +ScatterNd3DimUpdateWithInput( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template LayerTestResult, 3> +ScatterNd3DimUpdateNoInput( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template LayerTestResult, 3> +ScatterNd3DimUpdateWithInput( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template LayerTestResult, 3> +ScatterNd3DimUpdateNoInput( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const 
armnn::ITensorHandleFactory& tensorHandleFactory); + +template LayerTestResult, 3> +ScatterNd3DimUpdateWithInput( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template LayerTestResult, 3> +ScatterNd3DimUpdateNoInput( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template LayerTestResult, 3> +ScatterNd3DimUpdateWithInput( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template LayerTestResult, 3> +ScatterNd3DimUpdateNoInput( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template LayerTestResult, 3> +ScatterNd3DimUpdateWithInput( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template LayerTestResult, 3> +ScatterNd3DimUpdateNoInput( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template LayerTestResult, 3> +ScatterNd3DimUpdateWithInput( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template LayerTestResult, 3> +ScatterNd3DimUpdateNoInput( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); \ No newline at end of file diff --git 
a/src/backends/backendsCommon/test/layerTests/ScatterNdTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/ScatterNdTestImpl.hpp new file mode 100644 index 0000000000..5350904719 --- /dev/null +++ b/src/backends/backendsCommon/test/layerTests/ScatterNdTestImpl.hpp @@ -0,0 +1,156 @@ +// +// Copyright © 2024 Arm Ltd and Contributors. All rights reserved. +// SPDX-License-Identifier: MIT +// + +#pragma once + +#include + +#include + +#include + +template> +LayerTestResult ScatterNd1DimUpdateWithInput( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template> +LayerTestResult ScatterNd1DimUpdateNoInput( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template> +LayerTestResult ScatterNd2DimUpdateWithInput( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template> +LayerTestResult ScatterNd2DimUpdateNoInput( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template> +LayerTestResult ScatterNd2Dim1Outter1InnerUpdateWithInput( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template> +LayerTestResult ScatterNd2Dim1Outter1InnerUpdateNoInput( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template> +LayerTestResult ScatterNd3DimUpdateWithInput( + armnn::IWorkloadFactory& workloadFactory, + const 
armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template> +LayerTestResult ScatterNd3DimUpdateNoInput( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template> +LayerTestResult ScatterNd3Dim1Outter2InnerUpdateWithInput( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template> +LayerTestResult ScatterNd3Dim1Outter2InnerUpdateNoInput( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template> +LayerTestResult ScatterNd3Dim2Outter1InnerUpdateWithInput( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template> +LayerTestResult ScatterNd3Dim2Outter1InnerUpdateNoInput( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template> +LayerTestResult ScatterNd4DimUpdateWithInput( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template> +LayerTestResult ScatterNd4DimUpdateNoInput( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template> +LayerTestResult ScatterNd2DimAddWithInput( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + 
const armnn::ITensorHandleFactory& tensorHandleFactory); + +template> +LayerTestResult ScatterNd2DimAddNoInput( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template> +LayerTestResult ScatterNd2DimSubWithInput( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template> +LayerTestResult ScatterNd2DimSubNoInput( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template> +LayerTestResult ScatterNd2DimMaxWithInput( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template> +LayerTestResult ScatterNd2DimMaxNoInput( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template> +LayerTestResult ScatterNd2DimMinWithInput( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template> +LayerTestResult ScatterNd2DimMinNoInput( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template> +LayerTestResult ScatterNd2DimMulWithInput( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template> +LayerTestResult ScatterNd2DimMulNoInput( + armnn::IWorkloadFactory& 
workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); \ No newline at end of file diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp index f97d03a26e..654aeb55dc 100644 --- a/src/backends/reference/RefLayerSupport.cpp +++ b/src/backends/reference/RefLayerSupport.cpp @@ -356,6 +356,13 @@ bool RefLayerSupport::IsLayerSupported(const LayerType& type, infos[1], *(PolymorphicDowncast(&descriptor)), reasonIfUnsupported); + case LayerType::ScatterNd: + return IsScatterNdSupported(infos[0], + infos[1], + infos[2], + infos[3], + *(PolymorphicDowncast(&descriptor)), + reasonIfUnsupported); case LayerType::Slice: return IsSliceSupported(infos[0], infos[1], @@ -2442,6 +2449,64 @@ bool RefLayerSupport::IsReverseV2Supported(const TensorInfo& input0, return supported; } +bool RefLayerSupport::IsScatterNdSupported(const TensorInfo& input, + const TensorInfo& indices, + const TensorInfo& updates, + const TensorInfo& output, + const ScatterNdDescriptor& descriptor, + Optional reasonIfUnsupported) const +{ + IgnoreUnused(descriptor); + + bool supported = true; + + std::array supportedTypes + { + DataType::Float32, + DataType::Float16, + DataType::QAsymmS8, + DataType::QAsymmU8, + DataType::QSymmS8, + DataType::QSymmS16, + DataType::Signed32 + }; + + std::array indicesSupportedTypes = + { + DataType::Signed32 + }; + + supported &= CheckSupportRule(TypeAnyOf(indices, indicesSupportedTypes), reasonIfUnsupported, + "ScatterNd: indices type not supported."); + + supported &= CheckSupportRule(TypeAnyOf(updates, supportedTypes), reasonIfUnsupported, + "ScatterNd: updates type not supported."); + + supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported, + "ScatterNd: output type not supported"); + + supported &= CheckSupportRule(TypesAreEqual(updates, output), reasonIfUnsupported, + "ScatterNd: input and updates types 
are mismatched"); + + if (descriptor.m_InputEnabled) + { + // If the input slot is enabled, we have the input tensor in this slot + supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported, + "ScatterNd: input type not supported."); + + supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported, + "ScatterNd: input and output types are mismatched"); + } + else + { + // If the input slot is not enabled, we have the shape tensor in this slot + supported &= CheckSupportRule(TypeAnyOf(input, indicesSupportedTypes), reasonIfUnsupported, + "ScatterNd: shape type not supported."); + } + + return supported; +} + bool RefLayerSupport::IsShapeSupported(const TensorInfo& input, const TensorInfo& output, Optional reasonIfUnsupported) const diff --git a/src/backends/reference/RefLayerSupport.hpp b/src/backends/reference/RefLayerSupport.hpp index 9e7175389a..1b0f4a2bb5 100644 --- a/src/backends/reference/RefLayerSupport.hpp +++ b/src/backends/reference/RefLayerSupport.hpp @@ -1,5 +1,5 @@ // -// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved. +// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved. 
// SPDX-License-Identifier: MIT // #pragma once @@ -309,6 +309,13 @@ public: const TensorInfo& output, Optional reasonIfUnsupported = EmptyOptional()) const; + bool IsScatterNdSupported(const TensorInfo& input, + const TensorInfo& indices, + const TensorInfo& updates, + const TensorInfo& output, + const ScatterNdDescriptor& descriptor, + Optional reasonIfUnsupported = EmptyOptional()) const; + bool IsShapeSupported(const TensorInfo& input, const TensorInfo& output, Optional reasonIfUnsupported = EmptyOptional()) const; diff --git a/src/backends/reference/RefWorkloadFactory.cpp b/src/backends/reference/RefWorkloadFactory.cpp index ad6ec9a792..df458c1a6d 100644 --- a/src/backends/reference/RefWorkloadFactory.cpp +++ b/src/backends/reference/RefWorkloadFactory.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved. +// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved. // SPDX-License-Identifier: MIT // #include @@ -567,6 +567,11 @@ std::unique_ptr RefWorkloadFactory::CreateWorkload(LayerType type, auto reverseV2QueueDescriptor = PolymorphicDowncast(&descriptor); return std::make_unique(*reverseV2QueueDescriptor, info); } + case LayerType::ScatterNd: + { + auto scatterQueueDescriptor = PolymorphicDowncast(&descriptor); + return std::make_unique(*scatterQueueDescriptor, info); + } case LayerType::Shape: { auto shapeQueueDescriptor = PolymorphicDowncast(&descriptor); diff --git a/src/backends/reference/backend.mk b/src/backends/reference/backend.mk index 27ca8f607a..752255607a 100644 --- a/src/backends/reference/backend.mk +++ b/src/backends/reference/backend.mk @@ -1,5 +1,5 @@ # -# Copyright © 2017-2023 ARM Ltd and Contributors. All rights reserved. +# Copyright © 2017-2024 ARM Ltd and Contributors. All rights reserved. 
# SPDX-License-Identifier: MIT # @@ -96,6 +96,7 @@ BACKEND_SOURCES := \ workloads/RefReshapeWorkload.cpp \ workloads/RefResizeWorkload.cpp \ workloads/RefReverseV2Workload.cpp \ + workloads/RefScatterNdWorkload.cpp \ workloads/RefSliceWorkload.cpp \ workloads/RefSoftmaxWorkload.cpp \ workloads/RefSpaceToBatchNdWorkload.cpp \ @@ -109,6 +110,7 @@ BACKEND_SOURCES := \ workloads/RefUnidirectionalSequenceLstmWorkload.cpp \ workloads/Resize.cpp \ workloads/ReverseV2Impl.cpp \ + workloads/ScatterNd.cpp \ workloads/Slice.cpp \ workloads/SpaceToBatchNd.cpp \ workloads/SpaceToDepth.cpp \ diff --git a/src/backends/reference/test/RefLayerTests.cpp b/src/backends/reference/test/RefLayerTests.cpp index cfe85594b3..078338163f 100644 --- a/src/backends/reference/test/RefLayerTests.cpp +++ b/src/backends/reference/test/RefLayerTests.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017,2022-2023 Arm Ltd and Contributors. All rights reserved. +// Copyright © 2017,2022-2024 Arm Ltd and Contributors. All rights reserved. 
// SPDX-License-Identifier: MIT // @@ -2885,4 +2885,56 @@ ARMNN_AUTO_TEST_CASE_WITH_THF(BroadcastTo3dAxis0TestSigned32, BroadcastTo3dAxis0 ARMNN_AUTO_TEST_CASE_WITH_THF(BroadcastTo3dAxis1TestSigned32, BroadcastTo3dAxis1Test) ARMNN_AUTO_TEST_CASE_WITH_THF(BroadcastTo3dAxis2TestSigned32, BroadcastTo3dAxis2Test) ARMNN_AUTO_TEST_CASE_WITH_THF(BroadcastTo4dTestSigned32, BroadcastTo4dTest) + +// ScatterNd +// With Input tensor +ARMNN_AUTO_TEST_CASE_WITH_THF(ScatterNd1DUpdateTestWithInputFloat32, ScatterNd1DimUpdateWithInput) +ARMNN_AUTO_TEST_CASE_WITH_THF(ScatterNd2DUpdateTestWithInputFloat32, ScatterNd2DimUpdateWithInput) +ARMNN_AUTO_TEST_CASE_WITH_THF(ScatterNd2Dim1Outter1InnerUpdateWithInputFloat32, + ScatterNd2Dim1Outter1InnerUpdateWithInput) +ARMNN_AUTO_TEST_CASE_WITH_THF(ScatterNd3DimUpdateWithInputFloat32, ScatterNd3DimUpdateWithInput) +ARMNN_AUTO_TEST_CASE_WITH_THF(ScatterNd3Dim1Outter2InnerUpdateWithInputFloat32, + ScatterNd3Dim1Outter2InnerUpdateWithInput) +ARMNN_AUTO_TEST_CASE_WITH_THF(ScatterNd3Dim2Outter1InnerUpdateWithInputFloat32, + ScatterNd3Dim2Outter1InnerUpdateWithInput) +ARMNN_AUTO_TEST_CASE_WITH_THF(ScatterNd4DimUpdateWithInputFloat32, ScatterNd4DimUpdateWithInput) + +ARMNN_AUTO_TEST_CASE_WITH_THF(ScatterNd2DimAddWithInputFloat32, ScatterNd2DimAddWithInput) +ARMNN_AUTO_TEST_CASE_WITH_THF(ScatterNd2DimSubWithInputFloat32, ScatterNd2DimSubWithInput) +ARMNN_AUTO_TEST_CASE_WITH_THF(ScatterNd2DimMaxWithInputFloat32, ScatterNd2DimMaxWithInput) +ARMNN_AUTO_TEST_CASE_WITH_THF(ScatterNd2DimMinWithInputFloat32, ScatterNd2DimMinWithInput) +ARMNN_AUTO_TEST_CASE_WITH_THF(ScatterNd2DimMulWithInputFloat32, ScatterNd2DimMulWithInput) + +ARMNN_AUTO_TEST_CASE_WITH_THF(ScatterNd3DimUpdateWithInputFloat16, ScatterNd3DimUpdateWithInput) +ARMNN_AUTO_TEST_CASE_WITH_THF(ScatterNd3DimUpdateWithInputQAsymmS8, ScatterNd3DimUpdateWithInput) +ARMNN_AUTO_TEST_CASE_WITH_THF(ScatterNd3DimUpdateWithInputQAsymmU8, ScatterNd3DimUpdateWithInput) 
+ARMNN_AUTO_TEST_CASE_WITH_THF(ScatterNd3DimUpdateWithInputQSymmS8, ScatterNd3DimUpdateWithInput) +ARMNN_AUTO_TEST_CASE_WITH_THF(ScatterNd3DimUpdateWithInputQSymmS16, ScatterNd3DimUpdateWithInput) +ARMNN_AUTO_TEST_CASE_WITH_THF(ScatterNd3DimUpdateWithInputSigned32, ScatterNd3DimUpdateWithInput) + +// No input tensor, only shape provided +ARMNN_AUTO_TEST_CASE_WITH_THF(ScatterNd1DUpdateTestNoInputFloat32, ScatterNd1DimUpdateNoInput) +ARMNN_AUTO_TEST_CASE_WITH_THF(ScatterNd2DimUpdateTestNoInputFloat32, ScatterNd2DimUpdateNoInput) +ARMNN_AUTO_TEST_CASE_WITH_THF(ScatterNd2Dim1Outter1InnerUpdateNoInputFloat32, + ScatterNd2Dim1Outter1InnerUpdateNoInput) +ARMNN_AUTO_TEST_CASE_WITH_THF(ScatterNd3DimUpdateNoInputFloat32, ScatterNd3DimUpdateNoInput) +ARMNN_AUTO_TEST_CASE_WITH_THF(ScatterNd3Dim1Outter2InnerUpdateNoInputFloat32, + ScatterNd3Dim1Outter2InnerUpdateNoInput) +ARMNN_AUTO_TEST_CASE_WITH_THF(ScatterNd3Dim2Outter1InnerUpdateNoInputFloat32, + ScatterNd3Dim2Outter1InnerUpdateNoInput) +ARMNN_AUTO_TEST_CASE_WITH_THF(ScatterNd4DimUpdateNoInputFloat32, ScatterNd4DimUpdateNoInput) + +ARMNN_AUTO_TEST_CASE_WITH_THF(ScatterNd2DimAddNoInputFloat32, ScatterNd2DimAddNoInput) +ARMNN_AUTO_TEST_CASE_WITH_THF(ScatterNd2DimSubNoInputFloat32, ScatterNd2DimSubNoInput) +ARMNN_AUTO_TEST_CASE_WITH_THF(ScatterNd2DimMaxNoInputFloat32, ScatterNd2DimMaxNoInput) +ARMNN_AUTO_TEST_CASE_WITH_THF(ScatterNd2DimMinNoInputFloat32, ScatterNd2DimMinNoInput) +ARMNN_AUTO_TEST_CASE_WITH_THF(ScatterNd2DimMulNoInputFloat32, ScatterNd2DimMulNoInput) + +ARMNN_AUTO_TEST_CASE_WITH_THF(ScatterNd3DimUpdateNoInputFloat16, ScatterNd3DimUpdateNoInput) +ARMNN_AUTO_TEST_CASE_WITH_THF(ScatterNd3DimUpdateNoInputQAsymmS8, ScatterNd3DimUpdateNoInput) +ARMNN_AUTO_TEST_CASE_WITH_THF(ScatterNd3DimUpdateNoInputQAsymmU8, ScatterNd3DimUpdateNoInput) +ARMNN_AUTO_TEST_CASE_WITH_THF(ScatterNd3DimUpdateNoInputQSymmS8, ScatterNd3DimUpdateNoInput) +ARMNN_AUTO_TEST_CASE_WITH_THF(ScatterNd3DimUpdateNoInputQSymmS16, 
ScatterNd3DimUpdateNoInput) +ARMNN_AUTO_TEST_CASE_WITH_THF(ScatterNd3DimUpdateNoInputSigned32, ScatterNd3DimUpdateNoInput) + } \ No newline at end of file diff --git a/src/backends/reference/workloads/CMakeLists.txt b/src/backends/reference/workloads/CMakeLists.txt index 42f92aec1d..0f70cb0022 100644 --- a/src/backends/reference/workloads/CMakeLists.txt +++ b/src/backends/reference/workloads/CMakeLists.txt @@ -1,5 +1,5 @@ # -# Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved. +# Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved. # SPDX-License-Identifier: MIT # @@ -85,6 +85,8 @@ list(APPEND armnnRefBackendWorkloads_sources RefCastWorkload.hpp RefChannelShuffleWorkload.cpp RefChannelShuffleWorkload.hpp + RefScatterNdWorkload.cpp + RefScatterNdWorkload.hpp RefShapeWorkload.hpp RefComparisonWorkload.cpp RefComparisonWorkload.hpp @@ -195,6 +197,8 @@ list(APPEND armnnRefBackendWorkloads_sources Resize.cpp Resize.hpp Rsqrt.hpp + ScatterNd.cpp + ScatterNd.hpp Sin.hpp Slice.cpp Slice.hpp diff --git a/src/backends/reference/workloads/RefScatterNdWorkload.cpp b/src/backends/reference/workloads/RefScatterNdWorkload.cpp new file mode 100644 index 0000000000..4713add0e9 --- /dev/null +++ b/src/backends/reference/workloads/RefScatterNdWorkload.cpp @@ -0,0 +1,100 @@ +// +// Copyright © 2024 Arm Ltd and Contributors. All rights reserved. 
+// SPDX-License-Identifier: MIT +// + +#include +#include "RefScatterNdWorkload.hpp" +#include "RefWorkloadUtils.hpp" +#include "ScatterNd.hpp" +#include "Profiling.hpp" + +namespace armnn +{ + + RefScatterNdWorkload::RefScatterNdWorkload(const ScatterNdQueueDescriptor& descriptor, const WorkloadInfo& info) + : RefBaseWorkload(descriptor, info) + {} + + void RefScatterNdWorkload::Execute() const + { + Execute(m_Data.m_Inputs, m_Data.m_Outputs); + } + + void RefScatterNdWorkload::ExecuteAsync(ExecutionData& executionData) + { + WorkingMemDescriptor* workingMemDescriptor = static_cast(executionData.m_Data); + Execute(workingMemDescriptor->m_Inputs, workingMemDescriptor->m_Outputs); + } + + void RefScatterNdWorkload::Execute(std::vector inputs, std::vector outputs) const + { + ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefScatterNdWorkload_Execute"); + + if (m_Data.m_Parameters.m_InputEnabled) + { + // Getting TensorInfos for three inputs slots + const TensorInfo& inputInfo = GetTensorInfo(inputs[0]); + const TensorInfo& indicesInfo = GetTensorInfo(inputs[1]); + const TensorInfo& updatesInfo = GetTensorInfo(inputs[2]); + + // Getting Decoder for input + std::unique_ptr> inputDecoder = MakeDecoder(GetTensorInfo(inputs[0]), + inputs[0]->Map()); + + // Getting Decoder for indices + std::unique_ptr> indicesDecoder = MakeDecoder(GetTensorInfo(inputs[1]), + inputs[1]->Map()); + + // Getting Decoder for updates + std::unique_ptr> updatesDecoder = MakeDecoder(GetTensorInfo(inputs[2]), + inputs[2]->Map()); + + // Getting Encoder for output + std::unique_ptr> outputEncoder = MakeEncoder(GetTensorInfo(outputs[0]), + outputs[0]->Map()); + + ScatterNd(inputInfo, + indicesInfo, + updatesInfo, + *inputDecoder, + *indicesDecoder, + *updatesDecoder, + *outputEncoder, + m_Data.m_Parameters); + } + else + { + // Getting TensorInfos for three inputs slots + const TensorInfo& shapeInfo = GetTensorInfo(inputs[0]); + const TensorInfo& indicesInfo = GetTensorInfo(inputs[1]); + const 
TensorInfo& updatesInfo = GetTensorInfo(inputs[2]); + + // Getting Decoder for shape + std::unique_ptr> shapeDecoder = MakeDecoder(GetTensorInfo(inputs[0]), + inputs[0]->Map()); + + // Getting Decoder for indices + std::unique_ptr> indicesDecoder = MakeDecoder(GetTensorInfo(inputs[1]), + inputs[1]->Map()); + + // Getting Decoder for updates + std::unique_ptr> updatesDecoder = MakeDecoder(GetTensorInfo(inputs[2]), + inputs[2]->Map()); + + // Getting Encoder for output + std::unique_ptr> outputEncoder = MakeEncoder(GetTensorInfo(outputs[0]), + outputs[0]->Map()); + + ScatterNd(indicesInfo, + updatesInfo, + shapeInfo, + *indicesDecoder, + *updatesDecoder, + *shapeDecoder, + *outputEncoder, + m_Data.m_Parameters); + } + } + +} // namespace armnn \ No newline at end of file diff --git a/src/backends/reference/workloads/RefScatterNdWorkload.hpp b/src/backends/reference/workloads/RefScatterNdWorkload.hpp new file mode 100644 index 0000000000..c9cf5a3af3 --- /dev/null +++ b/src/backends/reference/workloads/RefScatterNdWorkload.hpp @@ -0,0 +1,30 @@ +// +// Copyright © 2024 Arm Ltd and Contributors. All rights reserved. 
+// SPDX-License-Identifier: MIT +// + +#pragma once + +#include "RefBaseWorkload.hpp" +#include + +#include "ScatterNd.hpp" + +namespace armnn +{ + + class RefScatterNdWorkload : public RefBaseWorkload + { + public: + explicit RefScatterNdWorkload(const ScatterNdQueueDescriptor& descriptor, + const WorkloadInfo& info); + + void Execute() const override; + void ExecuteAsync(ExecutionData& executionData) override; + + private: + void Execute(std::vector inputs, std::vector outputs) const; + + }; + +} // namespace armnn \ No newline at end of file diff --git a/src/backends/reference/workloads/RefWorkloads.hpp b/src/backends/reference/workloads/RefWorkloads.hpp index 98aa27b8a9..92b178c3d5 100644 --- a/src/backends/reference/workloads/RefWorkloads.hpp +++ b/src/backends/reference/workloads/RefWorkloads.hpp @@ -1,5 +1,5 @@ // -// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved. +// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved. // SPDX-License-Identifier: MIT // @@ -55,6 +55,7 @@ #include "RefReshapeWorkload.hpp" #include "RefResizeWorkload.hpp" #include "RefReverseV2Workload.hpp" +#include "RefScatterNdWorkload.hpp" #include "RefShapeWorkload.hpp" #include "RefSliceWorkload.hpp" #include "RefSplitterWorkload.hpp" diff --git a/src/backends/reference/workloads/ScatterNd.cpp b/src/backends/reference/workloads/ScatterNd.cpp new file mode 100644 index 0000000000..8eb53b00a8 --- /dev/null +++ b/src/backends/reference/workloads/ScatterNd.cpp @@ -0,0 +1,336 @@ +// +// Copyright © 2024 Arm Ltd and Contributors. All rights reserved. 
+// SPDX-License-Identifier: MIT +// + +#include "ScatterNd.hpp" +#include "Encoders.hpp" +#include +#include + +#include + +#include + +namespace armnn +{ + +float ScatterOperation(ScatterNdFunction operation, + float input, + float update) +{ + switch (operation) + { + case ScatterNdFunction::Update: + return update; + case ScatterNdFunction::Add: + return input + update; + case ScatterNdFunction::Sub: + return input - update; + case ScatterNdFunction::Max: + return std::max(input, update); + case ScatterNdFunction::Min: + return std::min(input, update); + case ScatterNdFunction::Mul: + return input * update; + default: + throw InvalidArgumentException("ScatterNd: cannot execute this operation."); + } +} + +void ScatterNd(const TensorInfo& inputInfo, + const TensorInfo& indicesInfo, + const TensorInfo& updatesInfo, + Decoder& input, + Decoder& indices, + Decoder& updates, + Encoder& output, + ScatterNdDescriptor descriptor) +{ + // Axis Unsupported + if (descriptor.m_AxisEnabled) + { + throw InvalidArgumentException("ScatterNd: axis param not supported."); + } + + // Get the shape for indices, updates, and input + TensorShape indicesShape = indicesInfo.GetShape(); + TensorShape updatesShape = updatesInfo.GetShape(); + TensorShape inputShape = inputInfo.GetShape(); + + // Get the dimensions for indices and updates + unsigned int dimension = inputInfo.GetNumDimensions(); + unsigned int indicesDim = indicesInfo.GetNumDimensions(); + unsigned int updatesDim = updatesInfo.GetNumDimensions(); + + // Calculate the outter and inner dimensions + unsigned int outterDim = indicesShape[indicesDim - 1]; + unsigned int innerDim = dimension - outterDim; + + // Calculate the number of elements in each dimension + unsigned int numElementsCount = 1; + std::vector elementInDim(dimension); + for (unsigned int dimIndex = dimension; dimIndex > 0; --dimIndex) + { + elementInDim[dimIndex - 1] = numElementsCount; + numElementsCount *= inputShape[dimIndex - 1]; + } + + // Number of 
updates per index + unsigned int numUpdatesPerIndex = elementInDim[dimension - innerDim - 1]; + + // Number of indices to update + unsigned int numIndices = indicesShape[0]; + + // Check Input Requirements + // Requirement 1: Indices and Updates must have rank at least 1 + if (indicesDim < 1 || updatesDim < 1) + { + throw InvalidArgumentException("ScatterNd: indices and updates must have rank >= 1."); + } + + // Requirement 2: Input, Indices and Updates must have values + if (inputInfo.GetNumElements() == 0 || + indicesInfo.GetNumElements() == 0 || + updatesInfo.GetNumElements() == 0) + { + throw InvalidArgumentException("ScatterNd: input, indices and updates tensor must have values."); + } + + // Requirement 3: Indices and Updates must match in shape + // The updates dimension should equals to 1 + inner dimension + if (updatesDim != 1 + innerDim) + { + throw InvalidArgumentException("ScatterNd: updates dimension should equal to 1 + inner dimension."); + } + // The inner dimension of updates has to match with shape of input + for (unsigned int dimBackIndex = 0; dimBackIndex < innerDim; ++dimBackIndex) + { + if (updatesShape[updatesDim - dimBackIndex - 1] != inputShape[dimension - dimBackIndex - 1]) + { + throw InvalidArgumentException( + fmt::format("ScatterNd: input and updates shape not match on dimension {}", + dimension - dimBackIndex)); + } + } + + // Requirement 4: Check duplicate indices and out of bound indices + std::set indicesSet; + std::vector flattenIndices(numIndices); + for (unsigned int indicesIdx = 0; indicesIdx < numIndices; ++indicesIdx) + { + // Get the index + int flattenIndex = 0; + + for (unsigned int outterIdx = 0; outterIdx < outterDim; ++outterIdx) { + + int outterIndexValue = indices.Get(); + + // Check bounds + if (outterIndexValue < 0 || outterIndexValue >= int(inputShape[outterIdx])) + { + throw InvalidArgumentException( + fmt::format("ScatterNd: indices {} out of bound [0, {})", + outterIndexValue, inputShape[outterIdx])); + } + + 
flattenIndex += int(elementInDim[outterIdx]) * outterIndexValue; + ++indices; + } + + // Check duplicates when executing ScatterNd::Update + if (descriptor.m_Function == ScatterNdFunction::Update && + indicesSet.find(flattenIndex) != indicesSet.end()) + { + throw InvalidArgumentException( + fmt::format("ScatterNd: duplicate indices occurs {}", flattenIndex)); + } + + flattenIndices[indicesIdx] = flattenIndex; + indicesSet.insert(flattenIndex); + } + + // Set the input data to output + for (unsigned int idx = 0; idx < inputInfo.GetNumElements(); ++idx) + { + float inputValue = input.Get(); + ++input; + output.Set(inputValue); + ++output; + } + + // Iterate through all indices to scatter updates + for (unsigned int indicesIdx = 0; indicesIdx < numIndices; ++indicesIdx) + { + // Get the index and calculate the flatten index + int flattenIndex = flattenIndices[indicesIdx]; + + // FlattenIndex is the place that we are going to update the elements + unsigned int updatesStartIdx = indicesIdx * numUpdatesPerIndex; + for (unsigned int updatesIdx = 0; updatesIdx < numUpdatesPerIndex; ++updatesIdx) + { + updates[updatesStartIdx + updatesIdx]; + input[static_cast(flattenIndex) + updatesIdx]; + float updateValue = ScatterOperation(descriptor.m_Function, input.Get(), updates.Get()); + output[static_cast(flattenIndex) + updatesIdx]; + output.Set(updateValue); + } + } +} + +void ScatterNd(const TensorInfo& indicesInfo, + const TensorInfo& updatesInfo, + const TensorInfo& shapeInfo, + Decoder& indices, + Decoder& updates, + Decoder& shape, + Encoder& output, + ScatterNdDescriptor descriptor) +{ + // Axis Unsupported + if (descriptor.m_AxisEnabled) + { + throw InvalidArgumentException("ScatterNd: axis param not supported."); + } + + // Get the shape for indices, updates, and input + TensorShape indicesShape = indicesInfo.GetShape(); + TensorShape updatesShape = updatesInfo.GetShape(); + + // Get the shape values + std::vector shapeValues = shape.DecodeTensor(shapeInfo.GetShape()); + 
// Check the shape + if (shapeInfo.GetNumElements() == 0) + { + throw InvalidArgumentException("ScatterNd: shape must have values."); + } + for (auto shapeValue : shapeValues) + { + if (shapeValue <= 0) + { + throw InvalidArgumentException("ScatterNd: shape values must >= 0."); + } + } + // Get the input shape + std::vector inputShape (shapeValues.begin(), shapeValues.end()); + unsigned int inputElementsNum = static_cast( + std::accumulate(inputShape.begin(), inputShape.end(), 1, std::multiplies())); + + // Get the dimensions for indices and updates + unsigned int dimension = shapeInfo.GetNumElements(); + unsigned int indicesDim = indicesInfo.GetNumDimensions(); + unsigned int updatesDim = updatesInfo.GetNumDimensions(); + + // Calculate the outter and inner dimensions + unsigned int outterDim = indicesShape[indicesDim - 1]; + unsigned int innerDim = dimension - outterDim; + + // Calculate the number of elements in each dimension + unsigned int numElementsCount = 1; + std::vector elementInDim(dimension); + for (unsigned int dimIndex = dimension; dimIndex > 0; --dimIndex) + { + elementInDim[dimIndex - 1] = numElementsCount; + numElementsCount *= inputShape[dimIndex - 1]; + } + + // Number of updates per index + unsigned int numUpdatesPerIndex = elementInDim[dimension - innerDim - 1]; + + // Number of indices to update + unsigned int numIndices = indicesShape[0]; + + // Check Input Requirements + // Requirement 1: Indices and Updates must have rank at least 1 + if (indicesDim < 1 || updatesDim < 1) + { + throw InvalidArgumentException("ScatterNd: indices and updates must have rank >= 1."); + } + + // Requirement 2: shape, Indices and Updates must have values + if (indicesInfo.GetNumElements() == 0 || + updatesInfo.GetNumElements() == 0) + { + throw InvalidArgumentException("ScatterNd: indices and updates tensor must have values."); + } + + // Requirement 3: Indices and Updates must match in shape + // The updates dimension should equals to 1 + inner dimension + if 
(updatesDim != 1 + innerDim) + { + throw InvalidArgumentException("ScatterNd: updates dimension should equal to 1 + inner dimension."); + } + // The inner dimension of updates has to match with shape of input + for (unsigned int dimBackIndex = 0; dimBackIndex < innerDim; ++dimBackIndex) + { + if (updatesShape[updatesDim - dimBackIndex - 1] != inputShape[dimension - dimBackIndex - 1]) + { + throw InvalidArgumentException( + fmt::format("ScatterNd: input and updates shape not match on dimension {}", + dimension - dimBackIndex)); + } + } + + // Requirement 4: Check duplicate indices and out of bound indices + std::set indicesSet; + std::vector flattenIndices(numIndices); + for (unsigned int indicesIdx = 0; indicesIdx < numIndices; ++indicesIdx) + { + // Get the index + int flattenIndex = 0; + + for (unsigned int outterIdx = 0; outterIdx < outterDim; ++outterIdx) { + + int outterIndexValue = indices.Get(); + + // Check bounds + if (outterIndexValue < 0 || outterIndexValue >= int(inputShape[outterIdx])) + { + throw InvalidArgumentException( + fmt::format("ScatterNd: indices {} out of bound [0, {})", + outterIndexValue, inputShape[outterIdx])); + } + + flattenIndex += int(elementInDim[outterIdx]) * outterIndexValue; + ++indices; + } + + // Check duplicates when executing ScatterNd::Update + if (descriptor.m_Function == ScatterNdFunction::Update && + indicesSet.find(flattenIndex) != indicesSet.end()) + { + throw InvalidArgumentException( + fmt::format("ScatterNd: duplicate indices {} occurs when executing ScatterNd::Update.", + flattenIndex)); + } + + flattenIndices[indicesIdx] = flattenIndex; + indicesSet.insert(flattenIndex); + } + + // Set zeros to output + for (unsigned int idx = 0; idx < inputElementsNum; ++idx) + { + output.Set(0.0f); + ++output; + } + + // Iterate through all indices to scatter updates + for (unsigned int indicesIdx = 0; indicesIdx < numIndices; ++indicesIdx) + { + // Get the index and calculate the flatten index + int flattenIndex = 
flattenIndices[indicesIdx]; + + // FlattenIndex is the place that we are going to update the elements + unsigned int updatesStartIdx = indicesIdx * numUpdatesPerIndex; + for (unsigned int updatesIdx = 0; updatesIdx < numUpdatesPerIndex; ++updatesIdx) + { + updates[updatesStartIdx + updatesIdx]; + float updateValue = ScatterOperation(descriptor.m_Function, 0.0f, updates.Get()); + output[static_cast(flattenIndex) + updatesIdx]; + output.Set(updateValue); + } + } +} + +} // namespace armnn \ No newline at end of file diff --git a/src/backends/reference/workloads/ScatterNd.hpp b/src/backends/reference/workloads/ScatterNd.hpp new file mode 100644 index 0000000000..e40d3640a7 --- /dev/null +++ b/src/backends/reference/workloads/ScatterNd.hpp @@ -0,0 +1,34 @@ +// +// Copyright © 2024 Arm Ltd and Contributors. All rights reserved. +// SPDX-License-Identifier: MIT +// + +#pragma once + +#include +#include "Encoders.hpp" +#include "Decoders.hpp" +#include "armnn/Descriptors.hpp" + +namespace armnn +{ +// ScatterNd with input tensor +void ScatterNd(const TensorInfo& inputInfo, + const TensorInfo& indicesInfo, + const TensorInfo& updatesInfo, + Decoder& input, + Decoder& indices, + Decoder& updates, + Encoder& output, + ScatterNdDescriptor descriptor); + +// ScatterNd without input tensor, only shape provided +void ScatterNd(const TensorInfo& indicesInfo, + const TensorInfo& updatesInfo, + const TensorInfo& shapeInfo, + Decoder& indices, + Decoder& updates, + Decoder& shape, + Encoder& output, + ScatterNdDescriptor descriptor); +} // namespace armnn \ No newline at end of file -- cgit v1.2.1