author    Tianle Cheng <tianle.cheng@arm.com>    2024-02-23 17:56:54 +0000
committer Kevin May <kevin.may@arm.com>          2024-02-28 16:12:34 +0000
commit    282881877522d3e94752dfc0839de9bfa0aa5a81 (patch)
tree      9cd11c96eb4c179e76f2e586d5a9d9b416dd85a0
parent    2883a86c5a167aea3c736529bff5921ab6cbc99c (diff)
IVGCVSW-8229 & IVGCVSW-8237 ScatterNd: Front end and reference implementation
(scatter_nd, scatter_nd_update, scatter_nd_add, scatter_nd_sub, scatter_nd_min, scatter_nd_max, scatter_nd_mul)

 * Front end support for ScatterNd added.
 * Reference implementation for ScatterNd added.
 * Unit tests added.

Signed-off-by: Tianle Cheng <tianle.cheng@arm.com>
Change-Id: I30da9056d9b03ca9b5fb8d09987341128badbcf4
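To illustrate the front end added here, the following is a minimal usage sketch (illustrative only, not part of the patch): only ScatterNdDescriptor, ScatterNdFunction and AddScatterNdLayer come from this change; the remaining calls are the existing Arm NN INetwork API, and tensor infos would still need to be set on the output slots before optimising the network.

    #include <armnn/Descriptors.hpp>
    #include <armnn/INetwork.hpp>

    // Builds a graph that scatters 'updates' into 'input' at 'indices' (Update function).
    armnn::INetworkPtr BuildScatterNdNetwork()
    {
        using namespace armnn;

        INetworkPtr network = INetwork::Create();

        // Update variant with an explicit input tensor (m_InputEnabled == true).
        ScatterNdDescriptor descriptor(ScatterNdFunction::Update, /*inputEnabled=*/true);

        IConnectableLayer* input   = network->AddInputLayer(0, "input");
        IConnectableLayer* indices = network->AddInputLayer(1, "indices");
        IConnectableLayer* updates = network->AddInputLayer(2, "updates");
        IConnectableLayer* scatter = network->AddScatterNdLayer(descriptor, "scatternd");
        IConnectableLayer* output  = network->AddOutputLayer(0, "output");

        input->GetOutputSlot(0).Connect(scatter->GetInputSlot(0));
        indices->GetOutputSlot(0).Connect(scatter->GetInputSlot(1));
        updates->GetOutputSlot(0).Connect(scatter->GetInputSlot(2));
        scatter->GetOutputSlot(0).Connect(output->GetInputSlot(0));

        return network;
    }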
-rw-r--r--  Android.mk  3
-rw-r--r--  CMakeLists.txt  2
-rw-r--r--  docs/02_operator_list.dox  59
-rw-r--r--  include/armnn/BackendHelper.hpp  9
-rw-r--r--  include/armnn/Descriptors.hpp  55
-rw-r--r--  include/armnn/DescriptorsFwd.hpp  3
-rw-r--r--  include/armnn/INetwork.hpp  9
-rw-r--r--  include/armnn/Types.hpp  18
-rw-r--r--  include/armnn/backends/WorkloadData.hpp  7
-rw-r--r--  src/armnn/BackendHelper.cpp  19
-rw-r--r--  src/armnn/LayersFwd.hpp  4
-rw-r--r--  src/armnn/Network.cpp  13
-rw-r--r--  src/armnn/Network.hpp  5
-rw-r--r--  src/armnn/layers/ScatterNdLayer.cpp  94
-rw-r--r--  src/armnn/layers/ScatterNdLayer.hpp  44
-rw-r--r--  src/backends/backendsCommon/WorkloadData.cpp  42
-rw-r--r--  src/backends/backendsCommon/WorkloadFactory.cpp  17
-rw-r--r--  src/backends/backendsCommon/common.mk  3
-rw-r--r--  src/backends/backendsCommon/test/CMakeLists.txt  2
-rw-r--r--  src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp  4
-rw-r--r--  src/backends/backendsCommon/test/LayerTests.hpp  3
-rw-r--r--  src/backends/backendsCommon/test/layerTests/ScatterNdTestImpl.cpp  1479
-rw-r--r--  src/backends/backendsCommon/test/layerTests/ScatterNdTestImpl.hpp  156
-rw-r--r--  src/backends/reference/RefLayerSupport.cpp  65
-rw-r--r--  src/backends/reference/RefLayerSupport.hpp  9
-rw-r--r--  src/backends/reference/RefWorkloadFactory.cpp  7
-rw-r--r--  src/backends/reference/backend.mk  4
-rw-r--r--  src/backends/reference/test/RefLayerTests.cpp  54
-rw-r--r--  src/backends/reference/workloads/CMakeLists.txt  6
-rw-r--r--  src/backends/reference/workloads/RefScatterNdWorkload.cpp  100
-rw-r--r--  src/backends/reference/workloads/RefScatterNdWorkload.hpp  30
-rw-r--r--  src/backends/reference/workloads/RefWorkloads.hpp  3
-rw-r--r--  src/backends/reference/workloads/ScatterNd.cpp  336
-rw-r--r--  src/backends/reference/workloads/ScatterNd.hpp  34
34 files changed, 2674 insertions, 24 deletions
diff --git a/Android.mk b/Android.mk
index 93dcb5c61a..379cce4255 100644
--- a/Android.mk
+++ b/Android.mk
@@ -1,5 +1,5 @@
#
-# Copyright © 2017-2023 ARM Ltd and Contributors. All rights reserved.
+# Copyright © 2017-2024 ARM Ltd and Contributors. All rights reserved.
# SPDX-License-Identifier: MIT
#
@@ -267,6 +267,7 @@ LOCAL_SRC_FILES := \
src/armnn/layers/ReshapeLayer.cpp \
src/armnn/layers/ResizeLayer.cpp \
src/armnn/layers/ReverseV2Layer.cpp \
+ src/armnn/layers/ScatterNdLayer.cpp \
src/armnn/layers/ShapeLayer.cpp \
src/armnn/layers/SliceLayer.cpp \
src/armnn/layers/SoftmaxLayer.cpp \
diff --git a/CMakeLists.txt b/CMakeLists.txt
index e8067350ae..8bf7a40d21 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -310,6 +310,8 @@ list(APPEND armnn_sources
src/armnn/layers/ResizeLayer.cpp
src/armnn/layers/ReverseV2Layer.hpp
src/armnn/layers/ReverseV2Layer.cpp
+ src/armnn/layers/ScatterNdLayer.cpp
+ src/armnn/layers/ScatterNdLayer.hpp
src/armnn/layers/ShapeLayer.cpp
src/armnn/layers/ShapeLayer.hpp
src/armnn/layers/SliceLayer.cpp
diff --git a/docs/02_operator_list.dox b/docs/02_operator_list.dox
index fa386f21bd..6b2442d28f 100644
--- a/docs/02_operator_list.dox
+++ b/docs/02_operator_list.dox
@@ -1,4 +1,4 @@
-/// Copyright (c) 2021, 2023 ARM Limited and Contributors. All rights reserved.
+/// Copyright (c) 2021, 2023-2024 ARM Limited and Contributors. All rights reserved.
///
/// SPDX-License-Identifier: MIT
///
@@ -2931,6 +2931,63 @@ where N = batches, C = channels, H = height, W = width
<tr><td>FLOAT32
</table>
<tr>
+ <td rowspan="3">ScatterLayer
+ <td rowspan="3" style="width:200px;"> Layer to scatter updates according to individual values at the specified indices.
+ <td rowspan="3">
+ <ul>
+ <li>N/A
+ </ul>
+ <td>CpuRef
+ <td>
+ <ul>
+ <li>All
+ </ul>
+ <td>
+ <table>
+ <tr><th>
+ <tr><td>FLOAT16
+ <tr><td>FLOAT32
+ <tr><td>QASYMMS8
+ <tr><td>QASYMMU8
+ <tr><td>QSYMMS8
+ <tr><td>QSYMMS16
+ <tr><td>SIGNED32
+ </table>
+<tr>
+ <td>CpuAcc
+ <td>
+ <ul>
+ <li>All
+ </ul>
+ <td>
+ <table>
+ <tr><th>
+ <tr><td>FLOAT16
+ <tr><td>FLOAT32
+ <tr><td>QASYMMS8
+ <tr><td>QASYMMU8
+ <tr><td>QSYMMS8
+ <tr><td>QSYMMS16
+ <tr><td>SIGNED32
+ </table>
+<tr>
+ <td>GpuAcc
+ <td>
+ <ul>
+ <li>All
+ </ul>
+ <td>
+ <table>
+ <tr><th>
+ <tr><td>FLOAT16
+ <tr><td>FLOAT32
+ <tr><td>QASYMMS8
+ <tr><td>QASYMMU8
+ <tr><td>QSYMMS8
+ <tr><td>QSYMMS16
+ <tr><td>SIGNED32
+ </table>
+<tr>
<td rowspan="3">ShapeLayer
<td rowspan="3" style="width:200px;"> Layer to return the shape of the input tensor.
<td rowspan="3">
diff --git a/include/armnn/BackendHelper.hpp b/include/armnn/BackendHelper.hpp
index b61f010b0f..a6b81eaa01 100644
--- a/include/armnn/BackendHelper.hpp
+++ b/include/armnn/BackendHelper.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017-2019,2021-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2019,2021-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -380,6 +380,13 @@ public:
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+ bool IsScatterNdSupported(const TensorInfo& input,
+ const TensorInfo& indices,
+ const TensorInfo& updates,
+ const TensorInfo& output,
+ const ScatterNdDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
bool IsSliceSupported(const TensorInfo& input,
const TensorInfo& output,
const SliceDescriptor& descriptor,
diff --git a/include/armnn/Descriptors.hpp b/include/armnn/Descriptors.hpp
index bf40b35ae9..7230bc2c1d 100644
--- a/include/armnn/Descriptors.hpp
+++ b/include/armnn/Descriptors.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
@@ -1675,4 +1675,57 @@ struct BroadcastToDescriptor : BaseDescriptor
TensorShape m_BroadcastToShape;
};
+/// A ScatterNdDescriptor for the ScatterNdLayer.
+struct ScatterNdDescriptor : BaseDescriptor
+{
+ // default constructor
+ ScatterNdDescriptor()
+ : m_Function(ScatterNdFunction::Update)
+ , m_InputEnabled(true)
+ , m_Axis(0)
+ , m_AxisEnabled(false)
+ {}
+
+ // Constructor for ScatterNd operators other than ScatterElement (no axis attribute)
+ ScatterNdDescriptor(ScatterNdFunction function,
+ bool inputEnabled)
+ : m_Function(function)
+ , m_InputEnabled(inputEnabled)
+ , m_Axis(0)
+ , m_AxisEnabled(false)
+
+ {}
+
+ // constructor for ScatterElement operator
+ ScatterNdDescriptor(ScatterNdFunction function,
+ bool inputEnabled,
+ int32_t axis)
+ : m_Function(function)
+ , m_InputEnabled(inputEnabled)
+ , m_Axis(axis)
+ , m_AxisEnabled(true)
+
+ {}
+
+ bool operator ==(const ScatterNdDescriptor &rhs) const
+ {
+ return ((m_Function == rhs.m_Function) &&
+ (m_InputEnabled == rhs.m_InputEnabled) &&
+ (m_AxisEnabled == rhs.m_AxisEnabled) &&
+ (m_Axis == rhs.m_Axis));
+ }
+
+ /// Specifies the function to apply: update, add, sub, max, min or mul.
+ ScatterNdFunction m_Function;
+
+ /// Flag to show if input tensor is accepted.
+ bool m_InputEnabled;
+
+ /// Extra attribute for ScatterElement; defaults to 0. Axis values other than 0 are not supported.
+ int32_t m_Axis;
+
+ /// Flag for ScatterElement; defaults to false. m_AxisEnabled = true is not supported for now.
+ bool m_AxisEnabled;
+};
+
} // namespace armnn
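A minimal sketch (illustrative only, not part of the patch) of the three constructor forms introduced above:

    // Default: Update function, input tensor enabled, axis disabled.
    armnn::ScatterNdDescriptor byDefault;

    // Non-ScatterElement operators: choose the function and whether an input tensor
    // (true) or only a shape tensor (false) is supplied.
    armnn::ScatterNdDescriptor addNoInput(armnn::ScatterNdFunction::Add, /*inputEnabled=*/false);

    // ScatterElement form: additionally takes an axis; only axis == 0 is supported.
    armnn::ScatterNdDescriptor elementUpdate(armnn::ScatterNdFunction::Update, /*inputEnabled=*/true, /*axis=*/0);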
diff --git a/include/armnn/DescriptorsFwd.hpp b/include/armnn/DescriptorsFwd.hpp
index 4b0b70c2d3..3518a41c42 100644
--- a/include/armnn/DescriptorsFwd.hpp
+++ b/include/armnn/DescriptorsFwd.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -44,6 +44,7 @@ struct QLstmDescriptor;
struct ReshapeDescriptor;
struct ResizeDescriptor;
struct ReduceDescriptor;
+struct ScatterNdDescriptor;
struct SliceDescriptor;
struct SoftmaxDescriptor;
struct SpaceToBatchNdDescriptor;
diff --git a/include/armnn/INetwork.hpp b/include/armnn/INetwork.hpp
index 64fdab6bd0..84f3e0cb64 100644
--- a/include/armnn/INetwork.hpp
+++ b/include/armnn/INetwork.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
@@ -864,6 +864,13 @@ public:
IConnectableLayer* AddBroadcastToLayer(const BroadcastToDescriptor& descriptor,
const char* name = nullptr);
+ /// Add a ScatterNd layer to the network
+ /// @param descriptor - Parameters for the ScatterNd operation
+ /// @param name - Optional name for the layer
+ /// @return - Interface for configuring the layer
+ IConnectableLayer* AddScatterNdLayer(const ScatterNdDescriptor& descriptor,
+ const char* name = nullptr);
+
void ExecuteStrategy(IStrategy& strategy) const;
protected:
diff --git a/include/armnn/Types.hpp b/include/armnn/Types.hpp
index d87e7f7147..bbe1ecccbd 100644
--- a/include/armnn/Types.hpp
+++ b/include/armnn/Types.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2018-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2018-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
@@ -482,8 +482,8 @@ using InferenceTimingPair = std::pair<HighResolutionClock, HighResolutionClock>;
X(ReverseV2) \
X(Tile) \
X(Fused) \
- X(BroadcastTo) \
-
+ X(BroadcastTo) \
+ X(ScatterNd) \
// New layers should be added at last position to minimize instability.
/// When adding a new layer, adapt also the LastLayer enum value in the
@@ -494,7 +494,17 @@ enum class LayerType
LIST_OF_LAYER_TYPE
#undef X
FirstLayer = Activation,
- LastLayer = BroadcastTo
+ LastLayer = ScatterNd
+};
+
+enum class ScatterNdFunction
+{
+ Update = 0,
+ Add = 1,
+ Sub = 2,
+ Max = 3,
+ Min = 4,
+ Mul = 5
};
const char* GetLayerTypeAsCString(LayerType type);
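Illustrative only: the hypothetical helper below is not the ScatterNd.cpp reference implementation added by this patch (its body is not shown here); it merely sketches how a ScatterNdFunction value selects the element-wise combination applied at each scattered position.

    #include <algorithm>
    #include <armnn/Exceptions.hpp>
    #include <armnn/Types.hpp>

    // Combines an existing element with an update according to the chosen function.
    template <typename T>
    T Combine(armnn::ScatterNdFunction function, T existing, T update)
    {
        switch (function)
        {
            case armnn::ScatterNdFunction::Update: return update;
            case armnn::ScatterNdFunction::Add:    return existing + update;
            case armnn::ScatterNdFunction::Sub:    return existing - update;
            case armnn::ScatterNdFunction::Max:    return std::max(existing, update);
            case armnn::ScatterNdFunction::Min:    return std::min(existing, update);
            case armnn::ScatterNdFunction::Mul:    return existing * update;
            default: throw armnn::InvalidArgumentException("Unknown ScatterNdFunction");
        }
    }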
diff --git a/include/armnn/backends/WorkloadData.hpp b/include/armnn/backends/WorkloadData.hpp
index a90a1abd65..a93d986e4d 100644
--- a/include/armnn/backends/WorkloadData.hpp
+++ b/include/armnn/backends/WorkloadData.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2021-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
@@ -770,4 +770,9 @@ struct BroadcastToQueueDescriptor : QueueDescriptorWithParameters<BroadcastToDes
void Validate(const WorkloadInfo& workloadInfo) const;
};
+struct ScatterNdQueueDescriptor : QueueDescriptorWithParameters<ScatterNdDescriptor>
+{
+ void Validate(const WorkloadInfo& workloadInfo) const;
+};
+
} // namespace armnn
diff --git a/src/armnn/BackendHelper.cpp b/src/armnn/BackendHelper.cpp
index 56938d021e..37ad5da2fe 100644
--- a/src/armnn/BackendHelper.cpp
+++ b/src/armnn/BackendHelper.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017,2022-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017,2022-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -1276,6 +1276,23 @@ bool LayerSupportHandle::IsReverseV2Supported(const armnn::TensorInfo &input0,
reasonIfUnsupported);
}
+bool LayerSupportHandle::IsScatterNdSupported(const TensorInfo& input,
+ const TensorInfo& indices,
+ const TensorInfo& updates,
+ const TensorInfo& output,
+ const armnn::ScatterNdDescriptor &descriptor,
+ Optional<std::string&> reasonIfUnsupported)
+{
+ TensorInfos infos{input, indices, updates, output};
+
+ return m_LayerSupport->IsLayerSupported(LayerType::ScatterNd,
+ infos,
+ descriptor,
+ EmptyOptional(),
+ EmptyOptional(),
+ reasonIfUnsupported);
+}
+
bool LayerSupportHandle::IsShapeSupported(const TensorInfo& input,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported)
diff --git a/src/armnn/LayersFwd.hpp b/src/armnn/LayersFwd.hpp
index 325bfc3875..6ba409de50 100644
--- a/src/armnn/LayersFwd.hpp
+++ b/src/armnn/LayersFwd.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
@@ -67,6 +67,7 @@
#include "layers/ReshapeLayer.hpp"
#include "layers/ResizeLayer.hpp"
#include "layers/ReverseV2Layer.hpp"
+#include "layers/ScatterNdLayer.hpp"
#include "layers/ShapeLayer.hpp"
#include "layers/SliceLayer.hpp"
#include "layers/SoftmaxLayer.hpp"
@@ -172,6 +173,7 @@ DECLARE_LAYER(Reduce)
DECLARE_LAYER(Reshape)
DECLARE_LAYER(Resize)
DECLARE_LAYER(ReverseV2)
+DECLARE_LAYER(ScatterNd)
DECLARE_LAYER(Shape)
DECLARE_LAYER(Slice)
DECLARE_LAYER(Softmax)
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index 2582403247..60df27d7fc 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -663,6 +663,12 @@ IConnectableLayer* INetwork::AddBroadcastToLayer(const BroadcastToDescriptor& de
return pNetworkImpl->AddBroadcastToLayer(descriptor, name);
}
+IConnectableLayer* INetwork::AddScatterNdLayer(const ScatterNdDescriptor &descriptor,
+ const char *name)
+{
+ return pNetworkImpl->AddScatterNdLayer(descriptor, name);
+}
+
void INetwork::ExecuteStrategy(IStrategy& strategy) const
{
return pNetworkImpl->ExecuteStrategy(strategy);
@@ -3085,6 +3091,11 @@ IConnectableLayer* NetworkImpl::AddBroadcastToLayer(const BroadcastToDescriptor
return m_Graph->AddLayer<BroadcastToLayer>(desc, name);
}
+IConnectableLayer* NetworkImpl::AddScatterNdLayer(const ScatterNdDescriptor &desc, const char *name)
+{
+ return m_Graph->AddLayer<ScatterNdLayer>(desc, name);
+}
+
void NetworkImpl::ExecuteStrategy(IStrategy& strategy) const
{
for (auto layer : GetGraph())
diff --git a/src/armnn/Network.hpp b/src/armnn/Network.hpp
index 6ffdfb37a8..195f97e692 100644
--- a/src/armnn/Network.hpp
+++ b/src/armnn/Network.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
@@ -196,6 +196,9 @@ public:
IConnectableLayer* AddReverseV2Layer(const char* name = nullptr);
+ IConnectableLayer* AddScatterNdLayer(const ScatterNdDescriptor& scatterDescriptor,
+ const char* name = nullptr);
+
IConnectableLayer* AddShapeLayer(const char* name = nullptr);
IConnectableLayer* AddSliceLayer(const SliceDescriptor& sliceDescriptor, const char* name = nullptr);
diff --git a/src/armnn/layers/ScatterNdLayer.cpp b/src/armnn/layers/ScatterNdLayer.cpp
new file mode 100644
index 0000000000..a0b270fba5
--- /dev/null
+++ b/src/armnn/layers/ScatterNdLayer.cpp
@@ -0,0 +1,94 @@
+//
+// Copyright © 2024 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "ScatterNdLayer.hpp"
+#include "LayerCloneBase.hpp"
+
+#include <armnn/TypesUtils.hpp>
+#include <armnn/backends/WorkloadData.hpp>
+#include <armnn/backends/WorkloadFactory.hpp>
+
+namespace armnn
+{
+
+ScatterNdLayer::ScatterNdLayer(const ScatterNdDescriptor &param, const char* name)
+ : LayerWithParameters(3, 1, LayerType::ScatterNd, param, name)
+{
+}
+
+std::unique_ptr<IWorkload> ScatterNdLayer::CreateWorkload(const armnn::IWorkloadFactory& factory) const
+{
+ ScatterNdQueueDescriptor descriptor;
+ SetAdditionalInfo(descriptor);
+
+ return factory.CreateWorkload(LayerType::ScatterNd, descriptor, PrepInfoAndDesc(descriptor));
+}
+
+ScatterNdLayer* ScatterNdLayer::Clone(Graph& graph) const
+{
+ auto layer = CloneBase<ScatterNdLayer>(graph, m_Param, GetName());
+
+ return std::move(layer);
+}
+
+std::vector<TensorShape> ScatterNdLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
+{
+ const auto inputDims = inputShapes[0].GetNumDimensions();
+
+ std::vector<unsigned int> dimSizes(inputDims);
+
+ for (unsigned i = 0; i < inputDims; ++i)
+ {
+ dimSizes[i] = inputShapes[0][i];
+ }
+
+ TensorShape outputShape({ inputDims, dimSizes.data() });
+
+ return std::vector<TensorShape>({ outputShape });
+}
+
+void ScatterNdLayer::ValidateTensorShapesFromInputs()
+{
+ VerifyLayerConnections(3, CHECK_LOCATION());
+
+ const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
+
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
+
+ if (m_Param.m_InputEnabled)
+ {
+ std::vector<TensorShape> inferredShapes = InferOutputShapes(
+ {GetInputSlot(0).GetTensorInfo().GetShape(),
+ GetInputSlot(1).GetTensorInfo().GetShape(),
+ GetInputSlot(2).GetTensorInfo().GetShape()});
+
+ if (inferredShapes.size() != 1) {
+ throw armnn::LayerValidationException("inferredShape has " +
+ std::to_string(inferredShapes.size()) +
+ " elements - should only have 1.");
+ }
+
+ ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "ScatterLayer");
+ }
+ else
+ {
+ // No input tensor, only shape provided via input slot
+ // In this case, we cannot validate the output shape from the input shape, but we can
+ // validate that the dimensions of the shape tensor and the output tensor match
+ unsigned int shapeDims = GetInputSlot(0).GetTensorInfo().GetNumDimensions();
+ unsigned int outputDims = GetOutputSlot(0).GetTensorInfo().GetNumDimensions();
+
+ if (shapeDims != outputDims)
+ {
+ throw armnn::LayerValidationException("shape dimension " +
+ std::to_string(shapeDims) +
+ " and output dimension " +
+ std::to_string(outputDims) +
+ " are not matched.");
+ }
+ }
+}
+
+} // namespace armnn
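For clarity, the shape relationship validated above, using the 2-D update-with-input case from the tests added later in this patch (illustrative sketch only): when m_InputEnabled is true, the inferred output shape is simply the data input's shape.

    #include <armnn/Tensor.hpp>

    void ScatterNdShapeExample()
    {
        armnn::TensorShape input({ 3, 3 });    // data tensor
        armnn::TensorShape indices({ 3, 2 });  // three rank-2 index tuples
        armnn::TensorShape updates({ 3 });     // one update value per index tuple
        armnn::TensorShape output({ 3, 3 });   // inferred output: identical to the input shape
    }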
diff --git a/src/armnn/layers/ScatterNdLayer.hpp b/src/armnn/layers/ScatterNdLayer.hpp
new file mode 100644
index 0000000000..adad66758a
--- /dev/null
+++ b/src/armnn/layers/ScatterNdLayer.hpp
@@ -0,0 +1,44 @@
+//
+// Copyright © 2024 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "LayerWithParameters.hpp"
+
+namespace armnn
+{
+
+/// This layer represents a ScatterNd operator.
+class ScatterNdLayer : public LayerWithParameters<ScatterNdDescriptor>
+{
+public:
+ /// Makes a workload for the ScatterNd type.
+ /// @param [in] factory The workload factory which will create the workload.
+ /// @return A pointer to the created workload, or nullptr if not created.
+ virtual std::unique_ptr<IWorkload> CreateWorkload(const IWorkloadFactory& factory) const override;
+
+ /// Creates a dynamically-allocated copy of this layer.
+ /// @param [in] graph The graph into which this layer is being cloned.
+ ScatterNdLayer* Clone(Graph& graph) const override;
+
+ /// Infers the output shapes from given input shapes and layer properties.
+ /// @param [in] inputShapes The input shapes layer has.
+ /// @return A vector to the inferred output shape.
+ std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
+
+ /// Check if the input tensor shape(s)
+ /// will lead to a valid configuration of @ref ScatterNdLayer.
+ void ValidateTensorShapesFromInputs() override;
+
+protected:
+ /// Constructor to create a ScatterNdLayer.
+ /// @param [in] name Optional name for the layer.
+ ScatterNdLayer(const ScatterNdDescriptor& param, const char* name);
+
+ /// Default destructor
+ ~ScatterNdLayer() = default;
+};
+
+} // namespace armnn
diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp
index 0ddb4291f1..de985ec28d 100644
--- a/src/backends/backendsCommon/WorkloadData.cpp
+++ b/src/backends/backendsCommon/WorkloadData.cpp
@@ -4443,4 +4443,46 @@ void BroadcastToQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) cons
ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
}
+void ScatterNdQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
+{
+ const std::string& descriptorName{"ScatterNdQueueDescriptor"};
+
+ ValidateNumInputs(workloadInfo, descriptorName, 3);
+ ValidateNumOutputs(workloadInfo, descriptorName, 1);
+
+ const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
+ const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
+ const TensorInfo& inputTensorInfo2 = workloadInfo.m_InputTensorInfos[2];
+ const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
+
+ std::vector<DataType> supportedTypes =
+ {
+ DataType::Float32,
+ DataType::Float16,
+ DataType::QAsymmS8,
+ DataType::QAsymmU8,
+ DataType::QSymmS8,
+ DataType::QSymmS16,
+ DataType::Signed32
+ };
+
+ std::vector<DataType> indicesSupportedTypes =
+ {
+ DataType::Signed32
+ };
+
+ if (m_Parameters.m_InputEnabled)
+ {
+ ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
+ }
+ else
+ {
+ ValidateDataTypes(inputTensorInfo0, indicesSupportedTypes, descriptorName);
+ }
+
+ ValidateDataTypes(inputTensorInfo1, indicesSupportedTypes, descriptorName);
+ ValidateDataTypes(inputTensorInfo2, supportedTypes, descriptorName);
+ ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
+}
+
} // namespace armnn
\ No newline at end of file
diff --git a/src/backends/backendsCommon/WorkloadFactory.cpp b/src/backends/backendsCommon/WorkloadFactory.cpp
index 1f8d4dae1d..b81e9c8e73 100644
--- a/src/backends/backendsCommon/WorkloadFactory.cpp
+++ b/src/backends/backendsCommon/WorkloadFactory.cpp
@@ -1168,6 +1168,23 @@ bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId,
reason);
break;
}
+ case LayerType::ScatterNd:
+ {
+ auto cLayer = PolymorphicDowncast<const ScatterNdLayer*>(&layer);
+ const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
+ const TensorInfo& indices = layer.GetInputSlot(1).GetTensorInfo();
+ const TensorInfo& updates = layer.GetInputSlot(2).GetTensorInfo();
+ const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
+
+ result = layerSupportObject.IsScatterNdSupported(OverrideDataType(input, dataType),
+ OverrideDataType(indices, dataType),
+ OverrideDataType(updates, dataType),
+ OverrideDataType(output, dataType),
+ cLayer->GetParameters(),
+ reason);
+
+ break;
+ }
case LayerType::Shape:
{
const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
diff --git a/src/backends/backendsCommon/common.mk b/src/backends/backendsCommon/common.mk
index 4876f02ce0..508d5329ba 100644
--- a/src/backends/backendsCommon/common.mk
+++ b/src/backends/backendsCommon/common.mk
@@ -1,5 +1,5 @@
#
-# Copyright © 2017-2023 ARM Ltd and Contributors. All rights reserved.
+# Copyright © 2017-2024 ARM Ltd and Contributors. All rights reserved.
# SPDX-License-Identifier: MIT
#
@@ -94,6 +94,7 @@ COMMON_TEST_SOURCES := \
test/layerTests/ResizeTestImpl.cpp \
test/layerTests/ReverseV2TestImpl.cpp \
test/layerTests/RsqrtTestImpl.cpp \
+ test/layerTests/ScatterNdTestImpl.cpp \
test/layerTests/SliceTestImpl.cpp \
test/layerTests/SquaredDifferenceTestImpl.cpp \
test/layerTests/QuantizeTestImpl.cpp \
diff --git a/src/backends/backendsCommon/test/CMakeLists.txt b/src/backends/backendsCommon/test/CMakeLists.txt
index 7de150dd1d..66bf6a084e 100644
--- a/src/backends/backendsCommon/test/CMakeLists.txt
+++ b/src/backends/backendsCommon/test/CMakeLists.txt
@@ -181,6 +181,8 @@ list(APPEND armnnBackendsCommonUnitTests_sources
layerTests/ReverseV2TestImpl.hpp
layerTests/RsqrtTestImpl.cpp
layerTests/RsqrtTestImpl.hpp
+ layerTests/ScatterNdTestImpl.cpp
+ layerTests/ScatterNdTestImpl.hpp
layerTests/SinTestImpl.cpp
layerTests/SinTestImpl.hpp
layerTests/ShapeTestImpl.cpp
diff --git a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
index 9f472e9f28..f9de60d89c 100644
--- a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
+++ b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
@@ -752,6 +752,8 @@ DECLARE_LAYER_POLICY_2_PARAM(Reshape)
DECLARE_LAYER_POLICY_1_PARAM(ReverseV2)
+DECLARE_LAYER_POLICY_2_PARAM(ScatterNd)
+
DECLARE_LAYER_POLICY_1_PARAM(Shape)
DECLARE_LAYER_POLICY_2_PARAM(Slice)
diff --git a/src/backends/backendsCommon/test/LayerTests.hpp b/src/backends/backendsCommon/test/LayerTests.hpp
index 015d25ef3e..3268a07810 100644
--- a/src/backends/backendsCommon/test/LayerTests.hpp
+++ b/src/backends/backendsCommon/test/LayerTests.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017, 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017, 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -64,6 +64,7 @@
#include <backendsCommon/test/layerTests/ResizeTestImpl.hpp>
#include <backendsCommon/test/layerTests/ReverseV2TestImpl.hpp>
#include <backendsCommon/test/layerTests/RsqrtTestImpl.hpp>
+#include <backendsCommon/test/layerTests/ScatterNdTestImpl.hpp>
#include <backendsCommon/test/layerTests/ShapeTestImpl.hpp>
#include <backendsCommon/test/layerTests/SinTestImpl.hpp>
#include <backendsCommon/test/layerTests/SliceTestImpl.hpp>
diff --git a/src/backends/backendsCommon/test/layerTests/ScatterNdTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ScatterNdTestImpl.cpp
new file mode 100644
index 0000000000..44203b062f
--- /dev/null
+++ b/src/backends/backendsCommon/test/layerTests/ScatterNdTestImpl.cpp
@@ -0,0 +1,1479 @@
+//
+// Copyright © 2024 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "ScatterNdTestImpl.hpp"
+
+#include <armnn/backends/IBackendInternal.hpp>
+#include <armnn/backends/Workload.hpp>
+#include <armnn/backends/WorkloadData.hpp>
+#include <armnn/backends/WorkloadFactory.hpp>
+
+#include <armnnTestUtils/WorkloadTestUtils.hpp>
+#include <armnnUtils/QuantizeHelper.hpp>
+#include <armnnTestUtils/TensorCopyUtils.hpp>
+#include <armnn/Optional.hpp>
+#include <armnn/BackendHelper.hpp>
+
+namespace
+{
+template<armnn::DataType ArmnnType, typename T, typename TInput, std::size_t NumDims>
+LayerTestResult<T, NumDims> ScatterNdTestImpl(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
+ const std::vector<TInput>& input,
+ const std::vector<int>& indices,
+ const std::vector<T>& updates,
+ const std::vector<T>& outputExpected,
+ const armnn::TensorInfo& inputInfo,
+ const armnn::TensorInfo& indicesInfo,
+ const armnn::TensorInfo& updatesInfo,
+ const armnn::TensorInfo& outputInfo,
+ const armnn::ScatterNdDescriptor &descriptor)
+{
+ LayerTestResult<T, NumDims> result(outputInfo);
+ std::vector<T> outputActual(outputInfo.GetNumElements());
+
+ std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputInfo);
+ std::unique_ptr<armnn::ITensorHandle> indicesHandle = tensorHandleFactory.CreateTensorHandle(indicesInfo);
+ std::unique_ptr<armnn::ITensorHandle> updatesHandle = tensorHandleFactory.CreateTensorHandle(updatesInfo);
+ std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputInfo);
+
+ armnn::ScatterNdQueueDescriptor queueDescriptor;
+ queueDescriptor.m_Parameters = descriptor;
+
+ armnn::WorkloadInfo workloadInfo;
+
+ AddInputToWorkload(queueDescriptor, workloadInfo, inputInfo, inputHandle.get());
+ AddInputToWorkload(queueDescriptor, workloadInfo, indicesInfo, indicesHandle.get());
+ AddInputToWorkload(queueDescriptor, workloadInfo, updatesInfo, updatesHandle.get());
+ AddOutputToWorkload(queueDescriptor, workloadInfo, outputInfo, outputHandle.get());
+
+ // Don't execute if ScatterNd is not supported, as an exception will be raised.
+ const armnn::BackendId& backend = workloadFactory.GetBackendId();
+ std::string reasonIfUnsupported;
+ armnn::LayerSupportHandle handle = armnn::GetILayerSupportByBackendId(backend);
+ result.m_Supported = handle.IsScatterNdSupported(inputInfo,
+ indicesInfo,
+ updatesInfo,
+ outputInfo,
+ descriptor,
+ reasonIfUnsupported);
+ if (!result.m_Supported)
+ {
+ return result;
+ }
+
+ auto workload = workloadFactory.CreateWorkload(armnn::LayerType::ScatterNd, queueDescriptor, workloadInfo);
+
+ inputHandle->Allocate();
+ indicesHandle->Allocate();
+ updatesHandle->Allocate();
+ outputHandle->Allocate();
+
+ if (input.data() != nullptr)
+ {
+ CopyDataToITensorHandle(inputHandle.get(), input.data());
+ }
+ if (indices.data() != nullptr)
+ {
+ CopyDataToITensorHandle(indicesHandle.get(), indices.data());
+ }
+ if (updates.data() != nullptr)
+ {
+ CopyDataToITensorHandle(updatesHandle.get(), updates.data());
+ }
+
+ workload->PostAllocationConfigure();
+ ExecuteWorkload(*workload, memoryManager);
+
+ if (outputActual.data() != nullptr)
+ {
+ CopyDataFromITensorHandle(outputActual.data(), outputHandle.get());
+ }
+
+ return LayerTestResult<T, NumDims>(outputActual,
+ outputExpected,
+ outputHandle->GetShape(),
+ outputInfo.GetShape());
+
+}
+}
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<T, 1> ScatterNd1DimUpdateWithInput(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ float qScale = 1.0f;
+ int32_t qOffset = 0;
+
+ armnn::TensorInfo inputInfo({5}, ArmnnType, qScale, qOffset);
+ armnn::TensorInfo indicesInfo({3, 1}, armnn::DataType::Signed32, qScale, qOffset);
+ armnn::TensorInfo updatesInfo({3}, ArmnnType, qScale, qOffset);
+ armnn::TensorInfo outputInfo({5}, ArmnnType, qScale, qOffset);
+
+ std::vector<T> input = armnnUtils::QuantizedVector<T>({ 0, 0, 0, 0, 0 }, qScale, qOffset);
+
+ std::vector<int> indices = armnnUtils::QuantizedVector<int>({0, 1, 2}, qScale, qOffset);
+
+ std::vector<T> updates = armnnUtils::QuantizedVector<T>({ 1, 2, 3 }, qScale, qOffset);
+
+ std::vector<T> outputExpected = armnnUtils::QuantizedVector<T>({ 1, 2, 3, 0, 0 }, qScale, qOffset);
+
+ armnn::ScatterNdDescriptor descriptor(armnn::ScatterNdFunction::Update, true);
+
+ return ScatterNdTestImpl<ArmnnType, T, T, 1>(workloadFactory,
+ memoryManager,
+ tensorHandleFactory,
+ input,
+ indices,
+ updates,
+ outputExpected,
+ inputInfo,
+ indicesInfo,
+ updatesInfo,
+ outputInfo,
+ descriptor);
+}
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<T, 1> ScatterNd1DimUpdateNoInput(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ float qScale = 1.0f;
+ int32_t qOffset = 0;
+
+ armnn::TensorInfo shapeInfo({1}, armnn::DataType::Signed32, qScale, qOffset);
+ armnn::TensorInfo indicesInfo({3, 1}, armnn::DataType::Signed32, qScale, qOffset);
+ armnn::TensorInfo updatesInfo({3}, ArmnnType, qScale, qOffset);
+ armnn::TensorInfo outputInfo({5}, ArmnnType, qScale, qOffset);
+
+ std::vector<int> shape = armnnUtils::QuantizedVector<int>({ 5 }, qScale, qOffset);
+
+ std::vector<int> indices = armnnUtils::QuantizedVector<int>({ 0, 1, 2 }, qScale, qOffset);
+
+ std::vector<T> updates = armnnUtils::QuantizedVector<T>({ 1, 2, 3 }, qScale, qOffset);
+
+ std::vector<T> outputExpected = armnnUtils::QuantizedVector<T>({ 1, 2, 3, 0, 0 }, qScale, qOffset);
+
+ armnn::ScatterNdDescriptor descriptor(armnn::ScatterNdFunction::Update, false);
+
+ return ScatterNdTestImpl<ArmnnType, T, int, 1>(workloadFactory,
+ memoryManager,
+ tensorHandleFactory,
+ shape,
+ indices,
+ updates,
+ outputExpected,
+ shapeInfo,
+ indicesInfo,
+ updatesInfo,
+ outputInfo,
+ descriptor);
+}
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<T, 2> ScatterNd2DimUpdateWithInput(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ float qScale = 1.0f;
+ int32_t qOffset = 0;
+
+ armnn::TensorInfo inputInfo({3, 3}, ArmnnType, qScale, qOffset);
+ armnn::TensorInfo indicesInfo({3, 2}, armnn::DataType::Signed32, qScale, qOffset);
+ armnn::TensorInfo updatesInfo({3}, ArmnnType, qScale, qOffset);
+ armnn::TensorInfo outputInfo({3, 3}, ArmnnType, qScale, qOffset);
+
+ std::vector<T> input = armnnUtils::QuantizedVector<T>({ 1, 1, 1,
+ 1, 1, 1,
+ 1, 1, 1 }, qScale, qOffset);
+
+ std::vector<int> indices = armnnUtils::QuantizedVector<int>({ 0, 0,
+ 1, 1,
+ 2, 2 }, qScale, qOffset);
+
+ std::vector<T> updates = armnnUtils::QuantizedVector<T>({ 1, 2, 3 }, qScale, qOffset);
+
+ std::vector<T> outputExpected = armnnUtils::QuantizedVector<T>({ 1, 1, 1,
+ 1, 2, 1,
+ 1, 1, 3 }, qScale, qOffset);
+
+ armnn::ScatterNdDescriptor descriptor(armnn::ScatterNdFunction::Update, true);
+
+ return ScatterNdTestImpl<ArmnnType, T, T, 2>(workloadFactory,
+ memoryManager,
+ tensorHandleFactory,
+ input,
+ indices,
+ updates,
+ outputExpected,
+ inputInfo,
+ indicesInfo,
+ updatesInfo,
+ outputInfo,
+ descriptor);
+}
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<T, 2> ScatterNd2DimUpdateNoInput(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ float qScale = 1.0f;
+ int32_t qOffset = 0;
+
+ armnn::TensorInfo shapeInfo({2}, armnn::DataType::Signed32, qScale, qOffset);
+ armnn::TensorInfo indicesInfo({3, 2}, armnn::DataType::Signed32, qScale, qOffset);
+ armnn::TensorInfo updatesInfo({3}, ArmnnType, qScale, qOffset);
+ armnn::TensorInfo outputInfo({3, 3}, ArmnnType, qScale, qOffset);
+
+ std::vector<int> shape = armnnUtils::QuantizedVector<int>({ 3, 3 }, qScale, qOffset);
+
+ std::vector<int> indices = armnnUtils::QuantizedVector<int>({ 0, 0,
+ 1, 1,
+ 2, 2 }, qScale, qOffset);
+
+ std::vector<T> updates = armnnUtils::QuantizedVector<T>({ 1, 2, 3 }, qScale, qOffset);
+
+ std::vector<T> outputExpected = armnnUtils::QuantizedVector<T>({ 1, 0, 0,
+ 0, 2, 0,
+ 0, 0, 3 }, qScale, qOffset);
+
+ armnn::ScatterNdDescriptor descriptor(armnn::ScatterNdFunction::Update, false);
+
+ return ScatterNdTestImpl<ArmnnType, T, int, 2>(workloadFactory,
+ memoryManager,
+ tensorHandleFactory,
+ shape,
+ indices,
+ updates,
+ outputExpected,
+ shapeInfo,
+ indicesInfo,
+ updatesInfo,
+ outputInfo,
+ descriptor);
+}
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<T, 2> ScatterNd2Dim1Outter1InnerUpdateWithInput(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ float qScale = 1.0f;
+ int32_t qOffset = 0;
+
+ armnn::TensorInfo inputInfo({3, 3}, ArmnnType, qScale, qOffset);
+ armnn::TensorInfo indicesInfo({2, 1}, armnn::DataType::Signed32, qScale, qOffset);
+ armnn::TensorInfo updatesInfo({2, 3}, ArmnnType, qScale, qOffset);
+ armnn::TensorInfo outputInfo({3, 3}, ArmnnType, qScale, qOffset);
+
+ std::vector<T> input = armnnUtils::QuantizedVector<T>({ 1, 1, 1,
+ 1, 1, 1,
+ 1, 1, 1 }, qScale, qOffset);
+
+ std::vector<int> indices = armnnUtils::QuantizedVector<int>({ 0, 1 }, qScale, qOffset);
+
+ std::vector<T> updates = armnnUtils::QuantizedVector<T>({ 1, 1, 1,
+ 2, 2, 2 }, qScale, qOffset);
+
+ std::vector<T> outputExpected = armnnUtils::QuantizedVector<T>({ 1, 1, 1,
+ 2, 2, 2,
+ 1, 1, 1 }, qScale, qOffset);
+
+ armnn::ScatterNdDescriptor descriptor(armnn::ScatterNdFunction::Update, true);
+
+ return ScatterNdTestImpl<ArmnnType, T, T, 2>(workloadFactory,
+ memoryManager,
+ tensorHandleFactory,
+ input,
+ indices,
+ updates,
+ outputExpected,
+ inputInfo,
+ indicesInfo,
+ updatesInfo,
+ outputInfo,
+ descriptor);
+}
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<T, 2> ScatterNd2Dim1Outter1InnerUpdateNoInput(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ float qScale = 1.0f;
+ int32_t qOffset = 0;
+
+ armnn::TensorInfo shapeInfo({2}, armnn::DataType::Signed32, qScale, qOffset);
+ armnn::TensorInfo indicesInfo({2, 1}, armnn::DataType::Signed32, qScale, qOffset);
+ armnn::TensorInfo updatesInfo({2, 3}, ArmnnType, qScale, qOffset);
+ armnn::TensorInfo outputInfo({3, 3}, ArmnnType, qScale, qOffset);
+
+ std::vector<int> shape = armnnUtils::QuantizedVector<int>({ 3, 3 }, qScale, qOffset);
+
+ std::vector<int> indices = armnnUtils::QuantizedVector<int>({ 0, 1 }, qScale, qOffset);
+
+ std::vector<T> updates = armnnUtils::QuantizedVector<T>({ 1, 1, 1,
+ 1, 1, 1 }, qScale, qOffset);
+
+ std::vector<T> outputExpected = armnnUtils::QuantizedVector<T>({ 1, 1, 1,
+ 1, 1, 1,
+ 0, 0, 0 }, qScale, qOffset);
+
+ armnn::ScatterNdDescriptor descriptor(armnn::ScatterNdFunction::Update, false);
+
+ return ScatterNdTestImpl<ArmnnType, T, int, 2>(workloadFactory,
+ memoryManager,
+ tensorHandleFactory,
+ shape,
+ indices,
+ updates,
+ outputExpected,
+ shapeInfo,
+ indicesInfo,
+ updatesInfo,
+ outputInfo,
+ descriptor);
+}
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<T, 3> ScatterNd3DimUpdateWithInput(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ float qScale = 1.0f;
+ int32_t qOffset = 0;
+
+ armnn::TensorInfo inputInfo({3, 3, 3}, ArmnnType, qScale, qOffset);
+ armnn::TensorInfo indicesInfo({3, 3}, armnn::DataType::Signed32, qScale, qOffset);
+ armnn::TensorInfo updatesInfo({3}, ArmnnType, qScale, qOffset);
+ armnn::TensorInfo outputInfo({3, 3, 3}, ArmnnType, qScale, qOffset);
+
+ std::vector<T> input = armnnUtils::QuantizedVector<T>({ 0, 0, 0,
+ 0, 0, 0,
+ 0, 0, 0,
+
+ 0, 0, 0,
+ 0, 0, 0,
+ 0, 0, 0,
+
+ 0, 0, 0,
+ 0, 0, 0,
+ 0, 0, 0 }, qScale, qOffset);
+
+ std::vector<int> indices = armnnUtils::QuantizedVector<int>({ 0, 0, 0,
+ 1, 1, 1,
+ 2, 2, 2 }, qScale, qOffset);
+
+ std::vector<T> updates = armnnUtils::QuantizedVector<T>({ 1, 2, 3 }, qScale, qOffset);
+
+ std::vector<T> outputExpected = armnnUtils::QuantizedVector<T>({ 1, 0, 0,
+ 0, 0, 0,
+ 0, 0, 0,
+
+ 0, 0, 0,
+ 0, 2, 0,
+ 0, 0, 0,
+
+ 0, 0, 0,
+ 0, 0, 0,
+ 0, 0, 3 }, qScale, qOffset);
+
+ armnn::ScatterNdDescriptor descriptor(armnn::ScatterNdFunction::Update, true);
+
+ return ScatterNdTestImpl<ArmnnType, T, T, 3>(workloadFactory,
+ memoryManager,
+ tensorHandleFactory,
+ input,
+ indices,
+ updates,
+ outputExpected,
+ inputInfo,
+ indicesInfo,
+ updatesInfo,
+ outputInfo,
+ descriptor);
+}
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<T, 3> ScatterNd3DimUpdateNoInput(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ float qScale = 1.0f;
+ int32_t qOffset = 0;
+
+ armnn::TensorInfo shapeInfo({3}, armnn::DataType::Signed32, qScale, qOffset);
+ armnn::TensorInfo indicesInfo({3, 3}, armnn::DataType::Signed32, qScale, qOffset);
+ armnn::TensorInfo updatesInfo({3}, ArmnnType, qScale, qOffset);
+ armnn::TensorInfo outputInfo({3, 3, 3}, ArmnnType, qScale, qOffset);
+
+ std::vector<int> shape = armnnUtils::QuantizedVector<int>({ 3, 3, 3 }, qScale, qOffset);
+
+ std::vector<int> indices = armnnUtils::QuantizedVector<int>({ 0, 0, 0,
+ 1, 1, 1,
+ 2, 2, 2 }, qScale, qOffset);
+
+ std::vector<T> updates = armnnUtils::QuantizedVector<T>({ 1, 2, 3 }, qScale, qOffset);
+
+ std::vector<T> outputExpected = armnnUtils::QuantizedVector<T>({ 1, 0, 0,
+ 0, 0, 0,
+ 0, 0, 0,
+
+ 0, 0, 0,
+ 0, 2, 0,
+ 0, 0, 0,
+
+ 0, 0, 0,
+ 0, 0, 0,
+ 0, 0, 3 }, qScale, qOffset);
+
+ armnn::ScatterNdDescriptor descriptor(armnn::ScatterNdFunction::Update, false);
+
+ return ScatterNdTestImpl<ArmnnType, T, int, 3>(workloadFactory,
+ memoryManager,
+ tensorHandleFactory,
+ shape,
+ indices,
+ updates,
+ outputExpected,
+ shapeInfo,
+ indicesInfo,
+ updatesInfo,
+ outputInfo,
+ descriptor);
+}
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<T, 3> ScatterNd3Dim1Outter2InnerUpdateWithInput(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ float qScale = 1.0f;
+ int32_t qOffset = 0;
+
+ armnn::TensorInfo inputInfo({3, 3, 3}, ArmnnType, qScale, qOffset);
+ armnn::TensorInfo indicesInfo({2, 1}, armnn::DataType::Signed32, qScale, qOffset);
+ armnn::TensorInfo updatesInfo({2, 3, 3}, ArmnnType, qScale, qOffset);
+ armnn::TensorInfo outputInfo({3, 3, 3}, ArmnnType, qScale, qOffset);
+
+ std::vector<T> input = armnnUtils::QuantizedVector<T>({ 0, 0, 0,
+ 0, 0, 0,
+ 0, 0, 0,
+
+ 0, 0, 0,
+ 0, 0, 0,
+ 0, 0, 0,
+
+ 0, 0, 0,
+ 0, 0, 0,
+ 0, 0, 0 }, qScale, qOffset);
+
+ std::vector<int> indices = armnnUtils::QuantizedVector<int>({ 0, 1 }, qScale, qOffset);
+
+ std::vector<T> updates = armnnUtils::QuantizedVector<T>({ 1, 1, 1,
+ 1, 1, 1,
+ 1, 1, 1,
+
+ 2, 2, 2,
+ 2, 2, 2,
+ 2, 2, 2 }, qScale, qOffset);
+
+ std::vector<T> outputExpected = armnnUtils::QuantizedVector<T>({ 1, 1, 1,
+ 1, 1, 1,
+ 1, 1, 1,
+
+ 2, 2, 2,
+ 2, 2, 2,
+ 2, 2, 2,
+
+ 0, 0, 0,
+ 0, 0, 0,
+ 0, 0, 0 }, qScale, qOffset);
+
+ armnn::ScatterNdDescriptor descriptor(armnn::ScatterNdFunction::Update, true);
+
+ return ScatterNdTestImpl<ArmnnType, T, T, 3>(workloadFactory,
+ memoryManager,
+ tensorHandleFactory,
+ input,
+ indices,
+ updates,
+ outputExpected,
+ inputInfo,
+ indicesInfo,
+ updatesInfo,
+ outputInfo,
+ descriptor);
+}
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<T, 3> ScatterNd3Dim1Outter2InnerUpdateNoInput(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ float qScale = 1.0f;
+ int32_t qOffset = 0;
+
+ armnn::TensorInfo shapeInfo({3}, armnn::DataType::Signed32, qScale, qOffset);
+ armnn::TensorInfo indicesInfo({2, 1}, armnn::DataType::Signed32, qScale, qOffset);
+ armnn::TensorInfo updatesInfo({2, 3, 3}, ArmnnType, qScale, qOffset);
+ armnn::TensorInfo outputInfo({3, 3, 3}, ArmnnType, qScale, qOffset);
+
+ std::vector<int> shape = armnnUtils::QuantizedVector<int>({ 3, 3, 3 }, qScale, qOffset);
+
+ std::vector<int> indices = armnnUtils::QuantizedVector<int>({ 0, 1 }, qScale, qOffset);
+
+ std::vector<T> updates = armnnUtils::QuantizedVector<T>({ 1, 1, 1,
+ 1, 1, 1,
+ 1, 1, 1,
+
+ 2, 2, 2,
+ 2, 2, 2,
+ 2, 2, 2 }, qScale, qOffset);
+
+ std::vector<T> outputExpected = armnnUtils::QuantizedVector<T>({ 1, 1, 1,
+ 1, 1, 1,
+ 1, 1, 1,
+
+ 2, 2, 2,
+ 2, 2, 2,
+ 2, 2, 2,
+
+ 0, 0, 0,
+ 0, 0, 0,
+ 0, 0, 0 }, qScale, qOffset);
+
+ armnn::ScatterNdDescriptor descriptor(armnn::ScatterNdFunction::Update, false);
+
+ return ScatterNdTestImpl<ArmnnType, T, int, 3>(workloadFactory,
+ memoryManager,
+ tensorHandleFactory,
+ shape,
+ indices,
+ updates,
+ outputExpected,
+ shapeInfo,
+ indicesInfo,
+ updatesInfo,
+ outputInfo,
+ descriptor);
+}
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<T, 3> ScatterNd3Dim2Outter1InnerUpdateWithInput(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ float qScale = 1.0f;
+ int32_t qOffset = 0;
+
+ armnn::TensorInfo inputInfo({3, 3, 3}, ArmnnType, qScale, qOffset);
+ armnn::TensorInfo indicesInfo({2, 2}, armnn::DataType::Signed32, qScale, qOffset);
+ armnn::TensorInfo updatesInfo({2, 3}, ArmnnType, qScale, qOffset);
+ armnn::TensorInfo outputInfo({3, 3, 3}, ArmnnType, qScale, qOffset);
+
+ std::vector<T> input = armnnUtils::QuantizedVector<T>({ 0, 0, 0,
+ 0, 0, 0,
+ 0, 0, 0,
+
+ 0, 0, 0,
+ 0, 0, 0,
+ 0, 0, 0,
+
+ 0, 0, 0,
+ 0, 0, 0,
+ 0, 0, 0 }, qScale, qOffset);
+
+ std::vector<int> indices = armnnUtils::QuantizedVector<int>({ 0, 0,
+ 1, 1, }, qScale, qOffset);
+
+ std::vector<T> updates = armnnUtils::QuantizedVector<T>({ 1, 1, 1,
+ 2, 2, 2 }, qScale, qOffset);
+
+ std::vector<T> outputExpected = armnnUtils::QuantizedVector<T>({ 1, 1, 1,
+ 0, 0, 0,
+ 0, 0, 0,
+
+ 0, 0, 0,
+ 2, 2, 2,
+ 0, 0, 0,
+
+ 0, 0, 0,
+ 0, 0, 0,
+ 0, 0, 0 }, qScale, qOffset);
+
+ armnn::ScatterNdDescriptor descriptor(armnn::ScatterNdFunction::Update, true);
+
+ return ScatterNdTestImpl<ArmnnType, T, T, 3>(workloadFactory,
+ memoryManager,
+ tensorHandleFactory,
+ input,
+ indices,
+ updates,
+ outputExpected,
+ inputInfo,
+ indicesInfo,
+ updatesInfo,
+ outputInfo,
+ descriptor);
+}
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<T, 3> ScatterNd3Dim2Outter1InnerUpdateNoInput(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ float qScale = 1.0f;
+ int32_t qOffset = 0;
+
+ armnn::TensorInfo shapeInfo({3}, armnn::DataType::Signed32, qScale, qOffset);
+ armnn::TensorInfo indicesInfo({2, 2}, armnn::DataType::Signed32, qScale, qOffset);
+ armnn::TensorInfo updatesInfo({2, 3}, ArmnnType, qScale, qOffset);
+ armnn::TensorInfo outputInfo({3, 3, 3}, ArmnnType, qScale, qOffset);
+
+ std::vector<int> shape = armnnUtils::QuantizedVector<int>({ 3, 3, 3 }, qScale, qOffset);
+
+ std::vector<int> indices = armnnUtils::QuantizedVector<int>({ 0, 0,
+ 1, 1, }, qScale, qOffset);
+
+ std::vector<T> updates = armnnUtils::QuantizedVector<T>({ 1, 1, 1,
+ 2, 2, 2 }, qScale, qOffset);
+
+ std::vector<T> outputExpected = armnnUtils::QuantizedVector<T>({ 1, 1, 1,
+ 0, 0, 0,
+ 0, 0, 0,
+
+ 0, 0, 0,
+ 2, 2, 2,
+ 0, 0, 0,
+
+ 0, 0, 0,
+ 0, 0, 0,
+ 0, 0, 0 }, qScale, qOffset);
+
+ armnn::ScatterNdDescriptor descriptor(armnn::ScatterNdFunction::Update, false);
+
+ return ScatterNdTestImpl<ArmnnType, T, int, 3>(workloadFactory,
+ memoryManager,
+ tensorHandleFactory,
+ shape,
+ indices,
+ updates,
+ outputExpected,
+ shapeInfo,
+ indicesInfo,
+ updatesInfo,
+ outputInfo,
+ descriptor);
+}
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<T, 4> ScatterNd4DimUpdateWithInput(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ float qScale = 1.0f;
+ int32_t qOffset = 0;
+
+ armnn::TensorInfo inputInfo({2, 3, 3, 3}, ArmnnType, qScale, qOffset);
+ armnn::TensorInfo indicesInfo({3, 4}, armnn::DataType::Signed32, qScale, qOffset);
+ armnn::TensorInfo updatesInfo({3}, ArmnnType, qScale, qOffset);
+ armnn::TensorInfo outputInfo({2, 3, 3, 3}, ArmnnType, qScale, qOffset);
+
+ std::vector<T> input = armnnUtils::QuantizedVector<T>({ 0, 0, 0,
+ 0, 0, 0,
+ 0, 0, 0,
+
+ 0, 0, 0,
+ 0, 0, 0,
+ 0, 0, 0,
+
+ 0, 0, 0,
+ 0, 0, 0,
+ 0, 0, 0,
+
+ 0, 0, 0,
+ 0, 0, 0,
+ 0, 0, 0,
+
+ 0, 0, 0,
+ 0, 0, 0,
+ 0, 0, 0,
+
+ 0, 0, 0,
+ 0, 0, 0,
+ 0, 0, 0 }, qScale, qOffset);
+
+ std::vector<int> indices = armnnUtils::QuantizedVector<int>({ 0, 0, 0, 0,
+ 0, 1, 1, 1,
+ 1, 1, 1, 1 }, qScale, qOffset);
+
+ std::vector<T> updates = armnnUtils::QuantizedVector<T>({ 1, 2, 3 }, qScale, qOffset);
+
+ std::vector<T> outputExpected = armnnUtils::QuantizedVector<T>({ 1, 0, 0,
+ 0, 0, 0,
+ 0, 0, 0,
+
+ 0, 0, 0,
+ 0, 2, 0,
+ 0, 0, 0,
+
+ 0, 0, 0,
+ 0, 0, 0,
+ 0, 0, 0,
+
+ 0, 0, 0,
+ 0, 0, 0,
+ 0, 0, 0,
+
+ 0, 0, 0,
+ 0, 3, 0,
+ 0, 0, 0,
+
+ 0, 0, 0,
+ 0, 0, 0,
+ 0, 0, 0 }, qScale, qOffset);
+
+ armnn::ScatterNdDescriptor descriptor(armnn::ScatterNdFunction::Update, true);
+
+ return ScatterNdTestImpl<ArmnnType, T, T, 4>(workloadFactory,
+ memoryManager,
+ tensorHandleFactory,
+ input,
+ indices,
+ updates,
+ outputExpected,
+ inputInfo,
+ indicesInfo,
+ updatesInfo,
+ outputInfo,
+ descriptor);
+}
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<T, 4> ScatterNd4DimUpdateNoInput(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ float qScale = 1.0f;
+ int32_t qOffset = 0;
+
+ armnn::TensorInfo shapeInfo({4}, armnn::DataType::Signed32, qScale, qOffset);
+ armnn::TensorInfo indicesInfo({3, 4}, armnn::DataType::Signed32, qScale, qOffset);
+ armnn::TensorInfo updatesInfo({3}, ArmnnType, qScale, qOffset);
+ armnn::TensorInfo outputInfo({2, 3, 3, 3}, ArmnnType, qScale, qOffset);
+
+ std::vector<int> shape = armnnUtils::QuantizedVector<int>({ 2, 3, 3, 3 }, qScale, qOffset);
+
+ std::vector<int> indices = armnnUtils::QuantizedVector<int>({ 0, 0, 0, 0,
+ 0, 1, 1, 1,
+ 1, 1, 1, 1 }, qScale, qOffset);
+
+ std::vector<T> updates = armnnUtils::QuantizedVector<T>({ 1, 2, 3 }, qScale, qOffset);
+
+ std::vector<T> outputExpected = armnnUtils::QuantizedVector<T>({ 1, 0, 0,
+ 0, 0, 0,
+ 0, 0, 0,
+
+ 0, 0, 0,
+ 0, 2, 0,
+ 0, 0, 0,
+
+ 0, 0, 0,
+ 0, 0, 0,
+ 0, 0, 0,
+
+ 0, 0, 0,
+ 0, 0, 0,
+ 0, 0, 0,
+
+ 0, 0, 0,
+ 0, 3, 0,
+ 0, 0, 0,
+
+ 0, 0, 0,
+ 0, 0, 0,
+ 0, 0, 0 }, qScale, qOffset);
+
+ armnn::ScatterNdDescriptor descriptor(armnn::ScatterNdFunction::Update, false);
+
+ return ScatterNdTestImpl<ArmnnType, T, int, 4>(workloadFactory,
+ memoryManager,
+ tensorHandleFactory,
+ shape,
+ indices,
+ updates,
+ outputExpected,
+ shapeInfo,
+ indicesInfo,
+ updatesInfo,
+ outputInfo,
+ descriptor);
+}
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<T, 2> ScatterNd2DimAddWithInput(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ float qScale = 1.0f;
+ int32_t qOffset = 0;
+
+ armnn::TensorInfo inputInfo({3, 3}, ArmnnType, qScale, qOffset);
+ armnn::TensorInfo indicesInfo({3, 2}, armnn::DataType::Signed32, qScale, qOffset);
+ armnn::TensorInfo updatesInfo({3}, ArmnnType, qScale, qOffset);
+ armnn::TensorInfo outputInfo({3, 3}, ArmnnType, qScale, qOffset);
+
+ std::vector<T> input = armnnUtils::QuantizedVector<T>({ 1, 1, 1,
+ 1, 1, 1,
+ 1, 1, 1 }, qScale, qOffset);
+
+ std::vector<int> indices = armnnUtils::QuantizedVector<int>({ 0, 0,
+ 1, 1,
+ 2, 2 }, qScale, qOffset);
+
+ std::vector<T> updates = armnnUtils::QuantizedVector<T>({ 1, 2, 3 }, qScale, qOffset);
+
+ std::vector<T> outputExpected = armnnUtils::QuantizedVector<T>({ 2, 1, 1,
+ 1, 3, 1,
+ 1, 1, 4 }, qScale, qOffset);
+
+ armnn::ScatterNdDescriptor descriptor(armnn::ScatterNdFunction::Add, true);
+
+ return ScatterNdTestImpl<ArmnnType, T, T, 2>(workloadFactory,
+ memoryManager,
+ tensorHandleFactory,
+ input,
+ indices,
+ updates,
+ outputExpected,
+ inputInfo,
+ indicesInfo,
+ updatesInfo,
+ outputInfo,
+ descriptor);
+}
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<T, 2> ScatterNd2DimAddNoInput(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ float qScale = 1.0f;
+ int32_t qOffset = 0;
+
+ armnn::TensorInfo shapeInfo({2}, armnn::DataType::Signed32, qScale, qOffset);
+ armnn::TensorInfo indicesInfo({3, 2}, armnn::DataType::Signed32, qScale, qOffset);
+ armnn::TensorInfo updatesInfo({3}, ArmnnType, qScale, qOffset);
+ armnn::TensorInfo outputInfo({3, 3}, ArmnnType, qScale, qOffset);
+
+ std::vector<int> shape = armnnUtils::QuantizedVector<int>({ 3, 3 }, qScale, qOffset);
+
+ std::vector<int> indices = armnnUtils::QuantizedVector<int>({ 0, 0,
+ 1, 1,
+ 2, 2 }, qScale, qOffset);
+
+ std::vector<T> updates = armnnUtils::QuantizedVector<T>({ 1, 2, 3 }, qScale, qOffset);
+
+ std::vector<T> outputExpected = armnnUtils::QuantizedVector<T>({ 1, 0, 0,
+ 0, 2, 0,
+ 0, 0, 3 }, qScale, qOffset);
+
+ armnn::ScatterNdDescriptor descriptor(armnn::ScatterNdFunction::Add, false);
+
+ return ScatterNdTestImpl<ArmnnType, T, int, 2>(workloadFactory,
+ memoryManager,
+ tensorHandleFactory,
+ shape,
+ indices,
+ updates,
+ outputExpected,
+ shapeInfo,
+ indicesInfo,
+ updatesInfo,
+ outputInfo,
+ descriptor);
+}
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<T, 2> ScatterNd2DimSubWithInput(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ float qScale = 1.0f;
+ int32_t qOffset = 0;
+
+ armnn::TensorInfo inputInfo({3, 3}, ArmnnType, qScale, qOffset);
+ armnn::TensorInfo indicesInfo({3, 2}, armnn::DataType::Signed32, qScale, qOffset);
+ armnn::TensorInfo updatesInfo({3}, ArmnnType, qScale, qOffset);
+ armnn::TensorInfo outputInfo({3, 3}, ArmnnType, qScale, qOffset);
+
+ std::vector<T> input = armnnUtils::QuantizedVector<T>({ 1, 1, 1,
+ 1, 1, 1,
+ 1, 1, 1 }, qScale, qOffset);
+
+ std::vector<int> indices = armnnUtils::QuantizedVector<int>({ 0, 0,
+ 1, 1,
+ 2, 2 }, qScale, qOffset);
+
+ std::vector<T> updates = armnnUtils::QuantizedVector<T>({ 1, 2, 3 }, qScale, qOffset);
+
+ std::vector<T> outputExpected = armnnUtils::QuantizedVector<T>({ 0, 1, 1,
+ 1, -1, 1,
+ 1, 1, -2 }, qScale, qOffset);
+
+ armnn::ScatterNdDescriptor descriptor(armnn::ScatterNdFunction::Sub, true);
+
+ return ScatterNdTestImpl<ArmnnType, T, T, 2>(workloadFactory,
+ memoryManager,
+ tensorHandleFactory,
+ input,
+ indices,
+ updates,
+ outputExpected,
+ inputInfo,
+ indicesInfo,
+ updatesInfo,
+ outputInfo,
+ descriptor);
+}
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<T, 2> ScatterNd2DimSubNoInput(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ float qScale = 1.0f;
+ int32_t qOffset = 0;
+
+ armnn::TensorInfo shapeInfo({2}, armnn::DataType::Signed32, qScale, qOffset);
+ armnn::TensorInfo indicesInfo({3, 2}, armnn::DataType::Signed32, qScale, qOffset);
+ armnn::TensorInfo updatesInfo({3}, ArmnnType, qScale, qOffset);
+ armnn::TensorInfo outputInfo({3, 3}, ArmnnType, qScale, qOffset);
+
+ std::vector<int> shape = armnnUtils::QuantizedVector<int>({ 3, 3 }, qScale, qOffset);
+
+ std::vector<int> indices = armnnUtils::QuantizedVector<int>({ 0, 0,
+ 1, 1,
+ 2, 2 }, qScale, qOffset);
+
+ std::vector<T> updates = armnnUtils::QuantizedVector<T>({ 1, 2, 3 }, qScale, qOffset);
+
+ std::vector<T> outputExpected = armnnUtils::QuantizedVector<T>({ -1, 0, 0,
+ 0, -2, 0,
+ 0, 0, -3 }, qScale, qOffset);
+
+ armnn::ScatterNdDescriptor descriptor(armnn::ScatterNdFunction::Sub, false);
+
+ return ScatterNdTestImpl<ArmnnType, T, int, 2>(workloadFactory,
+ memoryManager,
+ tensorHandleFactory,
+ shape,
+ indices,
+ updates,
+ outputExpected,
+ shapeInfo,
+ indicesInfo,
+ updatesInfo,
+ outputInfo,
+ descriptor);
+}
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<T, 2> ScatterNd2DimMaxWithInput(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ float qScale = 1.0f;
+ int32_t qOffset = 0;
+
+ armnn::TensorInfo inputInfo({3, 3}, ArmnnType, qScale, qOffset);
+ armnn::TensorInfo indicesInfo({3, 2}, armnn::DataType::Signed32, qScale, qOffset);
+ armnn::TensorInfo updatesInfo({3}, ArmnnType, qScale, qOffset);
+ armnn::TensorInfo outputInfo({3, 3}, ArmnnType, qScale, qOffset);
+
+ std::vector<T> input = armnnUtils::QuantizedVector<T>({ 1, 1, 1,
+ 1, 1, 1,
+ 1, 1, 1 }, qScale, qOffset);
+
+ std::vector<int> indices = armnnUtils::QuantizedVector<int>({ 0, 0,
+ 1, 1,
+ 2, 2 }, qScale, qOffset);
+
+ std::vector<T> updates = armnnUtils::QuantizedVector<T>({ 0, 1, 2 }, qScale, qOffset);
+
+ std::vector<T> outputExpected = armnnUtils::QuantizedVector<T>({ 1, 1, 1,
+ 1, 1, 1,
+ 1, 1, 2 }, qScale, qOffset);
+
+ armnn::ScatterNdDescriptor descriptor(armnn::ScatterNdFunction::Max, true);
+
+ return ScatterNdTestImpl<ArmnnType, T, T, 2>(workloadFactory,
+ memoryManager,
+ tensorHandleFactory,
+ input,
+ indices,
+ updates,
+ outputExpected,
+ inputInfo,
+ indicesInfo,
+ updatesInfo,
+ outputInfo,
+ descriptor);
+}
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<T, 2> ScatterNd2DimMaxNoInput(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ float qScale = 1.0f;
+ int32_t qOffset = 0;
+
+ armnn::TensorInfo shapeInfo({2}, armnn::DataType::Signed32, qScale, qOffset);
+ armnn::TensorInfo indicesInfo({3, 2}, armnn::DataType::Signed32, qScale, qOffset);
+ armnn::TensorInfo updatesInfo({3}, ArmnnType, qScale, qOffset);
+ armnn::TensorInfo outputInfo({3, 3}, ArmnnType, qScale, qOffset);
+
+ std::vector<int> shape = armnnUtils::QuantizedVector<int>({ 3, 3 }, qScale, qOffset);
+
+ std::vector<int> indices = armnnUtils::QuantizedVector<int>({ 0, 0,
+ 1, 1,
+ 2, 2 }, qScale, qOffset);
+
+ std::vector<T> updates = armnnUtils::QuantizedVector<T>({ -1, 0, 1 }, qScale, qOffset);
+
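+    // Expected: base is all zeros, so output = max(0, update); only the update 1 survives.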
+ std::vector<T> outputExpected = armnnUtils::QuantizedVector<T>({ 0, 0, 0,
+ 0, 0, 0,
+ 0, 0, 1 }, qScale, qOffset);
+
+ armnn::ScatterNdDescriptor descriptor(armnn::ScatterNdFunction::Max, false);
+
+ return ScatterNdTestImpl<ArmnnType, T, int, 2>(workloadFactory,
+ memoryManager,
+ tensorHandleFactory,
+ shape,
+ indices,
+ updates,
+ outputExpected,
+ shapeInfo,
+ indicesInfo,
+ updatesInfo,
+ outputInfo,
+ descriptor);
+}
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<T, 2> ScatterNd2DimMinWithInput(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ float qScale = 1.0f;
+ int32_t qOffset = 0;
+
+ armnn::TensorInfo inputInfo({3, 3}, ArmnnType, qScale, qOffset);
+ armnn::TensorInfo indicesInfo({3, 2}, armnn::DataType::Signed32, qScale, qOffset);
+ armnn::TensorInfo updatesInfo({3}, ArmnnType, qScale, qOffset);
+ armnn::TensorInfo outputInfo({3, 3}, ArmnnType, qScale, qOffset);
+
+ std::vector<T> input = armnnUtils::QuantizedVector<T>({ 1, 1, 1,
+ 1, 1, 1,
+ 1, 1, 1 }, qScale, qOffset);
+
+ std::vector<int> indices = armnnUtils::QuantizedVector<int>({ 0, 0,
+ 1, 1,
+ 2, 2 }, qScale, qOffset);
+
+ std::vector<T> updates = armnnUtils::QuantizedVector<T>({ 0, 1, 2 }, qScale, qOffset);
+
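+    // Expected: output = min(input, update) at each indexed position; only the update 0 is below the input value 1.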
+ std::vector<T> outputExpected = armnnUtils::QuantizedVector<T>({ 0, 1, 1,
+ 1, 1, 1,
+ 1, 1, 1 }, qScale, qOffset);
+
+ armnn::ScatterNdDescriptor descriptor(armnn::ScatterNdFunction::Min, true);
+
+ return ScatterNdTestImpl<ArmnnType, T, T, 2>(workloadFactory,
+ memoryManager,
+ tensorHandleFactory,
+ input,
+ indices,
+ updates,
+ outputExpected,
+ inputInfo,
+ indicesInfo,
+ updatesInfo,
+ outputInfo,
+ descriptor);
+}
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<T, 2> ScatterNd2DimMinNoInput(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ float qScale = 1.0f;
+ int32_t qOffset = 0;
+
+ armnn::TensorInfo shapeInfo({2}, armnn::DataType::Signed32, qScale, qOffset);
+ armnn::TensorInfo indicesInfo({3, 2}, armnn::DataType::Signed32, qScale, qOffset);
+ armnn::TensorInfo updatesInfo({3}, ArmnnType, qScale, qOffset);
+ armnn::TensorInfo outputInfo({3, 3}, ArmnnType, qScale, qOffset);
+
+ std::vector<int> shape = armnnUtils::QuantizedVector<int>({ 3, 3 }, qScale, qOffset);
+
+ std::vector<int> indices = armnnUtils::QuantizedVector<int>({ 0, 0,
+ 1, 1,
+ 2, 2 }, qScale, qOffset);
+
+ std::vector<T> updates = armnnUtils::QuantizedVector<T>({ -1, 0, 1 }, qScale, qOffset);
+
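+    // Expected: base is all zeros, so output = min(0, update); only the update -1 survives.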
+ std::vector<T> outputExpected = armnnUtils::QuantizedVector<T>({ -1, 0, 0,
+ 0, 0, 0,
+ 0, 0, 0 }, qScale, qOffset);
+
+ armnn::ScatterNdDescriptor descriptor(armnn::ScatterNdFunction::Min, false);
+
+ return ScatterNdTestImpl<ArmnnType, T, int, 2>(workloadFactory,
+ memoryManager,
+ tensorHandleFactory,
+ shape,
+ indices,
+ updates,
+ outputExpected,
+ shapeInfo,
+ indicesInfo,
+ updatesInfo,
+ outputInfo,
+ descriptor);
+}
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<T, 2> ScatterNd2DimMulWithInput(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ float qScale = 1.0f;
+ int32_t qOffset = 0;
+
+ armnn::TensorInfo inputInfo({3, 3}, ArmnnType, qScale, qOffset);
+ armnn::TensorInfo indicesInfo({3, 2}, armnn::DataType::Signed32, qScale, qOffset);
+ armnn::TensorInfo updatesInfo({3}, ArmnnType, qScale, qOffset);
+ armnn::TensorInfo outputInfo({3, 3}, ArmnnType, qScale, qOffset);
+
+ std::vector<T> input = armnnUtils::QuantizedVector<T>({ 1, 1, 1,
+ 1, 1, 1,
+ 1, 1, 1 }, qScale, qOffset);
+
+ std::vector<int> indices = armnnUtils::QuantizedVector<int>({ 0, 0,
+ 1, 1,
+ 2, 2 }, qScale, qOffset);
+
+ std::vector<T> updates = armnnUtils::QuantizedVector<T>({ 1, 2, 3 }, qScale, qOffset);
+
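+    // Expected: output = input * update at each indexed position; the input is all ones here.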
+ std::vector<T> outputExpected = armnnUtils::QuantizedVector<T>({ 1, 1, 1,
+ 1, 2, 1,
+ 1, 1, 3 }, qScale, qOffset);
+
+ armnn::ScatterNdDescriptor descriptor(armnn::ScatterNdFunction::Mul, true);
+
+ return ScatterNdTestImpl<ArmnnType, T, T, 2>(workloadFactory,
+ memoryManager,
+ tensorHandleFactory,
+ input,
+ indices,
+ updates,
+ outputExpected,
+ inputInfo,
+ indicesInfo,
+ updatesInfo,
+ outputInfo,
+ descriptor);
+}
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<T, 2> ScatterNd2DimMulNoInput(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ float qScale = 1.0f;
+ int32_t qOffset = 0;
+
+ armnn::TensorInfo shapeInfo({2}, armnn::DataType::Signed32, qScale, qOffset);
+ armnn::TensorInfo indicesInfo({3, 2}, armnn::DataType::Signed32, qScale, qOffset);
+ armnn::TensorInfo updatesInfo({3}, ArmnnType, qScale, qOffset);
+ armnn::TensorInfo outputInfo({3, 3}, ArmnnType, qScale, qOffset);
+
+ std::vector<int> shape = armnnUtils::QuantizedVector<int>({ 3, 3 }, qScale, qOffset);
+
+ std::vector<int> indices = armnnUtils::QuantizedVector<int>({ 0, 0,
+ 1, 1,
+ 2, 2 }, qScale, qOffset);
+
+ std::vector<T> updates = armnnUtils::QuantizedVector<T>({ 1, 2, 3 }, qScale, qOffset);
+
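+    // Expected: base is all zeros, so every scattered element becomes 0 * update = 0.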
+ std::vector<T> outputExpected = armnnUtils::QuantizedVector<T>({ 0, 0, 0,
+ 0, 0, 0,
+ 0, 0, 0 }, qScale, qOffset);
+
+ armnn::ScatterNdDescriptor descriptor(armnn::ScatterNdFunction::Mul, false);
+
+ return ScatterNdTestImpl<ArmnnType, T, int, 2>(workloadFactory,
+ memoryManager,
+ tensorHandleFactory,
+ shape,
+ indices,
+ updates,
+ outputExpected,
+ shapeInfo,
+ indicesInfo,
+ updatesInfo,
+ outputInfo,
+ descriptor);
+}
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 1>
+ScatterNd1DimUpdateWithInput<armnn::DataType::Float32>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 1>
+ScatterNd1DimUpdateNoInput<armnn::DataType::Float32>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 2>
+ScatterNd2DimUpdateWithInput<armnn::DataType::Float32>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 2>
+ScatterNd2DimUpdateNoInput<armnn::DataType::Float32>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 2>
+ScatterNd2Dim1Outter1InnerUpdateWithInput<armnn::DataType::Float32>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 2>
+ScatterNd2Dim1Outter1InnerUpdateNoInput<armnn::DataType::Float32>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 3>
+ScatterNd3DimUpdateWithInput<armnn::DataType::Float32>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 3>
+ScatterNd3DimUpdateNoInput<armnn::DataType::Float32>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 3>
+ScatterNd3Dim1Outter2InnerUpdateWithInput<armnn::DataType::Float32>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 3>
+ScatterNd3Dim1Outter2InnerUpdateNoInput<armnn::DataType::Float32>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 3>
+ScatterNd3Dim2Outter1InnerUpdateWithInput<armnn::DataType::Float32>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 3>
+ScatterNd3Dim2Outter1InnerUpdateNoInput<armnn::DataType::Float32>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
+ScatterNd4DimUpdateWithInput<armnn::DataType::Float32>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
+ScatterNd4DimUpdateNoInput<armnn::DataType::Float32>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 2>
+ScatterNd2DimAddWithInput<armnn::DataType::Float32>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 2>
+ScatterNd2DimAddNoInput<armnn::DataType::Float32>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 2>
+ScatterNd2DimSubWithInput<armnn::DataType::Float32>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 2>
+ScatterNd2DimSubNoInput<armnn::DataType::Float32>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 2>
+ScatterNd2DimMaxWithInput<armnn::DataType::Float32>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 2>
+ScatterNd2DimMaxNoInput<armnn::DataType::Float32>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 2>
+ScatterNd2DimMinWithInput<armnn::DataType::Float32>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 2>
+ScatterNd2DimMinNoInput<armnn::DataType::Float32>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 2>
+ScatterNd2DimMulWithInput<armnn::DataType::Float32>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 2>
+ScatterNd2DimMulNoInput<armnn::DataType::Float32>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float16>, 3>
+ScatterNd3DimUpdateWithInput<armnn::DataType::Float16>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float16>, 3>
+ScatterNd3DimUpdateNoInput<armnn::DataType::Float16>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 3>
+ScatterNd3DimUpdateWithInput<armnn::DataType::QAsymmS8>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 3>
+ScatterNd3DimUpdateNoInput<armnn::DataType::QAsymmS8>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 3>
+ScatterNd3DimUpdateWithInput<armnn::DataType::QAsymmU8>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 3>
+ScatterNd3DimUpdateNoInput<armnn::DataType::QAsymmU8>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS8>, 3>
+ScatterNd3DimUpdateWithInput<armnn::DataType::QSymmS8>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS8>, 3>
+ScatterNd3DimUpdateNoInput<armnn::DataType::QSymmS8>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 3>
+ScatterNd3DimUpdateWithInput<armnn::DataType::QSymmS16>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 3>
+ScatterNd3DimUpdateNoInput<armnn::DataType::QSymmS16>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Signed32>, 3>
+ScatterNd3DimUpdateWithInput<armnn::DataType::Signed32>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Signed32>, 3>
+ScatterNd3DimUpdateNoInput<armnn::DataType::Signed32>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory); \ No newline at end of file
diff --git a/src/backends/backendsCommon/test/layerTests/ScatterNdTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/ScatterNdTestImpl.hpp
new file mode 100644
index 0000000000..5350904719
--- /dev/null
+++ b/src/backends/backendsCommon/test/layerTests/ScatterNdTestImpl.hpp
@@ -0,0 +1,156 @@
+//
+// Copyright © 2024 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <armnnTestUtils/LayerTestResult.hpp>
+
+#include <ResolveType.hpp>
+
+#include <armnn/backends/IBackendInternal.hpp>
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 1> ScatterNd1DimUpdateWithInput(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 1> ScatterNd1DimUpdateNoInput(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 2> ScatterNd2DimUpdateWithInput(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 2> ScatterNd2DimUpdateNoInput(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 2> ScatterNd2Dim1Outter1InnerUpdateWithInput(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 2> ScatterNd2Dim1Outter1InnerUpdateNoInput(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 3> ScatterNd3DimUpdateWithInput(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 3> ScatterNd3DimUpdateNoInput(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 3> ScatterNd3Dim1Outter2InnerUpdateWithInput(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 3> ScatterNd3Dim1Outter2InnerUpdateNoInput(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 3> ScatterNd3Dim2Outter1InnerUpdateWithInput(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 3> ScatterNd3Dim2Outter1InnerUpdateNoInput(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 4> ScatterNd4DimUpdateWithInput(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 4> ScatterNd4DimUpdateNoInput(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 2> ScatterNd2DimAddWithInput(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 2> ScatterNd2DimAddNoInput(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 2> ScatterNd2DimSubWithInput(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 2> ScatterNd2DimSubNoInput(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 2> ScatterNd2DimMaxWithInput(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 2> ScatterNd2DimMaxNoInput(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 2> ScatterNd2DimMinWithInput(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 2> ScatterNd2DimMinNoInput(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 2> ScatterNd2DimMulWithInput(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 2> ScatterNd2DimMulNoInput(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory); \ No newline at end of file
diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp
index f97d03a26e..654aeb55dc 100644
--- a/src/backends/reference/RefLayerSupport.cpp
+++ b/src/backends/reference/RefLayerSupport.cpp
@@ -356,6 +356,13 @@ bool RefLayerSupport::IsLayerSupported(const LayerType& type,
infos[1],
*(PolymorphicDowncast<const ReduceDescriptor*>(&descriptor)),
reasonIfUnsupported);
+ case LayerType::ScatterNd:
+ return IsScatterNdSupported(infos[0],
+ infos[1],
+ infos[2],
+ infos[3],
+ *(PolymorphicDowncast<const ScatterNdDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
case LayerType::Slice:
return IsSliceSupported(infos[0],
infos[1],
@@ -2442,6 +2449,64 @@ bool RefLayerSupport::IsReverseV2Supported(const TensorInfo& input0,
return supported;
}
+bool RefLayerSupport::IsScatterNdSupported(const TensorInfo& input,
+ const TensorInfo& indices,
+ const TensorInfo& updates,
+ const TensorInfo& output,
+ const ScatterNdDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ IgnoreUnused(descriptor);
+
+ bool supported = true;
+
+ std::array<DataType, 7> supportedTypes
+ {
+ DataType::Float32,
+ DataType::Float16,
+ DataType::QAsymmS8,
+ DataType::QAsymmU8,
+ DataType::QSymmS8,
+ DataType::QSymmS16,
+ DataType::Signed32
+ };
+
+ std::array<DataType, 1> indicesSupportedTypes =
+ {
+ DataType::Signed32
+ };
+
+ supported &= CheckSupportRule(TypeAnyOf(indices, indicesSupportedTypes), reasonIfUnsupported,
+ "ScatterNd: indices type not supported.");
+
+ supported &= CheckSupportRule(TypeAnyOf(updates, supportedTypes), reasonIfUnsupported,
+ "ScatterNd: updates type not supported.");
+
+ supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
+                                  "ScatterNd: output type not supported.");
+
+ supported &= CheckSupportRule(TypesAreEqual(updates, output), reasonIfUnsupported,
+                                  "ScatterNd: updates and output types are mismatched");
+
+ if (descriptor.m_InputEnabled)
+ {
+ // If the input slot is enabled, we have the input tensor in this slot
+ supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
+ "ScatterNd: input type not supported.");
+
+ supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
+ "ScatterNd: input and output types are mismatched");
+ }
+ else
+ {
+ // If the input slot is not enabled, we have the shape tensor in this slot
+ supported &= CheckSupportRule(TypeAnyOf(input, indicesSupportedTypes), reasonIfUnsupported,
+ "ScatterNd: shape type not supported.");
+ }
+
+ return supported;
+}
+
bool RefLayerSupport::IsShapeSupported(const TensorInfo& input,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported) const
diff --git a/src/backends/reference/RefLayerSupport.hpp b/src/backends/reference/RefLayerSupport.hpp
index 9e7175389a..1b0f4a2bb5 100644
--- a/src/backends/reference/RefLayerSupport.hpp
+++ b/src/backends/reference/RefLayerSupport.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
@@ -309,6 +309,13 @@ public:
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const;
+ bool IsScatterNdSupported(const TensorInfo& input,
+ const TensorInfo& indices,
+ const TensorInfo& updates,
+ const TensorInfo& output,
+ const ScatterNdDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const;
+
bool IsShapeSupported(const TensorInfo& input,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const;
diff --git a/src/backends/reference/RefWorkloadFactory.cpp b/src/backends/reference/RefWorkloadFactory.cpp
index ad6ec9a792..df458c1a6d 100644
--- a/src/backends/reference/RefWorkloadFactory.cpp
+++ b/src/backends/reference/RefWorkloadFactory.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include <Layer.hpp>
@@ -567,6 +567,11 @@ std::unique_ptr<IWorkload> RefWorkloadFactory::CreateWorkload(LayerType type,
auto reverseV2QueueDescriptor = PolymorphicDowncast<const ReverseV2QueueDescriptor*>(&descriptor);
return std::make_unique<RefReverseV2Workload>(*reverseV2QueueDescriptor, info);
}
+ case LayerType::ScatterNd:
+ {
+ auto scatterQueueDescriptor = PolymorphicDowncast<const ScatterNdQueueDescriptor*>(&descriptor);
+ return std::make_unique<RefScatterNdWorkload>(*scatterQueueDescriptor, info);
+ }
case LayerType::Shape:
{
auto shapeQueueDescriptor = PolymorphicDowncast<const ShapeQueueDescriptor*>(&descriptor);
diff --git a/src/backends/reference/backend.mk b/src/backends/reference/backend.mk
index 27ca8f607a..752255607a 100644
--- a/src/backends/reference/backend.mk
+++ b/src/backends/reference/backend.mk
@@ -1,5 +1,5 @@
#
-# Copyright © 2017-2023 ARM Ltd and Contributors. All rights reserved.
+# Copyright © 2017-2024 ARM Ltd and Contributors. All rights reserved.
# SPDX-License-Identifier: MIT
#
@@ -96,6 +96,7 @@ BACKEND_SOURCES := \
workloads/RefReshapeWorkload.cpp \
workloads/RefResizeWorkload.cpp \
workloads/RefReverseV2Workload.cpp \
+ workloads/RefScatterNdWorkload.cpp \
workloads/RefSliceWorkload.cpp \
workloads/RefSoftmaxWorkload.cpp \
workloads/RefSpaceToBatchNdWorkload.cpp \
@@ -109,6 +110,7 @@ BACKEND_SOURCES := \
workloads/RefUnidirectionalSequenceLstmWorkload.cpp \
workloads/Resize.cpp \
workloads/ReverseV2Impl.cpp \
+ workloads/ScatterNd.cpp \
workloads/Slice.cpp \
workloads/SpaceToBatchNd.cpp \
workloads/SpaceToDepth.cpp \
diff --git a/src/backends/reference/test/RefLayerTests.cpp b/src/backends/reference/test/RefLayerTests.cpp
index cfe85594b3..078338163f 100644
--- a/src/backends/reference/test/RefLayerTests.cpp
+++ b/src/backends/reference/test/RefLayerTests.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017,2022-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017,2022-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -2885,4 +2885,56 @@ ARMNN_AUTO_TEST_CASE_WITH_THF(BroadcastTo3dAxis0TestSigned32, BroadcastTo3dAxis0
ARMNN_AUTO_TEST_CASE_WITH_THF(BroadcastTo3dAxis1TestSigned32, BroadcastTo3dAxis1Test<DataType::Signed32>)
ARMNN_AUTO_TEST_CASE_WITH_THF(BroadcastTo3dAxis2TestSigned32, BroadcastTo3dAxis2Test<DataType::Signed32>)
ARMNN_AUTO_TEST_CASE_WITH_THF(BroadcastTo4dTestSigned32, BroadcastTo4dTest<DataType::Signed32>)
+
+// ScatterNd
+// With Input tensor
+ARMNN_AUTO_TEST_CASE_WITH_THF(ScatterNd1DUpdateTestWithInputFloat32, ScatterNd1DimUpdateWithInput<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(ScatterNd2DUpdateTestWithInputFloat32, ScatterNd2DimUpdateWithInput<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(ScatterNd2Dim1Outter1InnerUpdateWithInputFloat32,
+ ScatterNd2Dim1Outter1InnerUpdateWithInput<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(ScatterNd3DimUpdateWithInputFloat32, ScatterNd3DimUpdateWithInput<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(ScatterNd3Dim1Outter2InnerUpdateWithInputFloat32,
+ ScatterNd3Dim1Outter2InnerUpdateWithInput<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(ScatterNd3Dim2Outter1InnerUpdateWithInputFloat32,
+ ScatterNd3Dim2Outter1InnerUpdateWithInput<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(ScatterNd4DimUpdateWithInputFloat32, ScatterNd4DimUpdateWithInput<DataType::Float32>)
+
+ARMNN_AUTO_TEST_CASE_WITH_THF(ScatterNd2DimAddWithInputFloat32, ScatterNd2DimAddWithInput<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(ScatterNd2DimSubWithInputFloat32, ScatterNd2DimSubWithInput<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(ScatterNd2DimMaxWithInputFloat32, ScatterNd2DimMaxWithInput<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(ScatterNd2DimMinWithInputFloat32, ScatterNd2DimMinWithInput<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(ScatterNd2DimMulWithInputFloat32, ScatterNd2DimMulWithInput<DataType::Float32>)
+
+ARMNN_AUTO_TEST_CASE_WITH_THF(ScatterNd3DimUpdateWithInputFloat16, ScatterNd3DimUpdateWithInput<DataType::Float16>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(ScatterNd3DimUpdateWithInputQAsymmS8, ScatterNd3DimUpdateWithInput<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(ScatterNd3DimUpdateWithInputQAsymmU8, ScatterNd3DimUpdateWithInput<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(ScatterNd3DimUpdateWithInputQSymmS8, ScatterNd3DimUpdateWithInput<DataType::QSymmS8>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(ScatterNd3DimUpdateWithInputQSymmS16, ScatterNd3DimUpdateWithInput<DataType::QSymmS16>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(ScatterNd3DimUpdateWithInputSigned32, ScatterNd3DimUpdateWithInput<DataType::Signed32>)
+
+// No input tensor, only shape provided
+ARMNN_AUTO_TEST_CASE_WITH_THF(ScatterNd1DUpdateTestNoInputFloat32, ScatterNd1DimUpdateNoInput<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(ScatterNd2DimUpdateTestNoInputFloat32, ScatterNd2DimUpdateNoInput<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(ScatterNd2Dim1Outter1InnerUpdateNoInputFloat32,
+ ScatterNd2Dim1Outter1InnerUpdateNoInput<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(ScatterNd3DimUpdateNoInputFloat32, ScatterNd3DimUpdateNoInput<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(ScatterNd3Dim1Outter2InnerUpdateNoInputFloat32,
+ ScatterNd3Dim1Outter2InnerUpdateNoInput<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(ScatterNd3Dim2Outter1InnerUpdateNoInputFloat32,
+ ScatterNd3Dim2Outter1InnerUpdateNoInput<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(ScatterNd4DimUpdateNoInputFloat32, ScatterNd4DimUpdateNoInput<DataType::Float32>)
+
+ARMNN_AUTO_TEST_CASE_WITH_THF(ScatterNd2DimAddNoInputFloat32, ScatterNd2DimAddNoInput<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(ScatterNd2DimSubNoInputFloat32, ScatterNd2DimSubNoInput<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(ScatterNd2DimMaxNoInputFloat32, ScatterNd2DimMaxNoInput<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(ScatterNd2DimMinNoInputFloat32, ScatterNd2DimMinNoInput<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(ScatterNd2DimMulNoInputFloat32, ScatterNd2DimMulNoInput<DataType::Float32>)
+
+ARMNN_AUTO_TEST_CASE_WITH_THF(ScatterNd3DimUpdateNoInputFloat16, ScatterNd3DimUpdateNoInput<DataType::Float16>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(ScatterNd3DimUpdateNoInputQAsymmS8, ScatterNd3DimUpdateNoInput<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(ScatterNd3DimUpdateNoInputQAsymmU8, ScatterNd3DimUpdateNoInput<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(ScatterNd3DimUpdateNoInputQSymmS8, ScatterNd3DimUpdateNoInput<DataType::QSymmS8>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(ScatterNd3DimUpdateNoInputQSymmS16, ScatterNd3DimUpdateNoInput<DataType::QSymmS16>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(ScatterNd3DimUpdateNoInputSigned32, ScatterNd3DimUpdateNoInput<DataType::Signed32>)
+
} \ No newline at end of file
diff --git a/src/backends/reference/workloads/CMakeLists.txt b/src/backends/reference/workloads/CMakeLists.txt
index 42f92aec1d..0f70cb0022 100644
--- a/src/backends/reference/workloads/CMakeLists.txt
+++ b/src/backends/reference/workloads/CMakeLists.txt
@@ -1,5 +1,5 @@
#
-# Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+# Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
# SPDX-License-Identifier: MIT
#
@@ -85,6 +85,8 @@ list(APPEND armnnRefBackendWorkloads_sources
RefCastWorkload.hpp
RefChannelShuffleWorkload.cpp
RefChannelShuffleWorkload.hpp
+ RefScatterNdWorkload.cpp
+ RefScatterNdWorkload.hpp
RefShapeWorkload.hpp
RefComparisonWorkload.cpp
RefComparisonWorkload.hpp
@@ -195,6 +197,8 @@ list(APPEND armnnRefBackendWorkloads_sources
Resize.cpp
Resize.hpp
Rsqrt.hpp
+ ScatterNd.cpp
+ ScatterNd.hpp
Sin.hpp
Slice.cpp
Slice.hpp
diff --git a/src/backends/reference/workloads/RefScatterNdWorkload.cpp b/src/backends/reference/workloads/RefScatterNdWorkload.cpp
new file mode 100644
index 0000000000..4713add0e9
--- /dev/null
+++ b/src/backends/reference/workloads/RefScatterNdWorkload.cpp
@@ -0,0 +1,100 @@
+//
+// Copyright © 2024 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include <fmt/format.h>
+#include "RefScatterNdWorkload.hpp"
+#include "RefWorkloadUtils.hpp"
+#include "ScatterNd.hpp"
+#include "Profiling.hpp"
+
+namespace armnn
+{
+
+ RefScatterNdWorkload::RefScatterNdWorkload(const ScatterNdQueueDescriptor& descriptor, const WorkloadInfo& info)
+ : RefBaseWorkload(descriptor, info)
+ {}
+
+ void RefScatterNdWorkload::Execute() const
+ {
+ Execute(m_Data.m_Inputs, m_Data.m_Outputs);
+ }
+
+ void RefScatterNdWorkload::ExecuteAsync(ExecutionData& executionData)
+ {
+ WorkingMemDescriptor* workingMemDescriptor = static_cast<WorkingMemDescriptor*>(executionData.m_Data);
+ Execute(workingMemDescriptor->m_Inputs, workingMemDescriptor->m_Outputs);
+ }
+
+ void RefScatterNdWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
+ {
+ ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefScatterNdWorkload_Execute");
+
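+        // Input slot layout: inputs[0] holds the input tensor (or the output shape tensor when
+        // m_InputEnabled is false), inputs[1] holds indices, inputs[2] holds updates.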
+ if (m_Data.m_Parameters.m_InputEnabled)
+ {
+ // Getting TensorInfos for three inputs slots
+ const TensorInfo& inputInfo = GetTensorInfo(inputs[0]);
+ const TensorInfo& indicesInfo = GetTensorInfo(inputs[1]);
+ const TensorInfo& updatesInfo = GetTensorInfo(inputs[2]);
+
+ // Getting Decoder for input
+ std::unique_ptr<Decoder<float>> inputDecoder = MakeDecoder<float>(GetTensorInfo(inputs[0]),
+ inputs[0]->Map());
+
+ // Getting Decoder for indices
+ std::unique_ptr<Decoder<int>> indicesDecoder = MakeDecoder<int>(GetTensorInfo(inputs[1]),
+ inputs[1]->Map());
+
+ // Getting Decoder for updates
+ std::unique_ptr<Decoder<float>> updatesDecoder = MakeDecoder<float>(GetTensorInfo(inputs[2]),
+ inputs[2]->Map());
+
+ // Getting Encoder for output
+ std::unique_ptr<Encoder<float>> outputEncoder = MakeEncoder<float>(GetTensorInfo(outputs[0]),
+ outputs[0]->Map());
+
+ ScatterNd(inputInfo,
+ indicesInfo,
+ updatesInfo,
+ *inputDecoder,
+ *indicesDecoder,
+ *updatesDecoder,
+ *outputEncoder,
+ m_Data.m_Parameters);
+ }
+ else
+ {
+ // Getting TensorInfos for three inputs slots
+ const TensorInfo& shapeInfo = GetTensorInfo(inputs[0]);
+ const TensorInfo& indicesInfo = GetTensorInfo(inputs[1]);
+ const TensorInfo& updatesInfo = GetTensorInfo(inputs[2]);
+
+ // Getting Decoder for shape
+ std::unique_ptr<Decoder<int>> shapeDecoder = MakeDecoder<int>(GetTensorInfo(inputs[0]),
+ inputs[0]->Map());
+
+ // Getting Decoder for indices
+ std::unique_ptr<Decoder<int>> indicesDecoder = MakeDecoder<int>(GetTensorInfo(inputs[1]),
+ inputs[1]->Map());
+
+ // Getting Decoder for updates
+ std::unique_ptr<Decoder<float>> updatesDecoder = MakeDecoder<float>(GetTensorInfo(inputs[2]),
+ inputs[2]->Map());
+
+ // Getting Encoder for output
+ std::unique_ptr<Encoder<float>> outputEncoder = MakeEncoder<float>(GetTensorInfo(outputs[0]),
+ outputs[0]->Map());
+
+ ScatterNd(indicesInfo,
+ updatesInfo,
+ shapeInfo,
+ *indicesDecoder,
+ *updatesDecoder,
+ *shapeDecoder,
+ *outputEncoder,
+ m_Data.m_Parameters);
+ }
+ }
+
+} // namespace armnn \ No newline at end of file
diff --git a/src/backends/reference/workloads/RefScatterNdWorkload.hpp b/src/backends/reference/workloads/RefScatterNdWorkload.hpp
new file mode 100644
index 0000000000..c9cf5a3af3
--- /dev/null
+++ b/src/backends/reference/workloads/RefScatterNdWorkload.hpp
@@ -0,0 +1,30 @@
+//
+// Copyright © 2024 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "RefBaseWorkload.hpp"
+#include <armnn/backends/WorkloadData.hpp>
+
+#include "ScatterNd.hpp"
+
+namespace armnn
+{
+
+ class RefScatterNdWorkload : public RefBaseWorkload<ScatterNdQueueDescriptor>
+ {
+ public:
+ explicit RefScatterNdWorkload(const ScatterNdQueueDescriptor& descriptor,
+ const WorkloadInfo& info);
+
+ void Execute() const override;
+ void ExecuteAsync(ExecutionData& executionData) override;
+
+ private:
+ void Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const;
+
+ };
+
+} // namespace armnn \ No newline at end of file
diff --git a/src/backends/reference/workloads/RefWorkloads.hpp b/src/backends/reference/workloads/RefWorkloads.hpp
index 98aa27b8a9..92b178c3d5 100644
--- a/src/backends/reference/workloads/RefWorkloads.hpp
+++ b/src/backends/reference/workloads/RefWorkloads.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -55,6 +55,7 @@
#include "RefReshapeWorkload.hpp"
#include "RefResizeWorkload.hpp"
#include "RefReverseV2Workload.hpp"
+#include "RefScatterNdWorkload.hpp"
#include "RefShapeWorkload.hpp"
#include "RefSliceWorkload.hpp"
#include "RefSplitterWorkload.hpp"
diff --git a/src/backends/reference/workloads/ScatterNd.cpp b/src/backends/reference/workloads/ScatterNd.cpp
new file mode 100644
index 0000000000..8eb53b00a8
--- /dev/null
+++ b/src/backends/reference/workloads/ScatterNd.cpp
@@ -0,0 +1,336 @@
+//
+// Copyright © 2024 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "ScatterNd.hpp"
+#include "Encoders.hpp"
+#include <armnn/backends/WorkloadData.hpp>
+#include <armnn/Logging.hpp>
+
+#include <fmt/format.h>
+
+#include <numeric>
+
+namespace armnn
+{
+
+float ScatterOperation(ScatterNdFunction operation,
+ float input,
+ float update)
+{
+ switch (operation)
+ {
+ case ScatterNdFunction::Update:
+ return update;
+ case ScatterNdFunction::Add:
+ return input + update;
+ case ScatterNdFunction::Sub:
+ return input - update;
+ case ScatterNdFunction::Max:
+ return std::max(input, update);
+ case ScatterNdFunction::Min:
+ return std::min(input, update);
+ case ScatterNdFunction::Mul:
+ return input * update;
+ default:
+ throw InvalidArgumentException("ScatterNd: cannot execute this operation.");
+ }
+}
+
+void ScatterNd(const TensorInfo& inputInfo,
+ const TensorInfo& indicesInfo,
+ const TensorInfo& updatesInfo,
+ Decoder<float>& input,
+ Decoder<int>& indices,
+ Decoder<float>& updates,
+ Encoder<float>& output,
+ ScatterNdDescriptor descriptor)
+{
+ // Axis Unsupported
+ if (descriptor.m_AxisEnabled)
+ {
+ throw InvalidArgumentException("ScatterNd: axis param not supported.");
+ }
+
+ // Get the shape for indices, updates, and input
+ TensorShape indicesShape = indicesInfo.GetShape();
+ TensorShape updatesShape = updatesInfo.GetShape();
+ TensorShape inputShape = inputInfo.GetShape();
+
+ // Get the dimensions for indices and updates
+ unsigned int dimension = inputInfo.GetNumDimensions();
+ unsigned int indicesDim = indicesInfo.GetNumDimensions();
+ unsigned int updatesDim = updatesInfo.GetNumDimensions();
+
+ // Calculate the outter and inner dimensions
+ unsigned int outterDim = indicesShape[indicesDim - 1];
+ unsigned int innerDim = dimension - outterDim;
+
+ // Calculate the number of elements in each dimension
+ unsigned int numElementsCount = 1;
+ std::vector<unsigned int> elementInDim(dimension);
+ for (unsigned int dimIndex = dimension; dimIndex > 0; --dimIndex)
+ {
+ elementInDim[dimIndex - 1] = numElementsCount;
+ numElementsCount *= inputShape[dimIndex - 1];
+ }
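+    // elementInDim now holds the row-major strides, e.g. an input shape of [2, 3, 4]
+    // gives elementInDim = { 12, 4, 1 }.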
+
+ // Number of updates per index
+ unsigned int numUpdatesPerIndex = elementInDim[dimension - innerDim - 1];
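+    // This is the size of the contiguous inner block written for each index (1 when an index addresses a single element).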
+
+ // Number of indices to update
+ unsigned int numIndices = indicesShape[0];
+
+ // Check Input Requirements
+ // Requirement 1: Indices and Updates must have rank at least 1
+ if (indicesDim < 1 || updatesDim < 1)
+ {
+ throw InvalidArgumentException("ScatterNd: indices and updates must have rank >= 1.");
+ }
+
+ // Requirement 2: Input, Indices and Updates must have values
+ if (inputInfo.GetNumElements() == 0 ||
+ indicesInfo.GetNumElements() == 0 ||
+ updatesInfo.GetNumElements() == 0)
+ {
+        throw InvalidArgumentException("ScatterNd: input, indices and updates tensors must have values.");
+ }
+
+ // Requirement 3: Indices and Updates must match in shape
+    // The updates dimension should equal 1 + the inner dimension
+ if (updatesDim != 1 + innerDim)
+ {
+        throw InvalidArgumentException("ScatterNd: updates dimension should equal 1 + inner dimension.");
+ }
+    // The inner dimensions of updates have to match the corresponding dimensions of the input shape
+ for (unsigned int dimBackIndex = 0; dimBackIndex < innerDim; ++dimBackIndex)
+ {
+ if (updatesShape[updatesDim - dimBackIndex - 1] != inputShape[dimension - dimBackIndex - 1])
+ {
+ throw InvalidArgumentException(
+                fmt::format("ScatterNd: input and updates shapes do not match on dimension {}",
+ dimension - dimBackIndex));
+ }
+ }
+
+ // Requirement 4: Check duplicate indices and out of bound indices
+ std::set<int> indicesSet;
+ std::vector<int> flattenIndices(numIndices);
+ for (unsigned int indicesIdx = 0; indicesIdx < numIndices; ++indicesIdx)
+ {
+ // Get the index
+ int flattenIndex = 0;
+
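+        // Flatten the N-dimensional index using the row-major strides, e.g. for a 3x3 input
+        // the index (1, 2) flattens to 1 * 3 + 2 * 1 = 5.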
+        for (unsigned int outterIdx = 0; outterIdx < outterDim; ++outterIdx)
+        {
+ int outterIndexValue = indices.Get();
+
+ // Check bounds
+ if (outterIndexValue < 0 || outterIndexValue >= int(inputShape[outterIdx]))
+ {
+ throw InvalidArgumentException(
+ fmt::format("ScatterNd: indices {} out of bound [0, {})",
+ outterIndexValue, inputShape[outterIdx]));
+ }
+
+ flattenIndex += int(elementInDim[outterIdx]) * outterIndexValue;
+ ++indices;
+ }
+
+ // Check duplicates when executing ScatterNd::Update
+ if (descriptor.m_Function == ScatterNdFunction::Update &&
+ indicesSet.find(flattenIndex) != indicesSet.end())
+ {
+ throw InvalidArgumentException(
+                fmt::format("ScatterNd: duplicate index {} occurs when executing ScatterNd::Update.", flattenIndex));
+ }
+
+ flattenIndices[indicesIdx] = flattenIndex;
+ indicesSet.insert(flattenIndex);
+ }
+
+ // Set the input data to output
+ for (unsigned int idx = 0; idx < inputInfo.GetNumElements(); ++idx)
+ {
+ float inputValue = input.Get();
+ ++input;
+ output.Set(inputValue);
+ ++output;
+ }
+
+ // Iterate through all indices to scatter updates
+ for (unsigned int indicesIdx = 0; indicesIdx < numIndices; ++indicesIdx)
+ {
+ // Get the index and calculate the flatten index
+ int flattenIndex = flattenIndices[indicesIdx];
+
+ // FlattenIndex is the place that we are going to update the elements
+ unsigned int updatesStartIdx = indicesIdx * numUpdatesPerIndex;
+ for (unsigned int updatesIdx = 0; updatesIdx < numUpdatesPerIndex; ++updatesIdx)
+ {
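+            // operator[] repositions the decoder/encoder at the given element; Get()/Set() then read/write it.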
+ updates[updatesStartIdx + updatesIdx];
+ input[static_cast<unsigned int>(flattenIndex) + updatesIdx];
+ float updateValue = ScatterOperation(descriptor.m_Function, input.Get(), updates.Get());
+ output[static_cast<unsigned int>(flattenIndex) + updatesIdx];
+ output.Set(updateValue);
+ }
+ }
+}
+
+void ScatterNd(const TensorInfo& indicesInfo,
+ const TensorInfo& updatesInfo,
+ const TensorInfo& shapeInfo,
+ Decoder<int>& indices,
+ Decoder<float>& updates,
+ Decoder<int>& shape,
+ Encoder<float>& output,
+ ScatterNdDescriptor descriptor)
+{
+ // Axis Unsupported
+ if (descriptor.m_AxisEnabled)
+ {
+ throw InvalidArgumentException("ScatterNd: axis param not supported.");
+ }
+
+ // Get the shape for indices, updates, and input
+ TensorShape indicesShape = indicesInfo.GetShape();
+ TensorShape updatesShape = updatesInfo.GetShape();
+
+ // Get the shape values
+ std::vector<float> shapeValues = shape.DecodeTensor(shapeInfo.GetShape());
+ // Check the shape
+ if (shapeInfo.GetNumElements() == 0)
+ {
+ throw InvalidArgumentException("ScatterNd: shape must have values.");
+ }
+ for (auto shapeValue : shapeValues)
+ {
+ if (shapeValue <= 0)
+ {
+            throw InvalidArgumentException("ScatterNd: shape values must be > 0.");
+ }
+ }
+ // Get the input shape
+ std::vector<unsigned int> inputShape (shapeValues.begin(), shapeValues.end());
+ unsigned int inputElementsNum = static_cast<unsigned int>(
+ std::accumulate(inputShape.begin(), inputShape.end(), 1, std::multiplies<unsigned int>()));
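+    // The number of output elements is the product of the shape values, e.g. a shape of { 3, 3 } gives 9.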
+
+ // Get the dimensions for indices and updates
+ unsigned int dimension = shapeInfo.GetNumElements();
+ unsigned int indicesDim = indicesInfo.GetNumDimensions();
+ unsigned int updatesDim = updatesInfo.GetNumDimensions();
+
+ // Calculate the outter and inner dimensions
+ unsigned int outterDim = indicesShape[indicesDim - 1];
+ unsigned int innerDim = dimension - outterDim;
+
+ // Calculate the number of elements in each dimension
+ unsigned int numElementsCount = 1;
+ std::vector<unsigned int> elementInDim(dimension);
+ for (unsigned int dimIndex = dimension; dimIndex > 0; --dimIndex)
+ {
+ elementInDim[dimIndex - 1] = numElementsCount;
+ numElementsCount *= inputShape[dimIndex - 1];
+ }
+
+ // Number of updates per index
+ unsigned int numUpdatesPerIndex = elementInDim[dimension - innerDim - 1];
+
+ // Number of indices to update
+ unsigned int numIndices = indicesShape[0];
+
+ // Check Input Requirements
+ // Requirement 1: Indices and Updates must have rank at least 1
+ if (indicesDim < 1 || updatesDim < 1)
+ {
+ throw InvalidArgumentException("ScatterNd: indices and updates must have rank >= 1.");
+ }
+
+    // Requirement 2: Indices and Updates must have values (the shape tensor is validated above)
+ if (indicesInfo.GetNumElements() == 0 ||
+ updatesInfo.GetNumElements() == 0)
+ {
+        throw InvalidArgumentException("ScatterNd: indices and updates tensors must have values.");
+ }
+
+ // Requirement 3: Indices and Updates must match in shape
+    // The updates dimension should equal 1 + the inner dimension
+ if (updatesDim != 1 + innerDim)
+ {
+        throw InvalidArgumentException("ScatterNd: updates dimension should equal 1 + inner dimension.");
+ }
+    // The inner dimensions of updates have to match the corresponding dimensions of the output shape
+ for (unsigned int dimBackIndex = 0; dimBackIndex < innerDim; ++dimBackIndex)
+ {
+ if (updatesShape[updatesDim - dimBackIndex - 1] != inputShape[dimension - dimBackIndex - 1])
+ {
+ throw InvalidArgumentException(
+                fmt::format("ScatterNd: output shape and updates shape do not match on dimension {}",
+ dimension - dimBackIndex));
+ }
+ }
+
+ // Requirement 4: Check duplicate indices and out of bound indices
+ std::set<int> indicesSet;
+ std::vector<int> flattenIndices(numIndices);
+ for (unsigned int indicesIdx = 0; indicesIdx < numIndices; ++indicesIdx)
+ {
+ // Get the index
+ int flattenIndex = 0;
+
+        for (unsigned int outterIdx = 0; outterIdx < outterDim; ++outterIdx)
+        {
+ int outterIndexValue = indices.Get();
+
+ // Check bounds
+ if (outterIndexValue < 0 || outterIndexValue >= int(inputShape[outterIdx]))
+ {
+ throw InvalidArgumentException(
+ fmt::format("ScatterNd: indices {} out of bound [0, {})",
+ outterIndexValue, inputShape[outterIdx]));
+ }
+
+ flattenIndex += int(elementInDim[outterIdx]) * outterIndexValue;
+ ++indices;
+ }
+
+ // Check duplicates when executing ScatterNd::Update
+ if (descriptor.m_Function == ScatterNdFunction::Update &&
+ indicesSet.find(flattenIndex) != indicesSet.end())
+ {
+ throw InvalidArgumentException(
+                fmt::format("ScatterNd: duplicate index {} occurs when executing ScatterNd::Update.",
+ flattenIndex));
+ }
+
+ flattenIndices[indicesIdx] = flattenIndex;
+ indicesSet.insert(flattenIndex);
+ }
+
+ // Set zeros to output
+ for (unsigned int idx = 0; idx < inputElementsNum; ++idx)
+ {
+ output.Set(0.0f);
+ ++output;
+ }
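+    // With no input tensor the base value is 0, so Sub produces -update, Mul produces 0,
+    // and Max produces max(0, update) at each scattered element.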
+
+ // Iterate through all indices to scatter updates
+ for (unsigned int indicesIdx = 0; indicesIdx < numIndices; ++indicesIdx)
+ {
+ // Get the index and calculate the flatten index
+ int flattenIndex = flattenIndices[indicesIdx];
+
+ // FlattenIndex is the place that we are going to update the elements
+ unsigned int updatesStartIdx = indicesIdx * numUpdatesPerIndex;
+ for (unsigned int updatesIdx = 0; updatesIdx < numUpdatesPerIndex; ++updatesIdx)
+ {
+ updates[updatesStartIdx + updatesIdx];
+ float updateValue = ScatterOperation(descriptor.m_Function, 0.0f, updates.Get());
+ output[static_cast<unsigned int>(flattenIndex) + updatesIdx];
+ output.Set(updateValue);
+ }
+ }
+}
+
+} // namespace armnn \ No newline at end of file
diff --git a/src/backends/reference/workloads/ScatterNd.hpp b/src/backends/reference/workloads/ScatterNd.hpp
new file mode 100644
index 0000000000..e40d3640a7
--- /dev/null
+++ b/src/backends/reference/workloads/ScatterNd.hpp
@@ -0,0 +1,34 @@
+//
+// Copyright © 2024 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <armnn/Tensor.hpp>
+#include "Encoders.hpp"
+#include "Decoders.hpp"
+#include "armnn/Descriptors.hpp"
+
+namespace armnn
+{
+// ScatterNd with input tensor
+void ScatterNd(const TensorInfo& inputInfo,
+ const TensorInfo& indicesInfo,
+ const TensorInfo& updatesInfo,
+ Decoder<float>& input,
+ Decoder<int>& indices,
+ Decoder<float>& updates,
+ Encoder<float>& output,
+ ScatterNdDescriptor descriptor);
+
+// ScatterNd without input tensor, only shape provided
+void ScatterNd(const TensorInfo& indicesInfo,
+ const TensorInfo& updatesInfo,
+ const TensorInfo& shapeInfo,
+ Decoder<int>& indices,
+ Decoder<float>& updates,
+ Decoder<int>& shape,
+ Encoder<float>& output,
+ ScatterNdDescriptor descriptor);
+} // namespace armnn \ No newline at end of file