aboutsummaryrefslogtreecommitdiff
path: root/src/backends/backendsCommon
diff options
context:
space:
mode:
Diffstat (limited to 'src/backends/backendsCommon')
-rw-r--r--src/backends/backendsCommon/WorkloadData.cpp35
-rw-r--r--src/backends/backendsCommon/WorkloadFactory.cpp11
-rw-r--r--src/backends/backendsCommon/WorkloadUtils.cpp45
-rw-r--r--src/backends/backendsCommon/WorkloadUtils.hpp7
-rw-r--r--src/backends/backendsCommon/common.mk1
-rw-r--r--src/backends/backendsCommon/test/CMakeLists.txt3
-rw-r--r--src/backends/backendsCommon/test/GatherNdEndToEndTestImpl.hpp161
-rw-r--r--src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp2
-rw-r--r--src/backends/backendsCommon/test/LayerTests.hpp1
-rw-r--r--src/backends/backendsCommon/test/layerTests/GatherNdTestImpl.cpp300
-rw-r--r--src/backends/backendsCommon/test/layerTests/GatherNdTestImpl.hpp32
11 files changed, 598 insertions, 0 deletions
diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp
index fc48ffce28..962ecde24b 100644
--- a/src/backends/backendsCommon/WorkloadData.cpp
+++ b/src/backends/backendsCommon/WorkloadData.cpp
@@ -2718,6 +2718,41 @@ void RsqrtQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
}
+void GatherNdQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
+{
+ const std::string descriptorName{"GatherNdQueueDescriptor"};
+
+ ValidateNumInputs(workloadInfo, descriptorName, 2);
+ ValidateNumOutputs(workloadInfo, descriptorName, 1);
+
+ const TensorInfo& indicesTensorInfo = workloadInfo.m_InputTensorInfos[1];
+ if (indicesTensorInfo.GetDataType() != DataType::Signed32)
+ {
+ throw InvalidArgumentException(descriptorName + ": Indices tensor type must be Int32.");
+ }
+
+ const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
+ const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
+
+ std::vector<DataType> supportedTypes =
+ {
+ DataType::BFloat16,
+ DataType::Float16,
+ DataType::Float32,
+ DataType::QAsymmS8,
+ DataType::QAsymmU8,
+ DataType::QSymmS16,
+ DataType::Signed32,
+ };
+
+ ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
+
+ ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
+
+ unsigned int outputDim = outputTensorInfo.GetNumDimensions();
+ ValidateTensorNumDimensions(outputTensorInfo, descriptorName, outputDim, "output");
+}
+
void GatherQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
const std::string descriptorName{"GatherQueueDescriptor"};
diff --git a/src/backends/backendsCommon/WorkloadFactory.cpp b/src/backends/backendsCommon/WorkloadFactory.cpp
index 090e2856d8..f955aec30f 100644
--- a/src/backends/backendsCommon/WorkloadFactory.cpp
+++ b/src/backends/backendsCommon/WorkloadFactory.cpp
@@ -509,6 +509,17 @@ bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId,
reason);
break;
}
+ case LayerType::GatherNd:
+ {
+ const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
+ const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
+ const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
+ result = layerSupportObject.IsGatherNdSupported(OverrideDataType(input0, dataType),
+ input1,
+ OverrideDataType(output, dataType),
+ reason);
+ break;
+ }
case LayerType::Input:
{
const TensorInfo& input = layer.GetOutputSlot(0).GetTensorInfo();
diff --git a/src/backends/backendsCommon/WorkloadUtils.cpp b/src/backends/backendsCommon/WorkloadUtils.cpp
index fcdad3e21b..d2ae16af0c 100644
--- a/src/backends/backendsCommon/WorkloadUtils.cpp
+++ b/src/backends/backendsCommon/WorkloadUtils.cpp
@@ -10,6 +10,7 @@
#include <armnnUtils/DataLayoutIndexed.hpp>
#include <fmt/format.h>
+#include <numeric>
namespace armnn
{
@@ -294,4 +295,48 @@ int32_t ConvertMaskToACLFormat(int32_t mask, int32_t numDim)
return reversedMask;
}
+std::map<std::string, unsigned int> CalculateGatherNdKeyIndices(TensorInfo inputInfo0, TensorInfo inputInfo1)
+{
+ std::vector<unsigned int> paramsShape;
+ for (unsigned int i = 0; i < inputInfo0.GetNumDimensions(); ++i)
+ {
+ paramsShape.push_back(inputInfo0.GetShape()[i]);
+ }
+
+ std::vector<unsigned int> indicesShape;
+ for (unsigned int i = 0; i < inputInfo1.GetNumDimensions(); ++i)
+ {
+ indicesShape.push_back(inputInfo1.GetShape()[i]);
+ }
+
+ std::map<std::string, unsigned int> keyIndices;
+
+ // N: number of batches
+ keyIndices["N"] = 1;
+
+ // ND: number of dimensions that are sliced from params
+ keyIndices["ND"] = indicesShape.back();
+
+ // W: number of indices in each batch (all but the last dimension)
+ keyIndices["W"] =
+ static_cast<unsigned int>(std::accumulate(std::begin(indicesShape),
+ std::end(indicesShape) - 1,
+ 1,
+ std::multiplies<>() ));
+ // K: range of each index
+ keyIndices["K"] =
+ static_cast<unsigned int>(std::accumulate(std::begin(paramsShape),
+ std::begin(paramsShape) + static_cast<int>(keyIndices["ND"]),
+ 1,
+ std::multiplies<>() ));
+ // C: number of channels for each index
+ keyIndices["C"] =
+ static_cast<unsigned int>(std::accumulate(std::begin(paramsShape) + static_cast<int>(keyIndices["ND"]),
+ std::end(paramsShape),
+ 1,
+ std::multiplies<>() ));
+
+ return keyIndices;
+}
+
} // namespace armnn
diff --git a/src/backends/backendsCommon/WorkloadUtils.hpp b/src/backends/backendsCommon/WorkloadUtils.hpp
index 2f1c5c47f0..0e5487336f 100644
--- a/src/backends/backendsCommon/WorkloadUtils.hpp
+++ b/src/backends/backendsCommon/WorkloadUtils.hpp
@@ -251,4 +251,11 @@ std::tuple<ConstTensor, unsigned int> Convert1HWOtoMIHW(const ConstTensorHandle*
const DataLayout& dataLayout,
void* permuteBuffer);
+/// Calculates the key index values needed for GatherNd: N, ND, K, W, C (N is always 1)
+///
+/// \param inputInfo0 - TensorInfo of the corresponding input tensor: params
+/// \param inputInfo1 - TensorInfo of the corresponding input tensor: indices
+/// \return - A map with names and values for N, ND, K, W, C
+std::map<std::string, unsigned int> CalculateGatherNdKeyIndices(TensorInfo inputInfo0, TensorInfo inputInfo1);
+
} //namespace armnn
diff --git a/src/backends/backendsCommon/common.mk b/src/backends/backendsCommon/common.mk
index 8f97669d0a..1f42a5cd8f 100644
--- a/src/backends/backendsCommon/common.mk
+++ b/src/backends/backendsCommon/common.mk
@@ -68,6 +68,7 @@ COMMON_TEST_SOURCES := \
test/layerTests/FillTestImpl.cpp \
test/layerTests/FloorTestImpl.cpp \
test/layerTests/FullyConnectedTestImpl.cpp \
+ test/layerTests/GatherNdTestImpl.cpp \
test/layerTests/GatherTestImpl.cpp \
test/layerTests/InstanceNormalizationTestImpl.cpp \
test/layerTests/L2NormalizationTestImpl.cpp \
diff --git a/src/backends/backendsCommon/test/CMakeLists.txt b/src/backends/backendsCommon/test/CMakeLists.txt
index 8ec65b3c17..06d230b006 100644
--- a/src/backends/backendsCommon/test/CMakeLists.txt
+++ b/src/backends/backendsCommon/test/CMakeLists.txt
@@ -30,6 +30,7 @@ list(APPEND armnnBackendsCommonUnitTests_sources
FillEndToEndTestImpl.hpp
FullyConnectedEndToEndTestImpl.hpp
GatherEndToEndTestImpl.hpp
+ GatherNdEndToEndTestImpl.hpp
InstanceNormalizationEndToEndTestImpl.cpp
InstanceNormalizationEndToEndTestImpl.hpp
IsLayerSupportedTestImpl.hpp
@@ -113,6 +114,8 @@ list(APPEND armnnBackendsCommonUnitTests_sources
layerTests/FloorTestImpl.hpp
layerTests/FullyConnectedTestImpl.cpp
layerTests/FullyConnectedTestImpl.hpp
+ layerTests/GatherNdTestImpl.cpp
+ layerTests/GatherNdTestImpl.hpp
layerTests/GatherTestImpl.cpp
layerTests/GatherTestImpl.hpp
layerTests/InstanceNormalizationTestImpl.cpp
diff --git a/src/backends/backendsCommon/test/GatherNdEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/GatherNdEndToEndTestImpl.hpp
new file mode 100644
index 0000000000..0eea91190e
--- /dev/null
+++ b/src/backends/backendsCommon/test/GatherNdEndToEndTestImpl.hpp
@@ -0,0 +1,161 @@
+//
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <CommonTestUtils.hpp>
+
+#include <armnn/INetwork.hpp>
+#include <ResolveType.hpp>
+
+#include <doctest/doctest.h>
+
+namespace{
+
+armnn::INetworkPtr CreateGatherNdNetwork(const armnn::TensorInfo& paramsInfo,
+ const armnn::TensorInfo& indicesInfo,
+ const armnn::TensorInfo& outputInfo,
+ const std::vector<int32_t>& indicesData)
+{
+ armnn::INetworkPtr net(armnn::INetwork::Create());
+
+ armnn::IConnectableLayer* paramsLayer = net->AddInputLayer(0);
+ armnn::IConnectableLayer* indicesLayer = net->AddConstantLayer(armnn::ConstTensor(indicesInfo, indicesData));
+ armnn::IConnectableLayer* gatherNdLayer = net->AddGatherNdLayer("gatherNd");
+ armnn::IConnectableLayer* outputLayer = net->AddOutputLayer(0, "output");
+ Connect(paramsLayer, gatherNdLayer, paramsInfo, 0, 0);
+ Connect(indicesLayer, gatherNdLayer, indicesInfo, 0, 1);
+ Connect(gatherNdLayer, outputLayer, outputInfo, 0, 0);
+
+ return net;
+}
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+void GatherNdEndToEnd(const std::vector<BackendId>& backends)
+{
+ armnn::TensorInfo paramsInfo({ 2, 3, 8, 4 }, ArmnnType);
+ armnn::TensorInfo indicesInfo({ 2, 2 }, armnn::DataType::Signed32);
+ armnn::TensorInfo outputInfo({ 2, 8, 4 }, ArmnnType);
+
+ paramsInfo.SetQuantizationScale(1.0f);
+ paramsInfo.SetQuantizationOffset(0);
+ paramsInfo.SetConstant(true);
+ indicesInfo.SetConstant(true);
+ outputInfo.SetQuantizationScale(1.0f);
+ outputInfo.SetQuantizationOffset(0);
+
+ // Creates structures for input & output.
+ std::vector<T> paramsData{
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
+
+ 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
+ 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95,
+
+ 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111,
+ 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127,
+
+ 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143,
+ 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159,
+
+ 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175,
+ 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191
+ };
+
+ std::vector<int32_t> indicesData{
+ { 1, 2, 1, 1},
+ };
+
+ std::vector<T> expectedOutput{
+ 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175,
+ 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191,
+
+ 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143,
+ 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159
+ };
+
+ // Builds up the structure of the network
+ armnn::INetworkPtr net = CreateGatherNdNetwork(paramsInfo, indicesInfo, outputInfo, indicesData);
+
+ CHECK(net);
+
+ std::map<int, std::vector<T>> inputTensorData = {{ 0, paramsData }};
+ std::map<int, std::vector<T>> expectedOutputData = {{ 0, expectedOutput }};
+
+ EndToEndLayerTestImpl<ArmnnType, ArmnnType>(move(net), inputTensorData, expectedOutputData, backends);
+}
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+void GatherNdMultiDimEndToEnd(const std::vector<BackendId>& backends)
+{
+ armnn::TensorInfo paramsInfo({ 5, 5, 2 }, ArmnnType);
+ armnn::TensorInfo indicesInfo({ 2, 2, 3, 2 }, armnn::DataType::Signed32);
+ armnn::TensorInfo outputInfo({ 2, 2, 3, 2 }, ArmnnType);
+
+ paramsInfo.SetQuantizationScale(1.0f);
+ paramsInfo.SetQuantizationOffset(0);
+ paramsInfo.SetConstant(true);
+ indicesInfo.SetConstant(true);
+ outputInfo.SetQuantizationScale(1.0f);
+ outputInfo.SetQuantizationOffset(0);
+
+ // Creates structures for input & output.
+ std::vector<T> paramsData{
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+ 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
+ 30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
+ 40, 41, 42, 43, 44, 45, 46, 47, 48, 49
+ };
+
+ std::vector<int32_t> indicesData{
+ 0, 0,
+ 3, 3,
+ 4, 4,
+
+ 0, 0,
+ 1, 1,
+ 2, 2,
+
+ 4, 4,
+ 3, 3,
+ 0, 0,
+
+ 2, 2,
+ 1, 1,
+ 0, 0
+ };
+
+ std::vector<T> expectedOutput{
+ 0, 1,
+ 36, 37,
+ 48, 49,
+
+ 0, 1,
+ 12, 13,
+ 24, 25,
+
+ 48, 49,
+ 36, 37,
+ 0, 1,
+
+ 24, 25,
+ 12, 13,
+ 0, 1
+ };
+
+ // Builds up the structure of the network
+ armnn::INetworkPtr net = CreateGatherNdNetwork(paramsInfo, indicesInfo, outputInfo, indicesData);
+
+ std::map<int, std::vector<T>> inputTensorData = {{ 0, paramsData }};
+ std::map<int, std::vector<T>> expectedOutputData = {{ 0, expectedOutput }};
+
+ EndToEndLayerTestImpl<ArmnnType, ArmnnType>(move(net), inputTensorData, expectedOutputData, backends);
+}
+
+} // anonymous namespace
diff --git a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
index 06f3eb561e..ba8cfd5f68 100644
--- a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
+++ b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
@@ -666,6 +666,8 @@ DECLARE_LAYER_POLICY_2_PARAM(FullyConnected)
DECLARE_LAYER_POLICY_2_PARAM(Gather)
+DECLARE_LAYER_POLICY_1_PARAM(GatherNd)
+
DECLARE_LAYER_POLICY_CUSTOM_PARAM(Input, armnn::LayerBindingId)
DECLARE_LAYER_POLICY_2_PARAM(InstanceNormalization)
diff --git a/src/backends/backendsCommon/test/LayerTests.hpp b/src/backends/backendsCommon/test/LayerTests.hpp
index 6bd29438a8..e30cf2b6f6 100644
--- a/src/backends/backendsCommon/test/LayerTests.hpp
+++ b/src/backends/backendsCommon/test/LayerTests.hpp
@@ -33,6 +33,7 @@
#include <backendsCommon/test/layerTests/FillTestImpl.hpp>
#include <backendsCommon/test/layerTests/FloorTestImpl.hpp>
#include <backendsCommon/test/layerTests/FullyConnectedTestImpl.hpp>
+#include <backendsCommon/test/layerTests/GatherNdTestImpl.hpp>
#include <backendsCommon/test/layerTests/GatherTestImpl.hpp>
#include <backendsCommon/test/layerTests/InstanceNormalizationTestImpl.hpp>
#include <backendsCommon/test/layerTests/L2NormalizationTestImpl.hpp>
diff --git a/src/backends/backendsCommon/test/layerTests/GatherNdTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/GatherNdTestImpl.cpp
new file mode 100644
index 0000000000..57a30c6f33
--- /dev/null
+++ b/src/backends/backendsCommon/test/layerTests/GatherNdTestImpl.cpp
@@ -0,0 +1,300 @@
+//
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "GatherNdTestImpl.hpp"
+
+#include <DataTypeUtils.hpp>
+#include <armnnTestUtils/TensorCopyUtils.hpp>
+#include <armnnTestUtils/WorkloadTestUtils.hpp>
+
+namespace
+{
+
+template<armnn::DataType ArmnnType,
+ typename T = armnn::ResolveType<ArmnnType>,
+ size_t ParamsDim,
+ size_t IndicesDim,
+ size_t OutputDim>
+LayerTestResult<T, OutputDim> GatherNdTestImpl(
+ armnn::IWorkloadFactory &workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
+ const armnn::ITensorHandleFactory &tensorHandleFactory,
+ const armnn::TensorInfo &paramsInfo,
+ const armnn::TensorInfo &indicesInfo,
+ const armnn::TensorInfo &outputInfo,
+ const std::vector<T> &paramsData,
+ const std::vector<int32_t> &indicesData,
+ const std::vector<T> &outputData)
+{
+ IgnoreUnused(memoryManager);
+
+ std::vector<T> actualOutput(outputInfo.GetNumElements());
+
+ std::unique_ptr<armnn::ITensorHandle> paramsHandle = tensorHandleFactory.CreateTensorHandle(paramsInfo);
+ std::unique_ptr<armnn::ITensorHandle> indicesHandle = tensorHandleFactory.CreateTensorHandle(indicesInfo);
+ std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputInfo);
+
+ armnn::GatherNdQueueDescriptor data;
+ armnn::WorkloadInfo info;
+ AddInputToWorkload(data, info, paramsInfo, paramsHandle.get());
+ AddInputToWorkload(data, info, indicesInfo, indicesHandle.get());
+ AddOutputToWorkload(data, info, outputInfo, outputHandle.get());
+
+ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::GatherNd,
+ data,
+ info);
+
+ paramsHandle->Allocate();
+ indicesHandle->Allocate();
+ outputHandle->Allocate();
+
+ CopyDataToITensorHandle(paramsHandle.get(), paramsData.data());
+ CopyDataToITensorHandle(indicesHandle.get(), indicesData.data());
+
+ workload->Execute();
+
+ CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
+
+ return LayerTestResult<T, OutputDim>(actualOutput,
+ outputData,
+ outputHandle->GetShape(),
+ outputInfo.GetShape());
+}
+} // anonymous namespace
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<T, 2> SimpleGatherNd2dTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ armnn::TensorInfo paramsInfo({ 5, 2 }, ArmnnType);
+ armnn::TensorInfo indicesInfo({ 3, 1 }, armnn::DataType::Signed32);
+ armnn::TensorInfo outputInfo({ 3, 2 }, ArmnnType);
+ if (armnn::IsQuantizedType<T>())
+ {
+ paramsInfo.SetQuantizationScale(1.0f);
+ paramsInfo.SetQuantizationOffset(1);
+ outputInfo.SetQuantizationScale(1.0f);
+ outputInfo.SetQuantizationOffset(1);
+ }
+ const std::vector<T> params = ConvertToDataType<ArmnnType>(
+ { 1, 2,
+ 3, 4,
+ 5, 6,
+ 7, 8,
+ 9, 10},
+ paramsInfo);
+ const std::vector<int32_t> indices = ConvertToDataType<armnn::DataType::Signed32>(
+ { 1, 0, 4},
+ indicesInfo);
+ const std::vector<T> expectedOutput = ConvertToDataType<ArmnnType>(
+ { 3, 4,
+ 1, 2,
+ 9, 10},
+ outputInfo);
+ return GatherNdTestImpl<ArmnnType, T, 2, 2, 2>(
+ workloadFactory,
+ memoryManager,
+ tensorHandleFactory,
+ paramsInfo,
+ indicesInfo,
+ outputInfo,
+ params,
+ indices,
+ expectedOutput);
+}
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<T, 3> SimpleGatherNd3dTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ armnn::TensorInfo paramsInfo({ 2, 3, 8, 4 }, ArmnnType);
+ armnn::TensorInfo indicesInfo({ 2, 2 }, armnn::DataType::Signed32);
+ armnn::TensorInfo outputInfo({ 2, 8, 4 }, ArmnnType);
+
+ if (armnn::IsQuantizedType<T>())
+ {
+ paramsInfo.SetQuantizationScale(1.0f);
+ paramsInfo.SetQuantizationOffset(0);
+ outputInfo.SetQuantizationScale(1.0f);
+ outputInfo.SetQuantizationOffset(0);
+ }
+ const std::vector<T> params = ConvertToDataType<ArmnnType>(
+ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
+
+ 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
+ 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95,
+
+ 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111,
+ 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127,
+
+ 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143,
+ 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159,
+
+ 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175,
+ 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191 },
+ paramsInfo);
+
+ const std::vector<int32_t> indices = ConvertToDataType<armnn::DataType::Signed32>(
+ { 1, 2, 1, 1},
+ indicesInfo);
+
+ const std::vector<T> expectedOutput = ConvertToDataType<ArmnnType>(
+ { 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175,
+ 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191,
+
+ 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143,
+ 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159},
+ outputInfo);
+
+ return GatherNdTestImpl<ArmnnType, T, 4, 2, 3>(
+ workloadFactory,
+ memoryManager,
+ tensorHandleFactory,
+ paramsInfo,
+ indicesInfo,
+ outputInfo,
+ params,
+ indices,
+ expectedOutput);
+}
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<T, 4> SimpleGatherNd4dTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ armnn::TensorInfo paramsInfo({ 5, 5, 2 }, ArmnnType);
+ armnn::TensorInfo indicesInfo({ 2, 2, 3, 2 }, armnn::DataType::Signed32);
+ armnn::TensorInfo outputInfo({ 2, 2, 3, 2 }, ArmnnType);
+
+ if (armnn::IsQuantizedType<T>())
+ {
+ paramsInfo.SetQuantizationScale(1.0f);
+ paramsInfo.SetQuantizationOffset(0);
+ outputInfo.SetQuantizationScale(1.0f);
+ outputInfo.SetQuantizationOffset(0);
+ }
+ const std::vector<T> params = ConvertToDataType<ArmnnType>(
+ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+ 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
+ 30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
+ 40, 41, 42, 43, 44, 45, 46, 47, 48, 49 },
+ paramsInfo);
+
+ const std::vector<int32_t> indices = ConvertToDataType<armnn::DataType::Signed32>(
+ { 0, 0,
+ 3, 3,
+ 4, 4,
+
+ 0, 0,
+ 1, 1,
+ 2, 2,
+
+ 4, 4,
+ 3, 3,
+ 0, 0,
+
+ 2, 2,
+ 1, 1,
+ 0, 0 },
+ indicesInfo);
+
+ const std::vector<T> expectedOutput = ConvertToDataType<ArmnnType>(
+ { 0, 1,
+ 36, 37,
+ 48, 49,
+
+ 0, 1,
+ 12, 13,
+ 24, 25,
+
+ 48, 49,
+ 36, 37,
+ 0, 1,
+
+ 24, 25,
+ 12, 13,
+ 0, 1 },
+ outputInfo);
+
+ return GatherNdTestImpl<ArmnnType, T, 3, 4, 4>(
+ workloadFactory,
+ memoryManager,
+ tensorHandleFactory,
+ paramsInfo,
+ indicesInfo,
+ outputInfo,
+ params,
+ indices,
+ expectedOutput);
+}
+
+//
+// Explicit template specializations
+//
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 2>
+SimpleGatherNd2dTest<armnn::DataType::Float32>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 3>
+SimpleGatherNd3dTest<armnn::DataType::Float32>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
+SimpleGatherNd4dTest<armnn::DataType::Float32>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 2>
+SimpleGatherNd2dTest<armnn::DataType::QAsymmS8>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 3>
+SimpleGatherNd3dTest<armnn::DataType::QAsymmS8>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 4>
+SimpleGatherNd4dTest<armnn::DataType::QAsymmS8>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Signed32>, 2>
+SimpleGatherNd2dTest<armnn::DataType::Signed32>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Signed32>, 3>
+SimpleGatherNd3dTest<armnn::DataType::Signed32>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Signed32>, 4>
+SimpleGatherNd4dTest<armnn::DataType::Signed32>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
\ No newline at end of file
diff --git a/src/backends/backendsCommon/test/layerTests/GatherNdTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/GatherNdTestImpl.hpp
new file mode 100644
index 0000000000..6f0845ddde
--- /dev/null
+++ b/src/backends/backendsCommon/test/layerTests/GatherNdTestImpl.hpp
@@ -0,0 +1,32 @@
+//
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <armnnTestUtils/LayerTestResult.hpp>
+
+#include <Half.hpp>
+#include <ResolveType.hpp>
+
+#include <armnn/backends/IBackendInternal.hpp>
+#include <armnn/backends/WorkloadFactory.hpp>
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 2> SimpleGatherNd2dTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 3> SimpleGatherNd3dTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 4> SimpleGatherNd4dTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
\ No newline at end of file