From 93bbf00d968101fb9a9174ad011b655ca7100546 Mon Sep 17 00:00:00 2001
From: Kevin May <kevin.may@arm.com>
Date: Mon, 11 Mar 2024 09:31:10 +0000
Subject: IVGCVSW-8232 ScatterNd added to delegate and opaque delegate

Signed-off-by: Kevin May <kevin.may@arm.com>
Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com>
Change-Id: I5839f54c71f74eaa6819333393bb3054db9db5be
---
 delegate/CMakeLists.txt                 |   4 +-
 delegate/classic/CMakeLists.txt         |   3 +-
 delegate/classic/src/ScatterNd.hpp      | 161 ++++++++++++
 delegate/classic/src/armnn_delegate.cpp |   7 +
 delegate/opaque/CMakeLists.txt          |   3 +-
 delegate/opaque/src/ScatterNd.hpp       | 173 +++++++++++++
 delegate/opaque/src/armnn_delegate.cpp  |   7 +
 delegate/test/ScatterNdTest.cpp         | 446 ++++++++++++++++++++++++++++++++
 delegate/test/ScatterNdTestHelper.hpp   | 174 +++++++++++++
 docs/05_03_delegate.dox                 |   2 +
 10 files changed, 977 insertions(+), 3 deletions(-)
 create mode 100644 delegate/classic/src/ScatterNd.hpp
 create mode 100644 delegate/opaque/src/ScatterNd.hpp
 create mode 100644 delegate/test/ScatterNdTest.cpp
 create mode 100644 delegate/test/ScatterNdTestHelper.hpp

diff --git a/delegate/CMakeLists.txt b/delegate/CMakeLists.txt
index d92611f84b..f8b0300976 100644
--- a/delegate/CMakeLists.txt
+++ b/delegate/CMakeLists.txt
@@ -1,5 +1,5 @@
 #
-# Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+# Copyright © 2022-2024 Arm Ltd and Contributors. All rights reserved.
 # SPDX-License-Identifier: MIT
 #
@@ -188,6 +188,8 @@ if(BUILD_UNIT_TESTS)
         test/ReverseV2TestHelper.hpp
         test/RoundTest.cpp
         test/RoundTestHelper.hpp
+        test/ScatterNdTest.cpp
+        test/ScatterNdTestHelper.hpp
         test/SoftmaxTest.cpp
         test/SoftmaxTestHelper.hpp
         test/SpaceDepthTest.cpp
diff --git a/delegate/classic/CMakeLists.txt b/delegate/classic/CMakeLists.txt
index fbd19ede30..72ecc28d97 100644
--- a/delegate/classic/CMakeLists.txt
+++ b/delegate/classic/CMakeLists.txt
@@ -1,5 +1,5 @@
 #
-# Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+# Copyright © 2023-2024 Arm Ltd and Contributors. All rights reserved.
 # SPDX-License-Identifier: MIT
 #
@@ -37,6 +37,7 @@ list(APPEND armnnClassicDelegateObject_sources
         src/Resize.hpp
         src/ReverseV2.hpp
         src/Round.hpp
+        src/ScatterNd.hpp
         src/Shape.hpp
         src/SharedFunctions.hpp
         src/SharedFunctions.cpp
diff --git a/delegate/classic/src/ScatterNd.hpp b/delegate/classic/src/ScatterNd.hpp
new file mode 100644
index 0000000000..c73e231c46
--- /dev/null
+++ b/delegate/classic/src/ScatterNd.hpp
@@ -0,0 +1,161 @@
+//
+// Copyright © 2024 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <ClassicDelegateUtils.hpp>
+
+#include <tensorflow/lite/builtin_ops.h>
+#include <tensorflow/lite/c/builtin_op_data.h>
+#include <tensorflow/lite/c/common.h>
+#include <tensorflow/lite/kernels/internal/tensor_ctypes.h>
+#include <tensorflow/lite/minimal_logging.h>
+
+namespace armnnDelegate
+{
+TfLiteStatus ValidateScatterNdOperator(DelegateData& delegateData,
+                                       TfLiteContext* tfLiteContext,
+                                       const armnn::TensorInfo& indicesInfo,
+                                       const armnn::TensorInfo& updatesInfo,
+                                       const armnn::TensorInfo& shapeInfo,
+                                       const armnn::TensorInfo& outputInfo,
+                                       const armnn::ScatterNdDescriptor& descriptor)
+{
+    bool isSupported = false;
+    FORWARD_LAYER_SUPPORT_FUNC("SCATTER_ND",
+                               tfLiteContext,
+                               IsScatterNdSupported,
+                               delegateData.m_Backends,
+                               isSupported,
+                               armnn::BackendId(),
+                               shapeInfo,
+                               indicesInfo,
+                               updatesInfo,
+                               outputInfo,
+                               descriptor);
+    return isSupported ? kTfLiteOk : kTfLiteError;
+}
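+
+// Note: IsScatterNdSupported takes the tensor infos in Arm NN order (shape, indices,
+// updates, output), whereas the TFLite node lists its inputs as (indices, updates,
+// shape); the reordering above, and in the input slot connections below, is deliberate.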
+
+TfLiteStatus VisitScatterNdOperator(DelegateData& delegateData,
+                                    TfLiteContext* tfLiteContext,
+                                    TfLiteNode* tfLiteNode,
+                                    int nodeIndex,
+                                    int32_t scatterNdOperatorCode)
+{
+    TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 3, nodeIndex));
+    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+
+    const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
+
+    // The indices tensor holds the positions the data is updated/scattered into
+    const TfLiteTensor& tfLiteIndicesTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
+    if (IsDynamicTensor(tfLiteIndicesTensor))
+    {
+        TF_LITE_MAYBE_KERNEL_LOG(
+            tfLiteContext,
+            "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
+            scatterNdOperatorCode, nodeIndex);
+        return kTfLiteError;
+    }
+
+    // The updates tensor provides the data which will be updated/scattered into the relevant indices
+    const TfLiteTensor& tfLiteUpdatesTensor = tfLiteTensors[tfLiteNode->inputs->data[1]];
+    if (IsDynamicTensor(tfLiteUpdatesTensor))
+    {
+        TF_LITE_MAYBE_KERNEL_LOG(
+            tfLiteContext,
+            "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
+            scatterNdOperatorCode, nodeIndex);
+        return kTfLiteError;
+    }
+
+    // For TFLite ScatterNd there is no input tensor.
+    // The shape tensor is a 1D tensor which represents the shape of an input tensor to be filled with zeros.
+    const TfLiteTensor& tfLiteShapeTensor = tfLiteTensors[tfLiteNode->inputs->data[2]];
+    if (IsDynamicTensor(tfLiteShapeTensor))
+    {
+        TF_LITE_MAYBE_KERNEL_LOG(
+            tfLiteContext,
+            "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
+            scatterNdOperatorCode, nodeIndex);
+        return kTfLiteError;
+    }
+
+    // The output tensor
+    const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
+    if (IsDynamicTensor(tfLiteOutputTensor))
+    {
+        TF_LITE_MAYBE_KERNEL_LOG(
+            tfLiteContext,
+            "TfLiteArmnnDelegate: Dynamic output tensors are not supported in operator #%d node #%d: ",
+            scatterNdOperatorCode, nodeIndex);
+        return kTfLiteError;
+    }
+
+    const armnn::TensorInfo& indicesTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteIndicesTensor);
+    const armnn::TensorInfo& updatesTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteUpdatesTensor);
+    const armnn::TensorInfo& shapeTensorInfo   = GetTensorInfoForTfLiteTensor(tfLiteShapeTensor);
+    const armnn::TensorInfo& outputTensorInfo  = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor);
+
+    armnn::ScatterNdDescriptor scatterNdDescriptor;
+    scatterNdDescriptor.m_Function     = armnn::ScatterNdFunction::Update;
+    scatterNdDescriptor.m_InputEnabled = false;
+    scatterNdDescriptor.m_Axis         = 0;
+    scatterNdDescriptor.m_AxisEnabled  = false;
+
+    // Check output dimensions
+    if (shapeTensorInfo.GetShape().GetNumElements() != outputTensorInfo.GetNumDimensions())
+    {
+        TF_LITE_MAYBE_KERNEL_LOG(
+            tfLiteContext,
+            "TfLiteArmnnDelegate: Shape tensor number of elements and output tensor dimension differ. "
+            "Operator: #%d node #%d: ",
+            scatterNdOperatorCode, nodeIndex);
+        return kTfLiteError;
+    }
+
+    // No network pointer indicates that only support for this operator should be checked
+    if (!delegateData.m_Network)
+    {
+        return ValidateScatterNdOperator(delegateData,
+                                         tfLiteContext,
+                                         indicesTensorInfo,
+                                         updatesTensorInfo,
+                                         shapeTensorInfo,
+                                         outputTensorInfo,
+                                         scatterNdDescriptor);
+    }
+
+    auto layerName = GetLayerName(armnn::LayerType::ScatterNd, nodeIndex);
+    armnn::IConnectableLayer* layer = delegateData.m_Network->AddScatterNdLayer(scatterNdDescriptor,
+                                                                                layerName.c_str());
+
+    if (layer == nullptr)
+    {
+        return kTfLiteError;
+    }
+
+    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
+
+    if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
+    {
+        return kTfLiteError;
+    }
+
+    if (static_cast<unsigned int>(tfLiteNode->outputs->size) != layer->GetNumOutputSlots())
+    {
+        return kTfLiteError;
+    }
+
+    // Connect the TFLite inputs (indices, updates, shape) to the Arm NN slots (shape, indices, updates)
+    delegateData.m_OutputSlotForNode[tfLiteNode->inputs->data[2]]->Connect(layer->GetInputSlot(0));
+    delegateData.m_OutputSlotForNode[tfLiteNode->inputs->data[0]]->Connect(layer->GetInputSlot(1));
+    delegateData.m_OutputSlotForNode[tfLiteNode->inputs->data[1]]->Connect(layer->GetInputSlot(2));
+
+    // Prepare output slots
+    armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
+    delegateData.m_OutputSlotForNode[static_cast<unsigned long>(tfLiteNode->outputs->data[0])] = &outputSlot;
+
+    return kTfLiteOk;
+}
+
+} // namespace armnnDelegate
\ No newline at end of file
diff --git a/delegate/classic/src/armnn_delegate.cpp b/delegate/classic/src/armnn_delegate.cpp
index 05bf9b2ee7..52621ee6c1 100644
--- a/delegate/classic/src/armnn_delegate.cpp
+++ b/delegate/classic/src/armnn_delegate.cpp
@@ -34,6 +34,7 @@
 #include "Resize.hpp"
 #include "ReverseV2.hpp"
 #include "Round.hpp"
+#include "ScatterNd.hpp"
 #include "Shape.hpp"
 #include "Slice.hpp"
 #include "StridedSlice.hpp"
@@ -1070,6 +1071,12 @@ TfLiteStatus ArmnnSubgraph::VisitNode(DelegateData& delegateData,
                                                tfLiteNode,
                                                nodeIndex,
                                                kTfLiteBuiltinTransposeConv);
+        case kTfLiteBuiltinScatterNd:
+            return VisitScatterNdOperator(delegateData,
+                                          tfLiteContext,
+                                          tfLiteNode,
+                                          nodeIndex,
+                                          kTfLiteBuiltinScatterNd);
         case kTfLiteBuiltinSoftmax:
             return VisitSoftmaxOperator(delegateData,
                                         tfLiteContext,
diff --git a/delegate/opaque/CMakeLists.txt b/delegate/opaque/CMakeLists.txt
index abbf38d071..858a0a35ab 100644
--- a/delegate/opaque/CMakeLists.txt
+++ b/delegate/opaque/CMakeLists.txt
@@ -1,5 +1,5 @@
 #
-# Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+# Copyright © 2023-2024 Arm Ltd and Contributors. All rights reserved.
 # SPDX-License-Identifier: MIT
 #
@@ -34,6 +34,7 @@ list(APPEND armnnOpaqueDelegateObject_sources
         src/Resize.hpp
         src/ReverseV2.hpp
         src/Round.hpp
+        src/ScatterNd.hpp
         src/Shape.hpp
         src/SharedFunctions.cpp
         src/SharedFunctions.hpp
diff --git a/delegate/opaque/src/ScatterNd.hpp b/delegate/opaque/src/ScatterNd.hpp
new file mode 100644
index 0000000000..08bbed7f0e
--- /dev/null
+++ b/delegate/opaque/src/ScatterNd.hpp
@@ -0,0 +1,173 @@
+//
+// Copyright © 2024 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <OpaqueDelegateUtils.hpp>
+
+namespace armnnOpaqueDelegate
+{
+TfLiteStatus ValidateScatterNdOperator(DelegateData& delegateData,
+                                       TfLiteOpaqueContext* tfLiteContext,
+                                       const armnn::TensorInfo& indicesInfo,
+                                       const armnn::TensorInfo& updatesInfo,
+                                       const armnn::TensorInfo& shapeInfo,
+                                       const armnn::TensorInfo& outputInfo,
+                                       const armnn::ScatterNdDescriptor& descriptor)
+{
+    bool isSupported = false;
+    FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("SCATTER_ND",
+                                      tfLiteContext,
+                                      IsScatterNdSupported,
+                                      delegateData.m_Backends,
+                                      isSupported,
+                                      armnn::BackendId(),
+                                      shapeInfo,
+                                      indicesInfo,
+                                      updatesInfo,
+                                      outputInfo,
+                                      descriptor);
+    return isSupported ? kTfLiteOk : kTfLiteError;
+}
+
+TfLiteStatus VisitScatterNdOperator(DelegateData& delegateData,
+                                    TfLiteOpaqueContext* tfLiteContext,
+                                    TfLiteOpaqueNode* tfLiteNode,
+                                    int nodeIndex,
+                                    int32_t scatterNdOperatorCode)
+{
+    TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 3, nodeIndex));
+    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+
+    // Gather input indices and use to get input tensors.
+    auto numInputs = TfLiteOpaqueNodeNumberOfInputs(tfLiteNode);
+    const int* inputTensors;
+    if (TfLiteOpaqueNodeInputs(tfLiteNode, &inputTensors, &numInputs) != kTfLiteOk)
+    {
+        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+            tfLiteContext,
+            "TfLiteArmnnOpaqueDelegate: Unable to gather input tensor indices from node #%d: ",
+            nodeIndex);
+        return kTfLiteError;
+    }
+
+    // Gather output indices and use to get output tensors.
+    int numOutputs = 0;
+    const int* outputTensors;
+    if (TfLiteOpaqueNodeOutputs(tfLiteNode, &outputTensors, &numOutputs) != kTfLiteOk)
+    {
+        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+            tfLiteContext,
+            "TfLiteArmnnOpaqueDelegate: Unable to gather output tensor indices from node #%d: ",
+            nodeIndex);
+        return kTfLiteError;
+    }
+
+    // The indices tensor holds the positions the data is updated/scattered into
+    const TfLiteOpaqueTensor* tfLiteIndicesTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[0]);
+    if (IsDynamicTensor(tfLiteIndicesTensor))
+    {
+        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+            tfLiteContext,
+            "TfLiteArmnnOpaqueDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
+            scatterNdOperatorCode, nodeIndex);
+        return kTfLiteError;
+    }
+
+    // The updates tensor provides the data which will be updated/scattered into the relevant indices
+    const TfLiteOpaqueTensor* tfLiteUpdatesTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[1]);
+    if (IsDynamicTensor(tfLiteUpdatesTensor))
+    {
+        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+            tfLiteContext,
+            "TfLiteArmnnOpaqueDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
+            scatterNdOperatorCode, nodeIndex);
+        return kTfLiteError;
+    }
+
+    // For TFLite ScatterNd there is no input tensor.
+    // The shape tensor is a 1D tensor which represents the shape of an input tensor to be filled with zeros.
+    const TfLiteOpaqueTensor* tfLiteShapeTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[2]);
+    if (IsDynamicTensor(tfLiteShapeTensor))
+    {
+        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+            tfLiteContext,
+            "TfLiteArmnnOpaqueDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
+            scatterNdOperatorCode, nodeIndex);
+        return kTfLiteError;
+    }
+
+    // The output tensor
+    const TfLiteOpaqueTensor* tfLiteOutputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, outputTensors[0]);
+    if (IsDynamicTensor(tfLiteOutputTensor))
+    {
+        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+            tfLiteContext,
+            "TfLiteArmnnOpaqueDelegate: Dynamic output tensors are not supported in operator #%d node #%d: ",
+            scatterNdOperatorCode, nodeIndex);
+        return kTfLiteError;
+    }
+
+    const armnn::TensorInfo& shapeTensorInfo   = GetTensorInfoForTfLiteOpaqueTensor(tfLiteShapeTensor);
+    const armnn::TensorInfo& indicesTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteIndicesTensor);
+    const armnn::TensorInfo& updatesTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteUpdatesTensor);
+    const armnn::TensorInfo& outputTensorInfo  = GetTensorInfoForTfLiteOpaqueTensor(tfLiteOutputTensor, true);
+
+    armnn::ScatterNdDescriptor scatterNdDescriptor;
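+    // TFLite's SCATTER_ND always scatters the updates into an implicit zero-filled
+    // tensor of the given shape, which maps onto Arm NN's Update function with the
+    // optional input tensor and the axis left disabled.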
+    scatterNdDescriptor.m_Function     = armnn::ScatterNdFunction::Update;
+    scatterNdDescriptor.m_InputEnabled = false;
+    scatterNdDescriptor.m_Axis         = 0;
+    scatterNdDescriptor.m_AxisEnabled  = false;
+
+    // Check output dimensions
+    if (shapeTensorInfo.GetShape().GetNumElements() != outputTensorInfo.GetNumDimensions())
+    {
+        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+            tfLiteContext,
+            "TfLiteArmnnOpaqueDelegate: Shape tensor number of elements and output tensor dimension differ. "
+            "Operator: #%d node #%d: ",
+            scatterNdOperatorCode, nodeIndex);
+        return kTfLiteError;
+    }
+
+    // No network pointer indicates that only support for this operator should be checked
+    if (!delegateData.m_Network)
+    {
+        return ValidateScatterNdOperator(delegateData,
+                                         tfLiteContext,
+                                         indicesTensorInfo,
+                                         updatesTensorInfo,
+                                         shapeTensorInfo,
+                                         outputTensorInfo,
+                                         scatterNdDescriptor);
+    }
+
+    auto layerName = GetName(armnn::LayerType::ScatterNd, nodeIndex);
+    armnn::IConnectableLayer* layer = delegateData.m_Network->AddScatterNdLayer(scatterNdDescriptor,
+                                                                                layerName.c_str());
+
+    if (layer == nullptr)
+    {
+        return kTfLiteError;
+    }
+
+    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
+
+    if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
+    {
+        return kTfLiteError;
+    }
+
+    // Connect the TFLite inputs (indices, updates, shape) to the Arm NN slots (shape, indices, updates)
+    delegateData.m_OutputSlotForNode[inputTensors[2]]->Connect(layer->GetInputSlot(0));
+    delegateData.m_OutputSlotForNode[inputTensors[0]]->Connect(layer->GetInputSlot(1));
+    delegateData.m_OutputSlotForNode[inputTensors[1]]->Connect(layer->GetInputSlot(2));
+
+    // Prepare output slots
+    armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
+    delegateData.m_OutputSlotForNode[static_cast<unsigned long>(outputTensors[0])] = &outputSlot;
+
+    return kTfLiteOk;
+}
+
+} // namespace armnnOpaqueDelegate
\ No newline at end of file
diff --git a/delegate/opaque/src/armnn_delegate.cpp b/delegate/opaque/src/armnn_delegate.cpp
index 4ed0a78013..9e047f6a68 100644
--- a/delegate/opaque/src/armnn_delegate.cpp
+++ b/delegate/opaque/src/armnn_delegate.cpp
@@ -33,6 +33,7 @@
 #include "Resize.hpp"
 #include "ReverseV2.hpp"
 #include "Round.hpp"
+#include "ScatterNd.hpp"
 #include "Shape.hpp"
 #include "Slice.hpp"
 #include "StridedSlice.hpp"
@@ -1154,6 +1155,12 @@ TfLiteStatus ArmnnSubgraph::VisitNode(DelegateData& delegateData,
                                                   nodeIndex,
                                                   kTfLiteBuiltinRsqrt,
                                                   armnn::UnaryOperation::Rsqrt);
+        case kTfLiteBuiltinScatterNd:
+            return VisitScatterNdOperator(delegateData,
+                                          tfLiteContext,
+                                          tfLiteNode,
+                                          nodeIndex,
+                                          kTfLiteBuiltinScatterNd);
         case kTfLiteBuiltinShape:
             return VisitShapeOperator(delegateData,
                                       tfLiteContext,
diff --git a/delegate/test/ScatterNdTest.cpp b/delegate/test/ScatterNdTest.cpp
new file mode 100644
index 0000000000..2b2a67c4eb
--- /dev/null
+++ b/delegate/test/ScatterNdTest.cpp
@@ -0,0 +1,446 @@
+//
+// Copyright © 2024 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "ScatterNdTestHelper.hpp"
+
+#include <doctest/doctest.h>
+
+namespace armnnDelegate
+{
+
+template<typename T>
+void ScatterNd1DimTest(tflite::TensorType tensorType, const std::vector<armnn::BackendId>& backends = {})
+{
+    // Set shapes
+    std::vector<int32_t> indicesShape = { 3, 1 };
+    std::vector<int32_t> updatesShape = { 3 };
+    std::vector<int32_t> shapeShape = { 1 };
+    std::vector<int32_t> expectedOutputShape = { 5 };
+
+    // Set Values
+    std::vector<int32_t> indicesValues = { 0, 1, 2 };
+    std::vector<T> updatesValues = { 1, 2, 3 };
+    std::vector<int32_t> shapeValue = { 5 };
+    std::vector<T> expectedOutputValues = { 1, 2, 3, 0, 0 };
+
+    ScatterNdTestImpl<T>(tensorType,
+                         indicesShape,
+                         indicesValues,
+                         updatesShape,
+                         updatesValues,
+                         shapeShape,
+                         shapeValue,
+                         expectedOutputShape,
+                         expectedOutputValues,
+                         backends);
+}
+
+template<typename T>
+void ScatterNd2DimTest(tflite::TensorType tensorType, const std::vector<armnn::BackendId>& backends = {})
+{
+    // Set shapes
+    std::vector<int32_t> indicesShape = { 3, 2 };
+    std::vector<int32_t> updatesShape = { 3 };
+    std::vector<int32_t> shapeShape = { 2 };
+    std::vector<int32_t> expectedOutputShape = { 3, 3 };
+
+    // Set Values
+    std::vector<int32_t> indicesValues = { 0, 0,
+                                           1, 1,
+                                           2, 2 };
+    std::vector<T> updatesValues = { 1, 2, 3 };
+    std::vector<int32_t> shapeValue = { 3, 3 };
+    std::vector<T> expectedOutputValues = { 1, 0, 0,
+                                            0, 2, 0,
+                                            0, 0, 3 };
+
+    ScatterNdTestImpl<T>(tensorType,
+                         indicesShape,
+                         indicesValues,
+                         updatesShape,
+                         updatesValues,
+                         shapeShape,
+                         shapeValue,
+                         expectedOutputShape,
+                         expectedOutputValues,
+                         backends);
+}
+
+template<typename T>
+void ScatterNd2Dim1Outter1InnerTest(tflite::TensorType tensorType, const std::vector<armnn::BackendId>& backends = {})
+{
+    // Set shapes
+    std::vector<int32_t> indicesShape = { 2, 1 };
+    std::vector<int32_t> updatesShape = { 2, 3 };
+    std::vector<int32_t> shapeShape = { 2 };
+    std::vector<int32_t> expectedOutputShape = { 3, 3 };
+
+    // Set Values
+    std::vector<int32_t> indicesValues = { 0, 1 };
+    std::vector<T> updatesValues = { 1, 1, 1,
+                                     1, 1, 1 };
+    std::vector<int32_t> shapeValue = { 3, 3 };
+    std::vector<T> expectedOutputValues = { 1, 1, 1,
+                                            1, 1, 1,
+                                            0, 0, 0 };
+
+    ScatterNdTestImpl<T>(tensorType,
+                         indicesShape,
+                         indicesValues,
+                         updatesShape,
+                         updatesValues,
+                         shapeShape,
+                         shapeValue,
+                         expectedOutputShape,
+                         expectedOutputValues,
+                         backends);
+}
+
+template<typename T>
+void ScatterNd3DimTest(tflite::TensorType tensorType, const std::vector<armnn::BackendId>& backends = {})
+{
+    // Set shapes
+    std::vector<int32_t> indicesShape = { 3, 3 };
+    std::vector<int32_t> updatesShape = { 3 };
+    std::vector<int32_t> shapeShape = { 3 };
+    std::vector<int32_t> expectedOutputShape = { 3, 3, 3 };
+
+    // Set Values
+    std::vector<int32_t> indicesValues = { 0, 0, 0,
+                                           1, 1, 1,
+                                           2, 2, 2 };
+    std::vector<T> updatesValues = { 1, 2, 3 };
+    std::vector<int32_t> shapeValue = { 3, 3, 3 };
+    std::vector<T> expectedOutputValues = { 1, 0, 0,
+                                            0, 0, 0,
+                                            0, 0, 0,
+
+                                            0, 0, 0,
+                                            0, 2, 0,
+                                            0, 0, 0,
+
+                                            0, 0, 0,
+                                            0, 0, 0,
+                                            0, 0, 3 };
+
+    ScatterNdTestImpl<T>(tensorType,
+                         indicesShape,
+                         indicesValues,
+                         updatesShape,
+                         updatesValues,
+                         shapeShape,
+                         shapeValue,
+                         expectedOutputShape,
+                         expectedOutputValues,
+                         backends);
+}
+
+template<typename T>
+void ScatterNd3Dim1Outter2InnerTest(tflite::TensorType tensorType, const std::vector<armnn::BackendId>& backends = {})
+{
+    // Set shapes
+    std::vector<int32_t> indicesShape = { 2, 1 };
+    std::vector<int32_t> updatesShape = { 2, 3, 3 };
+    std::vector<int32_t> shapeShape = { 3 };
+    std::vector<int32_t> expectedOutputShape = { 3, 3, 3 };
+
+    // Set Values
+    std::vector<int32_t> indicesValues = { 0, 1 };
+    std::vector<T> updatesValues = { 1, 1, 1,
+                                     1, 1, 1,
+                                     1, 1, 1,
+
+                                     2, 2, 2,
+                                     2, 2, 2,
+                                     2, 2, 2 };
+    std::vector<int32_t> shapeValue = { 3, 3, 3 };
+    std::vector<T> expectedOutputValues = { 1, 1, 1,
+                                            1, 1, 1,
+                                            1, 1, 1,
+
+                                            2, 2, 2,
+                                            2, 2, 2,
+                                            2, 2, 2,
+
+                                            0, 0, 0,
+                                            0, 0, 0,
+                                            0, 0, 0 };
+
+    ScatterNdTestImpl<T>(tensorType,
+                         indicesShape,
+                         indicesValues,
+                         updatesShape,
+                         updatesValues,
+                         shapeShape,
+                         shapeValue,
+                         expectedOutputShape,
+                         expectedOutputValues,
+                         backends);
+}
+
+template<typename T>
+void ScatterNd3Dim2Outter1InnerTest(tflite::TensorType tensorType, const std::vector<armnn::BackendId>& backends = {})
+{
+    // Set shapes
+    std::vector<int32_t> indicesShape = { 2, 2 };
+    std::vector<int32_t> updatesShape = { 2, 3 };
+    std::vector<int32_t> shapeShape = { 3 };
+    std::vector<int32_t> expectedOutputShape = { 3, 3, 3 };
+
+    // Set Values
+    std::vector<int32_t> indicesValues = { 0, 0,
+                                           1, 1 };
+    std::vector<T> updatesValues = { 1, 1, 1,
+                                     2, 2, 2 };
+    std::vector<int32_t> shapeValue = { 3, 3, 3 };
+    std::vector<T> expectedOutputValues = { 1, 1, 1,
+                                            0, 0, 0,
+                                            0, 0, 0,
+
+                                            0, 0, 0,
+                                            2, 2, 2,
+                                            0, 0, 0,
+
+                                            0, 0, 0,
+                                            0, 0, 0,
+                                            0, 0, 0 };
+
+    ScatterNdTestImpl<T>(tensorType,
+                         indicesShape,
+                         indicesValues,
+                         updatesShape,
+                         updatesValues,
+                         shapeShape,
+                         shapeValue,
+                         expectedOutputShape,
+                         expectedOutputValues,
+                         backends);
+}
+
+template<typename T>
+void ScatterNdDim4(tflite::TensorType tensorType, const std::vector<armnn::BackendId>& backends = {})
+{
+    // Set shapes
+    std::vector<int32_t> indicesShape = { 3, 4 };
+    std::vector<int32_t> updatesShape = { 3 };
+    std::vector<int32_t> shapeShape = { 4 };
+    std::vector<int32_t> expectedOutputShape = { 2, 3, 3, 3 };
+
+    // Set Values
+    std::vector<int32_t> indicesValues = { 0, 0, 0, 0,
+                                           0, 1, 1, 1,
+                                           1, 1, 1, 1 };
+    std::vector<T> updatesValues = { 1, 2, 3 };
+    std::vector<int32_t> shapeValue = { 2, 3, 3, 3 };
+    std::vector<T> expectedOutputValues = { 1, 0, 0,
+                                            0, 0, 0,
+                                            0, 0, 0,
+
+                                            0, 0, 0,
+                                            0, 2, 0,
+                                            0, 0, 0,
+
+                                            0, 0, 0,
+                                            0, 0, 0,
+                                            0, 0, 0,
+
+                                            0, 0, 0,
+                                            0, 0, 0,
+                                            0, 0, 0,
+
+                                            0, 0, 0,
+                                            0, 3, 0,
+                                            0, 0, 0,
+
+                                            0, 0, 0,
+                                            0, 0, 0,
+                                            0, 0, 0 };
+
+    ScatterNdTestImpl<T>(tensorType,
+                         indicesShape,
+                         indicesValues,
+                         updatesShape,
+                         updatesValues,
+                         shapeShape,
+                         shapeValue,
+                         expectedOutputShape,
+                         expectedOutputValues,
+                         backends);
+}
+
+TEST_SUITE("ScatterNdDelegateTests")
+{
+
+TEST_CASE ("ScatterNd_1Dim_FP32_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    ScatterNd1DimTest<float>(tflite::TensorType_FLOAT32, backends);
+}
+
+TEST_CASE ("ScatterNd_1Dim_INT32_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    ScatterNd1DimTest<int32_t>(tflite::TensorType_INT32, backends);
+}
+
+TEST_CASE ("ScatterNd_1Dim_INT8_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    ScatterNd1DimTest<int8_t>(tflite::TensorType_INT8, backends);
+}
+
+TEST_CASE ("ScatterNd_1Dim_UINT8_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    ScatterNd1DimTest<uint8_t>(tflite::TensorType_UINT8, backends);
+}
+
+TEST_CASE ("ScatterNd_2Dim_FP32_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    ScatterNd2DimTest<float>(tflite::TensorType_FLOAT32, backends);
+}
+
+TEST_CASE ("ScatterNd_2Dim_INT32_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    ScatterNd2DimTest<int32_t>(tflite::TensorType_INT32, backends);
+}
+
+TEST_CASE ("ScatterNd_2Dim_INT8_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    ScatterNd2DimTest<int8_t>(tflite::TensorType_INT8, backends);
+}
+
+TEST_CASE ("ScatterNd_2Dim_UINT8_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    ScatterNd2DimTest<uint8_t>(tflite::TensorType_UINT8, backends);
+}
+
+TEST_CASE ("ScatterNd_2Dim_1Outter_1Inner_FP32_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    ScatterNd2Dim1Outter1InnerTest<float>(tflite::TensorType_FLOAT32, backends);
+}
+
+TEST_CASE ("ScatterNd_2Dim_1Outter_1Inner_INT32_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    ScatterNd2Dim1Outter1InnerTest<int32_t>(tflite::TensorType_INT32, backends);
+}
+
+TEST_CASE ("ScatterNd_2Dim_1Outter_1Inner_INT8_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    ScatterNd2Dim1Outter1InnerTest<int8_t>(tflite::TensorType_INT8, backends);
+}
+
+TEST_CASE ("ScatterNd_2Dim_1Outter_1Inner_UINT8_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    ScatterNd2Dim1Outter1InnerTest<uint8_t>(tflite::TensorType_UINT8, backends);
+}
+
+TEST_CASE ("ScatterNd_3Dim_FP32_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    ScatterNd3DimTest<float>(tflite::TensorType_FLOAT32, backends);
+}
+
+TEST_CASE ("ScatterNd_3Dim_INT32_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    ScatterNd3DimTest<int32_t>(tflite::TensorType_INT32, backends);
+}
+
+TEST_CASE ("ScatterNd_3Dim_INT8_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    ScatterNd3DimTest<int8_t>(tflite::TensorType_INT8, backends);
+}
+
+TEST_CASE ("ScatterNd_3Dim_UINT8_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    ScatterNd3DimTest<uint8_t>(tflite::TensorType_UINT8, backends);
+}
+
+TEST_CASE ("ScatterNd_3Dim_1Outter_2Inner_FP32_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    ScatterNd3Dim1Outter2InnerTest<float>(tflite::TensorType_FLOAT32, backends);
+}
+
+TEST_CASE ("ScatterNd_3Dim_1Outter_2Inner_INT32_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    ScatterNd3Dim1Outter2InnerTest<int32_t>(tflite::TensorType_INT32, backends);
+}
+
+TEST_CASE ("ScatterNd_3Dim_1Outter_2Inner_INT8_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    ScatterNd3Dim1Outter2InnerTest<int8_t>(tflite::TensorType_INT8, backends);
+}
+
+TEST_CASE ("ScatterNd_3Dim_1Outter_2Inner_UINT8_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    ScatterNd3Dim1Outter2InnerTest<uint8_t>(tflite::TensorType_UINT8, backends);
+}
+
+TEST_CASE ("ScatterNd_3Dim_2Outter_1Inner_FP32_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    ScatterNd3Dim2Outter1InnerTest<float>(tflite::TensorType_FLOAT32, backends);
+}
+
+TEST_CASE ("ScatterNd_3Dim_2Outter_1Inner_INT32_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    ScatterNd3Dim2Outter1InnerTest<int32_t>(tflite::TensorType_INT32, backends);
+}
+
+TEST_CASE ("ScatterNd_3Dim_2Outter_1Inner_INT8_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    ScatterNd3Dim2Outter1InnerTest<int8_t>(tflite::TensorType_INT8, backends);
+}
+
+TEST_CASE ("ScatterNd_3Dim_2Outter_1Inner_UINT8_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    ScatterNd3Dim2Outter1InnerTest<uint8_t>(tflite::TensorType_UINT8, backends);
+}
+
+TEST_CASE ("ScatterNd_4Dim_FP32_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    ScatterNdDim4<float>(tflite::TensorType_FLOAT32, backends);
+}
+
+TEST_CASE ("ScatterNd_4Dim_INT32_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    ScatterNdDim4<int32_t>(tflite::TensorType_INT32, backends);
+}
+
+TEST_CASE ("ScatterNd_4Dim_INT8_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    ScatterNdDim4<int8_t>(tflite::TensorType_INT8, backends);
+}
+
+TEST_CASE ("ScatterNd_4Dim_UINT8_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    ScatterNdDim4<uint8_t>(tflite::TensorType_UINT8, backends);
+}
+
+} // TEST_SUITE("ScatterNdDelegateTests")
+
+} // namespace armnnDelegate
\ No newline at end of file
diff --git a/delegate/test/ScatterNdTestHelper.hpp b/delegate/test/ScatterNdTestHelper.hpp
new file mode 100644
index 0000000000..5d2cfb011e
--- /dev/null
+++ b/delegate/test/ScatterNdTestHelper.hpp
@@ -0,0 +1,174 @@
+//
+// Copyright © 2024 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "TestUtils.hpp"
+
+#include <armnn_delegate.hpp>
+#include <DelegateTestInterpreter.hpp>
+
+#include <tensorflow/lite/version.h>
+
+namespace
+{
+
+std::vector<char> CreateScatterNdTfLiteModel(tflite::TensorType tensorType,
+                                             const std::vector<int32_t>& indicesShape,
+                                             const std::vector<int32_t>& updatesShape,
+                                             const std::vector<int32_t>& shapeShape,
+                                             const std::vector<int32_t>& outputShape,
+                                             const std::vector<int32_t>& shapeData,
+                                             float quantScale = 1.0f,
+                                             int quantOffset = 0)
+{
+    using namespace tflite;
+    flatbuffers::FlatBufferBuilder flatBufferBuilder;
+
+    std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    buffers.push_back(CreateBuffer(flatBufferBuilder)); // indices
+    buffers.push_back(CreateBuffer(flatBufferBuilder)); // updates
+    buffers.push_back(CreateBuffer(flatBufferBuilder,
+                                   flatBufferBuilder.CreateVector(
+                                       reinterpret_cast<const uint8_t*>(shapeData.data()),
+                                       sizeof(int32_t) * shapeData.size()))); // shape
+    buffers.push_back(CreateBuffer(flatBufferBuilder)); // output
+
+    auto quantizationParameters =
+        CreateQuantizationParameters(flatBufferBuilder,
+                                     0,
+                                     0,
+                                     flatBufferBuilder.CreateVector<float>({ quantScale }),
+                                     flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));
+
+    std::array<flatbuffers::Offset<Tensor>, 4> tensors;
+    tensors[0] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(indicesShape.data(),
+                                                                      indicesShape.size()),
+                              TensorType_INT32,
+                              1,
+                              flatBufferBuilder.CreateString("indices_tensor"),
+                              quantizationParameters);
+
+    tensors[1] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(updatesShape.data(),
+                                                                      updatesShape.size()),
+                              tensorType,
+                              2,
+                              flatBufferBuilder.CreateString("updates_tensor"),
+                              quantizationParameters);
+
+    tensors[2] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(shapeShape.data(),
+                                                                      shapeShape.size()),
+                              TensorType_INT32,
+                              3,
+                              flatBufferBuilder.CreateString("shape_tensor"),
+                              quantizationParameters);
+
+    tensors[3] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(outputShape.data(),
+                                                                      outputShape.size()),
+                              tensorType,
+                              4,
+                              flatBufferBuilder.CreateString("output_tensor"),
+                              quantizationParameters);
+
+    // Create Operator
+    tflite::BuiltinOptions operatorBuiltinOptionsType = tflite::BuiltinOptions_ScatterNdOptions;
+    flatbuffers::Offset<void> operatorBuiltinOptions = CreateScatterNdOptions(flatBufferBuilder).Union();
+
+    const std::vector<int32_t> operatorInputs { 0, 1, 2 };
+    const std::vector<int32_t> operatorOutputs { 3 };
+
+    flatbuffers::Offset<Operator> scatterNdOperator =
+        CreateOperator(flatBufferBuilder,
+                       0,
+                       flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
+                       operatorBuiltinOptionsType,
+                       operatorBuiltinOptions);
+
+    const std::vector<int32_t> subgraphInputs{ 0, 1, 2 };
+    const std::vector<int32_t> subgraphOutputs{ 3 };
+    flatbuffers::Offset<SubGraph> subgraph =
+        CreateSubGraph(flatBufferBuilder,
+                       flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
+                       flatBufferBuilder.CreateVector(&scatterNdOperator, 1));
+
+    flatbuffers::Offset<flatbuffers::String> modelDescription =
+        flatBufferBuilder.CreateString("ArmnnDelegate: ScatterNd Operator Model");
+    flatbuffers::Offset<OperatorCode> opCode = CreateOperatorCode(flatBufferBuilder,
+                                                                  tflite::BuiltinOperator_SCATTER_ND);
+
+    flatbuffers::Offset<Model> flatbufferModel =
+        CreateModel(flatBufferBuilder,
+                    TFLITE_SCHEMA_VERSION,
+                    flatBufferBuilder.CreateVector(&opCode, 1),
+                    flatBufferBuilder.CreateVector(&subgraph, 1),
+                    modelDescription,
+                    flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
+
+    flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);
+
+    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
+                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
+}
+
+template<typename T>
+void ScatterNdTestImpl(tflite::TensorType tensorType,
+                       std::vector<int32_t>& indicesShape,
+                       std::vector<int32_t>& indicesValues,
+                       std::vector<int32_t>& updatesShape,
+                       std::vector<T>& updatesValues,
+                       std::vector<int32_t>& shapeShape,
+                       std::vector<int32_t>& shapeValue,
+                       std::vector<int32_t>& expectedOutputShape,
+                       std::vector<T>& expectedOutputValues,
+                       const std::vector<armnn::BackendId>& backends = {},
+                       float quantScale = 1.0f,
+                       int quantOffset = 0)
+{
+    using namespace delegateTestInterpreter;
+
+    std::vector<char> modelBuffer = CreateScatterNdTfLiteModel(tensorType,
+                                                               indicesShape,
+                                                               updatesShape,
+                                                               shapeShape,
+                                                               expectedOutputShape,
+                                                               shapeValue,
+                                                               quantScale,
+                                                               quantOffset);
+
+    // Setup interpreter with just TFLite Runtime.
+    auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
+    CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
+    CHECK(tfLiteInterpreter.FillInputTensor<int32_t>(indicesValues, 0) == kTfLiteOk);
+    CHECK(tfLiteInterpreter.FillInputTensor<T>(updatesValues, 1) == kTfLiteOk);
+    CHECK(tfLiteInterpreter.FillInputTensor<int32_t>(shapeValue, 2) == kTfLiteOk);
+    CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
+    std::vector<T>       tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<T>(0);
+    std::vector<int32_t> tfLiteOutputShape  = tfLiteInterpreter.GetOutputShape(0);
+
+    // Setup interpreter with Arm NN Delegate applied.
+    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, CaptureAvailableBackends(backends));
+    CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
+    CHECK(armnnInterpreter.FillInputTensor<int32_t>(indicesValues, 0) == kTfLiteOk);
+    CHECK(armnnInterpreter.FillInputTensor<T>(updatesValues, 1) == kTfLiteOk);
+    CHECK(armnnInterpreter.FillInputTensor<int32_t>(shapeValue, 2) == kTfLiteOk);
+    CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
+    std::vector<T>       armnnOutputValues = armnnInterpreter.GetOutputResult<T>(0);
+    std::vector<int32_t> armnnOutputShape  = armnnInterpreter.GetOutputShape(0);
+
+    armnnDelegate::CompareOutputData<T>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
+    armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, expectedOutputShape);
+
+    tfLiteInterpreter.Cleanup();
+    armnnInterpreter.Cleanup();
+}
+
+} // anonymous namespace
\ No newline at end of file
diff --git a/docs/05_03_delegate.dox b/docs/05_03_delegate.dox
index dde50e13f7..75ebf9c4ac 100644
--- a/docs/05_03_delegate.dox
+++ b/docs/05_03_delegate.dox
@@ -172,6 +172,8 @@ The Arm NN SDK TensorFlow Lite delegate currently supports the following operators:
 
 - RSQRT
 
+- SCATTERND
+
 - SHAPE
 
 - SIN
-- cgit v1.2.1
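
For context, the snippet below is a minimal sketch of how an application would apply
the Arm NN delegate so that SCATTER_ND nodes in a model reach the new
VisitScatterNdOperator path. It is illustrative only and not part of this patch: it
assumes the standard armnnDelegate public API (DelegateOptions,
TfLiteArmnnDelegateCreate, TfLiteArmnnDelegateDelete), and the model path is a
placeholder.

    // Minimal sketch: run a TFLite model containing SCATTER_ND through the Arm NN delegate.
    #include <armnn_delegate.hpp>

    #include <tensorflow/lite/interpreter.h>
    #include <tensorflow/lite/kernels/register.h>
    #include <tensorflow/lite/model.h>

    #include <memory>
    #include <vector>

    int main()
    {
        // Load a model that contains a SCATTER_ND operator (placeholder path).
        auto model = tflite::FlatBufferModel::BuildFromFile("scatternd_model.tflite");
        tflite::ops::builtin::BuiltinOpResolver resolver;
        std::unique_ptr<tflite::Interpreter> interpreter;
        tflite::InterpreterBuilder(*model, resolver)(&interpreter);

        // Create the Arm NN delegate for the reference backend used by the tests above.
        std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
        armnnDelegate::DelegateOptions delegateOptions(backends);
        std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
            armnnDelegatePtr(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
                             armnnDelegate::TfLiteArmnnDelegateDelete);

        // SCATTER_ND nodes are checked and offloaded via VisitScatterNdOperator here.
        if (interpreter->ModifyGraphWithDelegate(armnnDelegatePtr.get()) != kTfLiteOk)
        {
            return 1;
        }

        interpreter->AllocateTensors();
        // ... fill the indices, updates and shape input tensors, then:
        interpreter->Invoke();
        return 0;
    }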