author    | Teresa Charlin <teresa.charlinreyes@arm.com> | 2022-04-25 18:23:41 +0100
committer | TeresaARM <teresa.charlinreyes@arm.com>      | 2022-05-04 12:25:18 +0000
commit    | d5c0ed24ce91ee0da1dcb5858da16f0f8a3d3172
tree      | 4e05c08d5c224e7df9f7ea358e2c598c6be18ff9 /delegate/src/test/GatherNdTestHelper.hpp
parent    | 91a53eab529d88f78572b1155bfd07eb5de141f4
download  | armnn-d5c0ed24ce91ee0da1dcb5858da16f0f8a3d3172.tar.gz
IVGCVSW-6858 Add GATHERNd Support to the TfLite Delegate
Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com>
Change-Id: I56418875b3bb2ae45b5c69bfeaafa1a6126b8085
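For context on what the new helper exercises: GATHER_ND reads the trailing dimension of the indices tensor as multi-dimensional coordinates into the params tensor and copies out the addressed slices. The standalone sketch below (not part of this commit; all names are illustrative) shows the semantics for the simplest case, a 2-D params tensor with indices of shape [N, 1], where each index row selects one whole row of params:

    // Illustrative GATHER_ND semantics sketch, not code from this commit.
    // params has shape [3, 2] (row-major); indices has shape [2, 1], so each
    // index picks a full row of params and the output has shape [2, 2].
    #include <cstdint>
    #include <iostream>
    #include <vector>

    int main()
    {
        std::vector<float>   params  {1.f, 2.f, 3.f, 4.f, 5.f, 6.f}; // [3, 2]
        const int32_t        rowSize = 2;
        std::vector<int32_t> indices {2, 0};                         // [2, 1]

        std::vector<float> output;
        for (int32_t i : indices)
        {
            // Copy the whole row addressed by this index.
            for (int32_t j = 0; j < rowSize; ++j)
            {
                output.push_back(params[i * rowSize + j]);
            }
        }
        for (float v : output) { std::cout << v << " "; } // prints: 5 6 1 2
        std::cout << std::endl;
    }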
Diffstat (limited to 'delegate/src/test/GatherNdTestHelper.hpp')
-rw-r--r-- | delegate/src/test/GatherNdTestHelper.hpp | 178
1 file changed, 178 insertions, 0 deletions
diff --git a/delegate/src/test/GatherNdTestHelper.hpp b/delegate/src/test/GatherNdTestHelper.hpp
new file mode 100644
index 0000000000..f475584dc5
--- /dev/null
+++ b/delegate/src/test/GatherNdTestHelper.hpp
@@ -0,0 +1,178 @@
+//
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "TestUtils.hpp"
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <tensorflow/lite/interpreter.h>
+#include <tensorflow/lite/kernels/register.h>
+#include <tensorflow/lite/model.h>
+#include <tensorflow/lite/schema/schema_generated.h>
+#include <tensorflow/lite/version.h>
+
+#include <doctest/doctest.h>
+
+namespace
+{
+
+std::vector<char> CreateGatherNdTfLiteModel(tflite::TensorType tensorType,
+                                            std::vector<int32_t>& paramsShape,
+                                            std::vector<int32_t>& indicesShape,
+                                            const std::vector<int32_t>& expectedOutputShape,
+                                            float quantScale = 1.0f,
+                                            int quantOffset = 0)
+{
+    using namespace tflite;
+    flatbuffers::FlatBufferBuilder flatBufferBuilder;
+
+    std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
+    buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
+
+    auto quantizationParameters =
+        CreateQuantizationParameters(flatBufferBuilder,
+                                     0,
+                                     0,
+                                     flatBufferBuilder.CreateVector<float>({quantScale}),
+                                     flatBufferBuilder.CreateVector<int64_t>({quantOffset}));
+
+    std::array<flatbuffers::Offset<Tensor>, 3> tensors;
+    tensors[0] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(paramsShape.data(),
+                                                                      paramsShape.size()),
+                              tensorType,
+                              0,
+                              flatBufferBuilder.CreateString("params"),
+                              quantizationParameters);
+    tensors[1] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(indicesShape.data(),
+                                                                      indicesShape.size()),
+                              ::tflite::TensorType_INT32,
+                              0,
+                              flatBufferBuilder.CreateString("indices"),
+                              quantizationParameters);
+    tensors[2] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(expectedOutputShape.data(),
+                                                                      expectedOutputShape.size()),
+                              tensorType,
+                              0,
+                              flatBufferBuilder.CreateString("output"),
+                              quantizationParameters);
+
+    // Create operator
+    tflite::BuiltinOptions operatorBuiltinOptionsType = tflite::BuiltinOptions_GatherNdOptions;
+    flatbuffers::Offset<void> operatorBuiltinOptions = CreateGatherNdOptions(flatBufferBuilder).Union();
+
+    const std::vector<int> operatorInputs{{0, 1}};
+    const std::vector<int> operatorOutputs{2};
+    flatbuffers::Offset<Operator> controlOperator =
+        CreateOperator(flatBufferBuilder,
+                       0,
+                       flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(),
+                                                               operatorInputs.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(),
+                                                               operatorOutputs.size()),
+                       operatorBuiltinOptionsType,
+                       operatorBuiltinOptions);
+
+    const std::vector<int> subgraphInputs{{0, 1}};
+    const std::vector<int> subgraphOutputs{2};
+    flatbuffers::Offset<SubGraph> subgraph =
+        CreateSubGraph(flatBufferBuilder,
+                       flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(),
+                                                               subgraphInputs.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(),
+                                                               subgraphOutputs.size()),
+                       flatBufferBuilder.CreateVector(&controlOperator, 1));
+
+    flatbuffers::Offset<flatbuffers::String> modelDescription =
+        flatBufferBuilder.CreateString("ArmnnDelegate: GATHER_ND Operator Model");
+    flatbuffers::Offset<OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder,
+                                                                        BuiltinOperator_GATHER_ND);
+
+    flatbuffers::Offset<Model> flatbufferModel =
+        CreateModel(flatBufferBuilder,
+                    TFLITE_SCHEMA_VERSION,
+                    flatBufferBuilder.CreateVector(&operatorCode, 1),
+                    flatBufferBuilder.CreateVector(&subgraph, 1),
+                    modelDescription,
+                    flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
+
+    flatBufferBuilder.Finish(flatbufferModel);
+
+    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
+                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
+}
+
+template<typename T>
+void GatherNdTest(tflite::TensorType tensorType,
+                  std::vector<armnn::BackendId>& backends,
+                  std::vector<int32_t>& paramsShape,
+                  std::vector<int32_t>& indicesShape,
+                  std::vector<int32_t>& expectedOutputShape,
+                  std::vector<T>& paramsValues,
+                  std::vector<int32_t>& indicesValues,
+                  std::vector<T>& expectedOutputValues,
+                  float quantScale = 1.0f,
+                  int quantOffset = 0)
+{
+    using namespace tflite;
+    std::vector<char> modelBuffer = CreateGatherNdTfLiteModel(tensorType,
+                                                              paramsShape,
+                                                              indicesShape,
+                                                              expectedOutputShape,
+                                                              quantScale,
+                                                              quantOffset);
+    const Model* tfLiteModel = GetModel(modelBuffer.data());
+
+    // Create TfLite Interpreters
+    std::unique_ptr<Interpreter> armnnDelegate;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+              (&armnnDelegate) == kTfLiteOk);
+    CHECK(armnnDelegate != nullptr);
+    CHECK(armnnDelegate->AllocateTensors() == kTfLiteOk);
+
+    std::unique_ptr<Interpreter> tfLiteDelegate;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+              (&tfLiteDelegate) == kTfLiteOk);
+    CHECK(tfLiteDelegate != nullptr);
+    CHECK(tfLiteDelegate->AllocateTensors() == kTfLiteOk);
+
+    // Create the ArmNN Delegate
+    armnnDelegate::DelegateOptions delegateOptions(backends);
+    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
+        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
+                         armnnDelegate::TfLiteArmnnDelegateDelete);
+    CHECK(theArmnnDelegate != nullptr);
+
+    // Modify armnnDelegate interpreter to use the ArmNN delegate
+    CHECK(armnnDelegate->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
+
+    // Set input data
+    armnnDelegate::FillInput<T>(tfLiteDelegate, 0, paramsValues);
+    armnnDelegate::FillInput<T>(armnnDelegate, 0, paramsValues);
+    armnnDelegate::FillInput<int32_t>(tfLiteDelegate, 1, indicesValues);
+    armnnDelegate::FillInput<int32_t>(armnnDelegate, 1, indicesValues);
+
+    // Run EnqueueWorkload
+    CHECK(tfLiteDelegate->Invoke() == kTfLiteOk);
+    CHECK(armnnDelegate->Invoke() == kTfLiteOk);
+
+    // Compare output data
+    armnnDelegate::CompareOutputData<T>(tfLiteDelegate,
+                                        armnnDelegate,
+                                        expectedOutputShape,
+                                        expectedOutputValues,
+                                        0);
+
+    tfLiteDelegate.reset(nullptr);
+    armnnDelegate.reset(nullptr);
+}
+} // anonymous namespace
\ No newline at end of file
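The helper runs the same model through a plain TfLite interpreter and an ArmNN-delegated interpreter and checks that the two agree. The commit's actual test cases live alongside this header (in delegate/src/test/GatherNdTest.cpp, not shown in this diff); the sketch below is a representative invocation with made-up values, matching the helper's signature:

    // Representative usage sketch of GatherNdTest; values and test name are
    // illustrative, not the test cases shipped with this commit.
    #include "GatherNdTestHelper.hpp"

    TEST_CASE("GatherNd_Uint8_Sketch")
    {
        std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };

        std::vector<int32_t> paramsShape         { 5, 2 };
        std::vector<int32_t> indicesShape        { 3, 1 };
        std::vector<int32_t> expectedOutputShape { 3, 2 };

        // params rows: [1,2] [3,4] [5,6] [7,8] [9,10]
        std::vector<uint8_t> paramsValues  { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };
        // Gather rows 1, 0 and 4.
        std::vector<int32_t> indicesValues { 1, 0, 4 };
        std::vector<uint8_t> expectedOutputValues { 3, 4, 1, 2, 9, 10 };

        GatherNdTest<uint8_t>(tflite::TensorType_UINT8,
                              backends,
                              paramsShape,
                              indicesShape,
                              expectedOutputShape,
                              paramsValues,
                              indicesValues,
                              expectedOutputValues);
    }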