author    | Sadik Armagan <sadik.armagan@arm.com> | 2021-01-19 17:24:21 +0000
committer | Sadik Armagan <sadik.armagan@arm.com> | 2021-01-19 17:24:21 +0000
commit    | dc032fca290deb39af65050c254a701596b53fa8 (patch)
tree      | e3957a2651f0fbfe9a13f3ff1d2f092178578257 /delegate/src/test
parent    | 97bf84f6e162307fc3e8c53045ef0bc60a3e3289 (diff)
download  | armnn-dc032fca290deb39af65050c254a701596b53fa8.tar.gz
IVGCVSW-5399 'TfLiteDelegate: Implement the ArgMinMax operators'
* Added ARG_MIN and ARG_MAX support to armnn_delegate
Signed-off-by: Sadik Armagan <sadik.armagan@arm.com>
Change-Id: Ia000c4b64378e28320164edd4df2902ca13dcda6
Diffstat (limited to 'delegate/src/test')

-rw-r--r-- | delegate/src/test/ArgMinMaxTest.cpp       | 174
-rw-r--r-- | delegate/src/test/ArgMinMaxTestHelper.hpp | 198

2 files changed, 372 insertions, 0 deletions
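
The expectedOutputValues in ArgMinMaxTest.cpp below can be derived by hand: ARG_MAX and ARG_MIN remove the reduced axis from the input shape and return the index of the extreme element along that axis. The following standalone sketch (illustration only, not part of the patch; ArgExtremeAlongAxis is a hypothetical helper name) reproduces all three expected vectors for a row-major tensor:

// Standalone sketch (illustration only, not part of the patch): derive the
// expectedOutputValues used in ArgMinMaxTest.cpp by hand. The tensor is
// row-major; "outer" covers the axes before the reduced axis, "inner" the
// axes after it. ArgExtremeAlongAxis is a hypothetical helper name.
#include <cstdint>
#include <iostream>
#include <vector>

std::vector<int32_t> ArgExtremeAlongAxis(const std::vector<float>& data,
                                         const std::vector<int>& dims,
                                         int axis,
                                         bool findMax)
{
    if (axis < 0) { axis += static_cast<int>(dims.size()); } // e.g. -1 -> 3 for rank 4
    int outer = 1;
    int inner = 1;
    for (int i = 0; i < axis; ++i)                                 { outer *= dims[i]; }
    for (int i = axis + 1; i < static_cast<int>(dims.size()); ++i) { inner *= dims[i]; }
    const int axisSize = dims[axis];

    std::vector<int32_t> result(outer * inner);
    for (int o = 0; o < outer; ++o)
    {
        for (int i = 0; i < inner; ++i)
        {
            int best = 0;
            for (int a = 1; a < axisSize; ++a)
            {
                const float candidate = data[(o * axisSize + a) * inner + i];
                const float current   = data[(o * axisSize + best) * inner + i];
                if (findMax ? candidate > current : candidate < current) { best = a; }
            }
            result[o * inner + i] = best;
        }
    }
    return result;
}

int main()
{
    const std::vector<float> input = { 1, 2, 3, 4,         5, 6, 7, 8,
                                       10, 20, 30, 40,     50, 60, 70, 80,
                                       100, 200, 300, 400, 500, 600, 700, 800 };

    // ArgMaxFP32Test: shape {1,3,2,4}, axis 2 -> twelve 1s, because the second
    // 2x4 row of every block holds the larger values.
    for (int32_t v : ArgExtremeAlongAxis(input, { 1, 3, 2, 4 }, 2, true))  { std::cout << v << ' '; }
    std::cout << '\n';

    // ArgMinFP32Test: shape {1,3,2,4}, axis 3 -> six 0s, because values grow
    // along the innermost axis, so the minimum is always at index 0.
    for (int32_t v : ArgExtremeAlongAxis(input, { 1, 3, 2, 4 }, 3, false)) { std::cout << v << ' '; }
    std::cout << '\n';

    // ArgMaxUint8Test: shape {1,1,1,5}, axis -1 resolves to axis 3; the
    // maximum of { 5, 2, 8, 10, 9 } is 10, at index 3.
    for (int32_t v : ArgExtremeAlongAxis({ 5, 2, 8, 10, 9 }, { 1, 1, 1, 5 }, -1, true)) { std::cout << v << ' '; }
    std::cout << '\n';
    return 0;
}

Note that the output shapes in the tests match this reduction: { 1, 3, 2, 4 } with axis 2 removed gives { 1, 3, 4 }, with axis 3 removed gives { 1, 3, 2 }, and { 1, 1, 1, 5 } with axis -1 removed gives { 1, 1, 1 }.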
diff --git a/delegate/src/test/ArgMinMaxTest.cpp b/delegate/src/test/ArgMinMaxTest.cpp
new file mode 100644
index 0000000000..bf60a77cb2
--- /dev/null
+++ b/delegate/src/test/ArgMinMaxTest.cpp
@@ -0,0 +1,174 @@
+//
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "ArgMinMaxTestHelper.hpp"
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <tensorflow/lite/schema/schema_generated.h>
+
+#include <doctest/doctest.h>
+
+namespace armnnDelegate
+{
+
+void ArgMaxFP32Test(std::vector<armnn::BackendId>& backends, int axisValue)
+{
+    // Set input data
+    std::vector<int32_t> inputShape { 1, 3, 2, 4 };
+    std::vector<int32_t> outputShape { 1, 3, 4 };
+    std::vector<int32_t> axisShape { 1 };
+
+    std::vector<float> inputValues = { 1.0f,   2.0f,   3.0f,   4.0f,
+                                       5.0f,   6.0f,   7.0f,   8.0f,
+
+                                       10.0f,  20.0f,  30.0f,  40.0f,
+                                       50.0f,  60.0f,  70.0f,  80.0f,
+
+                                       100.0f, 200.0f, 300.0f, 400.0f,
+                                       500.0f, 600.0f, 700.0f, 800.0f };
+
+    std::vector<int32_t> expectedOutputValues = { 1, 1, 1, 1,
+                                                  1, 1, 1, 1,
+                                                  1, 1, 1, 1 };
+
+    ArgMinMaxTest<float, int32_t>(tflite::BuiltinOperator_ARG_MAX,
+                                  ::tflite::TensorType_FLOAT32,
+                                  backends,
+                                  inputShape,
+                                  axisShape,
+                                  outputShape,
+                                  inputValues,
+                                  expectedOutputValues,
+                                  axisValue,
+                                  ::tflite::TensorType_INT32);
+}
+
+void ArgMinFP32Test(std::vector<armnn::BackendId>& backends, int axisValue)
+{
+    // Set input data
+    std::vector<int32_t> inputShape { 1, 3, 2, 4 };
+    std::vector<int32_t> outputShape { 1, 3, 2 };
+    std::vector<int32_t> axisShape { 1 };
+
+    std::vector<float> inputValues = { 1.0f,   2.0f,   3.0f,   4.0f,
+                                       5.0f,   6.0f,   7.0f,   8.0f,
+
+                                       10.0f,  20.0f,  30.0f,  40.0f,
+                                       50.0f,  60.0f,  70.0f,  80.0f,
+
+                                       100.0f, 200.0f, 300.0f, 400.0f,
+                                       500.0f, 600.0f, 700.0f, 800.0f };
+
+    std::vector<int32_t> expectedOutputValues = { 0, 0,
+                                                  0, 0,
+                                                  0, 0 };
+
+    ArgMinMaxTest<float, int32_t>(tflite::BuiltinOperator_ARG_MIN,
+                                  ::tflite::TensorType_FLOAT32,
+                                  backends,
+                                  inputShape,
+                                  axisShape,
+                                  outputShape,
+                                  inputValues,
+                                  expectedOutputValues,
+                                  axisValue,
+                                  ::tflite::TensorType_INT32);
+}
+
+void ArgMaxUint8Test(std::vector<armnn::BackendId>& backends, int axisValue)
+{
+    // Set input data
+    std::vector<int32_t> inputShape { 1, 1, 1, 5 };
+    std::vector<int32_t> outputShape { 1, 1, 1 };
+    std::vector<int32_t> axisShape { 1 };
+
+    std::vector<uint8_t> inputValues = { 5, 2, 8, 10, 9 };
+
+    std::vector<int32_t> expectedOutputValues = { 3 };
+
+    ArgMinMaxTest<uint8_t, int32_t>(tflite::BuiltinOperator_ARG_MAX,
+                                    ::tflite::TensorType_UINT8,
+                                    backends,
+                                    inputShape,
+                                    axisShape,
+                                    outputShape,
+                                    inputValues,
+                                    expectedOutputValues,
+                                    axisValue,
+                                    ::tflite::TensorType_INT32);
+}
+
+TEST_SUITE("ArgMinMax_CpuRefTests")
+{
+
+TEST_CASE ("ArgMaxFP32Test_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    ArgMaxFP32Test(backends, 2);
+}
+
+TEST_CASE ("ArgMinFP32Test_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    ArgMinFP32Test(backends, 3);
+}
+
+TEST_CASE ("ArgMaxUint8Test_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    ArgMaxUint8Test(backends, -1);
+}
+
+} // TEST_SUITE("ArgMinMax_CpuRefTests")
+
+TEST_SUITE("ArgMinMax_CpuAccTests")
+{
+
+TEST_CASE ("ArgMaxFP32Test_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    ArgMaxFP32Test(backends, 2);
+}
+
+TEST_CASE ("ArgMinFP32Test_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    ArgMinFP32Test(backends, 3);
+}
+
+TEST_CASE ("ArgMaxUint8Test_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    ArgMaxUint8Test(backends, -1);
+}
+
+} // TEST_SUITE("ArgMinMax_CpuAccTests")
+
+TEST_SUITE("ArgMinMax_GpuAccTests")
+{
+
+TEST_CASE ("ArgMaxFP32Test_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    ArgMaxFP32Test(backends, 2);
+}
+
+TEST_CASE ("ArgMinFP32Test_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    ArgMinFP32Test(backends, 3);
+}
+
+TEST_CASE ("ArgMaxUint8Test_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    ArgMaxUint8Test(backends, -1);
+}
+
+} // TEST_SUITE("ArgMinMax_GpuAccTests")
+
+} // namespace armnnDelegate
\ No newline at end of file
diff --git a/delegate/src/test/ArgMinMaxTestHelper.hpp b/delegate/src/test/ArgMinMaxTestHelper.hpp
new file mode 100644
index 0000000000..d071653ac7
--- /dev/null
+++ b/delegate/src/test/ArgMinMaxTestHelper.hpp
@@ -0,0 +1,198 @@
+//
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "TestUtils.hpp"
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <tensorflow/lite/interpreter.h>
+#include <tensorflow/lite/kernels/register.h>
+#include <tensorflow/lite/model.h>
+#include <tensorflow/lite/schema/schema_generated.h>
+#include <tensorflow/lite/version.h>
+
+#include <doctest/doctest.h>
+
+namespace
+{
+
+template <typename InputT, typename OutputT>
+std::vector<char> CreateArgMinMaxTfLiteModel(tflite::BuiltinOperator argMinMaxOperatorCode,
+                                             tflite::TensorType tensorType,
+                                             const std::vector<int32_t>& inputTensorShape,
+                                             const std::vector<int32_t>& axisTensorShape,
+                                             const std::vector<int32_t>& outputTensorShape,
+                                             const std::vector<OutputT> axisValue,
+                                             tflite::TensorType outputType,
+                                             float quantScale = 1.0f,
+                                             int quantOffset = 0)
+{
+    using namespace tflite;
+    flatbuffers::FlatBufferBuilder flatBufferBuilder;
+
+    auto quantizationParameters =
+        CreateQuantizationParameters(flatBufferBuilder,
+                                     0,
+                                     0,
+                                     flatBufferBuilder.CreateVector<float>({ quantScale }),
+                                     flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));
+
+    auto inputTensor = CreateTensor(flatBufferBuilder,
+                                    flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
+                                                                            inputTensorShape.size()),
+                                    tensorType,
+                                    0,
+                                    flatBufferBuilder.CreateString("input"),
+                                    quantizationParameters);
+
+    auto axisTensor = CreateTensor(flatBufferBuilder,
+                                   flatBufferBuilder.CreateVector<int32_t>(axisTensorShape.data(),
+                                                                           axisTensorShape.size()),
+                                   tflite::TensorType_INT32,
+                                   1,
+                                   flatBufferBuilder.CreateString("axis"));
+
+    auto outputTensor = CreateTensor(flatBufferBuilder,
+                                     flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
+                                                                             outputTensorShape.size()),
+                                     outputType,
+                                     2,
+                                     flatBufferBuilder.CreateString("output"),
+                                     quantizationParameters);
+
+    std::vector<flatbuffers::Offset<Tensor>> tensors = { inputTensor, axisTensor, outputTensor };
+
+    std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
+    buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
+    buffers.push_back(
+        CreateBuffer(flatBufferBuilder,
+                     flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(axisValue.data()),
+                                                    sizeof(OutputT))));
+    buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
+
+    std::vector<int32_t> operatorInputs = {{ 0, 1 }};
+    std::vector<int> subgraphInputs = {{ 0, 1 }};
+
+    tflite::BuiltinOptions operatorBuiltinOptionsType = BuiltinOptions_ArgMaxOptions;
+    flatbuffers::Offset<void> operatorBuiltinOptions = CreateArgMaxOptions(flatBufferBuilder, outputType).Union();
+
+    if (argMinMaxOperatorCode == tflite::BuiltinOperator_ARG_MIN)
+    {
+        operatorBuiltinOptionsType = BuiltinOptions_ArgMinOptions;
+        operatorBuiltinOptions = CreateArgMinOptions(flatBufferBuilder, outputType).Union();
+    }
+
+    // create operator
+    const std::vector<int32_t> operatorOutputs{{ 2 }};
+    flatbuffers::Offset<Operator> argMinMaxOperator =
+        CreateOperator(flatBufferBuilder,
+                       0,
+                       flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
+                       operatorBuiltinOptionsType,
+                       operatorBuiltinOptions);
+
+    const std::vector<int> subgraphOutputs{{ 2 }};
+    flatbuffers::Offset<SubGraph> subgraph =
+        CreateSubGraph(flatBufferBuilder,
+                       flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
+                       flatBufferBuilder.CreateVector(&argMinMaxOperator, 1));
+
+    flatbuffers::Offset<flatbuffers::String> modelDescription =
+        flatBufferBuilder.CreateString("ArmnnDelegate: ArgMinMax Operator Model");
+    flatbuffers::Offset<OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder,
+                                                                        argMinMaxOperatorCode);
+
+    flatbuffers::Offset<Model> flatbufferModel =
+        CreateModel(flatBufferBuilder,
+                    TFLITE_SCHEMA_VERSION,
+                    flatBufferBuilder.CreateVector(&operatorCode, 1),
+                    flatBufferBuilder.CreateVector(&subgraph, 1),
+                    modelDescription,
+                    flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
+
+    flatBufferBuilder.Finish(flatbufferModel);
+
+    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
+                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
+}
+
+template <typename InputT, typename OutputT>
+void ArgMinMaxTest(tflite::BuiltinOperator argMinMaxOperatorCode,
+                   tflite::TensorType tensorType,
+                   const std::vector<armnn::BackendId>& backends,
+                   const std::vector<int32_t>& inputShape,
+                   const std::vector<int32_t>& axisShape,
+                   std::vector<int32_t>& outputShape,
+                   std::vector<InputT>& inputValues,
+                   std::vector<OutputT>& expectedOutputValues,
+                   OutputT axisValue,
+                   tflite::TensorType outputType,
+                   float quantScale = 1.0f,
+                   int quantOffset = 0)
+{
+    using namespace tflite;
+    std::vector<char> modelBuffer = CreateArgMinMaxTfLiteModel<InputT, OutputT>(argMinMaxOperatorCode,
+                                                                                tensorType,
+                                                                                inputShape,
+                                                                                axisShape,
+                                                                                outputShape,
+                                                                                {axisValue},
+                                                                                outputType,
+                                                                                quantScale,
+                                                                                quantOffset);
+
+    const Model* tfLiteModel = GetModel(modelBuffer.data());
+    CHECK(tfLiteModel != nullptr);
+
+    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+              (&armnnDelegateInterpreter) == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter != nullptr);
+    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
+
+    std::unique_ptr<Interpreter> tfLiteInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+              (&tfLiteInterpreter) == kTfLiteOk);
+    CHECK(tfLiteInterpreter != nullptr);
+    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
+
+    // Create the ArmNN Delegate
+    armnnDelegate::DelegateOptions delegateOptions(backends);
+    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
+        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
+                         armnnDelegate::TfLiteArmnnDelegateDelete);
+    CHECK(theArmnnDelegate != nullptr);
+    // Modify armnnDelegateInterpreter to use armnnDelegate
+    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
+
+    // Set input data
+    armnnDelegate::FillInput<InputT>(tfLiteInterpreter, 0, inputValues);
+    armnnDelegate::FillInput<InputT>(armnnDelegateInterpreter, 0, inputValues);
+
+    // Run EnqueueWorkload
+    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
+
+    // Compare output data
+    auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[0];
+    auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<OutputT>(tfLiteDelegateOutputId);
+    auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0];
+    auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<OutputT>(armnnDelegateOutputId);
+
+    for (size_t i = 0; i < expectedOutputValues.size(); i++)
+    {
+        CHECK(expectedOutputValues[i] == armnnDelegateOutputData[i]);
+        CHECK(tfLiteDelegateOutputData[i] == expectedOutputValues[i]);
+        CHECK(tfLiteDelegateOutputData[i] == armnnDelegateOutputData[i]);
+    }
+}
+
+} // anonymous namespace
\ No newline at end of file
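
Aside from flatbuffer model construction, the heart of ArgMinMaxTestHelper.hpp is the delegate hook-up: build an interpreter from the buffer, create the ArmNN delegate for the chosen backends, and rewire the graph through it. Distilled to its essentials it looks like the sketch below, which uses only calls that appear in the patch itself; the function name RunWithArmnnDelegate is made up, CpuRef is an arbitrary example backend, and asserts stand in for the helper's doctest CHECKs:

// Minimal sketch (illustration only): attach the ArmNN delegate to a TfLite
// interpreter and run inference, as the ArgMinMaxTest helper does above.
// Assumes modelBuffer holds a valid TfLite flatbuffer.
#include <armnn_delegate.hpp>

#include <tensorflow/lite/interpreter.h>
#include <tensorflow/lite/kernels/register.h>
#include <tensorflow/lite/model.h>

#include <cassert>
#include <memory>
#include <vector>

void RunWithArmnnDelegate(const std::vector<char>& modelBuffer) // hypothetical name
{
    const tflite::Model* tfLiteModel = tflite::GetModel(modelBuffer.data());
    assert(tfLiteModel != nullptr);

    std::unique_ptr<tflite::Interpreter> interpreter;
    assert(tflite::InterpreterBuilder(tfLiteModel, tflite::ops::builtin::BuiltinOpResolver())
               (&interpreter) == kTfLiteOk);
    assert(interpreter->AllocateTensors() == kTfLiteOk);

    // CpuRef chosen here for illustration; CpuAcc/GpuAcc work the same way.
    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
    armnnDelegate::DelegateOptions delegateOptions(backends);

    // The delegate is created and destroyed through C-style factory functions,
    // so wrap it in a unique_ptr with a custom deleter.
    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
                         armnnDelegate::TfLiteArmnnDelegateDelete);
    assert(theArmnnDelegate != nullptr);

    // Hand supported operators (now including ARG_MIN/ARG_MAX) to ArmNN.
    assert(interpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
    assert(interpreter->Invoke() == kTfLiteOk);
}

The helper goes one step further than this sketch: it runs the same model through a second, undelegated interpreter and compares the two outputs elementwise against the expected values, which is what actually validates the new ARG_MIN/ARG_MAX path against the stock TfLite kernels.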