author    | Sadik Armagan <sadik.armagan@arm.com> | 2020-10-27 17:30:18 +0000
committer | Jim Flynn <jim.flynn@arm.com> | 2020-10-28 11:56:16 +0000
commit    | 0534e0364473c0b1244f96462cbde1808e92ce81 (patch)
tree      | 454ae9378d1882d945eaa699711c84ba6656f87e /delegate/src/test/ElementwiseUnaryTestHelper.hpp
parent    | bf18a266bf5d0fe74db7cca0f54fb1ae25869da8 (diff)
download  | armnn-0534e0364473c0b1244f96462cbde1808e92ce81.tar.gz
IVGCVSW-5378 'TfLiteDelegate: Implement the ElementWiseUnary operators'
* Moved ElementwiseUnary operators tests into single file
* Implemented FP32 test for supported ElementwiseUnary operators
Signed-off-by: Sadik Armagan <sadik.armagan@arm.com>
Change-Id: I4b7eab190c3c8edb50927b8e1e94dd353597efcb
Diffstat (limited to 'delegate/src/test/ElementwiseUnaryTestHelper.hpp')
-rw-r--r-- | delegate/src/test/ElementwiseUnaryTestHelper.hpp | 141
1 file changed, 141 insertions(+), 0 deletions(-)
diff --git a/delegate/src/test/ElementwiseUnaryTestHelper.hpp b/delegate/src/test/ElementwiseUnaryTestHelper.hpp
new file mode 100644
index 0000000000..4d45f4e964
--- /dev/null
+++ b/delegate/src/test/ElementwiseUnaryTestHelper.hpp
@@ -0,0 +1,141 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <tensorflow/lite/interpreter.h>
+#include <tensorflow/lite/kernels/register.h>
+#include <tensorflow/lite/model.h>
+#include <tensorflow/lite/schema/schema_generated.h>
+#include <tensorflow/lite/version.h>
+
+#include <doctest/doctest.h>
+
+namespace
+{
+
+std::vector<char> CreateElementwiseUnaryTfLiteModel(tflite::BuiltinOperator unaryOperatorCode,
+                                                    tflite::TensorType tensorType,
+                                                    const std::vector<int32_t>& tensorShape)
+{
+    using namespace tflite;
+    flatbuffers::FlatBufferBuilder flatBufferBuilder;
+
+    std::array<flatbuffers::Offset<tflite::Buffer>, 1> buffers;
+    buffers[0] = CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({}));
+
+    std::array<flatbuffers::Offset<Tensor>, 2> tensors;
+    tensors[0] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(), tensorShape.size()),
+                              tensorType);
+    tensors[1] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(), tensorShape.size()),
+                              tensorType);
+
+    // Create operator
+    const std::vector<int> operatorInputs{ 0 };
+    const std::vector<int> operatorOutputs{ 1 };
+    flatbuffers::Offset<Operator> unaryOperator =
+        CreateOperator(flatBufferBuilder,
+                       0,
+                       flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()));
+
+    const std::vector<int> subgraphInputs{ 0 };
+    const std::vector<int> subgraphOutputs{ 1 };
+    flatbuffers::Offset<SubGraph> subgraph =
+        CreateSubGraph(flatBufferBuilder,
+                       flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
+                       flatBufferBuilder.CreateVector(&unaryOperator, 1));
+
+    flatbuffers::Offset<flatbuffers::String> modelDescription =
+        flatBufferBuilder.CreateString("ArmnnDelegate: Elementwise Unary Operator Model");
+    flatbuffers::Offset<OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder, unaryOperatorCode);
+
+    flatbuffers::Offset<Model> flatbufferModel =
+        CreateModel(flatBufferBuilder,
+                    TFLITE_SCHEMA_VERSION,
+                    flatBufferBuilder.CreateVector(&operatorCode, 1),
+                    flatBufferBuilder.CreateVector(&subgraph, 1),
+                    modelDescription,
+                    flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
+
+    flatBufferBuilder.Finish(flatbufferModel);
+
+    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
+                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
+}
+
+void ElementwiseUnaryFP32Test(tflite::BuiltinOperator unaryOperatorCode,
+                              std::vector<armnn::BackendId>& backends,
+                              std::vector<float>& inputValues,
+                              std::vector<float>& expectedOutputValues)
+{
+    using namespace tflite;
+    const std::vector<int32_t> inputShape { 3, 1, 2 };
+    std::vector<char> modelBuffer = CreateElementwiseUnaryTfLiteModel(unaryOperatorCode,
+                                                                      ::tflite::TensorType_FLOAT32,
+                                                                      inputShape);
+
+    const Model* tfLiteModel = GetModel(modelBuffer.data());
+
+    // Create TfLite Interpreters
+    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+              (&armnnDelegateInterpreter) == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter != nullptr);
+    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
+
+    std::unique_ptr<Interpreter> tfLiteInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+              (&tfLiteInterpreter) == kTfLiteOk);
+    CHECK(tfLiteInterpreter != nullptr);
+    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
+
+    // Create the ArmNN Delegate
+    armnnDelegate::DelegateOptions delegateOptions(backends);
+    auto armnnDelegate = TfLiteArmnnDelegateCreate(delegateOptions);
+    CHECK(armnnDelegate != nullptr);
+
+    // Modify armnnDelegateInterpreter to use armnnDelegate
+    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(armnnDelegate) == kTfLiteOk);
+
+    // Set input data
+    auto tfLiteDelegateInputId = tfLiteInterpreter->inputs()[0];
+    auto tfLiteDelegateInputData = tfLiteInterpreter->typed_tensor<float>(tfLiteDelegateInputId);
+    for (unsigned int i = 0; i < inputValues.size(); ++i)
+    {
+        tfLiteDelegateInputData[i] = inputValues[i];
+    }
+
+    auto armnnDelegateInputId = armnnDelegateInterpreter->inputs()[0];
+    auto armnnDelegateInputData = armnnDelegateInterpreter->typed_tensor<float>(armnnDelegateInputId);
+    for (unsigned int i = 0; i < inputValues.size(); ++i)
+    {
+        armnnDelegateInputData[i] = inputValues[i];
+    }
+
+    // Run EnqueueWorkload
+    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
+
+    // Compare output data
+    auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[0];
+    auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<float>(tfLiteDelegateOutputId);
+    auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0];
+    auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<float>(armnnDelegateOutputId);
+    for (size_t i = 0; i < inputValues.size(); i++)
+    {
+        CHECK(expectedOutputValues[i] == armnnDelegateOutputData[i]);
+        CHECK(tfLiteDelegateOutputData[i] == armnnDelegateOutputData[i]);
+    }
+}
+
+} // anonymous namespace
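As a usage illustration (not part of this commit): a doctest case along the following lines could drive the helper for a single operator. The test name and data values are hypothetical; since ElementwiseUnaryFP32Test fixes the input shape to { 3, 1, 2 }, exactly six values are required.

    // Hypothetical doctest case exercising the helper for the ABS operator
    // on the CPU reference backend. Expected outputs are the element-wise
    // absolute values of the inputs.
    TEST_CASE ("Abs_Float32_CpuRef_Test")
    {
        std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
        std::vector<float> inputValues { -0.1f, -0.2f, -0.3f, 0.1f, 0.2f, 0.3f };
        std::vector<float> expectedOutputValues { 0.1f, 0.2f, 0.3f, 0.1f, 0.2f, 0.3f };
        ElementwiseUnaryFP32Test(tflite::BuiltinOperator_ABS,
                                 backends,
                                 inputValues,
                                 expectedOutputValues);
    }

Each supported unary operator (e.g. EXP, NEG, RSQRT, SQRT) would get an analogous case, differing only in the BuiltinOperator enum value and the expected outputs; the helper handles model construction, delegate registration, and the TfLite-versus-delegate output comparison.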