From 29b49cf1771620233f9c16dfa0214e6d8a64c4d7 Mon Sep 17 00:00:00 2001
From: Sadik Armagan
Date: Mon, 22 Feb 2021 18:09:07 +0000
Subject: IVGCVSW-5401 Implement the FILL operator

* Added FILL operator to TfLite ArmNN Delegate
* Added unit tests

Signed-off-by: David Monahan
Signed-off-by: Sadik Armagan
Change-Id: I335ef469ff773fa4305eb87f6e93ae9c03fc6997
---
 delegate/src/test/FillTestHelper.hpp | 160 +++++++++++++++++++++++++++++++++++
 1 file changed, 160 insertions(+)
 create mode 100644 delegate/src/test/FillTestHelper.hpp

diff --git a/delegate/src/test/FillTestHelper.hpp b/delegate/src/test/FillTestHelper.hpp
new file mode 100644
index 0000000000..e6890a2b2d
--- /dev/null
+++ b/delegate/src/test/FillTestHelper.hpp
@@ -0,0 +1,160 @@
+//
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "TestUtils.hpp"
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <tensorflow/lite/interpreter.h>
+#include <tensorflow/lite/kernels/register.h>
+#include <tensorflow/lite/model.h>
+#include <tensorflow/lite/schema/schema_generated.h>
+#include <tensorflow/lite/version.h>
+
+#include <doctest/doctest.h>
+
+namespace
+{
+
+template <typename T>
+std::vector<char> CreateFillTfLiteModel(tflite::BuiltinOperator fillOperatorCode,
+                                        tflite::TensorType tensorType,
+                                        const std::vector<int32_t>& inputShape,
+                                        const std::vector<int32_t>& tensorShape,
+                                        const std::vector<T> fillValue)
+{
+    using namespace tflite;
+    flatbuffers::FlatBufferBuilder flatBufferBuilder;
+
+    std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
+    buffers.push_back(
+        CreateBuffer(flatBufferBuilder,
+                     flatBufferBuilder.CreateVector({})));
+    buffers.push_back(
+        CreateBuffer(flatBufferBuilder,
+                     flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(tensorShape.data()),
+                                                    sizeof(int32_t) * tensorShape.size())));
+    buffers.push_back(
+        CreateBuffer(flatBufferBuilder,
+                     flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(fillValue.data()),
+                                                    sizeof(T) * fillValue.size())));
+
+    std::array<flatbuffers::Offset<Tensor>, 3> tensors;
+    tensors[0] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(inputShape.data(),
+                                                                      inputShape.size()),
+                              tflite::TensorType_INT32,
+                              1,
+                              flatBufferBuilder.CreateString("dims"));
+
+    std::vector<int32_t> fillShape = {};
+    tensors[1] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(fillShape.data(),
+                                                                      fillShape.size()),
+                              tensorType,
+                              2,
+                              flatBufferBuilder.CreateString("value"));
+
+    tensors[2] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(),
+                                                                      tensorShape.size()),
+                              tensorType,
+                              0,
+                              flatBufferBuilder.CreateString("output"));
+
+    tflite::BuiltinOptions operatorBuiltinOptionsType = BuiltinOptions_FillOptions;
+    flatbuffers::Offset<void> operatorBuiltinOptions = CreateFillOptions(flatBufferBuilder).Union();
+
+    // create operator
+    const std::vector<int32_t> operatorInputs{ {0, 1} };
+    const std::vector<int32_t> operatorOutputs{ 2 };
+    flatbuffers::Offset<Operator> fillOperator =
+        CreateOperator(flatBufferBuilder,
+                       0,
+                       flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
+                       operatorBuiltinOptionsType,
+                       operatorBuiltinOptions);
+
+    const std::vector<int32_t> subgraphInputs{ {0, 1} };
+    const std::vector<int32_t> subgraphOutputs{ 2 };
+    flatbuffers::Offset<SubGraph> subgraph =
+        CreateSubGraph(flatBufferBuilder,
+                       flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
+                       flatBufferBuilder.CreateVector(&fillOperator, 1));
+
+    flatbuffers::Offset<flatbuffers::String> modelDescription =
+        flatBufferBuilder.CreateString("ArmnnDelegate: Fill Operator Model");
+    flatbuffers::Offset<OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder,
+                                                                        fillOperatorCode);
+
+    flatbuffers::Offset<Model> flatbufferModel =
+        CreateModel(flatBufferBuilder,
+                    TFLITE_SCHEMA_VERSION,
+                    flatBufferBuilder.CreateVector(&operatorCode, 1),
+                    flatBufferBuilder.CreateVector(&subgraph, 1),
+                    modelDescription,
+                    flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
+
+    flatBufferBuilder.Finish(flatbufferModel);
+
+    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
+                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
+
+}
+
+template <typename T>
+void FillTest(tflite::BuiltinOperator fillOperatorCode,
+              tflite::TensorType tensorType,
+              const std::vector<armnn::BackendId>& backends,
+              std::vector<int32_t>& inputShape,
+              std::vector<int32_t>& tensorShape,
+              std::vector<T>& expectedOutputValues,
+              T fillValue)
+{
+    using namespace tflite;
+    std::vector<char> modelBuffer = CreateFillTfLiteModel<T>(fillOperatorCode,
+                                                             tensorType,
+                                                             inputShape,
+                                                             tensorShape,
+                                                             {fillValue});
+
+    const Model* tfLiteModel = GetModel(modelBuffer.data());
+    CHECK(tfLiteModel != nullptr);
+
+    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+              (&armnnDelegateInterpreter) == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter != nullptr);
+    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
+
+    std::unique_ptr<Interpreter> tfLiteInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+              (&tfLiteInterpreter) == kTfLiteOk);
+    CHECK(tfLiteInterpreter != nullptr);
+    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
+
+    // Create the ArmNN Delegate
+    armnnDelegate::DelegateOptions delegateOptions(backends);
+    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
+        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
+                         armnnDelegate::TfLiteArmnnDelegateDelete);
+    CHECK(theArmnnDelegate != nullptr);
+    // Modify armnnDelegateInterpreter to use armnnDelegate
+    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
+
+    // Run EnqueueWorkload
+    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
+
+    armnnDelegate::CompareOutputData<T>(tfLiteInterpreter, armnnDelegateInterpreter, tensorShape, expectedOutputValues);
+}
+
+} // anonymous namespace
-- 
cgit v1.2.1
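
For reference, a unit test built on this helper might look like the sketch below. The test name Fill2dTest, the default fill value, and the backend argument are illustrative assumptions; the actual test cases live in a separate FillTest.cpp that is outside this patch.

    #include "FillTestHelper.hpp"

    // Illustrative sketch: fills a 2x2 float tensor and compares the ArmNN
    // delegate's output against the reference TfLite interpreter.
    void Fill2dTest(std::vector<armnn::BackendId>& backends,
                    float fillValue = 2.0f)
    {
        std::vector<int32_t> inputShape { 2 };     // shape of the "dims" tensor: one entry per output dimension
        std::vector<int32_t> tensorShape { 2, 2 }; // output shape encoded in the "dims" tensor
        std::vector<float> expectedOutputValues = { fillValue, fillValue,
                                                    fillValue, fillValue };

        FillTest<float>(tflite::BuiltinOperator_FILL,
                        ::tflite::TensorType_FLOAT32,
                        backends,
                        inputShape,
                        tensorShape,
                        expectedOutputValues,
                        fillValue);
    }

Note that CreateFillTfLiteModel serialises both the "dims" and "value" inputs as constant buffers, so the delegate sees a FILL operator whose inputs are fully constant and the output shape can be determined when the graph is parsed.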