From 0b51d5ad533f8ecde71f957077690195eea29ffc Mon Sep 17 00:00:00 2001
From: Narumol Prangnawarat
Date: Wed, 20 Jan 2021 15:58:29 +0000
Subject: IVGCVSW-5619 Add OptimizerOptions and NetworkProperties to ArmNN Delegate

* Add OptimizerOptions, NetworkProperties, DebugCallbackFunction to DelegateOptions
* Enable OptimizerOptions when the network is being optimized
* Enable NetworkProperties when loading network
* Enable DebugCallbackFunction
* Add error message when loading network
* Log warning instead of error when operator is not supported but could
  fall back to another backend
* Improve uint16_t CompareData
* Unit tests

Signed-off-by: Narumol Prangnawarat
Change-Id: I353035afb442774bfeb1c62570a90755c2ceaf38
---
 delegate/src/test/DelegateOptionsTestHelper.hpp | 298 ++++++++++++++++++++++++
 1 file changed, 298 insertions(+)
 create mode 100644 delegate/src/test/DelegateOptionsTestHelper.hpp

diff --git a/delegate/src/test/DelegateOptionsTestHelper.hpp b/delegate/src/test/DelegateOptionsTestHelper.hpp
new file mode 100644
index 0000000000..6e0cc3154c
--- /dev/null
+++ b/delegate/src/test/DelegateOptionsTestHelper.hpp
@@ -0,0 +1,298 @@
+//
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <armnn_delegate.hpp>
+
+#include "ConvolutionTestHelper.hpp"
+#include "TestUtils.hpp"
+
+#include <flatbuffers/flatbuffers.h>
+#include <tensorflow/lite/interpreter.h>
+#include <tensorflow/lite/kernels/register.h>
+#include <tensorflow/lite/model.h>
+#include <tensorflow/lite/schema/schema_generated.h>
+#include <tensorflow/lite/version.h>
+
+#include <doctest/doctest.h>
+
+namespace
+{
+
+struct StreamRedirector
+{
+public:
+    StreamRedirector(std::ostream &stream, std::streambuf *newStreamBuffer)
+        : m_Stream(stream), m_BackupBuffer(m_Stream.rdbuf(newStreamBuffer)) {}
+
+    ~StreamRedirector() { m_Stream.rdbuf(m_BackupBuffer); }
+
+private:
+    std::ostream &m_Stream;
+    std::streambuf *m_BackupBuffer;
+};
+
+std::vector<char> CreateAddDivTfLiteModel(tflite::TensorType tensorType,
+                                          const std::vector<int32_t>& tensorShape,
+                                          float quantScale = 1.0f,
+                                          int quantOffset = 0)
+{
+    using namespace tflite;
+    flatbuffers::FlatBufferBuilder flatBufferBuilder;
+
+    std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
+    buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
+
+    auto quantizationParameters =
+        CreateQuantizationParameters(flatBufferBuilder,
+                                     0,
+                                     0,
+                                     flatBufferBuilder.CreateVector<float>({ quantScale }),
+                                     flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));
+
+    std::array<flatbuffers::Offset<Tensor>, 5> tensors;
+    tensors[0] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(),
+                                                                      tensorShape.size()),
+                              tensorType,
+                              0,
+                              flatBufferBuilder.CreateString("input_0"),
+                              quantizationParameters);
+    tensors[1] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(),
+                                                                      tensorShape.size()),
+                              tensorType,
+                              0,
+                              flatBufferBuilder.CreateString("input_1"),
+                              quantizationParameters);
+    tensors[2] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(),
+                                                                      tensorShape.size()),
+                              tensorType,
+                              0,
+                              flatBufferBuilder.CreateString("input_2"),
+                              quantizationParameters);
+    tensors[3] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(),
+                                                                      tensorShape.size()),
+                              tensorType,
+                              0,
+                              flatBufferBuilder.CreateString("add"),
+                              quantizationParameters);
+    tensors[4] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(),
+                                                                      tensorShape.size()),
+                              tensorType,
+                              0,
+                              flatBufferBuilder.CreateString("output"),
+                              quantizationParameters);
+
+    // create operators
+    tflite::BuiltinOptions addBuiltinOptionsType = tflite::BuiltinOptions_AddOptions;
+    flatbuffers::Offset<void> addBuiltinOptions =
+        CreateAddOptions(flatBufferBuilder, ActivationFunctionType_NONE).Union();
+
+    tflite::BuiltinOptions divBuiltinOptionsType = tflite::BuiltinOptions_DivOptions;
+    flatbuffers::Offset<void> divBuiltinOptions =
+        CreateAddOptions(flatBufferBuilder, ActivationFunctionType_NONE).Union();
+
+    std::array<flatbuffers::Offset<Operator>, 2> operators;
+    const std::vector<int32_t> addInputs{0, 1};
+    const std::vector<int32_t> addOutputs{3};
+    operators[0] = CreateOperator(flatBufferBuilder,
+                                  0,
+                                  flatBufferBuilder.CreateVector<int32_t>(addInputs.data(), addInputs.size()),
+                                  flatBufferBuilder.CreateVector<int32_t>(addOutputs.data(), addOutputs.size()),
+                                  addBuiltinOptionsType,
+                                  addBuiltinOptions);
+    const std::vector<int32_t> divInputs{3, 2};
+    const std::vector<int32_t> divOutputs{4};
+    operators[1] = CreateOperator(flatBufferBuilder,
+                                  1,
+                                  flatBufferBuilder.CreateVector<int32_t>(divInputs.data(), divInputs.size()),
+                                  flatBufferBuilder.CreateVector<int32_t>(divOutputs.data(), divOutputs.size()),
+                                  divBuiltinOptionsType,
+                                  divBuiltinOptions);
+
+    const std::vector<int32_t> subgraphInputs{0, 1, 2};
+    const std::vector<int32_t> subgraphOutputs{4};
+    flatbuffers::Offset<SubGraph> subgraph =
+        CreateSubGraph(flatBufferBuilder,
+                       flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
+                       flatBufferBuilder.CreateVector(operators.data(), operators.size()));
+
+    flatbuffers::Offset<flatbuffers::String> modelDescription =
+        flatBufferBuilder.CreateString("ArmnnDelegate: Add and Div Operator Model");
+
+    std::array<flatbuffers::Offset<OperatorCode>, 2> codes;
+    codes[0] = CreateOperatorCode(flatBufferBuilder, tflite::BuiltinOperator_ADD);
+    codes[1] = CreateOperatorCode(flatBufferBuilder, tflite::BuiltinOperator_DIV);
+
+    flatbuffers::Offset<Model> flatbufferModel =
+        CreateModel(flatBufferBuilder,
+                    TFLITE_SCHEMA_VERSION,
+                    flatBufferBuilder.CreateVector(codes.data(), codes.size()),
+                    flatBufferBuilder.CreateVector(&subgraph, 1),
+                    modelDescription,
+                    flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
+
+    flatBufferBuilder.Finish(flatbufferModel);
+
+    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
+                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
+}
+
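+// The test below is a sanity check for the OptimizerOptions path added by this change:
+// it builds a single Conv2d model, runs it through the ArmNN delegate on CpuRef with
+// debug and Fp32->Bf16 reduction switched on, and verifies the Fp32 results are unchanged.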
+void ReduceFp32ToBf16TestImpl()
+{
+    using namespace tflite;
+    // Set input data
+    std::vector<int32_t> inputShape{ 1, 5, 5, 1 };
+    std::vector<int32_t> filterShape{ 1, 3, 3, 1 };
+    std::vector<int32_t> biasShape{ 1 };
+    std::vector<int32_t> outputShape{ 1, 3, 3, 1 };
+
+    std::vector<float> inputValues =
+        {
+            1, 5, 2, 3, 5,
+            8, 7, 3, 6, 3,
+            3, 3, 9, 1, 9,
+            4, 1, 8, 1, 3,
+            6, 8, 1, 9, 2
+        };
+
+    std::vector<float> filterValues =
+        {
+            4, 5, 6,
+            0, 0, 0,
+            3, 2, 1
+        };
+
+    std::vector<float> biasValues = { 5 };
+
+    std::vector<float> expectedResult =
+        {
+            28, 38, 29,
+            96, 104, 53,
+            31, 55, 24
+        };
+
+    tflite::Padding padding = Padding_SAME;
+
+    std::vector<char> modelBuffer;
+    modelBuffer = CreateConv2dTfLiteModel<float>(BuiltinOperator_CONV_2D,
+                                                 ::tflite::TensorType_FLOAT32,
+                                                 2,
+                                                 2,
+                                                 1,
+                                                 1,
+                                                 padding,
+                                                 ActivationFunctionType_NONE,
+                                                 inputShape,
+                                                 filterShape,
+                                                 biasShape,
+                                                 outputShape,
+                                                 filterValues,
+                                                 biasValues);
+
+    const Model* tfLiteModel = GetModel(modelBuffer.data());
+    // Create TfLite Interpreter
+    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+              (&armnnDelegateInterpreter) == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter != nullptr);
+    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
+
+    // Create the Armnn Delegate
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    std::vector<armnn::BackendOptions> backendOptions;
+
+    // Enable debug with BF16 enabled
+    armnn::OptimizerOptions optimizerOptions(false, true, true, false);
+
+    armnnDelegate::DelegateOptions delegateOptions(backends, optimizerOptions);
+    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
+        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
+                         armnnDelegate::TfLiteArmnnDelegateDelete);
+    CHECK(theArmnnDelegate != nullptr);
+    // Modify armnnDelegateInterpreter to use armnnDelegate
+    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
+
+    // Set input data
+    armnnDelegate::FillInput(armnnDelegateInterpreter, 0, inputValues);
+
+    // Run EnqueueWorkload
+    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
+
+    // Compare output data
+    auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0];
+    auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<float>(armnnDelegateOutputId);
+    armnnDelegate::CompareData(expectedResult.data(), armnnDelegateOutputData, expectedResult.size());
+    armnnDelegateInterpreter.reset(nullptr);
+}
+
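+// Runs the Add/Div model twice - once on the stock TfLite runtime and once through the
+// ArmNN delegate created from the supplied DelegateOptions - then compares the outputs of
+// both interpreters, so test cases can exercise different DelegateOptions configurations.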
+template <typename T>
+void DelegateOptionTest(tflite::TensorType tensorType,
+                        const std::vector<armnn::BackendId>& backends,
+                        std::vector<int32_t>& tensorShape,
+                        std::vector<T>& input0Values,
+                        std::vector<T>& input1Values,
+                        std::vector<T>& input2Values,
+                        std::vector<T>& expectedOutputValues,
+                        const armnnDelegate::DelegateOptions& delegateOptions,
+                        float quantScale = 1.0f,
+                        int quantOffset = 0)
+{
+    using namespace tflite;
+    std::vector<char> modelBuffer = CreateAddDivTfLiteModel(tensorType,
+                                                            tensorShape,
+                                                            quantScale,
+                                                            quantOffset);
+
+    const Model* tfLiteModel = GetModel(modelBuffer.data());
+    // Create TfLite Interpreters
+    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+              (&armnnDelegateInterpreter) == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter != nullptr);
+    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
+
+    std::unique_ptr<Interpreter> tfLiteInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+              (&tfLiteInterpreter) == kTfLiteOk);
+    CHECK(tfLiteInterpreter != nullptr);
+    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
+
+    // Create the ArmNN Delegate
+    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
+        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
+                         armnnDelegate::TfLiteArmnnDelegateDelete);
+    CHECK(theArmnnDelegate != nullptr);
+    // Modify armnnDelegateInterpreter to use armnnDelegate
+    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
+
+    // Set input data
+    armnnDelegate::FillInput(tfLiteInterpreter, 0, input0Values);
+    armnnDelegate::FillInput(tfLiteInterpreter, 1, input1Values);
+    armnnDelegate::FillInput(tfLiteInterpreter, 2, input2Values);
+
+    armnnDelegate::FillInput(armnnDelegateInterpreter, 0, input0Values);
+    armnnDelegate::FillInput(armnnDelegateInterpreter, 1, input1Values);
+    armnnDelegate::FillInput(armnnDelegateInterpreter, 2, input2Values);
+
+    // Run EnqueueWorkload
+    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
+
+    armnnDelegate::CompareOutputData(tfLiteInterpreter, armnnDelegateInterpreter, tensorShape, expectedOutputValues);
+
+    armnnDelegateInterpreter.reset(nullptr);
+}
+
+} // anonymous namespace
\ No newline at end of file
-- 
cgit v1.2.1
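The test cases that drive DelegateOptionTest live in a separate source file that is not part of this diff. As a rough sketch of how the helper is meant to be called - the test-suite and test-case names, the tensor values and the option flags below are illustrative rather than taken from the commit, and the four positional OptimizerOptions arguments are assumed to map to (reduceFp32ToFp16, debug, reduceFp32ToBf16, importEnabled) in line with armnn::OptimizerOptions and the "Enable debug with BF16 enabled" comment above - a doctest case could look like this:

TEST_SUITE("DelegateOptions")
{
TEST_CASE ("ArmnnDelegateOptimizerOptionsCpuRef")
{
    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
    std::vector<int32_t> tensorShape{ 1, 2, 2, 1 };

    // The Add/Div model computes (input0 + input1) / input2 element-wise.
    std::vector<float> input0Values{ 1.f, 2.f, 3.f, 4.f };
    std::vector<float> input1Values{ 3.f, 2.f, 1.f, 0.f };
    std::vector<float> input2Values{ 4.f, 2.f, 2.f, 2.f };
    std::vector<float> expectedOutputValues{ 1.f, 2.f, 2.f, 2.f };

    // Plain Fp32 run with debug tensors enabled; no Fp16/Bf16 reduction, no import.
    armnn::OptimizerOptions optimizerOptions(false, true, false, false);
    armnnDelegate::DelegateOptions delegateOptions(backends, optimizerOptions);

    DelegateOptionTest<float>(::tflite::TensorType_FLOAT32,
                              backends,
                              tensorShape,
                              input0Values,
                              input1Values,
                              input2Values,
                              expectedOutputValues,
                              delegateOptions);
}
}

The same helper can be reused with other DelegateOptions configurations - for example a logging severity or the DebugCallbackFunction this commit also wires through - without touching the model-building code.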