//
// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include "TestUtils.hpp"

#include <armnn_delegate.hpp>

#include <flatbuffers/flatbuffers.h>
#include <tensorflow/lite/interpreter.h>
#include <tensorflow/lite/kernels/register.h>
#include <tensorflow/lite/model.h>
#include <tensorflow/lite/schema/schema_generated.h>
#include <tensorflow/lite/version.h>

#include <doctest/doctest.h>

#include <string>

namespace
{

// Builds a FlatBuffer model containing a single UNPACK operator with one
// input tensor and outputTensorNum output tensors.
std::vector<char> CreateUnpackTfLiteModel(tflite::BuiltinOperator unpackOperatorCode,
                                          tflite::TensorType tensorType,
                                          std::vector<int32_t>& inputTensorShape,
                                          const std::vector<int32_t>& outputTensorShape,
                                          const int32_t outputTensorNum,
                                          unsigned int axis = 0,
                                          float quantScale = 1.0f,
                                          int quantOffset = 0)
{
    using namespace tflite;
    flatbuffers::FlatBufferBuilder flatBufferBuilder;

    std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
    buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));

    auto quantizationParameters =
        CreateQuantizationParameters(flatBufferBuilder,
                                     0,
                                     0,
                                     flatBufferBuilder.CreateVector<float>({ quantScale }),
                                     flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));

    const std::vector<int32_t> operatorInputs{ 0 };
    std::vector<int32_t> operatorOutputs{};
    const std::vector<int32_t> subgraphInputs{ 0 };
    std::vector<int32_t> subgraphOutputs{};

    std::vector<flatbuffers::Offset<Tensor>> tensors(outputTensorNum + 1);

    // Create input tensor
    tensors[0] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
                                                                      inputTensorShape.size()),
                              tensorType,
                              0,
                              flatBufferBuilder.CreateString("input"),
                              quantizationParameters);

    // Create output tensors and register them as operator and subgraph outputs
    for (int i = 0; i < outputTensorNum; ++i)
    {
        tensors[i + 1] = CreateTensor(flatBufferBuilder,
                                      flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
                                                                              outputTensorShape.size()),
                                      tensorType,
                                      0,
                                      flatBufferBuilder.CreateString("output" + std::to_string(i)),
                                      quantizationParameters);

        operatorOutputs.push_back(i + 1);
        subgraphOutputs.push_back(i + 1);
    }

    // Create operator
    tflite::BuiltinOptions operatorBuiltinOptionsType = tflite::BuiltinOptions_UnpackOptions;
    flatbuffers::Offset<void> operatorBuiltinOptions =
        CreateUnpackOptions(flatBufferBuilder, outputTensorNum, axis).Union();

    flatbuffers::Offset<Operator> unpackOperator =
        CreateOperator(flatBufferBuilder,
                       0,
                       flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
                       flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
                       operatorBuiltinOptionsType,
                       operatorBuiltinOptions);

    flatbuffers::Offset<SubGraph> subgraph =
        CreateSubGraph(flatBufferBuilder,
                       flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
                       flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
                       flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
                       flatBufferBuilder.CreateVector(&unpackOperator, 1));

    flatbuffers::Offset<flatbuffers::String> modelDescription =
        flatBufferBuilder.CreateString("ArmnnDelegate: Unpack Operator Model");
    flatbuffers::Offset<OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder, unpackOperatorCode);

    flatbuffers::Offset<Model> flatbufferModel =
        CreateModel(flatBufferBuilder,
                    TFLITE_SCHEMA_VERSION,
                    flatBufferBuilder.CreateVector(&operatorCode, 1),
                    flatBufferBuilder.CreateVector(&subgraph, 1),
                    modelDescription,
                    flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));

    flatBufferBuilder.Finish(flatbufferModel);

    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
}
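
// Runs the UNPACK model built above through a reference TfLite interpreter
// and an ArmNN-delegated interpreter, then checks that every unpacked output
// tensor matches between the two runs.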
template <typename T>
void UnpackTest(tflite::BuiltinOperator unpackOperatorCode,
                tflite::TensorType tensorType,
                std::vector<armnn::BackendId>& backends,
                std::vector<int32_t>& inputShape,
                std::vector<int32_t>& expectedOutputShape,
                std::vector<T>& inputValues,
                std::vector<std::vector<T>>& expectedOutputValues,
                unsigned int axis = 0,
                float quantScale = 1.0f,
                int quantOffset = 0)
{
    using namespace tflite;

    std::vector<char> modelBuffer = CreateUnpackTfLiteModel(unpackOperatorCode,
                                                            tensorType,
                                                            inputShape,
                                                            expectedOutputShape,
                                                            expectedOutputValues.size(),
                                                            axis,
                                                            quantScale,
                                                            quantOffset);

    const Model* tfLiteModel = GetModel(modelBuffer.data());

    // Create TfLite Interpreters
    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
              (&armnnDelegateInterpreter) == kTfLiteOk);
    CHECK(armnnDelegateInterpreter != nullptr);
    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);

    std::unique_ptr<Interpreter> tfLiteInterpreter;
    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
              (&tfLiteInterpreter) == kTfLiteOk);
    CHECK(tfLiteInterpreter != nullptr);
    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);

    // Create the ArmNN Delegate
    armnnDelegate::DelegateOptions delegateOptions(backends);
    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
                         armnnDelegate::TfLiteArmnnDelegateDelete);
    CHECK(theArmnnDelegate != nullptr);

    // Modify armnnDelegateInterpreter to use armnnDelegate
    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);

    // Set input data
    armnnDelegate::FillInput<T>(tfLiteInterpreter, 0, inputValues);
    armnnDelegate::FillInput<T>(armnnDelegateInterpreter, 0, inputValues);

    // Run EnqueueWorkload
    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);

    // Compare output data: one output tensor per unpacked slice
    for (unsigned int i = 0; i < expectedOutputValues.size(); ++i)
    {
        armnnDelegate::CompareOutputData<T>(tfLiteInterpreter,
                                            armnnDelegateInterpreter,
                                            expectedOutputShape,
                                            expectedOutputValues[i],
                                            i);
    }

    armnnDelegateInterpreter.reset(nullptr);
}

} // anonymous namespace
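
// A minimal usage sketch (hypothetical shapes and values; the concrete test
// cases live in the corresponding UnpackTest.cpp). Unpacking a { 4, 1, 6 }
// float tensor along axis 0 yields four { 1, 6 } outputs:
//
//     std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
//     std::vector<int32_t> inputShape { 4, 1, 6 };
//     std::vector<int32_t> expectedOutputShape { 1, 6 };
//     std::vector<float> inputValues { /* 24 values */ };
//     std::vector<std::vector<float>> expectedOutputValues { /* 4 slices of 6 values */ };
//     UnpackTest<float>(tflite::BuiltinOperator_UNPACK,
//                       ::tflite::TensorType_FLOAT32,
//                       backends,
//                       inputShape,
//                       expectedOutputShape,
//                       inputValues,
//                       expectedOutputValues,
//                       0);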