//
// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include "TestUtils.hpp"

#include <armnn_delegate.hpp>

#include <flatbuffers/flatbuffers.h>
#include <tensorflow/lite/interpreter.h>
#include <tensorflow/lite/kernels/register.h>
#include <tensorflow/lite/model.h>
#include <tensorflow/lite/schema/schema_generated.h>
#include <tensorflow/lite/version.h>

#include <doctest/doctest.h>

namespace
{

// Builds a FlatBuffer model containing a single reshape-style operator. The target
// shape is either embedded in the operator's ReshapeOptions (useOption == true)
// or supplied as a second, constant int32 "shape" input tensor (useOption == false).
std::vector<char> CreateRedefineTfLiteModel(tflite::BuiltinOperator redefineOperatorCode,
                                            tflite::TensorType tensorType,
                                            const std::vector<int32_t>& inputTensorShape,
                                            const std::vector<int32_t>& outputTensorShape,
                                            const std::vector<int32_t>& targetShape,
                                            bool useOption = true,
                                            float quantScale = 1.0f,
                                            int quantOffset = 0)
{
    using namespace tflite;
    flatbuffers::FlatBufferBuilder flatBufferBuilder;

    std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
    buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
    buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));

    auto quantizationParameters =
            CreateQuantizationParameters(flatBufferBuilder,
                                         0,
                                         0,
                                         flatBufferBuilder.CreateVector<float>({ quantScale }),
                                         flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));

    auto inputTensor = CreateTensor(flatBufferBuilder,
                                    flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
                                                                            inputTensorShape.size()),
                                    tensorType,
                                    0,
                                    flatBufferBuilder.CreateString("input"),
                                    quantizationParameters);

    auto outputTensor = CreateTensor(flatBufferBuilder,
                                     flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
                                                                             outputTensorShape.size()),
                                     tensorType,
                                     1,
                                     flatBufferBuilder.CreateString("output"),
                                     quantizationParameters);

    std::vector<flatbuffers::Offset<Tensor>> tensors;
    std::vector<int32_t> operatorInputs;
    std::vector<int32_t> subgraphInputs;
    flatbuffers::Offset<void> operatorBuiltinOptions;

    if (useOption)
    {
        // Target shape travels inside the operator's ReshapeOptions table.
        tensors = { inputTensor, outputTensor };
        operatorInputs = {{0}};
        subgraphInputs = {{0}};
        operatorBuiltinOptions = CreateReshapeOptions(
                flatBufferBuilder,
                flatBufferBuilder.CreateVector(targetShape.data(), targetShape.size())).Union();
    }
    else
    {
        // Target shape travels as a constant second input tensor backed by its own buffer.
        buffers.push_back(
                CreateBuffer(flatBufferBuilder,
                             flatBufferBuilder.CreateVector(
                                     reinterpret_cast<const uint8_t*>(targetShape.data()),
                                     sizeof(int32_t) * targetShape.size())));
        int32_t size = static_cast<int32_t>(targetShape.size());
        auto shapeTensor = CreateTensor(flatBufferBuilder,
                                        flatBufferBuilder.CreateVector<int32_t>({ size }),
                                        tflite::TensorType_INT32,
                                        2,
                                        flatBufferBuilder.CreateString("shape"));

        tensors = { inputTensor, outputTensor, shapeTensor };
        operatorInputs = {{ 0, 2 }};
        subgraphInputs = {{ 0, 2 }};
        operatorBuiltinOptions = CreateReshapeOptions(flatBufferBuilder).Union();
    }

    // create operator
    tflite::BuiltinOptions operatorBuiltinOptionsType = BuiltinOptions_ReshapeOptions;

    const std::vector<int32_t> operatorOutputs{{1}};
    flatbuffers::Offset<Operator> redefineOperator =
            CreateOperator(flatBufferBuilder,
                           0,
                           flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
                           flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
                           operatorBuiltinOptionsType,
                           operatorBuiltinOptions);

    const std::vector<int32_t> subgraphOutputs{{1}};
    flatbuffers::Offset<SubGraph> subgraph =
            CreateSubGraph(flatBufferBuilder,
                           flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
                           flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
                           flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
                           flatBufferBuilder.CreateVector(&redefineOperator, 1));

    flatbuffers::Offset<flatbuffers::String> modelDescription =
            flatBufferBuilder.CreateString("ArmnnDelegate: Reshape Operator Model");
    flatbuffers::Offset<OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder,
                                                                        redefineOperatorCode);

    flatbuffers::Offset<Model> flatbufferModel =
            CreateModel(flatBufferBuilder,
                        TFLITE_SCHEMA_VERSION,
                        flatBufferBuilder.CreateVector(&operatorCode, 1),
                        flatBufferBuilder.CreateVector(&subgraph, 1),
                        modelDescription,
                        flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));

    flatBufferBuilder.Finish(flatbufferModel);

    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
}

// Runs the generated model through a plain TfLite interpreter and one using the
// ArmNN delegate, then checks that both produce the expected output shape and values.
template <typename T>
void RedefineTest(tflite::BuiltinOperator redefineOperatorCode,
                  tflite::TensorType tensorType,
                  const std::vector<armnn::BackendId>& backends,
                  const std::vector<int32_t>& inputShape,
                  const std::vector<int32_t>& outputShape,
                  std::vector<T>& inputValues,
                  std::vector<T>& expectedOutputValues,
                  std::vector<int32_t>& targetShape,
                  bool useOption = true,
                  float quantScale = 1.0f,
                  int quantOffset = 0)
{
    using namespace tflite;
    std::vector<char> modelBuffer = CreateRedefineTfLiteModel(redefineOperatorCode,
                                                              tensorType,
                                                              inputShape,
                                                              outputShape,
                                                              targetShape,
                                                              useOption,
                                                              quantScale,
                                                              quantOffset);

    const Model* tfLiteModel = GetModel(modelBuffer.data());
    CHECK(tfLiteModel != nullptr);

    // Create TfLite Interpreters
    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
              (&armnnDelegateInterpreter) == kTfLiteOk);
    CHECK(armnnDelegateInterpreter != nullptr);
    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);

    std::unique_ptr<Interpreter> tfLiteInterpreter;
    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
              (&tfLiteInterpreter) == kTfLiteOk);
    CHECK(tfLiteInterpreter != nullptr);
    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);

    // Create the ArmNN Delegate
    armnnDelegate::DelegateOptions delegateOptions(backends);
    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
            theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
                             armnnDelegate::TfLiteArmnnDelegateDelete);
    CHECK(theArmnnDelegate != nullptr);

    // Modify armnnDelegateInterpreter to use armnnDelegate
    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);

    // Set input data
    armnnDelegate::FillInput(tfLiteInterpreter, 0, inputValues);
    armnnDelegate::FillInput(armnnDelegateInterpreter, 0, inputValues);

    // Run EnqueueWorkload
    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);

    auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[0];
    auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<T>(tfLiteDelegateOutputId);
    auto tfLiteDelegateOutputTensor = tfLiteInterpreter->tensor(tfLiteDelegateOutputId);
    auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0];
    auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<T>(armnnDelegateOutputId);
    auto armnnDelegateOutputTensor = armnnDelegateInterpreter->tensor(armnnDelegateOutputId);

    // Both interpreters must report the expected output dimensions...
    CHECK(outputShape.size() == tfLiteDelegateOutputTensor->dims->size);
    CHECK(outputShape.size() == armnnDelegateOutputTensor->dims->size);

    for (size_t i = 0; i < static_cast<size_t>(tfLiteDelegateOutputTensor->dims->size); i++)
    {
        CHECK(outputShape[i] == armnnDelegateOutputTensor->dims->data[i]);
        CHECK(tfLiteDelegateOutputTensor->dims->data[i] == armnnDelegateOutputTensor->dims->data[i]);
    }

    // ...and the expected output values.
    for (size_t i = 0; i < expectedOutputValues.size(); i++)
    {
        CHECK(expectedOutputValues[i] == armnnDelegateOutputData[i]);
        CHECK(tfLiteDelegateOutputData[i] == expectedOutputValues[i]);
        CHECK(tfLiteDelegateOutputData[i] == armnnDelegateOutputData[i]);
    }
}

} // anonymous namespace
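
// -----------------------------------------------------------------------------
// Usage sketch (illustrative only, not part of the original helper): how a test
// source including this header might drive RedefineTest for a float32 RESHAPE on
// a given backend list. The function name, shapes, and values are assumptions
// chosen for demonstration. Kept commented out so this header stays declaration-only.
//
//     void ReshapeSimpleFloat32Test(std::vector<armnn::BackendId>& backends,
//                                   bool useOption = true)
//     {
//         std::vector<int32_t> inputShape  { 1, 3, 4, 1 };
//         std::vector<int32_t> outputShape { 1, 3, 2, 2 };
//         std::vector<int32_t> targetShape { 1, 3, 2, 2 };
//
//         // Reshape only reinterprets the layout, so values pass through unchanged.
//         std::vector<float> inputValues { -5.0f,  8.0f, -10.0f, 7.0f,
//                                           8.0f, 12.0f, -15.0f, 2.0f,
//                                           3.0f, -4.0f,   6.0f, 1.0f };
//         std::vector<float> expectedOutputValues = inputValues;
//
//         RedefineTest<float>(tflite::BuiltinOperator_RESHAPE,
//                             ::tflite::TensorType_FLOAT32,
//                             backends,
//                             inputShape,
//                             outputShape,
//                             inputValues,
//                             expectedOutputValues,
//                             targetShape,
//                             useOption);
//     }
// -----------------------------------------------------------------------------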