//
// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include "TestUtils.hpp"

#include <armnn_delegate.hpp>

#include <flatbuffers/flatbuffers.h>
#include <tensorflow/lite/interpreter.h>
#include <tensorflow/lite/kernels/register.h>
#include <tensorflow/lite/model.h>
#include <tensorflow/lite/schema/schema_generated.h>
#include <tensorflow/lite/version.h>

#include <doctest/doctest.h>

namespace
{

// Builds a single-operator SHAPE model as a TfLite flatbuffer in memory.
std::vector<char> CreateShapeTfLiteModel(tflite::TensorType inputTensorType,
                                         tflite::TensorType outputTensorType,
                                         const std::vector<int32_t>& inputTensorShape,
                                         const std::vector<int32_t>& outputTensorShape,
                                         float quantScale = 1.0f,
                                         int quantOffset = 0)
{
    using namespace tflite;
    flatbuffers::FlatBufferBuilder flatBufferBuilder;

    std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
    buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector<uint8_t>({})));

    auto quantizationParameters =
        CreateQuantizationParameters(flatBufferBuilder,
                                     0,
                                     0,
                                     flatBufferBuilder.CreateVector<float>({ quantScale }),
                                     flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));

    std::array<flatbuffers::Offset<Tensor>, 2> tensors;
    tensors[0] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
                                                                      inputTensorShape.size()),
                              inputTensorType,
                              0,
                              flatBufferBuilder.CreateString("input"),
                              quantizationParameters);

    tensors[1] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
                                                                      outputTensorShape.size()),
                              outputTensorType,
                              0,
                              flatBufferBuilder.CreateString("output"),
                              quantizationParameters);

    const std::vector<int32_t> operatorInputs({ 0 });
    const std::vector<int32_t> operatorOutputs({ 1 });

    flatbuffers::Offset<Operator> shapeOperator =
        CreateOperator(flatBufferBuilder,
                       0,
                       flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(),
                                                               operatorInputs.size()),
                       flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(),
                                                               operatorOutputs.size()),
                       BuiltinOptions_ShapeOptions,
                       CreateShapeOptions(flatBufferBuilder, outputTensorType).Union());

    flatbuffers::Offset<flatbuffers::String> modelDescription =
        flatBufferBuilder.CreateString("ArmnnDelegate: SHAPE Operator Model");
    flatbuffers::Offset<OperatorCode> operatorCode =
        CreateOperatorCode(flatBufferBuilder, tflite::BuiltinOperator_SHAPE);

    const std::vector<int32_t> subgraphInputs({ 0 });
    const std::vector<int32_t> subgraphOutputs({ 1 });

    flatbuffers::Offset<SubGraph> subgraph =
        CreateSubGraph(flatBufferBuilder,
                       flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
                       flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(),
                                                               subgraphInputs.size()),
                       flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(),
                                                               subgraphOutputs.size()),
                       flatBufferBuilder.CreateVector(&shapeOperator, 1));

    flatbuffers::Offset<Model> flatbufferModel =
        CreateModel(flatBufferBuilder,
                    TFLITE_SCHEMA_VERSION,
                    flatBufferBuilder.CreateVector(&operatorCode, 1),
                    flatBufferBuilder.CreateVector(&subgraph, 1),
                    modelDescription,
                    flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));

    flatBufferBuilder.Finish(flatbufferModel);

    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
}

// Runs the SHAPE model on both the reference TfLite runtime and the ArmNN
// delegate, then checks that the two produce identical outputs.
template<typename T, typename K>
void ShapeTest(tflite::TensorType inputTensorType,
               tflite::TensorType outputTensorType,
               std::vector<armnn::BackendId>& backends,
               std::vector<int32_t>& inputShape,
               std::vector<T>& inputValues,
               std::vector<K>& expectedOutputValues,
               std::vector<int32_t>& expectedOutputShape,
               float quantScale = 1.0f,
               int quantOffset = 0)
{
    using namespace tflite;
    std::vector<char> modelBuffer = CreateShapeTfLiteModel(inputTensorType,
                                                           outputTensorType,
                                                           inputShape,
                                                           expectedOutputShape,
                                                           quantScale,
                                                           quantOffset);

    const Model* tfLiteModel = GetModel(modelBuffer.data());

    // Create the TfLite interpreter that will run with the ArmNN delegate attached
    std::unique_ptr<Interpreter> armnnDelegate;
    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
              (&armnnDelegate) == kTfLiteOk);
    CHECK(armnnDelegate != nullptr);
    CHECK(armnnDelegate->AllocateTensors() == kTfLiteOk);
    // Create the reference TfLite interpreter that runs without the delegate
    std::unique_ptr<Interpreter> tfLiteDelegate;
    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
              (&tfLiteDelegate) == kTfLiteOk);
    CHECK(tfLiteDelegate != nullptr);
    CHECK(tfLiteDelegate->AllocateTensors() == kTfLiteOk);

    // Create the ArmNN Delegate
    armnnDelegate::DelegateOptions delegateOptions(backends);
    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
                         armnnDelegate::TfLiteArmnnDelegateDelete);
    CHECK(theArmnnDelegate != nullptr);

    // Modify the first interpreter so that supported parts of the graph run on the ArmNN delegate
    CHECK(armnnDelegate->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);

    // Set input data on both interpreters
    armnnDelegate::FillInput<T>(tfLiteDelegate, 0, inputValues);
    armnnDelegate::FillInput<T>(armnnDelegate, 0, inputValues);

    // Run inference on both interpreters
    CHECK(tfLiteDelegate->Invoke() == kTfLiteOk);
    CHECK(armnnDelegate->Invoke() == kTfLiteOk);

    // Compare the delegate output against the reference TfLite output
    armnnDelegate::CompareOutputData<K>(tfLiteDelegate,
                                        armnnDelegate,
                                        expectedOutputShape,
                                        expectedOutputValues,
                                        0);

    tfLiteDelegate.reset(nullptr);
    armnnDelegate.reset(nullptr);
}

} // anonymous namespace
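
// A minimal usage sketch (illustration only, not part of the helper): the test-case
// name, backend choice, and concrete values below are assumptions rather than taken
// from an actual test file. A rank-4 INT32 input of shape { 1, 3, 2, 3 } should
// produce the INT32 output { 1, 3, 2, 3 } with output shape { 4 }.
//
// TEST_CASE("ShapeSimpleTest_CpuRef")
// {
//     std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
//     std::vector<int32_t> inputShape { 1, 3, 2, 3 };
//     std::vector<int32_t> inputValues(18, 1);                  // 1*3*2*3 = 18 elements
//     std::vector<int32_t> expectedOutputShape { 4 };           // one entry per input dimension
//     std::vector<int32_t> expectedOutputValues { 1, 3, 2, 3 }; // the input shape itself
//
//     ShapeTest<int32_t, int32_t>(::tflite::TensorType_INT32,
//                                 ::tflite::TensorType_INT32,
//                                 backends,
//                                 inputShape,
//                                 inputValues,
//                                 expectedOutputValues,
//                                 expectedOutputShape);
// }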