//
// Copyright © 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include "TestUtils.hpp"

#include <armnn_delegate.hpp>
#include <DelegateTestInterpreter.hpp>

#include <tensorflow/lite/version.h>

namespace
{
    std::vector<char> CreateBroadcastToTfLiteModel(tflite::BuiltinOperator operatorCode,
                                                   tflite::TensorType inputTensorType,
                                                   const std::vector<int32_t>& inputTensorShape,
                                                   const std::vector<int32_t>& shapeTensorShape,
                                                   const std::vector<int32_t>& shapeTensorData,
                                                   const std::vector<int32_t>& outputTensorShape)
    {
        using namespace tflite;
        flatbuffers::FlatBufferBuilder flatBufferBuilder;

        // Buffer 0 is the schema-mandated empty buffer; buffers 1-3 back the three tensors,
        // with buffer 2 holding the constant broadcast shape data.
        std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
        buffers.push_back(CreateBuffer(flatBufferBuilder));
        buffers.push_back(CreateBuffer(flatBufferBuilder));
        buffers.push_back(CreateBuffer(flatBufferBuilder,
                                       flatBufferBuilder.CreateVector(
                                               reinterpret_cast<const uint8_t*>(shapeTensorData.data()),
                                               sizeof(int32_t) * shapeTensorData.size())));
        buffers.push_back(CreateBuffer(flatBufferBuilder));

        float qScale = 1.0f;
        int32_t qOffset = 0;

        auto quantizationParameters =
                CreateQuantizationParameters(flatBufferBuilder,
                                             0,
                                             0,
                                             flatBufferBuilder.CreateVector<float>({ qScale }),
                                             flatBufferBuilder.CreateVector<int64_t>({ qOffset }));

        std::array<flatbuffers::Offset<Tensor>, 3> tensors;
        tensors[0] = CreateTensor(flatBufferBuilder,
                                  flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
                                                                          inputTensorShape.size()),
                                  inputTensorType,
                                  1,
                                  flatBufferBuilder.CreateString("input_tensor"),
                                  quantizationParameters);

        tensors[1] = CreateTensor(flatBufferBuilder,
                                  flatBufferBuilder.CreateVector<int32_t>(shapeTensorShape.data(),
                                                                          shapeTensorShape.size()),
                                  TensorType_INT32,
                                  2,
                                  flatBufferBuilder.CreateString("shape_input_tensor"),
                                  quantizationParameters);

        tensors[2] = CreateTensor(flatBufferBuilder,
                                  flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
                                                                          outputTensorShape.size()),
                                  inputTensorType,
                                  3,
                                  flatBufferBuilder.CreateString("output_tensor"),
                                  quantizationParameters);

        // Create Operator
        tflite::BuiltinOptions operatorBuiltinOptionsType = tflite::BuiltinOptions_BroadcastToOptions;
        flatbuffers::Offset<void> operatorBuiltinOption = 0;

        const std::vector<int> operatorInputs {0, 1};
        const std::vector<int> operatorOutputs {2};

        flatbuffers::Offset<Operator> broadcastOperator =
                CreateOperator(flatBufferBuilder,
                               0,
                               flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(),
                                                                       operatorInputs.size()),
                               flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(),
                                                                       operatorOutputs.size()),
                               operatorBuiltinOptionsType,
                               operatorBuiltinOption);

        const std::vector<int> subgraphInputs{0, 1};
        const std::vector<int> subgraphOutputs{2};
        flatbuffers::Offset<SubGraph> subgraph =
                CreateSubGraph(flatBufferBuilder,
                               flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
                               flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(),
                                                                       subgraphInputs.size()),
                               flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(),
                                                                       subgraphOutputs.size()),
                               flatBufferBuilder.CreateVector(&broadcastOperator, 1));

        flatbuffers::Offset<flatbuffers::String> modelDescription =
                flatBufferBuilder.CreateString("ArmnnDelegate: BroadcastTo Operator Model");

        flatbuffers::Offset<OperatorCode> opCode = CreateOperatorCode(flatBufferBuilder,
                                                                      0,
                                                                      0,
                                                                      2,
                                                                      tflite::BuiltinOperator_BROADCAST_TO);

        flatbuffers::Offset<Model> flatbufferModel =
                CreateModel(flatBufferBuilder,
                            TFLITE_SCHEMA_VERSION,
                            flatBufferBuilder.CreateVector(&opCode, 1),
                            flatBufferBuilder.CreateVector(&subgraph, 1),
                            modelDescription,
                            flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));

        flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);

        return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                                 flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
    }

    template<typename T>
    void BroadcastToTestImpl(tflite::TensorType inputTensorType,
                             tflite::BuiltinOperator operatorCode,
                             std::vector<T>& inputValues,
                             std::vector<int32_t> inputShape,
                             std::vector<int32_t> shapeShapes,
                             std::vector<int32_t> shapeData,
                             std::vector<T>& expectedOutputValues,
                             std::vector<int32_t> expectedOutputShape,
                             const std::vector<armnn::BackendId>& backends)
    {
        using namespace delegateTestInterpreter;

        std::vector<char> modelBuffer = CreateBroadcastToTfLiteModel(operatorCode,
                                                                     inputTensorType,
                                                                     inputShape,
                                                                     shapeShapes,
                                                                     shapeData,
                                                                     expectedOutputShape);

        // Setup interpreter with just TFLite Runtime.
        auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
        CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
        CHECK(tfLiteInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
        CHECK(tfLiteInterpreter.FillInputTensor<int32_t>(shapeData, 1) == kTfLiteOk);
        CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
        std::vector<T> tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<T>(0);
        std::vector<int32_t> tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0);

        // Setup interpreter with Arm NN Delegate applied.
        auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, CaptureAvailableBackends(backends));
        CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
        CHECK(armnnInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
        CHECK(armnnInterpreter.FillInputTensor<int32_t>(shapeData, 1) == kTfLiteOk);
        CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
        std::vector<T> armnnOutputValues = armnnInterpreter.GetOutputResult<T>(0);
        std::vector<int32_t> armnnOutputShape = armnnInterpreter.GetOutputShape(0);

        // Both runs must agree with each other and with the reference data.
        armnnDelegate::CompareOutputData<T>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
        armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, expectedOutputShape);

        tfLiteInterpreter.Cleanup();
        armnnInterpreter.Cleanup();
    }

} // anonymous namespace
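
// Example usage (a minimal, illustrative sketch; the TEST_CASE name, test values, and
// backend choice below are assumptions for documentation, not part of this helper):
//
//     TEST_CASE("BroadcastToFp32Test")
//     {
//         // Broadcast a {1, 4} input row against a requested {3, 4} shape.
//         std::vector<float> inputValues { 1.f, 2.f, 3.f, 4.f };
//         std::vector<float> expectedOutputValues { 1.f, 2.f, 3.f, 4.f,
//                                                   1.f, 2.f, 3.f, 4.f,
//                                                   1.f, 2.f, 3.f, 4.f };
//         std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
//
//         BroadcastToTestImpl<float>(tflite::TensorType_FLOAT32,
//                                    tflite::BuiltinOperator_BROADCAST_TO,
//                                    inputValues,
//                                    { 1, 4 },   // inputShape
//                                    { 2 },      // shapeShapes: the shape tensor is 1-D with 2 entries
//                                    { 3, 4 },   // shapeData: the target broadcast shape
//                                    expectedOutputValues,
//                                    { 3, 4 },   // expectedOutputShape
//                                    backends);
//     }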