//
// Copyright © 2021, 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include "TestUtils.hpp"

#include <armnn_delegate.hpp>
#include <DelegateTestInterpreter.hpp>

#include <tensorflow/lite/version.h>

namespace
{

template <typename OutputT>
std::vector<char> CreateArgMinMaxTfLiteModel(tflite::BuiltinOperator argMinMaxOperatorCode,
                                             tflite::TensorType tensorType,
                                             const std::vector<int32_t>& inputTensorShape,
                                             const std::vector<int32_t>& axisTensorShape,
                                             const std::vector<int32_t>& outputTensorShape,
                                             const std::vector<OutputT> axisValue,
                                             tflite::TensorType outputType,
                                             float quantScale = 1.0f,
                                             int quantOffset = 0)
{
    using namespace tflite;
    flatbuffers::FlatBufferBuilder flatBufferBuilder;

    auto quantizationParameters =
        CreateQuantizationParameters(flatBufferBuilder,
                                     0,
                                     0,
                                     flatBufferBuilder.CreateVector<float>({ quantScale }),
                                     flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));

    auto inputTensor = CreateTensor(flatBufferBuilder,
                                    flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
                                                                            inputTensorShape.size()),
                                    tensorType,
                                    1,
                                    flatBufferBuilder.CreateString("input"),
                                    quantizationParameters);

    auto axisTensor = CreateTensor(flatBufferBuilder,
                                   flatBufferBuilder.CreateVector<int32_t>(axisTensorShape.data(),
                                                                           axisTensorShape.size()),
                                   tflite::TensorType_INT32,
                                   2,
                                   flatBufferBuilder.CreateString("axis"));

    auto outputTensor = CreateTensor(flatBufferBuilder,
                                     flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
                                                                             outputTensorShape.size()),
                                     outputType,
                                     3,
                                     flatBufferBuilder.CreateString("output"),
                                     quantizationParameters);

    std::vector<flatbuffers::Offset<Tensor>> tensors = { inputTensor, axisTensor, outputTensor };

    // Only the axis buffer carries constant data; the others stay empty.
    std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
    buffers.push_back(CreateBuffer(flatBufferBuilder));
    buffers.push_back(CreateBuffer(flatBufferBuilder));
    buffers.push_back(
        CreateBuffer(flatBufferBuilder,
                     flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(axisValue.data()),
                                                    sizeof(OutputT))));
    buffers.push_back(CreateBuffer(flatBufferBuilder));

    std::vector<int32_t> operatorInputs = {{ 0, 1 }};
    std::vector<int32_t> subgraphInputs = {{ 0, 1 }};

    // Default to ArgMax options; switch to ArgMin options if requested.
    tflite::BuiltinOptions operatorBuiltinOptionsType = BuiltinOptions_ArgMaxOptions;
    flatbuffers::Offset<void> operatorBuiltinOptions = CreateArgMaxOptions(flatBufferBuilder, outputType).Union();

    if (argMinMaxOperatorCode == tflite::BuiltinOperator_ARG_MIN)
    {
        operatorBuiltinOptionsType = BuiltinOptions_ArgMinOptions;
        operatorBuiltinOptions = CreateArgMinOptions(flatBufferBuilder, outputType).Union();
    }

    // create operator
    const std::vector<int32_t> operatorOutputs{ 2 };
    flatbuffers::Offset<Operator> argMinMaxOperator =
        CreateOperator(flatBufferBuilder,
                       0,
                       flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
                       flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
                       operatorBuiltinOptionsType,
                       operatorBuiltinOptions);

    const std::vector<int32_t> subgraphOutputs{ 2 };
    flatbuffers::Offset<SubGraph> subgraph =
        CreateSubGraph(flatBufferBuilder,
                       flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
                       flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
                       flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
                       flatBufferBuilder.CreateVector(&argMinMaxOperator, 1));

    flatbuffers::Offset<flatbuffers::String> modelDescription =
        flatBufferBuilder.CreateString("ArmnnDelegate: ArgMinMax Operator Model");
    flatbuffers::Offset<OperatorCode> operatorCode =
        CreateOperatorCode(flatBufferBuilder, argMinMaxOperatorCode);

    flatbuffers::Offset<Model> flatbufferModel =
        CreateModel(flatBufferBuilder,
                    TFLITE_SCHEMA_VERSION,
                    flatBufferBuilder.CreateVector(&operatorCode, 1),
                    flatBufferBuilder.CreateVector(&subgraph, 1),
                    modelDescription,
                    flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));

    flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);

    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
}
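
// For reference, the single-operator graph assembled above wires its tensors to
// buffers as follows (indices refer to the `tensors` and `buffers` vectors):
//
//   tensor 0 "input"  -> buffer 1 (empty; filled by the interpreter at runtime)
//   tensor 1 "axis"   -> buffer 2 (constant; holds the serialised axisValue)
//   tensor 2 "output" -> buffer 3 (empty; written when the operator runs)
//
// Buffer 0 is deliberately left empty, following the TFLite schema convention
// that the first buffer acts as the "no data" sentinel.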

template <typename InputT, typename OutputT>
void ArgMinMaxTest(tflite::BuiltinOperator argMinMaxOperatorCode,
                   tflite::TensorType tensorType,
                   const std::vector<int32_t>& inputShape,
                   const std::vector<int32_t>& axisShape,
                   std::vector<int32_t>& outputShape,
                   std::vector<InputT>& inputValues,
                   std::vector<OutputT>& expectedOutputValues,
                   OutputT axisValue,
                   tflite::TensorType outputType,
                   float quantScale = 1.0f,
                   int quantOffset = 0,
                   const std::vector<armnn::BackendId>& backends = {})
{
    using namespace delegateTestInterpreter;
    std::vector<char> modelBuffer = CreateArgMinMaxTfLiteModel<OutputT>(argMinMaxOperatorCode,
                                                                        tensorType,
                                                                        inputShape,
                                                                        axisShape,
                                                                        outputShape,
                                                                        {axisValue},
                                                                        outputType,
                                                                        quantScale,
                                                                        quantOffset);

    // Setup interpreter with just TFLite Runtime.
    auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
    CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
    CHECK(tfLiteInterpreter.FillInputTensor<InputT>(inputValues, 0) == kTfLiteOk);
    CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
    std::vector<OutputT> tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<OutputT>(0);
    std::vector<int32_t> tfLiteOutputShape  = tfLiteInterpreter.GetOutputShape(0);

    // Setup interpreter with Arm NN Delegate applied.
    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, CaptureAvailableBackends(backends));
    CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
    CHECK(armnnInterpreter.FillInputTensor<InputT>(inputValues, 0) == kTfLiteOk);
    CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
    std::vector<OutputT> armnnOutputValues = armnnInterpreter.GetOutputResult<OutputT>(0);
    std::vector<int32_t> armnnOutputShape  = armnnInterpreter.GetOutputShape(0);

    // Both runs must agree with each other and with the expected reference data.
    armnnDelegate::CompareOutputData<OutputT>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
    armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, outputShape);

    tfLiteInterpreter.Cleanup();
    armnnInterpreter.Cleanup();
}

} // anonymous namespace
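
// A minimal usage sketch, kept in a comment so the header stays test-free. The
// TEST_CASE name and data values are illustrative assumptions, not cases taken
// from the Arm NN test suite. ArgMax along axis 3 of a 1x1x2x4 input returns,
// for each run of four elements, the index of the largest one, so both rows
// below yield index 3:
//
//     TEST_CASE ("ArgMax_Axis3_Sketch")
//     {
//         std::vector<int32_t> inputShape  { 1, 1, 2, 4 };
//         std::vector<int32_t> axisShape   { 1 };
//         std::vector<int32_t> outputShape { 1, 1, 2 };
//         std::vector<float>   inputValues { 1.0f, 2.0f, 3.0f, 4.0f,
//                                            5.0f, 6.0f, 7.0f, 8.0f };
//         std::vector<int32_t> expectedOutputValues { 3, 3 };
//
//         ArgMinMaxTest<float, int32_t>(tflite::BuiltinOperator_ARG_MAX,
//                                       tflite::TensorType_FLOAT32,
//                                       inputShape,
//                                       axisShape,
//                                       outputShape,
//                                       inputValues,
//                                       expectedOutputValues,
//                                       3,
//                                       tflite::TensorType_INT32);
//     }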