//
// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include "TestUtils.hpp"

#include <armnn_delegate.hpp>

#include <flatbuffers/flatbuffers.h>
#include <tensorflow/lite/interpreter.h>
#include <tensorflow/lite/kernels/register.h>
#include <tensorflow/lite/model.h>
#include <tensorflow/lite/schema/schema_generated.h>
#include <tensorflow/lite/version.h>

#include <doctest/doctest.h>

namespace
{

std::vector<char> CreatePooling2dTfLiteModel(
    tflite::BuiltinOperator poolingOperatorCode,
    tflite::TensorType tensorType,
    const std::vector<int32_t>& inputTensorShape,
    const std::vector<int32_t>& outputTensorShape,
    tflite::Padding padding = tflite::Padding_SAME,
    int32_t strideWidth = 0,
    int32_t strideHeight = 0,
    int32_t filterWidth = 0,
    int32_t filterHeight = 0,
    tflite::ActivationFunctionType fusedActivation = tflite::ActivationFunctionType_NONE,
    float quantScale = 1.0f,
    int quantOffset = 0)
{
    using namespace tflite;
    flatbuffers::FlatBufferBuilder flatBufferBuilder;

    flatbuffers::Offset<tflite::Buffer> buffers[3] = { CreateBuffer(flatBufferBuilder),
                                                       CreateBuffer(flatBufferBuilder),
                                                       CreateBuffer(flatBufferBuilder) };

    auto quantizationParameters =
        CreateQuantizationParameters(flatBufferBuilder,
                                     0,
                                     0,
                                     flatBufferBuilder.CreateVector<float>({ quantScale }),
                                     flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));

    flatbuffers::Offset<Tensor> tensors[2] {
        CreateTensor(flatBufferBuilder,
                     flatBufferBuilder.CreateVector<int32_t>(inputTensorShape),
                     tensorType,
                     1,
                     flatBufferBuilder.CreateString("input"),
                     quantizationParameters),
        CreateTensor(flatBufferBuilder,
                     flatBufferBuilder.CreateVector<int32_t>(outputTensorShape),
                     tensorType,
                     2,
                     flatBufferBuilder.CreateString("output"),
                     quantizationParameters)
    };

    // Create the pooling operator: Pool2DOptions carries the padding scheme,
    // strides, filter size and any fused activation.
    tflite::BuiltinOptions operatorBuiltinOptionsType = BuiltinOptions_Pool2DOptions;
    flatbuffers::Offset<void> operatorBuiltinOptions = CreatePool2DOptions(flatBufferBuilder,
                                                                           padding,
                                                                           strideWidth,
                                                                           strideHeight,
                                                                           filterWidth,
                                                                           filterHeight,
                                                                           fusedActivation).Union();

    const std::vector<int32_t> operatorInputs{ 0 };
    const std::vector<int32_t> operatorOutputs{ 1 };
    flatbuffers::Offset<Operator> poolingOperator =
        CreateOperator(flatBufferBuilder,
                       0,
                       flatBufferBuilder.CreateVector<int32_t>(operatorInputs),
                       flatBufferBuilder.CreateVector<int32_t>(operatorOutputs),
                       operatorBuiltinOptionsType,
                       operatorBuiltinOptions);

    const int32_t subgraphInputs[1] = { 0 };
    const int32_t subgraphOutputs[1] = { 1 };
    flatbuffers::Offset<SubGraph> subgraph =
        CreateSubGraph(flatBufferBuilder,
                       flatBufferBuilder.CreateVector(tensors, 2),
                       flatBufferBuilder.CreateVector(subgraphInputs, 1),
                       flatBufferBuilder.CreateVector(subgraphOutputs, 1),
                       flatBufferBuilder.CreateVector(&poolingOperator, 1));

    flatbuffers::Offset<flatbuffers::String> modelDescription =
        flatBufferBuilder.CreateString("ArmnnDelegate: Pooling2d Operator Model");
    flatbuffers::Offset<OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder, poolingOperatorCode);

    flatbuffers::Offset<Model> flatbufferModel =
        CreateModel(flatBufferBuilder,
                    TFLITE_SCHEMA_VERSION,
                    flatBufferBuilder.CreateVector(&operatorCode, 1),
                    flatBufferBuilder.CreateVector(&subgraph, 1),
                    modelDescription,
                    flatBufferBuilder.CreateVector(buffers, 3));

    flatBufferBuilder.Finish(flatbufferModel);

    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
}
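
// A minimal sketch (not part of the original helper) of driving the builder
// above directly: it serialises a single MAX_POOL_2D model and checks that the
// buffer parses back as a TfLite model. The shapes and pooling parameters are
// illustrative assumptions only.
[[maybe_unused]] inline void ExamplePooling2dModelBuild()
{
    std::vector<char> buffer = CreatePooling2dTfLiteModel(
        tflite::BuiltinOperator_MAX_POOL_2D,
        tflite::TensorType_FLOAT32,
        { 1, 2, 2, 1 },   // input  shape [N, H, W, C]
        { 1, 1, 1, 1 },   // output shape [N, H, W, C]
        tflite::Padding_VALID,
        2, 2,             // strideWidth, strideHeight
        2, 2);            // filterWidth, filterHeight
    CHECK(tflite::GetModel(buffer.data()) != nullptr);
}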
template <typename T>
void Pooling2dTest(tflite::BuiltinOperator poolingOperatorCode,
                   tflite::TensorType tensorType,
                   std::vector<armnn::BackendId>& backends,
                   std::vector<int32_t>& inputShape,
                   std::vector<int32_t>& outputShape,
                   std::vector<T>& inputValues,
                   std::vector<T>& expectedOutputValues,
                   tflite::Padding padding = tflite::Padding_SAME,
                   int32_t strideWidth = 0,
                   int32_t strideHeight = 0,
                   int32_t filterWidth = 0,
                   int32_t filterHeight = 0,
                   tflite::ActivationFunctionType fusedActivation = tflite::ActivationFunctionType_NONE,
                   float quantScale = 1.0f,
                   int quantOffset = 0)
{
    using namespace tflite;
    std::vector<char> modelBuffer = CreatePooling2dTfLiteModel(poolingOperatorCode,
                                                               tensorType,
                                                               inputShape,
                                                               outputShape,
                                                               padding,
                                                               strideWidth,
                                                               strideHeight,
                                                               filterWidth,
                                                               filterHeight,
                                                               fusedActivation,
                                                               quantScale,
                                                               quantOffset);

    const Model* tfLiteModel = GetModel(modelBuffer.data());
    CHECK(tfLiteModel != nullptr);

    // Create TfLite Interpreters
    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
              (&armnnDelegateInterpreter) == kTfLiteOk);
    CHECK(armnnDelegateInterpreter != nullptr);
    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);

    std::unique_ptr<Interpreter> tfLiteInterpreter;
    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
              (&tfLiteInterpreter) == kTfLiteOk);
    CHECK(tfLiteInterpreter != nullptr);
    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);

    // Create the ArmNN Delegate
    armnnDelegate::DelegateOptions delegateOptions(backends);
    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
                         armnnDelegate::TfLiteArmnnDelegateDelete);
    CHECK(theArmnnDelegate != nullptr);

    // Modify armnnDelegateInterpreter to use armnnDelegate
    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);

    // Set the same input data on both interpreters
    auto tfLiteDelegateInputId = tfLiteInterpreter->inputs()[0];
    auto tfLiteDelegateInputData = tfLiteInterpreter->typed_tensor<T>(tfLiteDelegateInputId);
    for (unsigned int i = 0; i < inputValues.size(); ++i)
    {
        tfLiteDelegateInputData[i] = inputValues[i];
    }

    auto armnnDelegateInputId = armnnDelegateInterpreter->inputs()[0];
    auto armnnDelegateInputData = armnnDelegateInterpreter->typed_tensor<T>(armnnDelegateInputId);
    for (unsigned int i = 0; i < inputValues.size(); ++i)
    {
        armnnDelegateInputData[i] = inputValues[i];
    }

    // Run EnqueueWorkload
    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);

    // Compare the reference TfLite output against the ArmNN delegate output
    armnnDelegate::CompareOutputData(tfLiteInterpreter,
                                     armnnDelegateInterpreter,
                                     outputShape,
                                     expectedOutputValues);
}

} // anonymous namespace
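
// Example usage from a test file (a sketch only; the test name, backend list
// and data values are illustrative assumptions, not taken from this helper):
//
// TEST_CASE("MaxPool2d_FP32")
// {
//     std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
//     std::vector<int32_t> inputShape  { 1, 2, 2, 1 };
//     std::vector<int32_t> outputShape { 1, 1, 1, 1 };
//     std::vector<float> inputValues          { 1.0f, 2.0f, 3.0f, 4.0f };
//     std::vector<float> expectedOutputValues { 4.0f };
//
//     Pooling2dTest<float>(tflite::BuiltinOperator_MAX_POOL_2D,
//                          tflite::TensorType_FLOAT32,
//                          backends,
//                          inputShape,
//                          outputShape,
//                          inputValues,
//                          expectedOutputValues,
//                          tflite::Padding_VALID,
//                          2, 2,    // strideWidth, strideHeight
//                          2, 2);   // filterWidth, filterHeight
// }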