//
// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include "TestUtils.hpp"

#include <armnn_delegate.hpp>

#include <flatbuffers/flatbuffers.h>
#include <tensorflow/lite/interpreter.h>
#include <tensorflow/lite/kernels/register.h>
#include <tensorflow/lite/model.h>
#include <tensorflow/lite/schema/schema_generated.h>
#include <tensorflow/lite/version.h>

#include <doctest/doctest.h>

namespace
{

std::vector<char> CreatePreluTfLiteModel(tflite::BuiltinOperator preluOperatorCode,
                                         tflite::TensorType tensorType,
                                         const std::vector<int32_t>& inputShape,
                                         const std::vector<int32_t>& alphaShape,
                                         const std::vector<int32_t>& outputShape,
                                         std::vector<float>& alphaData,
                                         bool alphaIsConstant)
{
    using namespace tflite;
    flatbuffers::FlatBufferBuilder flatBufferBuilder;

    std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
    buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));

    buffers.push_back(CreateBuffer(flatBufferBuilder,
                                   flatBufferBuilder.CreateVector(
                                       reinterpret_cast<const uint8_t*>(alphaData.data()),
                                       sizeof(float) * alphaData.size())));

    auto quantizationParameters =
        CreateQuantizationParameters(flatBufferBuilder,
                                     0,
                                     0,
                                     flatBufferBuilder.CreateVector<float>({ 1.0f }),
                                     flatBufferBuilder.CreateVector<int64_t>({ 0 }));

    auto inputTensor = CreateTensor(flatBufferBuilder,
                                    flatBufferBuilder.CreateVector<int32_t>(inputShape.data(),
                                                                            inputShape.size()),
                                    tensorType,
                                    0,
                                    flatBufferBuilder.CreateString("input"),
                                    quantizationParameters);

    auto alphaTensor = CreateTensor(flatBufferBuilder,
                                    flatBufferBuilder.CreateVector<int32_t>(alphaShape.data(),
                                                                            alphaShape.size()),
                                    tensorType,
                                    1,
                                    flatBufferBuilder.CreateString("alpha"),
                                    quantizationParameters);

    auto outputTensor = CreateTensor(flatBufferBuilder,
                                     flatBufferBuilder.CreateVector<int32_t>(outputShape.data(),
                                                                             outputShape.size()),
                                     tensorType,
                                     0,
                                     flatBufferBuilder.CreateString("output"),
                                     quantizationParameters);

    std::vector<flatbuffers::Offset<Tensor>> tensors = { inputTensor, alphaTensor, outputTensor };

    const std::vector<int32_t> operatorInputs{0, 1};
    const std::vector<int32_t> operatorOutputs{2};

    flatbuffers::Offset<Operator> preluOperator =
        CreateOperator(flatBufferBuilder,
                       0,
                       flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(),
                                                               operatorInputs.size()),
                       flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(),
                                                               operatorOutputs.size()));

    // When alpha is constant it stays baked into the flatbuffer and is not a
    // subgraph input; otherwise it is fed in at runtime alongside the input.
    std::vector<int32_t> subgraphInputs{0};
    if (!alphaIsConstant)
    {
        subgraphInputs.push_back(1);
    }

    const std::vector<int32_t> subgraphOutputs{2};
    flatbuffers::Offset<SubGraph> subgraph =
        CreateSubGraph(flatBufferBuilder,
                       flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
                       flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(),
                                                               subgraphInputs.size()),
                       flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(),
                                                               subgraphOutputs.size()),
                       flatBufferBuilder.CreateVector(&preluOperator, 1));

    flatbuffers::Offset<flatbuffers::String> modelDescription =
        flatBufferBuilder.CreateString("ArmnnDelegate: Prelu Operator Model");

    flatbuffers::Offset<OperatorCode> opCode = CreateOperatorCode(flatBufferBuilder, preluOperatorCode);

    flatbuffers::Offset<Model> flatbufferModel =
        CreateModel(flatBufferBuilder,
                    TFLITE_SCHEMA_VERSION,
                    flatBufferBuilder.CreateVector(&opCode, 1),
                    flatBufferBuilder.CreateVector(&subgraph, 1),
                    modelDescription,
                    flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));

    flatBufferBuilder.Finish(flatbufferModel);

    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
}
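
// Layout of the model built above: buffer 0 is the empty buffer TfLite expects
// at index 0 and buffer 1 holds the raw alpha bytes; the tensors are indexed
// input = 0, alpha = 1, output = 2. The alpha tensor always references
// buffer 1, so making alpha "constant" only means leaving tensor 1 out of the
// subgraph inputs, at which point readers of the model treat it as baked-in
// data rather than a runtime input.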
void PreluTest(tflite::BuiltinOperator preluOperatorCode,
               tflite::TensorType tensorType,
               const std::vector<armnn::BackendId>& backends,
               const std::vector<int32_t>& inputShape,
               const std::vector<int32_t>& alphaShape,
               std::vector<int32_t>& outputShape,
               std::vector<float>& inputData,
               std::vector<float>& alphaData,
               std::vector<float>& expectedOutput,
               bool alphaIsConstant)
{
    using namespace tflite;

    std::vector<char> modelBuffer = CreatePreluTfLiteModel(preluOperatorCode,
                                                           tensorType,
                                                           inputShape,
                                                           alphaShape,
                                                           outputShape,
                                                           alphaData,
                                                           alphaIsConstant);

    const Model* tfLiteModel = GetModel(modelBuffer.data());

    CHECK(tfLiteModel != nullptr);

    // Build two interpreters from the same model: one to run through the
    // ArmNN delegate and one to run on the TfLite reference kernels.
    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
              (&armnnDelegateInterpreter) == kTfLiteOk);
    CHECK(armnnDelegateInterpreter != nullptr);
    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);

    std::unique_ptr<Interpreter> tfLiteInterpreter;
    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
              (&tfLiteInterpreter) == kTfLiteOk);
    CHECK(tfLiteInterpreter != nullptr);
    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);

    // Create the ArmNN Delegate
    armnnDelegate::DelegateOptions delegateOptions(backends);
    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
                         armnnDelegate::TfLiteArmnnDelegateDelete);
    CHECK(theArmnnDelegate != nullptr);

    // Modify armnnDelegateInterpreter to use armnnDelegate
    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);

    // Set input data
    armnnDelegate::FillInput<float>(tfLiteInterpreter, 0, inputData);
    armnnDelegate::FillInput<float>(armnnDelegateInterpreter, 0, inputData);

    // Set alpha data if not constant
    if (!alphaIsConstant)
    {
        armnnDelegate::FillInput<float>(tfLiteInterpreter, 1, alphaData);
        armnnDelegate::FillInput<float>(armnnDelegateInterpreter, 1, alphaData);
    }

    // Run EnqueueWorkload
    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);

    // Compare output data against the expected values and against each other
    auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[0];
    auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<float>(tfLiteDelegateOutputId);
    auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0];
    auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<float>(armnnDelegateOutputId);

    for (size_t i = 0; i < expectedOutput.size(); i++)
    {
        CHECK(expectedOutput[i] == armnnDelegateOutputData[i]);
        CHECK(tfLiteDelegateOutputData[i] == expectedOutput[i]);
        CHECK(tfLiteDelegateOutputData[i] == armnnDelegateOutputData[i]);
    }
}

} // anonymous namespace
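
// A usage sketch, not part of the original helper: it assumes the CpuRef
// reference backend is available and uses illustrative shapes and values.
// PReLU computes f(x) = x for x >= 0 and f(x) = alpha * x for x < 0, which
// the expected output below reflects (alpha = 0.5 scales only the negatives).
TEST_CASE("PreluFp32ConstantAlphaSketch")
{
    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };

    std::vector<int32_t> inputShape  { 1, 2, 3 };
    std::vector<int32_t> alphaShape  { 1 };       // broadcast against the input
    std::vector<int32_t> outputShape { 1, 2, 3 };

    std::vector<float> inputData      { -14.f, 2.f, 0.f, 1.f, -5.f, 14.f };
    std::vector<float> alphaData      { 0.5f };
    std::vector<float> expectedOutput { -7.f, 2.f, 0.f, 1.f, -2.5f, 14.f };

    PreluTest(tflite::BuiltinOperator_PRELU,
              ::tflite::TensorType_FLOAT32,
              backends,
              inputShape,
              alphaShape,
              outputShape,
              inputData,
              alphaData,
              expectedOutput,
              true); // alpha baked into the model as a constant tensor
}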