//
// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include "TestUtils.hpp"

#include <armnn_delegate.hpp>

#include <flatbuffers/flatbuffers.h>
#include <tensorflow/lite/interpreter.h>
#include <tensorflow/lite/kernels/register.h>
#include <tensorflow/lite/model.h>
#include <tensorflow/lite/schema/schema_generated.h>
#include <tensorflow/lite/version.h>

#include <doctest/doctest.h>

#include <string>

namespace
{

std::vector<char> CreateSplitTfLiteModel(tflite::TensorType tensorType,
                                         std::vector<int32_t>& axisTensorShape,
                                         std::vector<int32_t>& inputTensorShape,
                                         const std::vector<std::vector<int32_t>>& outputTensorShapes,
                                         std::vector<int32_t>& axisData,
                                         const int32_t numSplits,
                                         float quantScale = 1.0f,
                                         int quantOffset  = 0)
{
    using namespace tflite;
    flatbuffers::FlatBufferBuilder flatBufferBuilder;

    std::array<flatbuffers::Offset<tflite::Buffer>, 2> buffers;
    buffers[0] = CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({}));
    buffers[1] = CreateBuffer(flatBufferBuilder,
                              flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(axisData.data()),
                                                             sizeof(int32_t) * axisData.size()));

    auto quantizationParameters =
        CreateQuantizationParameters(flatBufferBuilder,
                                     0,
                                     0,
                                     flatBufferBuilder.CreateVector<float>({ quantScale }),
                                     flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));

    std::array<flatbuffers::Offset<Tensor>, 4> tensors;
    tensors[0] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(axisTensorShape.data(),
                                                                      axisTensorShape.size()),
                              ::tflite::TensorType_INT32,
                              1,
                              flatBufferBuilder.CreateString("axis"),
                              quantizationParameters);
    tensors[1] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
                                                                      inputTensorShape.size()),
                              tensorType,
                              0,
                              flatBufferBuilder.CreateString("input"),
                              quantizationParameters);

    // Create the output tensors
    for (unsigned int i = 0; i < outputTensorShapes.size(); ++i)
    {
        tensors[i + 2] = CreateTensor(flatBufferBuilder,
                                      flatBufferBuilder.CreateVector<int32_t>(outputTensorShapes[i].data(),
                                                                              outputTensorShapes[i].size()),
                                      tensorType,
                                      0,
                                      flatBufferBuilder.CreateString("output"),
                                      quantizationParameters);
    }

    // Create the operator; SPLIT uses SplitOptions.
    tflite::BuiltinOptions operatorBuiltinOptionsType = tflite::BuiltinOptions_SplitOptions;
    flatbuffers::Offset<void> operatorBuiltinOptions = CreateSplitOptions(flatBufferBuilder, numSplits).Union();

    const std::vector<int32_t> operatorInputs{ {0, 1} };
    const std::vector<int32_t> operatorOutputs{ {2, 3} };
    flatbuffers::Offset<Operator> controlOperator =
        CreateOperator(flatBufferBuilder,
                       0,
                       flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
                       flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
                       operatorBuiltinOptionsType,
                       operatorBuiltinOptions);

    const std::vector<int32_t> subgraphInputs{ {0, 1} };
    const std::vector<int32_t> subgraphOutputs{ {2, 3} };
    flatbuffers::Offset<SubGraph> subgraph =
        CreateSubGraph(flatBufferBuilder,
                       flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
                       flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
                       flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
                       flatBufferBuilder.CreateVector(&controlOperator, 1));

    flatbuffers::Offset<flatbuffers::String> modelDescription =
        flatBufferBuilder.CreateString("ArmnnDelegate: SPLIT Operator Model");
    flatbuffers::Offset<OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder, BuiltinOperator_SPLIT);

    flatbuffers::Offset<Model> flatbufferModel =
        CreateModel(flatBufferBuilder,
                    TFLITE_SCHEMA_VERSION,
                    flatBufferBuilder.CreateVector(&operatorCode, 1),
                    flatBufferBuilder.CreateVector(&subgraph, 1),
                    modelDescription,
                    flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));

    flatBufferBuilder.Finish(flatbufferModel);

    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
}

template <typename T>
void SplitTest(tflite::TensorType tensorType,
               std::vector<armnn::BackendId>& backends,
               std::vector<int32_t>& axisTensorShape,
               std::vector<int32_t>& inputTensorShape,
               std::vector<std::vector<int32_t>>& outputTensorShapes,
               std::vector<int32_t>& axisData,
               std::vector<T>& inputValues,
               std::vector<std::vector<T>>& expectedOutputValues,
               const int32_t numSplits,
               float quantScale = 1.0f,
               int quantOffset  = 0)
{
    using namespace tflite;
    std::vector<char> modelBuffer = CreateSplitTfLiteModel(tensorType,
                                                           axisTensorShape,
                                                           inputTensorShape,
                                                           outputTensorShapes,
                                                           axisData,
                                                           numSplits,
                                                           quantScale,
                                                           quantOffset);
    const Model* tfLiteModel = GetModel(modelBuffer.data());

    // Create the TfLite interpreters: one to run with the ArmNN delegate, one for reference
    std::unique_ptr<Interpreter> armnnDelegate;
    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
              (&armnnDelegate) == kTfLiteOk);
    CHECK(armnnDelegate != nullptr);
    CHECK(armnnDelegate->AllocateTensors() == kTfLiteOk);

    std::unique_ptr<Interpreter> tfLiteDelegate;
    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
              (&tfLiteDelegate) == kTfLiteOk);
    CHECK(tfLiteDelegate != nullptr);
    CHECK(tfLiteDelegate->AllocateTensors() == kTfLiteOk);

    // Create the ArmNN Delegate
    armnnDelegate::DelegateOptions delegateOptions(backends);
    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
                         armnnDelegate::TfLiteArmnnDelegateDelete);
    CHECK(theArmnnDelegate != nullptr);

    // Modify the armnnDelegate interpreter to use the ArmNN delegate
    CHECK(armnnDelegate->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);

    // Set input data; the input tensor is at index 1 (index 0 is the constant axis tensor)
    armnnDelegate::FillInput<T>(tfLiteDelegate, 1, inputValues);
    armnnDelegate::FillInput<T>(armnnDelegate, 1, inputValues);

    // Run EnqueueWorkload
    CHECK(tfLiteDelegate->Invoke() == kTfLiteOk);
    CHECK(armnnDelegate->Invoke() == kTfLiteOk);

    // Compare output data
    for (unsigned int i = 0; i < expectedOutputValues.size(); ++i)
    {
        armnnDelegate::CompareOutputData<T>(tfLiteDelegate,
                                            armnnDelegate,
                                            outputTensorShapes[i],
                                            expectedOutputValues[i],
                                            i);
    }

    tfLiteDelegate.reset(nullptr);
    armnnDelegate.reset(nullptr);
} // End of SPLIT Test
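// A minimal usage sketch (not part of the original helper; the function name and concrete values
// below are illustrative only): it splits a uint8 [2, 2, 2, 2] tensor into two [2, 1, 2, 2]
// halves along axis 1, using the default quantization parameters.
void SplitUint8SketchTest(std::vector<armnn::BackendId>& backends)
{
    std::vector<int32_t> axisShape { 1 };
    std::vector<int32_t> inputShape { 2, 2, 2, 2 };
    std::vector<std::vector<int32_t>> outputShapes{ { 2, 1, 2, 2 },
                                                    { 2, 1, 2, 2 } };

    std::vector<int32_t> axisData { 1 }; // Split along the second dimension
    std::vector<uint8_t> inputValues { 1, 2, 3, 4, 5, 6, 7, 8,
                                       9, 10, 11, 12, 13, 14, 15, 16 };

    // Each output keeps one slice of axis 1 from both batches
    std::vector<std::vector<uint8_t>> expectedOutputValues{ { 1, 2, 3, 4, 9, 10, 11, 12 },
                                                            { 5, 6, 7, 8, 13, 14, 15, 16 } };

    const int32_t numSplits = 2;

    SplitTest<uint8_t>(::tflite::TensorType_UINT8,
                       backends,
                       axisShape,
                       inputShape,
                       outputShapes,
                       axisData,
                       inputValues,
                       expectedOutputValues,
                       numSplits);
}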

std::vector<char> CreateSplitVTfLiteModel(tflite::TensorType tensorType,
                                          std::vector<int32_t>& inputTensorShape,
                                          std::vector<int32_t>& splitsTensorShape,
                                          std::vector<int32_t>& axisTensorShape,
                                          const std::vector<std::vector<int32_t>>& outputTensorShapes,
                                          std::vector<int32_t>& splitsData,
                                          std::vector<int32_t>& axisData,
                                          const int32_t numSplits,
                                          float quantScale = 1.0f,
                                          int quantOffset  = 0)
{
    using namespace tflite;
    flatbuffers::FlatBufferBuilder flatBufferBuilder;

    std::array<flatbuffers::Offset<tflite::Buffer>, 3> buffers;
    buffers[0] = CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({}));
    buffers[1] = CreateBuffer(flatBufferBuilder,
                              flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(splitsData.data()),
                                                             sizeof(int32_t) * splitsData.size()));
    buffers[2] = CreateBuffer(flatBufferBuilder,
                              flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(axisData.data()),
                                                             sizeof(int32_t) * axisData.size()));

    auto quantizationParameters =
        CreateQuantizationParameters(flatBufferBuilder,
                                     0,
                                     0,
                                     flatBufferBuilder.CreateVector<float>({ quantScale }),
                                     flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));

    std::array<flatbuffers::Offset<Tensor>, 5> tensors;
    tensors[0] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
                                                                      inputTensorShape.size()),
                              tensorType,
                              0,
                              flatBufferBuilder.CreateString("input"),
                              quantizationParameters);
    tensors[1] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(splitsTensorShape.data(),
                                                                      splitsTensorShape.size()),
                              ::tflite::TensorType_INT32,
                              1,
                              flatBufferBuilder.CreateString("splits"),
                              quantizationParameters);
    tensors[2] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(axisTensorShape.data(),
                                                                      axisTensorShape.size()),
                              ::tflite::TensorType_INT32,
                              2,
                              flatBufferBuilder.CreateString("axis"),
                              quantizationParameters);

    // Create the output tensors
    for (unsigned int i = 0; i < outputTensorShapes.size(); ++i)
    {
        tensors[i + 3] = CreateTensor(flatBufferBuilder,
                                      flatBufferBuilder.CreateVector<int32_t>(outputTensorShapes[i].data(),
                                                                              outputTensorShapes[i].size()),
                                      tensorType,
                                      0,
                                      flatBufferBuilder.CreateString("output"),
                                      quantizationParameters);
    }

    // Create the operator; SPLIT_V uses SplitVOptions.
    tflite::BuiltinOptions operatorBuiltinOptionsType = tflite::BuiltinOptions_SplitVOptions;
    flatbuffers::Offset<void> operatorBuiltinOptions = CreateSplitVOptions(flatBufferBuilder, numSplits).Union();

    const std::vector<int32_t> operatorInputs{ {0, 1, 2} };
    const std::vector<int32_t> operatorOutputs{ {3, 4} };
    flatbuffers::Offset<Operator> controlOperator =
        CreateOperator(flatBufferBuilder,
                       0,
                       flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
                       flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
                       operatorBuiltinOptionsType,
                       operatorBuiltinOptions);

    const std::vector<int32_t> subgraphInputs{ {0, 1, 2} };
    const std::vector<int32_t> subgraphOutputs{ {3, 4} };
    flatbuffers::Offset<SubGraph> subgraph =
        CreateSubGraph(flatBufferBuilder,
                       flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
                       flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
                       flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
                       flatBufferBuilder.CreateVector(&controlOperator, 1));

    flatbuffers::Offset<flatbuffers::String> modelDescription =
        flatBufferBuilder.CreateString("ArmnnDelegate: SPLIT_V Operator Model");
    flatbuffers::Offset<OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder, BuiltinOperator_SPLIT_V);

    flatbuffers::Offset<Model> flatbufferModel =
        CreateModel(flatBufferBuilder,
                    TFLITE_SCHEMA_VERSION,
                    flatBufferBuilder.CreateVector(&operatorCode, 1),
                    flatBufferBuilder.CreateVector(&subgraph, 1),
                    modelDescription,
                    flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));

    flatBufferBuilder.Finish(flatbufferModel);

    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
}

template <typename T>
void SplitVTest(tflite::TensorType tensorType,
                std::vector<armnn::BackendId>& backends,
                std::vector<int32_t>& inputTensorShape,
                std::vector<int32_t>& splitsTensorShape,
                std::vector<int32_t>& axisTensorShape,
                std::vector<std::vector<int32_t>>& outputTensorShapes,
                std::vector<T>& inputValues,
                std::vector<int32_t>& splitsData,
                std::vector<int32_t>& axisData,
                std::vector<std::vector<T>>& expectedOutputValues,
                const int32_t numSplits,
                float quantScale = 1.0f,
                int quantOffset  = 0)
{
    using namespace tflite;
    std::vector<char> modelBuffer = CreateSplitVTfLiteModel(tensorType,
                                                            inputTensorShape,
                                                            splitsTensorShape,
                                                            axisTensorShape,
                                                            outputTensorShapes,
                                                            splitsData,
                                                            axisData,
                                                            numSplits,
                                                            quantScale,
                                                            quantOffset);
    const Model* tfLiteModel = GetModel(modelBuffer.data());

    // Create the TfLite interpreters: one to run with the ArmNN delegate, one for reference
    std::unique_ptr<Interpreter> armnnDelegate;
    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
              (&armnnDelegate) == kTfLiteOk);
    CHECK(armnnDelegate != nullptr);
    CHECK(armnnDelegate->AllocateTensors() == kTfLiteOk);

    std::unique_ptr<Interpreter> tfLiteDelegate;
    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
              (&tfLiteDelegate) == kTfLiteOk);
    CHECK(tfLiteDelegate != nullptr);
    CHECK(tfLiteDelegate->AllocateTensors() == kTfLiteOk);

    // Create the ArmNN Delegate
    armnnDelegate::DelegateOptions delegateOptions(backends);
    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
                         armnnDelegate::TfLiteArmnnDelegateDelete);
    CHECK(theArmnnDelegate != nullptr);

    // Modify the armnnDelegate interpreter to use the ArmNN delegate
    CHECK(armnnDelegate->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);

    // Set input data; the input tensor is at index 0 (splits and axis follow at 1 and 2)
    armnnDelegate::FillInput<T>(tfLiteDelegate, 0, inputValues);
    armnnDelegate::FillInput<T>(armnnDelegate, 0, inputValues);

    // Run EnqueueWorkload
    CHECK(tfLiteDelegate->Invoke() == kTfLiteOk);
    CHECK(armnnDelegate->Invoke() == kTfLiteOk);

    // Compare output data
    for (unsigned int i = 0; i < expectedOutputValues.size(); ++i)
    {
        armnnDelegate::CompareOutputData<T>(tfLiteDelegate,
                                            armnnDelegate,
                                            outputTensorShapes[i],
                                            expectedOutputValues[i],
                                            i);
    }

    tfLiteDelegate.reset(nullptr);
    armnnDelegate.reset(nullptr);
} // End of SPLIT_V Test
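// A minimal usage sketch for SPLIT_V (again illustrative, not part of the original helper): unlike
// SPLIT, the slice sizes along the split axis may be unequal, so a float [2, 4, 2, 2] tensor can
// be carved into a [2, 1, 2, 2] piece and a [2, 3, 2, 2] piece via splits { 1, 3 } on axis 1.
void SplitVFp32SketchTest(std::vector<armnn::BackendId>& backends)
{
    std::vector<int32_t> inputShape { 2, 4, 2, 2 };
    std::vector<int32_t> splitsShape { 2 };
    std::vector<int32_t> axisShape { 1 };
    std::vector<std::vector<int32_t>> outputShapes{ { 2, 1, 2, 2 },
                                                    { 2, 3, 2, 2 } };

    std::vector<float> inputValues { 1, 2, 3, 4, 5, 6, 7, 8,
                                     9, 10, 11, 12, 13, 14, 15, 16,
                                     17, 18, 19, 20, 21, 22, 23, 24,
                                     25, 26, 27, 28, 29, 30, 31, 32 };

    std::vector<int32_t> splitsData { 1, 3 }; // Unequal slice sizes along the axis
    std::vector<int32_t> axisData { 1 };

    // First output takes slice 0 of axis 1 from both batches; the second takes slices 1-3
    std::vector<std::vector<float>> expectedOutputValues{
        { 1, 2, 3, 4, 17, 18, 19, 20 },
        { 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
          21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32 } };

    const int32_t numSplits = 2;

    SplitVTest<float>(::tflite::TensorType_FLOAT32,
                      backends,
                      inputShape,
                      splitsShape,
                      axisShape,
                      outputShapes,
                      inputValues,
                      splitsData,
                      axisData,
                      expectedOutputValues,
                      numSplits);
}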
} // anonymous namespace