From 238ecd9279f08a1b05189e03ffa73a9201b1c0ce Mon Sep 17 00:00:00 2001
From: Ryan OShea
Date: Tue, 7 Mar 2023 11:44:23 +0000
Subject: IVGCVSW-7526 Upgrade ArmNN to Tensorflow 2.12

When creating a FlatBuffers model, we need to provide an empty buffer 0 that is
reserved by TensorFlow. When creating empty buffers for inputs and outputs we
cannot pass in an empty vector, or TfLite will assume that we know how many
bytes to allocate in advance. Instead we need to pass in only the builder.

 * Update libraries in FindTfLite.cmake
 * Add nullptr to delegate struct for OpaqueDelegateBuilder
 * Fix issue in unit tests where Flatbuffers model was not being parsed by tflite
 * Tensorflow 2.12 now includes C++17 features. Update our CMake build to
   require a compiler that supports these features.
 * Change the minimum CMake in Arm NN to 3.7, as that is the minimum for the
   delegate build.

Signed-off-by: Ryan OShea
Signed-off-by: Narumol Prangnawarat
Signed-off-by: Colm Donelan
Change-Id: I7d15b196b8c59b1914f8fc1c4c2f8960630c069c
---
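Note: as context for the buffer handling described above, a minimal sketch of the
buffer table the updated helper builds. It reuses the flatbuffers calls that
appear in the diff below; the comments are explanatory only.

    std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
    buffers.push_back(CreateBuffer(flatBufferBuilder)); // buffer 0: empty buffer reserved by TfLite
    buffers.push_back(CreateBuffer(flatBufferBuilder)); // buffer 1: LHS input tensor
    buffers.push_back(CreateBuffer(flatBufferBuilder)); // buffer 2: RHS input tensor
    buffers.push_back(CreateBuffer(flatBufferBuilder)); // buffer 3: output tensor
    // The old form, CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})),
    // handed TfLite an empty vector and so implied that the byte size was known up front.
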
 delegate/src/test/BatchMatMulTestHelper.hpp | 350 ++++++++++++++--------------
 1 file changed, 176 insertions(+), 174 deletions(-)

(limited to 'delegate/src/test/BatchMatMulTestHelper.hpp')

diff --git a/delegate/src/test/BatchMatMulTestHelper.hpp b/delegate/src/test/BatchMatMulTestHelper.hpp
index 42c1ed6a1e..7437064a42 100644
--- a/delegate/src/test/BatchMatMulTestHelper.hpp
+++ b/delegate/src/test/BatchMatMulTestHelper.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
@@ -20,184 +20,186 @@ namespace
 {
-
-    std::vector<char> CreateBatchMatMulTfLiteModel(
-            tflite::BuiltinOperator bmmOperatorCode,
-            tflite::TensorType tensorType,
-            const std::vector<int32_t>& LHSInputTensorShape,
-            const std::vector<int32_t>& RHSInputTensorShape,
-            const std::vector<int32_t>& outputTensorShape,
-            bool adjX = false,
-            bool adjY = false,
-            float quantScale = 1.0f,
-            int quantOffset = 0)
+std::vector<char> CreateBatchMatMulTfLiteModel(
+        tflite::BuiltinOperator bmmOperatorCode,
+        tflite::TensorType tensorType,
+        const std::vector<int32_t>& LHSInputTensorShape,
+        const std::vector<int32_t>& RHSInputTensorShape,
+        const std::vector<int32_t>& outputTensorShape,
+        bool adjX = false,
+        bool adjY = false,
+        float quantScale = 1.0f,
+        int quantOffset = 0)
+{
+    using namespace tflite;
+    flatbuffers::FlatBufferBuilder flatBufferBuilder;
+
+    std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+
+    auto quantizationParameters =
+            CreateQuantizationParameters(flatBufferBuilder,
+                                         0,
+                                         0,
+                                         flatBufferBuilder.CreateVector<float>({ quantScale }),
+                                         flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));
+
+    std::array<flatbuffers::Offset<Tensor>, 3> tensors;
+    tensors[0] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(LHSInputTensorShape.data(),
+                                                                      LHSInputTensorShape.size()),
+                              tensorType,
+                              1,
+                              flatBufferBuilder.CreateString("LHSInput"),
+                              quantizationParameters);
+
+    tensors[1] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(RHSInputTensorShape.data(),
+                                                                      RHSInputTensorShape.size()),
+                              tensorType,
+                              2,
+                              flatBufferBuilder.CreateString("RHSInput"),
+                              quantizationParameters);
+
+    tensors[2] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
+                                                                      outputTensorShape.size()),
+                              tensorType,
+                              3,
+                              flatBufferBuilder.CreateString("output"),
+                              quantizationParameters);
+
+    // create operator
+    tflite::BuiltinOptions operatorBuiltinOptionsType = BuiltinOptions_BatchMatMulOptions;
+    flatbuffers::Offset<void> operatorBuiltinOptions = CreateBatchMatMulOptions(flatBufferBuilder,
+                                                                                adjX,
+                                                                                adjY).Union();
+
+    const std::vector<int32_t> operatorInputs{{0, 1}};
+    const std::vector<int32_t> operatorOutputs{2};
+    flatbuffers::Offset<Operator> bmmOperator =
+            CreateOperator(flatBufferBuilder,
+                           0,
+                           flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
+                           flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(),
+                                                                   operatorOutputs.size()),
+                           operatorBuiltinOptionsType,
+                           operatorBuiltinOptions);
+
+    const std::vector<int32_t> subgraphInputs{{0, 1}};
+    const std::vector<int32_t> subgraphOutputs{2};
+    flatbuffers::Offset<SubGraph> subgraph =
+            CreateSubGraph(flatBufferBuilder,
+                           flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
+                           flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
+                           flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(),
+                                                                   subgraphOutputs.size()),
+                           flatBufferBuilder.CreateVector(&bmmOperator, 1));
+
+    flatbuffers::Offset<flatbuffers::String> modelDescription =
+            flatBufferBuilder.CreateString("ArmnnDelegate: BatchMatMul Operator Model");
+    flatbuffers::Offset<OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder, bmmOperatorCode);
+
+    flatbuffers::Offset<Model> flatbufferModel =
+            CreateModel(flatBufferBuilder,
+                        TFLITE_SCHEMA_VERSION,
+                        flatBufferBuilder.CreateVector(&operatorCode, 1),
+                        flatBufferBuilder.CreateVector(&subgraph, 1),
+                        modelDescription,
+                        flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
+
+    flatBufferBuilder.Finish(flatbufferModel);
+
+    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
+                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
+}
+
+template <typename T>
+void BatchMatMulTest(tflite::BuiltinOperator bmmOperatorCode,
+                     tflite::TensorType tensorType,
+                     std::vector<armnn::BackendId>& backends,
+                     std::vector<int32_t>& LHSInputShape,
+                     std::vector<int32_t>& RHSInputShape,
+                     std::vector<int32_t>& outputShape,
+                     std::vector<T>& LHSInputValues,
+                     std::vector<T>& RHSInputValues,
+                     std::vector<T>& expectedOutputValues,
+                     bool adjX = false,
+                     bool adjY = false,
+                     float quantScale = 1.0f,
+                     int quantOffset = 0)
+{
+    using namespace tflite;
+    std::vector<char> modelBuffer = CreateBatchMatMulTfLiteModel(bmmOperatorCode,
+                                                                 tensorType,
+                                                                 LHSInputShape,
+                                                                 RHSInputShape,
+                                                                 outputShape,
+                                                                 adjX,
+                                                                 adjY,
+                                                                 quantScale,
+                                                                 quantOffset);
+
+    const Model* tfLiteModel = GetModel(modelBuffer.data());
+    CHECK(tfLiteModel != nullptr);
+    // Create TfLite Interpreters
+    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+              (&armnnDelegateInterpreter) == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter != nullptr);
+    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
+
+    std::unique_ptr<Interpreter> tfLiteInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+              (&tfLiteInterpreter) == kTfLiteOk);
+    CHECK(tfLiteInterpreter != nullptr);
+    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
+
+    // Create the ArmNN Delegate
+    armnnDelegate::DelegateOptions delegateOptions(backends);
+    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
+            theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
+                             armnnDelegate::TfLiteArmnnDelegateDelete);
+    CHECK(theArmnnDelegate != nullptr);
+    // Modify armnnDelegateInterpreter to use armnnDelegate
+    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
+
+    // Set input data
+    auto tfLiteDelegateLHSInputId = tfLiteInterpreter->inputs()[0];
+    auto tfLiteDelegateLHSInputData = tfLiteInterpreter->typed_tensor<T>(tfLiteDelegateLHSInputId);
+    auto tfLiteDelegateRHSInputId = tfLiteInterpreter->inputs()[1];
+    auto tfLiteDelegateRHSInputData = tfLiteInterpreter->typed_tensor<T>(tfLiteDelegateRHSInputId);
+    for (unsigned int i = 0; i < LHSInputValues.size(); ++i)
+    {
+        tfLiteDelegateLHSInputData[i] = LHSInputValues[i];
+    }
+    for (unsigned int i = 0; i < RHSInputValues.size(); ++i)
     {
-        using namespace tflite;
-        flatbuffers::FlatBufferBuilder flatBufferBuilder;
-
-        std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
-        buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
-
-        auto quantizationParameters =
-                CreateQuantizationParameters(flatBufferBuilder,
-                                             0,
-                                             0,
-                                             flatBufferBuilder.CreateVector<float>({ quantScale }),
-                                             flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));
-
-        std::array<flatbuffers::Offset<Tensor>, 3> tensors;
-        tensors[0] = CreateTensor(flatBufferBuilder,
-                                  flatBufferBuilder.CreateVector<int32_t>(LHSInputTensorShape.data(),
-                                                                          LHSInputTensorShape.size()),
-                                  tensorType,
-                                  0,
-                                  flatBufferBuilder.CreateString("LHSInput"),
-                                  quantizationParameters);
-
-        tensors[1] = CreateTensor(flatBufferBuilder,
-                                  flatBufferBuilder.CreateVector<int32_t>(RHSInputTensorShape.data(),
-                                                                          RHSInputTensorShape.size()),
-                                  tensorType,
-                                  0,
-                                  flatBufferBuilder.CreateString("RHSInput"),
-                                  quantizationParameters);
-
-        tensors[2] = CreateTensor(flatBufferBuilder,
-                                  flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
-                                                                          outputTensorShape.size()),
-                                  tensorType,
-                                  0,
-                                  flatBufferBuilder.CreateString("output"),
-                                  quantizationParameters);
-
-        // create operator
-        tflite::BuiltinOptions operatorBuiltinOptionsType = BuiltinOptions_BatchMatMulOptions;
-        flatbuffers::Offset<void> operatorBuiltinOptions = CreateBatchMatMulOptions(flatBufferBuilder,
-                                                                                    adjX,
-                                                                                    adjY).Union();
-
-        const std::vector<int32_t> operatorInputs{{0, 1}};
-        const std::vector<int32_t> operatorOutputs{2};
-        flatbuffers::Offset<Operator> bmmOperator =
-                CreateOperator(flatBufferBuilder,
-                               0,
-                               flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
-                               flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(),
-                                                                       operatorOutputs.size()),
-                               operatorBuiltinOptionsType,
-                               operatorBuiltinOptions);
-
-        const std::vector<int32_t> subgraphInputs{{0, 1}};
-        const std::vector<int32_t> subgraphOutputs{2};
-        flatbuffers::Offset<SubGraph> subgraph =
-                CreateSubGraph(flatBufferBuilder,
-                               flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
-                               flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
-                               flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(),
-                                                                       subgraphOutputs.size()),
-                               flatBufferBuilder.CreateVector(&bmmOperator, 1));
-
-        flatbuffers::Offset<flatbuffers::String> modelDescription =
-                flatBufferBuilder.CreateString("ArmnnDelegate: BatchMatMul Operator Model");
-        flatbuffers::Offset<OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder, bmmOperatorCode);
-
-        flatbuffers::Offset<Model> flatbufferModel =
-                CreateModel(flatBufferBuilder,
-                            TFLITE_SCHEMA_VERSION,
-                            flatBufferBuilder.CreateVector(&operatorCode, 1),
-                            flatBufferBuilder.CreateVector(&subgraph, 1),
-                            modelDescription,
-                            flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
-
-        flatBufferBuilder.Finish(flatbufferModel);
-
-        return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
-                                 flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
+        tfLiteDelegateRHSInputData[i] = RHSInputValues[i];
     }

-    template <typename T>
-    void BatchMatMulTest(tflite::BuiltinOperator bmmOperatorCode,
-                         tflite::TensorType tensorType,
-                         std::vector<armnn::BackendId>& backends,
-                         std::vector<int32_t>& LHSInputShape,
-                         std::vector<int32_t>& RHSInputShape,
-                         std::vector<int32_t>& outputShape,
-                         std::vector<T>& LHSInputValues,
-                         std::vector<T>& RHSInputValues,
-                         std::vector<T>& expectedOutputValues,
-                         bool adjX = false,
-                         bool adjY = false,
-                         float quantScale = 1.0f,
-                         int quantOffset = 0)
+    auto armnnDelegateLHSInputId = armnnDelegateInterpreter->inputs()[0];
+    auto armnnDelegateLHSInputData = armnnDelegateInterpreter->typed_tensor<T>(armnnDelegateLHSInputId);
+    auto armnnDelegateRHSInputId = armnnDelegateInterpreter->inputs()[1];
+    auto armnnDelegateRHSInputData = armnnDelegateInterpreter->typed_tensor<T>(armnnDelegateRHSInputId);
+    for (unsigned int i = 0; i < LHSInputValues.size(); ++i)
+    {
+        armnnDelegateLHSInputData[i] = LHSInputValues[i];
+    }
+    for (unsigned int i = 0; i < RHSInputValues.size(); ++i)
     {
-        using namespace tflite;
-        std::vector<char> modelBuffer = CreateBatchMatMulTfLiteModel(bmmOperatorCode,
-                                                                     tensorType,
-                                                                     LHSInputShape,
-                                                                     RHSInputShape,
-                                                                     outputShape,
-                                                                     adjX,
-                                                                     adjY,
-                                                                     quantScale,
-                                                                     quantOffset);
-
-        const Model* tfLiteModel = GetModel(modelBuffer.data());
-        CHECK(tfLiteModel != nullptr);
-        // Create TfLite Interpreters
-        std::unique_ptr<Interpreter> armnnDelegateInterpreter;
-        CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-                  (&armnnDelegateInterpreter) == kTfLiteOk);
-        CHECK(armnnDelegateInterpreter != nullptr);
-        CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
-
-        std::unique_ptr<Interpreter> tfLiteInterpreter;
-        CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-                  (&tfLiteInterpreter) == kTfLiteOk);
-        CHECK(tfLiteInterpreter != nullptr);
-        CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
-
-        // Create the ArmNN Delegate
-        armnnDelegate::DelegateOptions delegateOptions(backends);
-        std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
-            theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
-                             armnnDelegate::TfLiteArmnnDelegateDelete);
-        CHECK(theArmnnDelegate != nullptr);
-        // Modify armnnDelegateInterpreter to use armnnDelegate
-        CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
-        // Set input data
-        auto tfLiteDelegateLHSInputId = tfLiteInterpreter->inputs()[0];
-        auto tfLiteDelegateLHSInputData = tfLiteInterpreter->typed_tensor<T>(tfLiteDelegateLHSInputId);
-        auto tfLiteDelegateRHSInputId = tfLiteInterpreter->inputs()[1];
-        auto tfLiteDelegateRHSInputData = tfLiteInterpreter->typed_tensor<T>(tfLiteDelegateRHSInputId);
-        for (unsigned int i = 0; i < LHSInputValues.size(); ++i)
-        {
-            tfLiteDelegateLHSInputData[i] = LHSInputValues[i];
-        }
-        for (unsigned int i = 0; i < RHSInputValues.size(); ++i)
-        {
-            tfLiteDelegateRHSInputData[i] = RHSInputValues[i];
-        }
-
-        auto armnnDelegateLHSInputId = armnnDelegateInterpreter->inputs()[0];
-        auto armnnDelegateLHSInputData = armnnDelegateInterpreter->typed_tensor<T>(armnnDelegateLHSInputId);
-        auto armnnDelegateRHSInputId = armnnDelegateInterpreter->inputs()[1];
-        auto armnnDelegateRHSInputData = armnnDelegateInterpreter->typed_tensor<T>(armnnDelegateRHSInputId);
-        for (unsigned int i = 0; i < LHSInputValues.size(); ++i)
-        {
-            armnnDelegateLHSInputData[i] = LHSInputValues[i];
-        }
-        for (unsigned int i = 0; i < RHSInputValues.size(); ++i)
-        {
-            armnnDelegateRHSInputData[i] = RHSInputValues[i];
-        }
-        // Run EnqueueWorkload
-        CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
-        CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
-
-        armnnDelegate::CompareOutputData<T>(tfLiteInterpreter, armnnDelegateInterpreter,
-                                            outputShape, expectedOutputValues);
+        armnnDelegateRHSInputData[i] = RHSInputValues[i];
     }
+    // Run EnqueueWorkload
+    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
+
+    armnnDelegate::CompareOutputData<T>(tfLiteInterpreter, armnnDelegateInterpreter,
+                                        outputShape, expectedOutputValues);
+}

 } // anonymous namespace
--
cgit v1.2.1
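For reference, a hypothetical caller of the BatchMatMulTest helper added above; the
backend, shapes, and values are illustrative only and are not taken from this patch:

    // Illustrative only: a single 2x2 Fp32 batched matrix multiply checked against
    // the hand-computed result [[19, 22], [43, 50]].
    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
    std::vector<int32_t> lhsShape    { 1, 2, 2 };
    std::vector<int32_t> rhsShape    { 1, 2, 2 };
    std::vector<int32_t> outputShape { 1, 2, 2 };
    std::vector<float> lhsValues      { 1.f, 2.f, 3.f, 4.f };
    std::vector<float> rhsValues      { 5.f, 6.f, 7.f, 8.f };
    std::vector<float> expectedValues { 19.f, 22.f, 43.f, 50.f };

    BatchMatMulTest<float>(tflite::BuiltinOperator_BATCH_MATMUL,
                           ::tflite::TensorType_FLOAT32,
                           backends,
                           lhsShape, rhsShape, outputShape,
                           lhsValues, rhsValues, expectedValues);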