From 238ecd9279f08a1b05189e03ffa73a9201b1c0ce Mon Sep 17 00:00:00 2001
From: Ryan OShea
Date: Tue, 7 Mar 2023 11:44:23 +0000
Subject: IVGCVSW-7526 Upgrade ArmNN to Tensorflow 2.12

When creating a flatbuffers model, we need to provide an empty buffer 0
that is reserved by tensorflow. When creating empty buffers for inputs
and outputs we cannot pass in an empty vector, or tflite will assume
that we know how many bytes to allocate in advance. Instead we need to
only pass in the builder.

 * Update libraries in FindTfLite.cmake
 * Add nullptr to delegate struct for OpaqueDelegateBuilder
 * Fix issue in unit tests where Flatbuffers model was not being parsed
   by tflite
 * Tensorflow 2.12 now includes C++17 features. Update our cmake build
   to require a compiler that supports these features.
 * Change minimum cmake in Arm NN to 3.7 as that's the minimum for the
   delegate build.

Signed-off-by: Ryan OShea
Signed-off-by: Narumol Prangnawarat
Signed-off-by: Colm Donelan
Change-Id: I7d15b196b8c59b1914f8fc1c4c2f8960630c069c
---
 delegate/src/test/SplitTestHelper.hpp | 86 ++++++++++++++++++-----------------
 1 file changed, 44 insertions(+), 42 deletions(-)

diff --git a/delegate/src/test/SplitTestHelper.hpp b/delegate/src/test/SplitTestHelper.hpp
index 31fc7d5e46..3c5f50ffac 100644
--- a/delegate/src/test/SplitTestHelper.hpp
+++ b/delegate/src/test/SplitTestHelper.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -35,11 +35,12 @@ std::vector<char> CreateSplitTfLiteModel(tflite::TensorType tensorType,
     using namespace tflite;
     flatbuffers::FlatBufferBuilder flatBufferBuilder;
 
-    std::array<flatbuffers::Offset<tflite::Buffer>, 2> buffers;
-    buffers[0] = CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({}));
-    buffers[1] = CreateBuffer(flatBufferBuilder,
-                              flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(axisData.data()),
-                                                             sizeof(int32_t) * axisData.size()));
+    std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    buffers.push_back(CreateBuffer(flatBufferBuilder,
+                                   flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(axisData.data()),
+                                                                  sizeof(int32_t) * axisData.size())));
 
     auto quantizationParameters =
         CreateQuantizationParameters(flatBufferBuilder,
@@ -53,27 +54,28 @@ std::vector<char> CreateSplitTfLiteModel(tflite::TensorType tensorType,
                               flatBufferBuilder.CreateVector<int32_t>(axisTensorShape.data(),
                                                                       axisTensorShape.size()),
                               ::tflite::TensorType_INT32,
-                              1,
+                              2,
                               flatBufferBuilder.CreateString("axis"),
                               quantizationParameters);
     tensors[1] = CreateTensor(flatBufferBuilder,
                               flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
                                                                       inputTensorShape.size()),
                               tensorType,
-                              0,
+                              1,
                               flatBufferBuilder.CreateString("input"),
                               quantizationParameters);
 
     // Create output tensor
     for (unsigned int i = 0; i < outputTensorShapes.size(); ++i)
     {
+        buffers.push_back(CreateBuffer(flatBufferBuilder));
         tensors[i + 2] = CreateTensor(flatBufferBuilder,
-                                  flatBufferBuilder.CreateVector<int32_t>(outputTensorShapes[i].data(),
-                                                                          outputTensorShapes[i].size()),
-                                  tensorType,
-                                  0,
-                                  flatBufferBuilder.CreateString("output"),
-                                  quantizationParameters);
+                                      flatBufferBuilder.CreateVector<int32_t>(outputTensorShapes[i].data(),
+                                                                              outputTensorShapes[i].size()),
+                                      tensorType,
+                                      (i+3),
+                                      flatBufferBuilder.CreateString("output"),
+                                      quantizationParameters);
     }
 
     // create operator. Mean uses ReducerOptions.
@@ -109,7 +111,7 @@ std::vector<char> CreateSplitTfLiteModel(tflite::TensorType tensorType,
                     flatBufferBuilder.CreateVector(&operatorCode, 1),
                     flatBufferBuilder.CreateVector(&subgraph, 1),
                     modelDescription,
-                    flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
+                    flatBufferBuilder.CreateVector(buffers));
 
     flatBufferBuilder.Finish(flatbufferModel);
 
@@ -144,21 +146,21 @@ void SplitTest(tflite::TensorType tensorType,
     // Create TfLite Interpreters
     std::unique_ptr<Interpreter> armnnDelegate;
     CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-              (&armnnDelegate) == kTfLiteOk);
+          (&armnnDelegate) == kTfLiteOk);
     CHECK(armnnDelegate != nullptr);
     CHECK(armnnDelegate->AllocateTensors() == kTfLiteOk);
 
     std::unique_ptr<Interpreter> tfLiteDelegate;
     CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-              (&tfLiteDelegate) == kTfLiteOk);
+          (&tfLiteDelegate) == kTfLiteOk);
     CHECK(tfLiteDelegate != nullptr);
     CHECK(tfLiteDelegate->AllocateTensors() == kTfLiteOk);
 
     // Create the ArmNN Delegate
     armnnDelegate::DelegateOptions delegateOptions(backends);
     std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
-                        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
-                                         armnnDelegate::TfLiteArmnnDelegateDelete);
+        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
+                         armnnDelegate::TfLiteArmnnDelegateDelete);
     CHECK(theArmnnDelegate != nullptr);
 
     // Modify armnnDelegateInterpreter to use armnnDelegate
@@ -210,11 +212,11 @@ std::vector<char> CreateSplitVTfLiteModel(tflite::TensorType tensorType,
                                                    sizeof(int32_t) * axisData.size()));
 
     auto quantizationParameters =
-            CreateQuantizationParameters(flatBufferBuilder,
-                                         0,
-                                         0,
-                                         flatBufferBuilder.CreateVector<float>({ quantScale }),
-                                         flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));
+        CreateQuantizationParameters(flatBufferBuilder,
+                                     0,
+                                     0,
+                                     flatBufferBuilder.CreateVector<float>({ quantScale }),
+                                     flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));
 
     std::array<flatbuffers::Offset<Tensor>, 5> tensors;
     tensors[0] = CreateTensor(flatBufferBuilder,
@@ -258,33 +260,33 @@ std::vector<char> CreateSplitVTfLiteModel(tflite::TensorType tensorType,
     const std::vector<int32_t> operatorInputs{ {0, 1, 2} };
     const std::vector<int32_t> operatorOutputs{ {3, 4} };
     flatbuffers::Offset<Operator> controlOperator =
-            CreateOperator(flatBufferBuilder,
-                           0,
-                           flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
-                           flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
-                           operatorBuiltinOptionsType,
-                           operatorBuiltinOptions);
+        CreateOperator(flatBufferBuilder,
+                       0,
+                       flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
+                       operatorBuiltinOptionsType,
+                       operatorBuiltinOptions);
 
     const std::vector<int32_t> subgraphInputs{ {0, 1, 2} };
     const std::vector<int32_t> subgraphOutputs{ {3, 4} };
     flatbuffers::Offset<SubGraph> subgraph =
-            CreateSubGraph(flatBufferBuilder,
-                           flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
-                           flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
-                           flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
-                           flatBufferBuilder.CreateVector(&controlOperator, 1));
+        CreateSubGraph(flatBufferBuilder,
+                       flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
+                       flatBufferBuilder.CreateVector(&controlOperator, 1));
 
     flatbuffers::Offset<flatbuffers::String> modelDescription =
-            flatBufferBuilder.CreateString("ArmnnDelegate: SPLIT_V Operator Model");
+        flatBufferBuilder.CreateString("ArmnnDelegate: SPLIT_V Operator Model");
     flatbuffers::Offset<OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder,
                                                                         BuiltinOperator_SPLIT_V);
 
     flatbuffers::Offset<Model> flatbufferModel =
-            CreateModel(flatBufferBuilder,
-                        TFLITE_SCHEMA_VERSION,
-                        flatBufferBuilder.CreateVector(&operatorCode, 1),
-                        flatBufferBuilder.CreateVector(&subgraph, 1),
-                        modelDescription,
-                        flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
+        CreateModel(flatBufferBuilder,
+                    TFLITE_SCHEMA_VERSION,
+                    flatBufferBuilder.CreateVector(&operatorCode, 1),
+                    flatBufferBuilder.CreateVector(&subgraph, 1),
+                    modelDescription,
+                    flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
 
     flatBufferBuilder.Finish(flatbufferModel);
-- 
cgit v1.2.1
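
The buffer convention described in the commit message can be seen in isolation below. This is a minimal standalone sketch, not part of the patch: the function name MakeBuffers is hypothetical, and it assumes the generated TfLite flatbuffers schema API (tflite::CreateBuffer from tensorflow/lite/schema/schema_generated.h) that the test helper above already uses.

    #include <flatbuffers/flatbuffers.h>
    #include <tensorflow/lite/schema/schema_generated.h>
    #include <cstdint>
    #include <vector>

    // Sketch of the buffer layout the patch adopts for a model with one
    // constant tensor (e.g. the split axis) plus empty input/output buffers.
    std::vector<flatbuffers::Offset<tflite::Buffer>> MakeBuffers(
        flatbuffers::FlatBufferBuilder& builder,
        const std::vector<int32_t>& axisData)
    {
        std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;

        // Buffer 0 is reserved by tensorflow as the sentinel "no data"
        // buffer, so it is created empty, from the builder alone.
        buffers.push_back(tflite::CreateBuffer(builder));

        // Empty buffers for input/output tensors likewise take only the
        // builder. Passing builder.CreateVector({}) instead would make
        // tflite assume the number of bytes to allocate is already known.
        buffers.push_back(tflite::CreateBuffer(builder));

        // A constant tensor's buffer still carries its actual bytes.
        buffers.push_back(tflite::CreateBuffer(
            builder,
            builder.CreateVector(reinterpret_cast<const uint8_t*>(axisData.data()),
                                 sizeof(int32_t) * axisData.size())));
        return buffers;
    }

Each tensor's buffer index must then point at the matching entry in this vector, which is why the patch also renumbers the tensor buffer indices (axis 2, input 1, outputs i+3) to skip the reserved buffer 0.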