From ebe392df1635790bf21714549adb97f2f75559e1 Mon Sep 17 00:00:00 2001
From: Matthew Sloyan
Date: Thu, 30 Mar 2023 10:12:08 +0100
Subject: IVGCVSW-7562 Implement DelegateTestInterpreter for classic delegate

 * Updated all tests to use the new DelegateTestInterpreter.
 * Fixed some unit tests where the tensor shape was incorrect.
 * Added the file identifier to the FlatBufferBuilder Finish() call, as it is
   required for validation when creating the model using the new API.

Signed-off-by: Matthew Sloyan
Change-Id: I1c4f5464367b35d4528571fa94d14bfaef18fb4d
---
 delegate/test/DelegateOptionsTestHelper.hpp | 135 +++++++++++-----------------
 1 file changed, 53 insertions(+), 82 deletions(-)
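A note on the file-identifier change below: a FlatBuffers file identifier is a
four-character tag written into the buffer header, and the TFLite schema
declares "TFL3". The new API validates the model buffer before building an
interpreter from it, so a model finished without the tag is rejected. A
minimal sketch of the pattern, assuming armnnDelegate::FILE_IDENTIFIER
resolves to the same tag that tflite::ModelIdentifier() returns (the helper
below is illustrative, not part of this patch):

    #include <cassert>
    #include <vector>

    #include <flatbuffers/flatbuffers.h>
    #include <tensorflow/lite/schema/schema_generated.h>

    // Finish a model buffer with the schema's file identifier so that
    // verification-based loaders accept it.
    std::vector<char> FinishModelBuffer(flatbuffers::FlatBufferBuilder& builder,
                                        flatbuffers::Offset<tflite::Model> model)
    {
        // Finish() without the second argument omits the identifier, and
        // tflite::VerifyModelBuffer() then returns false, because it checks
        // the buffer against tflite::ModelIdentifier() ("TFL3").
        builder.Finish(model, tflite::ModelIdentifier());

        flatbuffers::Verifier verifier(builder.GetBufferPointer(), builder.GetSize());
        assert(tflite::VerifyModelBuffer(verifier));

        return std::vector<char>(builder.GetBufferPointer(),
                                 builder.GetBufferPointer() + builder.GetSize());
    }
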
diff --git a/delegate/test/DelegateOptionsTestHelper.hpp b/delegate/test/DelegateOptionsTestHelper.hpp
index fb5403c7de..b6974c9fb6 100644
--- a/delegate/test/DelegateOptionsTestHelper.hpp
+++ b/delegate/test/DelegateOptionsTestHelper.hpp
@@ -5,17 +5,17 @@
 
 #pragma once
 
-#include <armnn_delegate.hpp>
-
 #include "TestUtils.hpp"
 
+#include <armnn_delegate.hpp>
+#include <DelegateTestInterpreter.hpp>
+
 #include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
 #include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <tensorflow/lite/schema/schema_generated.h>
 #include <tensorflow/lite/version.h>
+#include <schema_generated.h>
+
 #include <doctest/doctest.h>
 
 namespace
 {
@@ -146,7 +146,7 @@ std::vector<char> CreateAddDivTfLiteModel(tflite::TensorType tensorType,
                     modelDescription,
                     flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
 
-    flatBufferBuilder.Finish(flatbufferModel);
+    flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);
 
     return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                              flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
@@ -218,14 +218,13 @@ std::vector<char> CreateCeilTfLiteModel(tflite::TensorType tensorType,
                     modelDescription,
                     flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
 
-    flatBufferBuilder.Finish(flatbufferModel);
+    flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);
 
     return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                              flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
 }
 
 template <typename T>
 void DelegateOptionTest(tflite::TensorType tensorType,
-                        const std::vector<armnn::BackendId>& backends,
                         std::vector<int32_t>& tensorShape,
                         std::vector<T>& input0Values,
                         std::vector<T>& input1Values,
@@ -235,55 +234,41 @@ void DelegateOptionTest(tflite::TensorType tensorType,
                         float quantScale = 1.0f,
                         int quantOffset = 0)
 {
-    using namespace tflite;
+    using namespace delegateTestInterpreter;
     std::vector<char> modelBuffer = CreateAddDivTfLiteModel(tensorType,
                                                             tensorShape,
                                                             quantScale,
                                                             quantOffset);
-    const Model* tfLiteModel = GetModel(modelBuffer.data());
-    // Create TfLite Interpreters
-    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
-    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-              (&armnnDelegateInterpreter) == kTfLiteOk);
-    CHECK(armnnDelegateInterpreter != nullptr);
-    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
-
-    std::unique_ptr<Interpreter> tfLiteInterpreter;
-    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-              (&tfLiteInterpreter) == kTfLiteOk);
-    CHECK(tfLiteInterpreter != nullptr);
-    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
-
-    // Create the ArmNN Delegate
-    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
-        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
-                         armnnDelegate::TfLiteArmnnDelegateDelete);
-    CHECK(theArmnnDelegate != nullptr);
-    // Modify armnnDelegateInterpreter to use armnnDelegate
-    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
-    // Set input data
-    armnnDelegate::FillInput(tfLiteInterpreter, 0, input0Values);
-    armnnDelegate::FillInput(tfLiteInterpreter, 1, input1Values);
-    armnnDelegate::FillInput(tfLiteInterpreter, 2, input2Values);
-
-    armnnDelegate::FillInput(armnnDelegateInterpreter, 0, input0Values);
-    armnnDelegate::FillInput(armnnDelegateInterpreter, 1, input1Values);
-    armnnDelegate::FillInput(armnnDelegateInterpreter, 2, input2Values);
-
-    // Run EnqueueWorkload
-    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
-    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
-
-    armnnDelegate::CompareOutputData(tfLiteInterpreter, armnnDelegateInterpreter, tensorShape, expectedOutputValues);
-
-    armnnDelegateInterpreter.reset(nullptr);
+    // Setup interpreter with just TFLite Runtime.
+    auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
+    CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
+    CHECK(tfLiteInterpreter.FillInputTensor<T>(input0Values, 0) == kTfLiteOk);
+    CHECK(tfLiteInterpreter.FillInputTensor<T>(input1Values, 1) == kTfLiteOk);
+    CHECK(tfLiteInterpreter.FillInputTensor<T>(input2Values, 2) == kTfLiteOk);
+    CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
+    std::vector<T>       tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<T>(0);
+    std::vector<int32_t> tfLiteOutputShape  = tfLiteInterpreter.GetOutputShape(0);
+
+    // Setup interpreter with Arm NN Delegate applied.
+    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, delegateOptions);
+    CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
+    CHECK(armnnInterpreter.FillInputTensor<T>(input0Values, 0) == kTfLiteOk);
+    CHECK(armnnInterpreter.FillInputTensor<T>(input1Values, 1) == kTfLiteOk);
+    CHECK(armnnInterpreter.FillInputTensor<T>(input2Values, 2) == kTfLiteOk);
+    CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
+    std::vector<T>       armnnOutputValues = armnnInterpreter.GetOutputResult<T>(0);
+    std::vector<int32_t> armnnOutputShape  = armnnInterpreter.GetOutputShape(0);
+
+    armnnDelegate::CompareOutputData<T>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
+    armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, tensorShape);
+
+    tfLiteInterpreter.Cleanup();
+    armnnInterpreter.Cleanup();
 }
 
 template <typename T>
 void DelegateOptionNoFallbackTest(tflite::TensorType tensorType,
-                                  const std::vector<armnn::BackendId>& backends,
                                   std::vector<int32_t>& tensorShape,
                                   std::vector<T>& inputValues,
                                   std::vector<T>& expectedOutputValues,
@@ -291,53 +276,39 @@ void DelegateOptionNoFallbackTest(tflite::TensorType tensorType,
                                   float quantScale = 1.0f,
                                   int quantOffset = 0)
 {
-    using namespace tflite;
+    using namespace delegateTestInterpreter;
    std::vector<char> modelBuffer = CreateCeilTfLiteModel(tensorType,
                                                           tensorShape,
                                                           quantScale,
                                                           quantOffset);
-    const Model* tfLiteModel = GetModel(modelBuffer.data());
-    // Create TfLite Interpreters
-    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
-    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-              (&armnnDelegateInterpreter) == kTfLiteOk);
-    CHECK(armnnDelegateInterpreter != nullptr);
-    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
-
-    std::unique_ptr<Interpreter> tfLiteInterpreter;
-    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-              (&tfLiteInterpreter) == kTfLiteOk);
-    CHECK(tfLiteInterpreter != nullptr);
-    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
-
-    // Create the ArmNN Delegate
-    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
-        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
-                         armnnDelegate::TfLiteArmnnDelegateDelete);
-    CHECK(theArmnnDelegate != nullptr);
-    // Modify armnnDelegateInterpreter to use armnnDelegate
+    // Setup interpreter with just TFLite Runtime.
+    auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
+    CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
+    CHECK(tfLiteInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
+    CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
+    std::vector<T>       tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<T>(0);
+    std::vector<int32_t> tfLiteOutputShape  = tfLiteInterpreter.GetOutputShape(0);
+    tfLiteInterpreter.Cleanup();
+
     try
     {
-        armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get());
+        auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, delegateOptions);
+        CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
+        CHECK(armnnInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
+        CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
+        std::vector<T>       armnnOutputValues = armnnInterpreter.GetOutputResult<T>(0);
+        std::vector<int32_t> armnnOutputShape  = armnnInterpreter.GetOutputShape(0);
+        armnnInterpreter.Cleanup();
+
+        armnnDelegate::CompareOutputData<T>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
+        armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, tensorShape);
    }
    catch (const armnn::Exception& e)
    {
        // Forward the exception message to std::cout
        std::cout << e.what() << std::endl;
    }
-
-    // Set input data
-    armnnDelegate::FillInput(tfLiteInterpreter, 0, inputValues);
-    armnnDelegate::FillInput(armnnDelegateInterpreter, 0, inputValues);
-
-    // Run EnqueueWorkload
-    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
-    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
-
-    armnnDelegate::CompareOutputData(tfLiteInterpreter, armnnDelegateInterpreter, tensorShape, expectedOutputValues);
-
-    armnnDelegateInterpreter.reset(nullptr);
 }
 
 } // anonymous namespace
\ No newline at end of file
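
Usage note (a sketch, not part of this patch): the reworked helpers are driven
by the delegate options test cases that include this header. A call to the
no-fallback variant would look roughly like the following, assuming
DelegateOptions exposes the Compute-based constructor and the
DisableTfLiteRuntimeFallback() setter used by the existing options tests;
DelegateOptionTest follows the same pattern with the Add/Div model and three
input tensors.

    #include "DelegateOptionsTestHelper.hpp"

    // Hypothetical caller (data values are illustrative). With fallback
    // disabled, a layer the Arm NN backend rejects surfaces as an
    // armnn::Exception, which the helper forwards to std::cout instead of
    // comparing outputs.
    void NoFallbackCeilExample()
    {
        std::vector<int32_t> tensorShape { 1, 2, 2, 1 };
        std::vector<float>   inputValues { 1.1f, -2.5f, 3.9f, -4.0f };
        // Expected results are the ceiling of each input value.
        std::vector<float>   expectedOutputValues { 2.0f, -2.0f, 4.0f, -4.0f };

        armnnDelegate::DelegateOptions delegateOptions(armnn::Compute::CpuRef);
        delegateOptions.DisableTfLiteRuntimeFallback(true);

        DelegateOptionNoFallbackTest<float>(tflite::TensorType_FLOAT32,
                                            tensorShape,
                                            inputValues,
                                            expectedOutputValues,
                                            delegateOptions);
    }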