From ebe392df1635790bf21714549adb97f2f75559e1 Mon Sep 17 00:00:00 2001
From: Matthew Sloyan
Date: Thu, 30 Mar 2023 10:12:08 +0100
Subject: IVGCVSW-7562 Implement DelegateTestInterpreter for classic delegate

 * Updated all tests to use the new DelegateTestInterpreter.
 * Fixed some unit tests where the shape was incorrect.
 * Added a file identifier to the FlatBufferBuilder, as it is required for
   validation when creating the model using the new API.

Signed-off-by: Matthew Sloyan
Change-Id: I1c4f5464367b35d4528571fa94d14bfaef18fb4d
---
 delegate/CMakeLists.txt                            |   4 +
 .../classic/src/test/DelegateTestInterpreter.cpp   |  74 +++++++
 .../common/src/test/DelegateTestInterpreter.hpp    | 175 ++++++++++++++++
 .../src/test/DelegateTestInterpreterUtils.hpp      | 110 ++++++++++
 delegate/test/ActivationTestHelper.hpp             |  76 +++----
 delegate/test/ArgMinMaxTestHelper.hpp              |  75 +++----
 delegate/test/BatchMatMulTestHelper.hpp            |  89 +++-----
 delegate/test/BatchSpaceTestHelper.hpp             |  72 +++----
 delegate/test/CastTestHelper.hpp                   |  73 +++----
 delegate/test/ComparisonTestHelper.hpp             | 100 +++------
 delegate/test/ControlTestHelper.hpp                | 133 +++++-------
 delegate/test/ConvolutionTestHelper.hpp            | 225 +++++++--------------
 delegate/test/DelegateOptionsTest.cpp              |  11 +-
 delegate/test/DelegateOptionsTestHelper.hpp        | 135 +++++--------
 delegate/test/DepthwiseConvolution2dTest.cpp       |   2 +-
 delegate/test/ElementwiseBinaryTestHelper.hpp      |  74 +++----
 delegate/test/ElementwiseUnaryTestHelper.hpp       | 138 +++++--------
 delegate/test/FillTestHelper.hpp                   |  58 +++---
 delegate/test/FullyConnectedTestHelper.hpp         |  82 ++++----
 delegate/test/GatherNdTestHelper.hpp               |  77 +++----
 delegate/test/GatherTestHelper.hpp                 |  77 +++----
 delegate/test/LogicalTest.cpp                      |  72 +++----
 delegate/test/LogicalTestHelper.hpp                |  91 ++++-----
 delegate/test/LstmTestHelper.hpp                   |  85 +++-----
 delegate/test/NormalizationTestHelper.hpp          |  65 +++---
 delegate/test/PackTestHelper.hpp                   |  66 +++---
 delegate/test/PadTestHelper.hpp                    |  64 +++---
 delegate/test/Pooling2dTestHelper.hpp              |  75 +++----
 delegate/test/Pooling3dTestHelper.hpp              | 102 +++-------
 delegate/test/PreluTest.cpp                        |   3 +-
 delegate/test/PreluTestHelper.hpp                  |  80 +++-----
 delegate/test/QuantizationTestHelper.hpp           |  89 +++-----
 delegate/test/RedefineTestHelper.hpp               |  64 +++---
 delegate/test/ReduceTestHelper.hpp                 |  74 +++----
 delegate/test/ResizeTest.cpp                       |   4 +-
 delegate/test/ResizeTestHelper.hpp                 |  85 +++-----
 delegate/test/RoundTestHelper.hpp                  |  73 +++----
 delegate/test/ShapeTestHelper.hpp                  |  75 +++----
 delegate/test/SliceTestHelper.hpp                  |  75 +++----
 delegate/test/SoftmaxTestHelper.hpp                |  86 +++-----
 delegate/test/SpaceDepthTestHelper.hpp             |  64 +++---
 delegate/test/SplitTestHelper.hpp                  | 138 +++++--------
 delegate/test/StridedSliceTestHelper.hpp           |  75 +++----
 delegate/test/TestUtils.cpp                        |  44 +---
 delegate/test/TestUtils.hpp                        |  58 ++----
 delegate/test/TransposeTest.cpp                    |  29 ++-
 delegate/test/TransposeTestHelper.hpp              | 129 ++++--------
 .../test/UnidirectionalSequenceLstmTestHelper.hpp  |  88 +++-----
 delegate/test/UnpackTestHelper.hpp                 |  70 +++---
 49 files changed, 1624 insertions(+), 2259 deletions(-)
 create mode 100644 delegate/classic/src/test/DelegateTestInterpreter.cpp
 create mode 100644 delegate/common/src/test/DelegateTestInterpreter.hpp
 create mode 100644 delegate/common/src/test/DelegateTestInterpreterUtils.hpp

diff --git a/delegate/CMakeLists.txt b/delegate/CMakeLists.txt
index 433cee6743..73df68fc4c 100644
--- a/delegate/CMakeLists.txt
+++ b/delegate/CMakeLists.txt
@@ -143,6 +143,9 @@ if(BUILD_UNIT_TESTS AND BUILD_CLASSIC_DELEGATE)
         test/ConvolutionTestHelper.hpp
        test/DelegateOptionsTest.cpp
        test/DelegateOptionsTestHelper.hpp
+        classic/src/test/DelegateTestInterpreter.cpp
+        common/src/test/DelegateTestInterpreter.hpp
+        common/src/test/DelegateTestInterpreterUtils.hpp
        test/DepthwiseConvolution2dTest.cpp
        test/ElementwiseBinaryTest.cpp
        test/ElementwiseBinaryTestHelper.hpp
@@ -236,6 +239,7 @@ if(BUILD_UNIT_TESTS AND BUILD_CLASSIC_DELEGATE)
     add_executable(DelegateUnitTests ${armnnDelegate_unittest_sources})
     target_include_directories(DelegateUnitTests SYSTEM PRIVATE "${TF_LITE_SCHEMA_INCLUDE_PATH}")
+    target_include_directories(DelegateUnitTests SYSTEM PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/common/src/test")

     # Add half library from armnn third-party libraries
     target_link_libraries(DelegateUnitTests PRIVATE thirdparty_headers)
diff --git a/delegate/classic/src/test/DelegateTestInterpreter.cpp b/delegate/classic/src/test/DelegateTestInterpreter.cpp
new file mode 100644
index 0000000000..45b6cd0932
--- /dev/null
+++ b/delegate/classic/src/test/DelegateTestInterpreter.cpp
@@ -0,0 +1,74 @@
+//
+// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include <DelegateTestInterpreter.hpp>
+
+#include <armnn_delegate.hpp>
+
+namespace delegateTestInterpreter
+{
+
+DelegateTestInterpreter::DelegateTestInterpreter(std::vector<char>& modelBuffer,
+                                                 const std::vector<armnn::BackendId>& backends,
+                                                 const std::string& customOp,
+                                                 bool disableFallback)
+{
+    TfLiteModel* tfLiteModel = delegateTestInterpreter::CreateTfLiteModel(modelBuffer);
+
+    TfLiteInterpreterOptions* options = delegateTestInterpreter::CreateTfLiteInterpreterOptions();
+    if (!customOp.empty())
+    {
+        options->mutable_op_resolver = delegateTestInterpreter::GenerateCustomOpResolver(customOp);
+    }
+
+    // Disable fallback by default for unit tests unless specified.
+    armnnDelegate::DelegateOptions delegateOptions(backends);
+    delegateOptions.DisableTfLiteRuntimeFallback(disableFallback);
+
+    auto armnnDelegate = armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions);
+    TfLiteInterpreterOptionsAddDelegate(options, armnnDelegate);
+
+    m_TfLiteDelegate = armnnDelegate;
+    m_TfLiteInterpreter = TfLiteInterpreterCreate(tfLiteModel, options);
+
+    // The options and model can be deleted after the interpreter is created.
+    TfLiteInterpreterOptionsDelete(options);
+    TfLiteModelDelete(tfLiteModel);
+}
+
+DelegateTestInterpreter::DelegateTestInterpreter(std::vector<char>& modelBuffer,
+                                                 const armnnDelegate::DelegateOptions& delegateOptions,
+                                                 const std::string& customOp)
+{
+    TfLiteModel* tfLiteModel = delegateTestInterpreter::CreateTfLiteModel(modelBuffer);
+
+    TfLiteInterpreterOptions* options = delegateTestInterpreter::CreateTfLiteInterpreterOptions();
+    if (!customOp.empty())
+    {
+        options->mutable_op_resolver = delegateTestInterpreter::GenerateCustomOpResolver(customOp);
+    }
+
+    auto armnnDelegate = armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions);
+    TfLiteInterpreterOptionsAddDelegate(options, armnnDelegate);
+
+    m_TfLiteDelegate = armnnDelegate;
+    m_TfLiteInterpreter = TfLiteInterpreterCreate(tfLiteModel, options);
+
+    // The options and model can be deleted after the interpreter is created.
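+    // Note: the Arm NN delegate itself is intentionally not deleted here; it must outlive the interpreter and is released later in Cleanup().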
+    TfLiteInterpreterOptionsDelete(options);
+    TfLiteModelDelete(tfLiteModel);
+}
+
+void DelegateTestInterpreter::Cleanup()
+{
+    TfLiteInterpreterDelete(m_TfLiteInterpreter);
+
+    if (m_TfLiteDelegate)
+    {
+        armnnDelegate::TfLiteArmnnDelegateDelete(static_cast<TfLiteDelegate*>(m_TfLiteDelegate));
+    }
+}
+
+} // namespace delegateTestInterpreter
\ No newline at end of file
diff --git a/delegate/common/src/test/DelegateTestInterpreter.hpp b/delegate/common/src/test/DelegateTestInterpreter.hpp
new file mode 100644
index 0000000000..0b63441ddd
--- /dev/null
+++ b/delegate/common/src/test/DelegateTestInterpreter.hpp
@@ -0,0 +1,175 @@
+//
+// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <DelegateTestInterpreterUtils.hpp>
+
+#include <armnn_delegate.hpp>
+
+#include <armnn/BackendId.hpp>
+#include <armnn/Exceptions.hpp>
+
+#include <tensorflow/lite/c/c_api.h>
+#include <tensorflow/lite/c/common.h>
+#include <tensorflow/lite/kernels/custom_ops_register.h>
+#include <tensorflow/lite/kernels/kernel_util.h>
+#include <tensorflow/lite/kernels/register.h>
+
+namespace delegateTestInterpreter
+{
+
+class DelegateTestInterpreter
+{
+public:
+    /// Create TfLite Interpreter only
+    DelegateTestInterpreter(std::vector<char>& modelBuffer, const std::string& customOp = "")
+    {
+        TfLiteModel* model = delegateTestInterpreter::CreateTfLiteModel(modelBuffer);
+
+        TfLiteInterpreterOptions* options = delegateTestInterpreter::CreateTfLiteInterpreterOptions();
+        if (!customOp.empty())
+        {
+            options->mutable_op_resolver = delegateTestInterpreter::GenerateCustomOpResolver(customOp);
+        }
+
+        m_TfLiteInterpreter = TfLiteInterpreterCreate(model, options);
+        m_TfLiteDelegate = nullptr;
+
+        // The options and model can be deleted after the interpreter is created.
+        TfLiteInterpreterOptionsDelete(options);
+        TfLiteModelDelete(model);
+    }
+
+    /// Create Interpreter with default Arm NN Classic/Opaque Delegate applied
+    DelegateTestInterpreter(std::vector<char>& model,
+                            const std::vector<armnn::BackendId>& backends,
+                            const std::string& customOp = "",
+                            bool disableFallback = true);
+
+    /// Create Interpreter with Arm NN Classic/Opaque Delegate applied and DelegateOptions
+    DelegateTestInterpreter(std::vector<char>& model,
+                            const armnnDelegate::DelegateOptions& delegateOptions,
+                            const std::string& customOp = "");
+
+    /// Allocate the TfLiteTensors within the graph.
+    /// This must be called before FillInputTensor(values, index) and Invoke().
+    TfLiteStatus AllocateTensors()
+    {
+        return TfLiteInterpreterAllocateTensors(m_TfLiteInterpreter);
+    }
+
+    /// Copy a buffer of values into an input tensor at a given index.
+    template <typename T>
+    TfLiteStatus FillInputTensor(std::vector<T>& inputValues, int index)
+    {
+        TfLiteTensor* inputTensor = delegateTestInterpreter::GetInputTensorFromInterpreter(m_TfLiteInterpreter, index);
+        return delegateTestInterpreter::CopyFromBufferToTensor(inputTensor, inputValues);
+    }
+
+    /// Copy a boolean buffer of values into an input tensor at a given index.
+    /// Boolean types get converted to a bit representation in a std::vector<bool>.
+    /// vector.data() returns a void pointer instead of a pointer to bool, so the tensor needs to be accessed directly.
+    TfLiteStatus FillInputTensor(std::vector<bool>& inputValues, int index)
+    {
+        TfLiteTensor* inputTensor = delegateTestInterpreter::GetInputTensorFromInterpreter(m_TfLiteInterpreter, index);
+        if(inputTensor->type != kTfLiteBool)
+        {
+            throw armnn::Exception("Input tensor at the given index is not of bool type: " + std::to_string(index));
+        }
+
+        // Make sure there are enough bytes allocated to copy into.
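+        // This check assumes one byte per element, which holds for kTfLiteBool storage and for sizeof(bool) on common ABIs.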
+        if(inputTensor->bytes < inputValues.size() * sizeof(bool))
+        {
+            throw armnn::Exception("Input tensor has not been allocated to match number of input values.");
+        }
+
+        for (unsigned int i = 0; i < inputValues.size(); ++i)
+        {
+            inputTensor->data.b[i] = inputValues[i];
+        }
+
+        return kTfLiteOk;
+    }
+
+    /// Run the interpreter either on TFLite Runtime or Arm NN Delegate.
+    /// AllocateTensors() must be called before Invoke().
+    TfLiteStatus Invoke()
+    {
+        return TfLiteInterpreterInvoke(m_TfLiteInterpreter);
+    }
+
+    /// Return a buffer of values from the output tensor at a given index.
+    /// This must be called after Invoke().
+    template <typename T>
+    std::vector<T> GetOutputResult(int index)
+    {
+        const TfLiteTensor* outputTensor =
+                delegateTestInterpreter::GetOutputTensorFromInterpreter(m_TfLiteInterpreter, index);
+
+        int64_t n = tflite::NumElements(outputTensor);
+        std::vector<T> output;
+        output.resize(n);
+
+        TfLiteStatus status = TfLiteTensorCopyToBuffer(outputTensor, output.data(), output.size() * sizeof(T));
+        if(status != kTfLiteOk)
+        {
+            throw armnn::Exception("An error occurred when copying output buffer.");
+        }
+
+        return output;
+    }
+
+    /// Return a buffer of values from the output tensor at a given index. This must be called after Invoke().
+    /// Boolean types get converted to a bit representation in a std::vector<bool>.
+    /// vector.data() returns a void pointer instead of a pointer to bool, so the tensor needs to be accessed directly.
+    std::vector<bool> GetOutputResult(int index)
+    {
+        const TfLiteTensor* outputTensor =
+                delegateTestInterpreter::GetOutputTensorFromInterpreter(m_TfLiteInterpreter, index);
+        if(outputTensor->type != kTfLiteBool)
+        {
+            throw armnn::Exception("Output tensor at the given index is not of bool type: " + std::to_string(index));
+        }
+
+        int64_t n = tflite::NumElements(outputTensor);
+        std::vector<bool> output(n, false);
+        output.reserve(n);
+
+        for (unsigned int i = 0; i < output.size(); ++i)
+        {
+            output[i] = outputTensor->data.b[i];
+        }
+        return output;
+    }
+
+    /// Return a buffer of dimensions from the output tensor at a given index.
+    std::vector<int32_t> GetOutputShape(int index)
+    {
+        const TfLiteTensor* outputTensor =
+                delegateTestInterpreter::GetOutputTensorFromInterpreter(m_TfLiteInterpreter, index);
+        int32_t numDims = TfLiteTensorNumDims(outputTensor);
+
+        std::vector<int32_t> dims;
+        dims.reserve(numDims);
+
+        for (int32_t i = 0; i < numDims; ++i)
+        {
+            dims.push_back(TfLiteTensorDim(outputTensor, i));
+        }
+        return dims;
+    }
+
+    /// Delete TfLiteInterpreter and the TfLiteDelegate/TfLiteOpaqueDelegate
+    void Cleanup();
+
+private:
+    TfLiteInterpreter* m_TfLiteInterpreter;
+
+    /// m_TfLiteDelegate can be TfLiteDelegate or TfLiteOpaqueDelegate
+    void* m_TfLiteDelegate;
+};
+
+} // namespace delegateTestInterpreter
\ No newline at end of file
diff --git a/delegate/common/src/test/DelegateTestInterpreterUtils.hpp b/delegate/common/src/test/DelegateTestInterpreterUtils.hpp
new file mode 100644
index 0000000000..396c75c22e
--- /dev/null
+++ b/delegate/common/src/test/DelegateTestInterpreterUtils.hpp
@@ -0,0 +1,110 @@
+//
+// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <armnn/Exceptions.hpp>
+
+#include <tensorflow/lite/c/c_api.h>
+#include <tensorflow/lite/kernels/custom_ops_register.h>
+#include <tensorflow/lite/kernels/register.h>
+
+#include <type_traits>
+
+namespace delegateTestInterpreter
+{
+
+inline TfLiteTensor* GetInputTensorFromInterpreter(TfLiteInterpreter* interpreter, int index)
+{
+    TfLiteTensor* inputTensor = TfLiteInterpreterGetInputTensor(interpreter, index);
+    if(inputTensor == nullptr)
+    {
+        throw armnn::Exception("Input tensor was not found at the given index: " + std::to_string(index));
+    }
+    return inputTensor;
+}
+
+inline const TfLiteTensor* GetOutputTensorFromInterpreter(TfLiteInterpreter* interpreter, int index)
+{
+    const TfLiteTensor* outputTensor = TfLiteInterpreterGetOutputTensor(interpreter, index);
+    if(outputTensor == nullptr)
+    {
+        throw armnn::Exception("Output tensor was not found at the given index: " + std::to_string(index));
+    }
+    return outputTensor;
+}
+
+inline TfLiteModel* CreateTfLiteModel(std::vector<char>& data)
+{
+    TfLiteModel* tfLiteModel = TfLiteModelCreate(data.data(), data.size());
+    if(tfLiteModel == nullptr)
+    {
+        throw armnn::Exception("An error has occurred when creating the TfLiteModel.");
+    }
+    return tfLiteModel;
+}
+
+inline TfLiteInterpreterOptions* CreateTfLiteInterpreterOptions()
+{
+    TfLiteInterpreterOptions* options = TfLiteInterpreterOptionsCreate();
+    if(options == nullptr)
+    {
+        throw armnn::Exception("An error has occurred when creating the TfLiteInterpreterOptions.");
+    }
+    return options;
+}
+
+inline tflite::ops::builtin::BuiltinOpResolver GenerateCustomOpResolver(const std::string& opName)
+{
+    tflite::ops::builtin::BuiltinOpResolver opResolver;
+    if (opName == "MaxPool3D")
+    {
+        opResolver.AddCustom("MaxPool3D", tflite::ops::custom::Register_MAX_POOL_3D());
+    }
+    else if (opName == "AveragePool3D")
+    {
+        opResolver.AddCustom("AveragePool3D", tflite::ops::custom::Register_AVG_POOL_3D());
+    }
+    else
+    {
+        throw armnn::Exception("The custom op isn't supported by the DelegateTestInterpreter.");
+    }
+    return opResolver;
+}
+
+template <typename T>
+inline TfLiteStatus CopyFromBufferToTensor(TfLiteTensor* tensor, std::vector<T>& values)
+{
+    // Make sure there are enough bytes allocated to copy into, for the uint8_t and int16_t cases.
+    if(tensor->bytes < values.size() * sizeof(T))
+    {
+        throw armnn::Exception("Tensor has not been allocated to match number of values.");
+    }
+
+    // A uint8_t- and int16_t-specific path is required, as the number of bytes in the tensor is larger than the
+    // size of the values passed when creating TFLite tensors of these types. Otherwise, the generic
+    // TfLiteTensorCopyFromBuffer function is used.
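+    // (TfLiteTensorCopyFromBuffer returns kTfLiteError whenever tensor->bytes differs from the buffer size, hence the element-wise copies below.)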
+    TfLiteStatus status = kTfLiteOk;
+    if (std::is_same<T, uint8_t>::value)
+    {
+        for (unsigned int i = 0; i < values.size(); ++i)
+        {
+            tensor->data.uint8[i] = values[i];
+        }
+    }
+    else if (std::is_same<T, int16_t>::value)
+    {
+        for (unsigned int i = 0; i < values.size(); ++i)
+        {
+            tensor->data.i16[i] = values[i];
+        }
+    }
+    else
+    {
+        status = TfLiteTensorCopyFromBuffer(tensor, values.data(), values.size() * sizeof(T));
+    }
+    return status;
+}
+
+} // namespace delegateTestInterpreter
\ No newline at end of file
diff --git a/delegate/test/ActivationTestHelper.hpp b/delegate/test/ActivationTestHelper.hpp
index 110c684c23..e1901b7d9f 100644
--- a/delegate/test/ActivationTestHelper.hpp
+++ b/delegate/test/ActivationTestHelper.hpp
@@ -8,14 +8,14 @@
 #include "TestUtils.hpp"

 #include <armnn_delegate.hpp>
+#include <DelegateTestInterpreter.hpp>

 #include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
 #include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <tensorflow/lite/schema/schema_generated.h>
 #include <tensorflow/lite/version.h>

+#include <schema_generated.h>
+
 #include <doctest/doctest.h>

 namespace
@@ -69,7 +69,7 @@ std::vector<char> CreateActivationTfLiteModel(tflite::BuiltinOperator activationOperatorCode,
         modelDescription,
         flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));

-    flatBufferBuilder.Finish(flatbufferModel);
+    flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);

     return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                              flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
@@ -80,51 +80,33 @@ void ActivationTest(tflite::BuiltinOperator activationOperatorCode,
                     std::vector<float>& inputValues,
                     std::vector<float>& expectedOutputValues)
 {
-    using namespace tflite;
+    using namespace delegateTestInterpreter;
     std::vector<int32_t> inputShape  { { 4, 1, 4} };
     std::vector<char> modelBuffer = CreateActivationTfLiteModel(activationOperatorCode,
-                                                                ::tflite::TensorType_FLOAT32,
-                                                                inputShape);
-
-    const Model* tfLiteModel = GetModel(modelBuffer.data());
-    // Create TfLite Interpreters
-    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
-    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-              (&armnnDelegateInterpreter) == kTfLiteOk);
-    CHECK(armnnDelegateInterpreter != nullptr);
-    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
-
-    std::unique_ptr<Interpreter> tfLiteInterpreter;
-    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-              (&tfLiteInterpreter) == kTfLiteOk);
-    CHECK(tfLiteInterpreter != nullptr);
-    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
-
-    // Create the ArmNN Delegate
-    armnnDelegate::DelegateOptions delegateOptions(backends);
-    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
-        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
-                         armnnDelegate::TfLiteArmnnDelegateDelete);
-    CHECK(theArmnnDelegate != nullptr);
-    // Modify armnnDelegateInterpreter to use armnnDelegate
-    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
-    // Set input data
-    armnnDelegate::FillInput(tfLiteInterpreter, 0, inputValues);
-    armnnDelegate::FillInput(armnnDelegateInterpreter, 0, inputValues);
-
-    // Run EnqueWorkload
-    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
-    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
-
-    // Compare output data
-    armnnDelegate::CompareOutputData(tfLiteInterpreter,
-                                     armnnDelegateInterpreter,
-                                     inputShape,
-                                     expectedOutputValues);
-
-    tfLiteInterpreter.reset(nullptr);
-    armnnDelegateInterpreter.reset(nullptr);
+                                                                ::tflite::TensorType_FLOAT32,
+                                                                inputShape);
+
+    // Setup interpreter with just TFLite Runtime.
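+    // (No delegate is attached to this interpreter; it produces the reference results the delegate run is compared against.)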
+    auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
+    CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
+    CHECK(tfLiteInterpreter.FillInputTensor(inputValues, 0) == kTfLiteOk);
+    CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
+    std::vector<float>   tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<float>(0);
+    std::vector<int32_t> tfLiteOutputShape  = tfLiteInterpreter.GetOutputShape(0);
+
+    // Setup interpreter with Arm NN Delegate applied.
+    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+    CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
+    CHECK(armnnInterpreter.FillInputTensor(inputValues, 0) == kTfLiteOk);
+    CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
+    std::vector<float>   armnnOutputValues = armnnInterpreter.GetOutputResult<float>(0);
+    std::vector<int32_t> armnnOutputShape  = armnnInterpreter.GetOutputShape(0);
+
+    armnnDelegate::CompareOutputData(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
+    armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, inputShape);
+
+    tfLiteInterpreter.Cleanup();
+    armnnInterpreter.Cleanup();
 }

 } // anonymous namespace
\ No newline at end of file
diff --git a/delegate/test/ArgMinMaxTestHelper.hpp b/delegate/test/ArgMinMaxTestHelper.hpp
index 91cf1f81e7..fd230fff94 100644
--- a/delegate/test/ArgMinMaxTestHelper.hpp
+++ b/delegate/test/ArgMinMaxTestHelper.hpp
@@ -8,14 +8,14 @@
 #include "TestUtils.hpp"

 #include <armnn_delegate.hpp>
+#include <DelegateTestInterpreter.hpp>

 #include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
 #include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <tensorflow/lite/schema/schema_generated.h>
 #include <tensorflow/lite/version.h>

+#include <schema_generated.h>
+
 #include <doctest/doctest.h>

 namespace
@@ -119,7 +119,7 @@ std::vector<char> CreateArgMinMaxTfLiteModel(tflite::BuiltinOperator argMinMaxOperatorCode,
         modelDescription,
         flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));

-    flatBufferBuilder.Finish(flatbufferModel);
+    flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);

     return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                              flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
@@ -139,7 +139,7 @@ void ArgMinMaxTest(tflite::BuiltinOperator argMinMaxOperatorCode,
                    float quantScale = 1.0f,
                    int quantOffset = 0)
 {
-    using namespace tflite;
+    using namespace delegateTestInterpreter;
     std::vector<char> modelBuffer = CreateArgMinMaxTfLiteModel(argMinMaxOperatorCode,
                                                                tensorType,
                                                                inputShape,
@@ -150,50 +150,27 @@ void ArgMinMaxTest(tflite::BuiltinOperator argMinMaxOperatorCode,
                                                                quantScale,
                                                                quantOffset);

-    const Model* tfLiteModel = GetModel(modelBuffer.data());
-    CHECK(tfLiteModel != nullptr);
-
-    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
-    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-              (&armnnDelegateInterpreter) == kTfLiteOk);
-    CHECK(armnnDelegateInterpreter != nullptr);
-    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
-
-    std::unique_ptr<Interpreter> tfLiteInterpreter;
-    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-              (&tfLiteInterpreter) == kTfLiteOk);
-    CHECK(tfLiteInterpreter != nullptr);
-    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
-
-    // Create the ArmNN Delegate
-    armnnDelegate::DelegateOptions delegateOptions(backends);
-    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
-        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
-                         armnnDelegate::TfLiteArmnnDelegateDelete);
-    CHECK(theArmnnDelegate != nullptr);
-    // Modify armnnDelegateInterpreter to use armnnDelegate
-    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
-    // Set input data
-    armnnDelegate::FillInput(tfLiteInterpreter, 0, inputValues);
-
armnnDelegate::FillInput(armnnDelegateInterpreter, 0, inputValues); - - // Run EnqueueWorkload - CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk); - CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk); - - // Compare output data - auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[0]; - auto tfLiteDelageOutputData = tfLiteInterpreter->typed_tensor(tfLiteDelegateOutputId); - auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0]; - auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor(armnnDelegateOutputId); - - for (size_t i = 0; i < expectedOutputValues.size(); i++) - { - CHECK(expectedOutputValues[i] == armnnDelegateOutputData[i]); - CHECK(tfLiteDelageOutputData[i] == expectedOutputValues[i]); - CHECK(tfLiteDelageOutputData[i] == armnnDelegateOutputData[i]); - } + // Setup interpreter with just TFLite Runtime. + auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer); + CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk); + CHECK(tfLiteInterpreter.FillInputTensor(inputValues, 0) == kTfLiteOk); + CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk); + std::vector tfLiteOutputValues = tfLiteInterpreter.GetOutputResult(0); + std::vector tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0); + + // Setup interpreter with Arm NN Delegate applied. + auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends); + CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk); + CHECK(armnnInterpreter.FillInputTensor(inputValues, 0) == kTfLiteOk); + CHECK(armnnInterpreter.Invoke() == kTfLiteOk); + std::vector armnnOutputValues = armnnInterpreter.GetOutputResult(0); + std::vector armnnOutputShape = armnnInterpreter.GetOutputShape(0); + + armnnDelegate::CompareOutputData(tfLiteOutputValues, armnnOutputValues, expectedOutputValues); + armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, outputShape); + + tfLiteInterpreter.Cleanup(); + armnnInterpreter.Cleanup(); } } // anonymous namespace \ No newline at end of file diff --git a/delegate/test/BatchMatMulTestHelper.hpp b/delegate/test/BatchMatMulTestHelper.hpp index 32b0a4fc71..d45f438f5c 100644 --- a/delegate/test/BatchMatMulTestHelper.hpp +++ b/delegate/test/BatchMatMulTestHelper.hpp @@ -8,14 +8,14 @@ #include "TestUtils.hpp" #include +#include #include -#include #include -#include -#include #include +#include + #include namespace @@ -111,7 +111,7 @@ std::vector CreateBatchMatMulTfLiteModel( modelDescription, flatBufferBuilder.CreateVector(buffers.data(), buffers.size())); - flatBufferBuilder.Finish(flatbufferModel); + flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER); return std::vector(flatBufferBuilder.GetBufferPointer(), flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize()); @@ -132,7 +132,7 @@ void BatchMatMulTest(tflite::BuiltinOperator bmmOperatorCode, float quantScale = 1.0f, int quantOffset = 0) { - using namespace tflite; + using namespace delegateTestInterpreter; std::vector modelBuffer = CreateBatchMatMulTfLiteModel(bmmOperatorCode, tensorType, LHSInputShape, @@ -143,62 +143,29 @@ void BatchMatMulTest(tflite::BuiltinOperator bmmOperatorCode, quantScale, quantOffset); - const Model* tfLiteModel = GetModel(modelBuffer.data()); - CHECK(tfLiteModel != nullptr); - // Create TfLite Interpreters - std::unique_ptr armnnDelegateInterpreter; - CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver()) - (&armnnDelegateInterpreter) == kTfLiteOk); - CHECK(armnnDelegateInterpreter != nullptr); - 
CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk); - - std::unique_ptr tfLiteInterpreter; - CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver()) - (&tfLiteInterpreter) == kTfLiteOk); - CHECK(tfLiteInterpreter != nullptr); - CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk); - - // Create the ArmNN Delegate - armnnDelegate::DelegateOptions delegateOptions(backends); - std::unique_ptr - theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions), - armnnDelegate::TfLiteArmnnDelegateDelete); - CHECK(theArmnnDelegate != nullptr); - // Modify armnnDelegateInterpreter to use armnnDelegate - CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk); - - // Set input data - auto tfLiteDelegateLHSInputId = tfLiteInterpreter->inputs()[0]; - auto tfLiteDelegateLHSInputData = tfLiteInterpreter->typed_tensor(tfLiteDelegateLHSInputId); - auto tfLiteDelegateRHSInputId = tfLiteInterpreter->inputs()[1]; - auto tfLiteDelegateRHSInputData = tfLiteInterpreter->typed_tensor(tfLiteDelegateRHSInputId); - for (unsigned int i = 0; i < LHSInputValues.size(); ++i) - { - tfLiteDelegateLHSInputData[i] = LHSInputValues[i]; - } - for (unsigned int i = 0; i < RHSInputValues.size(); ++i) - { - tfLiteDelegateRHSInputData[i] = RHSInputValues[i]; - } - - auto armnnDelegateLHSInputId = armnnDelegateInterpreter->inputs()[0]; - auto armnnDelegateLHSInputData = armnnDelegateInterpreter->typed_tensor(armnnDelegateLHSInputId); - auto armnnDelegateRHSInputId = armnnDelegateInterpreter->inputs()[1]; - auto armnnDelegateRHSInputData = armnnDelegateInterpreter->typed_tensor(armnnDelegateRHSInputId); - for (unsigned int i = 0; i < LHSInputValues.size(); ++i) - { - armnnDelegateLHSInputData[i] = LHSInputValues[i]; - } - for (unsigned int i = 0; i < RHSInputValues.size(); ++i) - { - armnnDelegateRHSInputData[i] = RHSInputValues[i]; - } - // Run EnqueueWorkload - CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk); - CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk); - - armnnDelegate::CompareOutputData(tfLiteInterpreter, armnnDelegateInterpreter, - outputShape, expectedOutputValues); + // Setup interpreter with just TFLite Runtime. + auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer); + CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk); + CHECK(tfLiteInterpreter.FillInputTensor(LHSInputValues, 0) == kTfLiteOk); + CHECK(tfLiteInterpreter.FillInputTensor(RHSInputValues, 1) == kTfLiteOk); + CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk); + std::vector tfLiteOutputValues = tfLiteInterpreter.GetOutputResult(0); + std::vector tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0); + + // Setup interpreter with Arm NN Delegate applied. 
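+    // (Each DelegateTestInterpreter owns a separate TfLiteInterpreter, so both LHS and RHS inputs are filled again for the delegate run.)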
+ auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends); + CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk); + CHECK(armnnInterpreter.FillInputTensor(LHSInputValues, 0) == kTfLiteOk); + CHECK(armnnInterpreter.FillInputTensor(RHSInputValues, 1) == kTfLiteOk); + CHECK(armnnInterpreter.Invoke() == kTfLiteOk); + std::vector armnnOutputValues = armnnInterpreter.GetOutputResult(0); + std::vector armnnOutputShape = armnnInterpreter.GetOutputShape(0); + + armnnDelegate::CompareOutputData(tfLiteOutputValues, armnnOutputValues, expectedOutputValues); + armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, outputShape); + + tfLiteInterpreter.Cleanup(); + armnnInterpreter.Cleanup(); } } // anonymous namespace diff --git a/delegate/test/BatchSpaceTestHelper.hpp b/delegate/test/BatchSpaceTestHelper.hpp index 597139d390..ba6afb1382 100644 --- a/delegate/test/BatchSpaceTestHelper.hpp +++ b/delegate/test/BatchSpaceTestHelper.hpp @@ -8,14 +8,14 @@ #include "TestUtils.hpp" #include +#include #include -#include #include -#include -#include #include +#include + #include namespace @@ -143,7 +143,7 @@ std::vector CreateBatchSpaceTfLiteModel(tflite::BuiltinOperator batchSpace modelDescription, flatBufferBuilder.CreateVector(buffers.data(), buffers.size())); - flatBufferBuilder.Finish(flatbufferModel); + flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER); return std::vector(flatBufferBuilder.GetBufferPointer(), flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize()); @@ -162,7 +162,7 @@ void BatchSpaceTest(tflite::BuiltinOperator controlOperatorCode, float quantScale = 1.0f, int quantOffset = 0) { - using namespace tflite; + using namespace delegateTestInterpreter; std::vector modelBuffer = CreateBatchSpaceTfLiteModel(controlOperatorCode, tensorType, inputShape, @@ -172,47 +172,27 @@ void BatchSpaceTest(tflite::BuiltinOperator controlOperatorCode, quantScale, quantOffset); - const Model* tfLiteModel = GetModel(modelBuffer.data()); - - // Create TfLite Interpreters - std::unique_ptr armnnDelegateInterpreter; - CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver()) - (&armnnDelegateInterpreter) == kTfLiteOk); - CHECK(armnnDelegateInterpreter != nullptr); - CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk); - - std::unique_ptr tfLiteInterpreter; - CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver()) - (&tfLiteInterpreter) == kTfLiteOk); - CHECK(tfLiteInterpreter != nullptr); - CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk); - - // Create the ArmNN Delegate - armnnDelegate::DelegateOptions delegateOptions(backends); - std::unique_ptr - theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions), - armnnDelegate::TfLiteArmnnDelegateDelete); - CHECK(theArmnnDelegate != nullptr); - - // Modify armnnDelegateInterpreter to use armnnDelegate - CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk); - - // Set input data - armnnDelegate::FillInput(tfLiteInterpreter, 0, inputValues); - armnnDelegate::FillInput(armnnDelegateInterpreter, 0, inputValues); - - // Run EnqueWorkload - CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk); - CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk); - - // Compare output data - armnnDelegate::CompareOutputData(tfLiteInterpreter, - armnnDelegateInterpreter, - expectedOutputShape, - expectedOutputValues); - - armnnDelegateInterpreter.reset(nullptr); - tfLiteInterpreter.reset(nullptr); + 
// Setup interpreter with just TFLite Runtime. + auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer); + CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk); + CHECK(tfLiteInterpreter.FillInputTensor(inputValues, 0) == kTfLiteOk); + CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk); + std::vector tfLiteOutputValues = tfLiteInterpreter.GetOutputResult(0); + std::vector tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0); + + // Setup interpreter with Arm NN Delegate applied. + auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends); + CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk); + CHECK(armnnInterpreter.FillInputTensor(inputValues, 0) == kTfLiteOk); + CHECK(armnnInterpreter.Invoke() == kTfLiteOk); + std::vector armnnOutputValues = armnnInterpreter.GetOutputResult(0); + std::vector armnnOutputShape = armnnInterpreter.GetOutputShape(0); + + armnnDelegate::CompareOutputData(tfLiteOutputValues, armnnOutputValues, expectedOutputValues); + armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, expectedOutputShape); + + tfLiteInterpreter.Cleanup(); + armnnInterpreter.Cleanup(); } } // anonymous namespace \ No newline at end of file diff --git a/delegate/test/CastTestHelper.hpp b/delegate/test/CastTestHelper.hpp index be1967ccd6..ac8f033bb8 100644 --- a/delegate/test/CastTestHelper.hpp +++ b/delegate/test/CastTestHelper.hpp @@ -8,14 +8,14 @@ #include "TestUtils.hpp" #include +#include #include -#include #include -#include -#include #include +#include + #include namespace @@ -90,7 +90,7 @@ std::vector CreateCastTfLiteModel(tflite::TensorType inputTensorType, modelDescription, flatBufferBuilder.CreateVector(buffers.data(), buffers.size())); - flatBufferBuilder.Finish(flatbufferModel); + flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER); return std::vector(flatBufferBuilder.GetBufferPointer(), flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize()); } @@ -105,55 +105,34 @@ void CastTest(tflite::TensorType inputTensorType, float quantScale = 1.0f, int quantOffset = 0) { - using namespace tflite; + using namespace delegateTestInterpreter; std::vector modelBuffer = CreateCastTfLiteModel(inputTensorType, outputTensorType, shape, quantScale, quantOffset); - const Model* tfLiteModel = GetModel(modelBuffer.data()); - - // Create TfLite Interpreters - std::unique_ptr armnnDelegate; - CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver()) - (&armnnDelegate) == kTfLiteOk); - CHECK(armnnDelegate != nullptr); - CHECK(armnnDelegate->AllocateTensors() == kTfLiteOk); - - std::unique_ptr tfLiteDelegate; - CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver()) - (&tfLiteDelegate) == kTfLiteOk); - CHECK(tfLiteDelegate != nullptr); - CHECK(tfLiteDelegate->AllocateTensors() == kTfLiteOk); - - // Create the ArmNN Delegate - armnnDelegate::DelegateOptions delegateOptions(backends); - std::unique_ptr - theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions), - armnnDelegate::TfLiteArmnnDelegateDelete); - CHECK(theArmnnDelegate != nullptr); - - // Modify armnnDelegateInterpreter to use armnnDelegate - CHECK(armnnDelegate->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk); - - // Set input data - armnnDelegate::FillInput(tfLiteDelegate, 0, inputValues); - armnnDelegate::FillInput(armnnDelegate, 0, inputValues); - - // Run EnqueWorkload - CHECK(tfLiteDelegate->Invoke() == kTfLiteOk); - CHECK(armnnDelegate->Invoke() == kTfLiteOk); - - // Compare output 
data - armnnDelegate::CompareOutputData(tfLiteDelegate, - armnnDelegate, - shape, - expectedOutputValues, - 0); - - tfLiteDelegate.reset(nullptr); - armnnDelegate.reset(nullptr); + // Setup interpreter with just TFLite Runtime. + auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer); + CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk); + CHECK(tfLiteInterpreter.FillInputTensor(inputValues, 0) == kTfLiteOk); + CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk); + std::vector tfLiteOutputValues = tfLiteInterpreter.GetOutputResult(0); + std::vector tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0); + + // Setup interpreter with Arm NN Delegate applied. + auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends); + CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk); + CHECK(armnnInterpreter.FillInputTensor(inputValues, 0) == kTfLiteOk); + CHECK(armnnInterpreter.Invoke() == kTfLiteOk); + std::vector armnnOutputValues = armnnInterpreter.GetOutputResult(0); + std::vector armnnOutputShape = armnnInterpreter.GetOutputShape(0); + + armnnDelegate::CompareOutputData(tfLiteOutputValues, armnnOutputValues, expectedOutputValues); + armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, shape); + + tfLiteInterpreter.Cleanup(); + armnnInterpreter.Cleanup(); } } // anonymous namespace diff --git a/delegate/test/ComparisonTestHelper.hpp b/delegate/test/ComparisonTestHelper.hpp index ef9f87a5d5..a1114cb938 100644 --- a/delegate/test/ComparisonTestHelper.hpp +++ b/delegate/test/ComparisonTestHelper.hpp @@ -8,14 +8,14 @@ #include "TestUtils.hpp" #include +#include #include -#include #include -#include -#include #include +#include + #include namespace @@ -141,7 +141,7 @@ std::vector CreateComparisonTfLiteModel(tflite::BuiltinOperator comparison modelDescription, flatBufferBuilder.CreateVector(buffers.data(), buffers.size())); - flatBufferBuilder.Finish(flatbufferModel); + flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER); return std::vector(flatBufferBuilder.GetBufferPointer(), flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize()); @@ -160,7 +160,7 @@ void ComparisonTest(tflite::BuiltinOperator comparisonOperatorCode, float quantScale = 1.0f, int quantOffset = 0) { - using namespace tflite; + using namespace delegateTestInterpreter; std::vector modelBuffer = CreateComparisonTfLiteModel(comparisonOperatorCode, tensorType, input0Shape, @@ -169,70 +169,32 @@ void ComparisonTest(tflite::BuiltinOperator comparisonOperatorCode, quantScale, quantOffset); - const Model* tfLiteModel = GetModel(modelBuffer.data()); - // Create TfLite Interpreters - std::unique_ptr armnnDelegateInterpreter; - CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver()) - (&armnnDelegateInterpreter) == kTfLiteOk); - CHECK(armnnDelegateInterpreter != nullptr); - CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk); - - std::unique_ptr tfLiteInterpreter; - CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver()) - (&tfLiteInterpreter) == kTfLiteOk); - CHECK(tfLiteInterpreter != nullptr); - CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk); - - // Create the ArmNN Delegate - armnnDelegate::DelegateOptions delegateOptions(backends); - std::unique_ptr - theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions), - armnnDelegate::TfLiteArmnnDelegateDelete); - CHECK(theArmnnDelegate != nullptr); - // Modify armnnDelegateInterpreter to use armnnDelegate - 
CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk); - - // Set input data - auto tfLiteDelegateInput0Id = tfLiteInterpreter->inputs()[0]; - auto tfLiteDelageInput0Data = tfLiteInterpreter->typed_tensor(tfLiteDelegateInput0Id); - for (unsigned int i = 0; i < input0Values.size(); ++i) - { - tfLiteDelageInput0Data[i] = input0Values[i]; - } - - auto tfLiteDelegateInput1Id = tfLiteInterpreter->inputs()[1]; - auto tfLiteDelageInput1Data = tfLiteInterpreter->typed_tensor(tfLiteDelegateInput1Id); - for (unsigned int i = 0; i < input1Values.size(); ++i) - { - tfLiteDelageInput1Data[i] = input1Values[i]; - } - - auto armnnDelegateInput0Id = armnnDelegateInterpreter->inputs()[0]; - auto armnnDelegateInput0Data = armnnDelegateInterpreter->typed_tensor(armnnDelegateInput0Id); - for (unsigned int i = 0; i < input0Values.size(); ++i) - { - armnnDelegateInput0Data[i] = input0Values[i]; - } - - auto armnnDelegateInput1Id = armnnDelegateInterpreter->inputs()[1]; - auto armnnDelegateInput1Data = armnnDelegateInterpreter->typed_tensor(armnnDelegateInput1Id); - for (unsigned int i = 0; i < input1Values.size(); ++i) - { - armnnDelegateInput1Data[i] = input1Values[i]; - } - - // Run EnqueWorkload - CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk); - CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk); - // Compare output data - auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[0]; - auto tfLiteDelageOutputData = tfLiteInterpreter->typed_tensor(tfLiteDelegateOutputId); - auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0]; - auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor(armnnDelegateOutputId); - - armnnDelegate::CompareData(expectedOutputValues , armnnDelegateOutputData, expectedOutputValues.size()); - armnnDelegate::CompareData(expectedOutputValues , tfLiteDelageOutputData , expectedOutputValues.size()); - armnnDelegate::CompareData(tfLiteDelageOutputData, armnnDelegateOutputData, expectedOutputValues.size()); + // Setup interpreter with just TFLite Runtime. + auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer); + CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk); + CHECK(tfLiteInterpreter.FillInputTensor(input0Values, 0) == kTfLiteOk); + CHECK(tfLiteInterpreter.FillInputTensor(input1Values, 1) == kTfLiteOk); + CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk); + std::vector tfLiteOutputValues = tfLiteInterpreter.GetOutputResult(0); + std::vector tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0); + + // Setup interpreter with Arm NN Delegate applied. 
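+    // (Comparison ops emit kTfLiteBool outputs, so the bool overload of GetOutputResult is used for both runs.)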
+ auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends); + CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk); + CHECK(armnnInterpreter.FillInputTensor(input0Values, 0) == kTfLiteOk); + CHECK(armnnInterpreter.FillInputTensor(input1Values, 1) == kTfLiteOk); + CHECK(armnnInterpreter.Invoke() == kTfLiteOk); + std::vector armnnOutputValues = armnnInterpreter.GetOutputResult(0); + std::vector armnnOutputShape = armnnInterpreter.GetOutputShape(0); + + armnnDelegate::CompareData(expectedOutputValues, armnnOutputValues, expectedOutputValues.size()); + armnnDelegate::CompareData(expectedOutputValues, tfLiteOutputValues, expectedOutputValues.size()); + armnnDelegate::CompareData(tfLiteOutputValues, armnnOutputValues, expectedOutputValues.size()); + + armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, outputShape); + + tfLiteInterpreter.Cleanup(); + armnnInterpreter.Cleanup(); } } // anonymous namespace \ No newline at end of file diff --git a/delegate/test/ControlTestHelper.hpp b/delegate/test/ControlTestHelper.hpp index f68cc07519..9e082a78af 100644 --- a/delegate/test/ControlTestHelper.hpp +++ b/delegate/test/ControlTestHelper.hpp @@ -8,17 +8,15 @@ #include "TestUtils.hpp" #include +#include #include -#include #include -#include -#include #include -#include +#include -#include +#include namespace { @@ -108,7 +106,7 @@ std::vector CreateConcatTfLiteModel(tflite::BuiltinOperator controlOperato modelDescription, flatBufferBuilder.CreateVector(buffers.data(), buffers.size())); - flatBufferBuilder.Finish(flatbufferModel); + flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER); return std::vector(flatBufferBuilder.GetBufferPointer(), flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize()); @@ -201,7 +199,7 @@ std::vector CreateMeanTfLiteModel(tflite::BuiltinOperator controlOperatorC modelDescription, flatBufferBuilder.CreateVector(buffers.data(), buffers.size())); - flatBufferBuilder.Finish(flatbufferModel); + flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER); return std::vector(flatBufferBuilder.GetBufferPointer(), flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize()); @@ -219,7 +217,7 @@ void ConcatenationTest(tflite::BuiltinOperator controlOperatorCode, float quantScale = 1.0f, int quantOffset = 0) { - using namespace tflite; + using namespace delegateTestInterpreter; std::vector modelBuffer = CreateConcatTfLiteModel(controlOperatorCode, tensorType, inputShapes, @@ -229,51 +227,33 @@ void ConcatenationTest(tflite::BuiltinOperator controlOperatorCode, quantScale, quantOffset); - const Model* tfLiteModel = GetModel(modelBuffer.data()); + // Setup interpreter with just TFLite Runtime. + auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer); + CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk); - // Create TfLite Interpreters - std::unique_ptr armnnDelegateInterpreter; - CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver()) - (&armnnDelegateInterpreter) == kTfLiteOk); - CHECK(armnnDelegateInterpreter != nullptr); - CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk); + // Setup interpreter with Arm NN Delegate applied. 
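+    // (Both interpreters are allocated up front; the loop below then fills every input tensor on each of them.)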
+ auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends); + CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk); - std::unique_ptr tfLiteInterpreter; - CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver()) - (&tfLiteInterpreter) == kTfLiteOk); - CHECK(tfLiteInterpreter != nullptr); - CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk); - - // Create the ArmNN Delegate - armnnDelegate::DelegateOptions delegateOptions(backends); - std::unique_ptr - theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions), - armnnDelegate::TfLiteArmnnDelegateDelete); - CHECK(theArmnnDelegate != nullptr); - - // Modify armnnDelegateInterpreter to use armnnDelegate - CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk); - - // Set input data for all input tensors. for (unsigned int i = 0; i < inputValues.size(); ++i) { - // Get single input tensor and assign to interpreters. - auto inputTensorValues = inputValues[i]; - armnnDelegate::FillInput(tfLiteInterpreter, i, inputTensorValues); - armnnDelegate::FillInput(armnnDelegateInterpreter, i, inputTensorValues); + CHECK(tfLiteInterpreter.FillInputTensor(inputValues[i], i) == kTfLiteOk); + CHECK(armnnInterpreter.FillInputTensor(inputValues[i], i) == kTfLiteOk); } - // Run EnqueWorkload - CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk); - CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk); + CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk); + std::vector tfLiteOutputValues = tfLiteInterpreter.GetOutputResult(0); + std::vector tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0); - // Compare output data - armnnDelegate::CompareOutputData(tfLiteInterpreter, - armnnDelegateInterpreter, - expectedOutputShape, - expectedOutputValues); + CHECK(armnnInterpreter.Invoke() == kTfLiteOk); + std::vector armnnOutputValues = armnnInterpreter.GetOutputResult(0); + std::vector armnnOutputShape = armnnInterpreter.GetOutputShape(0); - armnnDelegateInterpreter.reset(nullptr); + armnnDelegate::CompareOutputData(tfLiteOutputValues, armnnOutputValues, expectedOutputValues); + armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, expectedOutputShape); + + tfLiteInterpreter.Cleanup(); + armnnInterpreter.Cleanup(); } template @@ -290,7 +270,7 @@ void MeanTest(tflite::BuiltinOperator controlOperatorCode, float quantScale = 1.0f, int quantOffset = 0) { - using namespace tflite; + using namespace delegateTestInterpreter; std::vector modelBuffer = CreateMeanTfLiteModel(controlOperatorCode, tensorType, input0Shape, @@ -301,46 +281,27 @@ void MeanTest(tflite::BuiltinOperator controlOperatorCode, quantScale, quantOffset); - const Model* tfLiteModel = GetModel(modelBuffer.data()); - - // Create TfLite Interpreters - std::unique_ptr armnnDelegateInterpreter; - CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver()) - (&armnnDelegateInterpreter) == kTfLiteOk); - CHECK(armnnDelegateInterpreter != nullptr); - CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk); - - std::unique_ptr tfLiteInterpreter; - CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver()) - (&tfLiteInterpreter) == kTfLiteOk); - CHECK(tfLiteInterpreter != nullptr); - CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk); - - // Create the ArmNN Delegate - armnnDelegate::DelegateOptions delegateOptions(backends); - std::unique_ptr - theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions), - 
armnnDelegate::TfLiteArmnnDelegateDelete); - CHECK(theArmnnDelegate != nullptr); - - // Modify armnnDelegateInterpreter to use armnnDelegate - CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk); - - // Set input data - armnnDelegate::FillInput(tfLiteInterpreter, 0, input0Values); - armnnDelegate::FillInput(armnnDelegateInterpreter, 0, input0Values); - - // Run EnqueWorkload - CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk); - CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk); - - // Compare output data - armnnDelegate::CompareOutputData(tfLiteInterpreter, - armnnDelegateInterpreter, - expectedOutputShape, - expectedOutputValues); - - armnnDelegateInterpreter.reset(nullptr); + // Setup interpreter with just TFLite Runtime. + auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer); + CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk); + CHECK(tfLiteInterpreter.FillInputTensor(input0Values, 0) == kTfLiteOk); + CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk); + std::vector tfLiteOutputValues = tfLiteInterpreter.GetOutputResult(0); + std::vector tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0); + + // Setup interpreter with Arm NN Delegate applied. + auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends); + CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk); + CHECK(armnnInterpreter.FillInputTensor(input0Values, 0) == kTfLiteOk); + CHECK(armnnInterpreter.Invoke() == kTfLiteOk); + std::vector armnnOutputValues = armnnInterpreter.GetOutputResult(0); + std::vector armnnOutputShape = armnnInterpreter.GetOutputShape(0); + + armnnDelegate::CompareOutputData(tfLiteOutputValues, armnnOutputValues, expectedOutputValues); + armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, expectedOutputShape); + + tfLiteInterpreter.Cleanup(); + armnnInterpreter.Cleanup(); } } // anonymous namespace \ No newline at end of file diff --git a/delegate/test/ConvolutionTestHelper.hpp b/delegate/test/ConvolutionTestHelper.hpp index 2e211b2ee9..6a3400e9cb 100644 --- a/delegate/test/ConvolutionTestHelper.hpp +++ b/delegate/test/ConvolutionTestHelper.hpp @@ -8,6 +8,7 @@ #include "TestUtils.hpp" #include +#include #include #include @@ -186,7 +187,7 @@ std::vector CreateConv2dTfLiteModel(tflite::BuiltinOperator convolutionOpe modelDescription, flatBufferBuilder.CreateVector(buffers.data(), buffers.size())); - flatBufferBuilder.Finish(flatbufferModel); + flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER); return std::vector(flatBufferBuilder.GetBufferPointer(), flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize()); @@ -222,10 +223,9 @@ void ConvolutionTest(tflite::BuiltinOperator convolutionOperatorCode, int32_t filterQuantizationDim = 3) { - using namespace tflite; + using namespace delegateTestInterpreter; std::vector modelBuffer; - modelBuffer = CreateConv2dTfLiteModel(convolutionOperatorCode, tensorType, strideX, @@ -251,59 +251,27 @@ void ConvolutionTest(tflite::BuiltinOperator convolutionOperatorCode, depth_multiplier, filterQuantizationDim); - - const Model* tfLiteModel = GetModel(modelBuffer.data()); - // Create TfLite Interpreters - std::unique_ptr armnnDelegateInterpreter; - CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver()) - (&armnnDelegateInterpreter) == kTfLiteOk); - CHECK(armnnDelegateInterpreter != nullptr); - CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk); - - std::unique_ptr tfLiteInterpreter; - 
CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver()) - (&tfLiteInterpreter) == kTfLiteOk); - CHECK(tfLiteInterpreter != nullptr); - CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk); - - // Create the ArmNN Delegate - armnnDelegate::DelegateOptions delegateOptions(backends); - std::unique_ptr - theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions), - armnnDelegate::TfLiteArmnnDelegateDelete); - CHECK(theArmnnDelegate != nullptr); - // Modify armnnDelegateInterpreter to use armnnDelegate - CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk); - - // Set input data - auto tfLiteDelegateInputId = tfLiteInterpreter->inputs()[0]; - auto tfLiteDelageInputData = tfLiteInterpreter->typed_tensor(tfLiteDelegateInputId); - for (unsigned int i = 0; i < inputValues.size(); ++i) - { - tfLiteDelageInputData[i] = inputValues[i]; - } - - auto armnnDelegateInputId = armnnDelegateInterpreter->inputs()[0]; - auto armnnDelegateInputData = armnnDelegateInterpreter->typed_tensor(armnnDelegateInputId); - for (unsigned int i = 0; i < inputValues.size(); ++i) - { - armnnDelegateInputData[i] = inputValues[i]; - } - // Run EnqueueWorkload - CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk); - CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk); - - // Compare output data - auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[0]; - auto tfLiteDelagateOutputData = tfLiteInterpreter->typed_tensor(tfLiteDelegateOutputId); - auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0]; - auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor(armnnDelegateOutputId); - for (size_t i = 0; i < expectedOutputValues.size(); i++) - { - CHECK(tfLiteDelagateOutputData[i] == armnnDelegateOutputData[i]); - CHECK(doctest::Approx(tfLiteDelagateOutputData[i]).epsilon(0.000001f) == expectedOutputValues[i]); - CHECK(doctest::Approx(armnnDelegateOutputData[i]).epsilon(0.000001f) == expectedOutputValues[i]); - } + // Setup interpreter with just TFLite Runtime. + auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer); + CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk); + CHECK(tfLiteInterpreter.FillInputTensor(inputValues, 0) == kTfLiteOk); + CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk); + std::vector tfLiteOutputValues = tfLiteInterpreter.GetOutputResult(0); + std::vector tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0); + + // Setup interpreter with Arm NN Delegate applied. + auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends); + CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk); + CHECK(armnnInterpreter.FillInputTensor(inputValues, 0) == kTfLiteOk); + CHECK(armnnInterpreter.Invoke() == kTfLiteOk); + std::vector armnnOutputValues = armnnInterpreter.GetOutputResult(0); + std::vector armnnOutputShape = armnnInterpreter.GetOutputShape(0); + + armnnDelegate::CompareOutputData(tfLiteOutputValues, armnnOutputValues, expectedOutputValues); + armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, outputShape); + + tfLiteInterpreter.Cleanup(); + armnnInterpreter.Cleanup(); } // Conv3d is only correctly supported for external delegates from TF Lite v2.6, as there was a breaking bug in v2.5. 
@@ -457,7 +425,7 @@ std::vector CreateConv3dTfLiteModel(tflite::BuiltinOperator convolutionOpe modelDescription, flatBufferBuilder.CreateVector(buffers.data(), buffers.size())); - flatBufferBuilder.Finish(flatbufferModel); + flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER); return std::vector(flatBufferBuilder.GetBufferPointer(), flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize()); @@ -490,7 +458,7 @@ void Convolution3dTest(tflite::BuiltinOperator convolutionOperatorCode, int32_t depth_multiplier = 1, int32_t filterQuantizationDim = 3) { - using namespace tflite; + using namespace delegateTestInterpreter; std::vector modelBuffer; modelBuffer = CreateConv3dTfLiteModel(convolutionOperatorCode, @@ -516,48 +484,30 @@ void Convolution3dTest(tflite::BuiltinOperator convolutionOperatorCode, depth_multiplier, filterQuantizationDim); - const Model* tfLiteModel = GetModel(modelBuffer.data()); - - // Create TfLite Interpreters - std::unique_ptr armnnDelegateInterpreter; - CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver()) - (&armnnDelegateInterpreter) == kTfLiteOk); - CHECK(armnnDelegateInterpreter != nullptr); - CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk); - - std::unique_ptr tfLiteInterpreter; - CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver()) - (&tfLiteInterpreter) == kTfLiteOk); - CHECK(tfLiteInterpreter != nullptr); - CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk); - - // Create the ArmNN Delegate - armnnDelegate::DelegateOptions delegateOptions(backends); - std::unique_ptr - theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions), - armnnDelegate::TfLiteArmnnDelegateDelete); - CHECK(theArmnnDelegate != nullptr); - - // Modify armnnDelegateInterpreter to use armnnDelegate - CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk); - - // Set input data - armnnDelegate::FillInput(tfLiteInterpreter, 0, inputValues); - armnnDelegate::FillInput(armnnDelegateInterpreter, 0, inputValues); - - // Run EnqueueWorkload - CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk); - CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk); - - // Compare output data - auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[0]; - auto tfLiteDelagateOutputData = tfLiteInterpreter->typed_tensor(tfLiteDelegateOutputId); - auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0]; - auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor(armnnDelegateOutputId); - - armnnDelegate::CompareData(expectedOutputValues.data(), armnnDelegateOutputData, expectedOutputValues.size(), 1); - armnnDelegate::CompareData(expectedOutputValues.data(), tfLiteDelagateOutputData, expectedOutputValues.size(), 1); - armnnDelegate::CompareData(tfLiteDelagateOutputData, armnnDelegateOutputData, expectedOutputValues.size(), 1); + // Setup interpreter with just TFLite Runtime. + auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer); + CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk); + CHECK(tfLiteInterpreter.FillInputTensor(inputValues, 0) == kTfLiteOk); + CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk); + std::vector tfLiteOutputValues = tfLiteInterpreter.GetOutputResult(0); + std::vector tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0); + + // Setup interpreter with Arm NN Delegate applied. 
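+    // (Conv3d results are compared via CompareData with a tolerance argument of 1, allowing small rounding differences between the runtimes.)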
+ auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends); + CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk); + CHECK(armnnInterpreter.FillInputTensor(inputValues, 0) == kTfLiteOk); + CHECK(armnnInterpreter.Invoke() == kTfLiteOk); + std::vector armnnOutputValues = armnnInterpreter.GetOutputResult(0); + std::vector armnnOutputShape = armnnInterpreter.GetOutputShape(0); + + armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, outputShape); + + armnnDelegate::CompareData(expectedOutputValues.data(), armnnOutputValues.data(), expectedOutputValues.size(), 1); + armnnDelegate::CompareData(expectedOutputValues.data(), tfLiteOutputValues.data(), expectedOutputValues.size(), 1); + armnnDelegate::CompareData(tfLiteOutputValues.data(), armnnOutputValues.data(), expectedOutputValues.size(), 1); + + tfLiteInterpreter.Cleanup(); + armnnInterpreter.Cleanup(); } #endif @@ -675,7 +625,7 @@ std::vector CreateTransposeConvTfLiteModel(tflite::TensorType tensorType, modelDescription, flatBufferBuilder.CreateVector(buffers.data(), buffers.size())); - flatBufferBuilder.Finish(flatbufferModel); + flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER); return std::vector(flatBufferBuilder.GetBufferPointer(), flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize()); @@ -702,7 +652,7 @@ void TransposeConvTest(std::vector& backends, float quantScale = 1.0f, int quantOffset = 0) { - using namespace tflite; + using namespace delegateTestInterpreter; std::vector modelBuffer; modelBuffer = CreateTransposeConvTfLiteModel(tensorType, @@ -723,58 +673,27 @@ void TransposeConvTest(std::vector& backends, quantOffset); - const Model* tfLiteModel = GetModel(modelBuffer.data()); - // Create TfLite Interpreters - std::unique_ptr armnnDelegateInterpreter; - CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver()) - (&armnnDelegateInterpreter) == kTfLiteOk); - CHECK(armnnDelegateInterpreter != nullptr); - CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk); - - std::unique_ptr tfLiteInterpreter; - CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver()) - (&tfLiteInterpreter) == kTfLiteOk); - CHECK(tfLiteInterpreter != nullptr); - CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk); - - // Create the ArmNN Delegate - armnnDelegate::DelegateOptions delegateOptions(backends); - std::unique_ptr - theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions), - armnnDelegate::TfLiteArmnnDelegateDelete); - CHECK(theArmnnDelegate != nullptr); - // Modify armnnDelegateInterpreter to use armnnDelegate - CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk); - - // Set input data - auto tfLiteDelegateInputId = tfLiteInterpreter->inputs()[2]; - auto tfLiteDelageInputData = tfLiteInterpreter->typed_tensor(tfLiteDelegateInputId); - for (unsigned int i = 0; i < inputValues.size(); ++i) - { - tfLiteDelageInputData[i] = inputValues[i]; - } + // Setup interpreter with just TFLite Runtime. 
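Worth noting for the TransposeConv hunks here: both the removed code and the rewrite fill tensor index 2 rather than 0. That follows TF Lite's TRANSPOSE_CONV input layout, in which only the activation is a runtime input:

    // TRANSPOSE_CONV inputs in the TF Lite schema:
    //   0: output_shape (constant int32 tensor)
    //   1: weights      (constant)
    //   2: activation   (the only runtime input, hence index 2)
    CHECK(tfLiteInterpreter.FillInputTensor<T>(inputValues, 2) == kTfLiteOk);
    CHECK(armnnInterpreter.FillInputTensor<T>(inputValues, 2) == kTfLiteOk);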
+ auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer); + CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk); + CHECK(tfLiteInterpreter.FillInputTensor(inputValues, 2) == kTfLiteOk); + CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk); + std::vector tfLiteOutputValues = tfLiteInterpreter.GetOutputResult(0); + std::vector tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0); - auto armnnDelegateInputId = armnnDelegateInterpreter->inputs()[2]; - auto armnnDelegateInputData = armnnDelegateInterpreter->typed_tensor(armnnDelegateInputId); - for (unsigned int i = 0; i < inputValues.size(); ++i) - { - armnnDelegateInputData[i] = inputValues[i]; - } - // Run EnqueueWorkload - CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk); - CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk); - - // Compare output data - auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[0]; - auto tfLiteDelagateOutputData = tfLiteInterpreter->typed_tensor(tfLiteDelegateOutputId); - auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0]; - auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor(armnnDelegateOutputId); - for (size_t i = 0; i < expectedOutputValues.size(); i++) - { - CHECK(armnnDelegateOutputData[i] == expectedOutputValues[i]); - CHECK(tfLiteDelagateOutputData[i] == expectedOutputValues[i]); - CHECK(tfLiteDelagateOutputData[i] == armnnDelegateOutputData[i]); - } + // Setup interpreter with Arm NN Delegate applied. + auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends); + CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk); + CHECK(armnnInterpreter.FillInputTensor(inputValues, 2) == kTfLiteOk); + CHECK(armnnInterpreter.Invoke() == kTfLiteOk); + std::vector armnnOutputValues = armnnInterpreter.GetOutputResult(0); + std::vector armnnOutputShape = armnnInterpreter.GetOutputShape(0); + + armnnDelegate::CompareOutputData(tfLiteOutputValues, armnnOutputValues, expectedOutputValues); + armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, outputTensorShape); + + tfLiteInterpreter.Cleanup(); + armnnInterpreter.Cleanup(); } } // anonymous namespace diff --git a/delegate/test/DelegateOptionsTest.cpp b/delegate/test/DelegateOptionsTest.cpp index ecd8c736e8..d84d420977 100644 --- a/delegate/test/DelegateOptionsTest.cpp +++ b/delegate/test/DelegateOptionsTest.cpp @@ -30,7 +30,6 @@ TEST_CASE ("ArmnnDelegateOptimizerOptionsReduceFp32ToFp16") armnnDelegate::DelegateOptions delegateOptions(backends, optimizerOptions); DelegateOptionTest(::tflite::TensorType_FLOAT32, - backends, tensorShape, inputData, inputData, @@ -60,7 +59,6 @@ TEST_CASE ("ArmnnDelegateOptimizerOptionsDebug") armnnDelegate::DelegateOptions delegateOptions(backends, optimizerOptions); DelegateOptionTest(::tflite::TensorType_FLOAT32, - backends, tensorShape, inputData, inputData, @@ -104,7 +102,6 @@ TEST_CASE ("ArmnnDelegateOptimizerOptionsDebugFunction") CHECK(!callback); DelegateOptionTest(::tflite::TensorType_FLOAT32, - backends, tensorShape, inputData, inputData, @@ -118,7 +115,7 @@ TEST_CASE ("ArmnnDelegateOptimizerOptionsDebugFunction") TEST_CASE ("ArmnnDelegateOptimizerOptionsImport") { - std::vector backends = { armnn::Compute::CpuAcc, armnn::Compute::CpuRef }; + std::vector backends = { armnn::Compute::CpuRef }; std::vector tensorShape { 1, 2, 2, 1 }; std::vector inputData = { 1, 2, 3, 4 }; std::vector divData = { 2, 2, 3, 4 }; @@ -128,7 +125,6 @@ TEST_CASE ("ArmnnDelegateOptimizerOptionsImport") armnnDelegate::DelegateOptions delegateOptions(backends, 
optimizerOptions); DelegateOptionTest(::tflite::TensorType_UINT8, - backends, tensorShape, inputData, inputData, @@ -164,7 +160,6 @@ TEST_CASE ("ArmnnDelegateStringParsingOptionDisableTfLiteRuntimeFallback") armnnDelegate::DelegateOptions delegateOptions(options_keys.get(), options_values.get(), num_options, nullptr); DelegateOptionNoFallbackTest(::tflite::TensorType_FLOAT32, - backends, tensorShape, inputData, expectedResult, @@ -200,7 +195,6 @@ TEST_CASE ("ArmnnDelegateStringParsingOptionEnableTfLiteRuntimeFallback") armnnDelegate::DelegateOptions delegateOptions(options_keys.get(), options_values.get(), num_options, nullptr); DelegateOptionNoFallbackTest(::tflite::TensorType_FLOAT32, - backends, tensorShape, inputData, expectedResult, @@ -237,7 +231,6 @@ TEST_CASE ("ArmnnDelegateModelOptions_CpuAcc_Test") armnnDelegate::DelegateOptions delegateOptions(backends, optimizerOptions); DelegateOptionTest(::tflite::TensorType_FLOAT32, - backends, tensorShape, inputData, inputData, @@ -268,7 +261,6 @@ TEST_CASE ("ArmnnDelegateSerializeToDot") // Enable serialize to dot by specifying the target file name. delegateOptions.SetSerializeToDot(filename); DelegateOptionTest(::tflite::TensorType_FLOAT32, - backends, tensorShape, inputData, inputData, @@ -309,7 +301,6 @@ void CreateFp16StringParsingTestRun(std::vector& keys, armnnDelegate::DelegateOptions delegateOptions(options_keys.get(), options_values.get(), num_options, nullptr); DelegateOptionTest(::tflite::TensorType_FLOAT32, - backends, tensorShape, inputData, inputData, diff --git a/delegate/test/DelegateOptionsTestHelper.hpp b/delegate/test/DelegateOptionsTestHelper.hpp index fb5403c7de..b6974c9fb6 100644 --- a/delegate/test/DelegateOptionsTestHelper.hpp +++ b/delegate/test/DelegateOptionsTestHelper.hpp @@ -5,17 +5,17 @@ #pragma once -#include - #include "TestUtils.hpp" +#include +#include + #include -#include #include -#include -#include #include +#include + #include namespace @@ -146,7 +146,7 @@ std::vector CreateAddDivTfLiteModel(tflite::TensorType tensorType, modelDescription, flatBufferBuilder.CreateVector(buffers.data(), buffers.size())); - flatBufferBuilder.Finish(flatbufferModel); + flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER); return std::vector(flatBufferBuilder.GetBufferPointer(), flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize()); @@ -218,14 +218,13 @@ std::vector CreateCeilTfLiteModel(tflite::TensorType tensorType, modelDescription, flatBufferBuilder.CreateVector(buffers.data(), buffers.size())); - flatBufferBuilder.Finish(flatbufferModel); + flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER); return std::vector(flatBufferBuilder.GetBufferPointer(), flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize()); } template void DelegateOptionTest(tflite::TensorType tensorType, - const std::vector& backends, std::vector& tensorShape, std::vector& input0Values, std::vector& input1Values, @@ -235,55 +234,41 @@ void DelegateOptionTest(tflite::TensorType tensorType, float quantScale = 1.0f, int quantOffset = 0) { - using namespace tflite; + using namespace delegateTestInterpreter; std::vector modelBuffer = CreateAddDivTfLiteModel(tensorType, tensorShape, quantScale, quantOffset); - const Model* tfLiteModel = GetModel(modelBuffer.data()); - // Create TfLite Interpreters - std::unique_ptr armnnDelegateInterpreter; - CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver()) - (&armnnDelegateInterpreter) == kTfLiteOk); - 
CHECK(armnnDelegateInterpreter != nullptr); - CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk); - - std::unique_ptr tfLiteInterpreter; - CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver()) - (&tfLiteInterpreter) == kTfLiteOk); - CHECK(tfLiteInterpreter != nullptr); - CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk); - - // Create the ArmNN Delegate - std::unique_ptr - theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions), - armnnDelegate::TfLiteArmnnDelegateDelete); - CHECK(theArmnnDelegate != nullptr); - // Modify armnnDelegateInterpreter to use armnnDelegate - CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk); - - // Set input data - armnnDelegate::FillInput(tfLiteInterpreter, 0, input0Values); - armnnDelegate::FillInput(tfLiteInterpreter, 1, input1Values); - armnnDelegate::FillInput(tfLiteInterpreter, 2, input2Values); - - armnnDelegate::FillInput(armnnDelegateInterpreter, 0, input0Values); - armnnDelegate::FillInput(armnnDelegateInterpreter, 1, input1Values); - armnnDelegate::FillInput(armnnDelegateInterpreter, 2, input2Values); - - // Run EnqueueWorkload - CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk); - CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk); - - armnnDelegate::CompareOutputData(tfLiteInterpreter, armnnDelegateInterpreter, tensorShape, expectedOutputValues); - - armnnDelegateInterpreter.reset(nullptr); + // Setup interpreter with just TFLite Runtime. + auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer); + CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk); + CHECK(tfLiteInterpreter.FillInputTensor(input0Values, 0) == kTfLiteOk); + CHECK(tfLiteInterpreter.FillInputTensor(input1Values, 1) == kTfLiteOk); + CHECK(tfLiteInterpreter.FillInputTensor(input2Values, 2) == kTfLiteOk); + CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk); + std::vector tfLiteOutputValues = tfLiteInterpreter.GetOutputResult(0); + std::vector tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0); + + // Setup interpreter with Arm NN Delegate applied. 
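The backends parameter dropped from DelegateOptionTest (and from DelegateOptionNoFallbackTest below) was redundant: the DelegateOptions argument is already constructed from the backend list, and the options-based DelegateTestInterpreter constructor applies the delegate from it directly. A sketch of a call site after the change, with illustrative data (the Add feeding a Div makes (1,2,3,4)+(1,2,3,4) = (2,4,6,8), divided by (2,2,3,4) = (1,2,2,2)):

    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
    std::vector<int32_t> tensorShape { 1, 2, 2, 1 };
    std::vector<float> inputData = { 1.f, 2.f, 3.f, 4.f };
    std::vector<float> divData   = { 2.f, 2.f, 3.f, 4.f };
    std::vector<float> expected  = { 1.f, 2.f, 2.f, 2.f };

    armnnDelegate::DelegateOptions delegateOptions(backends);
    DelegateOptionTest<float>(::tflite::TensorType_FLOAT32,
                              tensorShape, inputData, inputData, divData,
                              expected, delegateOptions);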
+ auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, delegateOptions); + CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk); + CHECK(armnnInterpreter.FillInputTensor(input0Values, 0) == kTfLiteOk); + CHECK(armnnInterpreter.FillInputTensor(input1Values, 1) == kTfLiteOk); + CHECK(armnnInterpreter.FillInputTensor(input2Values, 2) == kTfLiteOk); + CHECK(armnnInterpreter.Invoke() == kTfLiteOk); + std::vector armnnOutputValues = armnnInterpreter.GetOutputResult(0); + std::vector armnnOutputShape = armnnInterpreter.GetOutputShape(0); + + armnnDelegate::CompareOutputData(tfLiteOutputValues, armnnOutputValues, expectedOutputValues); + armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, tensorShape); + + tfLiteInterpreter.Cleanup(); + armnnInterpreter.Cleanup(); } template void DelegateOptionNoFallbackTest(tflite::TensorType tensorType, - const std::vector& backends, std::vector& tensorShape, std::vector& inputValues, std::vector& expectedOutputValues, @@ -291,53 +276,39 @@ void DelegateOptionNoFallbackTest(tflite::TensorType tensorType, float quantScale = 1.0f, int quantOffset = 0) { - using namespace tflite; + using namespace delegateTestInterpreter; std::vector modelBuffer = CreateCeilTfLiteModel(tensorType, tensorShape, quantScale, quantOffset); - const Model* tfLiteModel = GetModel(modelBuffer.data()); - // Create TfLite Interpreters - std::unique_ptr armnnDelegateInterpreter; - CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver()) - (&armnnDelegateInterpreter) == kTfLiteOk); - CHECK(armnnDelegateInterpreter != nullptr); - CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk); - - std::unique_ptr tfLiteInterpreter; - CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver()) - (&tfLiteInterpreter) == kTfLiteOk); - CHECK(tfLiteInterpreter != nullptr); - CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk); - - // Create the ArmNN Delegate - std::unique_ptr - theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions), - armnnDelegate::TfLiteArmnnDelegateDelete); - CHECK(theArmnnDelegate != nullptr); - // Modify armnnDelegateInterpreter to use armnnDelegate + // Setup interpreter with just TFLite Runtime. 
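The delegate run in the no-fallback test below is wrapped in a try/catch: with fallback disabled, an unsupported operator makes the delegate throw an armnn::Exception instead of handing the subgraph back to the TF Lite runtime. The options driving that behaviour can also be built from key/value strings, as the string-parsing test cases above do; a sketch using the same four-argument constructor, with key spellings that are assumptions rather than quotes from the delegate:

    // Illustrative only: key names are assumed, the constructor shape matches
    // the string-parsing tests in this patch.
    const char* keys[]   = { "backends", "disable-tflite-runtime-fallback" };
    const char* values[] = { "CpuRef",   "true" };
    armnnDelegate::DelegateOptions delegateOptions(keys, values, 2, nullptr);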
+ auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer); + CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk); + CHECK(tfLiteInterpreter.FillInputTensor(inputValues, 0) == kTfLiteOk); + CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk); + std::vector tfLiteOutputValues = tfLiteInterpreter.GetOutputResult(0); + std::vector tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0); + tfLiteInterpreter.Cleanup(); + try { - armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()); + auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, delegateOptions); + CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk); + CHECK(armnnInterpreter.FillInputTensor(inputValues, 0) == kTfLiteOk); + CHECK(armnnInterpreter.Invoke() == kTfLiteOk); + std::vector armnnOutputValues = armnnInterpreter.GetOutputResult(0); + std::vector armnnOutputShape = armnnInterpreter.GetOutputShape(0); + armnnInterpreter.Cleanup(); + + armnnDelegate::CompareOutputData(tfLiteOutputValues, armnnOutputValues, expectedOutputValues); + armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, tensorShape); } catch (const armnn::Exception& e) { // Forward the exception message to std::cout std::cout << e.what() << std::endl; } - - // Set input data - armnnDelegate::FillInput(tfLiteInterpreter, 0, inputValues); - armnnDelegate::FillInput(armnnDelegateInterpreter, 0, inputValues); - - // Run EnqueueWorkload - CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk); - CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk); - - armnnDelegate::CompareOutputData(tfLiteInterpreter, armnnDelegateInterpreter, tensorShape, expectedOutputValues); - - armnnDelegateInterpreter.reset(nullptr); } } // anonymous namespace \ No newline at end of file diff --git a/delegate/test/DepthwiseConvolution2dTest.cpp b/delegate/test/DepthwiseConvolution2dTest.cpp index 9ee589c977..5fdbfc4801 100644 --- a/delegate/test/DepthwiseConvolution2dTest.cpp +++ b/delegate/test/DepthwiseConvolution2dTest.cpp @@ -25,7 +25,7 @@ void DepthwiseConv2dValidReluFp32Test(std::vector& backends) std::vector inputShape { 1, 3, 2, 2 }; std::vector filterShape { 1, 2, 2, 4 }; std::vector biasShape { 4 }; - std::vector outputShape { 1, 3, 3, 1 }; + std::vector outputShape { 1, 2, 1, 4 }; static std::vector inputValues = { diff --git a/delegate/test/ElementwiseBinaryTestHelper.hpp b/delegate/test/ElementwiseBinaryTestHelper.hpp index 47ee7c2410..fa9cbb881e 100644 --- a/delegate/test/ElementwiseBinaryTestHelper.hpp +++ b/delegate/test/ElementwiseBinaryTestHelper.hpp @@ -8,14 +8,14 @@ #include "TestUtils.hpp" #include +#include #include -#include #include -#include -#include #include +#include + #include namespace @@ -164,7 +164,7 @@ std::vector CreateElementwiseBinaryTfLiteModel(tflite::BuiltinOperator bin modelDescription, flatBufferBuilder.CreateVector(buffers.data(), buffers.size())); - flatBufferBuilder.Finish(flatbufferModel); + flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER); return std::vector(flatBufferBuilder.GetBufferPointer(), flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize()); @@ -185,7 +185,7 @@ void ElementwiseBinaryTest(tflite::BuiltinOperator binaryOperatorCode, int quantOffset = 0, bool constantInput = false) { - using namespace tflite; + using namespace delegateTestInterpreter; std::vector modelBuffer = CreateElementwiseBinaryTfLiteModel(binaryOperatorCode, activationType, tensorType, @@ -197,47 +197,29 @@ void ElementwiseBinaryTest(tflite::BuiltinOperator binaryOperatorCode, quantScale, 
quantOffset); - const Model* tfLiteModel = GetModel(modelBuffer.data()); - // Create TfLite Interpreters - std::unique_ptr armnnDelegateInterpreter; - CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver()) - (&armnnDelegateInterpreter) == kTfLiteOk); - CHECK(armnnDelegateInterpreter != nullptr); - CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk); - - std::unique_ptr tfLiteInterpreter; - CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver()) - (&tfLiteInterpreter) == kTfLiteOk); - CHECK(tfLiteInterpreter != nullptr); - CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk); - - // Create the ArmNN Delegate - armnnDelegate::DelegateOptions delegateOptions(backends); - std::unique_ptr - theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions), - armnnDelegate::TfLiteArmnnDelegateDelete); - CHECK(theArmnnDelegate != nullptr); - // Modify armnnDelegateInterpreter to use armnnDelegate - CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk); - - // Set input data - armnnDelegate::FillInput(tfLiteInterpreter, 0, input0Values); - armnnDelegate::FillInput(armnnDelegateInterpreter, 0, input0Values); - if (!constantInput) - { - armnnDelegate::FillInput(tfLiteInterpreter, 1, input1Values); - armnnDelegate::FillInput(armnnDelegateInterpreter, 1, input1Values); - } - // Run EnqueWorkload - CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk); - CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk); - - // Compare output data - armnnDelegate::CompareOutputData(tfLiteInterpreter, - armnnDelegateInterpreter, - outputShape, - expectedOutputValues); - armnnDelegateInterpreter.reset(nullptr); + // Setup interpreter with just TFLite Runtime. + auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer); + CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk); + CHECK(tfLiteInterpreter.FillInputTensor(input0Values, 0) == kTfLiteOk); + CHECK(tfLiteInterpreter.FillInputTensor(input1Values, 1) == kTfLiteOk); + CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk); + std::vector tfLiteOutputValues = tfLiteInterpreter.GetOutputResult(0); + std::vector tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0); + + // Setup interpreter with Arm NN Delegate applied. 
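As an aside on the comparison step these rewrites share: CompareOutputData and CompareOutputShape now take the output vectors directly rather than walking interpreter tensors. Signatures along these lines would satisfy every call site in this patch; they are an inference, not a quote from TestUtils:

    namespace armnnDelegate
    {
    template <typename T>
    void CompareOutputData(std::vector<T>& tfLiteOutput,
                           std::vector<T>& armnnOutput,
                           std::vector<T>& expectedOutput)
    {
        CHECK(tfLiteOutput.size() == expectedOutput.size());
        CHECK(armnnOutput.size() == expectedOutput.size());
        for (size_t i = 0; i < expectedOutput.size(); ++i)
        {
            // Approximate equality, matching the epsilon-based checks the
            // removed per-element loops performed.
            CHECK(doctest::Approx(tfLiteOutput[i]).epsilon(0.000001f) == expectedOutput[i]);
            CHECK(doctest::Approx(armnnOutput[i]).epsilon(0.000001f) == expectedOutput[i]);
        }
    }

    inline void CompareOutputShape(const std::vector<int32_t>& tfLiteShape,
                                   const std::vector<int32_t>& armnnShape,
                                   const std::vector<int32_t>& expectedShape)
    {
        CHECK(tfLiteShape == expectedShape);
        CHECK(armnnShape == expectedShape);
    }
    } // namespace armnnDelegate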
+ auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends); + CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk); + CHECK(armnnInterpreter.FillInputTensor(input0Values, 0) == kTfLiteOk); + CHECK(armnnInterpreter.FillInputTensor(input1Values, 1) == kTfLiteOk); + CHECK(armnnInterpreter.Invoke() == kTfLiteOk); + std::vector armnnOutputValues = armnnInterpreter.GetOutputResult(0); + std::vector armnnOutputShape = armnnInterpreter.GetOutputShape(0); + + armnnDelegate::CompareOutputData(tfLiteOutputValues, armnnOutputValues, expectedOutputValues); + armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, outputShape); + + tfLiteInterpreter.Cleanup(); + armnnInterpreter.Cleanup(); } } // anonymous namespace \ No newline at end of file diff --git a/delegate/test/ElementwiseUnaryTestHelper.hpp b/delegate/test/ElementwiseUnaryTestHelper.hpp index f6a534a64f..7f8879b50d 100644 --- a/delegate/test/ElementwiseUnaryTestHelper.hpp +++ b/delegate/test/ElementwiseUnaryTestHelper.hpp @@ -8,14 +8,14 @@ #include "TestUtils.hpp" #include +#include #include -#include #include -#include -#include #include +#include + #include namespace @@ -69,7 +69,7 @@ std::vector CreateElementwiseUnaryTfLiteModel(tflite::BuiltinOperator unar modelDescription, flatBufferBuilder.CreateVector(buffers.data(), buffers.size())); - flatBufferBuilder.Finish(flatbufferModel); + flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER); return std::vector(flatBufferBuilder.GetBufferPointer(), flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize()); @@ -80,48 +80,33 @@ void ElementwiseUnaryFP32Test(tflite::BuiltinOperator unaryOperatorCode, std::vector& inputValues, std::vector& expectedOutputValues) { - using namespace tflite; + using namespace delegateTestInterpreter; std::vector inputShape { { 3, 1, 2} }; std::vector modelBuffer = CreateElementwiseUnaryTfLiteModel(unaryOperatorCode, ::tflite::TensorType_FLOAT32, inputShape); - const Model* tfLiteModel = GetModel(modelBuffer.data()); - // Create TfLite Interpreters - std::unique_ptr armnnDelegateInterpreter; - CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver()) - (&armnnDelegateInterpreter) == kTfLiteOk); - CHECK(armnnDelegateInterpreter != nullptr); - CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk); - - std::unique_ptr tfLiteInterpreter; - CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver()) - (&tfLiteInterpreter) == kTfLiteOk); - CHECK(tfLiteInterpreter != nullptr); - CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk); - - // Create the ArmNN Delegate - armnnDelegate::DelegateOptions delegateOptions(backends); - std::unique_ptr - theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions), - armnnDelegate::TfLiteArmnnDelegateDelete); - CHECK(theArmnnDelegate != nullptr); - // Modify armnnDelegateInterpreter to use armnnDelegate - CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk); - - // Set input data - armnnDelegate::FillInput(armnnDelegateInterpreter, 0, inputValues); - armnnDelegate::FillInput(tfLiteInterpreter, 0, inputValues); - - // Run EnqueWorkload - CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk); - CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk); - - // Compare output data - armnnDelegate::CompareOutputData(tfLiteInterpreter, armnnDelegateInterpreter, inputShape, expectedOutputValues); - - armnnDelegateInterpreter.reset(nullptr); - tfLiteInterpreter.reset(nullptr); + 
// Setup interpreter with just TFLite Runtime. + auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer); + CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk); + CHECK(tfLiteInterpreter.FillInputTensor(inputValues, 0) == kTfLiteOk); + CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk); + std::vector tfLiteOutputValues = tfLiteInterpreter.GetOutputResult(0); + std::vector tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0); + + // Setup interpreter with Arm NN Delegate applied. + auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends); + CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk); + CHECK(armnnInterpreter.FillInputTensor(inputValues, 0) == kTfLiteOk); + CHECK(armnnInterpreter.Invoke() == kTfLiteOk); + std::vector armnnOutputValues = armnnInterpreter.GetOutputResult(0); + std::vector armnnOutputShape = armnnInterpreter.GetOutputShape(0); + + armnnDelegate::CompareOutputData(tfLiteOutputValues, armnnOutputValues, expectedOutputValues); + armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, inputShape); + + tfLiteInterpreter.Cleanup(); + armnnInterpreter.Cleanup(); } void ElementwiseUnaryBoolTest(tflite::BuiltinOperator unaryOperatorCode, @@ -130,56 +115,35 @@ void ElementwiseUnaryBoolTest(tflite::BuiltinOperator unaryOperatorCode, std::vector& inputValues, std::vector& expectedOutputValues) { - using namespace tflite; + using namespace delegateTestInterpreter; std::vector modelBuffer = CreateElementwiseUnaryTfLiteModel(unaryOperatorCode, ::tflite::TensorType_BOOL, inputShape); - const Model* tfLiteModel = GetModel(modelBuffer.data()); - // Create TfLite Interpreters - std::unique_ptr armnnDelegateInterpreter; - CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver()) - (&armnnDelegateInterpreter) == kTfLiteOk); - CHECK(armnnDelegateInterpreter != nullptr); - CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk); - - std::unique_ptr tfLiteInterpreter; - CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver()) - (&tfLiteInterpreter) == kTfLiteOk); - CHECK(tfLiteInterpreter != nullptr); - CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk); - - // Create the ArmNN Delegate - armnnDelegate::DelegateOptions delegateOptions(backends); - std::unique_ptr - theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions), - armnnDelegate::TfLiteArmnnDelegateDelete); - CHECK(theArmnnDelegate != nullptr); - - // Modify armnnDelegateInterpreter to use armnnDelegate - CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk); - - // Set input data - armnnDelegate::FillInput(armnnDelegateInterpreter, 0, inputValues); - armnnDelegate::FillInput(tfLiteInterpreter, 0, inputValues); - - // Run EnqueWorkload - CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk); - CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk); - - // Compare output data, comparing Boolean values is handled differently and needs to call the CompareData function - // directly instead. This is because Boolean types get converted to a bit representation in a vector. 
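The comment retained above deserves a word: std::vector<bool> is the bit-packed specialisation, its operator[] returns a proxy object, and it has no data() member. There is therefore no contiguous bool* to hand to a buffer-based comparison, which is why the element-wise CompareData overloads are used. The constraint in isolation:

    #include <cstddef>
    #include <doctest/doctest.h>
    #include <vector>

    // No bool* data() exists on std::vector<bool>; compare element by element.
    void CompareBoolVectors(const std::vector<bool>& expected,
                            const std::vector<bool>& actual)
    {
        CHECK(expected.size() == actual.size());
        for (std::size_t i = 0; i < expected.size(); ++i)
        {
            CHECK(static_cast<bool>(expected[i]) == static_cast<bool>(actual[i]));
        }
    }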
- auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[0]; - auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor(tfLiteDelegateOutputId); - auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0]; - auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor(armnnDelegateOutputId); - - armnnDelegate::CompareData(expectedOutputValues, armnnDelegateOutputData, expectedOutputValues.size()); - armnnDelegate::CompareData(expectedOutputValues, tfLiteDelegateOutputData, expectedOutputValues.size()); - armnnDelegate::CompareData(tfLiteDelegateOutputData, armnnDelegateOutputData, expectedOutputValues.size()); - - armnnDelegateInterpreter.reset(nullptr); - tfLiteInterpreter.reset(nullptr); + // Setup interpreter with just TFLite Runtime. + auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer); + CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk); + CHECK(tfLiteInterpreter.FillInputTensor(inputValues, 0) == kTfLiteOk); + CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk); + std::vector tfLiteOutputValues = tfLiteInterpreter.GetOutputResult(0); + std::vector tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0); + + // Setup interpreter with Arm NN Delegate applied. + auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends); + CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk); + CHECK(armnnInterpreter.FillInputTensor(inputValues, 0) == kTfLiteOk); + CHECK(armnnInterpreter.Invoke() == kTfLiteOk); + std::vector armnnOutputValues = armnnInterpreter.GetOutputResult(0); + std::vector armnnOutputShape = armnnInterpreter.GetOutputShape(0); + + armnnDelegate::CompareData(expectedOutputValues, armnnOutputValues, expectedOutputValues.size()); + armnnDelegate::CompareData(expectedOutputValues, tfLiteOutputValues, expectedOutputValues.size()); + armnnDelegate::CompareData(tfLiteOutputValues, armnnOutputValues, expectedOutputValues.size()); + + armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, inputShape); + + tfLiteInterpreter.Cleanup(); + armnnInterpreter.Cleanup(); } } // anonymous namespace diff --git a/delegate/test/FillTestHelper.hpp b/delegate/test/FillTestHelper.hpp index c8aadb087b..70162c4a1d 100644 --- a/delegate/test/FillTestHelper.hpp +++ b/delegate/test/FillTestHelper.hpp @@ -8,14 +8,14 @@ #include "TestUtils.hpp" #include +#include #include -#include #include -#include -#include #include +#include + #include namespace @@ -102,7 +102,7 @@ std::vector CreateFillTfLiteModel(tflite::BuiltinOperator fillOperatorCode modelDescription, flatBufferBuilder.CreateVector(buffers.data(), buffers.size())); - flatBufferBuilder.Finish(flatbufferModel); + flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER); return std::vector(flatBufferBuilder.GetBufferPointer(), flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize()); @@ -118,42 +118,32 @@ void FillTest(tflite::BuiltinOperator fillOperatorCode, std::vector& expectedOutputValues, T fillValue) { - using namespace tflite; + using namespace delegateTestInterpreter; std::vector modelBuffer = CreateFillTfLiteModel(fillOperatorCode, tensorType, inputShape, tensorShape, {fillValue}); - const Model* tfLiteModel = GetModel(modelBuffer.data()); - CHECK(tfLiteModel != nullptr); - - std::unique_ptr armnnDelegateInterpreter; - CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver()) - (&armnnDelegateInterpreter) == kTfLiteOk); - CHECK(armnnDelegateInterpreter != nullptr); - CHECK(armnnDelegateInterpreter->AllocateTensors() == 
kTfLiteOk); - - std::unique_ptr tfLiteInterpreter; - CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver()) - (&tfLiteInterpreter) == kTfLiteOk); - CHECK(tfLiteInterpreter != nullptr); - CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk); - - // Create the ArmNN Delegate - armnnDelegate::DelegateOptions delegateOptions(backends); - std::unique_ptr - theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions), - armnnDelegate::TfLiteArmnnDelegateDelete); - CHECK(theArmnnDelegate != nullptr); - // Modify armnnDelegateInterpreter to use armnnDelegate - CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk); - - // Run EnqueueWorkload - CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk); - CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk); - - armnnDelegate::CompareOutputData(tfLiteInterpreter, armnnDelegateInterpreter, tensorShape, expectedOutputValues); + // Setup interpreter with just TFLite Runtime. + auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer); + CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk); + CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk); + std::vector tfLiteOutputValues = tfLiteInterpreter.GetOutputResult(0); + std::vector tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0); + + // Setup interpreter with Arm NN Delegate applied. + auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends); + CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk); + CHECK(armnnInterpreter.Invoke() == kTfLiteOk); + std::vector armnnOutputValues = armnnInterpreter.GetOutputResult(0); + std::vector armnnOutputShape = armnnInterpreter.GetOutputShape(0); + + armnnDelegate::CompareOutputData(tfLiteOutputValues, armnnOutputValues, expectedOutputValues); + armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, tensorShape); + + tfLiteInterpreter.Cleanup(); + armnnInterpreter.Cleanup(); } } // anonymous namespace diff --git a/delegate/test/FullyConnectedTestHelper.hpp b/delegate/test/FullyConnectedTestHelper.hpp index d6bbd93176..e9e5c092d6 100644 --- a/delegate/test/FullyConnectedTestHelper.hpp +++ b/delegate/test/FullyConnectedTestHelper.hpp @@ -8,14 +8,14 @@ #include "TestUtils.hpp" #include +#include #include -#include #include -#include -#include #include +#include + #include namespace @@ -159,7 +159,7 @@ std::vector CreateFullyConnectedTfLiteModel(tflite::TensorType tensorType, modelDescription, flatBufferBuilder.CreateVector(buffers.data(), buffers.size())); - flatBufferBuilder.Finish(flatbufferModel); + flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER); return std::vector(flatBufferBuilder.GetBufferPointer(), flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize()); @@ -180,7 +180,7 @@ void FullyConnectedTest(std::vector& backends, float quantScale = 1.0f, int quantOffset = 0) { - using namespace tflite; + using namespace delegateTestInterpreter; std::vector modelBuffer = CreateFullyConnectedTfLiteModel(tensorType, activationType, @@ -192,64 +192,50 @@ void FullyConnectedTest(std::vector& backends, constantWeights, quantScale, quantOffset); - const Model* tfLiteModel = GetModel(modelBuffer.data()); - - // Create TfLite Interpreters - std::unique_ptr armnnDelegateInterpreter; - CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver()) - (&armnnDelegateInterpreter) == kTfLiteOk); - CHECK(armnnDelegateInterpreter != nullptr); - CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk); - - 
std::unique_ptr tfLiteInterpreter; - CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver()) - (&tfLiteInterpreter) == kTfLiteOk); - CHECK(tfLiteInterpreter != nullptr); - CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk); - - // Create the ArmNN Delegate - armnnDelegate::DelegateOptions delegateOptions(backends); - std::unique_ptr - theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions), - armnnDelegate::TfLiteArmnnDelegateDelete); - CHECK(theArmnnDelegate != nullptr); - - // Modify armnnDelegateInterpreter to use armnnDelegate - CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk); - - // Set input data - armnnDelegate::FillInput(tfLiteInterpreter, 0, inputValues); - armnnDelegate::FillInput(armnnDelegateInterpreter, 0, inputValues); + + // Setup interpreter with just TFLite Runtime. + auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer); + CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk); + + // Setup interpreter with Arm NN Delegate applied. + auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends); + CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk); + + CHECK(tfLiteInterpreter.FillInputTensor(inputValues, 0) == kTfLiteOk); + CHECK(armnnInterpreter.FillInputTensor(inputValues, 0) == kTfLiteOk); if (!constantWeights) { - armnnDelegate::FillInput(tfLiteInterpreter, 1, weightsData); - armnnDelegate::FillInput(armnnDelegateInterpreter, 1, weightsData); + CHECK(tfLiteInterpreter.FillInputTensor(weightsData, 1) == kTfLiteOk); + CHECK(armnnInterpreter.FillInputTensor(weightsData, 1) == kTfLiteOk); if (tensorType == ::tflite::TensorType_INT8) { std::vector biasData = {10}; - armnnDelegate::FillInput(tfLiteInterpreter, 2, biasData); - armnnDelegate::FillInput(armnnDelegateInterpreter, 2, biasData); + CHECK(tfLiteInterpreter.FillInputTensor(biasData, 2) == kTfLiteOk); + CHECK(armnnInterpreter.FillInputTensor(biasData, 2) == kTfLiteOk); } else { std::vector biasData = {10}; - armnnDelegate::FillInput(tfLiteInterpreter, 2, biasData); - armnnDelegate::FillInput(armnnDelegateInterpreter, 2, biasData); + CHECK(tfLiteInterpreter.FillInputTensor(biasData, 2) == kTfLiteOk); + CHECK(armnnInterpreter.FillInputTensor(biasData, 2) == kTfLiteOk); } } - // Run EnqueWorkload - CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk); - CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk); + CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk); + std::vector tfLiteOutputValues = tfLiteInterpreter.GetOutputResult(0); + std::vector tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0); + + CHECK(armnnInterpreter.Invoke() == kTfLiteOk); + std::vector armnnOutputValues = armnnInterpreter.GetOutputResult(0); + std::vector armnnOutputShape = armnnInterpreter.GetOutputShape(0); + + armnnDelegate::CompareOutputData(tfLiteOutputValues, armnnOutputValues, expectedOutputValues); + armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, outputTensorShape); - // Compare output data - armnnDelegate::CompareOutputData(tfLiteInterpreter, - armnnDelegateInterpreter, - outputTensorShape, - expectedOutputValues); - armnnDelegateInterpreter.reset(nullptr); + tfLiteInterpreter.Cleanup(); + armnnInterpreter.Cleanup(); } } // anonymous namespace \ No newline at end of file diff --git a/delegate/test/GatherNdTestHelper.hpp b/delegate/test/GatherNdTestHelper.hpp index 7b1595bafb..604b2159fd 100644 --- a/delegate/test/GatherNdTestHelper.hpp +++ b/delegate/test/GatherNdTestHelper.hpp @@ -8,14 +8,14 @@ 
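One subtlety in the FullyConnected hunks above: the bias filled at input index 2 changes element type with the tensor type. For an INT8-quantized fully connected layer TF Lite carries the bias as INT32, quantized with scale = inputScale * weightsScale and zero point 0, while the float path keeps a float bias. With the element types written out, the branch reads roughly as follows; the types are inferred from TF Lite's quantization rules, not quoted from the patch:

    if (tensorType == ::tflite::TensorType_INT8)
    {
        // Quantized path: INT32 bias, scale = inputScale * weightsScale.
        std::vector<int32_t> biasData = { 10 };
        CHECK(tfLiteInterpreter.FillInputTensor<int32_t>(biasData, 2) == kTfLiteOk);
        CHECK(armnnInterpreter.FillInputTensor<int32_t>(biasData, 2) == kTfLiteOk);
    }
    else
    {
        std::vector<float> biasData = { 10 };
        CHECK(tfLiteInterpreter.FillInputTensor<float>(biasData, 2) == kTfLiteOk);
        CHECK(armnnInterpreter.FillInputTensor<float>(biasData, 2) == kTfLiteOk);
    }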
#include "TestUtils.hpp" #include +#include #include -#include #include -#include -#include #include +#include + #include namespace @@ -108,7 +108,7 @@ std::vector CreateGatherNdTfLiteModel(tflite::TensorType tensorType, modelDescription, flatBufferBuilder.CreateVector(buffers.data(), buffers.size())); - flatBufferBuilder.Finish(flatbufferModel); + flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER); return std::vector(flatBufferBuilder.GetBufferPointer(), flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize()); @@ -126,56 +126,35 @@ void GatherNdTest(tflite::TensorType tensorType, float quantScale = 1.0f, int quantOffset = 0) { - using namespace tflite; + using namespace delegateTestInterpreter; std::vector modelBuffer = CreateGatherNdTfLiteModel(tensorType, paramsShape, indicesShape, expectedOutputShape, quantScale, quantOffset); - const Model* tfLiteModel = GetModel(modelBuffer.data()); - - // Create TfLite Interpreters - std::unique_ptr armnnDelegate; - CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver()) - (&armnnDelegate) == kTfLiteOk); - CHECK(armnnDelegate != nullptr); - CHECK(armnnDelegate->AllocateTensors() == kTfLiteOk); - - std::unique_ptr tfLiteDelegate; - CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver()) - (&tfLiteDelegate) == kTfLiteOk); - CHECK(tfLiteDelegate != nullptr); - CHECK(tfLiteDelegate->AllocateTensors() == kTfLiteOk); - - // Create the ArmNN Delegate - armnnDelegate::DelegateOptions delegateOptions(backends); - std::unique_ptr - theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions), - armnnDelegate::TfLiteArmnnDelegateDelete); - CHECK(theArmnnDelegate != nullptr); - - // Modify armnnDelegateInterpreter to use armnnDelegate - CHECK(armnnDelegate->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk); - - // Set input data - armnnDelegate::FillInput(tfLiteDelegate, 0, paramsValues); - armnnDelegate::FillInput(armnnDelegate, 0, paramsValues); - armnnDelegate::FillInput(tfLiteDelegate, 1, indicesValues); - armnnDelegate::FillInput(armnnDelegate, 1, indicesValues); - - // Run EnqueWorkload - CHECK(tfLiteDelegate->Invoke() == kTfLiteOk); - CHECK(armnnDelegate->Invoke() == kTfLiteOk); - - // Compare output data - armnnDelegate::CompareOutputData(tfLiteDelegate, - armnnDelegate, - expectedOutputShape, - expectedOutputValues, - 0); - - tfLiteDelegate.reset(nullptr); - armnnDelegate.reset(nullptr); + // Setup interpreter with just TFLite Runtime. + auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer); + CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk); + CHECK(tfLiteInterpreter.FillInputTensor(paramsValues, 0) == kTfLiteOk); + CHECK(tfLiteInterpreter.FillInputTensor(indicesValues, 1) == kTfLiteOk); + CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk); + std::vector tfLiteOutputValues = tfLiteInterpreter.GetOutputResult(0); + std::vector tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0); + + // Setup interpreter with Arm NN Delegate applied. 
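GATHER_ND picks elements (or slices) of params addressed by the trailing dimension of indices. A tiny reference for the 2-D, scalar-gather case pins down the semantics the test exercises; this is a sketch, not the TF Lite kernel:

    #include <array>
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Each (row, col) pair in indices selects one element of a rows x cols matrix.
    std::vector<float> GatherNd2d(const std::vector<float>& params,
                                  int32_t cols,
                                  const std::vector<std::array<int32_t, 2>>& indices)
    {
        std::vector<float> out;
        out.reserve(indices.size());
        for (const auto& idx : indices)
        {
            out.push_back(params[static_cast<std::size_t>(idx[0]) * cols + idx[1]]);
        }
        return out;
    }
    // E.g. params {2,2} = [[1,2],[3,4]] with indices [[1,0],[0,1]] -> [3, 2].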
+ auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends); + CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk); + CHECK(armnnInterpreter.FillInputTensor(paramsValues, 0) == kTfLiteOk); + CHECK(armnnInterpreter.FillInputTensor(indicesValues, 1) == kTfLiteOk); + CHECK(armnnInterpreter.Invoke() == kTfLiteOk); + std::vector armnnOutputValues = armnnInterpreter.GetOutputResult(0); + std::vector armnnOutputShape = armnnInterpreter.GetOutputShape(0); + + armnnDelegate::CompareOutputData(tfLiteOutputValues, armnnOutputValues, expectedOutputValues); + armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, expectedOutputShape); + + tfLiteInterpreter.Cleanup(); + armnnInterpreter.Cleanup(); } } // anonymous namespace \ No newline at end of file diff --git a/delegate/test/GatherTestHelper.hpp b/delegate/test/GatherTestHelper.hpp index 41e3b55a50..43717a3b2f 100644 --- a/delegate/test/GatherTestHelper.hpp +++ b/delegate/test/GatherTestHelper.hpp @@ -8,14 +8,14 @@ #include "TestUtils.hpp" #include +#include #include -#include #include -#include -#include #include +#include + #include namespace @@ -109,7 +109,7 @@ std::vector CreateGatherTfLiteModel(tflite::TensorType tensorType, modelDescription, flatBufferBuilder.CreateVector(buffers.data(), buffers.size())); - flatBufferBuilder.Finish(flatbufferModel); + flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER); return std::vector(flatBufferBuilder.GetBufferPointer(), flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize()); @@ -128,7 +128,7 @@ void GatherTest(tflite::TensorType tensorType, float quantScale = 1.0f, int quantOffset = 0) { - using namespace tflite; + using namespace delegateTestInterpreter; std::vector modelBuffer = CreateGatherTfLiteModel(tensorType, paramsShape, indicesShape, @@ -136,49 +136,28 @@ void GatherTest(tflite::TensorType tensorType, axis, quantScale, quantOffset); - const Model* tfLiteModel = GetModel(modelBuffer.data()); - - // Create TfLite Interpreters - std::unique_ptr armnnDelegate; - CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver()) - (&armnnDelegate) == kTfLiteOk); - CHECK(armnnDelegate != nullptr); - CHECK(armnnDelegate->AllocateTensors() == kTfLiteOk); - - std::unique_ptr tfLiteDelegate; - CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver()) - (&tfLiteDelegate) == kTfLiteOk); - CHECK(tfLiteDelegate != nullptr); - CHECK(tfLiteDelegate->AllocateTensors() == kTfLiteOk); - - // Create the ArmNN Delegate - armnnDelegate::DelegateOptions delegateOptions(backends); - std::unique_ptr - theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions), - armnnDelegate::TfLiteArmnnDelegateDelete); - CHECK(theArmnnDelegate != nullptr); - - // Modify armnnDelegateInterpreter to use armnnDelegate - CHECK(armnnDelegate->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk); - - // Set input data - armnnDelegate::FillInput(tfLiteDelegate, 0, paramsValues); - armnnDelegate::FillInput(armnnDelegate, 0, paramsValues); - armnnDelegate::FillInput(tfLiteDelegate, 1, indicesValues); - armnnDelegate::FillInput(armnnDelegate, 1, indicesValues); - - // Run EnqueWorkload - CHECK(tfLiteDelegate->Invoke() == kTfLiteOk); - CHECK(armnnDelegate->Invoke() == kTfLiteOk); - - // Compare output data - armnnDelegate::CompareOutputData(tfLiteDelegate, - armnnDelegate, - expectedOutputShape, - expectedOutputValues, - 0); - - tfLiteDelegate.reset(nullptr); - armnnDelegate.reset(nullptr); + // Setup interpreter 
with just TFLite Runtime. + auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer); + CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk); + CHECK(tfLiteInterpreter.FillInputTensor(paramsValues, 0) == kTfLiteOk); + CHECK(tfLiteInterpreter.FillInputTensor(indicesValues, 1) == kTfLiteOk); + CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk); + std::vector tfLiteOutputValues = tfLiteInterpreter.GetOutputResult(0); + std::vector tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0); + + // Setup interpreter with Arm NN Delegate applied. + auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends); + CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk); + CHECK(armnnInterpreter.FillInputTensor(paramsValues, 0) == kTfLiteOk); + CHECK(armnnInterpreter.FillInputTensor(indicesValues, 1) == kTfLiteOk); + CHECK(armnnInterpreter.Invoke() == kTfLiteOk); + std::vector armnnOutputValues = armnnInterpreter.GetOutputResult(0); + std::vector armnnOutputShape = armnnInterpreter.GetOutputShape(0); + + armnnDelegate::CompareOutputData(tfLiteOutputValues, armnnOutputValues, expectedOutputValues); + armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, expectedOutputShape); + + tfLiteInterpreter.Cleanup(); + armnnInterpreter.Cleanup(); } } // anonymous namespace \ No newline at end of file diff --git a/delegate/test/LogicalTest.cpp b/delegate/test/LogicalTest.cpp index 57bbd318e7..8414293547 100644 --- a/delegate/test/LogicalTest.cpp +++ b/delegate/test/LogicalTest.cpp @@ -27,15 +27,15 @@ void LogicalBinaryAndBoolTest(std::vector& backends) std::vector input1Values { 0, 1, 0, 1 }; std::vector expectedOutputValues { 0, 0, 0, 1 }; - LogicalBinaryTest(tflite::BuiltinOperator_LOGICAL_AND, - ::tflite::TensorType_BOOL, - backends, - input0Shape, - input1Shape, - expectedOutputShape, - input0Values, - input1Values, - expectedOutputValues); + LogicalBinaryTest(tflite::BuiltinOperator_LOGICAL_AND, + ::tflite::TensorType_BOOL, + backends, + input0Shape, + input1Shape, + expectedOutputShape, + input0Values, + input1Values, + expectedOutputValues); } void LogicalBinaryAndBroadcastTest(std::vector& backends) @@ -48,15 +48,15 @@ void LogicalBinaryAndBroadcastTest(std::vector& backends) std::vector input1Values { 1 }; std::vector expectedOutputValues { 0, 1, 0, 1 }; - LogicalBinaryTest(tflite::BuiltinOperator_LOGICAL_AND, - ::tflite::TensorType_BOOL, - backends, - input0Shape, - input1Shape, - expectedOutputShape, - input0Values, - input1Values, - expectedOutputValues); + LogicalBinaryTest(tflite::BuiltinOperator_LOGICAL_AND, + ::tflite::TensorType_BOOL, + backends, + input0Shape, + input1Shape, + expectedOutputShape, + input0Values, + input1Values, + expectedOutputValues); } void LogicalBinaryOrBoolTest(std::vector& backends) @@ -69,15 +69,15 @@ void LogicalBinaryOrBoolTest(std::vector& backends) std::vector input1Values { 0, 1, 0, 1 }; std::vector expectedOutputValues { 0, 1, 1, 1 }; - LogicalBinaryTest(tflite::BuiltinOperator_LOGICAL_OR, - ::tflite::TensorType_BOOL, - backends, - input0Shape, - input1Shape, - expectedOutputShape, - input0Values, - input1Values, - expectedOutputValues); + LogicalBinaryTest(tflite::BuiltinOperator_LOGICAL_OR, + ::tflite::TensorType_BOOL, + backends, + input0Shape, + input1Shape, + expectedOutputShape, + input0Values, + input1Values, + expectedOutputValues); } void LogicalBinaryOrBroadcastTest(std::vector& backends) @@ -90,15 +90,15 @@ void LogicalBinaryOrBroadcastTest(std::vector& backends) std::vector input1Values { 1 }; std::vector 
expectedOutputValues { 1, 1, 1, 1 }; - LogicalBinaryTest(tflite::BuiltinOperator_LOGICAL_OR, - ::tflite::TensorType_BOOL, - backends, - input0Shape, - input1Shape, - expectedOutputShape, - input0Values, - input1Values, - expectedOutputValues); + LogicalBinaryTest(tflite::BuiltinOperator_LOGICAL_OR, + ::tflite::TensorType_BOOL, + backends, + input0Shape, + input1Shape, + expectedOutputShape, + input0Values, + input1Values, + expectedOutputValues); } // LogicalNot operator uses ElementwiseUnary unary layer and descriptor but is still classed as logical operator. diff --git a/delegate/test/LogicalTestHelper.hpp b/delegate/test/LogicalTestHelper.hpp index 2f2ae7bf40..7da8ad9bfc 100644 --- a/delegate/test/LogicalTestHelper.hpp +++ b/delegate/test/LogicalTestHelper.hpp @@ -8,14 +8,14 @@ #include "TestUtils.hpp" #include +#include #include -#include #include -#include -#include #include +#include + #include namespace @@ -120,26 +120,25 @@ std::vector CreateLogicalBinaryTfLiteModel(tflite::BuiltinOperator logical modelDescription, flatBufferBuilder.CreateVector(buffers.data(), buffers.size())); - flatBufferBuilder.Finish(flatbufferModel); + flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER); return std::vector(flatBufferBuilder.GetBufferPointer(), flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize()); } -template void LogicalBinaryTest(tflite::BuiltinOperator logicalOperatorCode, tflite::TensorType tensorType, std::vector& backends, std::vector& input0Shape, std::vector& input1Shape, std::vector& expectedOutputShape, - std::vector& input0Values, - std::vector& input1Values, - std::vector& expectedOutputValues, + std::vector& input0Values, + std::vector& input1Values, + std::vector& expectedOutputValues, float quantScale = 1.0f, int quantOffset = 0) { - using namespace tflite; + using namespace delegateTestInterpreter; std::vector modelBuffer = CreateLogicalBinaryTfLiteModel(logicalOperatorCode, tensorType, input0Shape, @@ -148,54 +147,32 @@ void LogicalBinaryTest(tflite::BuiltinOperator logicalOperatorCode, quantScale, quantOffset); - const Model* tfLiteModel = GetModel(modelBuffer.data()); - // Create TfLite Interpreters - std::unique_ptr armnnDelegateInterpreter; - CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver()) - (&armnnDelegateInterpreter) == kTfLiteOk); - CHECK(armnnDelegateInterpreter != nullptr); - CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk); - - std::unique_ptr tfLiteInterpreter; - CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver()) - (&tfLiteInterpreter) == kTfLiteOk); - CHECK(tfLiteInterpreter != nullptr); - CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk); - - // Create the ArmNN Delegate - armnnDelegate::DelegateOptions delegateOptions(backends); - std::unique_ptr - theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions), - armnnDelegate::TfLiteArmnnDelegateDelete); - CHECK(theArmnnDelegate != nullptr); - // Modify armnnDelegateInterpreter to use armnnDelegate - CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk); - - // Set input data for the armnn interpreter - armnnDelegate::FillInput(armnnDelegateInterpreter, 0, input0Values); - armnnDelegate::FillInput(armnnDelegateInterpreter, 1, input1Values); - - // Set input data for the tflite interpreter - armnnDelegate::FillInput(tfLiteInterpreter, 0, input0Values); - armnnDelegate::FillInput(tfLiteInterpreter, 1, input1Values); - - // Run 
EnqueWorkload - CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk); - CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk); - - // Compare output data, comparing Boolean values is handled differently and needs to call the CompareData function - // directly. This is because Boolean types get converted to a bit representation in a vector. - auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[0]; - auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor(tfLiteDelegateOutputId); - auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0]; - auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor(armnnDelegateOutputId); - - armnnDelegate::CompareData(expectedOutputValues, armnnDelegateOutputData, expectedOutputValues.size()); - armnnDelegate::CompareData(expectedOutputValues, tfLiteDelegateOutputData, expectedOutputValues.size()); - armnnDelegate::CompareData(tfLiteDelegateOutputData, armnnDelegateOutputData, expectedOutputValues.size()); - - armnnDelegateInterpreter.reset(nullptr); - tfLiteInterpreter.reset(nullptr); + // Setup interpreter with just TFLite Runtime. + auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer); + CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk); + CHECK(tfLiteInterpreter.FillInputTensor(input0Values, 0) == kTfLiteOk); + CHECK(tfLiteInterpreter.FillInputTensor(input1Values, 1) == kTfLiteOk); + CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk); + std::vector tfLiteOutputValues = tfLiteInterpreter.GetOutputResult(0); + std::vector tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0); + + // Setup interpreter with Arm NN Delegate applied. + auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends); + CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk); + CHECK(armnnInterpreter.FillInputTensor(input0Values, 0) == kTfLiteOk); + CHECK(armnnInterpreter.FillInputTensor(input1Values, 1) == kTfLiteOk); + CHECK(armnnInterpreter.Invoke() == kTfLiteOk); + std::vector armnnOutputValues = armnnInterpreter.GetOutputResult(0); + std::vector armnnOutputShape = armnnInterpreter.GetOutputShape(0); + + armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, expectedOutputShape); + + armnnDelegate::CompareData(expectedOutputValues, armnnOutputValues, expectedOutputValues.size()); + armnnDelegate::CompareData(expectedOutputValues, tfLiteOutputValues, expectedOutputValues.size()); + armnnDelegate::CompareData(tfLiteOutputValues, armnnOutputValues, expectedOutputValues.size()); + + tfLiteInterpreter.Cleanup(); + armnnInterpreter.Cleanup(); } } // anonymous namespace \ No newline at end of file diff --git a/delegate/test/LstmTestHelper.hpp b/delegate/test/LstmTestHelper.hpp index 14776ca341..4ff517509d 100644 --- a/delegate/test/LstmTestHelper.hpp +++ b/delegate/test/LstmTestHelper.hpp @@ -8,14 +8,13 @@ #include "TestUtils.hpp" #include +#include #include -#include #include -#include -#include #include -#include + +#include #include @@ -539,7 +538,7 @@ std::vector CreateLstmTfLiteModel(tflite::TensorType tensorType, modelDescription, flatBufferBuilder.CreateVector(buffers.data(), buffers.size())); - flatBufferBuilder.Finish(flatbufferModel); + flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER); return std::vector(flatBufferBuilder.GetBufferPointer(), flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize()); @@ -591,7 +590,7 @@ void LstmTestImpl(std::vector& backends, float clippingThresCell, float clippingThresProj) { - using namespace tflite; + using namespace 
delegateTestInterpreter; std::vector modelBuffer = CreateLstmTfLiteModel(tensorType, batchSize, @@ -635,57 +634,29 @@ void LstmTestImpl(std::vector& backends, clippingThresCell, clippingThresProj); - const Model* tfLiteModel = GetModel(modelBuffer.data()); - // Create TfLite Interpreters - std::unique_ptr armnnDelegateInterpreter; - CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver()) - (&armnnDelegateInterpreter) == kTfLiteOk); - CHECK(armnnDelegateInterpreter != nullptr); - CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk); - - std::unique_ptr tfLiteInterpreter; - CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver()) - (&tfLiteInterpreter) == kTfLiteOk); - CHECK(tfLiteInterpreter != nullptr); - CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk); - - // Create the ArmNN Delegate - armnnDelegate::DelegateOptions delegateOptions(backends); - std::unique_ptr - theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions), - armnnDelegate::TfLiteArmnnDelegateDelete); - CHECK(theArmnnDelegate != nullptr); - // Modify armnnDelegateInterpreter to use armnnDelegate - CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk); - - // Set input data - auto tfLiteDelegateInputId = tfLiteInterpreter->inputs()[0]; - auto tfLiteDelageInputData = tfLiteInterpreter->typed_tensor(tfLiteDelegateInputId); - for (unsigned int i = 0; i < inputValues.size(); ++i) - { - tfLiteDelageInputData[i] = inputValues[i]; - } - - auto armnnDelegateInputId = armnnDelegateInterpreter->inputs()[0]; - auto armnnDelegateInputData = armnnDelegateInterpreter->typed_tensor(armnnDelegateInputId); - for (unsigned int i = 0; i < inputValues.size(); ++i) - { - armnnDelegateInputData[i] = inputValues[i]; - } - - // Run EnqueWorkload - CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk); - CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk); - - // Compare output data - auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[0]; - auto tfLiteDelagateOutputData = tfLiteInterpreter->typed_tensor(tfLiteDelegateOutputId); - auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0]; - auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor(armnnDelegateOutputId); - - armnnDelegate::CompareData(expectedOutputValues.data(), armnnDelegateOutputData, expectedOutputValues.size()); - armnnDelegate::CompareData(expectedOutputValues.data(), tfLiteDelagateOutputData, expectedOutputValues.size()); - armnnDelegate::CompareData(tfLiteDelagateOutputData, armnnDelegateOutputData, expectedOutputValues.size()); + std::vector expectedOutputShape {batchSize , outputSize}; + + // Setup interpreter with just TFLite Runtime. + auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer); + CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk); + CHECK(tfLiteInterpreter.FillInputTensor(inputValues, 0) == kTfLiteOk); + CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk); + std::vector tfLiteOutputValues = tfLiteInterpreter.GetOutputResult(0); + std::vector tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0); + + // Setup interpreter with Arm NN Delegate applied. 
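The expectedOutputShape introduced above encodes that the LSTM test model's single output is the hidden state of the final time step, one row per batch entry. With the element type made explicit (int32_t shapes are assumed throughout these helpers):

    // LSTM output tensor: {batchSize, outputSize}.
    std::vector<int32_t> expectedOutputShape { batchSize, outputSize };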
+ auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends); + CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk); + CHECK(armnnInterpreter.FillInputTensor(inputValues, 0) == kTfLiteOk); + CHECK(armnnInterpreter.Invoke() == kTfLiteOk); + std::vector armnnOutputValues = armnnInterpreter.GetOutputResult(0); + std::vector armnnOutputShape = armnnInterpreter.GetOutputShape(0); + + armnnDelegate::CompareOutputData(tfLiteOutputValues, armnnOutputValues, expectedOutputValues); + armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, expectedOutputShape); + + tfLiteInterpreter.Cleanup(); + armnnInterpreter.Cleanup(); } } // anonymous namespace \ No newline at end of file diff --git a/delegate/test/NormalizationTestHelper.hpp b/delegate/test/NormalizationTestHelper.hpp index eafdf84835..a9db6b8fbf 100644 --- a/delegate/test/NormalizationTestHelper.hpp +++ b/delegate/test/NormalizationTestHelper.hpp @@ -8,14 +8,14 @@ #include "TestUtils.hpp" #include +#include #include -#include #include -#include -#include #include +#include + #include namespace @@ -110,7 +110,7 @@ std::vector CreateNormalizationTfLiteModel(tflite::BuiltinOperator normali modelDescription, flatBufferBuilder.CreateVector(buffers.data(), buffers.size())); - flatBufferBuilder.Finish(flatbufferModel); + flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER); return std::vector(flatBufferBuilder.GetBufferPointer(), flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize()); @@ -131,7 +131,7 @@ void NormalizationTest(tflite::BuiltinOperator normalizationOperatorCode, float quantScale = 1.0f, int quantOffset = 0) { - using namespace tflite; + using namespace delegateTestInterpreter; std::vector modelBuffer = CreateNormalizationTfLiteModel(normalizationOperatorCode, tensorType, inputShape, @@ -143,40 +143,27 @@ void NormalizationTest(tflite::BuiltinOperator normalizationOperatorCode, quantScale, quantOffset); - const Model* tfLiteModel = GetModel(modelBuffer.data()); - CHECK(tfLiteModel != nullptr); - - std::unique_ptr armnnDelegateInterpreter; - CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver()) - (&armnnDelegateInterpreter) == kTfLiteOk); - CHECK(armnnDelegateInterpreter != nullptr); - CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk); - - std::unique_ptr tfLiteInterpreter; - CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver()) - (&tfLiteInterpreter) == kTfLiteOk); - CHECK(tfLiteInterpreter != nullptr); - CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk); - - // Create the ArmNN Delegate - armnnDelegate::DelegateOptions delegateOptions(backends); - std::unique_ptr - theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions), - armnnDelegate::TfLiteArmnnDelegateDelete); - CHECK(theArmnnDelegate != nullptr); - // Modify armnnDelegateInterpreter to use armnnDelegate - CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk); - - // Set input data - armnnDelegate::FillInput(tfLiteInterpreter, 0, inputValues); - armnnDelegate::FillInput(armnnDelegateInterpreter, 0, inputValues); - - // Run EnqueueWorkload - CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk); - CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk); - - // Compare output data - armnnDelegate::CompareOutputData(tfLiteInterpreter, armnnDelegateInterpreter, outputShape, expectedOutputValues); + // Setup interpreter with just TFLite Runtime. 
+ auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer); + CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk); + CHECK(tfLiteInterpreter.FillInputTensor(inputValues, 0) == kTfLiteOk); + CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk); + std::vector tfLiteOutputValues = tfLiteInterpreter.GetOutputResult(0); + std::vector tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0); + + // Setup interpreter with Arm NN Delegate applied. + auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends); + CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk); + CHECK(armnnInterpreter.FillInputTensor(inputValues, 0) == kTfLiteOk); + CHECK(armnnInterpreter.Invoke() == kTfLiteOk); + std::vector armnnOutputValues = armnnInterpreter.GetOutputResult(0); + std::vector armnnOutputShape = armnnInterpreter.GetOutputShape(0); + + armnnDelegate::CompareOutputData(tfLiteOutputValues, armnnOutputValues, expectedOutputValues); + armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, outputShape); + + tfLiteInterpreter.Cleanup(); + armnnInterpreter.Cleanup(); } void L2NormalizationTest(std::vector& backends) diff --git a/delegate/test/PackTestHelper.hpp b/delegate/test/PackTestHelper.hpp index 0fd2f195f4..112eccb5be 100644 --- a/delegate/test/PackTestHelper.hpp +++ b/delegate/test/PackTestHelper.hpp @@ -8,17 +8,15 @@ #include "TestUtils.hpp" #include +#include #include -#include #include -#include -#include #include -#include +#include -#include +#include namespace { @@ -108,7 +106,7 @@ std::vector CreatePackTfLiteModel(tflite::BuiltinOperator packOperatorCode modelDescription, flatBufferBuilder.CreateVector(buffers.data(), buffers.size())); - flatBufferBuilder.Finish(flatbufferModel); + flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER); return std::vector(flatBufferBuilder.GetBufferPointer(), flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize()); @@ -126,7 +124,7 @@ void PackTest(tflite::BuiltinOperator packOperatorCode, float quantScale = 1.0f, int quantOffset = 0) { - using namespace tflite; + using namespace delegateTestInterpreter; std::vector modelBuffer = CreatePackTfLiteModel(packOperatorCode, tensorType, inputShape, @@ -136,51 +134,35 @@ void PackTest(tflite::BuiltinOperator packOperatorCode, quantScale, quantOffset); - const Model* tfLiteModel = GetModel(modelBuffer.data()); + // Setup interpreter with just TFLite Runtime. 
+ auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer); + CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk); - // Create TfLite Interpreters - std::unique_ptr armnnDelegateInterpreter; - CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver()) - (&armnnDelegateInterpreter) == kTfLiteOk); - CHECK(armnnDelegateInterpreter != nullptr); - CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk); - - std::unique_ptr tfLiteInterpreter; - CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver()) - (&tfLiteInterpreter) == kTfLiteOk); - CHECK(tfLiteInterpreter != nullptr); - CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk); - - // Create the ArmNN Delegate - armnnDelegate::DelegateOptions delegateOptions(backends); - std::unique_ptr - theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions), - armnnDelegate::TfLiteArmnnDelegateDelete); - CHECK(theArmnnDelegate != nullptr); - - // Modify armnnDelegateInterpreter to use armnnDelegate - CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk); + // Setup interpreter with Arm NN Delegate applied. + auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends); + CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk); // Set input data for all input tensors. for (unsigned int i = 0; i < inputValues.size(); ++i) { - // Get single input tensor and assign to interpreters. auto inputTensorValues = inputValues[i]; - armnnDelegate::FillInput(tfLiteInterpreter, i, inputTensorValues); - armnnDelegate::FillInput(armnnDelegateInterpreter, i, inputTensorValues); + CHECK(tfLiteInterpreter.FillInputTensor(inputTensorValues, i) == kTfLiteOk); + CHECK(armnnInterpreter.FillInputTensor(inputTensorValues, i) == kTfLiteOk); } - // Run EnqueWorkload - CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk); - CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk); + CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk); + std::vector tfLiteOutputValues = tfLiteInterpreter.GetOutputResult(0); + std::vector tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0); + + CHECK(armnnInterpreter.Invoke() == kTfLiteOk); + std::vector armnnOutputValues = armnnInterpreter.GetOutputResult(0); + std::vector armnnOutputShape = armnnInterpreter.GetOutputShape(0); - // Compare output data - armnnDelegate::CompareOutputData(tfLiteInterpreter, - armnnDelegateInterpreter, - expectedOutputShape, - expectedOutputValues); + armnnDelegate::CompareOutputData(tfLiteOutputValues, armnnOutputValues, expectedOutputValues); + armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, expectedOutputShape); - armnnDelegateInterpreter.reset(nullptr); + tfLiteInterpreter.Cleanup(); + armnnInterpreter.Cleanup(); } } // anonymous namespace \ No newline at end of file diff --git a/delegate/test/PadTestHelper.hpp b/delegate/test/PadTestHelper.hpp index d049c52635..c4bfd89458 100644 --- a/delegate/test/PadTestHelper.hpp +++ b/delegate/test/PadTestHelper.hpp @@ -8,14 +8,14 @@ #include "TestUtils.hpp" #include +#include #include -#include #include -#include -#include #include +#include + #include namespace @@ -153,7 +153,7 @@ std::vector CreatePadTfLiteModel( modelDescription, flatBufferBuilder.CreateVector(buffers.data(), buffers.size())); - flatBufferBuilder.Finish(flatbufferModel); + flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER); return std::vector(flatBufferBuilder.GetBufferPointer(), flatBufferBuilder.GetBufferPointer() + 
flatBufferBuilder.GetSize()); @@ -174,7 +174,7 @@ void PadTest(tflite::BuiltinOperator padOperatorCode, int quantOffset = 0, tflite::MirrorPadMode paddingMode = tflite::MirrorPadMode_SYMMETRIC) { - using namespace tflite; + using namespace delegateTestInterpreter; std::vector modelBuffer = CreatePadTfLiteModel(padOperatorCode, tensorType, paddingMode, @@ -186,39 +186,27 @@ void PadTest(tflite::BuiltinOperator padOperatorCode, quantScale, quantOffset); - const Model* tfLiteModel = GetModel(modelBuffer.data()); - CHECK(tfLiteModel != nullptr); - - std::unique_ptr armnnDelegateInterpreter; - CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver()) - (&armnnDelegateInterpreter) == kTfLiteOk); - CHECK(armnnDelegateInterpreter != nullptr); - CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk); - - std::unique_ptr tfLiteInterpreter; - CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver()) - (&tfLiteInterpreter) == kTfLiteOk); - CHECK(tfLiteInterpreter != nullptr); - CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk); - - // Create the ArmNN Delegate - armnnDelegate::DelegateOptions delegateOptions(backends); - std::unique_ptr - theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions), - armnnDelegate::TfLiteArmnnDelegateDelete); - CHECK(theArmnnDelegate != nullptr); - // Modify armnnDelegateInterpreter to use armnnDelegate - CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk); - - // Set input data - armnnDelegate::FillInput(tfLiteInterpreter, 0, inputValues); - armnnDelegate::FillInput(armnnDelegateInterpreter, 0, inputValues); - - // Run EnqueueWorkload - CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk); - CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk); - - armnnDelegate::CompareOutputData(tfLiteInterpreter, armnnDelegateInterpreter, outputShape, expectedOutputValues); + // Setup interpreter with just TFLite Runtime. + auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer); + CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk); + CHECK(tfLiteInterpreter.FillInputTensor(inputValues, 0) == kTfLiteOk); + CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk); + std::vector tfLiteOutputValues = tfLiteInterpreter.GetOutputResult(0); + std::vector tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0); + + // Setup interpreter with Arm NN Delegate applied. 
+ auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends); + CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk); + CHECK(armnnInterpreter.FillInputTensor(inputValues, 0) == kTfLiteOk); + CHECK(armnnInterpreter.Invoke() == kTfLiteOk); + std::vector armnnOutputValues = armnnInterpreter.GetOutputResult(0); + std::vector armnnOutputShape = armnnInterpreter.GetOutputShape(0); + + armnnDelegate::CompareOutputData(tfLiteOutputValues, armnnOutputValues, expectedOutputValues); + armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, outputShape); + + tfLiteInterpreter.Cleanup(); + armnnInterpreter.Cleanup(); } } // anonymous namespace diff --git a/delegate/test/Pooling2dTestHelper.hpp b/delegate/test/Pooling2dTestHelper.hpp index 6de85b63c5..d08a45b588 100644 --- a/delegate/test/Pooling2dTestHelper.hpp +++ b/delegate/test/Pooling2dTestHelper.hpp @@ -8,14 +8,14 @@ #include "TestUtils.hpp" #include +#include #include -#include #include -#include -#include #include +#include + #include namespace @@ -106,7 +106,7 @@ std::vector CreatePooling2dTfLiteModel( modelDescription, flatBufferBuilder.CreateVector(buffers, 3)); - flatBufferBuilder.Finish(flatbufferModel); + flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER); return std::vector(flatBufferBuilder.GetBufferPointer(), flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize()); @@ -129,7 +129,7 @@ void Pooling2dTest(tflite::BuiltinOperator poolingOperatorCode, float quantScale = 1.0f, int quantOffset = 0) { - using namespace tflite; + using namespace delegateTestInterpreter; std::vector modelBuffer = CreatePooling2dTfLiteModel(poolingOperatorCode, tensorType, inputShape, @@ -143,50 +143,27 @@ void Pooling2dTest(tflite::BuiltinOperator poolingOperatorCode, quantScale, quantOffset); - const Model* tfLiteModel = GetModel(modelBuffer.data()); - CHECK(tfLiteModel != nullptr); - // Create TfLite Interpreters - std::unique_ptr armnnDelegateInterpreter; - CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver()) - (&armnnDelegateInterpreter) == kTfLiteOk); - CHECK(armnnDelegateInterpreter != nullptr); - CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk); - - std::unique_ptr tfLiteInterpreter; - CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver()) - (&tfLiteInterpreter) == kTfLiteOk); - CHECK(tfLiteInterpreter != nullptr); - CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk); - - // Create the ArmNN Delegate - armnnDelegate::DelegateOptions delegateOptions(backends); - std::unique_ptr - theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions), - armnnDelegate::TfLiteArmnnDelegateDelete); - CHECK(theArmnnDelegate != nullptr); - // Modify armnnDelegateInterpreter to use armnnDelegate - CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk); - - // Set input data - auto tfLiteDelegateInputId = tfLiteInterpreter->inputs()[0]; - auto tfLiteDelegateInputData = tfLiteInterpreter->typed_tensor(tfLiteDelegateInputId); - for (unsigned int i = 0; i < inputValues.size(); ++i) - { - tfLiteDelegateInputData[i] = inputValues[i]; - } - - auto armnnDelegateInputId = armnnDelegateInterpreter->inputs()[0]; - auto armnnDelegateInputData = armnnDelegateInterpreter->typed_tensor(armnnDelegateInputId); - for (unsigned int i = 0; i < inputValues.size(); ++i) - { - armnnDelegateInputData[i] = inputValues[i]; - } - - // Run EnqueueWorkload - CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk); - 
CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk); - - armnnDelegate::CompareOutputData(tfLiteInterpreter, armnnDelegateInterpreter, outputShape, expectedOutputValues); + // Setup interpreter with just TFLite Runtime. + auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer); + CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk); + CHECK(tfLiteInterpreter.FillInputTensor(inputValues, 0) == kTfLiteOk); + CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk); + std::vector tfLiteOutputValues = tfLiteInterpreter.GetOutputResult(0); + std::vector tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0); + + // Setup interpreter with Arm NN Delegate applied. + auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends); + CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk); + CHECK(armnnInterpreter.FillInputTensor(inputValues, 0) == kTfLiteOk); + CHECK(armnnInterpreter.Invoke() == kTfLiteOk); + std::vector armnnOutputValues = armnnInterpreter.GetOutputResult(0); + std::vector armnnOutputShape = armnnInterpreter.GetOutputShape(0); + + armnnDelegate::CompareOutputData(tfLiteOutputValues, armnnOutputValues, expectedOutputValues); + armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, outputShape); + + tfLiteInterpreter.Cleanup(); + armnnInterpreter.Cleanup(); } } // anonymous namespace diff --git a/delegate/test/Pooling3dTestHelper.hpp b/delegate/test/Pooling3dTestHelper.hpp index dd90e4bb1c..59d2e18228 100644 --- a/delegate/test/Pooling3dTestHelper.hpp +++ b/delegate/test/Pooling3dTestHelper.hpp @@ -8,16 +8,16 @@ #include "TestUtils.hpp" #include +#include #include #include -#include -#include #include -#include -#include +#include #include +#include + #include namespace @@ -131,7 +131,7 @@ std::vector CreatePooling3dTfLiteModel( modelDescription, flatBufferBuilder.CreateVector(buffers.data(), buffers.size())); - flatBufferBuilder.Finish(flatbufferModel); + flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER); return std::vector(flatBufferBuilder.GetBufferPointer(), flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize()); @@ -156,7 +156,7 @@ void Pooling3dTest(std::string poolType, float quantScale = 1.0f, int quantOffset = 0) { - using namespace tflite; + using namespace delegateTestInterpreter; // Create the single op model buffer std::vector modelBuffer = CreatePooling3dTfLiteModel(poolType, tensorType, @@ -173,79 +173,37 @@ void Pooling3dTest(std::string poolType, quantScale, quantOffset); - const Model* tfLiteModel = GetModel(modelBuffer.data()); - CHECK(tfLiteModel != nullptr); - // Create TfLite Interpreters - std::unique_ptr armnnDelegateInterpreter; - - // Custom ops need to be added to the BuiltinOp resolver before the interpreter is created - // Based on the poolType from the test case add the custom operator using the name and the tflite - // registration function - tflite::ops::builtin::BuiltinOpResolver armnn_op_resolver; + std::string opType = ""; if (poolType == "kMax") { - armnn_op_resolver.AddCustom("MaxPool3D", tflite::ops::custom::Register_MAX_POOL_3D()); + opType = "MaxPool3D"; } else { - armnn_op_resolver.AddCustom("AveragePool3D", tflite::ops::custom::Register_AVG_POOL_3D()); + opType = "AveragePool3D"; } - CHECK(InterpreterBuilder(tfLiteModel, armnn_op_resolver) - (&armnnDelegateInterpreter) == kTfLiteOk); - CHECK(armnnDelegateInterpreter != nullptr); - CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk); - - std::unique_ptr tfLiteInterpreter; - - // Custom ops need to be added to the 
BuiltinOp resolver before the interpreter is created - // Based on the poolType from the test case add the custom operator using the name and the tflite - // registration function - tflite::ops::builtin::BuiltinOpResolver tflite_op_resolver; - if (poolType == "kMax") - { - tflite_op_resolver.AddCustom("MaxPool3D", tflite::ops::custom::Register_MAX_POOL_3D()); - } - else - { - tflite_op_resolver.AddCustom("AveragePool3D", tflite::ops::custom::Register_AVG_POOL_3D()); - } - - CHECK(InterpreterBuilder(tfLiteModel, tflite_op_resolver) - (&tfLiteInterpreter) == kTfLiteOk); - CHECK(tfLiteInterpreter != nullptr); - CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk); - - // Create the ArmNN Delegate - armnnDelegate::DelegateOptions delegateOptions(backends); - std::unique_ptr - theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions), - armnnDelegate::TfLiteArmnnDelegateDelete); - CHECK(theArmnnDelegate != nullptr); - - // Modify armnnDelegateInterpreter to use armnnDelegate - CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk); - - // Set input data - auto tfLiteDelegateInputId = tfLiteInterpreter->inputs()[0]; - auto tfLiteDelegateInputData = tfLiteInterpreter->typed_tensor(tfLiteDelegateInputId); - for (unsigned int i = 0; i < inputValues.size(); ++i) - { - tfLiteDelegateInputData[i] = inputValues[i]; - } - - auto armnnDelegateInputId = armnnDelegateInterpreter->inputs()[0]; - auto armnnDelegateInputData = armnnDelegateInterpreter->typed_tensor(armnnDelegateInputId); - for (unsigned int i = 0; i < inputValues.size(); ++i) - { - armnnDelegateInputData[i] = inputValues[i]; - } - - // Run EnqueueWorkload - CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk); - CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk); - - armnnDelegate::CompareOutputData(tfLiteInterpreter, armnnDelegateInterpreter, outputShape, expectedOutputValues); + // Setup interpreter with just TFLite Runtime. + auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer, opType); + CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk); + CHECK(tfLiteInterpreter.FillInputTensor(inputValues, 0) == kTfLiteOk); + CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk); + std::vector tfLiteOutputValues = tfLiteInterpreter.GetOutputResult(0); + std::vector tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0); + + // Setup interpreter with Arm NN Delegate applied. + auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends, opType); + CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk); + CHECK(armnnInterpreter.FillInputTensor(inputValues, 0) == kTfLiteOk); + CHECK(armnnInterpreter.Invoke() == kTfLiteOk); + std::vector armnnOutputValues = armnnInterpreter.GetOutputResult(0); + std::vector armnnOutputShape = armnnInterpreter.GetOutputShape(0); + + armnnDelegate::CompareOutputData(tfLiteOutputValues, armnnOutputValues, expectedOutputValues); + armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, outputShape); + + tfLiteInterpreter.Cleanup(); + armnnInterpreter.Cleanup(); } // Function to create the flexbuffer custom options for the custom pooling3d operator. 
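Every helper this patch touches converges on the same two-interpreter pattern: build the model buffer, run it once on the stock TFLite runtime, run it again with the Arm NN delegate applied, then check both results against each other and against the expected reference. The sketch below distills that pattern as standalone C++; RunAndCompare is a hypothetical name used only for illustration, and the include paths and element types (T, int32_t, char) are assumptions, since the templated signatures were lost from this copy of the patch.

    // A minimal sketch of the repeated test-helper pattern in this patch.
    // Assumes <DelegateTestInterpreter.hpp> and "TestUtils.hpp" resolve as set
    // up by the patch, and that GetOutputResult is templated on element type.
    #include "TestUtils.hpp"
    #include <DelegateTestInterpreter.hpp>
    #include <armnn/BackendId.hpp>
    #include <doctest/doctest.h>

    template <typename T>
    void RunAndCompare(std::vector<char>& modelBuffer,                 // hypothetical helper
                       std::vector<armnn::BackendId>& backends,
                       std::vector<T>& inputValues,
                       std::vector<T>& expectedOutputValues,
                       std::vector<int32_t>& expectedOutputShape)
    {
        using namespace delegateTestInterpreter;

        // Reference run on the stock TFLite runtime.
        auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
        CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
        CHECK(tfLiteInterpreter.FillInputTensor(inputValues, 0) == kTfLiteOk);
        CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
        std::vector<T>       tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<T>(0);
        std::vector<int32_t> tfLiteOutputShape  = tfLiteInterpreter.GetOutputShape(0);

        // Same model again, this time with the Arm NN delegate applied.
        auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
        CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
        CHECK(armnnInterpreter.FillInputTensor(inputValues, 0) == kTfLiteOk);
        CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
        std::vector<T>       armnnOutputValues = armnnInterpreter.GetOutputResult<T>(0);
        std::vector<int32_t> armnnOutputShape  = armnnInterpreter.GetOutputShape(0);

        // Delegate output must match both the reference run and the expected values.
        armnnDelegate::CompareOutputData(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
        armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, expectedOutputShape);

        // Cleanup() releases the interpreter and its delegate explicitly.
        tfLiteInterpreter.Cleanup();
        armnnInterpreter.Cleanup();
    }

Helpers with more than one input call FillInputTensor once per input index, and the custom-op helper above passes the op name, for example "MaxPool3D", as an extra constructor argument so the interpreter can register the custom operator.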
diff --git a/delegate/test/PreluTest.cpp b/delegate/test/PreluTest.cpp
index 40bf1dda56..f65e15bb97 100644
--- a/delegate/test/PreluTest.cpp
+++ b/delegate/test/PreluTest.cpp
@@ -18,7 +18,8 @@ namespace armnnDelegate
 {
 
-void PreluFloatSimpleTest(std::vector<armnn::BackendId>& backends, bool isAlphaConst, bool isDynamicOutput = false) {
+void PreluFloatSimpleTest(std::vector<armnn::BackendId>& backends, bool isAlphaConst, bool isDynamicOutput = false)
+{
     std::vector<int32_t> inputShape { 1, 2, 3 };
     std::vector<int32_t> alphaShape { 1 };
     std::vector<int32_t> outputShape { 1, 2, 3 };
diff --git a/delegate/test/PreluTestHelper.hpp b/delegate/test/PreluTestHelper.hpp
index 0721c139ac..c2a9435d0c 100644
--- a/delegate/test/PreluTestHelper.hpp
+++ b/delegate/test/PreluTestHelper.hpp
@@ -8,14 +8,14 @@
 
 #include "TestUtils.hpp"
 
 #include
+#include
 #include
-#include
 #include
-#include
-#include
 #include
+#include
+
 #include
 
 namespace
@@ -107,7 +107,7 @@ std::vector<char> CreatePreluTfLiteModel(tflite::BuiltinOperator preluOperatorCo
                                          modelDescription,
                                          flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
 
-    flatBufferBuilder.Finish(flatbufferModel);
+    flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);
 
     return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                              flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
@@ -124,7 +124,7 @@ void PreluTest(tflite::BuiltinOperator preluOperatorCode,
               std::vector<float>& expectedOutput,
               bool alphaIsConstant)
 {
-    using namespace tflite;
+    using namespace delegateTestInterpreter;
 
    std::vector<char> modelBuffer = CreatePreluTfLiteModel(preluOperatorCode,
                                                           tensorType,
@@ -134,62 +134,42 @@ void PreluTest(tflite::BuiltinOperator preluOperatorCode,
                                                           alphaData,
                                                           alphaIsConstant);
 
-    const Model* tfLiteModel = GetModel(modelBuffer.data());
-
-    CHECK(tfLiteModel != nullptr);
-
-    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
-
-    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-          (&armnnDelegateInterpreter) == kTfLiteOk);
-    CHECK(armnnDelegateInterpreter != nullptr);
-    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
 
-    std::unique_ptr<Interpreter> tfLiteInterpreter;
+    // Setup interpreter with just TFLite Runtime.
+    auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
+    CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
 
-    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-          (&tfLiteInterpreter) == kTfLiteOk);
-    CHECK(tfLiteInterpreter != nullptr);
-    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
+    // Setup interpreter with Arm NN Delegate applied.
+    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+    CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
 
-    // Create the ArmNN Delegate
-    armnnDelegate::DelegateOptions delegateOptions(backends);
-
-    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
-        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
-                         armnnDelegate::TfLiteArmnnDelegateDelete);
-    CHECK(theArmnnDelegate != nullptr);
-
-    // Modify armnnDelegateInterpreter to use armnnDelegate
-    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
-    // Set input data
-    armnnDelegate::FillInput(tfLiteInterpreter, 0, inputData);
-    armnnDelegate::FillInput(armnnDelegateInterpreter, 0, inputData);
+    CHECK(armnnInterpreter.FillInputTensor(inputData, 0) == kTfLiteOk);
+    CHECK(tfLiteInterpreter.FillInputTensor(inputData, 0) == kTfLiteOk);
 
     // Set alpha data if not constant
-    if (!alphaIsConstant) {
-        armnnDelegate::FillInput(tfLiteInterpreter, 1, alphaData);
-        armnnDelegate::FillInput(armnnDelegateInterpreter, 1, alphaData);
+    if (!alphaIsConstant)
+    {
+        CHECK(tfLiteInterpreter.FillInputTensor(alphaData, 1) == kTfLiteOk);
+        CHECK(armnnInterpreter.FillInputTensor(alphaData, 1) == kTfLiteOk);
     }
 
-    // Run EnqueueWorkload
-    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
-    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
-
-    // Compare output data
-    auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[0];
+    CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
+    std::vector<float> tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<float>(0);
 
-    auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<float>(tfLiteDelegateOutputId);
+    CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
+    std::vector<float> armnnOutputValues = armnnInterpreter.GetOutputResult<float>(0);
 
-    auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0];
-    auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<float>(armnnDelegateOutputId);
+    armnnDelegate::CompareOutputData(tfLiteOutputValues, armnnOutputValues, expectedOutput);
 
-    for (size_t i = 0; i < expectedOutput.size(); i++)
+    // Don't compare shapes on dynamic output tests, as output shape gets cleared.
+ if(!outputShape.empty()) { - CHECK(expectedOutput[i] == armnnDelegateOutputData[i]); - CHECK(tfLiteDelegateOutputData[i] == expectedOutput[i]); - CHECK(tfLiteDelegateOutputData[i] == armnnDelegateOutputData[i]); + std::vector tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0); + std::vector armnnOutputShape = armnnInterpreter.GetOutputShape(0); + armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, outputShape); } + + tfLiteInterpreter.Cleanup(); + armnnInterpreter.Cleanup(); } } // anonymous namespace \ No newline at end of file diff --git a/delegate/test/QuantizationTestHelper.hpp b/delegate/test/QuantizationTestHelper.hpp index af898f332d..8554a01967 100644 --- a/delegate/test/QuantizationTestHelper.hpp +++ b/delegate/test/QuantizationTestHelper.hpp @@ -5,15 +5,17 @@ #pragma once +#include "TestUtils.hpp" + #include +#include #include -#include #include -#include -#include #include +#include + #include namespace @@ -112,7 +114,7 @@ std::vector CreateQuantizationTfLiteModel(tflite::BuiltinOperator quantiza modelDescription, flatBufferBuilder.CreateVector(buffers.data(), buffers.size())); - flatBufferBuilder.Finish(flatbufferModel); + flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER); return std::vector(flatBufferBuilder.GetBufferPointer(), flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize()); @@ -130,7 +132,7 @@ void QuantizationTest(tflite::BuiltinOperator quantizeOperatorCode, float quantScale = 1.0f, int quantOffset = 0) { - using namespace tflite; + using namespace delegateTestInterpreter; std::vector modelBuffer = CreateQuantizationTfLiteModel(quantizeOperatorCode, inputTensorType, outputTensorType, @@ -139,62 +141,27 @@ void QuantizationTest(tflite::BuiltinOperator quantizeOperatorCode, quantScale, quantOffset); - const Model* tfLiteModel = GetModel(modelBuffer.data()); - - // Create TfLite Interpreters - std::unique_ptr armnnDelegateInterpreter; - CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver()) - (&armnnDelegateInterpreter) == kTfLiteOk); - CHECK(armnnDelegateInterpreter != nullptr); - CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk); - - std::unique_ptr tfLiteInterpreter; - CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver()) - (&tfLiteInterpreter) == kTfLiteOk); - CHECK(tfLiteInterpreter != nullptr); - CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk); - - // Create the ArmNN Delegate - armnnDelegate::DelegateOptions delegateOptions(backends); - std::unique_ptr - theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions), - armnnDelegate::TfLiteArmnnDelegateDelete); - CHECK(theArmnnDelegate != nullptr); - - // Modify armnnDelegateInterpreter to use armnnDelegate - CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk); - - // Set input data - auto tfLiteDelegateInputId = tfLiteInterpreter->inputs()[0]; - auto tfLiteDelageInputData = tfLiteInterpreter->typed_tensor(tfLiteDelegateInputId); - for (unsigned int i = 0; i < inputValues.size(); ++i) - { - tfLiteDelageInputData[i] = inputValues[i]; - } - - auto armnnDelegateInputId = armnnDelegateInterpreter->inputs()[0]; - auto armnnDelegateInputData = armnnDelegateInterpreter->typed_tensor(armnnDelegateInputId); - for (unsigned int i = 0; i < inputValues.size(); ++i) - { - armnnDelegateInputData[i] = inputValues[i]; - } - - // Run EnqueWorkload - CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk); - 
CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk); - - // Compare output data - auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[0]; - auto tfLiteDelageOutputData = tfLiteInterpreter->typed_tensor(tfLiteDelegateOutputId); - auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0]; - auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor(armnnDelegateOutputId); - - for (size_t i = 0; i < expectedOutputValues.size(); i++) - { - CHECK(expectedOutputValues[i] == armnnDelegateOutputData[i]); - CHECK(tfLiteDelageOutputData[i] == expectedOutputValues[i]); - CHECK(tfLiteDelageOutputData[i] == armnnDelegateOutputData[i]); - } + // Setup interpreter with just TFLite Runtime. + auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer); + CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk); + CHECK(tfLiteInterpreter.FillInputTensor(inputValues, 0) == kTfLiteOk); + CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk); + std::vector tfLiteOutputValues = tfLiteInterpreter.GetOutputResult(0); + std::vector tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0); + + // Setup interpreter with Arm NN Delegate applied. + auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends); + CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk); + CHECK(armnnInterpreter.FillInputTensor(inputValues, 0) == kTfLiteOk); + CHECK(armnnInterpreter.Invoke() == kTfLiteOk); + std::vector armnnOutputValues = armnnInterpreter.GetOutputResult(0); + std::vector armnnOutputShape = armnnInterpreter.GetOutputShape(0); + + armnnDelegate::CompareOutputData(tfLiteOutputValues, armnnOutputValues, expectedOutputValues); + armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, outputShape); + + tfLiteInterpreter.Cleanup(); + armnnInterpreter.Cleanup(); } } // anonymous namespace \ No newline at end of file diff --git a/delegate/test/RedefineTestHelper.hpp b/delegate/test/RedefineTestHelper.hpp index ce60db0664..80631ccf8d 100644 --- a/delegate/test/RedefineTestHelper.hpp +++ b/delegate/test/RedefineTestHelper.hpp @@ -8,14 +8,14 @@ #include "TestUtils.hpp" #include +#include #include -#include #include -#include -#include #include +#include + #include namespace @@ -135,7 +135,7 @@ std::vector CreateRedefineTfLiteModel( modelDescription, flatBufferBuilder.CreateVector(buffers.data(), buffers.size())); - flatBufferBuilder.Finish(flatbufferModel); + flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER); return std::vector(flatBufferBuilder.GetBufferPointer(), flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize()); @@ -154,7 +154,7 @@ void RedefineTest(tflite::BuiltinOperator redefineOperatorCode, float quantScale = 1.0f, int quantOffset = 0) { - using namespace tflite; + using namespace delegateTestInterpreter; std::vector modelBuffer = CreateRedefineTfLiteModel(redefineOperatorCode, tensorType, inputShape, @@ -164,39 +164,27 @@ void RedefineTest(tflite::BuiltinOperator redefineOperatorCode, quantScale, quantOffset); - const Model* tfLiteModel = GetModel(modelBuffer.data()); - CHECK(tfLiteModel != nullptr); - // Create TfLite Interpreters - std::unique_ptr armnnDelegateInterpreter; - CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver()) - (&armnnDelegateInterpreter) == kTfLiteOk); - CHECK(armnnDelegateInterpreter != nullptr); - CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk); - - std::unique_ptr tfLiteInterpreter; - CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver()) 
- (&tfLiteInterpreter) == kTfLiteOk); - CHECK(tfLiteInterpreter != nullptr); - CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk); - - // Create the ArmNN Delegate - armnnDelegate::DelegateOptions delegateOptions(backends); - std::unique_ptr - theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions), - armnnDelegate::TfLiteArmnnDelegateDelete); - CHECK(theArmnnDelegate != nullptr); - // Modify armnnDelegateInterpreter to use armnnDelegate - CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk); - - // Set input data - armnnDelegate::FillInput(tfLiteInterpreter, 0, inputValues); - armnnDelegate::FillInput(armnnDelegateInterpreter, 0, inputValues); - - // Run EnqueueWorkload - CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk); - CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk); - - armnnDelegate::CompareOutputData(tfLiteInterpreter, armnnDelegateInterpreter, outputShape, expectedOutputValues); + // Setup interpreter with just TFLite Runtime. + auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer); + CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk); + CHECK(tfLiteInterpreter.FillInputTensor(inputValues, 0) == kTfLiteOk); + CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk); + std::vector tfLiteOutputValues = tfLiteInterpreter.GetOutputResult(0); + std::vector tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0); + + // Setup interpreter with Arm NN Delegate applied. + auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends); + CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk); + CHECK(armnnInterpreter.FillInputTensor(inputValues, 0) == kTfLiteOk); + CHECK(armnnInterpreter.Invoke() == kTfLiteOk); + std::vector armnnOutputValues = armnnInterpreter.GetOutputResult(0); + std::vector armnnOutputShape = armnnInterpreter.GetOutputShape(0); + + armnnDelegate::CompareOutputData(tfLiteOutputValues, armnnOutputValues, expectedOutputValues); + armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, outputShape); + + tfLiteInterpreter.Cleanup(); + armnnInterpreter.Cleanup(); } } // anonymous namespace \ No newline at end of file diff --git a/delegate/test/ReduceTestHelper.hpp b/delegate/test/ReduceTestHelper.hpp index fedf7ee150..a268981865 100644 --- a/delegate/test/ReduceTestHelper.hpp +++ b/delegate/test/ReduceTestHelper.hpp @@ -8,17 +8,15 @@ #include "TestUtils.hpp" #include +#include #include -#include #include -#include -#include #include -#include +#include -#include +#include namespace { @@ -140,7 +138,7 @@ std::vector CreateReduceTfLiteModel(tflite::BuiltinOperator reduceOperator modelDescription, flatBufferBuilder.CreateVector(buffers, 4)); - flatBufferBuilder.Finish(flatbufferModel); + flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER); return std::vector(flatBufferBuilder.GetBufferPointer(), flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize()); @@ -160,7 +158,7 @@ void ReduceTest(tflite::BuiltinOperator reduceOperatorCode, float quantScale = 1.0f, int quantOffset = 0) { - using namespace tflite; + using namespace delegateTestInterpreter; std::vector modelBufferArmNN = CreateReduceTfLiteModel(reduceOperatorCode, tensorType, input0Shape, @@ -182,47 +180,27 @@ void ReduceTest(tflite::BuiltinOperator reduceOperatorCode, quantOffset, true); - const Model* tfLiteModelArmNN = GetModel(modelBufferArmNN.data()); - const Model* tfLiteModelTFLite = GetModel(modelBufferTFLite.data()); - - // Create TfLite Interpreters - std::unique_ptr 
armnnDelegateInterpreter; - CHECK(InterpreterBuilder(tfLiteModelArmNN, ::tflite::ops::builtin::BuiltinOpResolver()) - (&armnnDelegateInterpreter) == kTfLiteOk); - CHECK(armnnDelegateInterpreter != nullptr); - CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk); - - std::unique_ptr tfLiteInterpreter; - CHECK(InterpreterBuilder(tfLiteModelTFLite, ::tflite::ops::builtin::BuiltinOpResolver()) - (&tfLiteInterpreter) == kTfLiteOk); - CHECK(tfLiteInterpreter != nullptr); - CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk); - - // Create the ArmNN Delegate - armnnDelegate::DelegateOptions delegateOptions(backends); - std::unique_ptr - theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions), - armnnDelegate::TfLiteArmnnDelegateDelete); - CHECK(theArmnnDelegate != nullptr); - - // Modify armnnDelegateInterpreter to use armnnDelegate - CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk); - - // Set input data - armnnDelegate::FillInput(tfLiteInterpreter, 0, input0Values); - armnnDelegate::FillInput(armnnDelegateInterpreter, 0, input0Values); - - // Run EnqueWorkload - CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk); - CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk); - - // Compare output data - armnnDelegate::CompareOutputData(tfLiteInterpreter, - armnnDelegateInterpreter, - expectedOutputShape, - expectedOutputValues); - - armnnDelegateInterpreter.reset(nullptr); + // Setup interpreter with just TFLite Runtime. + auto tfLiteInterpreter = DelegateTestInterpreter(modelBufferTFLite); + CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk); + CHECK(tfLiteInterpreter.FillInputTensor(input0Values, 0) == kTfLiteOk); + CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk); + std::vector tfLiteOutputValues = tfLiteInterpreter.GetOutputResult(0); + std::vector tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0); + + // Setup interpreter with Arm NN Delegate applied. 
+ auto armnnInterpreter = DelegateTestInterpreter(modelBufferArmNN, backends); + CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk); + CHECK(armnnInterpreter.FillInputTensor(input0Values, 0) == kTfLiteOk); + CHECK(armnnInterpreter.Invoke() == kTfLiteOk); + std::vector armnnOutputValues = armnnInterpreter.GetOutputResult(0); + std::vector armnnOutputShape = armnnInterpreter.GetOutputShape(0); + + armnnDelegate::CompareOutputData(tfLiteOutputValues, armnnOutputValues, expectedOutputValues); + armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, expectedOutputShape); + + tfLiteInterpreter.Cleanup(); + armnnInterpreter.Cleanup(); } } // anonymous namespace \ No newline at end of file diff --git a/delegate/test/ResizeTest.cpp b/delegate/test/ResizeTest.cpp index 20113875a8..f3bfe43143 100644 --- a/delegate/test/ResizeTest.cpp +++ b/delegate/test/ResizeTest.cpp @@ -42,7 +42,7 @@ void ResizeBiliniarFloat32Test(std::vector& backends) const std::vector input1Shape { 1, 3, 3, 1 }; const std::vector input2Shape { 2 }; - const std::vector expectedOutputShape = input2NewShape; + const std::vector expectedOutputShape = { 1, 5, 5, 1 }; ResizeFP32TestImpl(tflite::BuiltinOperator_RESIZE_BILINEAR, backends, @@ -66,7 +66,7 @@ void ResizeNearestNeighbourFloat32Test(std::vector& backends) const std::vector input1Shape { 1, 2, 2, 1 }; const std::vector input2Shape { 2 }; - const std::vector expectedOutputShape = input2NewShape; + const std::vector expectedOutputShape = { 1, 1, 1, 1 }; ResizeFP32TestImpl(tflite::BuiltinOperator_RESIZE_NEAREST_NEIGHBOR, backends, diff --git a/delegate/test/ResizeTestHelper.hpp b/delegate/test/ResizeTestHelper.hpp index ab7de14612..ff0c413fbf 100644 --- a/delegate/test/ResizeTestHelper.hpp +++ b/delegate/test/ResizeTestHelper.hpp @@ -8,14 +8,14 @@ #include "TestUtils.hpp" #include +#include #include -#include #include -#include -#include #include +#include + #include namespace @@ -113,7 +113,7 @@ std::vector CreateResizeTfLiteModel(tflite::BuiltinOperator operatorCode, modelDescription, flatBufferBuilder.CreateVector(buffers.data(), buffers.size())); - flatBufferBuilder.Finish(flatbufferModel); + flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER); return std::vector(flatBufferBuilder.GetBufferPointer(), flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize()); @@ -128,7 +128,7 @@ void ResizeFP32TestImpl(tflite::BuiltinOperator operatorCode, std::vector& expectedOutputValues, std::vector expectedOutputShape) { - using namespace tflite; + using namespace delegateTestInterpreter; std::vector modelBuffer = CreateResizeTfLiteModel(operatorCode, ::tflite::TensorType_FLOAT32, @@ -137,58 +137,29 @@ void ResizeFP32TestImpl(tflite::BuiltinOperator operatorCode, input2Shape, expectedOutputShape); - const Model* tfLiteModel = GetModel(modelBuffer.data()); - - // The model will be executed using tflite and using the armnn delegate so that the outputs - // can be compared. 
-
-    // Create TfLite Interpreter with armnn delegate
-    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
-    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-          (&armnnDelegateInterpreter) == kTfLiteOk);
-    CHECK(armnnDelegateInterpreter != nullptr);
-    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
-
-    // Create TfLite Interpreter without armnn delegate
-    std::unique_ptr<Interpreter> tfLiteInterpreter;
-    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-          (&tfLiteInterpreter) == kTfLiteOk);
-    CHECK(tfLiteInterpreter != nullptr);
-    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
-
-    // Create the ArmNN Delegate
-    armnnDelegate::DelegateOptions delegateOptions(backends);
-    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
-        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
-                         armnnDelegate::TfLiteArmnnDelegateDelete);
-    CHECK(theArmnnDelegate != nullptr);
-    // Modify armnnDelegateInterpreter to use armnnDelegate
-    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
-    // Set input data for the armnn interpreter
-    armnnDelegate::FillInput(armnnDelegateInterpreter, 0, input1Values);
-    armnnDelegate::FillInput(armnnDelegateInterpreter, 1, input2NewShape);
-
-    // Set input data for the tflite interpreter
-    armnnDelegate::FillInput(tfLiteInterpreter, 0, input1Values);
-    armnnDelegate::FillInput(tfLiteInterpreter, 1, input2NewShape);
-
-    // Run EnqueWorkload
-    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
-    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
-
-    // Compare output data
-    auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[0];
-    auto tfLiteDelageOutputData = tfLiteInterpreter->typed_tensor<float>(tfLiteDelegateOutputId);
-    auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0];
-    auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<float>(armnnDelegateOutputId);
-    for (size_t i = 0; i < expectedOutputValues.size(); i++)
-    {
-        CHECK(expectedOutputValues[i] == doctest::Approx(armnnDelegateOutputData[i]));
-        CHECK(armnnDelegateOutputData[i] == doctest::Approx(tfLiteDelageOutputData[i]));
-    }
-
-    armnnDelegateInterpreter.reset(nullptr);
+    // Setup interpreter with just TFLite Runtime.
+    auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
+    CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
+    CHECK(tfLiteInterpreter.FillInputTensor(input1Values, 0) == kTfLiteOk);
+    CHECK(tfLiteInterpreter.FillInputTensor(input2NewShape, 1) == kTfLiteOk);
+    CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
+    std::vector<float>   tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<float>(0);
+    std::vector<int32_t> tfLiteOutputShape  = tfLiteInterpreter.GetOutputShape(0);
+
+    // Setup interpreter with Arm NN Delegate applied.
+    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+    CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
+    CHECK(armnnInterpreter.FillInputTensor(input1Values, 0) == kTfLiteOk);
+    CHECK(armnnInterpreter.FillInputTensor(input2NewShape, 1) == kTfLiteOk);
+    CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
+    std::vector<float>   armnnOutputValues = armnnInterpreter.GetOutputResult<float>(0);
+    std::vector<int32_t> armnnOutputShape  = armnnInterpreter.GetOutputShape(0);
+
+    armnnDelegate::CompareOutputData(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
+    armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, expectedOutputShape);
+
+    tfLiteInterpreter.Cleanup();
+    armnnInterpreter.Cleanup();
 }
 
 } // anonymous namespace
\ No newline at end of file
diff --git a/delegate/test/RoundTestHelper.hpp b/delegate/test/RoundTestHelper.hpp
index dc14abf6e3..3aa066b8f6 100644
--- a/delegate/test/RoundTestHelper.hpp
+++ b/delegate/test/RoundTestHelper.hpp
@@ -8,14 +8,14 @@
 
 #include "TestUtils.hpp"
 
 #include
+#include
 #include
-#include
 #include
-#include
-#include
 #include
+#include
+
 #include
 
 namespace
@@ -94,7 +94,7 @@ std::vector<char> CreateRoundTfLiteModel(tflite::BuiltinOperator roundOperatorCo
                                          modelDescription,
                                          flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
 
-    flatBufferBuilder.Finish(flatbufferModel);
+    flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);
 
     return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                              flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
 }
@@ -109,55 +109,34 @@ void RoundTest(tflite::BuiltinOperator roundOperatorCode,
               float quantScale = 1.0f,
               int quantOffset = 0)
 {
-    using namespace tflite;
+    using namespace delegateTestInterpreter;
 
    std::vector<char> modelBuffer = CreateRoundTfLiteModel(roundOperatorCode,
                                                           tensorType,
                                                           shape,
                                                           quantScale,
                                                           quantOffset);
 
-    const Model* tfLiteModel = GetModel(modelBuffer.data());
-
-    // Create TfLite Interpreters
-    std::unique_ptr<Interpreter> armnnDelegate;
-    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-          (&armnnDelegate) == kTfLiteOk);
-    CHECK(armnnDelegate != nullptr);
-    CHECK(armnnDelegate->AllocateTensors() == kTfLiteOk);
-
-    std::unique_ptr<Interpreter> tfLiteDelegate;
-    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-          (&tfLiteDelegate) == kTfLiteOk);
-    CHECK(tfLiteDelegate != nullptr);
-    CHECK(tfLiteDelegate->AllocateTensors() == kTfLiteOk);
-
-    // Create the ArmNN Delegate
-    armnnDelegate::DelegateOptions delegateOptions(backends);
-    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
-        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
-                         armnnDelegate::TfLiteArmnnDelegateDelete);
-    CHECK(theArmnnDelegate != nullptr);
-
-    // Modify armnnDelegateInterpreter to use armnnDelegate
-    CHECK(armnnDelegate->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
-    // Set input data
-    armnnDelegate::FillInput(tfLiteDelegate, 0, inputValues);
-    armnnDelegate::FillInput(armnnDelegate, 0, inputValues);
-
-    // Run EnqueWorkload
-    CHECK(tfLiteDelegate->Invoke() == kTfLiteOk);
-    CHECK(armnnDelegate->Invoke() == kTfLiteOk);
-
-    // Compare output data
-    armnnDelegate::CompareOutputData(tfLiteDelegate,
-                                     armnnDelegate,
-                                     shape,
-                                     expectedOutputValues,
-                                     0);
-
-    tfLiteDelegate.reset(nullptr);
-    armnnDelegate.reset(nullptr);
+    // Setup interpreter with just TFLite Runtime.
+ auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer); + CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk); + CHECK(tfLiteInterpreter.FillInputTensor(inputValues, 0) == kTfLiteOk); + CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk); + std::vector tfLiteOutputValues = tfLiteInterpreter.GetOutputResult(0); + std::vector tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0); + + // Setup interpreter with Arm NN Delegate applied. + auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends); + CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk); + CHECK(armnnInterpreter.FillInputTensor(inputValues, 0) == kTfLiteOk); + CHECK(armnnInterpreter.Invoke() == kTfLiteOk); + std::vector armnnOutputValues = armnnInterpreter.GetOutputResult(0); + std::vector armnnOutputShape = armnnInterpreter.GetOutputShape(0); + + armnnDelegate::CompareOutputData(tfLiteOutputValues, armnnOutputValues, expectedOutputValues); + armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, shape); + + tfLiteInterpreter.Cleanup(); + armnnInterpreter.Cleanup(); } } // anonymous namespace diff --git a/delegate/test/ShapeTestHelper.hpp b/delegate/test/ShapeTestHelper.hpp index 54e27ac8fd..42f258b00b 100644 --- a/delegate/test/ShapeTestHelper.hpp +++ b/delegate/test/ShapeTestHelper.hpp @@ -8,14 +8,14 @@ #include "TestUtils.hpp" #include +#include #include -#include #include -#include -#include #include +#include + #include namespace @@ -97,7 +97,7 @@ std::vector CreateShapeTfLiteModel(tflite::TensorType inputTensorType, modelDescription, flatBufferBuilder.CreateVector(buffers.data(), buffers.size())); - flatBufferBuilder.Finish(flatbufferModel); + flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER); return std::vector(flatBufferBuilder.GetBufferPointer(), flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize()); @@ -114,7 +114,7 @@ void ShapeTest(tflite::TensorType inputTensorType, float quantScale = 1.0f, int quantOffset = 0) { - using namespace tflite; + using namespace delegateTestInterpreter; std::vector modelBuffer = CreateShapeTfLiteModel(inputTensorType, outputTensorType, inputShape, @@ -122,52 +122,25 @@ void ShapeTest(tflite::TensorType inputTensorType, quantScale, quantOffset); - const Model* tfLiteModel = GetModel(modelBuffer.data()); - - // Create TfLite Interpreters - std::unique_ptr armnnDelegate; - - CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver()) - (&armnnDelegate) == kTfLiteOk); - CHECK(armnnDelegate != nullptr); - CHECK(armnnDelegate->AllocateTensors() == kTfLiteOk); - - std::unique_ptr tfLiteDelegate; - - CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver()) - (&tfLiteDelegate) == kTfLiteOk); - CHECK(tfLiteDelegate != nullptr); - CHECK(tfLiteDelegate->AllocateTensors() == kTfLiteOk); - - // Create the ArmNN Delegate - armnnDelegate::DelegateOptions delegateOptions(backends); - - std::unique_ptr < TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete) > - theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions), - armnnDelegate::TfLiteArmnnDelegateDelete); - - CHECK(theArmnnDelegate != nullptr); - - // Modify armnnDelegateInterpreter to use armnnDelegate - CHECK(armnnDelegate->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk); - - // Set input data - armnnDelegate::FillInput(tfLiteDelegate, 0, inputValues); - armnnDelegate::FillInput(armnnDelegate, 0, inputValues); - - // Run EnqueWorkload - CHECK(tfLiteDelegate->Invoke() == 
kTfLiteOk); - CHECK(armnnDelegate->Invoke() == kTfLiteOk); - - // Compare output data - armnnDelegate::CompareOutputData(tfLiteDelegate, - armnnDelegate, - expectedOutputShape, - expectedOutputValues, - 0); - - tfLiteDelegate.reset(nullptr); - armnnDelegate.reset(nullptr); + // Setup interpreter with just TFLite Runtime. + auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer); + CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk); + CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk); + std::vector tfLiteOutputValues = tfLiteInterpreter.GetOutputResult(0); + std::vector tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0); + + // Setup interpreter with Arm NN Delegate applied. + auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends); + CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk); + CHECK(armnnInterpreter.Invoke() == kTfLiteOk); + std::vector armnnOutputValues = armnnInterpreter.GetOutputResult(0); + std::vector armnnOutputShape = armnnInterpreter.GetOutputShape(0); + + armnnDelegate::CompareOutputData(tfLiteOutputValues, armnnOutputValues, expectedOutputValues); + armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, expectedOutputShape); + + tfLiteInterpreter.Cleanup(); + armnnInterpreter.Cleanup(); } } // anonymous namespace diff --git a/delegate/test/SliceTestHelper.hpp b/delegate/test/SliceTestHelper.hpp index c938fad31b..19f2b3d8ea 100644 --- a/delegate/test/SliceTestHelper.hpp +++ b/delegate/test/SliceTestHelper.hpp @@ -8,18 +8,15 @@ #include "TestUtils.hpp" #include -#include +#include #include -#include #include -#include -#include #include -#include +#include -#include +#include namespace { @@ -110,7 +107,7 @@ std::vector CreateSliceTfLiteModel(tflite::TensorType tensorType, modelDescription, flatBufferBuilder.CreateVector(buffers, 5)); - flatBufferBuilder.Finish(flatbufferModel); + flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER); return std::vector(flatBufferBuilder.GetBufferPointer(), flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize()); @@ -127,7 +124,7 @@ void SliceTestImpl(std::vector& backends, std::vector& sizeTensorShape, std::vector& outputTensorShape) { - using namespace tflite; + using namespace delegateTestInterpreter; std::vector modelBuffer = CreateSliceTfLiteModel( ::tflite::TensorType_FLOAT32, inputTensorShape, @@ -137,47 +134,27 @@ void SliceTestImpl(std::vector& backends, sizeTensorShape, outputTensorShape); - auto tfLiteModel = GetModel(modelBuffer.data()); - - // Create TfLite Interpreters - std::unique_ptr armnnDelegate; - CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver()) - (&armnnDelegate) == kTfLiteOk); - CHECK(armnnDelegate != nullptr); - CHECK(armnnDelegate->AllocateTensors() == kTfLiteOk); - - std::unique_ptr tfLiteDelegate; - CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver()) - (&tfLiteDelegate) == kTfLiteOk); - CHECK(tfLiteDelegate != nullptr); - CHECK(tfLiteDelegate->AllocateTensors() == kTfLiteOk); - - // Create the ArmNN Delegate - armnnDelegate::DelegateOptions delegateOptions(backends); - std::unique_ptr - theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions), - armnnDelegate::TfLiteArmnnDelegateDelete); - CHECK(theArmnnDelegate != nullptr); - - // Modify armnnDelegateInterpreter to use armnnDelegate - CHECK(armnnDelegate->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk); - - // Set input data - armnnDelegate::FillInput(tfLiteDelegate, 0, inputValues); - 
armnnDelegate::FillInput(armnnDelegate, 0, inputValues); - - // Run EnqueWorkload - CHECK(tfLiteDelegate->Invoke() == kTfLiteOk); - CHECK(armnnDelegate->Invoke() == kTfLiteOk); - - // Compare output data - armnnDelegate::CompareOutputData(tfLiteDelegate, - armnnDelegate, - outputTensorShape, - expectedOutputValues); - - tfLiteDelegate.reset(nullptr); - armnnDelegate.reset(nullptr); + // Setup interpreter with just TFLite Runtime. + auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer); + CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk); + CHECK(tfLiteInterpreter.FillInputTensor(inputValues, 0) == kTfLiteOk); + CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk); + std::vector tfLiteOutputValues = tfLiteInterpreter.GetOutputResult(0); + std::vector tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0); + + // Setup interpreter with Arm NN Delegate applied. + auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends); + CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk); + CHECK(armnnInterpreter.FillInputTensor(inputValues, 0) == kTfLiteOk); + CHECK(armnnInterpreter.Invoke() == kTfLiteOk); + std::vector armnnOutputValues = armnnInterpreter.GetOutputResult(0); + std::vector armnnOutputShape = armnnInterpreter.GetOutputShape(0); + + armnnDelegate::CompareOutputData(tfLiteOutputValues, armnnOutputValues, expectedOutputValues); + armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, outputTensorShape); + + tfLiteInterpreter.Cleanup(); + armnnInterpreter.Cleanup(); } // End of Slice Test } // anonymous namespace \ No newline at end of file diff --git a/delegate/test/SoftmaxTestHelper.hpp b/delegate/test/SoftmaxTestHelper.hpp index 15177b7088..ffd02abdf7 100644 --- a/delegate/test/SoftmaxTestHelper.hpp +++ b/delegate/test/SoftmaxTestHelper.hpp @@ -5,16 +5,18 @@ #pragma once +#include "TestUtils.hpp" + #include +#include #include #include -#include #include -#include -#include #include +#include + #include namespace @@ -95,7 +97,7 @@ std::vector CreateSoftmaxTfLiteModel(tflite::BuiltinOperator softmaxOperat flatBufferBuilder.CreateVector(&subgraph, 1), modelDescription, flatBufferBuilder.CreateVector(buffers.data(), buffers.size())); - flatBufferBuilder.Finish(flatbufferModel); + flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER); return std::vector(flatBufferBuilder.GetBufferPointer(), flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize()); } @@ -108,65 +110,33 @@ void SoftmaxTest(tflite::BuiltinOperator softmaxOperatorCode, std::vector& expectedOutputValues, float beta = 0) { - using namespace tflite; + using namespace delegateTestInterpreter; std::vector modelBuffer = CreateSoftmaxTfLiteModel(softmaxOperatorCode, tensorType, shape, beta); - const Model* tfLiteModel = GetModel(modelBuffer.data()); - // Create TfLite Interpreters - std::unique_ptr armnnDelegateInterpreter; - CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver()) - (&armnnDelegateInterpreter) == kTfLiteOk); - CHECK(armnnDelegateInterpreter != nullptr); - CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk); - - std::unique_ptr tfLiteInterpreter; - CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver()) - (&tfLiteInterpreter) == kTfLiteOk); - CHECK(tfLiteInterpreter != nullptr); - CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk); - - // Create the ArmNN Delegate - armnnDelegate::DelegateOptions delegateOptions(backends); - std::unique_ptr - 
theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions), - armnnDelegate::TfLiteArmnnDelegateDelete); - CHECK(theArmnnDelegate != nullptr); - // Modify armnnDelegateInterpreter to use armnnDelegate - CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk); - - // Set input data - auto tfLiteDelegateInputId = tfLiteInterpreter->inputs()[0]; - auto tfLiteInterpreterInputData = tfLiteInterpreter->typed_tensor(tfLiteDelegateInputId); - for (unsigned int i = 0; i < inputValues.size(); ++i) - { - tfLiteInterpreterInputData[i] = inputValues[i]; - } - - auto armnnDelegateInputId = armnnDelegateInterpreter->inputs()[0]; - auto armnnDelegateInputData = armnnDelegateInterpreter->typed_tensor(armnnDelegateInputId); - for (unsigned int i = 0; i < inputValues.size(); ++i) - { - armnnDelegateInputData[i] = inputValues[i]; - } - // Run EnqueWorkload - CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk); - CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk); - - // Compare output data - auto tfLiteInterpreterOutputId = tfLiteInterpreter->outputs()[0]; - auto tfLiteInterpreterOutputData = tfLiteInterpreter->typed_tensor(tfLiteInterpreterOutputId); - auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0]; - auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor(armnnDelegateOutputId); - - for (size_t i = 0; i < inputValues.size(); ++i) - { - CHECK(armnnUtils::within_percentage_tolerance(expectedOutputValues[i], armnnDelegateOutputData[i], 0.1)); - CHECK(armnnUtils::within_percentage_tolerance(tfLiteInterpreterOutputData[i], - armnnDelegateOutputData[i], 0.1)); - } + // Setup interpreter with just TFLite Runtime. + auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer); + CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk); + CHECK(tfLiteInterpreter.FillInputTensor(inputValues, 0) == kTfLiteOk); + CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk); + std::vector tfLiteOutputValues = tfLiteInterpreter.GetOutputResult(0); + std::vector tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0); + + // Setup interpreter with Arm NN Delegate applied. 
+ auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends); + CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk); + CHECK(armnnInterpreter.FillInputTensor(inputValues, 0) == kTfLiteOk); + CHECK(armnnInterpreter.Invoke() == kTfLiteOk); + std::vector armnnOutputValues = armnnInterpreter.GetOutputResult(0); + std::vector armnnOutputShape = armnnInterpreter.GetOutputShape(0); + + armnnDelegate::CompareOutputData(tfLiteOutputValues, armnnOutputValues, expectedOutputValues); + armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, shape); + + tfLiteInterpreter.Cleanup(); + armnnInterpreter.Cleanup(); } diff --git a/delegate/test/SpaceDepthTestHelper.hpp b/delegate/test/SpaceDepthTestHelper.hpp index 6e8e39d0b0..912472d6c7 100644 --- a/delegate/test/SpaceDepthTestHelper.hpp +++ b/delegate/test/SpaceDepthTestHelper.hpp @@ -8,14 +8,14 @@ #include "TestUtils.hpp" #include +#include #include -#include #include -#include -#include #include +#include + #include namespace @@ -108,7 +108,7 @@ std::vector CreateSpaceDepthTfLiteModel(tflite::BuiltinOperator spaceDepth flatBufferBuilder.CreateVector(&subgraph, 1), modelDescription, flatBufferBuilder.CreateVector(buffers.data(), buffers.size())); - flatBufferBuilder.Finish(flatbufferModel); + flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER); return std::vector(flatBufferBuilder.GetBufferPointer(), flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize()); } @@ -123,46 +123,34 @@ void SpaceDepthTest(tflite::BuiltinOperator spaceDepthOperatorCode, std::vector& expectedOutputValues, int32_t blockSize = 2) { - using namespace tflite; + using namespace delegateTestInterpreter; std::vector modelBuffer = CreateSpaceDepthTfLiteModel(spaceDepthOperatorCode, tensorType, inputShape, outputShape, blockSize); - const Model* tfLiteModel = GetModel(modelBuffer.data()); - // Create TfLite Interpreters - std::unique_ptr armnnDelegateInterpreter; - CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver()) - (&armnnDelegateInterpreter) == kTfLiteOk); - CHECK(armnnDelegateInterpreter != nullptr); - CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk); - - std::unique_ptr tfLiteInterpreter; - CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver()) - (&tfLiteInterpreter) == kTfLiteOk); - CHECK(tfLiteInterpreter != nullptr); - CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk); - - // Create the ArmNN Delegate - armnnDelegate::DelegateOptions delegateOptions(backends); - std::unique_ptr - theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions), - armnnDelegate::TfLiteArmnnDelegateDelete); - CHECK(theArmnnDelegate != nullptr); - // Modify armnnDelegateInterpreter to use armnnDelegate - CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk); - - // Set input data - armnnDelegate::FillInput(tfLiteInterpreter, 0, inputValues); - armnnDelegate::FillInput(armnnDelegateInterpreter, 0, inputValues); - - // Run EnqueWorkload - CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk); - CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk); - - // Compare output data - armnnDelegate::CompareOutputData(tfLiteInterpreter, armnnDelegateInterpreter, outputShape, expectedOutputValues); + // Setup interpreter with just TFLite Runtime. 
+ auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer); + CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk); + CHECK(tfLiteInterpreter.FillInputTensor(inputValues, 0) == kTfLiteOk); + CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk); + std::vector tfLiteOutputValues = tfLiteInterpreter.GetOutputResult(0); + std::vector tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0); + + // Setup interpreter with Arm NN Delegate applied. + auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends); + CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk); + CHECK(armnnInterpreter.FillInputTensor(inputValues, 0) == kTfLiteOk); + CHECK(armnnInterpreter.Invoke() == kTfLiteOk); + std::vector armnnOutputValues = armnnInterpreter.GetOutputResult(0); + std::vector armnnOutputShape = armnnInterpreter.GetOutputShape(0); + + armnnDelegate::CompareOutputData(tfLiteOutputValues, armnnOutputValues, expectedOutputValues); + armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, outputShape); + + tfLiteInterpreter.Cleanup(); + armnnInterpreter.Cleanup(); } } // anonymous namespace diff --git a/delegate/test/SplitTestHelper.hpp b/delegate/test/SplitTestHelper.hpp index 503fbc85ae..1d5f459148 100644 --- a/delegate/test/SplitTestHelper.hpp +++ b/delegate/test/SplitTestHelper.hpp @@ -8,17 +8,15 @@ #include "TestUtils.hpp" #include +#include #include -#include #include -#include -#include #include -#include +#include -#include +#include namespace { @@ -113,7 +111,7 @@ std::vector CreateSplitTfLiteModel(tflite::TensorType tensorType, modelDescription, flatBufferBuilder.CreateVector(buffers)); - flatBufferBuilder.Finish(flatbufferModel); + flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER); return std::vector(flatBufferBuilder.GetBufferPointer(), flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize()); @@ -132,7 +130,7 @@ void SplitTest(tflite::TensorType tensorType, float quantScale = 1.0f, int quantOffset = 0) { - using namespace tflite; + using namespace delegateTestInterpreter; std::vector modelBuffer = CreateSplitTfLiteModel(tensorType, axisTensorShape, inputTensorShape, @@ -141,51 +139,34 @@ void SplitTest(tflite::TensorType tensorType, numSplits, quantScale, quantOffset); - const Model* tfLiteModel = GetModel(modelBuffer.data()); - - // Create TfLite Interpreters - std::unique_ptr armnnDelegate; - CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver()) - (&armnnDelegate) == kTfLiteOk); - CHECK(armnnDelegate != nullptr); - CHECK(armnnDelegate->AllocateTensors() == kTfLiteOk); - - std::unique_ptr tfLiteDelegate; - CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver()) - (&tfLiteDelegate) == kTfLiteOk); - CHECK(tfLiteDelegate != nullptr); - CHECK(tfLiteDelegate->AllocateTensors() == kTfLiteOk); - - // Create the ArmNN Delegate - armnnDelegate::DelegateOptions delegateOptions(backends); - std::unique_ptr - theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions), - armnnDelegate::TfLiteArmnnDelegateDelete); - CHECK(theArmnnDelegate != nullptr); - - // Modify armnnDelegateInterpreter to use armnnDelegate - CHECK(armnnDelegate->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk); - - // Set input data - armnnDelegate::FillInput(tfLiteDelegate, 1, inputValues); - armnnDelegate::FillInput(armnnDelegate, 1, inputValues); - - // Run EnqueWorkload - CHECK(tfLiteDelegate->Invoke() == kTfLiteOk); - CHECK(armnnDelegate->Invoke() == kTfLiteOk); + // Setup interpreter with 
just TFLite Runtime. + auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer); + CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk); + CHECK(tfLiteInterpreter.FillInputTensor(inputValues, 1) == kTfLiteOk); + CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk); + + // Setup interpreter with Arm NN Delegate applied. + auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends); + CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk); + CHECK(armnnInterpreter.FillInputTensor(inputValues, 1) == kTfLiteOk); + CHECK(armnnInterpreter.Invoke() == kTfLiteOk); // Compare output data for (unsigned int i = 0; i < expectedOutputValues.size(); ++i) { - armnnDelegate::CompareOutputData(tfLiteDelegate, - armnnDelegate, - outputTensorShapes[i], - expectedOutputValues[i], - i); + std::vector tfLiteOutputValues = tfLiteInterpreter.GetOutputResult(i); + std::vector tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(i); + + std::vector armnnOutputValues = armnnInterpreter.GetOutputResult(i); + std::vector armnnOutputShape = armnnInterpreter.GetOutputShape(i); + + armnnDelegate::CompareOutputData(tfLiteOutputValues, armnnOutputValues, expectedOutputValues[i]); + armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, outputTensorShapes[i]); } - tfLiteDelegate.reset(nullptr); - armnnDelegate.reset(nullptr); + tfLiteInterpreter.Cleanup(); + armnnInterpreter.Cleanup(); + } // End of SPLIT Test std::vector CreateSplitVTfLiteModel(tflite::TensorType tensorType, @@ -288,7 +269,7 @@ std::vector CreateSplitVTfLiteModel(tflite::TensorType tensorType, modelDescription, flatBufferBuilder.CreateVector(buffers.data(), buffers.size())); - flatBufferBuilder.Finish(flatbufferModel); + flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER); return std::vector(flatBufferBuilder.GetBufferPointer(), flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize()); @@ -309,7 +290,7 @@ void SplitVTest(tflite::TensorType tensorType, float quantScale = 1.0f, int quantOffset = 0) { - using namespace tflite; + using namespace delegateTestInterpreter; std::vector modelBuffer = CreateSplitVTfLiteModel(tensorType, inputTensorShape, splitsTensorShape, @@ -320,51 +301,34 @@ void SplitVTest(tflite::TensorType tensorType, numSplits, quantScale, quantOffset); - const Model* tfLiteModel = GetModel(modelBuffer.data()); - - // Create TfLite Interpreters - std::unique_ptr armnnDelegate; - CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver()) - (&armnnDelegate) == kTfLiteOk); - CHECK(armnnDelegate != nullptr); - CHECK(armnnDelegate->AllocateTensors() == kTfLiteOk); - - std::unique_ptr tfLiteDelegate; - CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver()) - (&tfLiteDelegate) == kTfLiteOk); - CHECK(tfLiteDelegate != nullptr); - CHECK(tfLiteDelegate->AllocateTensors() == kTfLiteOk); - - // Create the ArmNN Delegate - armnnDelegate::DelegateOptions delegateOptions(backends); - std::unique_ptr - theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions), - armnnDelegate::TfLiteArmnnDelegateDelete); - CHECK(theArmnnDelegate != nullptr); - - // Modify armnnDelegateInterpreter to use armnnDelegate - CHECK(armnnDelegate->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk); - - // Set input data - armnnDelegate::FillInput(tfLiteDelegate, 0, inputValues); - armnnDelegate::FillInput(armnnDelegate, 0, inputValues); - - // Run EnqueWorkload - CHECK(tfLiteDelegate->Invoke() == kTfLiteOk); - CHECK(armnnDelegate->Invoke() == 
kTfLiteOk); + + // Setup interpreter with just TFLite Runtime. + auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer); + CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk); + CHECK(tfLiteInterpreter.FillInputTensor(inputValues, 0) == kTfLiteOk); + CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk); + + // Setup interpreter with Arm NN Delegate applied. + auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends); + CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk); + CHECK(armnnInterpreter.FillInputTensor(inputValues, 0) == kTfLiteOk); + CHECK(armnnInterpreter.Invoke() == kTfLiteOk); // Compare output data for (unsigned int i = 0; i < expectedOutputValues.size(); ++i) { - armnnDelegate::CompareOutputData(tfLiteDelegate, - armnnDelegate, - outputTensorShapes[i], - expectedOutputValues[i], - i); + std::vector tfLiteOutputValues = tfLiteInterpreter.GetOutputResult(i); + std::vector tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(i); + + std::vector armnnOutputValues = armnnInterpreter.GetOutputResult(i); + std::vector armnnOutputShape = armnnInterpreter.GetOutputShape(i); + + armnnDelegate::CompareOutputData(tfLiteOutputValues, armnnOutputValues, expectedOutputValues[i]); + armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, outputTensorShapes[i]); } - tfLiteDelegate.reset(nullptr); - armnnDelegate.reset(nullptr); + tfLiteInterpreter.Cleanup(); + armnnInterpreter.Cleanup(); } // End of SPLIT_V Test } // anonymous namespace \ No newline at end of file diff --git a/delegate/test/StridedSliceTestHelper.hpp b/delegate/test/StridedSliceTestHelper.hpp index fde7e16c72..d3d160158b 100644 --- a/delegate/test/StridedSliceTestHelper.hpp +++ b/delegate/test/StridedSliceTestHelper.hpp @@ -8,18 +8,15 @@ #include "TestUtils.hpp" #include -#include +#include #include -#include #include -#include -#include #include -#include +#include -#include +#include namespace { @@ -132,7 +129,7 @@ std::vector CreateStridedSliceTfLiteModel(tflite::TensorType tensorType, modelDescription, flatBufferBuilder.CreateVector(buffers, 6)); - flatBufferBuilder.Finish(flatbufferModel); + flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER); return std::vector(flatBufferBuilder.GetBufferPointer(), flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize()); @@ -157,7 +154,7 @@ void StridedSliceTestImpl(std::vector& backends, const int32_t ShrinkAxisMask = 0, const armnn::DataLayout& dataLayout = armnn::DataLayout::NHWC) { - using namespace tflite; + using namespace delegateTestInterpreter; std::vector modelBuffer = CreateStridedSliceTfLiteModel( ::tflite::TensorType_FLOAT32, inputTensorShape, @@ -175,47 +172,27 @@ void StridedSliceTestImpl(std::vector& backends, ShrinkAxisMask, dataLayout); - auto tfLiteModel = GetModel(modelBuffer.data()); - - // Create TfLite Interpreters - std::unique_ptr armnnDelegate; - CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver()) - (&armnnDelegate) == kTfLiteOk); - CHECK(armnnDelegate != nullptr); - CHECK(armnnDelegate->AllocateTensors() == kTfLiteOk); - - std::unique_ptr tfLiteDelegate; - CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver()) - (&tfLiteDelegate) == kTfLiteOk); - CHECK(tfLiteDelegate != nullptr); - CHECK(tfLiteDelegate->AllocateTensors() == kTfLiteOk); - - // Create the ArmNN Delegate - armnnDelegate::DelegateOptions delegateOptions(backends); - std::unique_ptr - theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions), - 
armnnDelegate::TfLiteArmnnDelegateDelete); - CHECK(theArmnnDelegate != nullptr); - - // Modify armnnDelegateInterpreter to use armnnDelegate - CHECK(armnnDelegate->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk); - - // Set input data - armnnDelegate::FillInput(tfLiteDelegate, 0, inputValues); - armnnDelegate::FillInput(armnnDelegate, 0, inputValues); - - // Run EnqueWorkload - CHECK(tfLiteDelegate->Invoke() == kTfLiteOk); - CHECK(armnnDelegate->Invoke() == kTfLiteOk); - - // Compare output data - armnnDelegate::CompareOutputData(tfLiteDelegate, - armnnDelegate, - outputTensorShape, - expectedOutputValues); - - tfLiteDelegate.reset(nullptr); - armnnDelegate.reset(nullptr); + // Setup interpreter with just TFLite Runtime. + auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer); + CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk); + CHECK(tfLiteInterpreter.FillInputTensor(inputValues, 0) == kTfLiteOk); + CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk); + std::vector tfLiteOutputValues = tfLiteInterpreter.GetOutputResult(0); + std::vector tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0); + + // Setup interpreter with Arm NN Delegate applied. + auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends); + CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk); + CHECK(armnnInterpreter.FillInputTensor(inputValues, 0) == kTfLiteOk); + CHECK(armnnInterpreter.Invoke() == kTfLiteOk); + std::vector armnnOutputValues = armnnInterpreter.GetOutputResult(0); + std::vector armnnOutputShape = armnnInterpreter.GetOutputShape(0); + + armnnDelegate::CompareOutputData(tfLiteOutputValues, armnnOutputValues, expectedOutputValues); + armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, outputTensorShape); + + tfLiteInterpreter.Cleanup(); + armnnInterpreter.Cleanup(); } // End of StridedSlice Test } // anonymous namespace \ No newline at end of file diff --git a/delegate/test/TestUtils.cpp b/delegate/test/TestUtils.cpp index 2689c2eaa3..0d53d9492b 100644 --- a/delegate/test/TestUtils.cpp +++ b/delegate/test/TestUtils.cpp @@ -17,7 +17,7 @@ void CompareData(bool tensor1[], bool tensor2[], size_t tensorSize) } } -void CompareData(std::vector& tensor1, bool tensor2[], size_t tensorSize) +void CompareData(std::vector& tensor1, std::vector& tensor2, size_t tensorSize) { auto compareBool = [](auto a, auto b) {return (((a == 0) && (b == 0)) || ((a != 0) && (b != 0)));}; for (size_t i = 0; i < tensorSize; i++) @@ -108,44 +108,18 @@ void CompareData(TfLiteFloat16 tensor1[], Half tensor2[], size_t tensorSize) { } } -template <> -void CompareOutputData(std::unique_ptr& tfLiteInterpreter, - std::unique_ptr& armnnDelegateInterpreter, - std::vector& expectedOutputShape, - std::vector& expectedOutputValues, - unsigned int outputIndex) +void CompareOutputShape(const std::vector& tfLiteDelegateShape, + const std::vector& armnnDelegateShape, + const std::vector& expectedOutputShape) { - auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[outputIndex]; - auto tfLiteDelegateOutputTensor = tfLiteInterpreter->tensor(tfLiteDelegateOutputId); - auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor(tfLiteDelegateOutputId); - auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[outputIndex]; - auto armnnDelegateOutputTensor = armnnDelegateInterpreter->tensor(armnnDelegateOutputId); - auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor(armnnDelegateOutputId); - - CHECK(expectedOutputShape.size() == 
tfLiteDelegateOutputTensor->dims->size);
-    CHECK(expectedOutputShape.size() == armnnDelegateOutputTensor->dims->size);
+    CHECK(expectedOutputShape.size() == tfLiteDelegateShape.size());
+    CHECK(expectedOutputShape.size() == armnnDelegateShape.size());
 
     for (size_t i = 0; i < expectedOutputShape.size(); i++)
     {
-        CHECK(armnnDelegateOutputTensor->dims->data[i] == expectedOutputShape[i]);
-        CHECK(tfLiteDelegateOutputTensor->dims->data[i] == expectedOutputShape[i]);
-        CHECK(tfLiteDelegateOutputTensor->dims->data[i] == armnnDelegateOutputTensor->dims->data[i]);
-    }
-
-    armnnDelegate::CompareData(armnnDelegateOutputData, expectedOutputValues.data(), expectedOutputValues.size());
-    armnnDelegate::CompareData(tfLiteDelegateOutputData, expectedOutputValues.data(), expectedOutputValues.size());
-    armnnDelegate::CompareData(tfLiteDelegateOutputData, armnnDelegateOutputData, expectedOutputValues.size());
-}
-
-template <>
-void FillInput(std::unique_ptr<tflite::Interpreter>& interpreter, int inputIndex, std::vector<Half>& inputValues)
-{
-    auto tfLiteDelegateInputId = interpreter->inputs()[inputIndex];
-    auto tfLiteDelageInputData = interpreter->typed_tensor<TfLiteFloat16>(tfLiteDelegateInputId);
-    for (unsigned int i = 0; i < inputValues.size(); ++i)
-    {
-        tfLiteDelageInputData[i].data = half_float::detail::float2half<std::round_indeterminate, float>(inputValues[i]);
-
+        CHECK(expectedOutputShape[i] == armnnDelegateShape[i]);
+        CHECK(tfLiteDelegateShape[i] == expectedOutputShape[i]);
+        CHECK(tfLiteDelegateShape[i] == armnnDelegateShape[i]);
     }
 }
diff --git a/delegate/test/TestUtils.hpp b/delegate/test/TestUtils.hpp
index 95dd257c92..ba81cd8d56 100644
--- a/delegate/test/TestUtils.hpp
+++ b/delegate/test/TestUtils.hpp
@@ -17,26 +17,12 @@ using Half = half_float::half;
 
 namespace armnnDelegate
 {
 
-/// Can be used to assign input data from a vector to a model input.
-/// Example usage can be found in ResizeTesthelper.hpp
-template <typename T>
-void FillInput(std::unique_ptr<tflite::Interpreter>& interpreter, int inputIndex, std::vector<T>& inputValues)
-{
-    auto tfLiteDelegateInputId = interpreter->inputs()[inputIndex];
-    auto tfLiteDelageInputData = interpreter->typed_tensor<T>(tfLiteDelegateInputId);
-    for (unsigned int i = 0; i < inputValues.size(); ++i)
-    {
-        tfLiteDelageInputData[i] = inputValues[i];
-    }
-}
-
-template <>
-void FillInput(std::unique_ptr<tflite::Interpreter>& interpreter, int inputIndex, std::vector<Half>& inputValues);
+constexpr const char* FILE_IDENTIFIER = "TFL3";
 
 /// Can be used to compare bool data coming from a tflite interpreter
 /// Boolean types get converted to a bit representation in a vector. vector.data() returns a void pointer
 /// instead of a pointer to bool. Therefore a special function to compare vectors of bool is required
-void CompareData(std::vector<bool>& tensor1, bool tensor2[], size_t tensorSize);
+void CompareData(std::vector<bool>& tensor1, std::vector<bool>& tensor2, size_t tensorSize);
 void CompareData(bool tensor1[], bool tensor2[], size_t tensorSize);
 
 /// Can be used to compare float data coming from a tflite interpreter with a tolerance of limit_of_float*100
@@ -66,36 +52,22 @@ void CompareData(TfLiteFloat16 tensor1[], TfLiteFloat16 tensor2[], size_t tensor
 /// Can be used to compare Half (Float16) data and TfLiteFloat16 data coming from a tflite interpreter
 void CompareData(TfLiteFloat16 tensor1[], Half tensor2[], size_t tensorSize);
 
-/// Can be used to compare the output tensor shape and values
-/// from armnnDelegateInterpreter and tfLiteInterpreter.
+/// Can be used to compare the output tensor shape
+/// Example usage can be found in ControlTestHelper.hpp
+void CompareOutputShape(const std::vector<int32_t>& tfLiteDelegateShape,
+                        const std::vector<int32_t>& armnnDelegateShape,
+                        const std::vector<int32_t>& expectedOutputShape);
+
+/// Can be used to compare the output tensor values
 /// Example usage can be found in ControlTestHelper.hpp
 template <typename T>
-void CompareOutputData(std::unique_ptr<tflite::Interpreter>& tfLiteInterpreter,
-                       std::unique_ptr<tflite::Interpreter>& armnnDelegateInterpreter,
-                       std::vector<int32_t>& expectedOutputShape,
-                       std::vector<T>& expectedOutputValues,
-                       unsigned int outputIndex = 0)
+void CompareOutputData(std::vector<T>& tfLiteDelegateOutputs,
+                       std::vector<T>& armnnDelegateOutputs,
+                       std::vector<T>& expectedOutputValues)
 {
-    auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[outputIndex];
-    auto tfLiteDelegateOutputTensor = tfLiteInterpreter->tensor(tfLiteDelegateOutputId);
-    auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<T>(tfLiteDelegateOutputId);
-    auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[outputIndex];
-    auto armnnDelegateOutputTensor = armnnDelegateInterpreter->tensor(armnnDelegateOutputId);
-    auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<T>(armnnDelegateOutputId);
-
-    CHECK(expectedOutputShape.size() == tfLiteDelegateOutputTensor->dims->size);
-    CHECK(expectedOutputShape.size() == armnnDelegateOutputTensor->dims->size);
-
-    for (size_t i = 0; i < expectedOutputShape.size(); i++)
-    {
-        CHECK(expectedOutputShape[i] == armnnDelegateOutputTensor->dims->data[i]);
-        CHECK(tfLiteDelegateOutputTensor->dims->data[i] == expectedOutputShape[i]);
-        CHECK(tfLiteDelegateOutputTensor->dims->data[i] == armnnDelegateOutputTensor->dims->data[i]);
-    }
-
-    armnnDelegate::CompareData(expectedOutputValues.data(), armnnDelegateOutputData , expectedOutputValues.size());
-    armnnDelegate::CompareData(tfLiteDelegateOutputData , expectedOutputValues.data(), expectedOutputValues.size());
-    armnnDelegate::CompareData(tfLiteDelegateOutputData , armnnDelegateOutputData , expectedOutputValues.size());
+    armnnDelegate::CompareData(expectedOutputValues.data(), armnnDelegateOutputs.data(), expectedOutputValues.size());
+    armnnDelegate::CompareData(tfLiteDelegateOutputs.data(), expectedOutputValues.data(), expectedOutputValues.size());
+    armnnDelegate::CompareData(tfLiteDelegateOutputs.data(), armnnDelegateOutputs.data(), expectedOutputValues.size());
 }
 
 } // namespace armnnDelegate
diff --git a/delegate/test/TransposeTest.cpp b/delegate/test/TransposeTest.cpp
index c210128ac8..cb3b327b13 100644
--- a/delegate/test/TransposeTest.cpp
+++ b/delegate/test/TransposeTest.cpp
@@ -13,6 +13,28 @@
 namespace armnnDelegate
 {
 
+void TransposeFP32Test(std::vector<armnn::BackendId>& backends)
+{
+    // set test input data
+    std::vector<int32_t> input0Shape {4, 2, 3};
+    std::vector<int32_t> inputPermVecShape {3};
+    std::vector<int32_t> outputShape {3, 4, 2};
+
+    std::vector<float> input0Values = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
+                                       12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23};
+    std::vector<int32_t> inputPermVec = {2, 0, 1};
+    std::vector<float> expectedOutputValues = {0, 3, 6, 9, 12, 15, 18, 21, 1, 4, 7, 10,
+                                               13, 16, 19, 22, 2, 5, 8, 11, 14, 17, 20, 23};
+
+    TransposeTest(backends,
+                  input0Shape,
+                  inputPermVecShape,
+                  outputShape,
+                  input0Values,
+                  inputPermVec,
+                  expectedOutputValues);
+}
+
 TEST_SUITE ("Transpose_GpuAccTests")
 {
 
@@ -37,10 +59,13 @@ TEST_CASE ("Transpose_Float32_CpuAcc_Test")
 
 TEST_SUITE ("Transpose_CpuRefTests")
 {
+
 TEST_CASE ("Transpose_Float32_CpuRef_Test")
 {
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
-
TransposeFP32Test(backends); + std::vector backends = { armnn::Compute::CpuRef }; + TransposeFP32Test(backends); } + } + } // namespace armnnDelegate diff --git a/delegate/test/TransposeTestHelper.hpp b/delegate/test/TransposeTestHelper.hpp index 99bb60b91a..57f4e291bf 100644 --- a/delegate/test/TransposeTestHelper.hpp +++ b/delegate/test/TransposeTestHelper.hpp @@ -5,15 +5,17 @@ #pragma once +#include "TestUtils.hpp" + #include +#include #include -#include #include -#include -#include #include +#include + #include namespace @@ -76,102 +78,51 @@ std::vector CreateTransposeTfLiteModel(tflite::TensorType tensorType, flatBufferBuilder.CreateVector(&subgraph, 1), modelDescription, flatBufferBuilder.CreateVector(buffers, 4)); - flatBufferBuilder.Finish(flatbufferModel); + flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER); return std::vector(flatBufferBuilder.GetBufferPointer(), flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize()); } -void TransposeFP32Test(std::vector& backends) +template +void TransposeTest(std::vector& backends, + std::vector& inputShape, + std::vector& inputPermVecShape, + std::vector& outputShape, + std::vector& inputValues, + std::vector& inputPermVec, + std::vector& expectedOutputValues) { - using namespace tflite; - - // set test input data - std::vector input0Shape {4, 2, 3}; - std::vector inputPermVecShape {3}; - std::vector outputShape {2, 3, 4}; + using namespace delegateTestInterpreter; - std::vector input0Values = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, - 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23}; - std::vector inputPermVec = {2, 0, 1}; - std::vector expectedOutputValues = {0, 3, 6, 9, 12, 15, 18, 21, 1, 4, 7, 10, - 13, 16, 19, 22, 2, 5, 8, 11, 14, 17, 20, 23}; - - // create model + // Create model std::vector modelBuffer = CreateTransposeTfLiteModel(::tflite::TensorType_FLOAT32, - input0Shape, + inputShape, inputPermVecShape, outputShape, inputPermVec); - const Model* tfLiteModel = GetModel(modelBuffer.data()); - // Create TfLite Interpreters - std::unique_ptr armnnDelegateInterpreter; - CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver()) - (&armnnDelegateInterpreter) == kTfLiteOk); - CHECK(armnnDelegateInterpreter != nullptr); - CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk); - - std::unique_ptr tfLiteInterpreter; - CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver()) - (&tfLiteInterpreter) == kTfLiteOk); - CHECK(tfLiteInterpreter != nullptr); - CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk); - - // Create the ArmNN Delegate - armnnDelegate::DelegateOptions delegateOptions(backends); - std::unique_ptr - theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions), - armnnDelegate::TfLiteArmnnDelegateDelete); - CHECK(theArmnnDelegate != nullptr); - // Modify armnnDelegateInterpreter to use armnnDelegate - CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk); - - // Set input data for tflite - auto tfLiteInterpreterInput0Id = tfLiteInterpreter->inputs()[0]; - auto tfLiteInterpreterInput0Data = tfLiteInterpreter->typed_tensor(tfLiteInterpreterInput0Id); - for (unsigned int i = 0; i < input0Values.size(); ++i) - { - tfLiteInterpreterInput0Data[i] = input0Values[i]; - } - - auto tfLiteInterpreterInput1Id = tfLiteInterpreter->inputs()[1]; - auto tfLiteInterpreterInput1Data = tfLiteInterpreter->typed_tensor(tfLiteInterpreterInput1Id); - for (unsigned int i = 0; i < inputPermVec.size(); 
++i) - { - tfLiteInterpreterInput1Data[i] = inputPermVec[i]; - } - - //Set input data for armnn delegate - auto armnnDelegateInput0Id = armnnDelegateInterpreter->inputs()[0]; - auto armnnDelegateInput0Data = armnnDelegateInterpreter->typed_tensor(armnnDelegateInput0Id); - for (unsigned int i = 0; i < input0Values.size(); ++i) - { - armnnDelegateInput0Data[i] = input0Values[i]; - } - - auto armnnDelegateInput1Id = armnnDelegateInterpreter->inputs()[1]; - auto armnnDelegateInput1Data = armnnDelegateInterpreter->typed_tensor(armnnDelegateInput1Id); - for (unsigned int i = 0; i < inputPermVec.size(); ++i) - { - armnnDelegateInput1Data[i] = inputPermVec[i]; - } - - // Run EnqueWorkload - CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk); - CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk); - - // Compare output data - auto tfLiteInterpreterOutputId = tfLiteInterpreter->outputs()[0]; - auto tfLiteInterpreterOutputData = tfLiteInterpreter->typed_tensor(tfLiteInterpreterOutputId); - auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0]; - auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor(armnnDelegateOutputId); - for (size_t i = 0; i < expectedOutputValues.size(); ++i) - { - CHECK(expectedOutputValues[i] == armnnDelegateOutputData[i]); - CHECK(tfLiteInterpreterOutputData[i] == expectedOutputValues[i]); - CHECK(tfLiteInterpreterOutputData[i] == armnnDelegateOutputData[i]); - } - - armnnDelegateInterpreter.reset(nullptr); + // Setup interpreter with just TFLite Runtime. + auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer); + CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk); + CHECK(tfLiteInterpreter.FillInputTensor(inputValues, 0) == kTfLiteOk); + CHECK(tfLiteInterpreter.FillInputTensor(inputPermVec, 1) == kTfLiteOk); + CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk); + std::vector tfLiteOutputValues = tfLiteInterpreter.GetOutputResult(0); + std::vector tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0); + + // Setup interpreter with Arm NN Delegate applied. 
+ auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends); + CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk); + CHECK(armnnInterpreter.FillInputTensor(inputValues, 0) == kTfLiteOk); + CHECK(armnnInterpreter.FillInputTensor(inputPermVec, 1) == kTfLiteOk); + CHECK(armnnInterpreter.Invoke() == kTfLiteOk); + std::vector armnnOutputValues = armnnInterpreter.GetOutputResult(0); + std::vector armnnOutputShape = armnnInterpreter.GetOutputShape(0); + + armnnDelegate::CompareOutputData(tfLiteOutputValues, armnnOutputValues, expectedOutputValues); + armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, outputShape); + + tfLiteInterpreter.Cleanup(); + armnnInterpreter.Cleanup(); } } diff --git a/delegate/test/UnidirectionalSequenceLstmTestHelper.hpp b/delegate/test/UnidirectionalSequenceLstmTestHelper.hpp index 0ff04e7949..c058d83bc6 100644 --- a/delegate/test/UnidirectionalSequenceLstmTestHelper.hpp +++ b/delegate/test/UnidirectionalSequenceLstmTestHelper.hpp @@ -8,14 +8,13 @@ #include "TestUtils.hpp" #include +#include #include -#include #include -#include -#include #include -#include + +#include #include @@ -569,7 +568,7 @@ std::vector CreateUnidirectionalSequenceLstmTfLiteModel(tflite::TensorType modelDescription, flatBufferBuilder.CreateVector(buffers)); - flatBufferBuilder.Finish(flatbufferModel); + flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER); return std::vector(flatBufferBuilder.GetBufferPointer(), flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize()); @@ -624,7 +623,7 @@ void UnidirectionalSequenceLstmTestImpl(std::vector& backends, bool isTimeMajor, float quantScale = 0.1f) { - using namespace tflite; + using namespace delegateTestInterpreter; std::vector modelBuffer = CreateUnidirectionalSequenceLstmTfLiteModel(tensorType, batchSize, @@ -671,72 +670,51 @@ void UnidirectionalSequenceLstmTestImpl(std::vector& backends, isTimeMajor, quantScale); - const Model* tfLiteModel = GetModel(modelBuffer.data()); - // Create TfLite Interpreters - std::unique_ptr armnnDelegateInterpreter; - CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver()) - (&armnnDelegateInterpreter) == kTfLiteOk); - CHECK(armnnDelegateInterpreter != nullptr); - CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk); - - std::unique_ptr tfLiteInterpreter; - CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver()) - (&tfLiteInterpreter) == kTfLiteOk); - CHECK(tfLiteInterpreter != nullptr); - CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk); - - // Create the ArmNN Delegate - armnnDelegate::DelegateOptions delegateOptions(backends); - std::unique_ptr - theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions), - armnnDelegate::TfLiteArmnnDelegateDelete); - CHECK(theArmnnDelegate != nullptr); - // Modify armnnDelegateInterpreter to use armnnDelegate - CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk); - - // Set input data - auto tfLiteDelegateInputId = tfLiteInterpreter->inputs()[0]; - auto tfLiteDelageInputData = tfLiteInterpreter->typed_tensor(tfLiteDelegateInputId); - for (unsigned int i = 0; i < inputValues.size(); ++i) + std::vector outputShape; + if (isTimeMajor) { - tfLiteDelageInputData[i] = inputValues[i]; + outputShape = {timeSize, batchSize, outputSize}; } - - auto armnnDelegateInputId = armnnDelegateInterpreter->inputs()[0]; - auto armnnDelegateInputData = 
armnnDelegateInterpreter->typed_tensor(armnnDelegateInputId); - for (unsigned int i = 0; i < inputValues.size(); ++i) + else { - armnnDelegateInputData[i] = inputValues[i]; + outputShape = {batchSize, timeSize, outputSize}; } - // Run EnqueueWorkload - CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk); - CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk); + // Setup interpreter with just TFLite Runtime. + auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer); + CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk); + CHECK(tfLiteInterpreter.FillInputTensor(inputValues, 0) == kTfLiteOk); + CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk); + std::vector tfLiteOutputValues = tfLiteInterpreter.GetOutputResult(0); + std::vector tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0); + + // Setup interpreter with Arm NN Delegate applied. + auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends); + CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk); + CHECK(armnnInterpreter.FillInputTensor(inputValues, 0) == kTfLiteOk); + CHECK(armnnInterpreter.Invoke() == kTfLiteOk); + std::vector armnnOutputValues = armnnInterpreter.GetOutputResult(0); + std::vector armnnOutputShape = armnnInterpreter.GetOutputShape(0); - // Compare output data - auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[0]; - auto tfLiteDelagateOutputData = tfLiteInterpreter->typed_tensor(tfLiteDelegateOutputId); - auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0]; - auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor(armnnDelegateOutputId); + armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, outputShape); if (tensorType == ::tflite::TensorType_INT8) { // Allow 2% tolerance for Quantized weights - armnnDelegate::CompareData(expectedOutputValues.data(), armnnDelegateOutputData, + armnnDelegate::CompareData(expectedOutputValues.data(), armnnOutputValues.data(), expectedOutputValues.size(), 2); - armnnDelegate::CompareData(expectedOutputValues.data(), tfLiteDelagateOutputData, + armnnDelegate::CompareData(expectedOutputValues.data(), tfLiteOutputValues.data(), expectedOutputValues.size(), 2); - armnnDelegate::CompareData(tfLiteDelagateOutputData, armnnDelegateOutputData, + armnnDelegate::CompareData(tfLiteOutputValues.data(), armnnOutputValues.data(), expectedOutputValues.size(), 2); } else { - armnnDelegate::CompareData(expectedOutputValues.data(), armnnDelegateOutputData, - expectedOutputValues.size()); - armnnDelegate::CompareData(expectedOutputValues.data(), tfLiteDelagateOutputData, - expectedOutputValues.size()); - armnnDelegate::CompareData(tfLiteDelagateOutputData, armnnDelegateOutputData, expectedOutputValues.size()); + armnnDelegate::CompareOutputData(tfLiteOutputValues, armnnOutputValues, expectedOutputValues); } + + tfLiteInterpreter.Cleanup(); + armnnInterpreter.Cleanup(); } } // anonymous namespace \ No newline at end of file diff --git a/delegate/test/UnpackTestHelper.hpp b/delegate/test/UnpackTestHelper.hpp index a4c6bc01f3..2d6565f883 100644 --- a/delegate/test/UnpackTestHelper.hpp +++ b/delegate/test/UnpackTestHelper.hpp @@ -8,17 +8,15 @@ #include "TestUtils.hpp" #include +#include #include -#include #include -#include -#include #include -#include +#include -#include +#include namespace { @@ -110,7 +108,7 @@ std::vector CreateUnpackTfLiteModel(tflite::BuiltinOperator unpackOperator modelDescription, flatBufferBuilder.CreateVector(buffers)); - flatBufferBuilder.Finish(flatbufferModel); + 
flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);
 
     return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                              flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
@@ -128,7 +126,7 @@ void UnpackTest(tflite::BuiltinOperator unpackOperatorCode,
                 float quantScale = 1.0f,
                 int quantOffset = 0)
 {
-    using namespace tflite;
+    using namespace delegateTestInterpreter;
     std::vector<char> modelBuffer = CreateUnpackTfLiteModel(unpackOperatorCode,
                                                             tensorType,
                                                             inputShape,
@@ -138,51 +136,33 @@ void UnpackTest(tflite::BuiltinOperator unpackOperatorCode,
                                                             quantScale,
                                                             quantOffset);
 
-    const Model* tfLiteModel = GetModel(modelBuffer.data());
-
-    // Create TfLite Interpreters
-    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
-    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-              (&armnnDelegateInterpreter) == kTfLiteOk);
-    CHECK(armnnDelegateInterpreter != nullptr);
-    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
-
-    std::unique_ptr<Interpreter> tfLiteInterpreter;
-    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-              (&tfLiteInterpreter) == kTfLiteOk);
-    CHECK(tfLiteInterpreter != nullptr);
-    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
+    // Setup interpreter with just TFLite Runtime.
+    auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
+    CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
+    CHECK(tfLiteInterpreter.FillInputTensor(inputValues, 0) == kTfLiteOk);
+    CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
 
-    // Create the ArmNN Delegate
-    armnnDelegate::DelegateOptions delegateOptions(backends);
-    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
-        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
-                         armnnDelegate::TfLiteArmnnDelegateDelete);
-    CHECK(theArmnnDelegate != nullptr);
-
-    // Modify armnnDelegateInterpreter to use armnnDelegate
-    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
-    // Set input data
-    armnnDelegate::FillInput(tfLiteInterpreter, 0, inputValues);
-    armnnDelegate::FillInput(armnnDelegateInterpreter, 0, inputValues);
-
-
-    // Run EnqueueWorkload
-    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
-    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
+    // Setup interpreter with Arm NN Delegate applied.
+    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+    CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
+    CHECK(armnnInterpreter.FillInputTensor(inputValues, 0) == kTfLiteOk);
+    CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
 
     // Compare output data
     for (unsigned int i = 0; i < expectedOutputValues.size(); ++i)
     {
-        armnnDelegate::CompareOutputData(tfLiteInterpreter,
-                                         armnnDelegateInterpreter,
-                                         expectedOutputShape,
-                                         expectedOutputValues[i],
-                                         i);
+        std::vector<T> tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<T>(i);
+        std::vector<int32_t> tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(i);
+
+        std::vector<T> armnnOutputValues = armnnInterpreter.GetOutputResult<T>(i);
+        std::vector<int32_t> armnnOutputShape = armnnInterpreter.GetOutputShape(i);
+
+        armnnDelegate::CompareOutputData(tfLiteOutputValues, armnnOutputValues, expectedOutputValues[i]);
+        armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, expectedOutputShape);
     }
 
-    armnnDelegateInterpreter.reset(nullptr);
+    tfLiteInterpreter.Cleanup();
+    armnnInterpreter.Cleanup();
 }
 
 } // anonymous namespace
\ No newline at end of file
-- 
cgit v1.2.1
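
For reference, every helper refactored above converges on the same flow: run the model buffer once on the stock TfLite runtime, run it again with the Arm NN delegate applied, then cross-check both runs against the expected data. A minimal sketch of that flow, assuming the DelegateTestInterpreter API shown in the hunks above; RunAndCompare is a hypothetical name, and the parameters stand in for each helper's test data:

#include <DelegateTestInterpreter.hpp>
#include "TestUtils.hpp"

#include <doctest/doctest.h>

// Sketch only: the shared single-input/single-output test flow used by the
// refactored helpers in this patch.
void RunAndCompare(std::vector<char>& modelBuffer,
                   std::vector<armnn::BackendId>& backends,
                   std::vector<float>& inputValues,
                   std::vector<float>& expectedOutputValues,
                   std::vector<int32_t>& expectedOutputShape)
{
    using namespace delegateTestInterpreter;

    // Reference run on the stock TfLite runtime.
    auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
    CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
    CHECK(tfLiteInterpreter.FillInputTensor<float>(inputValues, 0) == kTfLiteOk);
    CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
    std::vector<float> tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<float>(0);
    std::vector<int32_t> tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0);

    // Same model buffer again, this time with the Arm NN delegate applied.
    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
    CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
    CHECK(armnnInterpreter.FillInputTensor<float>(inputValues, 0) == kTfLiteOk);
    CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
    std::vector<float> armnnOutputValues = armnnInterpreter.GetOutputResult<float>(0);
    std::vector<int32_t> armnnOutputShape = armnnInterpreter.GetOutputShape(0);

    // Both runs must agree with each other and with the expected reference data.
    armnnDelegate::CompareOutputData(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
    armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, expectedOutputShape);

    tfLiteInterpreter.Cleanup();
    armnnInterpreter.Cleanup();
}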
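
The FILE_IDENTIFIER now passed to Finish() throughout is what the commit message calls the identifier "required for validation": the TFLite C API verifies the flatbuffer when creating the model, and that verification expects the "TFL3" identifier to be embedded in the buffer. A short sketch of the difference, assuming flatbuffers' generated tflite::ModelBufferHasIdentifier() helper and a hypothetical FinishWithIdentifier wrapper:

#include <flatbuffers/flatbuffers.h>
#include <tensorflow/lite/schema/schema_generated.h>  // include path may vary with the build setup

// Sketch only: why Finish() now takes a file identifier. Without it, the
// serialised buffer carries no "TFL3" marker and fails the verification
// performed when the model is created through the new C API.
bool FinishWithIdentifier(flatbuffers::FlatBufferBuilder& builder,
                          flatbuffers::Offset<tflite::Model> model)
{
    // armnnDelegate::FILE_IDENTIFIER is defined as "TFL3" in TestUtils.hpp.
    builder.Finish(model, "TFL3");

    // The generated helper returns true only when the identifier is present.
    return tflite::ModelBufferHasIdentifier(builder.GetBufferPointer());
}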