author     Matthew Sloyan <matthew.sloyan@arm.com>   2023-03-30 10:12:08 +0100
committer  ryan.oshea3 <ryan.oshea3@arm.com>         2023-04-05 20:36:32 +0000
commit     ebe392df1635790bf21714549adb97f2f75559e1 (patch)
tree       6fb8e56cc755d7c47a62bbe72c54b6ca5445377d
parent     ac9607f401dc30003aa97bd179a06d6b8a32139f (diff)
download   armnn-ebe392df1635790bf21714549adb97f2f75559e1.tar.gz
IVGCVSW-7562 Implement DelegateTestInterpreter for classic delegate
* Updated all tests to use new DelegateTestInterpreter.
* Fixed some unit tests where the shape was incorrect.
* Add file identifier to FlatBuffersBuilder, as it is required for validation
  when creating the model using new API.

Signed-off-by: Matthew Sloyan <matthew.sloyan@arm.com>
Change-Id: I1c4f5464367b35d4528571fa94d14bfaef18fb4d
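
For orientation, the change replaces the old InterpreterBuilder/ModifyGraphWithDelegate boilerplate in every test helper with the pattern sketched below. The sketch is not part of the patch: the model buffer contents, the CpuRef backend, and the input/expected values are illustrative assumptions, and the Compare* helpers are the ones updated in delegate/test/TestUtils.hpp.

#include <DelegateTestInterpreter.hpp>
#include "TestUtils.hpp"

#include <doctest/doctest.h>

#include <vector>

// Illustrative sketch only: run the same model on the plain TfLite runtime and
// on the Arm NN delegate, then check that both agree with the expected output.
void ExampleDelegateTest(std::vector<char>& modelBuffer)
{
    using namespace delegateTestInterpreter;

    std::vector<armnn::BackendId> backends   = { armnn::Compute::CpuRef };   // example backend
    std::vector<float> inputValues           { 1.0f, 2.0f, 3.0f, 4.0f };     // placeholder input
    std::vector<float> expectedOutputValues  { 1.0f, 2.0f, 3.0f, 4.0f };     // placeholder expectation
    std::vector<int32_t> expectedOutputShape { 4 };                          // placeholder shape

    // Reference run on the TfLite runtime only.
    auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
    CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
    CHECK(tfLiteInterpreter.FillInputTensor<float>(inputValues, 0) == kTfLiteOk);
    CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
    std::vector<float>   tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<float>(0);
    std::vector<int32_t> tfLiteOutputShape  = tfLiteInterpreter.GetOutputShape(0);

    // Same model with the Arm NN delegate applied.
    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
    CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
    CHECK(armnnInterpreter.FillInputTensor<float>(inputValues, 0) == kTfLiteOk);
    CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
    std::vector<float>   armnnOutputValues = armnnInterpreter.GetOutputResult<float>(0);
    std::vector<int32_t> armnnOutputShape  = armnnInterpreter.GetOutputShape(0);

    // Both runs should match each other and the expected values/shape.
    armnnDelegate::CompareOutputData<float>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
    armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, expectedOutputShape);

    tfLiteInterpreter.Cleanup();
    armnnInterpreter.Cleanup();
}
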
-rw-r--r--  delegate/CMakeLists.txt | 4
-rw-r--r--  delegate/classic/src/test/DelegateTestInterpreter.cpp | 74
-rw-r--r--  delegate/common/src/test/DelegateTestInterpreter.hpp | 175
-rw-r--r--  delegate/common/src/test/DelegateTestInterpreterUtils.hpp | 110
-rw-r--r--  delegate/test/ActivationTestHelper.hpp | 76
-rw-r--r--  delegate/test/ArgMinMaxTestHelper.hpp | 75
-rw-r--r--  delegate/test/BatchMatMulTestHelper.hpp | 89
-rw-r--r--  delegate/test/BatchSpaceTestHelper.hpp | 72
-rw-r--r--  delegate/test/CastTestHelper.hpp | 73
-rw-r--r--  delegate/test/ComparisonTestHelper.hpp | 100
-rw-r--r--  delegate/test/ControlTestHelper.hpp | 133
-rw-r--r--  delegate/test/ConvolutionTestHelper.hpp | 225
-rw-r--r--  delegate/test/DelegateOptionsTest.cpp | 11
-rw-r--r--  delegate/test/DelegateOptionsTestHelper.hpp | 135
-rw-r--r--  delegate/test/DepthwiseConvolution2dTest.cpp | 2
-rw-r--r--  delegate/test/ElementwiseBinaryTestHelper.hpp | 74
-rw-r--r--  delegate/test/ElementwiseUnaryTestHelper.hpp | 138
-rw-r--r--  delegate/test/FillTestHelper.hpp | 58
-rw-r--r--  delegate/test/FullyConnectedTestHelper.hpp | 82
-rw-r--r--  delegate/test/GatherNdTestHelper.hpp | 77
-rw-r--r--  delegate/test/GatherTestHelper.hpp | 77
-rw-r--r--  delegate/test/LogicalTest.cpp | 72
-rw-r--r--  delegate/test/LogicalTestHelper.hpp | 91
-rw-r--r--  delegate/test/LstmTestHelper.hpp | 85
-rw-r--r--  delegate/test/NormalizationTestHelper.hpp | 65
-rw-r--r--  delegate/test/PackTestHelper.hpp | 66
-rw-r--r--  delegate/test/PadTestHelper.hpp | 64
-rw-r--r--  delegate/test/Pooling2dTestHelper.hpp | 75
-rw-r--r--  delegate/test/Pooling3dTestHelper.hpp | 102
-rw-r--r--  delegate/test/PreluTest.cpp | 3
-rw-r--r--  delegate/test/PreluTestHelper.hpp | 80
-rw-r--r--  delegate/test/QuantizationTestHelper.hpp | 89
-rw-r--r--  delegate/test/RedefineTestHelper.hpp | 64
-rw-r--r--  delegate/test/ReduceTestHelper.hpp | 74
-rw-r--r--  delegate/test/ResizeTest.cpp | 4
-rw-r--r--  delegate/test/ResizeTestHelper.hpp | 85
-rw-r--r--  delegate/test/RoundTestHelper.hpp | 73
-rw-r--r--  delegate/test/ShapeTestHelper.hpp | 75
-rw-r--r--  delegate/test/SliceTestHelper.hpp | 75
-rw-r--r--  delegate/test/SoftmaxTestHelper.hpp | 86
-rw-r--r--  delegate/test/SpaceDepthTestHelper.hpp | 64
-rw-r--r--  delegate/test/SplitTestHelper.hpp | 138
-rw-r--r--  delegate/test/StridedSliceTestHelper.hpp | 75
-rw-r--r--  delegate/test/TestUtils.cpp | 44
-rw-r--r--  delegate/test/TestUtils.hpp | 58
-rw-r--r--  delegate/test/TransposeTest.cpp | 29
-rw-r--r--  delegate/test/TransposeTestHelper.hpp | 129
-rw-r--r--  delegate/test/UnidirectionalSequenceLstmTestHelper.hpp | 88
-rw-r--r--  delegate/test/UnpackTestHelper.hpp | 70
49 files changed, 1624 insertions, 2259 deletions
diff --git a/delegate/CMakeLists.txt b/delegate/CMakeLists.txt
index 433cee6743..73df68fc4c 100644
--- a/delegate/CMakeLists.txt
+++ b/delegate/CMakeLists.txt
@@ -143,6 +143,9 @@ if(BUILD_UNIT_TESTS AND BUILD_CLASSIC_DELEGATE)
test/ConvolutionTestHelper.hpp
test/DelegateOptionsTest.cpp
test/DelegateOptionsTestHelper.hpp
+ classic/src/test/DelegateTestInterpreter.cpp
+ common/src/test/DelegateTestInterpreter.hpp
+ common/src/test/DelegateTestInterpreterUtils.hpp
test/DepthwiseConvolution2dTest.cpp
test/ElementwiseBinaryTest.cpp
test/ElementwiseBinaryTestHelper.hpp
@@ -236,6 +239,7 @@ if(BUILD_UNIT_TESTS AND BUILD_CLASSIC_DELEGATE)
add_executable(DelegateUnitTests ${armnnDelegate_unittest_sources})
target_include_directories(DelegateUnitTests SYSTEM PRIVATE "${TF_LITE_SCHEMA_INCLUDE_PATH}")
+ target_include_directories(DelegateUnitTests SYSTEM PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/common/src/test")
# Add half library from armnn third-party libraries
target_link_libraries(DelegateUnitTests PRIVATE thirdparty_headers)
diff --git a/delegate/classic/src/test/DelegateTestInterpreter.cpp b/delegate/classic/src/test/DelegateTestInterpreter.cpp
new file mode 100644
index 0000000000..45b6cd0932
--- /dev/null
+++ b/delegate/classic/src/test/DelegateTestInterpreter.cpp
@@ -0,0 +1,74 @@
+//
+// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include <DelegateTestInterpreter.hpp>
+
+#include <armnn_delegate.hpp>
+
+namespace delegateTestInterpreter
+{
+
+DelegateTestInterpreter::DelegateTestInterpreter(std::vector<char>& modelBuffer,
+ const std::vector<armnn::BackendId>& backends,
+ const std::string& customOp,
+ bool disableFallback)
+{
+ TfLiteModel* tfLiteModel = delegateTestInterpreter::CreateTfLiteModel(modelBuffer);
+
+ TfLiteInterpreterOptions* options = delegateTestInterpreter::CreateTfLiteInterpreterOptions();
+ if (!customOp.empty())
+ {
+ options->mutable_op_resolver = delegateTestInterpreter::GenerateCustomOpResolver(customOp);
+ }
+
+ // Disable fallback by default for unit tests unless specified.
+ armnnDelegate::DelegateOptions delegateOptions(backends);
+ delegateOptions.DisableTfLiteRuntimeFallback(disableFallback);
+
+ auto armnnDelegate = armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions);
+ TfLiteInterpreterOptionsAddDelegate(options, armnnDelegate);
+
+ m_TfLiteDelegate = armnnDelegate;
+ m_TfLiteInterpreter = TfLiteInterpreterCreate(tfLiteModel, options);
+
+ // The options and model can be deleted after the interpreter is created.
+ TfLiteInterpreterOptionsDelete(options);
+ TfLiteModelDelete(tfLiteModel);
+}
+
+DelegateTestInterpreter::DelegateTestInterpreter(std::vector<char>& modelBuffer,
+ const armnnDelegate::DelegateOptions& delegateOptions,
+ const std::string& customOp)
+{
+ TfLiteModel* tfLiteModel = delegateTestInterpreter::CreateTfLiteModel(modelBuffer);
+
+ TfLiteInterpreterOptions* options = delegateTestInterpreter::CreateTfLiteInterpreterOptions();
+ if (!customOp.empty())
+ {
+ options->mutable_op_resolver = delegateTestInterpreter::GenerateCustomOpResolver(customOp);
+ }
+
+ auto armnnDelegate = armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions);
+ TfLiteInterpreterOptionsAddDelegate(options, armnnDelegate);
+
+ m_TfLiteDelegate = armnnDelegate;
+ m_TfLiteInterpreter = TfLiteInterpreterCreate(tfLiteModel, options);
+
+ // The options and model can be deleted after the interpreter is created.
+ TfLiteInterpreterOptionsDelete(options);
+ TfLiteModelDelete(tfLiteModel);
+}
+
+void DelegateTestInterpreter::Cleanup()
+{
+ TfLiteInterpreterDelete(m_TfLiteInterpreter);
+
+ if (m_TfLiteDelegate)
+ {
+ armnnDelegate::TfLiteArmnnDelegateDelete(static_cast<TfLiteDelegate*>(m_TfLiteDelegate));
+ }
+}
+
+} // anonymous namespace
\ No newline at end of file
diff --git a/delegate/common/src/test/DelegateTestInterpreter.hpp b/delegate/common/src/test/DelegateTestInterpreter.hpp
new file mode 100644
index 0000000000..0b63441ddd
--- /dev/null
+++ b/delegate/common/src/test/DelegateTestInterpreter.hpp
@@ -0,0 +1,175 @@
+//
+// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <DelegateTestInterpreterUtils.hpp>
+
+#include <armnn_delegate.hpp>
+
+#include <armnn/BackendId.hpp>
+#include <armnn/Exceptions.hpp>
+
+#include <tensorflow/lite/core/c/c_api.h>
+#include <tensorflow/lite/kernels/kernel_util.h>
+#include <tensorflow/lite/kernels/custom_ops_register.h>
+#include <tensorflow/lite/kernels/register.h>
+#include <tensorflow/lite/c/c_api_internal.h>
+
+namespace delegateTestInterpreter
+{
+
+class DelegateTestInterpreter
+{
+public:
+ /// Create TfLite Interpreter only
+ DelegateTestInterpreter(std::vector<char>& modelBuffer, const std::string& customOp = "")
+ {
+ TfLiteModel* model = delegateTestInterpreter::CreateTfLiteModel(modelBuffer);
+
+ TfLiteInterpreterOptions* options = delegateTestInterpreter::CreateTfLiteInterpreterOptions();
+ if (!customOp.empty())
+ {
+ options->mutable_op_resolver = delegateTestInterpreter::GenerateCustomOpResolver(customOp);
+ }
+
+ m_TfLiteInterpreter = TfLiteInterpreterCreate(model, options);
+ m_TfLiteDelegate = nullptr;
+
+ // The options and model can be deleted after the interpreter is created.
+ TfLiteInterpreterOptionsDelete(options);
+ TfLiteModelDelete(model);
+ }
+
+ /// Create Interpreter with default Arm NN Classic/Opaque Delegate applied
+ DelegateTestInterpreter(std::vector<char>& model,
+ const std::vector<armnn::BackendId>& backends,
+ const std::string& customOp = "",
+ bool disableFallback = true);
+
+ /// Create Interpreter with Arm NN Classic/Opaque Delegate applied and DelegateOptions
+ DelegateTestInterpreter(std::vector<char>& model,
+ const armnnDelegate::DelegateOptions& delegateOptions,
+ const std::string& customOp = "");
+
+ /// Allocate the TfLiteTensors within the graph.
+ /// This must be called before FillInputTensor(values, index) and Invoke().
+ TfLiteStatus AllocateTensors()
+ {
+ return TfLiteInterpreterAllocateTensors(m_TfLiteInterpreter);
+ }
+
+ /// Copy a buffer of values into an input tensor at a given index.
+ template<typename T>
+ TfLiteStatus FillInputTensor(std::vector<T>& inputValues, int index)
+ {
+ TfLiteTensor* inputTensor = delegateTestInterpreter::GetInputTensorFromInterpreter(m_TfLiteInterpreter, index);
+ return delegateTestInterpreter::CopyFromBufferToTensor(inputTensor, inputValues);
+ }
+
+ /// Copy a boolean buffer of values into an input tensor at a given index.
+ /// Boolean types get converted to a bit representation in a vector.
+ /// vector.data() returns a void pointer instead of a pointer to bool, so the tensor needs to be accessed directly.
+ TfLiteStatus FillInputTensor(std::vector<bool>& inputValues, int index)
+ {
+ TfLiteTensor* inputTensor = delegateTestInterpreter::GetInputTensorFromInterpreter(m_TfLiteInterpreter, index);
+ if(inputTensor->type != kTfLiteBool)
+ {
+ throw armnn::Exception("Input tensor at the given index is not of bool type: " + std::to_string(index));
+ }
+
+ // Make sure there is enough bytes allocated to copy into.
+ if(inputTensor->bytes < inputValues.size() * sizeof(bool))
+ {
+ throw armnn::Exception("Input tensor has not been allocated to match number of input values.");
+ }
+
+ for (unsigned int i = 0; i < inputValues.size(); ++i)
+ {
+ inputTensor->data.b[i] = inputValues[i];
+ }
+
+ return kTfLiteOk;
+ }
+
+ /// Run the interpreter either on TFLite Runtime or Arm NN Delegate.
+ /// AllocateTensors() must be called before Invoke().
+ TfLiteStatus Invoke()
+ {
+ return TfLiteInterpreterInvoke(m_TfLiteInterpreter);
+ }
+
+ /// Return a buffer of values from the output tensor at a given index.
+ /// This must be called after Invoke().
+ template<typename T>
+ std::vector<T> GetOutputResult(int index)
+ {
+ const TfLiteTensor* outputTensor =
+ delegateTestInterpreter::GetOutputTensorFromInterpreter(m_TfLiteInterpreter, index);
+
+ int64_t n = tflite::NumElements(outputTensor);
+ std::vector<T> output;
+ output.resize(n);
+
+ TfLiteStatus status = TfLiteTensorCopyToBuffer(outputTensor, output.data(), output.size() * sizeof(T));
+ if(status != kTfLiteOk)
+ {
+ throw armnn::Exception("An error occurred when copying output buffer.");
+ }
+
+ return output;
+ }
+
+ /// Return a buffer of values from the output tensor at a given index. This must be called after Invoke().
+ /// Boolean types get converted to a bit representation in a vector.
+ /// vector.data() returns a void pointer instead of a pointer to bool, so the tensor needs to be accessed directly.
+ std::vector<bool> GetOutputResult(int index)
+ {
+ const TfLiteTensor* outputTensor =
+ delegateTestInterpreter::GetOutputTensorFromInterpreter(m_TfLiteInterpreter, index);
+ if(outputTensor->type != kTfLiteBool)
+ {
+ throw armnn::Exception("Output tensor at the given index is not of bool type: " + std::to_string(index));
+ }
+
+ int64_t n = tflite::NumElements(outputTensor);
+ std::vector<bool> output(n, false);
+ output.reserve(n);
+
+ for (unsigned int i = 0; i < output.size(); ++i)
+ {
+ output[i] = outputTensor->data.b[i];
+ }
+ return output;
+ }
+
+ /// Return a buffer of dimensions from the output tensor at a given index.
+ std::vector<int32_t> GetOutputShape(int index)
+ {
+ const TfLiteTensor* outputTensor =
+ delegateTestInterpreter::GetOutputTensorFromInterpreter(m_TfLiteInterpreter, index);
+ int32_t numDims = TfLiteTensorNumDims(outputTensor);
+
+ std::vector<int32_t> dims;
+ dims.reserve(numDims);
+
+ for (int32_t i = 0; i < numDims; ++i)
+ {
+ dims.push_back(TfLiteTensorDim(outputTensor, i));
+ }
+ return dims;
+ }
+
+ /// Delete TfLiteInterpreter and the TfLiteDelegate/TfLiteOpaqueDelegate
+ void Cleanup();
+
+private:
+ TfLiteInterpreter* m_TfLiteInterpreter;
+
+ /// m_TfLiteDelegate can be TfLiteDelegate or TfLiteOpaqueDelegate
+ void* m_TfLiteDelegate;
+};
+
+} // anonymous namespace
\ No newline at end of file
diff --git a/delegate/common/src/test/DelegateTestInterpreterUtils.hpp b/delegate/common/src/test/DelegateTestInterpreterUtils.hpp
new file mode 100644
index 0000000000..396c75c22e
--- /dev/null
+++ b/delegate/common/src/test/DelegateTestInterpreterUtils.hpp
@@ -0,0 +1,110 @@
+//
+// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <armnn/Exceptions.hpp>
+
+#include <tensorflow/lite/core/c/c_api.h>
+#include <tensorflow/lite/kernels/custom_ops_register.h>
+#include <tensorflow/lite/kernels/register.h>
+
+#include <type_traits>
+
+namespace delegateTestInterpreter
+{
+
+inline TfLiteTensor* GetInputTensorFromInterpreter(TfLiteInterpreter* interpreter, int index)
+{
+ TfLiteTensor* inputTensor = TfLiteInterpreterGetInputTensor(interpreter, index);
+ if(inputTensor == nullptr)
+ {
+ throw armnn::Exception("Input tensor was not found at the given index: " + std::to_string(index));
+ }
+ return inputTensor;
+}
+
+inline const TfLiteTensor* GetOutputTensorFromInterpreter(TfLiteInterpreter* interpreter, int index)
+{
+ const TfLiteTensor* outputTensor = TfLiteInterpreterGetOutputTensor(interpreter, index);
+ if(outputTensor == nullptr)
+ {
+ throw armnn::Exception("Output tensor was not found at the given index: " + std::to_string(index));
+ }
+ return outputTensor;
+}
+
+inline TfLiteModel* CreateTfLiteModel(std::vector<char>& data)
+{
+ TfLiteModel* tfLiteModel = TfLiteModelCreate(data.data(), data.size());
+ if(tfLiteModel == nullptr)
+ {
+ throw armnn::Exception("An error has occurred when creating the TfLiteModel.");
+ }
+ return tfLiteModel;
+}
+
+inline TfLiteInterpreterOptions* CreateTfLiteInterpreterOptions()
+{
+ TfLiteInterpreterOptions* options = TfLiteInterpreterOptionsCreate();
+ if(options == nullptr)
+ {
+ throw armnn::Exception("An error has occurred when creating the TfLiteInterpreterOptions.");
+ }
+ return options;
+}
+
+inline tflite::ops::builtin::BuiltinOpResolver GenerateCustomOpResolver(const std::string& opName)
+{
+ tflite::ops::builtin::BuiltinOpResolver opResolver;
+ if (opName == "MaxPool3D")
+ {
+ opResolver.AddCustom("MaxPool3D", tflite::ops::custom::Register_MAX_POOL_3D());
+ }
+ else if (opName == "AveragePool3D")
+ {
+ opResolver.AddCustom("AveragePool3D", tflite::ops::custom::Register_AVG_POOL_3D());
+ }
+ else
+ {
+ throw armnn::Exception("The custom op isn't supported by the DelegateTestInterpreter.");
+ }
+ return opResolver;
+}
+
+template<typename T>
+inline TfLiteStatus CopyFromBufferToTensor(TfLiteTensor* tensor, std::vector<T>& values)
+{
+ // Make sure there is enough bytes allocated to copy into for uint8_t and int16_t case.
+ if(tensor->bytes < values.size() * sizeof(T))
+ {
+ throw armnn::Exception("Tensor has not been allocated to match number of values.");
+ }
+
+ // Requires uint8_t and int16_t specific case as the number of bytes is larger than values passed when creating
+ // TFLite tensors of these types. Otherwise, use generic TfLiteTensorCopyFromBuffer function.
+ TfLiteStatus status = kTfLiteOk;
+ if (std::is_same<T, uint8_t>::value)
+ {
+ for (unsigned int i = 0; i < values.size(); ++i)
+ {
+ tensor->data.uint8[i] = values[i];
+ }
+ }
+ else if (std::is_same<T, int16_t>::value)
+ {
+ for (unsigned int i = 0; i < values.size(); ++i)
+ {
+ tensor->data.i16[i] = values[i];
+ }
+ }
+ else
+ {
+ status = TfLiteTensorCopyFromBuffer(tensor, values.data(), values.size() * sizeof(T));
+ }
+ return status;
+}
+
+} // anonymous namespace
\ No newline at end of file
diff --git a/delegate/test/ActivationTestHelper.hpp b/delegate/test/ActivationTestHelper.hpp
index 110c684c23..e1901b7d9f 100644
--- a/delegate/test/ActivationTestHelper.hpp
+++ b/delegate/test/ActivationTestHelper.hpp
@@ -8,14 +8,14 @@
#include "TestUtils.hpp"
#include <armnn_delegate.hpp>
+#include <DelegateTestInterpreter.hpp>
#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
#include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <schema_generated.h>
#include <tensorflow/lite/version.h>
+#include <schema_generated.h>
+
#include <doctest/doctest.h>
namespace
@@ -69,7 +69,7 @@ std::vector<char> CreateActivationTfLiteModel(tflite::BuiltinOperator activation
modelDescription,
flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
- flatBufferBuilder.Finish(flatbufferModel);
+ flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);
return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
@@ -80,51 +80,33 @@ void ActivationTest(tflite::BuiltinOperator activationOperatorCode,
std::vector<float>& inputValues,
std::vector<float>& expectedOutputValues)
{
- using namespace tflite;
+ using namespace delegateTestInterpreter;
std::vector<int32_t> inputShape { { 4, 1, 4} };
std::vector<char> modelBuffer = CreateActivationTfLiteModel(activationOperatorCode,
- ::tflite::TensorType_FLOAT32,
- inputShape);
-
- const Model* tfLiteModel = GetModel(modelBuffer.data());
- // Create TfLite Interpreters
- std::unique_ptr<Interpreter> armnnDelegateInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&armnnDelegateInterpreter) == kTfLiteOk);
- CHECK(armnnDelegateInterpreter != nullptr);
- CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
-
- std::unique_ptr<Interpreter> tfLiteInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&tfLiteInterpreter) == kTfLiteOk);
- CHECK(tfLiteInterpreter != nullptr);
- CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
-
- // Create the ArmNN Delegate
- armnnDelegate::DelegateOptions delegateOptions(backends);
- std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
- theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
- armnnDelegate::TfLiteArmnnDelegateDelete);
- CHECK(theArmnnDelegate != nullptr);
- // Modify armnnDelegateInterpreter to use armnnDelegate
- CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
- // Set input data
- armnnDelegate::FillInput<float>(tfLiteInterpreter, 0, inputValues);
- armnnDelegate::FillInput<float>(armnnDelegateInterpreter, 0, inputValues);
-
- // Run EnqueWorkload
- CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
- CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
-
- // Compare output data
- armnnDelegate::CompareOutputData<float>(tfLiteInterpreter,
- armnnDelegateInterpreter,
- inputShape,
- expectedOutputValues);
-
- tfLiteInterpreter.reset(nullptr);
- armnnDelegateInterpreter.reset(nullptr);
+ ::tflite::TensorType_FLOAT32,
+ inputShape);
+
+ // Setup interpreter with just TFLite Runtime.
+ auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
+ CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
+ CHECK(tfLiteInterpreter.FillInputTensor<float>(inputValues, 0) == kTfLiteOk);
+ CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
+ std::vector<float> tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<float>(0);
+ std::vector<int32_t> tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0);
+
+ // Setup interpreter with Arm NN Delegate applied.
+ auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+ CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
+ CHECK(armnnInterpreter.FillInputTensor<float>(inputValues, 0) == kTfLiteOk);
+ CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
+ std::vector<float> armnnOutputValues = armnnInterpreter.GetOutputResult<float>(0);
+ std::vector<int32_t> armnnOutputShape = armnnInterpreter.GetOutputShape(0);
+
+ armnnDelegate::CompareOutputData<float>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
+ armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, inputShape);
+
+ tfLiteInterpreter.Cleanup();
+ armnnInterpreter.Cleanup();
}
} // anonymous namespace
\ No newline at end of file
diff --git a/delegate/test/ArgMinMaxTestHelper.hpp b/delegate/test/ArgMinMaxTestHelper.hpp
index 91cf1f81e7..fd230fff94 100644
--- a/delegate/test/ArgMinMaxTestHelper.hpp
+++ b/delegate/test/ArgMinMaxTestHelper.hpp
@@ -8,14 +8,14 @@
#include "TestUtils.hpp"
#include <armnn_delegate.hpp>
+#include <DelegateTestInterpreter.hpp>
#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
#include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <schema_generated.h>
#include <tensorflow/lite/version.h>
+#include <schema_generated.h>
+
#include <doctest/doctest.h>
namespace
@@ -119,7 +119,7 @@ std::vector<char> CreateArgMinMaxTfLiteModel(tflite::BuiltinOperator argMinMaxOp
modelDescription,
flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
- flatBufferBuilder.Finish(flatbufferModel);
+ flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);
return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
@@ -139,7 +139,7 @@ void ArgMinMaxTest(tflite::BuiltinOperator argMinMaxOperatorCode,
float quantScale = 1.0f,
int quantOffset = 0)
{
- using namespace tflite;
+ using namespace delegateTestInterpreter;
std::vector<char> modelBuffer = CreateArgMinMaxTfLiteModel<InputT, OutputT>(argMinMaxOperatorCode,
tensorType,
inputShape,
@@ -150,50 +150,27 @@ void ArgMinMaxTest(tflite::BuiltinOperator argMinMaxOperatorCode,
quantScale,
quantOffset);
- const Model* tfLiteModel = GetModel(modelBuffer.data());
- CHECK(tfLiteModel != nullptr);
-
- std::unique_ptr<Interpreter> armnnDelegateInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&armnnDelegateInterpreter) == kTfLiteOk);
- CHECK(armnnDelegateInterpreter != nullptr);
- CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
-
- std::unique_ptr<Interpreter> tfLiteInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&tfLiteInterpreter) == kTfLiteOk);
- CHECK(tfLiteInterpreter != nullptr);
- CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
-
- // Create the ArmNN Delegate
- armnnDelegate::DelegateOptions delegateOptions(backends);
- std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
- theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
- armnnDelegate::TfLiteArmnnDelegateDelete);
- CHECK(theArmnnDelegate != nullptr);
- // Modify armnnDelegateInterpreter to use armnnDelegate
- CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
- // Set input data
- armnnDelegate::FillInput<InputT>(tfLiteInterpreter, 0, inputValues);
- armnnDelegate::FillInput<InputT>(armnnDelegateInterpreter, 0, inputValues);
-
- // Run EnqueueWorkload
- CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
- CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
-
- // Compare output data
- auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[0];
- auto tfLiteDelageOutputData = tfLiteInterpreter->typed_tensor<OutputT>(tfLiteDelegateOutputId);
- auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0];
- auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<OutputT>(armnnDelegateOutputId);
-
- for (size_t i = 0; i < expectedOutputValues.size(); i++)
- {
- CHECK(expectedOutputValues[i] == armnnDelegateOutputData[i]);
- CHECK(tfLiteDelageOutputData[i] == expectedOutputValues[i]);
- CHECK(tfLiteDelageOutputData[i] == armnnDelegateOutputData[i]);
- }
+ // Setup interpreter with just TFLite Runtime.
+ auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
+ CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
+ CHECK(tfLiteInterpreter.FillInputTensor<InputT>(inputValues, 0) == kTfLiteOk);
+ CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
+ std::vector<OutputT> tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<OutputT>(0);
+ std::vector<int32_t> tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0);
+
+ // Setup interpreter with Arm NN Delegate applied.
+ auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+ CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
+ CHECK(armnnInterpreter.FillInputTensor<InputT>(inputValues, 0) == kTfLiteOk);
+ CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
+ std::vector<OutputT> armnnOutputValues = armnnInterpreter.GetOutputResult<OutputT>(0);
+ std::vector<int32_t> armnnOutputShape = armnnInterpreter.GetOutputShape(0);
+
+ armnnDelegate::CompareOutputData<OutputT>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
+ armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, outputShape);
+
+ tfLiteInterpreter.Cleanup();
+ armnnInterpreter.Cleanup();
}
} // anonymous namespace
\ No newline at end of file
diff --git a/delegate/test/BatchMatMulTestHelper.hpp b/delegate/test/BatchMatMulTestHelper.hpp
index 32b0a4fc71..d45f438f5c 100644
--- a/delegate/test/BatchMatMulTestHelper.hpp
+++ b/delegate/test/BatchMatMulTestHelper.hpp
@@ -8,14 +8,14 @@
#include "TestUtils.hpp"
#include <armnn_delegate.hpp>
+#include <DelegateTestInterpreter.hpp>
#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
#include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <schema_generated.h>
#include <tensorflow/lite/version.h>
+#include <schema_generated.h>
+
#include <doctest/doctest.h>
namespace
@@ -111,7 +111,7 @@ std::vector<char> CreateBatchMatMulTfLiteModel(
modelDescription,
flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
- flatBufferBuilder.Finish(flatbufferModel);
+ flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);
return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
@@ -132,7 +132,7 @@ void BatchMatMulTest(tflite::BuiltinOperator bmmOperatorCode,
float quantScale = 1.0f,
int quantOffset = 0)
{
- using namespace tflite;
+ using namespace delegateTestInterpreter;
std::vector<char> modelBuffer = CreateBatchMatMulTfLiteModel(bmmOperatorCode,
tensorType,
LHSInputShape,
@@ -143,62 +143,29 @@ void BatchMatMulTest(tflite::BuiltinOperator bmmOperatorCode,
quantScale,
quantOffset);
- const Model* tfLiteModel = GetModel(modelBuffer.data());
- CHECK(tfLiteModel != nullptr);
- // Create TfLite Interpreters
- std::unique_ptr<Interpreter> armnnDelegateInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&armnnDelegateInterpreter) == kTfLiteOk);
- CHECK(armnnDelegateInterpreter != nullptr);
- CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
-
- std::unique_ptr<Interpreter> tfLiteInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&tfLiteInterpreter) == kTfLiteOk);
- CHECK(tfLiteInterpreter != nullptr);
- CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
-
- // Create the ArmNN Delegate
- armnnDelegate::DelegateOptions delegateOptions(backends);
- std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
- theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
- armnnDelegate::TfLiteArmnnDelegateDelete);
- CHECK(theArmnnDelegate != nullptr);
- // Modify armnnDelegateInterpreter to use armnnDelegate
- CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
- // Set input data
- auto tfLiteDelegateLHSInputId = tfLiteInterpreter->inputs()[0];
- auto tfLiteDelegateLHSInputData = tfLiteInterpreter->typed_tensor<T>(tfLiteDelegateLHSInputId);
- auto tfLiteDelegateRHSInputId = tfLiteInterpreter->inputs()[1];
- auto tfLiteDelegateRHSInputData = tfLiteInterpreter->typed_tensor<T>(tfLiteDelegateRHSInputId);
- for (unsigned int i = 0; i < LHSInputValues.size(); ++i)
- {
- tfLiteDelegateLHSInputData[i] = LHSInputValues[i];
- }
- for (unsigned int i = 0; i < RHSInputValues.size(); ++i)
- {
- tfLiteDelegateRHSInputData[i] = RHSInputValues[i];
- }
-
- auto armnnDelegateLHSInputId = armnnDelegateInterpreter->inputs()[0];
- auto armnnDelegateLHSInputData = armnnDelegateInterpreter->typed_tensor<T>(armnnDelegateLHSInputId);
- auto armnnDelegateRHSInputId = armnnDelegateInterpreter->inputs()[1];
- auto armnnDelegateRHSInputData = armnnDelegateInterpreter->typed_tensor<T>(armnnDelegateRHSInputId);
- for (unsigned int i = 0; i < LHSInputValues.size(); ++i)
- {
- armnnDelegateLHSInputData[i] = LHSInputValues[i];
- }
- for (unsigned int i = 0; i < RHSInputValues.size(); ++i)
- {
- armnnDelegateRHSInputData[i] = RHSInputValues[i];
- }
- // Run EnqueueWorkload
- CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
- CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
-
- armnnDelegate::CompareOutputData(tfLiteInterpreter, armnnDelegateInterpreter,
- outputShape, expectedOutputValues);
+ // Setup interpreter with just TFLite Runtime.
+ auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
+ CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
+ CHECK(tfLiteInterpreter.FillInputTensor<T>(LHSInputValues, 0) == kTfLiteOk);
+ CHECK(tfLiteInterpreter.FillInputTensor<T>(RHSInputValues, 1) == kTfLiteOk);
+ CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
+ std::vector<T> tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<T>(0);
+ std::vector<int32_t> tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0);
+
+ // Setup interpreter with Arm NN Delegate applied.
+ auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+ CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
+ CHECK(armnnInterpreter.FillInputTensor<T>(LHSInputValues, 0) == kTfLiteOk);
+ CHECK(armnnInterpreter.FillInputTensor<T>(RHSInputValues, 1) == kTfLiteOk);
+ CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
+ std::vector<T> armnnOutputValues = armnnInterpreter.GetOutputResult<T>(0);
+ std::vector<int32_t> armnnOutputShape = armnnInterpreter.GetOutputShape(0);
+
+ armnnDelegate::CompareOutputData<T>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
+ armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, outputShape);
+
+ tfLiteInterpreter.Cleanup();
+ armnnInterpreter.Cleanup();
}
} // anonymous namespace
diff --git a/delegate/test/BatchSpaceTestHelper.hpp b/delegate/test/BatchSpaceTestHelper.hpp
index 597139d390..ba6afb1382 100644
--- a/delegate/test/BatchSpaceTestHelper.hpp
+++ b/delegate/test/BatchSpaceTestHelper.hpp
@@ -8,14 +8,14 @@
#include "TestUtils.hpp"
#include <armnn_delegate.hpp>
+#include <DelegateTestInterpreter.hpp>
#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
#include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <schema_generated.h>
#include <tensorflow/lite/version.h>
+#include <schema_generated.h>
+
#include <doctest/doctest.h>
namespace
@@ -143,7 +143,7 @@ std::vector<char> CreateBatchSpaceTfLiteModel(tflite::BuiltinOperator batchSpace
modelDescription,
flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
- flatBufferBuilder.Finish(flatbufferModel);
+ flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);
return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
@@ -162,7 +162,7 @@ void BatchSpaceTest(tflite::BuiltinOperator controlOperatorCode,
float quantScale = 1.0f,
int quantOffset = 0)
{
- using namespace tflite;
+ using namespace delegateTestInterpreter;
std::vector<char> modelBuffer = CreateBatchSpaceTfLiteModel(controlOperatorCode,
tensorType,
inputShape,
@@ -172,47 +172,27 @@ void BatchSpaceTest(tflite::BuiltinOperator controlOperatorCode,
quantScale,
quantOffset);
- const Model* tfLiteModel = GetModel(modelBuffer.data());
-
- // Create TfLite Interpreters
- std::unique_ptr<Interpreter> armnnDelegateInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&armnnDelegateInterpreter) == kTfLiteOk);
- CHECK(armnnDelegateInterpreter != nullptr);
- CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
-
- std::unique_ptr<Interpreter> tfLiteInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&tfLiteInterpreter) == kTfLiteOk);
- CHECK(tfLiteInterpreter != nullptr);
- CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
-
- // Create the ArmNN Delegate
- armnnDelegate::DelegateOptions delegateOptions(backends);
- std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
- theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
- armnnDelegate::TfLiteArmnnDelegateDelete);
- CHECK(theArmnnDelegate != nullptr);
-
- // Modify armnnDelegateInterpreter to use armnnDelegate
- CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
- // Set input data
- armnnDelegate::FillInput<T>(tfLiteInterpreter, 0, inputValues);
- armnnDelegate::FillInput<T>(armnnDelegateInterpreter, 0, inputValues);
-
- // Run EnqueWorkload
- CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
- CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
-
- // Compare output data
- armnnDelegate::CompareOutputData<T>(tfLiteInterpreter,
- armnnDelegateInterpreter,
- expectedOutputShape,
- expectedOutputValues);
-
- armnnDelegateInterpreter.reset(nullptr);
- tfLiteInterpreter.reset(nullptr);
+ // Setup interpreter with just TFLite Runtime.
+ auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
+ CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
+ CHECK(tfLiteInterpreter.FillInputTensor(inputValues, 0) == kTfLiteOk);
+ CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
+ std::vector<T> tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<T>(0);
+ std::vector<int32_t> tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0);
+
+ // Setup interpreter with Arm NN Delegate applied.
+ auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+ CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
+ CHECK(armnnInterpreter.FillInputTensor(inputValues, 0) == kTfLiteOk);
+ CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
+ std::vector<T> armnnOutputValues = armnnInterpreter.GetOutputResult<T>(0);
+ std::vector<int32_t> armnnOutputShape = armnnInterpreter.GetOutputShape(0);
+
+ armnnDelegate::CompareOutputData<T>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
+ armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, expectedOutputShape);
+
+ tfLiteInterpreter.Cleanup();
+ armnnInterpreter.Cleanup();
}
} // anonymous namespace
\ No newline at end of file
diff --git a/delegate/test/CastTestHelper.hpp b/delegate/test/CastTestHelper.hpp
index be1967ccd6..ac8f033bb8 100644
--- a/delegate/test/CastTestHelper.hpp
+++ b/delegate/test/CastTestHelper.hpp
@@ -8,14 +8,14 @@
#include "TestUtils.hpp"
#include <armnn_delegate.hpp>
+#include <DelegateTestInterpreter.hpp>
#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
#include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <schema_generated.h>
#include <tensorflow/lite/version.h>
+#include <schema_generated.h>
+
#include <doctest/doctest.h>
namespace
@@ -90,7 +90,7 @@ std::vector<char> CreateCastTfLiteModel(tflite::TensorType inputTensorType,
modelDescription,
flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
- flatBufferBuilder.Finish(flatbufferModel);
+ flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);
return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
}
@@ -105,55 +105,34 @@ void CastTest(tflite::TensorType inputTensorType,
float quantScale = 1.0f,
int quantOffset = 0)
{
- using namespace tflite;
+ using namespace delegateTestInterpreter;
std::vector<char> modelBuffer = CreateCastTfLiteModel(inputTensorType,
outputTensorType,
shape,
quantScale,
quantOffset);
- const Model* tfLiteModel = GetModel(modelBuffer.data());
-
- // Create TfLite Interpreters
- std::unique_ptr<Interpreter> armnnDelegate;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&armnnDelegate) == kTfLiteOk);
- CHECK(armnnDelegate != nullptr);
- CHECK(armnnDelegate->AllocateTensors() == kTfLiteOk);
-
- std::unique_ptr<Interpreter> tfLiteDelegate;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&tfLiteDelegate) == kTfLiteOk);
- CHECK(tfLiteDelegate != nullptr);
- CHECK(tfLiteDelegate->AllocateTensors() == kTfLiteOk);
-
- // Create the ArmNN Delegate
- armnnDelegate::DelegateOptions delegateOptions(backends);
- std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
- theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
- armnnDelegate::TfLiteArmnnDelegateDelete);
- CHECK(theArmnnDelegate != nullptr);
-
- // Modify armnnDelegateInterpreter to use armnnDelegate
- CHECK(armnnDelegate->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
- // Set input data
- armnnDelegate::FillInput<T>(tfLiteDelegate, 0, inputValues);
- armnnDelegate::FillInput<T>(armnnDelegate, 0, inputValues);
-
- // Run EnqueWorkload
- CHECK(tfLiteDelegate->Invoke() == kTfLiteOk);
- CHECK(armnnDelegate->Invoke() == kTfLiteOk);
-
- // Compare output data
- armnnDelegate::CompareOutputData<K>(tfLiteDelegate,
- armnnDelegate,
- shape,
- expectedOutputValues,
- 0);
-
- tfLiteDelegate.reset(nullptr);
- armnnDelegate.reset(nullptr);
+ // Setup interpreter with just TFLite Runtime.
+ auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
+ CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
+ CHECK(tfLiteInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
+ CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
+ std::vector<K> tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<K>(0);
+ std::vector<int32_t> tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0);
+
+ // Setup interpreter with Arm NN Delegate applied.
+ auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+ CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
+ CHECK(armnnInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
+ CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
+ std::vector<K> armnnOutputValues = armnnInterpreter.GetOutputResult<K>(0);
+ std::vector<int32_t> armnnOutputShape = armnnInterpreter.GetOutputShape(0);
+
+ armnnDelegate::CompareOutputData<K>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
+ armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, shape);
+
+ tfLiteInterpreter.Cleanup();
+ armnnInterpreter.Cleanup();
}
} // anonymous namespace
diff --git a/delegate/test/ComparisonTestHelper.hpp b/delegate/test/ComparisonTestHelper.hpp
index ef9f87a5d5..a1114cb938 100644
--- a/delegate/test/ComparisonTestHelper.hpp
+++ b/delegate/test/ComparisonTestHelper.hpp
@@ -8,14 +8,14 @@
#include "TestUtils.hpp"
#include <armnn_delegate.hpp>
+#include <DelegateTestInterpreter.hpp>
#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
#include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <schema_generated.h>
#include <tensorflow/lite/version.h>
+#include <schema_generated.h>
+
#include <doctest/doctest.h>
namespace
@@ -141,7 +141,7 @@ std::vector<char> CreateComparisonTfLiteModel(tflite::BuiltinOperator comparison
modelDescription,
flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
- flatBufferBuilder.Finish(flatbufferModel);
+ flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);
return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
@@ -160,7 +160,7 @@ void ComparisonTest(tflite::BuiltinOperator comparisonOperatorCode,
float quantScale = 1.0f,
int quantOffset = 0)
{
- using namespace tflite;
+ using namespace delegateTestInterpreter;
std::vector<char> modelBuffer = CreateComparisonTfLiteModel(comparisonOperatorCode,
tensorType,
input0Shape,
@@ -169,70 +169,32 @@ void ComparisonTest(tflite::BuiltinOperator comparisonOperatorCode,
quantScale,
quantOffset);
- const Model* tfLiteModel = GetModel(modelBuffer.data());
- // Create TfLite Interpreters
- std::unique_ptr<Interpreter> armnnDelegateInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&armnnDelegateInterpreter) == kTfLiteOk);
- CHECK(armnnDelegateInterpreter != nullptr);
- CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
-
- std::unique_ptr<Interpreter> tfLiteInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&tfLiteInterpreter) == kTfLiteOk);
- CHECK(tfLiteInterpreter != nullptr);
- CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
-
- // Create the ArmNN Delegate
- armnnDelegate::DelegateOptions delegateOptions(backends);
- std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
- theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
- armnnDelegate::TfLiteArmnnDelegateDelete);
- CHECK(theArmnnDelegate != nullptr);
- // Modify armnnDelegateInterpreter to use armnnDelegate
- CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
- // Set input data
- auto tfLiteDelegateInput0Id = tfLiteInterpreter->inputs()[0];
- auto tfLiteDelageInput0Data = tfLiteInterpreter->typed_tensor<T>(tfLiteDelegateInput0Id);
- for (unsigned int i = 0; i < input0Values.size(); ++i)
- {
- tfLiteDelageInput0Data[i] = input0Values[i];
- }
-
- auto tfLiteDelegateInput1Id = tfLiteInterpreter->inputs()[1];
- auto tfLiteDelageInput1Data = tfLiteInterpreter->typed_tensor<T>(tfLiteDelegateInput1Id);
- for (unsigned int i = 0; i < input1Values.size(); ++i)
- {
- tfLiteDelageInput1Data[i] = input1Values[i];
- }
-
- auto armnnDelegateInput0Id = armnnDelegateInterpreter->inputs()[0];
- auto armnnDelegateInput0Data = armnnDelegateInterpreter->typed_tensor<T>(armnnDelegateInput0Id);
- for (unsigned int i = 0; i < input0Values.size(); ++i)
- {
- armnnDelegateInput0Data[i] = input0Values[i];
- }
-
- auto armnnDelegateInput1Id = armnnDelegateInterpreter->inputs()[1];
- auto armnnDelegateInput1Data = armnnDelegateInterpreter->typed_tensor<T>(armnnDelegateInput1Id);
- for (unsigned int i = 0; i < input1Values.size(); ++i)
- {
- armnnDelegateInput1Data[i] = input1Values[i];
- }
-
- // Run EnqueWorkload
- CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
- CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
- // Compare output data
- auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[0];
- auto tfLiteDelageOutputData = tfLiteInterpreter->typed_tensor<bool>(tfLiteDelegateOutputId);
- auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0];
- auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<bool>(armnnDelegateOutputId);
-
- armnnDelegate::CompareData(expectedOutputValues , armnnDelegateOutputData, expectedOutputValues.size());
- armnnDelegate::CompareData(expectedOutputValues , tfLiteDelageOutputData , expectedOutputValues.size());
- armnnDelegate::CompareData(tfLiteDelageOutputData, armnnDelegateOutputData, expectedOutputValues.size());
+ // Setup interpreter with just TFLite Runtime.
+ auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
+ CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
+ CHECK(tfLiteInterpreter.FillInputTensor<T>(input0Values, 0) == kTfLiteOk);
+ CHECK(tfLiteInterpreter.FillInputTensor<T>(input1Values, 1) == kTfLiteOk);
+ CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
+ std::vector<bool> tfLiteOutputValues = tfLiteInterpreter.GetOutputResult(0);
+ std::vector<int32_t> tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0);
+
+ // Setup interpreter with Arm NN Delegate applied.
+ auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+ CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
+ CHECK(armnnInterpreter.FillInputTensor<T>(input0Values, 0) == kTfLiteOk);
+ CHECK(armnnInterpreter.FillInputTensor<T>(input1Values, 1) == kTfLiteOk);
+ CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
+ std::vector<bool> armnnOutputValues = armnnInterpreter.GetOutputResult(0);
+ std::vector<int32_t> armnnOutputShape = armnnInterpreter.GetOutputShape(0);
+
+ armnnDelegate::CompareData(expectedOutputValues, armnnOutputValues, expectedOutputValues.size());
+ armnnDelegate::CompareData(expectedOutputValues, tfLiteOutputValues, expectedOutputValues.size());
+ armnnDelegate::CompareData(tfLiteOutputValues, armnnOutputValues, expectedOutputValues.size());
+
+ armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, outputShape);
+
+ tfLiteInterpreter.Cleanup();
+ armnnInterpreter.Cleanup();
}
} // anonymous namespace
\ No newline at end of file
diff --git a/delegate/test/ControlTestHelper.hpp b/delegate/test/ControlTestHelper.hpp
index f68cc07519..9e082a78af 100644
--- a/delegate/test/ControlTestHelper.hpp
+++ b/delegate/test/ControlTestHelper.hpp
@@ -8,17 +8,15 @@
#include "TestUtils.hpp"
#include <armnn_delegate.hpp>
+#include <DelegateTestInterpreter.hpp>
#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
#include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <schema_generated.h>
#include <tensorflow/lite/version.h>
-#include <doctest/doctest.h>
+#include <schema_generated.h>
-#include <string>
+#include <doctest/doctest.h>
namespace
{
@@ -108,7 +106,7 @@ std::vector<char> CreateConcatTfLiteModel(tflite::BuiltinOperator controlOperato
modelDescription,
flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
- flatBufferBuilder.Finish(flatbufferModel);
+ flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);
return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
@@ -201,7 +199,7 @@ std::vector<char> CreateMeanTfLiteModel(tflite::BuiltinOperator controlOperatorC
modelDescription,
flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
- flatBufferBuilder.Finish(flatbufferModel);
+ flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);
return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
@@ -219,7 +217,7 @@ void ConcatenationTest(tflite::BuiltinOperator controlOperatorCode,
float quantScale = 1.0f,
int quantOffset = 0)
{
- using namespace tflite;
+ using namespace delegateTestInterpreter;
std::vector<char> modelBuffer = CreateConcatTfLiteModel(controlOperatorCode,
tensorType,
inputShapes,
@@ -229,51 +227,33 @@ void ConcatenationTest(tflite::BuiltinOperator controlOperatorCode,
quantScale,
quantOffset);
- const Model* tfLiteModel = GetModel(modelBuffer.data());
+ // Setup interpreter with just TFLite Runtime.
+ auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
+ CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
- // Create TfLite Interpreters
- std::unique_ptr<Interpreter> armnnDelegateInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&armnnDelegateInterpreter) == kTfLiteOk);
- CHECK(armnnDelegateInterpreter != nullptr);
- CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
+ // Setup interpreter with Arm NN Delegate applied.
+ auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+ CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
- std::unique_ptr<Interpreter> tfLiteInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&tfLiteInterpreter) == kTfLiteOk);
- CHECK(tfLiteInterpreter != nullptr);
- CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
-
- // Create the ArmNN Delegate
- armnnDelegate::DelegateOptions delegateOptions(backends);
- std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
- theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
- armnnDelegate::TfLiteArmnnDelegateDelete);
- CHECK(theArmnnDelegate != nullptr);
-
- // Modify armnnDelegateInterpreter to use armnnDelegate
- CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
- // Set input data for all input tensors.
for (unsigned int i = 0; i < inputValues.size(); ++i)
{
- // Get single input tensor and assign to interpreters.
- auto inputTensorValues = inputValues[i];
- armnnDelegate::FillInput<T>(tfLiteInterpreter, i, inputTensorValues);
- armnnDelegate::FillInput<T>(armnnDelegateInterpreter, i, inputTensorValues);
+ CHECK(tfLiteInterpreter.FillInputTensor<T>(inputValues[i], i) == kTfLiteOk);
+ CHECK(armnnInterpreter.FillInputTensor<T>(inputValues[i], i) == kTfLiteOk);
}
- // Run EnqueWorkload
- CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
- CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
+ CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
+ std::vector<T> tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<T>(0);
+ std::vector<int32_t> tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0);
- // Compare output data
- armnnDelegate::CompareOutputData<T>(tfLiteInterpreter,
- armnnDelegateInterpreter,
- expectedOutputShape,
- expectedOutputValues);
+ CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
+ std::vector<T> armnnOutputValues = armnnInterpreter.GetOutputResult<T>(0);
+ std::vector<int32_t> armnnOutputShape = armnnInterpreter.GetOutputShape(0);
- armnnDelegateInterpreter.reset(nullptr);
+ armnnDelegate::CompareOutputData<T>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
+ armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, expectedOutputShape);
+
+ tfLiteInterpreter.Cleanup();
+ armnnInterpreter.Cleanup();
}
template <typename T>
@@ -290,7 +270,7 @@ void MeanTest(tflite::BuiltinOperator controlOperatorCode,
float quantScale = 1.0f,
int quantOffset = 0)
{
- using namespace tflite;
+ using namespace delegateTestInterpreter;
std::vector<char> modelBuffer = CreateMeanTfLiteModel(controlOperatorCode,
tensorType,
input0Shape,
@@ -301,46 +281,27 @@ void MeanTest(tflite::BuiltinOperator controlOperatorCode,
quantScale,
quantOffset);
- const Model* tfLiteModel = GetModel(modelBuffer.data());
-
- // Create TfLite Interpreters
- std::unique_ptr<Interpreter> armnnDelegateInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&armnnDelegateInterpreter) == kTfLiteOk);
- CHECK(armnnDelegateInterpreter != nullptr);
- CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
-
- std::unique_ptr<Interpreter> tfLiteInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&tfLiteInterpreter) == kTfLiteOk);
- CHECK(tfLiteInterpreter != nullptr);
- CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
-
- // Create the ArmNN Delegate
- armnnDelegate::DelegateOptions delegateOptions(backends);
- std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
- theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
- armnnDelegate::TfLiteArmnnDelegateDelete);
- CHECK(theArmnnDelegate != nullptr);
-
- // Modify armnnDelegateInterpreter to use armnnDelegate
- CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
- // Set input data
- armnnDelegate::FillInput<T>(tfLiteInterpreter, 0, input0Values);
- armnnDelegate::FillInput<T>(armnnDelegateInterpreter, 0, input0Values);
-
- // Run EnqueWorkload
- CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
- CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
-
- // Compare output data
- armnnDelegate::CompareOutputData<T>(tfLiteInterpreter,
- armnnDelegateInterpreter,
- expectedOutputShape,
- expectedOutputValues);
-
- armnnDelegateInterpreter.reset(nullptr);
+ // Setup interpreter with just TFLite Runtime.
+ auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
+ CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
+ CHECK(tfLiteInterpreter.FillInputTensor<T>(input0Values, 0) == kTfLiteOk);
+ CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
+ std::vector<T> tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<T>(0);
+ std::vector<int32_t> tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0);
+
+ // Setup interpreter with Arm NN Delegate applied.
+ auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+ CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
+ CHECK(armnnInterpreter.FillInputTensor<T>(input0Values, 0) == kTfLiteOk);
+ CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
+ std::vector<T> armnnOutputValues = armnnInterpreter.GetOutputResult<T>(0);
+ std::vector<int32_t> armnnOutputShape = armnnInterpreter.GetOutputShape(0);
+
+ armnnDelegate::CompareOutputData<T>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
+ armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, expectedOutputShape);
+
+ tfLiteInterpreter.Cleanup();
+ armnnInterpreter.Cleanup();
}
} // anonymous namespace
\ No newline at end of file
diff --git a/delegate/test/ConvolutionTestHelper.hpp b/delegate/test/ConvolutionTestHelper.hpp
index 2e211b2ee9..6a3400e9cb 100644
--- a/delegate/test/ConvolutionTestHelper.hpp
+++ b/delegate/test/ConvolutionTestHelper.hpp
@@ -8,6 +8,7 @@
#include "TestUtils.hpp"
#include <armnn_delegate.hpp>
+#include <DelegateTestInterpreter.hpp>
#include <flatbuffers/flatbuffers.h>
#include <tensorflow/lite/interpreter.h>
@@ -186,7 +187,7 @@ std::vector<char> CreateConv2dTfLiteModel(tflite::BuiltinOperator convolutionOpe
modelDescription,
flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
- flatBufferBuilder.Finish(flatbufferModel);
+ flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);
return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
@@ -222,10 +223,9 @@ void ConvolutionTest(tflite::BuiltinOperator convolutionOperatorCode,
int32_t filterQuantizationDim = 3)
{
- using namespace tflite;
+ using namespace delegateTestInterpreter;
std::vector<char> modelBuffer;
-
modelBuffer = CreateConv2dTfLiteModel(convolutionOperatorCode,
tensorType,
strideX,
@@ -251,59 +251,27 @@ void ConvolutionTest(tflite::BuiltinOperator convolutionOperatorCode,
depth_multiplier,
filterQuantizationDim);
-
- const Model* tfLiteModel = GetModel(modelBuffer.data());
- // Create TfLite Interpreters
- std::unique_ptr<Interpreter> armnnDelegateInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&armnnDelegateInterpreter) == kTfLiteOk);
- CHECK(armnnDelegateInterpreter != nullptr);
- CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
-
- std::unique_ptr<Interpreter> tfLiteInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&tfLiteInterpreter) == kTfLiteOk);
- CHECK(tfLiteInterpreter != nullptr);
- CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
-
- // Create the ArmNN Delegate
- armnnDelegate::DelegateOptions delegateOptions(backends);
- std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
- theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
- armnnDelegate::TfLiteArmnnDelegateDelete);
- CHECK(theArmnnDelegate != nullptr);
- // Modify armnnDelegateInterpreter to use armnnDelegate
- CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
- // Set input data
- auto tfLiteDelegateInputId = tfLiteInterpreter->inputs()[0];
- auto tfLiteDelageInputData = tfLiteInterpreter->typed_tensor<T>(tfLiteDelegateInputId);
- for (unsigned int i = 0; i < inputValues.size(); ++i)
- {
- tfLiteDelageInputData[i] = inputValues[i];
- }
-
- auto armnnDelegateInputId = armnnDelegateInterpreter->inputs()[0];
- auto armnnDelegateInputData = armnnDelegateInterpreter->typed_tensor<T>(armnnDelegateInputId);
- for (unsigned int i = 0; i < inputValues.size(); ++i)
- {
- armnnDelegateInputData[i] = inputValues[i];
- }
- // Run EnqueueWorkload
- CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
- CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
-
- // Compare output data
- auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[0];
- auto tfLiteDelagateOutputData = tfLiteInterpreter->typed_tensor<T>(tfLiteDelegateOutputId);
- auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0];
- auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<T>(armnnDelegateOutputId);
- for (size_t i = 0; i < expectedOutputValues.size(); i++)
- {
- CHECK(tfLiteDelagateOutputData[i] == armnnDelegateOutputData[i]);
- CHECK(doctest::Approx(tfLiteDelagateOutputData[i]).epsilon(0.000001f) == expectedOutputValues[i]);
- CHECK(doctest::Approx(armnnDelegateOutputData[i]).epsilon(0.000001f) == expectedOutputValues[i]);
- }
+ // Setup interpreter with just TFLite Runtime.
+ auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
+ CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
+ CHECK(tfLiteInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
+ CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
+ std::vector<T> tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<T>(0);
+ std::vector<int32_t> tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0);
+
+ // Setup interpreter with Arm NN Delegate applied.
+ auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+ CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
+ CHECK(armnnInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
+ CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
+ std::vector<T> armnnOutputValues = armnnInterpreter.GetOutputResult<T>(0);
+ std::vector<int32_t> armnnOutputShape = armnnInterpreter.GetOutputShape(0);
+
+ armnnDelegate::CompareOutputData<T>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
+ armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, outputShape);
+
+ tfLiteInterpreter.Cleanup();
+ armnnInterpreter.Cleanup();
}
// Conv3d is only correctly supported for external delegates from TF Lite v2.6, as there was a breaking bug in v2.5.
@@ -457,7 +425,7 @@ std::vector<char> CreateConv3dTfLiteModel(tflite::BuiltinOperator convolutionOpe
modelDescription,
flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
- flatBufferBuilder.Finish(flatbufferModel);
+ flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);
return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
@@ -490,7 +458,7 @@ void Convolution3dTest(tflite::BuiltinOperator convolutionOperatorCode,
int32_t depth_multiplier = 1,
int32_t filterQuantizationDim = 3)
{
- using namespace tflite;
+ using namespace delegateTestInterpreter;
std::vector<char> modelBuffer;
modelBuffer = CreateConv3dTfLiteModel(convolutionOperatorCode,
@@ -516,48 +484,30 @@ void Convolution3dTest(tflite::BuiltinOperator convolutionOperatorCode,
depth_multiplier,
filterQuantizationDim);
- const Model* tfLiteModel = GetModel(modelBuffer.data());
-
- // Create TfLite Interpreters
- std::unique_ptr<Interpreter> armnnDelegateInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&armnnDelegateInterpreter) == kTfLiteOk);
- CHECK(armnnDelegateInterpreter != nullptr);
- CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
-
- std::unique_ptr<Interpreter> tfLiteInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&tfLiteInterpreter) == kTfLiteOk);
- CHECK(tfLiteInterpreter != nullptr);
- CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
-
- // Create the ArmNN Delegate
- armnnDelegate::DelegateOptions delegateOptions(backends);
- std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
- theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
- armnnDelegate::TfLiteArmnnDelegateDelete);
- CHECK(theArmnnDelegate != nullptr);
-
- // Modify armnnDelegateInterpreter to use armnnDelegate
- CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
- // Set input data
- armnnDelegate::FillInput<T>(tfLiteInterpreter, 0, inputValues);
- armnnDelegate::FillInput<T>(armnnDelegateInterpreter, 0, inputValues);
-
- // Run EnqueueWorkload
- CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
- CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
-
- // Compare output data
- auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[0];
- auto tfLiteDelagateOutputData = tfLiteInterpreter->typed_tensor<float>(tfLiteDelegateOutputId);
- auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0];
- auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<float>(armnnDelegateOutputId);
-
- armnnDelegate::CompareData(expectedOutputValues.data(), armnnDelegateOutputData, expectedOutputValues.size(), 1);
- armnnDelegate::CompareData(expectedOutputValues.data(), tfLiteDelagateOutputData, expectedOutputValues.size(), 1);
- armnnDelegate::CompareData(tfLiteDelagateOutputData, armnnDelegateOutputData, expectedOutputValues.size(), 1);
+ // Setup interpreter with just TFLite Runtime.
+ auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
+ CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
+ CHECK(tfLiteInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
+ CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
+ std::vector<T> tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<T>(0);
+ std::vector<int32_t> tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0);
+
+ // Setup interpreter with Arm NN Delegate applied.
+ auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+ CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
+ CHECK(armnnInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
+ CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
+ std::vector<T> armnnOutputValues = armnnInterpreter.GetOutputResult<T>(0);
+ std::vector<int32_t> armnnOutputShape = armnnInterpreter.GetOutputShape(0);
+
+ armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, outputShape);
+
+ armnnDelegate::CompareData(expectedOutputValues.data(), armnnOutputValues.data(), expectedOutputValues.size(), 1);
+ armnnDelegate::CompareData(expectedOutputValues.data(), tfLiteOutputValues.data(), expectedOutputValues.size(), 1);
+ armnnDelegate::CompareData(tfLiteOutputValues.data(), armnnOutputValues.data(), expectedOutputValues.size(), 1);
+
+ tfLiteInterpreter.Cleanup();
+ armnnInterpreter.Cleanup();
}
#endif
@@ -675,7 +625,7 @@ std::vector<char> CreateTransposeConvTfLiteModel(tflite::TensorType tensorType,
modelDescription,
flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
- flatBufferBuilder.Finish(flatbufferModel);
+ flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);
return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
@@ -702,7 +652,7 @@ void TransposeConvTest(std::vector<armnn::BackendId>& backends,
float quantScale = 1.0f,
int quantOffset = 0)
{
- using namespace tflite;
+ using namespace delegateTestInterpreter;
std::vector<char> modelBuffer;
modelBuffer = CreateTransposeConvTfLiteModel<T>(tensorType,
@@ -723,58 +673,27 @@ void TransposeConvTest(std::vector<armnn::BackendId>& backends,
quantOffset);
- const Model* tfLiteModel = GetModel(modelBuffer.data());
- // Create TfLite Interpreters
- std::unique_ptr<Interpreter> armnnDelegateInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&armnnDelegateInterpreter) == kTfLiteOk);
- CHECK(armnnDelegateInterpreter != nullptr);
- CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
-
- std::unique_ptr<Interpreter> tfLiteInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&tfLiteInterpreter) == kTfLiteOk);
- CHECK(tfLiteInterpreter != nullptr);
- CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
-
- // Create the ArmNN Delegate
- armnnDelegate::DelegateOptions delegateOptions(backends);
- std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
- theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
- armnnDelegate::TfLiteArmnnDelegateDelete);
- CHECK(theArmnnDelegate != nullptr);
- // Modify armnnDelegateInterpreter to use armnnDelegate
- CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
- // Set input data
- auto tfLiteDelegateInputId = tfLiteInterpreter->inputs()[2];
- auto tfLiteDelageInputData = tfLiteInterpreter->typed_tensor<T>(tfLiteDelegateInputId);
- for (unsigned int i = 0; i < inputValues.size(); ++i)
- {
- tfLiteDelageInputData[i] = inputValues[i];
- }
+ // Setup interpreter with just TFLite Runtime.
+ auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
+ CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
+ CHECK(tfLiteInterpreter.FillInputTensor<T>(inputValues, 2) == kTfLiteOk);
+ CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
+ std::vector<T> tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<T>(0);
+ std::vector<int32_t> tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0);
- auto armnnDelegateInputId = armnnDelegateInterpreter->inputs()[2];
- auto armnnDelegateInputData = armnnDelegateInterpreter->typed_tensor<T>(armnnDelegateInputId);
- for (unsigned int i = 0; i < inputValues.size(); ++i)
- {
- armnnDelegateInputData[i] = inputValues[i];
- }
- // Run EnqueueWorkload
- CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
- CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
-
- // Compare output data
- auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[0];
- auto tfLiteDelagateOutputData = tfLiteInterpreter->typed_tensor<T>(tfLiteDelegateOutputId);
- auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0];
- auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<T>(armnnDelegateOutputId);
- for (size_t i = 0; i < expectedOutputValues.size(); i++)
- {
- CHECK(armnnDelegateOutputData[i] == expectedOutputValues[i]);
- CHECK(tfLiteDelagateOutputData[i] == expectedOutputValues[i]);
- CHECK(tfLiteDelagateOutputData[i] == armnnDelegateOutputData[i]);
- }
+ // Setup interpreter with Arm NN Delegate applied.
+ auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+ CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
+ CHECK(armnnInterpreter.FillInputTensor<T>(inputValues, 2) == kTfLiteOk);
+ CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
+ std::vector<T> armnnOutputValues = armnnInterpreter.GetOutputResult<T>(0);
+ std::vector<int32_t> armnnOutputShape = armnnInterpreter.GetOutputShape(0);
+
+ armnnDelegate::CompareOutputData<T>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
+ armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, outputTensorShape);
+
+ tfLiteInterpreter.Cleanup();
+ armnnInterpreter.Cleanup();
}
} // anonymous namespace
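The repeated Finish() change in the model builders attaches a FlatBuffers file identifier so the serialised buffer passes identifier validation when the new DelegateTestInterpreter builds a model from it. A minimal sketch of the idea, assuming armnnDelegate::FILE_IDENTIFIER is the standard TfLite tag "TFL3" (its definition is not shown in this patch) and flatbufferModel is the root offset built above:

    // Finishing with an identifier writes the 4-byte tag right after the root
    // offset, so the buffer can be recognised as a TfLite model before parsing.
    flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);

    // The schema-generated helper can then verify the tag on the raw buffer.
    CHECK(tflite::ModelBufferHasIdentifier(flatBufferBuilder.GetBufferPointer()));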
diff --git a/delegate/test/DelegateOptionsTest.cpp b/delegate/test/DelegateOptionsTest.cpp
index ecd8c736e8..d84d420977 100644
--- a/delegate/test/DelegateOptionsTest.cpp
+++ b/delegate/test/DelegateOptionsTest.cpp
@@ -30,7 +30,6 @@ TEST_CASE ("ArmnnDelegateOptimizerOptionsReduceFp32ToFp16")
armnnDelegate::DelegateOptions delegateOptions(backends, optimizerOptions);
DelegateOptionTest<float>(::tflite::TensorType_FLOAT32,
- backends,
tensorShape,
inputData,
inputData,
@@ -60,7 +59,6 @@ TEST_CASE ("ArmnnDelegateOptimizerOptionsDebug")
armnnDelegate::DelegateOptions delegateOptions(backends, optimizerOptions);
DelegateOptionTest<float>(::tflite::TensorType_FLOAT32,
- backends,
tensorShape,
inputData,
inputData,
@@ -104,7 +102,6 @@ TEST_CASE ("ArmnnDelegateOptimizerOptionsDebugFunction")
CHECK(!callback);
DelegateOptionTest<float>(::tflite::TensorType_FLOAT32,
- backends,
tensorShape,
inputData,
inputData,
@@ -118,7 +115,7 @@ TEST_CASE ("ArmnnDelegateOptimizerOptionsDebugFunction")
TEST_CASE ("ArmnnDelegateOptimizerOptionsImport")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc, armnn::Compute::CpuRef };
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
std::vector<int32_t> tensorShape { 1, 2, 2, 1 };
std::vector<uint8_t> inputData = { 1, 2, 3, 4 };
std::vector<uint8_t> divData = { 2, 2, 3, 4 };
@@ -128,7 +125,6 @@ TEST_CASE ("ArmnnDelegateOptimizerOptionsImport")
armnnDelegate::DelegateOptions delegateOptions(backends, optimizerOptions);
DelegateOptionTest<uint8_t>(::tflite::TensorType_UINT8,
- backends,
tensorShape,
inputData,
inputData,
@@ -164,7 +160,6 @@ TEST_CASE ("ArmnnDelegateStringParsingOptionDisableTfLiteRuntimeFallback")
armnnDelegate::DelegateOptions delegateOptions(options_keys.get(), options_values.get(), num_options, nullptr);
DelegateOptionNoFallbackTest<float>(::tflite::TensorType_FLOAT32,
- backends,
tensorShape,
inputData,
expectedResult,
@@ -200,7 +195,6 @@ TEST_CASE ("ArmnnDelegateStringParsingOptionEnableTfLiteRuntimeFallback")
armnnDelegate::DelegateOptions delegateOptions(options_keys.get(), options_values.get(), num_options, nullptr);
DelegateOptionNoFallbackTest<float>(::tflite::TensorType_FLOAT32,
- backends,
tensorShape,
inputData,
expectedResult,
@@ -237,7 +231,6 @@ TEST_CASE ("ArmnnDelegateModelOptions_CpuAcc_Test")
armnnDelegate::DelegateOptions delegateOptions(backends, optimizerOptions);
DelegateOptionTest<float>(::tflite::TensorType_FLOAT32,
- backends,
tensorShape,
inputData,
inputData,
@@ -268,7 +261,6 @@ TEST_CASE ("ArmnnDelegateSerializeToDot")
// Enable serialize to dot by specifying the target file name.
delegateOptions.SetSerializeToDot(filename);
DelegateOptionTest<float>(::tflite::TensorType_FLOAT32,
- backends,
tensorShape,
inputData,
inputData,
@@ -309,7 +301,6 @@ void CreateFp16StringParsingTestRun(std::vector<std::string>& keys,
armnnDelegate::DelegateOptions delegateOptions(options_keys.get(), options_values.get(), num_options, nullptr);
DelegateOptionTest<float>(::tflite::TensorType_FLOAT32,
- backends,
tensorShape,
inputData,
inputData,
diff --git a/delegate/test/DelegateOptionsTestHelper.hpp b/delegate/test/DelegateOptionsTestHelper.hpp
index fb5403c7de..b6974c9fb6 100644
--- a/delegate/test/DelegateOptionsTestHelper.hpp
+++ b/delegate/test/DelegateOptionsTestHelper.hpp
@@ -5,17 +5,17 @@
#pragma once
-#include <armnn_delegate.hpp>
-
#include "TestUtils.hpp"
+#include <armnn_delegate.hpp>
+#include <DelegateTestInterpreter.hpp>
+
#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
#include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <schema_generated.h>
#include <tensorflow/lite/version.h>
+#include <schema_generated.h>
+
#include <doctest/doctest.h>
namespace
@@ -146,7 +146,7 @@ std::vector<char> CreateAddDivTfLiteModel(tflite::TensorType tensorType,
modelDescription,
flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
- flatBufferBuilder.Finish(flatbufferModel);
+ flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);
return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
@@ -218,14 +218,13 @@ std::vector<char> CreateCeilTfLiteModel(tflite::TensorType tensorType,
modelDescription,
flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
- flatBufferBuilder.Finish(flatbufferModel);
+ flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);
return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
}
template <typename T>
void DelegateOptionTest(tflite::TensorType tensorType,
- const std::vector<armnn::BackendId>& backends,
std::vector<int32_t>& tensorShape,
std::vector<T>& input0Values,
std::vector<T>& input1Values,
@@ -235,55 +234,41 @@ void DelegateOptionTest(tflite::TensorType tensorType,
float quantScale = 1.0f,
int quantOffset = 0)
{
- using namespace tflite;
+ using namespace delegateTestInterpreter;
std::vector<char> modelBuffer = CreateAddDivTfLiteModel(tensorType,
tensorShape,
quantScale,
quantOffset);
- const Model* tfLiteModel = GetModel(modelBuffer.data());
- // Create TfLite Interpreters
- std::unique_ptr<Interpreter> armnnDelegateInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&armnnDelegateInterpreter) == kTfLiteOk);
- CHECK(armnnDelegateInterpreter != nullptr);
- CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
-
- std::unique_ptr<Interpreter> tfLiteInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&tfLiteInterpreter) == kTfLiteOk);
- CHECK(tfLiteInterpreter != nullptr);
- CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
-
- // Create the ArmNN Delegate
- std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
- theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
- armnnDelegate::TfLiteArmnnDelegateDelete);
- CHECK(theArmnnDelegate != nullptr);
- // Modify armnnDelegateInterpreter to use armnnDelegate
- CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
- // Set input data
- armnnDelegate::FillInput(tfLiteInterpreter, 0, input0Values);
- armnnDelegate::FillInput(tfLiteInterpreter, 1, input1Values);
- armnnDelegate::FillInput(tfLiteInterpreter, 2, input2Values);
-
- armnnDelegate::FillInput(armnnDelegateInterpreter, 0, input0Values);
- armnnDelegate::FillInput(armnnDelegateInterpreter, 1, input1Values);
- armnnDelegate::FillInput(armnnDelegateInterpreter, 2, input2Values);
-
- // Run EnqueueWorkload
- CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
- CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
-
- armnnDelegate::CompareOutputData<T>(tfLiteInterpreter, armnnDelegateInterpreter, tensorShape, expectedOutputValues);
-
- armnnDelegateInterpreter.reset(nullptr);
+ // Setup interpreter with just TFLite Runtime.
+ auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
+ CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
+ CHECK(tfLiteInterpreter.FillInputTensor<T>(input0Values, 0) == kTfLiteOk);
+ CHECK(tfLiteInterpreter.FillInputTensor<T>(input1Values, 1) == kTfLiteOk);
+ CHECK(tfLiteInterpreter.FillInputTensor<T>(input2Values, 2) == kTfLiteOk);
+ CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
+ std::vector<T> tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<T>(0);
+ std::vector<int32_t> tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0);
+
+ // Setup interpreter with Arm NN Delegate applied.
+ auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, delegateOptions);
+ CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
+ CHECK(armnnInterpreter.FillInputTensor<T>(input0Values, 0) == kTfLiteOk);
+ CHECK(armnnInterpreter.FillInputTensor<T>(input1Values, 1) == kTfLiteOk);
+ CHECK(armnnInterpreter.FillInputTensor<T>(input2Values, 2) == kTfLiteOk);
+ CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
+ std::vector<T> armnnOutputValues = armnnInterpreter.GetOutputResult<T>(0);
+ std::vector<int32_t> armnnOutputShape = armnnInterpreter.GetOutputShape(0);
+
+ armnnDelegate::CompareOutputData<T>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
+ armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, tensorShape);
+
+ tfLiteInterpreter.Cleanup();
+ armnnInterpreter.Cleanup();
}
template <typename T>
void DelegateOptionNoFallbackTest(tflite::TensorType tensorType,
- const std::vector<armnn::BackendId>& backends,
std::vector<int32_t>& tensorShape,
std::vector<T>& inputValues,
std::vector<T>& expectedOutputValues,
@@ -291,53 +276,39 @@ void DelegateOptionNoFallbackTest(tflite::TensorType tensorType,
float quantScale = 1.0f,
int quantOffset = 0)
{
- using namespace tflite;
+ using namespace delegateTestInterpreter;
std::vector<char> modelBuffer = CreateCeilTfLiteModel(tensorType,
tensorShape,
quantScale,
quantOffset);
- const Model* tfLiteModel = GetModel(modelBuffer.data());
- // Create TfLite Interpreters
- std::unique_ptr<Interpreter> armnnDelegateInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&armnnDelegateInterpreter) == kTfLiteOk);
- CHECK(armnnDelegateInterpreter != nullptr);
- CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
-
- std::unique_ptr<Interpreter> tfLiteInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&tfLiteInterpreter) == kTfLiteOk);
- CHECK(tfLiteInterpreter != nullptr);
- CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
-
- // Create the ArmNN Delegate
- std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
- theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
- armnnDelegate::TfLiteArmnnDelegateDelete);
- CHECK(theArmnnDelegate != nullptr);
- // Modify armnnDelegateInterpreter to use armnnDelegate
+ // Setup interpreter with just TFLite Runtime.
+ auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
+ CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
+ CHECK(tfLiteInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
+ CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
+ std::vector<T> tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<T>(0);
+ std::vector<int32_t> tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0);
+ tfLiteInterpreter.Cleanup();
+
try
{
- armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get());
+ auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, delegateOptions);
+ CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
+ CHECK(armnnInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
+ CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
+ std::vector<T> armnnOutputValues = armnnInterpreter.GetOutputResult<T>(0);
+ std::vector<int32_t> armnnOutputShape = armnnInterpreter.GetOutputShape(0);
+ armnnInterpreter.Cleanup();
+
+ armnnDelegate::CompareOutputData<T>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
+ armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, tensorShape);
}
catch (const armnn::Exception& e)
{
// Forward the exception message to std::cout
std::cout << e.what() << std::endl;
}
-
- // Set input data
- armnnDelegate::FillInput(tfLiteInterpreter, 0, inputValues);
- armnnDelegate::FillInput(armnnDelegateInterpreter, 0, inputValues);
-
- // Run EnqueueWorkload
- CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
- CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
-
- armnnDelegate::CompareOutputData<T>(tfLiteInterpreter, armnnDelegateInterpreter, tensorShape, expectedOutputValues);
-
- armnnDelegateInterpreter.reset(nullptr);
}
} // anonymous namespace
\ No newline at end of file
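All of the DelegateOptionsTest.cpp call sites above simply drop the backends argument: after this patch the backends travel only inside the armnnDelegate::DelegateOptions object that is already being passed. A sketch of an updated call, with argument names taken from the existing tests and the parameter order assumed from the new helper signature:

    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
    armnn::OptimizerOptions optimizerOptions;
    armnnDelegate::DelegateOptions delegateOptions(backends, optimizerOptions);

    // backends is no longer a separate parameter; delegateOptions owns it.
    DelegateOptionTest<float>(::tflite::TensorType_FLOAT32,
                              tensorShape,
                              inputData,          // input 0
                              inputData,          // input 1
                              divData,            // input 2
                              expectedResult,     // expected output
                              delegateOptions);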
diff --git a/delegate/test/DepthwiseConvolution2dTest.cpp b/delegate/test/DepthwiseConvolution2dTest.cpp
index 9ee589c977..5fdbfc4801 100644
--- a/delegate/test/DepthwiseConvolution2dTest.cpp
+++ b/delegate/test/DepthwiseConvolution2dTest.cpp
@@ -25,7 +25,7 @@ void DepthwiseConv2dValidReluFp32Test(std::vector<armnn::BackendId>& backends)
std::vector<int32_t> inputShape { 1, 3, 2, 2 };
std::vector<int32_t> filterShape { 1, 2, 2, 4 };
std::vector<int32_t> biasShape { 4 };
- std::vector<int32_t> outputShape { 1, 3, 3, 1 };
+ std::vector<int32_t> outputShape { 1, 2, 1, 4 };
static std::vector<float> inputValues =
{
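The outputShape fix above is one of the incorrect expected shapes mentioned in the commit message. With input { 1, 3, 2, 2 } (NHWC), filter { 1, 2, 2, 4 } and VALID padding (stride 1 assumed, as it sits outside the hunk), the depthwise convolution output works out as:

    outH = (3 - 2) + 1 = 2
    outW = (2 - 2) + 1 = 1
    outC = 4                        // filter's last dimension = inputChannels * depth_multiplier
    outputShape = { 1, 2, 1, 4 }    // not { 1, 3, 3, 1 }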
diff --git a/delegate/test/ElementwiseBinaryTestHelper.hpp b/delegate/test/ElementwiseBinaryTestHelper.hpp
index 47ee7c2410..fa9cbb881e 100644
--- a/delegate/test/ElementwiseBinaryTestHelper.hpp
+++ b/delegate/test/ElementwiseBinaryTestHelper.hpp
@@ -8,14 +8,14 @@
#include "TestUtils.hpp"
#include <armnn_delegate.hpp>
+#include <DelegateTestInterpreter.hpp>
#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
#include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <schema_generated.h>
#include <tensorflow/lite/version.h>
+#include <schema_generated.h>
+
#include <doctest/doctest.h>
namespace
@@ -164,7 +164,7 @@ std::vector<char> CreateElementwiseBinaryTfLiteModel(tflite::BuiltinOperator bin
modelDescription,
flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
- flatBufferBuilder.Finish(flatbufferModel);
+ flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);
return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
@@ -185,7 +185,7 @@ void ElementwiseBinaryTest(tflite::BuiltinOperator binaryOperatorCode,
int quantOffset = 0,
bool constantInput = false)
{
- using namespace tflite;
+ using namespace delegateTestInterpreter;
std::vector<char> modelBuffer = CreateElementwiseBinaryTfLiteModel<T>(binaryOperatorCode,
activationType,
tensorType,
@@ -197,47 +197,29 @@ void ElementwiseBinaryTest(tflite::BuiltinOperator binaryOperatorCode,
quantScale,
quantOffset);
- const Model* tfLiteModel = GetModel(modelBuffer.data());
- // Create TfLite Interpreters
- std::unique_ptr <Interpreter> armnnDelegateInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&armnnDelegateInterpreter) == kTfLiteOk);
- CHECK(armnnDelegateInterpreter != nullptr);
- CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
-
- std::unique_ptr <Interpreter> tfLiteInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&tfLiteInterpreter) == kTfLiteOk);
- CHECK(tfLiteInterpreter != nullptr);
- CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
-
- // Create the ArmNN Delegate
- armnnDelegate::DelegateOptions delegateOptions(backends);
- std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
- theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
- armnnDelegate::TfLiteArmnnDelegateDelete);
- CHECK(theArmnnDelegate != nullptr);
- // Modify armnnDelegateInterpreter to use armnnDelegate
- CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
- // Set input data
- armnnDelegate::FillInput<T>(tfLiteInterpreter, 0, input0Values);
- armnnDelegate::FillInput<T>(armnnDelegateInterpreter, 0, input0Values);
- if (!constantInput)
- {
- armnnDelegate::FillInput<T>(tfLiteInterpreter, 1, input1Values);
- armnnDelegate::FillInput<T>(armnnDelegateInterpreter, 1, input1Values);
- }
- // Run EnqueWorkload
- CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
- CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
-
- // Compare output data
- armnnDelegate::CompareOutputData<T>(tfLiteInterpreter,
- armnnDelegateInterpreter,
- outputShape,
- expectedOutputValues);
- armnnDelegateInterpreter.reset(nullptr);
+ // Setup interpreter with just TFLite Runtime.
+ auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
+ CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
+ CHECK(tfLiteInterpreter.FillInputTensor<T>(input0Values, 0) == kTfLiteOk);
+ CHECK(tfLiteInterpreter.FillInputTensor<T>(input1Values, 1) == kTfLiteOk);
+ CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
+ std::vector<T> tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<T>(0);
+ std::vector<int32_t> tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0);
+
+ // Setup interpreter with Arm NN Delegate applied.
+ auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+ CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
+ CHECK(armnnInterpreter.FillInputTensor<T>(input0Values, 0) == kTfLiteOk);
+ CHECK(armnnInterpreter.FillInputTensor<T>(input1Values, 1) == kTfLiteOk);
+ CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
+ std::vector<T> armnnOutputValues = armnnInterpreter.GetOutputResult<T>(0);
+ std::vector<int32_t> armnnOutputShape = armnnInterpreter.GetOutputShape(0);
+
+ armnnDelegate::CompareOutputData<T>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
+ armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, outputShape);
+
+ tfLiteInterpreter.Cleanup();
+ armnnInterpreter.Cleanup();
}
} // anonymous namespace
\ No newline at end of file
diff --git a/delegate/test/ElementwiseUnaryTestHelper.hpp b/delegate/test/ElementwiseUnaryTestHelper.hpp
index f6a534a64f..7f8879b50d 100644
--- a/delegate/test/ElementwiseUnaryTestHelper.hpp
+++ b/delegate/test/ElementwiseUnaryTestHelper.hpp
@@ -8,14 +8,14 @@
#include "TestUtils.hpp"
#include <armnn_delegate.hpp>
+#include <DelegateTestInterpreter.hpp>
#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
#include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <schema_generated.h>
#include <tensorflow/lite/version.h>
+#include <schema_generated.h>
+
#include <doctest/doctest.h>
namespace
@@ -69,7 +69,7 @@ std::vector<char> CreateElementwiseUnaryTfLiteModel(tflite::BuiltinOperator unar
modelDescription,
flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
- flatBufferBuilder.Finish(flatbufferModel);
+ flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);
return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
@@ -80,48 +80,33 @@ void ElementwiseUnaryFP32Test(tflite::BuiltinOperator unaryOperatorCode,
std::vector<float>& inputValues,
std::vector<float>& expectedOutputValues)
{
- using namespace tflite;
+ using namespace delegateTestInterpreter;
std::vector<int32_t> inputShape { { 3, 1, 2} };
std::vector<char> modelBuffer = CreateElementwiseUnaryTfLiteModel(unaryOperatorCode,
::tflite::TensorType_FLOAT32,
inputShape);
- const Model* tfLiteModel = GetModel(modelBuffer.data());
- // Create TfLite Interpreters
- std::unique_ptr<Interpreter> armnnDelegateInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&armnnDelegateInterpreter) == kTfLiteOk);
- CHECK(armnnDelegateInterpreter != nullptr);
- CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
-
- std::unique_ptr<Interpreter> tfLiteInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&tfLiteInterpreter) == kTfLiteOk);
- CHECK(tfLiteInterpreter != nullptr);
- CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
-
- // Create the ArmNN Delegate
- armnnDelegate::DelegateOptions delegateOptions(backends);
- std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
- theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
- armnnDelegate::TfLiteArmnnDelegateDelete);
- CHECK(theArmnnDelegate != nullptr);
- // Modify armnnDelegateInterpreter to use armnnDelegate
- CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
- // Set input data
- armnnDelegate::FillInput(armnnDelegateInterpreter, 0, inputValues);
- armnnDelegate::FillInput(tfLiteInterpreter, 0, inputValues);
-
- // Run EnqueWorkload
- CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
- CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
-
- // Compare output data
- armnnDelegate::CompareOutputData(tfLiteInterpreter, armnnDelegateInterpreter, inputShape, expectedOutputValues);
-
- armnnDelegateInterpreter.reset(nullptr);
- tfLiteInterpreter.reset(nullptr);
+ // Setup interpreter with just TFLite Runtime.
+ auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
+ CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
+ CHECK(tfLiteInterpreter.FillInputTensor<float>(inputValues, 0) == kTfLiteOk);
+ CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
+ std::vector<float> tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<float>(0);
+ std::vector<int32_t> tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0);
+
+ // Setup interpreter with Arm NN Delegate applied.
+ auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+ CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
+ CHECK(armnnInterpreter.FillInputTensor<float>(inputValues, 0) == kTfLiteOk);
+ CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
+ std::vector<float> armnnOutputValues = armnnInterpreter.GetOutputResult<float>(0);
+ std::vector<int32_t> armnnOutputShape = armnnInterpreter.GetOutputShape(0);
+
+ armnnDelegate::CompareOutputData<float>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
+ armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, inputShape);
+
+ tfLiteInterpreter.Cleanup();
+ armnnInterpreter.Cleanup();
}
void ElementwiseUnaryBoolTest(tflite::BuiltinOperator unaryOperatorCode,
@@ -130,56 +115,35 @@ void ElementwiseUnaryBoolTest(tflite::BuiltinOperator unaryOperatorCode,
std::vector<bool>& inputValues,
std::vector<bool>& expectedOutputValues)
{
- using namespace tflite;
+ using namespace delegateTestInterpreter;
std::vector<char> modelBuffer = CreateElementwiseUnaryTfLiteModel(unaryOperatorCode,
::tflite::TensorType_BOOL,
inputShape);
- const Model* tfLiteModel = GetModel(modelBuffer.data());
- // Create TfLite Interpreters
- std::unique_ptr<Interpreter> armnnDelegateInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&armnnDelegateInterpreter) == kTfLiteOk);
- CHECK(armnnDelegateInterpreter != nullptr);
- CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
-
- std::unique_ptr<Interpreter> tfLiteInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&tfLiteInterpreter) == kTfLiteOk);
- CHECK(tfLiteInterpreter != nullptr);
- CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
-
- // Create the ArmNN Delegate
- armnnDelegate::DelegateOptions delegateOptions(backends);
- std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
- theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
- armnnDelegate::TfLiteArmnnDelegateDelete);
- CHECK(theArmnnDelegate != nullptr);
-
- // Modify armnnDelegateInterpreter to use armnnDelegate
- CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
- // Set input data
- armnnDelegate::FillInput(armnnDelegateInterpreter, 0, inputValues);
- armnnDelegate::FillInput(tfLiteInterpreter, 0, inputValues);
-
- // Run EnqueWorkload
- CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
- CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
-
- // Compare output data, comparing Boolean values is handled differently and needs to call the CompareData function
- // directly instead. This is because Boolean types get converted to a bit representation in a vector.
- auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[0];
- auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<bool>(tfLiteDelegateOutputId);
- auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0];
- auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<bool>(armnnDelegateOutputId);
-
- armnnDelegate::CompareData(expectedOutputValues, armnnDelegateOutputData, expectedOutputValues.size());
- armnnDelegate::CompareData(expectedOutputValues, tfLiteDelegateOutputData, expectedOutputValues.size());
- armnnDelegate::CompareData(tfLiteDelegateOutputData, armnnDelegateOutputData, expectedOutputValues.size());
-
- armnnDelegateInterpreter.reset(nullptr);
- tfLiteInterpreter.reset(nullptr);
+ // Setup interpreter with just TFLite Runtime.
+ auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
+ CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
+ CHECK(tfLiteInterpreter.FillInputTensor(inputValues, 0) == kTfLiteOk);
+ CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
+ std::vector<bool> tfLiteOutputValues = tfLiteInterpreter.GetOutputResult(0);
+ std::vector<int32_t> tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0);
+
+ // Setup interpreter with Arm NN Delegate applied.
+ auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+ CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
+ CHECK(armnnInterpreter.FillInputTensor(inputValues, 0) == kTfLiteOk);
+ CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
+ std::vector<bool> armnnOutputValues = armnnInterpreter.GetOutputResult(0);
+ std::vector<int32_t> armnnOutputShape = armnnInterpreter.GetOutputShape(0);
+
+ armnnDelegate::CompareData(expectedOutputValues, armnnOutputValues, expectedOutputValues.size());
+ armnnDelegate::CompareData(expectedOutputValues, tfLiteOutputValues, expectedOutputValues.size());
+ armnnDelegate::CompareData(tfLiteOutputValues, armnnOutputValues, expectedOutputValues.size());
+
+ armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, inputShape);
+
+ tfLiteInterpreter.Cleanup();
+ armnnInterpreter.Cleanup();
}
} // anonymous namespace
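ElementwiseUnaryBoolTest keeps comparing element by element through CompareData rather than CompareOutputData because, as the removed comment noted, Boolean outputs end up bit-packed in std::vector<bool>, so there is no contiguous bool buffer to compare in bulk. Illustrative only, not part of the patch:

    // Element-wise comparison is required for bit-packed bool vectors.
    CHECK(tfLiteOutputValues.size() == expectedOutputValues.size());
    for (size_t i = 0; i < expectedOutputValues.size(); ++i)
    {
        CHECK(tfLiteOutputValues[i] == expectedOutputValues[i]);
        CHECK(armnnOutputValues[i]  == expectedOutputValues[i]);
    }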
diff --git a/delegate/test/FillTestHelper.hpp b/delegate/test/FillTestHelper.hpp
index c8aadb087b..70162c4a1d 100644
--- a/delegate/test/FillTestHelper.hpp
+++ b/delegate/test/FillTestHelper.hpp
@@ -8,14 +8,14 @@
#include "TestUtils.hpp"
#include <armnn_delegate.hpp>
+#include <DelegateTestInterpreter.hpp>
#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
#include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <schema_generated.h>
#include <tensorflow/lite/version.h>
+#include <schema_generated.h>
+
#include <doctest/doctest.h>
namespace
@@ -102,7 +102,7 @@ std::vector<char> CreateFillTfLiteModel(tflite::BuiltinOperator fillOperatorCode
modelDescription,
flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
- flatBufferBuilder.Finish(flatbufferModel);
+ flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);
return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
@@ -118,42 +118,32 @@ void FillTest(tflite::BuiltinOperator fillOperatorCode,
std::vector<T>& expectedOutputValues,
T fillValue)
{
- using namespace tflite;
+ using namespace delegateTestInterpreter;
std::vector<char> modelBuffer = CreateFillTfLiteModel<T>(fillOperatorCode,
tensorType,
inputShape,
tensorShape,
{fillValue});
- const Model* tfLiteModel = GetModel(modelBuffer.data());
- CHECK(tfLiteModel != nullptr);
-
- std::unique_ptr<Interpreter> armnnDelegateInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&armnnDelegateInterpreter) == kTfLiteOk);
- CHECK(armnnDelegateInterpreter != nullptr);
- CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
-
- std::unique_ptr<Interpreter> tfLiteInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&tfLiteInterpreter) == kTfLiteOk);
- CHECK(tfLiteInterpreter != nullptr);
- CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
-
- // Create the ArmNN Delegate
- armnnDelegate::DelegateOptions delegateOptions(backends);
- std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
- theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
- armnnDelegate::TfLiteArmnnDelegateDelete);
- CHECK(theArmnnDelegate != nullptr);
- // Modify armnnDelegateInterpreter to use armnnDelegate
- CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
- // Run EnqueueWorkload
- CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
- CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
-
- armnnDelegate::CompareOutputData<T>(tfLiteInterpreter, armnnDelegateInterpreter, tensorShape, expectedOutputValues);
+ // Setup interpreter with just TFLite Runtime.
+ auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
+ CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
+ CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
+ std::vector<T> tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<T>(0);
+ std::vector<int32_t> tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0);
+
+ // Setup interpreter with Arm NN Delegate applied.
+ auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+ CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
+ CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
+ std::vector<T> armnnOutputValues = armnnInterpreter.GetOutputResult<T>(0);
+ std::vector<int32_t> armnnOutputShape = armnnInterpreter.GetOutputShape(0);
+
+ armnnDelegate::CompareOutputData<T>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
+ armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, tensorShape);
+
+ tfLiteInterpreter.Cleanup();
+ armnnInterpreter.Cleanup();
}
} // anonymous namespace
diff --git a/delegate/test/FullyConnectedTestHelper.hpp b/delegate/test/FullyConnectedTestHelper.hpp
index d6bbd93176..e9e5c092d6 100644
--- a/delegate/test/FullyConnectedTestHelper.hpp
+++ b/delegate/test/FullyConnectedTestHelper.hpp
@@ -8,14 +8,14 @@
#include "TestUtils.hpp"
#include <armnn_delegate.hpp>
+#include <DelegateTestInterpreter.hpp>
#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
#include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <schema_generated.h>
#include <tensorflow/lite/version.h>
+#include <schema_generated.h>
+
#include <doctest/doctest.h>
namespace
@@ -159,7 +159,7 @@ std::vector<char> CreateFullyConnectedTfLiteModel(tflite::TensorType tensorType,
modelDescription,
flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
- flatBufferBuilder.Finish(flatbufferModel);
+ flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);
return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
@@ -180,7 +180,7 @@ void FullyConnectedTest(std::vector<armnn::BackendId>& backends,
float quantScale = 1.0f,
int quantOffset = 0)
{
- using namespace tflite;
+ using namespace delegateTestInterpreter;
std::vector<char> modelBuffer = CreateFullyConnectedTfLiteModel(tensorType,
activationType,
@@ -192,64 +192,50 @@ void FullyConnectedTest(std::vector<armnn::BackendId>& backends,
constantWeights,
quantScale,
quantOffset);
- const Model* tfLiteModel = GetModel(modelBuffer.data());
-
- // Create TfLite Interpreters
- std::unique_ptr<Interpreter> armnnDelegateInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&armnnDelegateInterpreter) == kTfLiteOk);
- CHECK(armnnDelegateInterpreter != nullptr);
- CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
-
- std::unique_ptr<Interpreter> tfLiteInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&tfLiteInterpreter) == kTfLiteOk);
- CHECK(tfLiteInterpreter != nullptr);
- CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
-
- // Create the ArmNN Delegate
- armnnDelegate::DelegateOptions delegateOptions(backends);
- std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
- theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
- armnnDelegate::TfLiteArmnnDelegateDelete);
- CHECK(theArmnnDelegate != nullptr);
-
- // Modify armnnDelegateInterpreter to use armnnDelegate
- CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
- // Set input data
- armnnDelegate::FillInput<T>(tfLiteInterpreter, 0, inputValues);
- armnnDelegate::FillInput<T>(armnnDelegateInterpreter, 0, inputValues);
+
+ // Setup interpreter with just TFLite Runtime.
+ auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
+ CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
+
+ // Setup interpreter with Arm NN Delegate applied.
+ auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+ CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
+
+ CHECK(tfLiteInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
+ CHECK(armnnInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
if (!constantWeights)
{
- armnnDelegate::FillInput<T>(tfLiteInterpreter, 1, weightsData);
- armnnDelegate::FillInput<T>(armnnDelegateInterpreter, 1, weightsData);
+ CHECK(tfLiteInterpreter.FillInputTensor<T>(weightsData, 1) == kTfLiteOk);
+ CHECK(armnnInterpreter.FillInputTensor<T>(weightsData, 1) == kTfLiteOk);
if (tensorType == ::tflite::TensorType_INT8)
{
std::vector <int32_t> biasData = {10};
- armnnDelegate::FillInput<int32_t>(tfLiteInterpreter, 2, biasData);
- armnnDelegate::FillInput<int32_t>(armnnDelegateInterpreter, 2, biasData);
+ CHECK(tfLiteInterpreter.FillInputTensor<int32_t>(biasData, 2) == kTfLiteOk);
+ CHECK(armnnInterpreter.FillInputTensor<int32_t>(biasData, 2) == kTfLiteOk);
}
else
{
std::vector<float> biasData = {10};
- armnnDelegate::FillInput<float>(tfLiteInterpreter, 2, biasData);
- armnnDelegate::FillInput<float>(armnnDelegateInterpreter, 2, biasData);
+ CHECK(tfLiteInterpreter.FillInputTensor<float>(biasData, 2) == kTfLiteOk);
+ CHECK(armnnInterpreter.FillInputTensor<float>(biasData, 2) == kTfLiteOk);
}
}
- // Run EnqueWorkload
- CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
- CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
+ CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
+ std::vector<T> tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<T>(0);
+ std::vector<int32_t> tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0);
+
+ CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
+ std::vector<T> armnnOutputValues = armnnInterpreter.GetOutputResult<T>(0);
+ std::vector<int32_t> armnnOutputShape = armnnInterpreter.GetOutputShape(0);
+
+ armnnDelegate::CompareOutputData<T>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
+ armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, outputTensorShape);
- // Compare output data
- armnnDelegate::CompareOutputData<T>(tfLiteInterpreter,
- armnnDelegateInterpreter,
- outputTensorShape,
- expectedOutputValues);
- armnnDelegateInterpreter.reset(nullptr);
+ tfLiteInterpreter.Cleanup();
+ armnnInterpreter.Cleanup();
}
} // anonymous namespace
\ No newline at end of file
diff --git a/delegate/test/GatherNdTestHelper.hpp b/delegate/test/GatherNdTestHelper.hpp
index 7b1595bafb..604b2159fd 100644
--- a/delegate/test/GatherNdTestHelper.hpp
+++ b/delegate/test/GatherNdTestHelper.hpp
@@ -8,14 +8,14 @@
#include "TestUtils.hpp"
#include <armnn_delegate.hpp>
+#include <DelegateTestInterpreter.hpp>
#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
#include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <schema_generated.h>
#include <tensorflow/lite/version.h>
+#include <schema_generated.h>
+
#include <doctest/doctest.h>
namespace
@@ -108,7 +108,7 @@ std::vector<char> CreateGatherNdTfLiteModel(tflite::TensorType tensorType,
modelDescription,
flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
- flatBufferBuilder.Finish(flatbufferModel);
+ flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);
return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
@@ -126,56 +126,35 @@ void GatherNdTest(tflite::TensorType tensorType,
float quantScale = 1.0f,
int quantOffset = 0)
{
- using namespace tflite;
+ using namespace delegateTestInterpreter;
std::vector<char> modelBuffer = CreateGatherNdTfLiteModel(tensorType,
paramsShape,
indicesShape,
expectedOutputShape,
quantScale,
quantOffset);
- const Model* tfLiteModel = GetModel(modelBuffer.data());
-
- // Create TfLite Interpreters
- std::unique_ptr<Interpreter> armnnDelegate;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&armnnDelegate) == kTfLiteOk);
- CHECK(armnnDelegate != nullptr);
- CHECK(armnnDelegate->AllocateTensors() == kTfLiteOk);
-
- std::unique_ptr<Interpreter> tfLiteDelegate;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&tfLiteDelegate) == kTfLiteOk);
- CHECK(tfLiteDelegate != nullptr);
- CHECK(tfLiteDelegate->AllocateTensors() == kTfLiteOk);
-
- // Create the ArmNN Delegate
- armnnDelegate::DelegateOptions delegateOptions(backends);
- std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
- theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
- armnnDelegate::TfLiteArmnnDelegateDelete);
- CHECK(theArmnnDelegate != nullptr);
-
- // Modify armnnDelegateInterpreter to use armnnDelegate
- CHECK(armnnDelegate->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
- // Set input data
- armnnDelegate::FillInput<T>(tfLiteDelegate, 0, paramsValues);
- armnnDelegate::FillInput<T>(armnnDelegate, 0, paramsValues);
- armnnDelegate::FillInput<int32_t>(tfLiteDelegate, 1, indicesValues);
- armnnDelegate::FillInput<int32_t>(armnnDelegate, 1, indicesValues);
-
- // Run EnqueWorkload
- CHECK(tfLiteDelegate->Invoke() == kTfLiteOk);
- CHECK(armnnDelegate->Invoke() == kTfLiteOk);
-
- // Compare output data
- armnnDelegate::CompareOutputData<T>(tfLiteDelegate,
- armnnDelegate,
- expectedOutputShape,
- expectedOutputValues,
- 0);
-
- tfLiteDelegate.reset(nullptr);
- armnnDelegate.reset(nullptr);
+ // Setup interpreter with just TFLite Runtime.
+ auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
+ CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
+ CHECK(tfLiteInterpreter.FillInputTensor<T>(paramsValues, 0) == kTfLiteOk);
+ CHECK(tfLiteInterpreter.FillInputTensor<int32_t>(indicesValues, 1) == kTfLiteOk);
+ CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
+ std::vector<T> tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<T>(0);
+ std::vector<int32_t> tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0);
+
+ // Setup interpreter with Arm NN Delegate applied.
+ auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+ CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
+ CHECK(armnnInterpreter.FillInputTensor<T>(paramsValues, 0) == kTfLiteOk);
+ CHECK(armnnInterpreter.FillInputTensor<int32_t>(indicesValues, 1) == kTfLiteOk);
+ CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
+ std::vector<T> armnnOutputValues = armnnInterpreter.GetOutputResult<T>(0);
+ std::vector<int32_t> armnnOutputShape = armnnInterpreter.GetOutputShape(0);
+
+ armnnDelegate::CompareOutputData<T>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
+ armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, expectedOutputShape);
+
+ tfLiteInterpreter.Cleanup();
+ armnnInterpreter.Cleanup();
}
} // anonymous namespace
\ No newline at end of file
diff --git a/delegate/test/GatherTestHelper.hpp b/delegate/test/GatherTestHelper.hpp
index 41e3b55a50..43717a3b2f 100644
--- a/delegate/test/GatherTestHelper.hpp
+++ b/delegate/test/GatherTestHelper.hpp
@@ -8,14 +8,14 @@
#include "TestUtils.hpp"
#include <armnn_delegate.hpp>
+#include <DelegateTestInterpreter.hpp>
#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
#include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <schema_generated.h>
#include <tensorflow/lite/version.h>
+#include <schema_generated.h>
+
#include <doctest/doctest.h>
namespace
@@ -109,7 +109,7 @@ std::vector<char> CreateGatherTfLiteModel(tflite::TensorType tensorType,
modelDescription,
flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
- flatBufferBuilder.Finish(flatbufferModel);
+ flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);
return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
@@ -128,7 +128,7 @@ void GatherTest(tflite::TensorType tensorType,
float quantScale = 1.0f,
int quantOffset = 0)
{
- using namespace tflite;
+ using namespace delegateTestInterpreter;
std::vector<char> modelBuffer = CreateGatherTfLiteModel(tensorType,
paramsShape,
indicesShape,
@@ -136,49 +136,28 @@ void GatherTest(tflite::TensorType tensorType,
axis,
quantScale,
quantOffset);
- const Model* tfLiteModel = GetModel(modelBuffer.data());
-
- // Create TfLite Interpreters
- std::unique_ptr<Interpreter> armnnDelegate;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&armnnDelegate) == kTfLiteOk);
- CHECK(armnnDelegate != nullptr);
- CHECK(armnnDelegate->AllocateTensors() == kTfLiteOk);
-
- std::unique_ptr<Interpreter> tfLiteDelegate;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&tfLiteDelegate) == kTfLiteOk);
- CHECK(tfLiteDelegate != nullptr);
- CHECK(tfLiteDelegate->AllocateTensors() == kTfLiteOk);
-
- // Create the ArmNN Delegate
- armnnDelegate::DelegateOptions delegateOptions(backends);
- std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
- theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
- armnnDelegate::TfLiteArmnnDelegateDelete);
- CHECK(theArmnnDelegate != nullptr);
-
- // Modify armnnDelegateInterpreter to use armnnDelegate
- CHECK(armnnDelegate->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
- // Set input data
- armnnDelegate::FillInput<T>(tfLiteDelegate, 0, paramsValues);
- armnnDelegate::FillInput<T>(armnnDelegate, 0, paramsValues);
- armnnDelegate::FillInput<int32_t>(tfLiteDelegate, 1, indicesValues);
- armnnDelegate::FillInput<int32_t>(armnnDelegate, 1, indicesValues);
-
- // Run EnqueWorkload
- CHECK(tfLiteDelegate->Invoke() == kTfLiteOk);
- CHECK(armnnDelegate->Invoke() == kTfLiteOk);
-
- // Compare output data
- armnnDelegate::CompareOutputData<T>(tfLiteDelegate,
- armnnDelegate,
- expectedOutputShape,
- expectedOutputValues,
- 0);
-
- tfLiteDelegate.reset(nullptr);
- armnnDelegate.reset(nullptr);
+ // Setup interpreter with just TFLite Runtime.
+ auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
+ CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
+ CHECK(tfLiteInterpreter.FillInputTensor<T>(paramsValues, 0) == kTfLiteOk);
+ CHECK(tfLiteInterpreter.FillInputTensor<int32_t>(indicesValues, 1) == kTfLiteOk);
+ CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
+ std::vector<T> tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<T>(0);
+ std::vector<int32_t> tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0);
+
+ // Setup interpreter with Arm NN Delegate applied.
+ auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+ CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
+ CHECK(armnnInterpreter.FillInputTensor<T>(paramsValues, 0) == kTfLiteOk);
+ CHECK(armnnInterpreter.FillInputTensor<int32_t>(indicesValues, 1) == kTfLiteOk);
+ CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
+ std::vector<T> armnnOutputValues = armnnInterpreter.GetOutputResult<T>(0);
+ std::vector<int32_t> armnnOutputShape = armnnInterpreter.GetOutputShape(0);
+
+ armnnDelegate::CompareOutputData<T>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
+ armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, expectedOutputShape);
+
+ tfLiteInterpreter.Cleanup();
+ armnnInterpreter.Cleanup();
}
} // anonymous namespace
\ No newline at end of file
diff --git a/delegate/test/LogicalTest.cpp b/delegate/test/LogicalTest.cpp
index 57bbd318e7..8414293547 100644
--- a/delegate/test/LogicalTest.cpp
+++ b/delegate/test/LogicalTest.cpp
@@ -27,15 +27,15 @@ void LogicalBinaryAndBoolTest(std::vector<armnn::BackendId>& backends)
std::vector<bool> input1Values { 0, 1, 0, 1 };
std::vector<bool> expectedOutputValues { 0, 0, 0, 1 };
- LogicalBinaryTest<bool>(tflite::BuiltinOperator_LOGICAL_AND,
- ::tflite::TensorType_BOOL,
- backends,
- input0Shape,
- input1Shape,
- expectedOutputShape,
- input0Values,
- input1Values,
- expectedOutputValues);
+ LogicalBinaryTest(tflite::BuiltinOperator_LOGICAL_AND,
+ ::tflite::TensorType_BOOL,
+ backends,
+ input0Shape,
+ input1Shape,
+ expectedOutputShape,
+ input0Values,
+ input1Values,
+ expectedOutputValues);
}
void LogicalBinaryAndBroadcastTest(std::vector<armnn::BackendId>& backends)
@@ -48,15 +48,15 @@ void LogicalBinaryAndBroadcastTest(std::vector<armnn::BackendId>& backends)
std::vector<bool> input1Values { 1 };
std::vector<bool> expectedOutputValues { 0, 1, 0, 1 };
- LogicalBinaryTest<bool>(tflite::BuiltinOperator_LOGICAL_AND,
- ::tflite::TensorType_BOOL,
- backends,
- input0Shape,
- input1Shape,
- expectedOutputShape,
- input0Values,
- input1Values,
- expectedOutputValues);
+ LogicalBinaryTest(tflite::BuiltinOperator_LOGICAL_AND,
+ ::tflite::TensorType_BOOL,
+ backends,
+ input0Shape,
+ input1Shape,
+ expectedOutputShape,
+ input0Values,
+ input1Values,
+ expectedOutputValues);
}
void LogicalBinaryOrBoolTest(std::vector<armnn::BackendId>& backends)
@@ -69,15 +69,15 @@ void LogicalBinaryOrBoolTest(std::vector<armnn::BackendId>& backends)
std::vector<bool> input1Values { 0, 1, 0, 1 };
std::vector<bool> expectedOutputValues { 0, 1, 1, 1 };
- LogicalBinaryTest<bool>(tflite::BuiltinOperator_LOGICAL_OR,
- ::tflite::TensorType_BOOL,
- backends,
- input0Shape,
- input1Shape,
- expectedOutputShape,
- input0Values,
- input1Values,
- expectedOutputValues);
+ LogicalBinaryTest(tflite::BuiltinOperator_LOGICAL_OR,
+ ::tflite::TensorType_BOOL,
+ backends,
+ input0Shape,
+ input1Shape,
+ expectedOutputShape,
+ input0Values,
+ input1Values,
+ expectedOutputValues);
}
void LogicalBinaryOrBroadcastTest(std::vector<armnn::BackendId>& backends)
@@ -90,15 +90,15 @@ void LogicalBinaryOrBroadcastTest(std::vector<armnn::BackendId>& backends)
std::vector<bool> input1Values { 1 };
std::vector<bool> expectedOutputValues { 1, 1, 1, 1 };
- LogicalBinaryTest<bool>(tflite::BuiltinOperator_LOGICAL_OR,
- ::tflite::TensorType_BOOL,
- backends,
- input0Shape,
- input1Shape,
- expectedOutputShape,
- input0Values,
- input1Values,
- expectedOutputValues);
+ LogicalBinaryTest(tflite::BuiltinOperator_LOGICAL_OR,
+ ::tflite::TensorType_BOOL,
+ backends,
+ input0Shape,
+ input1Shape,
+ expectedOutputShape,
+ input0Values,
+ input1Values,
+ expectedOutputValues);
}
// LogicalNot operator uses ElementwiseUnary unary layer and descriptor but is still classed as logical operator.
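Since LogicalBinaryTest is now a plain (non-template) function taking std::vector<bool> arguments, call sites no longer spell out a type argument. A hedged sketch of how one of these tests is typically registered with doctest follows; the suite name, case name and CpuRef backend choice are illustrative and not taken from this patch.

// Illustrative doctest wiring for one of the logical tests above.
TEST_SUITE("LogicalBinaryTests")
{
TEST_CASE("LogicalBinaryAndBoolTestCpuRef")
{
    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
    LogicalBinaryAndBoolTest(backends);
}
}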
diff --git a/delegate/test/LogicalTestHelper.hpp b/delegate/test/LogicalTestHelper.hpp
index 2f2ae7bf40..7da8ad9bfc 100644
--- a/delegate/test/LogicalTestHelper.hpp
+++ b/delegate/test/LogicalTestHelper.hpp
@@ -8,14 +8,14 @@
#include "TestUtils.hpp"
#include <armnn_delegate.hpp>
+#include <DelegateTestInterpreter.hpp>
#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
#include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <schema_generated.h>
#include <tensorflow/lite/version.h>
+#include <schema_generated.h>
+
#include <doctest/doctest.h>
namespace
@@ -120,26 +120,25 @@ std::vector<char> CreateLogicalBinaryTfLiteModel(tflite::BuiltinOperator logical
modelDescription,
flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
- flatBufferBuilder.Finish(flatbufferModel);
+ flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);
return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
}
-template <typename T>
void LogicalBinaryTest(tflite::BuiltinOperator logicalOperatorCode,
tflite::TensorType tensorType,
std::vector<armnn::BackendId>& backends,
std::vector<int32_t>& input0Shape,
std::vector<int32_t>& input1Shape,
std::vector<int32_t>& expectedOutputShape,
- std::vector<T>& input0Values,
- std::vector<T>& input1Values,
- std::vector<T>& expectedOutputValues,
+ std::vector<bool>& input0Values,
+ std::vector<bool>& input1Values,
+ std::vector<bool>& expectedOutputValues,
float quantScale = 1.0f,
int quantOffset = 0)
{
- using namespace tflite;
+ using namespace delegateTestInterpreter;
std::vector<char> modelBuffer = CreateLogicalBinaryTfLiteModel(logicalOperatorCode,
tensorType,
input0Shape,
@@ -148,54 +147,32 @@ void LogicalBinaryTest(tflite::BuiltinOperator logicalOperatorCode,
quantScale,
quantOffset);
- const Model* tfLiteModel = GetModel(modelBuffer.data());
- // Create TfLite Interpreters
- std::unique_ptr<Interpreter> armnnDelegateInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&armnnDelegateInterpreter) == kTfLiteOk);
- CHECK(armnnDelegateInterpreter != nullptr);
- CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
-
- std::unique_ptr<Interpreter> tfLiteInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&tfLiteInterpreter) == kTfLiteOk);
- CHECK(tfLiteInterpreter != nullptr);
- CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
-
- // Create the ArmNN Delegate
- armnnDelegate::DelegateOptions delegateOptions(backends);
- std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
- theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
- armnnDelegate::TfLiteArmnnDelegateDelete);
- CHECK(theArmnnDelegate != nullptr);
- // Modify armnnDelegateInterpreter to use armnnDelegate
- CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
- // Set input data for the armnn interpreter
- armnnDelegate::FillInput(armnnDelegateInterpreter, 0, input0Values);
- armnnDelegate::FillInput(armnnDelegateInterpreter, 1, input1Values);
-
- // Set input data for the tflite interpreter
- armnnDelegate::FillInput(tfLiteInterpreter, 0, input0Values);
- armnnDelegate::FillInput(tfLiteInterpreter, 1, input1Values);
-
- // Run EnqueWorkload
- CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
- CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
-
- // Compare output data, comparing Boolean values is handled differently and needs to call the CompareData function
- // directly. This is because Boolean types get converted to a bit representation in a vector.
- auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[0];
- auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<T>(tfLiteDelegateOutputId);
- auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0];
- auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<T>(armnnDelegateOutputId);
-
- armnnDelegate::CompareData(expectedOutputValues, armnnDelegateOutputData, expectedOutputValues.size());
- armnnDelegate::CompareData(expectedOutputValues, tfLiteDelegateOutputData, expectedOutputValues.size());
- armnnDelegate::CompareData(tfLiteDelegateOutputData, armnnDelegateOutputData, expectedOutputValues.size());
-
- armnnDelegateInterpreter.reset(nullptr);
- tfLiteInterpreter.reset(nullptr);
+ // Setup interpreter with just TFLite Runtime.
+ auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
+ CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
+ CHECK(tfLiteInterpreter.FillInputTensor(input0Values, 0) == kTfLiteOk);
+ CHECK(tfLiteInterpreter.FillInputTensor(input1Values, 1) == kTfLiteOk);
+ CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
+ std::vector<bool> tfLiteOutputValues = tfLiteInterpreter.GetOutputResult(0);
+ std::vector<int32_t> tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0);
+
+ // Setup interpreter with Arm NN Delegate applied.
+ auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+ CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
+ CHECK(armnnInterpreter.FillInputTensor(input0Values, 0) == kTfLiteOk);
+ CHECK(armnnInterpreter.FillInputTensor(input1Values, 1) == kTfLiteOk);
+ CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
+ std::vector<bool> armnnOutputValues = armnnInterpreter.GetOutputResult(0);
+ std::vector<int32_t> armnnOutputShape = armnnInterpreter.GetOutputShape(0);
+
+ armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, expectedOutputShape);
+
+ armnnDelegate::CompareData(expectedOutputValues, armnnOutputValues, expectedOutputValues.size());
+ armnnDelegate::CompareData(expectedOutputValues, tfLiteOutputValues, expectedOutputValues.size());
+ armnnDelegate::CompareData(tfLiteOutputValues, armnnOutputValues, expectedOutputValues.size());
+
+ tfLiteInterpreter.Cleanup();
+ armnnInterpreter.Cleanup();
}
} // anonymous namespace
\ No newline at end of file
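Throughout these helpers the FlatBuffer is now finished with armnnDelegate::FILE_IDENTIFIER. In FlatBuffers, a file identifier is a four-character tag written into the buffer header that readers can check before accepting the buffer; for TensorFlow Lite models this is presumably the standard "TFL3" tag, though the authoritative value is whatever the delegate defines. A minimal sketch of such a check, using only the public flatbuffers API:

#include <flatbuffers/flatbuffers.h>

// Hedged sketch: confirm a finished model buffer carries the expected file
// identifier before handing it to an interpreter.
inline bool HasExpectedIdentifier(const std::vector<char>& modelBuffer)
{
    return flatbuffers::BufferHasIdentifier(modelBuffer.data(),
                                            armnnDelegate::FILE_IDENTIFIER);
}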
diff --git a/delegate/test/LstmTestHelper.hpp b/delegate/test/LstmTestHelper.hpp
index 14776ca341..4ff517509d 100644
--- a/delegate/test/LstmTestHelper.hpp
+++ b/delegate/test/LstmTestHelper.hpp
@@ -8,14 +8,13 @@
#include "TestUtils.hpp"
#include <armnn_delegate.hpp>
+#include <DelegateTestInterpreter.hpp>
#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
#include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <schema_generated.h>
#include <tensorflow/lite/version.h>
-#include <tensorflow/lite/c/common.h>
+
+#include <schema_generated.h>
#include <doctest/doctest.h>
@@ -539,7 +538,7 @@ std::vector<char> CreateLstmTfLiteModel(tflite::TensorType tensorType,
modelDescription,
flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
- flatBufferBuilder.Finish(flatbufferModel);
+ flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);
return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
@@ -591,7 +590,7 @@ void LstmTestImpl(std::vector<armnn::BackendId>& backends,
float clippingThresCell,
float clippingThresProj)
{
- using namespace tflite;
+ using namespace delegateTestInterpreter;
std::vector<char> modelBuffer = CreateLstmTfLiteModel(tensorType,
batchSize,
@@ -635,57 +634,29 @@ void LstmTestImpl(std::vector<armnn::BackendId>& backends,
clippingThresCell,
clippingThresProj);
- const Model* tfLiteModel = GetModel(modelBuffer.data());
- // Create TfLite Interpreters
- std::unique_ptr<Interpreter> armnnDelegateInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&armnnDelegateInterpreter) == kTfLiteOk);
- CHECK(armnnDelegateInterpreter != nullptr);
- CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
-
- std::unique_ptr<Interpreter> tfLiteInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&tfLiteInterpreter) == kTfLiteOk);
- CHECK(tfLiteInterpreter != nullptr);
- CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
-
- // Create the ArmNN Delegate
- armnnDelegate::DelegateOptions delegateOptions(backends);
- std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
- theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
- armnnDelegate::TfLiteArmnnDelegateDelete);
- CHECK(theArmnnDelegate != nullptr);
- // Modify armnnDelegateInterpreter to use armnnDelegate
- CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
- // Set input data
- auto tfLiteDelegateInputId = tfLiteInterpreter->inputs()[0];
- auto tfLiteDelageInputData = tfLiteInterpreter->typed_tensor<T>(tfLiteDelegateInputId);
- for (unsigned int i = 0; i < inputValues.size(); ++i)
- {
- tfLiteDelageInputData[i] = inputValues[i];
- }
-
- auto armnnDelegateInputId = armnnDelegateInterpreter->inputs()[0];
- auto armnnDelegateInputData = armnnDelegateInterpreter->typed_tensor<T>(armnnDelegateInputId);
- for (unsigned int i = 0; i < inputValues.size(); ++i)
- {
- armnnDelegateInputData[i] = inputValues[i];
- }
-
- // Run EnqueWorkload
- CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
- CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
-
- // Compare output data
- auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[0];
- auto tfLiteDelagateOutputData = tfLiteInterpreter->typed_tensor<T>(tfLiteDelegateOutputId);
- auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0];
- auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<T>(armnnDelegateOutputId);
-
- armnnDelegate::CompareData(expectedOutputValues.data(), armnnDelegateOutputData, expectedOutputValues.size());
- armnnDelegate::CompareData(expectedOutputValues.data(), tfLiteDelagateOutputData, expectedOutputValues.size());
- armnnDelegate::CompareData(tfLiteDelagateOutputData, armnnDelegateOutputData, expectedOutputValues.size());
+ std::vector<int32_t> expectedOutputShape {batchSize , outputSize};
+
+ // Setup interpreter with just TFLite Runtime.
+ auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
+ CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
+ CHECK(tfLiteInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
+ CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
+ std::vector<T> tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<T>(0);
+ std::vector<int32_t> tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0);
+
+ // Setup interpreter with Arm NN Delegate applied.
+ auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+ CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
+ CHECK(armnnInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
+ CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
+ std::vector<T> armnnOutputValues = armnnInterpreter.GetOutputResult<T>(0);
+ std::vector<int32_t> armnnOutputShape = armnnInterpreter.GetOutputShape(0);
+
+ armnnDelegate::CompareOutputData<T>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
+ armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, expectedOutputShape);
+
+ tfLiteInterpreter.Cleanup();
+ armnnInterpreter.Cleanup();
}
} // anonymous namespace
\ No newline at end of file
diff --git a/delegate/test/NormalizationTestHelper.hpp b/delegate/test/NormalizationTestHelper.hpp
index eafdf84835..a9db6b8fbf 100644
--- a/delegate/test/NormalizationTestHelper.hpp
+++ b/delegate/test/NormalizationTestHelper.hpp
@@ -8,14 +8,14 @@
#include "TestUtils.hpp"
#include <armnn_delegate.hpp>
+#include <DelegateTestInterpreter.hpp>
#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
#include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <schema_generated.h>
#include <tensorflow/lite/version.h>
+#include <schema_generated.h>
+
#include <doctest/doctest.h>
namespace
@@ -110,7 +110,7 @@ std::vector<char> CreateNormalizationTfLiteModel(tflite::BuiltinOperator normali
modelDescription,
flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
- flatBufferBuilder.Finish(flatbufferModel);
+ flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);
return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
@@ -131,7 +131,7 @@ void NormalizationTest(tflite::BuiltinOperator normalizationOperatorCode,
float quantScale = 1.0f,
int quantOffset = 0)
{
- using namespace tflite;
+ using namespace delegateTestInterpreter;
std::vector<char> modelBuffer = CreateNormalizationTfLiteModel(normalizationOperatorCode,
tensorType,
inputShape,
@@ -143,40 +143,27 @@ void NormalizationTest(tflite::BuiltinOperator normalizationOperatorCode,
quantScale,
quantOffset);
- const Model* tfLiteModel = GetModel(modelBuffer.data());
- CHECK(tfLiteModel != nullptr);
-
- std::unique_ptr<Interpreter> armnnDelegateInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&armnnDelegateInterpreter) == kTfLiteOk);
- CHECK(armnnDelegateInterpreter != nullptr);
- CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
-
- std::unique_ptr<Interpreter> tfLiteInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&tfLiteInterpreter) == kTfLiteOk);
- CHECK(tfLiteInterpreter != nullptr);
- CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
-
- // Create the ArmNN Delegate
- armnnDelegate::DelegateOptions delegateOptions(backends);
- std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
- theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
- armnnDelegate::TfLiteArmnnDelegateDelete);
- CHECK(theArmnnDelegate != nullptr);
- // Modify armnnDelegateInterpreter to use armnnDelegate
- CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
- // Set input data
- armnnDelegate::FillInput<T>(tfLiteInterpreter, 0, inputValues);
- armnnDelegate::FillInput<T>(armnnDelegateInterpreter, 0, inputValues);
-
- // Run EnqueueWorkload
- CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
- CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
-
- // Compare output data
- armnnDelegate::CompareOutputData(tfLiteInterpreter, armnnDelegateInterpreter, outputShape, expectedOutputValues);
+ // Setup interpreter with just TFLite Runtime.
+ auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
+ CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
+ CHECK(tfLiteInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
+ CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
+ std::vector<T> tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<T>(0);
+ std::vector<int32_t> tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0);
+
+ // Setup interpreter with Arm NN Delegate applied.
+ auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+ CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
+ CHECK(armnnInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
+ CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
+ std::vector<T> armnnOutputValues = armnnInterpreter.GetOutputResult<T>(0);
+ std::vector<int32_t> armnnOutputShape = armnnInterpreter.GetOutputShape(0);
+
+ armnnDelegate::CompareOutputData<T>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
+ armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, outputShape);
+
+ tfLiteInterpreter.Cleanup();
+ armnnInterpreter.Cleanup();
}
void L2NormalizationTest(std::vector<armnn::BackendId>& backends)
diff --git a/delegate/test/PackTestHelper.hpp b/delegate/test/PackTestHelper.hpp
index 0fd2f195f4..112eccb5be 100644
--- a/delegate/test/PackTestHelper.hpp
+++ b/delegate/test/PackTestHelper.hpp
@@ -8,17 +8,15 @@
#include "TestUtils.hpp"
#include <armnn_delegate.hpp>
+#include <DelegateTestInterpreter.hpp>
#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
#include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <schema_generated.h>
#include <tensorflow/lite/version.h>
-#include <doctest/doctest.h>
+#include <schema_generated.h>
-#include <string>
+#include <doctest/doctest.h>
namespace
{
@@ -108,7 +106,7 @@ std::vector<char> CreatePackTfLiteModel(tflite::BuiltinOperator packOperatorCode
modelDescription,
flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
- flatBufferBuilder.Finish(flatbufferModel);
+ flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);
return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
@@ -126,7 +124,7 @@ void PackTest(tflite::BuiltinOperator packOperatorCode,
float quantScale = 1.0f,
int quantOffset = 0)
{
- using namespace tflite;
+ using namespace delegateTestInterpreter;
std::vector<char> modelBuffer = CreatePackTfLiteModel(packOperatorCode,
tensorType,
inputShape,
@@ -136,51 +134,35 @@ void PackTest(tflite::BuiltinOperator packOperatorCode,
quantScale,
quantOffset);
- const Model* tfLiteModel = GetModel(modelBuffer.data());
+ // Setup interpreter with just TFLite Runtime.
+ auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
+ CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
- // Create TfLite Interpreters
- std::unique_ptr<Interpreter> armnnDelegateInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&armnnDelegateInterpreter) == kTfLiteOk);
- CHECK(armnnDelegateInterpreter != nullptr);
- CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
-
- std::unique_ptr<Interpreter> tfLiteInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&tfLiteInterpreter) == kTfLiteOk);
- CHECK(tfLiteInterpreter != nullptr);
- CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
-
- // Create the ArmNN Delegate
- armnnDelegate::DelegateOptions delegateOptions(backends);
- std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
- theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
- armnnDelegate::TfLiteArmnnDelegateDelete);
- CHECK(theArmnnDelegate != nullptr);
-
- // Modify armnnDelegateInterpreter to use armnnDelegate
- CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
+ // Setup interpreter with Arm NN Delegate applied.
+ auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+ CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
// Set input data for all input tensors.
for (unsigned int i = 0; i < inputValues.size(); ++i)
{
- // Get single input tensor and assign to interpreters.
auto inputTensorValues = inputValues[i];
- armnnDelegate::FillInput<T>(tfLiteInterpreter, i, inputTensorValues);
- armnnDelegate::FillInput<T>(armnnDelegateInterpreter, i, inputTensorValues);
+ CHECK(tfLiteInterpreter.FillInputTensor<T>(inputTensorValues, i) == kTfLiteOk);
+ CHECK(armnnInterpreter.FillInputTensor<T>(inputTensorValues, i) == kTfLiteOk);
}
- // Run EnqueWorkload
- CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
- CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
+ CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
+ std::vector<T> tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<T>(0);
+ std::vector<int32_t> tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0);
+
+ CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
+ std::vector<T> armnnOutputValues = armnnInterpreter.GetOutputResult<T>(0);
+ std::vector<int32_t> armnnOutputShape = armnnInterpreter.GetOutputShape(0);
- // Compare output data
- armnnDelegate::CompareOutputData<T>(tfLiteInterpreter,
- armnnDelegateInterpreter,
- expectedOutputShape,
- expectedOutputValues);
+ armnnDelegate::CompareOutputData<T>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
+ armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, expectedOutputShape);
- armnnDelegateInterpreter.reset(nullptr);
+ tfLiteInterpreter.Cleanup();
+ armnnInterpreter.Cleanup();
}
} // anonymous namespace
\ No newline at end of file
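Operators with a variable number of inputs, such as PACK, fill every input tensor in a loop before invoking. A small sketch of that loop factored out, assuming all inputs share the element type T; FillAllInputs is an illustrative name, not part of the patch.

// Fill input tensor i of the interpreter with inputValues[i], for every input.
template <typename T>
void FillAllInputs(delegateTestInterpreter::DelegateTestInterpreter& interpreter,
                   std::vector<std::vector<T>>& inputValues)
{
    for (unsigned int i = 0; i < inputValues.size(); ++i)
    {
        CHECK(interpreter.FillInputTensor<T>(inputValues[i], i) == kTfLiteOk);
    }
}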
diff --git a/delegate/test/PadTestHelper.hpp b/delegate/test/PadTestHelper.hpp
index d049c52635..c4bfd89458 100644
--- a/delegate/test/PadTestHelper.hpp
+++ b/delegate/test/PadTestHelper.hpp
@@ -8,14 +8,14 @@
#include "TestUtils.hpp"
#include <armnn_delegate.hpp>
+#include <DelegateTestInterpreter.hpp>
#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
#include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <schema_generated.h>
#include <tensorflow/lite/version.h>
+#include <schema_generated.h>
+
#include <doctest/doctest.h>
namespace
@@ -153,7 +153,7 @@ std::vector<char> CreatePadTfLiteModel(
modelDescription,
flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
- flatBufferBuilder.Finish(flatbufferModel);
+ flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);
return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
@@ -174,7 +174,7 @@ void PadTest(tflite::BuiltinOperator padOperatorCode,
int quantOffset = 0,
tflite::MirrorPadMode paddingMode = tflite::MirrorPadMode_SYMMETRIC)
{
- using namespace tflite;
+ using namespace delegateTestInterpreter;
std::vector<char> modelBuffer = CreatePadTfLiteModel<T>(padOperatorCode,
tensorType,
paddingMode,
@@ -186,39 +186,27 @@ void PadTest(tflite::BuiltinOperator padOperatorCode,
quantScale,
quantOffset);
- const Model* tfLiteModel = GetModel(modelBuffer.data());
- CHECK(tfLiteModel != nullptr);
-
- std::unique_ptr<Interpreter> armnnDelegateInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&armnnDelegateInterpreter) == kTfLiteOk);
- CHECK(armnnDelegateInterpreter != nullptr);
- CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
-
- std::unique_ptr<Interpreter> tfLiteInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&tfLiteInterpreter) == kTfLiteOk);
- CHECK(tfLiteInterpreter != nullptr);
- CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
-
- // Create the ArmNN Delegate
- armnnDelegate::DelegateOptions delegateOptions(backends);
- std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
- theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
- armnnDelegate::TfLiteArmnnDelegateDelete);
- CHECK(theArmnnDelegate != nullptr);
- // Modify armnnDelegateInterpreter to use armnnDelegate
- CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
- // Set input data
- armnnDelegate::FillInput<T>(tfLiteInterpreter, 0, inputValues);
- armnnDelegate::FillInput<T>(armnnDelegateInterpreter, 0, inputValues);
-
- // Run EnqueueWorkload
- CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
- CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
-
- armnnDelegate::CompareOutputData<T>(tfLiteInterpreter, armnnDelegateInterpreter, outputShape, expectedOutputValues);
+ // Setup interpreter with just TFLite Runtime.
+ auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
+ CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
+ CHECK(tfLiteInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
+ CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
+ std::vector<T> tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<T>(0);
+ std::vector<int32_t> tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0);
+
+ // Setup interpreter with Arm NN Delegate applied.
+ auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+ CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
+ CHECK(armnnInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
+ CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
+ std::vector<T> armnnOutputValues = armnnInterpreter.GetOutputResult<T>(0);
+ std::vector<int32_t> armnnOutputShape = armnnInterpreter.GetOutputShape(0);
+
+ armnnDelegate::CompareOutputData<T>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
+ armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, outputShape);
+
+ tfLiteInterpreter.Cleanup();
+ armnnInterpreter.Cleanup();
}
} // anonymous namespace
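Each helper now compares the two runtime-reported shapes against the expected shape via CompareOutputShape, which presumably lives in TestUtils. Purely as an illustration of the intended semantics, and not the actual implementation, an element-wise check could look like this:

// Illustrative only: element-wise agreement between both reported shapes and
// the expected shape.
inline void CompareOutputShapeSketch(const std::vector<int32_t>& tfLiteShape,
                                     const std::vector<int32_t>& armnnShape,
                                     const std::vector<int32_t>& expectedShape)
{
    CHECK(tfLiteShape.size() == expectedShape.size());
    CHECK(armnnShape.size() == expectedShape.size());
    for (size_t i = 0; i < expectedShape.size(); ++i)
    {
        CHECK(tfLiteShape[i] == expectedShape[i]);
        CHECK(armnnShape[i] == expectedShape[i]);
    }
}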
diff --git a/delegate/test/Pooling2dTestHelper.hpp b/delegate/test/Pooling2dTestHelper.hpp
index 6de85b63c5..d08a45b588 100644
--- a/delegate/test/Pooling2dTestHelper.hpp
+++ b/delegate/test/Pooling2dTestHelper.hpp
@@ -8,14 +8,14 @@
#include "TestUtils.hpp"
#include <armnn_delegate.hpp>
+#include <DelegateTestInterpreter.hpp>
#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
#include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <schema_generated.h>
#include <tensorflow/lite/version.h>
+#include <schema_generated.h>
+
#include <doctest/doctest.h>
namespace
@@ -106,7 +106,7 @@ std::vector<char> CreatePooling2dTfLiteModel(
modelDescription,
flatBufferBuilder.CreateVector(buffers, 3));
- flatBufferBuilder.Finish(flatbufferModel);
+ flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);
return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
@@ -129,7 +129,7 @@ void Pooling2dTest(tflite::BuiltinOperator poolingOperatorCode,
float quantScale = 1.0f,
int quantOffset = 0)
{
- using namespace tflite;
+ using namespace delegateTestInterpreter;
std::vector<char> modelBuffer = CreatePooling2dTfLiteModel(poolingOperatorCode,
tensorType,
inputShape,
@@ -143,50 +143,27 @@ void Pooling2dTest(tflite::BuiltinOperator poolingOperatorCode,
quantScale,
quantOffset);
- const Model* tfLiteModel = GetModel(modelBuffer.data());
- CHECK(tfLiteModel != nullptr);
- // Create TfLite Interpreters
- std::unique_ptr<Interpreter> armnnDelegateInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&armnnDelegateInterpreter) == kTfLiteOk);
- CHECK(armnnDelegateInterpreter != nullptr);
- CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
-
- std::unique_ptr<Interpreter> tfLiteInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&tfLiteInterpreter) == kTfLiteOk);
- CHECK(tfLiteInterpreter != nullptr);
- CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
-
- // Create the ArmNN Delegate
- armnnDelegate::DelegateOptions delegateOptions(backends);
- std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
- theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
- armnnDelegate::TfLiteArmnnDelegateDelete);
- CHECK(theArmnnDelegate != nullptr);
- // Modify armnnDelegateInterpreter to use armnnDelegate
- CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
- // Set input data
- auto tfLiteDelegateInputId = tfLiteInterpreter->inputs()[0];
- auto tfLiteDelegateInputData = tfLiteInterpreter->typed_tensor<T>(tfLiteDelegateInputId);
- for (unsigned int i = 0; i < inputValues.size(); ++i)
- {
- tfLiteDelegateInputData[i] = inputValues[i];
- }
-
- auto armnnDelegateInputId = armnnDelegateInterpreter->inputs()[0];
- auto armnnDelegateInputData = armnnDelegateInterpreter->typed_tensor<T>(armnnDelegateInputId);
- for (unsigned int i = 0; i < inputValues.size(); ++i)
- {
- armnnDelegateInputData[i] = inputValues[i];
- }
-
- // Run EnqueueWorkload
- CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
- CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
-
- armnnDelegate::CompareOutputData(tfLiteInterpreter, armnnDelegateInterpreter, outputShape, expectedOutputValues);
+ // Setup interpreter with just TFLite Runtime.
+ auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
+ CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
+ CHECK(tfLiteInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
+ CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
+ std::vector<T> tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<T>(0);
+ std::vector<int32_t> tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0);
+
+ // Setup interpreter with Arm NN Delegate applied.
+ auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+ CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
+ CHECK(armnnInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
+ CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
+ std::vector<T> armnnOutputValues = armnnInterpreter.GetOutputResult<T>(0);
+ std::vector<int32_t> armnnOutputShape = armnnInterpreter.GetOutputShape(0);
+
+ armnnDelegate::CompareOutputData<T>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
+ armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, outputShape);
+
+ tfLiteInterpreter.Cleanup();
+ armnnInterpreter.Cleanup();
}
} // anonymous namespace
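The manual typed_tensor<T>() copy loops these tests used to perform are now hidden behind FillInputTensor<T>. Purely as an assumption about what that wrapper does (the actual DelegateTestInterpreter code may differ), the removed loop is roughly equivalent to:

// Illustrative sketch of a fill helper built on the raw TFLite interpreter API,
// mirroring the removed code above; not the actual DelegateTestInterpreter code.
template <typename T>
TfLiteStatus FillInputTensorSketch(tflite::Interpreter& interpreter,
                                   std::vector<T>& inputValues,
                                   int inputIndex)
{
    int tensorId = interpreter.inputs()[inputIndex];
    T* tensorData = interpreter.typed_tensor<T>(tensorId);
    if (tensorData == nullptr)
    {
        return kTfLiteError;
    }
    for (unsigned int i = 0; i < inputValues.size(); ++i)
    {
        tensorData[i] = inputValues[i];
    }
    return kTfLiteOk;
}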
diff --git a/delegate/test/Pooling3dTestHelper.hpp b/delegate/test/Pooling3dTestHelper.hpp
index dd90e4bb1c..59d2e18228 100644
--- a/delegate/test/Pooling3dTestHelper.hpp
+++ b/delegate/test/Pooling3dTestHelper.hpp
@@ -8,16 +8,16 @@
#include "TestUtils.hpp"
#include <armnn_delegate.hpp>
+#include <DelegateTestInterpreter.hpp>
#include <flatbuffers/flatbuffers.h>
#include <flatbuffers/flexbuffers.h>
-#include <tensorflow/lite/interpreter.h>
-#include <tensorflow/lite/kernels/custom_ops_register.h>
#include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <schema_generated.h>
+#include <tensorflow/lite/kernels/custom_ops_register.h>
#include <tensorflow/lite/version.h>
+#include <schema_generated.h>
+
#include <doctest/doctest.h>
namespace
@@ -131,7 +131,7 @@ std::vector<char> CreatePooling3dTfLiteModel(
modelDescription,
flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
- flatBufferBuilder.Finish(flatbufferModel);
+ flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);
return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
@@ -156,7 +156,7 @@ void Pooling3dTest(std::string poolType,
float quantScale = 1.0f,
int quantOffset = 0)
{
- using namespace tflite;
+ using namespace delegateTestInterpreter;
// Create the single op model buffer
std::vector<char> modelBuffer = CreatePooling3dTfLiteModel(poolType,
tensorType,
@@ -173,79 +173,37 @@ void Pooling3dTest(std::string poolType,
quantScale,
quantOffset);
- const Model* tfLiteModel = GetModel(modelBuffer.data());
- CHECK(tfLiteModel != nullptr);
- // Create TfLite Interpreters
- std::unique_ptr<Interpreter> armnnDelegateInterpreter;
-
- // Custom ops need to be added to the BuiltinOp resolver before the interpreter is created
- // Based on the poolType from the test case add the custom operator using the name and the tflite
- // registration function
- tflite::ops::builtin::BuiltinOpResolver armnn_op_resolver;
+ std::string opType = "";
if (poolType == "kMax")
{
- armnn_op_resolver.AddCustom("MaxPool3D", tflite::ops::custom::Register_MAX_POOL_3D());
+ opType = "MaxPool3D";
}
else
{
- armnn_op_resolver.AddCustom("AveragePool3D", tflite::ops::custom::Register_AVG_POOL_3D());
+ opType = "AveragePool3D";
}
- CHECK(InterpreterBuilder(tfLiteModel, armnn_op_resolver)
- (&armnnDelegateInterpreter) == kTfLiteOk);
- CHECK(armnnDelegateInterpreter != nullptr);
- CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
-
- std::unique_ptr<Interpreter> tfLiteInterpreter;
-
- // Custom ops need to be added to the BuiltinOp resolver before the interpreter is created
- // Based on the poolType from the test case add the custom operator using the name and the tflite
- // registration function
- tflite::ops::builtin::BuiltinOpResolver tflite_op_resolver;
- if (poolType == "kMax")
- {
- tflite_op_resolver.AddCustom("MaxPool3D", tflite::ops::custom::Register_MAX_POOL_3D());
- }
- else
- {
- tflite_op_resolver.AddCustom("AveragePool3D", tflite::ops::custom::Register_AVG_POOL_3D());
- }
-
- CHECK(InterpreterBuilder(tfLiteModel, tflite_op_resolver)
- (&tfLiteInterpreter) == kTfLiteOk);
- CHECK(tfLiteInterpreter != nullptr);
- CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
-
- // Create the ArmNN Delegate
- armnnDelegate::DelegateOptions delegateOptions(backends);
- std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
- theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
- armnnDelegate::TfLiteArmnnDelegateDelete);
- CHECK(theArmnnDelegate != nullptr);
-
- // Modify armnnDelegateInterpreter to use armnnDelegate
- CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
- // Set input data
- auto tfLiteDelegateInputId = tfLiteInterpreter->inputs()[0];
- auto tfLiteDelegateInputData = tfLiteInterpreter->typed_tensor<T>(tfLiteDelegateInputId);
- for (unsigned int i = 0; i < inputValues.size(); ++i)
- {
- tfLiteDelegateInputData[i] = inputValues[i];
- }
-
- auto armnnDelegateInputId = armnnDelegateInterpreter->inputs()[0];
- auto armnnDelegateInputData = armnnDelegateInterpreter->typed_tensor<T>(armnnDelegateInputId);
- for (unsigned int i = 0; i < inputValues.size(); ++i)
- {
- armnnDelegateInputData[i] = inputValues[i];
- }
-
- // Run EnqueueWorkload
- CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
- CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
-
- armnnDelegate::CompareOutputData(tfLiteInterpreter, armnnDelegateInterpreter, outputShape, expectedOutputValues);
+ // Setup interpreter with just TFLite Runtime.
+ auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer, opType);
+ CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
+ CHECK(tfLiteInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
+ CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
+ std::vector<T> tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<T>(0);
+ std::vector<int32_t> tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0);
+
+ // Setup interpreter with Arm NN Delegate applied.
+ auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends, opType);
+ CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
+ CHECK(armnnInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
+ CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
+ std::vector<T> armnnOutputValues = armnnInterpreter.GetOutputResult<T>(0);
+ std::vector<int32_t> armnnOutputShape = armnnInterpreter.GetOutputShape(0);
+
+ armnnDelegate::CompareOutputData<T>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
+ armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, outputShape);
+
+ tfLiteInterpreter.Cleanup();
+ armnnInterpreter.Cleanup();
}
// Function to create the flexbuffer custom options for the custom pooling3d operator.
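Pooling3D is exercised through TFLite custom operators, so the interpreter is now constructed with the custom operator name (opType) and is assumed to register it internally, much as the removed code did explicitly:

// Registration the old code performed, and which the interpreter is assumed to
// perform when given a custom operator name (sketch, not the actual
// DelegateTestInterpreter code).
tflite::ops::builtin::BuiltinOpResolver resolver;
if (opType == "MaxPool3D")
{
    resolver.AddCustom("MaxPool3D", tflite::ops::custom::Register_MAX_POOL_3D());
}
else
{
    resolver.AddCustom("AveragePool3D", tflite::ops::custom::Register_AVG_POOL_3D());
}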
diff --git a/delegate/test/PreluTest.cpp b/delegate/test/PreluTest.cpp
index 40bf1dda56..f65e15bb97 100644
--- a/delegate/test/PreluTest.cpp
+++ b/delegate/test/PreluTest.cpp
@@ -18,7 +18,8 @@
namespace armnnDelegate {
-void PreluFloatSimpleTest(std::vector <armnn::BackendId>& backends, bool isAlphaConst, bool isDynamicOutput = false) {
+void PreluFloatSimpleTest(std::vector <armnn::BackendId>& backends, bool isAlphaConst, bool isDynamicOutput = false)
+{
std::vector<int32_t> inputShape { 1, 2, 3 };
std::vector<int32_t> alphaShape { 1 };
std::vector<int32_t> outputShape { 1, 2, 3 };
diff --git a/delegate/test/PreluTestHelper.hpp b/delegate/test/PreluTestHelper.hpp
index 0721c139ac..c2a9435d0c 100644
--- a/delegate/test/PreluTestHelper.hpp
+++ b/delegate/test/PreluTestHelper.hpp
@@ -8,14 +8,14 @@
#include "TestUtils.hpp"
#include <armnn_delegate.hpp>
+#include <DelegateTestInterpreter.hpp>
#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
#include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <schema_generated.h>
#include <tensorflow/lite/version.h>
+#include <schema_generated.h>
+
#include <doctest/doctest.h>
namespace
@@ -107,7 +107,7 @@ std::vector<char> CreatePreluTfLiteModel(tflite::BuiltinOperator preluOperatorCo
modelDescription,
flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
- flatBufferBuilder.Finish(flatbufferModel);
+ flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);
return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
@@ -124,7 +124,7 @@ void PreluTest(tflite::BuiltinOperator preluOperatorCode,
std::vector<float>& expectedOutput,
bool alphaIsConstant)
{
- using namespace tflite;
+ using namespace delegateTestInterpreter;
std::vector<char> modelBuffer = CreatePreluTfLiteModel(preluOperatorCode,
tensorType,
@@ -134,62 +134,42 @@ void PreluTest(tflite::BuiltinOperator preluOperatorCode,
alphaData,
alphaIsConstant);
- const Model* tfLiteModel = GetModel(modelBuffer.data());
-
- CHECK(tfLiteModel != nullptr);
-
- std::unique_ptr<Interpreter> armnnDelegateInterpreter;
-
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&armnnDelegateInterpreter) == kTfLiteOk);
- CHECK(armnnDelegateInterpreter != nullptr);
- CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
- std::unique_ptr<Interpreter> tfLiteInterpreter;
+ // Setup interpreter with just TFLite Runtime.
+ auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
+ CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&tfLiteInterpreter) == kTfLiteOk);
- CHECK(tfLiteInterpreter != nullptr);
- CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
+ // Setup interpreter with Arm NN Delegate applied.
+ auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+ CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
- // Create the ArmNN Delegate
- armnnDelegate::DelegateOptions delegateOptions(backends);
-
- std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
- theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
- armnnDelegate::TfLiteArmnnDelegateDelete);
- CHECK(theArmnnDelegate != nullptr);
-
- // Modify armnnDelegateInterpreter to use armnnDelegate
- CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
- // Set input data
- armnnDelegate::FillInput<float>(tfLiteInterpreter, 0, inputData);
- armnnDelegate::FillInput<float>(armnnDelegateInterpreter, 0, inputData);
+ CHECK(armnnInterpreter.FillInputTensor<float>(inputData, 0) == kTfLiteOk);
+ CHECK(tfLiteInterpreter.FillInputTensor<float>(inputData, 0) == kTfLiteOk);
// Set alpha data if not constant
- if (!alphaIsConstant) {
- armnnDelegate::FillInput<float>(tfLiteInterpreter, 1, alphaData);
- armnnDelegate::FillInput<float>(armnnDelegateInterpreter, 1, alphaData);
+ if (!alphaIsConstant)
+ {
+ CHECK(tfLiteInterpreter.FillInputTensor<float>(alphaData, 1) == kTfLiteOk);
+ CHECK(armnnInterpreter.FillInputTensor<float>(alphaData, 1) == kTfLiteOk);
}
- // Run EnqueueWorkload
- CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
- CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
-
- // Compare output data
- auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[0];
+ CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
+ std::vector<float> tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<float>(0);
- auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<float>(tfLiteDelegateOutputId);
+ CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
+ std::vector<float> armnnOutputValues = armnnInterpreter.GetOutputResult<float>(0);
- auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0];
- auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<float>(armnnDelegateOutputId);
+ armnnDelegate::CompareOutputData<float>(tfLiteOutputValues, armnnOutputValues, expectedOutput);
- for (size_t i = 0; i < expectedOutput.size(); i++)
+ // Don't compare shapes on dynamic output tests, as output shape gets cleared.
+ if(!outputShape.empty())
{
- CHECK(expectedOutput[i] == armnnDelegateOutputData[i]);
- CHECK(tfLiteDelegateOutputData[i] == expectedOutput[i]);
- CHECK(tfLiteDelegateOutputData[i] == armnnDelegateOutputData[i]);
+ std::vector<int32_t> tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0);
+ std::vector<int32_t> armnnOutputShape = armnnInterpreter.GetOutputShape(0);
+ armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, outputShape);
}
+
+ tfLiteInterpreter.Cleanup();
+ armnnInterpreter.Cleanup();
}
} // anonymous namespace
\ No newline at end of file
diff --git a/delegate/test/QuantizationTestHelper.hpp b/delegate/test/QuantizationTestHelper.hpp
index af898f332d..8554a01967 100644
--- a/delegate/test/QuantizationTestHelper.hpp
+++ b/delegate/test/QuantizationTestHelper.hpp
@@ -5,15 +5,17 @@
#pragma once
+#include "TestUtils.hpp"
+
#include <armnn_delegate.hpp>
+#include <DelegateTestInterpreter.hpp>
#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
#include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <schema_generated.h>
#include <tensorflow/lite/version.h>
+#include <schema_generated.h>
+
#include <doctest/doctest.h>
namespace
@@ -112,7 +114,7 @@ std::vector<char> CreateQuantizationTfLiteModel(tflite::BuiltinOperator quantiza
modelDescription,
flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
- flatBufferBuilder.Finish(flatbufferModel);
+ flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);
return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
@@ -130,7 +132,7 @@ void QuantizationTest(tflite::BuiltinOperator quantizeOperatorCode,
float quantScale = 1.0f,
int quantOffset = 0)
{
- using namespace tflite;
+ using namespace delegateTestInterpreter;
std::vector<char> modelBuffer = CreateQuantizationTfLiteModel(quantizeOperatorCode,
inputTensorType,
outputTensorType,
@@ -139,62 +141,27 @@ void QuantizationTest(tflite::BuiltinOperator quantizeOperatorCode,
quantScale,
quantOffset);
- const Model* tfLiteModel = GetModel(modelBuffer.data());
-
- // Create TfLite Interpreters
- std::unique_ptr<Interpreter> armnnDelegateInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&armnnDelegateInterpreter) == kTfLiteOk);
- CHECK(armnnDelegateInterpreter != nullptr);
- CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
-
- std::unique_ptr<Interpreter> tfLiteInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&tfLiteInterpreter) == kTfLiteOk);
- CHECK(tfLiteInterpreter != nullptr);
- CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
-
- // Create the ArmNN Delegate
- armnnDelegate::DelegateOptions delegateOptions(backends);
- std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
- theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
- armnnDelegate::TfLiteArmnnDelegateDelete);
- CHECK(theArmnnDelegate != nullptr);
-
- // Modify armnnDelegateInterpreter to use armnnDelegate
- CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
- // Set input data
- auto tfLiteDelegateInputId = tfLiteInterpreter->inputs()[0];
- auto tfLiteDelageInputData = tfLiteInterpreter->typed_tensor<InputT>(tfLiteDelegateInputId);
- for (unsigned int i = 0; i < inputValues.size(); ++i)
- {
- tfLiteDelageInputData[i] = inputValues[i];
- }
-
- auto armnnDelegateInputId = armnnDelegateInterpreter->inputs()[0];
- auto armnnDelegateInputData = armnnDelegateInterpreter->typed_tensor<InputT>(armnnDelegateInputId);
- for (unsigned int i = 0; i < inputValues.size(); ++i)
- {
- armnnDelegateInputData[i] = inputValues[i];
- }
-
- // Run EnqueWorkload
- CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
- CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
-
- // Compare output data
- auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[0];
- auto tfLiteDelageOutputData = tfLiteInterpreter->typed_tensor<OutputT>(tfLiteDelegateOutputId);
- auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0];
- auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<OutputT>(armnnDelegateOutputId);
-
- for (size_t i = 0; i < expectedOutputValues.size(); i++)
- {
- CHECK(expectedOutputValues[i] == armnnDelegateOutputData[i]);
- CHECK(tfLiteDelageOutputData[i] == expectedOutputValues[i]);
- CHECK(tfLiteDelageOutputData[i] == armnnDelegateOutputData[i]);
- }
+ // Setup interpreter with just TFLite Runtime.
+ auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
+ CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
+ CHECK(tfLiteInterpreter.FillInputTensor(inputValues, 0) == kTfLiteOk);
+ CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
+ std::vector<OutputT> tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<OutputT>(0);
+ std::vector<int32_t> tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0);
+
+ // Setup interpreter with Arm NN Delegate applied.
+ auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+ CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
+ CHECK(armnnInterpreter.FillInputTensor(inputValues, 0) == kTfLiteOk);
+ CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
+ std::vector<OutputT> armnnOutputValues = armnnInterpreter.GetOutputResult<OutputT>(0);
+ std::vector<int32_t> armnnOutputShape = armnnInterpreter.GetOutputShape(0);
+
+ armnnDelegate::CompareOutputData<OutputT>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
+ armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, outputShape);
+
+ tfLiteInterpreter.Cleanup();
+ armnnInterpreter.Cleanup();
}
} // anonymous namespace
\ No newline at end of file
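The quantScale and quantOffset parameters describe the usual affine quantisation mapping, real ≈ scale * (quantised - zeroPoint). A small self-contained sketch of that mapping, with illustrative names only:

#include <cmath>
#include <cstdint>

// Real value represented by a quantised value under (scale, zeroPoint).
inline float DequantizeSketch(int32_t quantised, float scale, int32_t zeroPoint)
{
    return scale * static_cast<float>(quantised - zeroPoint);
}

// Nearest quantised value for a real value under (scale, zeroPoint).
inline int32_t QuantizeSketch(float real, float scale, int32_t zeroPoint)
{
    return static_cast<int32_t>(std::round(real / scale)) + zeroPoint;
}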
diff --git a/delegate/test/RedefineTestHelper.hpp b/delegate/test/RedefineTestHelper.hpp
index ce60db0664..80631ccf8d 100644
--- a/delegate/test/RedefineTestHelper.hpp
+++ b/delegate/test/RedefineTestHelper.hpp
@@ -8,14 +8,14 @@
#include "TestUtils.hpp"
#include <armnn_delegate.hpp>
+#include <DelegateTestInterpreter.hpp>
#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
#include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <schema_generated.h>
#include <tensorflow/lite/version.h>
+#include <schema_generated.h>
+
#include <doctest/doctest.h>
namespace
@@ -135,7 +135,7 @@ std::vector<char> CreateRedefineTfLiteModel(
modelDescription,
flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
- flatBufferBuilder.Finish(flatbufferModel);
+ flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);
return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
@@ -154,7 +154,7 @@ void RedefineTest(tflite::BuiltinOperator redefineOperatorCode,
float quantScale = 1.0f,
int quantOffset = 0)
{
- using namespace tflite;
+ using namespace delegateTestInterpreter;
std::vector<char> modelBuffer = CreateRedefineTfLiteModel(redefineOperatorCode,
tensorType,
inputShape,
@@ -164,39 +164,27 @@ void RedefineTest(tflite::BuiltinOperator redefineOperatorCode,
quantScale,
quantOffset);
- const Model* tfLiteModel = GetModel(modelBuffer.data());
- CHECK(tfLiteModel != nullptr);
- // Create TfLite Interpreters
- std::unique_ptr<Interpreter> armnnDelegateInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&armnnDelegateInterpreter) == kTfLiteOk);
- CHECK(armnnDelegateInterpreter != nullptr);
- CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
-
- std::unique_ptr<Interpreter> tfLiteInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&tfLiteInterpreter) == kTfLiteOk);
- CHECK(tfLiteInterpreter != nullptr);
- CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
-
- // Create the ArmNN Delegate
- armnnDelegate::DelegateOptions delegateOptions(backends);
- std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
- theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
- armnnDelegate::TfLiteArmnnDelegateDelete);
- CHECK(theArmnnDelegate != nullptr);
- // Modify armnnDelegateInterpreter to use armnnDelegate
- CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
- // Set input data
- armnnDelegate::FillInput<T>(tfLiteInterpreter, 0, inputValues);
- armnnDelegate::FillInput<T>(armnnDelegateInterpreter, 0, inputValues);
-
- // Run EnqueueWorkload
- CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
- CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
-
- armnnDelegate::CompareOutputData<T>(tfLiteInterpreter, armnnDelegateInterpreter, outputShape, expectedOutputValues);
+ // Setup interpreter with just TFLite Runtime.
+ auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
+ CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
+ CHECK(tfLiteInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
+ CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
+ std::vector<T> tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<T>(0);
+ std::vector<int32_t> tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0);
+
+ // Setup interpreter with Arm NN Delegate applied.
+ auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+ CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
+ CHECK(armnnInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
+ CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
+ std::vector<T> armnnOutputValues = armnnInterpreter.GetOutputResult<T>(0);
+ std::vector<int32_t> armnnOutputShape = armnnInterpreter.GetOutputShape(0);
+
+ armnnDelegate::CompareOutputData<T>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
+ armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, outputShape);
+
+ tfLiteInterpreter.Cleanup();
+ armnnInterpreter.Cleanup();
}
} // anonymous namespace
\ No newline at end of file
diff --git a/delegate/test/ReduceTestHelper.hpp b/delegate/test/ReduceTestHelper.hpp
index fedf7ee150..a268981865 100644
--- a/delegate/test/ReduceTestHelper.hpp
+++ b/delegate/test/ReduceTestHelper.hpp
@@ -8,17 +8,15 @@
#include "TestUtils.hpp"
#include <armnn_delegate.hpp>
+#include <DelegateTestInterpreter.hpp>
#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
#include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <schema_generated.h>
#include <tensorflow/lite/version.h>
-#include <doctest/doctest.h>
+#include <schema_generated.h>
-#include <string>
+#include <doctest/doctest.h>
namespace
{
@@ -140,7 +138,7 @@ std::vector<char> CreateReduceTfLiteModel(tflite::BuiltinOperator reduceOperator
modelDescription,
flatBufferBuilder.CreateVector(buffers, 4));
- flatBufferBuilder.Finish(flatbufferModel);
+ flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);
return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
@@ -160,7 +158,7 @@ void ReduceTest(tflite::BuiltinOperator reduceOperatorCode,
float quantScale = 1.0f,
int quantOffset = 0)
{
- using namespace tflite;
+ using namespace delegateTestInterpreter;
std::vector<char> modelBufferArmNN = CreateReduceTfLiteModel(reduceOperatorCode,
tensorType,
input0Shape,
@@ -182,47 +180,27 @@ void ReduceTest(tflite::BuiltinOperator reduceOperatorCode,
quantOffset,
true);
- const Model* tfLiteModelArmNN = GetModel(modelBufferArmNN.data());
- const Model* tfLiteModelTFLite = GetModel(modelBufferTFLite.data());
-
- // Create TfLite Interpreters
- std::unique_ptr<Interpreter> armnnDelegateInterpreter;
- CHECK(InterpreterBuilder(tfLiteModelArmNN, ::tflite::ops::builtin::BuiltinOpResolver())
- (&armnnDelegateInterpreter) == kTfLiteOk);
- CHECK(armnnDelegateInterpreter != nullptr);
- CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
-
- std::unique_ptr<Interpreter> tfLiteInterpreter;
- CHECK(InterpreterBuilder(tfLiteModelTFLite, ::tflite::ops::builtin::BuiltinOpResolver())
- (&tfLiteInterpreter) == kTfLiteOk);
- CHECK(tfLiteInterpreter != nullptr);
- CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
-
- // Create the ArmNN Delegate
- armnnDelegate::DelegateOptions delegateOptions(backends);
- std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
- theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
- armnnDelegate::TfLiteArmnnDelegateDelete);
- CHECK(theArmnnDelegate != nullptr);
-
- // Modify armnnDelegateInterpreter to use armnnDelegate
- CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
- // Set input data
- armnnDelegate::FillInput<T>(tfLiteInterpreter, 0, input0Values);
- armnnDelegate::FillInput<T>(armnnDelegateInterpreter, 0, input0Values);
-
- // Run EnqueWorkload
- CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
- CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
-
- // Compare output data
- armnnDelegate::CompareOutputData<T>(tfLiteInterpreter,
- armnnDelegateInterpreter,
- expectedOutputShape,
- expectedOutputValues);
-
- armnnDelegateInterpreter.reset(nullptr);
+ // Setup interpreter with just TFLite Runtime.
+ auto tfLiteInterpreter = DelegateTestInterpreter(modelBufferTFLite);
+ CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
+ CHECK(tfLiteInterpreter.FillInputTensor<T>(input0Values, 0) == kTfLiteOk);
+ CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
+ std::vector<T> tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<T>(0);
+ std::vector<int32_t> tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0);
+
+ // Setup interpreter with Arm NN Delegate applied.
+ auto armnnInterpreter = DelegateTestInterpreter(modelBufferArmNN, backends);
+ CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
+ CHECK(armnnInterpreter.FillInputTensor<T>(input0Values, 0) == kTfLiteOk);
+ CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
+ std::vector<T> armnnOutputValues = armnnInterpreter.GetOutputResult<T>(0);
+ std::vector<int32_t> armnnOutputShape = armnnInterpreter.GetOutputShape(0);
+
+ armnnDelegate::CompareOutputData<T>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
+ armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, expectedOutputShape);
+
+ tfLiteInterpreter.Cleanup();
+ armnnInterpreter.Cleanup();
}
} // anonymous namespace
\ No newline at end of file
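For reference, every helper converted below settles on the same test-body pattern; a minimal sketch of that pattern, assuming a float model buffer and a backends vector such as { armnn::Compute::CpuRef } (an illustrative recap of the diff, not an extra change in this commit):

    using namespace delegateTestInterpreter;

    // Run the model buffer on the stock TFLite runtime first.
    auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
    CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
    CHECK(tfLiteInterpreter.FillInputTensor<float>(inputValues, 0) == kTfLiteOk);
    CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
    std::vector<float>   tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<float>(0);
    std::vector<int32_t> tfLiteOutputShape  = tfLiteInterpreter.GetOutputShape(0);

    // Run the same buffer again with the Arm NN delegate applied via the backends argument.
    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
    CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
    CHECK(armnnInterpreter.FillInputTensor<float>(inputValues, 0) == kTfLiteOk);
    CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
    std::vector<float>   armnnOutputValues = armnnInterpreter.GetOutputResult<float>(0);
    std::vector<int32_t> armnnOutputShape  = armnnInterpreter.GetOutputShape(0);

    // Compare both runs against each other and against the expected results.
    armnnDelegate::CompareOutputData<float>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
    armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, expectedOutputShape);

    tfLiteInterpreter.Cleanup();
    armnnInterpreter.Cleanup();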
diff --git a/delegate/test/ResizeTest.cpp b/delegate/test/ResizeTest.cpp
index 20113875a8..f3bfe43143 100644
--- a/delegate/test/ResizeTest.cpp
+++ b/delegate/test/ResizeTest.cpp
@@ -42,7 +42,7 @@ void ResizeBiliniarFloat32Test(std::vector<armnn::BackendId>& backends)
const std::vector<int32_t> input1Shape { 1, 3, 3, 1 };
const std::vector<int32_t> input2Shape { 2 };
- const std::vector<int32_t> expectedOutputShape = input2NewShape;
+ const std::vector<int32_t> expectedOutputShape = { 1, 5, 5, 1 };
ResizeFP32TestImpl(tflite::BuiltinOperator_RESIZE_BILINEAR,
backends,
@@ -66,7 +66,7 @@ void ResizeNearestNeighbourFloat32Test(std::vector<armnn::BackendId>& backends)
const std::vector<int32_t> input1Shape { 1, 2, 2, 1 };
const std::vector<int32_t> input2Shape { 2 };
- const std::vector<int32_t> expectedOutputShape = input2NewShape;
+ const std::vector<int32_t> expectedOutputShape = { 1, 1, 1, 1 };
ResizeFP32TestImpl(tflite::BuiltinOperator_RESIZE_NEAREST_NEIGHBOR,
backends,
diff --git a/delegate/test/ResizeTestHelper.hpp b/delegate/test/ResizeTestHelper.hpp
index ab7de14612..ff0c413fbf 100644
--- a/delegate/test/ResizeTestHelper.hpp
+++ b/delegate/test/ResizeTestHelper.hpp
@@ -8,14 +8,14 @@
#include "TestUtils.hpp"
#include <armnn_delegate.hpp>
+#include <DelegateTestInterpreter.hpp>
#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
#include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <schema_generated.h>
#include <tensorflow/lite/version.h>
+#include <schema_generated.h>
+
#include <doctest/doctest.h>
namespace
@@ -113,7 +113,7 @@ std::vector<char> CreateResizeTfLiteModel(tflite::BuiltinOperator operatorCode,
modelDescription,
flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
- flatBufferBuilder.Finish(flatbufferModel);
+ flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);
return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
@@ -128,7 +128,7 @@ void ResizeFP32TestImpl(tflite::BuiltinOperator operatorCode,
std::vector<float>& expectedOutputValues,
std::vector<int32_t> expectedOutputShape)
{
- using namespace tflite;
+ using namespace delegateTestInterpreter;
std::vector<char> modelBuffer = CreateResizeTfLiteModel(operatorCode,
::tflite::TensorType_FLOAT32,
@@ -137,58 +137,29 @@ void ResizeFP32TestImpl(tflite::BuiltinOperator operatorCode,
input2Shape,
expectedOutputShape);
- const Model* tfLiteModel = GetModel(modelBuffer.data());
-
- // The model will be executed using tflite and using the armnn delegate so that the outputs
- // can be compared.
-
- // Create TfLite Interpreter with armnn delegate
- std::unique_ptr<Interpreter> armnnDelegateInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&armnnDelegateInterpreter) == kTfLiteOk);
- CHECK(armnnDelegateInterpreter != nullptr);
- CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
-
- // Create TfLite Interpreter without armnn delegate
- std::unique_ptr<Interpreter> tfLiteInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&tfLiteInterpreter) == kTfLiteOk);
- CHECK(tfLiteInterpreter != nullptr);
- CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
-
- // Create the ArmNN Delegate
- armnnDelegate::DelegateOptions delegateOptions(backends);
- std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
- theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
- armnnDelegate::TfLiteArmnnDelegateDelete);
- CHECK(theArmnnDelegate != nullptr);
- // Modify armnnDelegateInterpreter to use armnnDelegate
- CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
- // Set input data for the armnn interpreter
- armnnDelegate::FillInput(armnnDelegateInterpreter, 0, input1Values);
- armnnDelegate::FillInput(armnnDelegateInterpreter, 1, input2NewShape);
-
- // Set input data for the tflite interpreter
- armnnDelegate::FillInput(tfLiteInterpreter, 0, input1Values);
- armnnDelegate::FillInput(tfLiteInterpreter, 1, input2NewShape);
-
- // Run EnqueWorkload
- CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
- CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
-
- // Compare output data
- auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[0];
- auto tfLiteDelageOutputData = tfLiteInterpreter->typed_tensor<float>(tfLiteDelegateOutputId);
- auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0];
- auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<float>(armnnDelegateOutputId);
- for (size_t i = 0; i < expectedOutputValues.size(); i++)
- {
- CHECK(expectedOutputValues[i] == doctest::Approx(armnnDelegateOutputData[i]));
- CHECK(armnnDelegateOutputData[i] == doctest::Approx(tfLiteDelageOutputData[i]));
- }
-
- armnnDelegateInterpreter.reset(nullptr);
+ // Setup interpreter with just TFLite Runtime.
+ auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
+ CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
+ CHECK(tfLiteInterpreter.FillInputTensor<float>(input1Values, 0) == kTfLiteOk);
+ CHECK(tfLiteInterpreter.FillInputTensor<int32_t>(input2NewShape, 1) == kTfLiteOk);
+ CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
+ std::vector<float> tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<float>(0);
+ std::vector<int32_t> tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0);
+
+ // Setup interpreter with Arm NN Delegate applied.
+ auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+ CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
+ CHECK(armnnInterpreter.FillInputTensor<float>(input1Values, 0) == kTfLiteOk);
+ CHECK(armnnInterpreter.FillInputTensor<int32_t>(input2NewShape, 1) == kTfLiteOk);
+ CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
+ std::vector<float> armnnOutputValues = armnnInterpreter.GetOutputResult<float>(0);
+ std::vector<int32_t> armnnOutputShape = armnnInterpreter.GetOutputShape(0);
+
+ armnnDelegate::CompareOutputData<float>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
+ armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, expectedOutputShape);
+
+ tfLiteInterpreter.Cleanup();
+ armnnInterpreter.Cleanup();
}
} // anonymous namespace
\ No newline at end of file
diff --git a/delegate/test/RoundTestHelper.hpp b/delegate/test/RoundTestHelper.hpp
index dc14abf6e3..3aa066b8f6 100644
--- a/delegate/test/RoundTestHelper.hpp
+++ b/delegate/test/RoundTestHelper.hpp
@@ -8,14 +8,14 @@
#include "TestUtils.hpp"
#include <armnn_delegate.hpp>
+#include <DelegateTestInterpreter.hpp>
#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
#include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <schema_generated.h>
#include <tensorflow/lite/version.h>
+#include <schema_generated.h>
+
#include <doctest/doctest.h>
namespace
@@ -94,7 +94,7 @@ std::vector<char> CreateRoundTfLiteModel(tflite::BuiltinOperator roundOperatorCo
modelDescription,
flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
- flatBufferBuilder.Finish(flatbufferModel);
+ flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);
return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
}
@@ -109,55 +109,34 @@ void RoundTest(tflite::BuiltinOperator roundOperatorCode,
float quantScale = 1.0f,
int quantOffset = 0)
{
- using namespace tflite;
+ using namespace delegateTestInterpreter;
std::vector<char> modelBuffer = CreateRoundTfLiteModel(roundOperatorCode,
tensorType,
shape,
quantScale,
quantOffset);
- const Model* tfLiteModel = GetModel(modelBuffer.data());
-
- // Create TfLite Interpreters
- std::unique_ptr<Interpreter> armnnDelegate;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&armnnDelegate) == kTfLiteOk);
- CHECK(armnnDelegate != nullptr);
- CHECK(armnnDelegate->AllocateTensors() == kTfLiteOk);
-
- std::unique_ptr<Interpreter> tfLiteDelegate;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&tfLiteDelegate) == kTfLiteOk);
- CHECK(tfLiteDelegate != nullptr);
- CHECK(tfLiteDelegate->AllocateTensors() == kTfLiteOk);
-
- // Create the ArmNN Delegate
- armnnDelegate::DelegateOptions delegateOptions(backends);
- std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
- theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
- armnnDelegate::TfLiteArmnnDelegateDelete);
- CHECK(theArmnnDelegate != nullptr);
-
- // Modify armnnDelegateInterpreter to use armnnDelegate
- CHECK(armnnDelegate->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
- // Set input data
- armnnDelegate::FillInput<T>(tfLiteDelegate, 0, inputValues);
- armnnDelegate::FillInput<T>(armnnDelegate, 0, inputValues);
-
- // Run EnqueWorkload
- CHECK(tfLiteDelegate->Invoke() == kTfLiteOk);
- CHECK(armnnDelegate->Invoke() == kTfLiteOk);
-
- // Compare output data
- armnnDelegate::CompareOutputData<T>(tfLiteDelegate,
- armnnDelegate,
- shape,
- expectedOutputValues,
- 0);
-
- tfLiteDelegate.reset(nullptr);
- armnnDelegate.reset(nullptr);
+ // Setup interpreter with just TFLite Runtime.
+ auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
+ CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
+ CHECK(tfLiteInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
+ CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
+ std::vector<T> tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<T>(0);
+ std::vector<int32_t> tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0);
+
+ // Setup interpreter with Arm NN Delegate applied.
+ auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+ CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
+ CHECK(armnnInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
+ CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
+ std::vector<T> armnnOutputValues = armnnInterpreter.GetOutputResult<T>(0);
+ std::vector<int32_t> armnnOutputShape = armnnInterpreter.GetOutputShape(0);
+
+ armnnDelegate::CompareOutputData<T>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
+ armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, shape);
+
+ tfLiteInterpreter.Cleanup();
+ armnnInterpreter.Cleanup();
}
} // anonymous namespace
diff --git a/delegate/test/ShapeTestHelper.hpp b/delegate/test/ShapeTestHelper.hpp
index 54e27ac8fd..42f258b00b 100644
--- a/delegate/test/ShapeTestHelper.hpp
+++ b/delegate/test/ShapeTestHelper.hpp
@@ -8,14 +8,14 @@
#include "TestUtils.hpp"
#include <armnn_delegate.hpp>
+#include <DelegateTestInterpreter.hpp>
#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
#include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <schema_generated.h>
#include <tensorflow/lite/version.h>
+#include <schema_generated.h>
+
#include <doctest/doctest.h>
namespace
@@ -97,7 +97,7 @@ std::vector<char> CreateShapeTfLiteModel(tflite::TensorType inputTensorType,
modelDescription,
flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
- flatBufferBuilder.Finish(flatbufferModel);
+ flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);
return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
@@ -114,7 +114,7 @@ void ShapeTest(tflite::TensorType inputTensorType,
float quantScale = 1.0f,
int quantOffset = 0)
{
- using namespace tflite;
+ using namespace delegateTestInterpreter;
std::vector<char> modelBuffer = CreateShapeTfLiteModel(inputTensorType,
outputTensorType,
inputShape,
@@ -122,52 +122,25 @@ void ShapeTest(tflite::TensorType inputTensorType,
quantScale,
quantOffset);
- const Model* tfLiteModel = GetModel(modelBuffer.data());
-
- // Create TfLite Interpreters
- std::unique_ptr<Interpreter> armnnDelegate;
-
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&armnnDelegate) == kTfLiteOk);
- CHECK(armnnDelegate != nullptr);
- CHECK(armnnDelegate->AllocateTensors() == kTfLiteOk);
-
- std::unique_ptr<Interpreter> tfLiteDelegate;
-
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&tfLiteDelegate) == kTfLiteOk);
- CHECK(tfLiteDelegate != nullptr);
- CHECK(tfLiteDelegate->AllocateTensors() == kTfLiteOk);
-
- // Create the ArmNN Delegate
- armnnDelegate::DelegateOptions delegateOptions(backends);
-
- std::unique_ptr < TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete) >
- theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
- armnnDelegate::TfLiteArmnnDelegateDelete);
-
- CHECK(theArmnnDelegate != nullptr);
-
- // Modify armnnDelegateInterpreter to use armnnDelegate
- CHECK(armnnDelegate->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
- // Set input data
- armnnDelegate::FillInput<T>(tfLiteDelegate, 0, inputValues);
- armnnDelegate::FillInput<T>(armnnDelegate, 0, inputValues);
-
- // Run EnqueWorkload
- CHECK(tfLiteDelegate->Invoke() == kTfLiteOk);
- CHECK(armnnDelegate->Invoke() == kTfLiteOk);
-
- // Compare output data
- armnnDelegate::CompareOutputData<K>(tfLiteDelegate,
- armnnDelegate,
- expectedOutputShape,
- expectedOutputValues,
- 0);
-
- tfLiteDelegate.reset(nullptr);
- armnnDelegate.reset(nullptr);
+ // Setup interpreter with just TFLite Runtime.
+ auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
+ CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
+ CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
+ std::vector<K> tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<K>(0);
+ std::vector<int32_t> tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0);
+
+ // Setup interpreter with Arm NN Delegate applied.
+ auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+ CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
+ CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
+ std::vector<K> armnnOutputValues = armnnInterpreter.GetOutputResult<K>(0);
+ std::vector<int32_t> armnnOutputShape = armnnInterpreter.GetOutputShape(0);
+
+ armnnDelegate::CompareOutputData<K>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
+ armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, expectedOutputShape);
+
+ tfLiteInterpreter.Cleanup();
+ armnnInterpreter.Cleanup();
}
} // anonymous namespace
diff --git a/delegate/test/SliceTestHelper.hpp b/delegate/test/SliceTestHelper.hpp
index c938fad31b..19f2b3d8ea 100644
--- a/delegate/test/SliceTestHelper.hpp
+++ b/delegate/test/SliceTestHelper.hpp
@@ -8,18 +8,15 @@
#include "TestUtils.hpp"
#include <armnn_delegate.hpp>
-#include <armnn/DescriptorsFwd.hpp>
+#include <DelegateTestInterpreter.hpp>
#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
#include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <schema_generated.h>
#include <tensorflow/lite/version.h>
-#include <doctest/doctest.h>
+#include <schema_generated.h>
-#include <string>
+#include <doctest/doctest.h>
namespace
{
@@ -110,7 +107,7 @@ std::vector<char> CreateSliceTfLiteModel(tflite::TensorType tensorType,
modelDescription,
flatBufferBuilder.CreateVector(buffers, 5));
- flatBufferBuilder.Finish(flatbufferModel);
+ flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);
return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
@@ -127,7 +124,7 @@ void SliceTestImpl(std::vector<armnn::BackendId>& backends,
std::vector<int32_t>& sizeTensorShape,
std::vector<int32_t>& outputTensorShape)
{
- using namespace tflite;
+ using namespace delegateTestInterpreter;
std::vector<char> modelBuffer = CreateSliceTfLiteModel(
::tflite::TensorType_FLOAT32,
inputTensorShape,
@@ -137,47 +134,27 @@ void SliceTestImpl(std::vector<armnn::BackendId>& backends,
sizeTensorShape,
outputTensorShape);
- auto tfLiteModel = GetModel(modelBuffer.data());
-
- // Create TfLite Interpreters
- std::unique_ptr<Interpreter> armnnDelegate;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&armnnDelegate) == kTfLiteOk);
- CHECK(armnnDelegate != nullptr);
- CHECK(armnnDelegate->AllocateTensors() == kTfLiteOk);
-
- std::unique_ptr<Interpreter> tfLiteDelegate;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&tfLiteDelegate) == kTfLiteOk);
- CHECK(tfLiteDelegate != nullptr);
- CHECK(tfLiteDelegate->AllocateTensors() == kTfLiteOk);
-
- // Create the ArmNN Delegate
- armnnDelegate::DelegateOptions delegateOptions(backends);
- std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
- theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
- armnnDelegate::TfLiteArmnnDelegateDelete);
- CHECK(theArmnnDelegate != nullptr);
-
- // Modify armnnDelegateInterpreter to use armnnDelegate
- CHECK(armnnDelegate->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
- // Set input data
- armnnDelegate::FillInput<T>(tfLiteDelegate, 0, inputValues);
- armnnDelegate::FillInput<T>(armnnDelegate, 0, inputValues);
-
- // Run EnqueWorkload
- CHECK(tfLiteDelegate->Invoke() == kTfLiteOk);
- CHECK(armnnDelegate->Invoke() == kTfLiteOk);
-
- // Compare output data
- armnnDelegate::CompareOutputData<T>(tfLiteDelegate,
- armnnDelegate,
- outputTensorShape,
- expectedOutputValues);
-
- tfLiteDelegate.reset(nullptr);
- armnnDelegate.reset(nullptr);
+ // Setup interpreter with just TFLite Runtime.
+ auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
+ CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
+ CHECK(tfLiteInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
+ CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
+ std::vector<T> tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<T>(0);
+ std::vector<int32_t> tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0);
+
+ // Setup interpreter with Arm NN Delegate applied.
+ auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+ CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
+ CHECK(armnnInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
+ CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
+ std::vector<T> armnnOutputValues = armnnInterpreter.GetOutputResult<T>(0);
+ std::vector<int32_t> armnnOutputShape = armnnInterpreter.GetOutputShape(0);
+
+ armnnDelegate::CompareOutputData<T>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
+ armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, outputTensorShape);
+
+ tfLiteInterpreter.Cleanup();
+ armnnInterpreter.Cleanup();
} // End of Slice Test
} // anonymous namespace
\ No newline at end of file
diff --git a/delegate/test/SoftmaxTestHelper.hpp b/delegate/test/SoftmaxTestHelper.hpp
index 15177b7088..ffd02abdf7 100644
--- a/delegate/test/SoftmaxTestHelper.hpp
+++ b/delegate/test/SoftmaxTestHelper.hpp
@@ -5,16 +5,18 @@
#pragma once
+#include "TestUtils.hpp"
+
#include <armnn_delegate.hpp>
+#include <DelegateTestInterpreter.hpp>
#include <armnnUtils/FloatingPointComparison.hpp>
#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
#include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <schema_generated.h>
#include <tensorflow/lite/version.h>
+#include <schema_generated.h>
+
#include <doctest/doctest.h>
namespace
@@ -95,7 +97,7 @@ std::vector<char> CreateSoftmaxTfLiteModel(tflite::BuiltinOperator softmaxOperat
flatBufferBuilder.CreateVector(&subgraph, 1),
modelDescription,
flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
- flatBufferBuilder.Finish(flatbufferModel);
+ flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);
return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
}
@@ -108,65 +110,33 @@ void SoftmaxTest(tflite::BuiltinOperator softmaxOperatorCode,
std::vector<float>& expectedOutputValues,
float beta = 0)
{
- using namespace tflite;
+ using namespace delegateTestInterpreter;
std::vector<char> modelBuffer = CreateSoftmaxTfLiteModel(softmaxOperatorCode,
tensorType,
shape,
beta);
- const Model* tfLiteModel = GetModel(modelBuffer.data());
- // Create TfLite Interpreters
- std::unique_ptr<Interpreter> armnnDelegateInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&armnnDelegateInterpreter) == kTfLiteOk);
- CHECK(armnnDelegateInterpreter != nullptr);
- CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
-
- std::unique_ptr<Interpreter> tfLiteInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&tfLiteInterpreter) == kTfLiteOk);
- CHECK(tfLiteInterpreter != nullptr);
- CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
-
- // Create the ArmNN Delegate
- armnnDelegate::DelegateOptions delegateOptions(backends);
- std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
- theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
- armnnDelegate::TfLiteArmnnDelegateDelete);
- CHECK(theArmnnDelegate != nullptr);
- // Modify armnnDelegateInterpreter to use armnnDelegate
- CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
- // Set input data
- auto tfLiteDelegateInputId = tfLiteInterpreter->inputs()[0];
- auto tfLiteInterpreterInputData = tfLiteInterpreter->typed_tensor<float>(tfLiteDelegateInputId);
- for (unsigned int i = 0; i < inputValues.size(); ++i)
- {
- tfLiteInterpreterInputData[i] = inputValues[i];
- }
-
- auto armnnDelegateInputId = armnnDelegateInterpreter->inputs()[0];
- auto armnnDelegateInputData = armnnDelegateInterpreter->typed_tensor<float>(armnnDelegateInputId);
- for (unsigned int i = 0; i < inputValues.size(); ++i)
- {
- armnnDelegateInputData[i] = inputValues[i];
- }
- // Run EnqueWorkload
- CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
- CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
-
- // Compare output data
- auto tfLiteInterpreterOutputId = tfLiteInterpreter->outputs()[0];
- auto tfLiteInterpreterOutputData = tfLiteInterpreter->typed_tensor<float>(tfLiteInterpreterOutputId);
- auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0];
- auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<float>(armnnDelegateOutputId);
-
- for (size_t i = 0; i < inputValues.size(); ++i)
- {
- CHECK(armnnUtils::within_percentage_tolerance(expectedOutputValues[i], armnnDelegateOutputData[i], 0.1));
- CHECK(armnnUtils::within_percentage_tolerance(tfLiteInterpreterOutputData[i],
- armnnDelegateOutputData[i], 0.1));
- }
+ // Setup interpreter with just TFLite Runtime.
+ auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
+ CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
+ CHECK(tfLiteInterpreter.FillInputTensor<float>(inputValues, 0) == kTfLiteOk);
+ CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
+ std::vector<float> tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<float>(0);
+ std::vector<int32_t> tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0);
+
+ // Setup interpreter with Arm NN Delegate applied.
+ auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+ CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
+ CHECK(armnnInterpreter.FillInputTensor<float>(inputValues, 0) == kTfLiteOk);
+ CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
+ std::vector<float> armnnOutputValues = armnnInterpreter.GetOutputResult<float>(0);
+ std::vector<int32_t> armnnOutputShape = armnnInterpreter.GetOutputShape(0);
+
+ armnnDelegate::CompareOutputData<float>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
+ armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, shape);
+
+ tfLiteInterpreter.Cleanup();
+ armnnInterpreter.Cleanup();
}
diff --git a/delegate/test/SpaceDepthTestHelper.hpp b/delegate/test/SpaceDepthTestHelper.hpp
index 6e8e39d0b0..912472d6c7 100644
--- a/delegate/test/SpaceDepthTestHelper.hpp
+++ b/delegate/test/SpaceDepthTestHelper.hpp
@@ -8,14 +8,14 @@
#include "TestUtils.hpp"
#include <armnn_delegate.hpp>
+#include <DelegateTestInterpreter.hpp>
#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
#include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <schema_generated.h>
#include <tensorflow/lite/version.h>
+#include <schema_generated.h>
+
#include <doctest/doctest.h>
namespace
@@ -108,7 +108,7 @@ std::vector<char> CreateSpaceDepthTfLiteModel(tflite::BuiltinOperator spaceDepth
flatBufferBuilder.CreateVector(&subgraph, 1),
modelDescription,
flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
- flatBufferBuilder.Finish(flatbufferModel);
+ flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);
return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
}
@@ -123,46 +123,34 @@ void SpaceDepthTest(tflite::BuiltinOperator spaceDepthOperatorCode,
std::vector<T>& expectedOutputValues,
int32_t blockSize = 2)
{
- using namespace tflite;
+ using namespace delegateTestInterpreter;
std::vector<char> modelBuffer = CreateSpaceDepthTfLiteModel(spaceDepthOperatorCode,
tensorType,
inputShape,
outputShape,
blockSize);
- const Model* tfLiteModel = GetModel(modelBuffer.data());
- // Create TfLite Interpreters
- std::unique_ptr<Interpreter> armnnDelegateInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&armnnDelegateInterpreter) == kTfLiteOk);
- CHECK(armnnDelegateInterpreter != nullptr);
- CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
-
- std::unique_ptr<Interpreter> tfLiteInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&tfLiteInterpreter) == kTfLiteOk);
- CHECK(tfLiteInterpreter != nullptr);
- CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
-
- // Create the ArmNN Delegate
- armnnDelegate::DelegateOptions delegateOptions(backends);
- std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
- theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
- armnnDelegate::TfLiteArmnnDelegateDelete);
- CHECK(theArmnnDelegate != nullptr);
- // Modify armnnDelegateInterpreter to use armnnDelegate
- CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
- // Set input data
- armnnDelegate::FillInput<T>(tfLiteInterpreter, 0, inputValues);
- armnnDelegate::FillInput<T>(armnnDelegateInterpreter, 0, inputValues);
-
- // Run EnqueWorkload
- CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
- CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
-
- // Compare output data
- armnnDelegate::CompareOutputData(tfLiteInterpreter, armnnDelegateInterpreter, outputShape, expectedOutputValues);
+ // Setup interpreter with just TFLite Runtime.
+ auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
+ CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
+ CHECK(tfLiteInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
+ CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
+ std::vector<T> tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<T>(0);
+ std::vector<int32_t> tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0);
+
+ // Setup interpreter with Arm NN Delegate applied.
+ auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+ CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
+ CHECK(armnnInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
+ CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
+ std::vector<T> armnnOutputValues = armnnInterpreter.GetOutputResult<T>(0);
+ std::vector<int32_t> armnnOutputShape = armnnInterpreter.GetOutputShape(0);
+
+ armnnDelegate::CompareOutputData<T>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
+ armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, outputShape);
+
+ tfLiteInterpreter.Cleanup();
+ armnnInterpreter.Cleanup();
}
} // anonymous namespace
diff --git a/delegate/test/SplitTestHelper.hpp b/delegate/test/SplitTestHelper.hpp
index 503fbc85ae..1d5f459148 100644
--- a/delegate/test/SplitTestHelper.hpp
+++ b/delegate/test/SplitTestHelper.hpp
@@ -8,17 +8,15 @@
#include "TestUtils.hpp"
#include <armnn_delegate.hpp>
+#include <DelegateTestInterpreter.hpp>
#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
#include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <schema_generated.h>
#include <tensorflow/lite/version.h>
-#include <doctest/doctest.h>
+#include <schema_generated.h>
-#include <string>
+#include <doctest/doctest.h>
namespace
{
@@ -113,7 +111,7 @@ std::vector<char> CreateSplitTfLiteModel(tflite::TensorType tensorType,
modelDescription,
flatBufferBuilder.CreateVector(buffers));
- flatBufferBuilder.Finish(flatbufferModel);
+ flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);
return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
@@ -132,7 +130,7 @@ void SplitTest(tflite::TensorType tensorType,
float quantScale = 1.0f,
int quantOffset = 0)
{
- using namespace tflite;
+ using namespace delegateTestInterpreter;
std::vector<char> modelBuffer = CreateSplitTfLiteModel(tensorType,
axisTensorShape,
inputTensorShape,
@@ -141,51 +139,34 @@ void SplitTest(tflite::TensorType tensorType,
numSplits,
quantScale,
quantOffset);
- const Model* tfLiteModel = GetModel(modelBuffer.data());
-
- // Create TfLite Interpreters
- std::unique_ptr<Interpreter> armnnDelegate;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&armnnDelegate) == kTfLiteOk);
- CHECK(armnnDelegate != nullptr);
- CHECK(armnnDelegate->AllocateTensors() == kTfLiteOk);
-
- std::unique_ptr<Interpreter> tfLiteDelegate;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&tfLiteDelegate) == kTfLiteOk);
- CHECK(tfLiteDelegate != nullptr);
- CHECK(tfLiteDelegate->AllocateTensors() == kTfLiteOk);
-
- // Create the ArmNN Delegate
- armnnDelegate::DelegateOptions delegateOptions(backends);
- std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
- theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
- armnnDelegate::TfLiteArmnnDelegateDelete);
- CHECK(theArmnnDelegate != nullptr);
-
- // Modify armnnDelegateInterpreter to use armnnDelegate
- CHECK(armnnDelegate->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
- // Set input data
- armnnDelegate::FillInput<T>(tfLiteDelegate, 1, inputValues);
- armnnDelegate::FillInput<T>(armnnDelegate, 1, inputValues);
-
- // Run EnqueWorkload
- CHECK(tfLiteDelegate->Invoke() == kTfLiteOk);
- CHECK(armnnDelegate->Invoke() == kTfLiteOk);
+ // Setup interpreter with just TFLite Runtime.
+ auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
+ CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
+ CHECK(tfLiteInterpreter.FillInputTensor<T>(inputValues, 1) == kTfLiteOk);
+ CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
+
+ // Setup interpreter with Arm NN Delegate applied.
+ auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+ CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
+ CHECK(armnnInterpreter.FillInputTensor<T>(inputValues, 1) == kTfLiteOk);
+ CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
// Compare output data
for (unsigned int i = 0; i < expectedOutputValues.size(); ++i)
{
- armnnDelegate::CompareOutputData<T>(tfLiteDelegate,
- armnnDelegate,
- outputTensorShapes[i],
- expectedOutputValues[i],
- i);
+ std::vector<T> tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<T>(i);
+ std::vector<int32_t> tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(i);
+
+ std::vector<T> armnnOutputValues = armnnInterpreter.GetOutputResult<T>(i);
+ std::vector<int32_t> armnnOutputShape = armnnInterpreter.GetOutputShape(i);
+
+ armnnDelegate::CompareOutputData<T>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues[i]);
+ armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, outputTensorShapes[i]);
}
- tfLiteDelegate.reset(nullptr);
- armnnDelegate.reset(nullptr);
+ tfLiteInterpreter.Cleanup();
+ armnnInterpreter.Cleanup();
+
} // End of SPLIT Test
std::vector<char> CreateSplitVTfLiteModel(tflite::TensorType tensorType,
@@ -288,7 +269,7 @@ std::vector<char> CreateSplitVTfLiteModel(tflite::TensorType tensorType,
modelDescription,
flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
- flatBufferBuilder.Finish(flatbufferModel);
+ flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);
return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
@@ -309,7 +290,7 @@ void SplitVTest(tflite::TensorType tensorType,
float quantScale = 1.0f,
int quantOffset = 0)
{
- using namespace tflite;
+ using namespace delegateTestInterpreter;
std::vector<char> modelBuffer = CreateSplitVTfLiteModel(tensorType,
inputTensorShape,
splitsTensorShape,
@@ -320,51 +301,34 @@ void SplitVTest(tflite::TensorType tensorType,
numSplits,
quantScale,
quantOffset);
- const Model* tfLiteModel = GetModel(modelBuffer.data());
-
- // Create TfLite Interpreters
- std::unique_ptr<Interpreter> armnnDelegate;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&armnnDelegate) == kTfLiteOk);
- CHECK(armnnDelegate != nullptr);
- CHECK(armnnDelegate->AllocateTensors() == kTfLiteOk);
-
- std::unique_ptr<Interpreter> tfLiteDelegate;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&tfLiteDelegate) == kTfLiteOk);
- CHECK(tfLiteDelegate != nullptr);
- CHECK(tfLiteDelegate->AllocateTensors() == kTfLiteOk);
-
- // Create the ArmNN Delegate
- armnnDelegate::DelegateOptions delegateOptions(backends);
- std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
- theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
- armnnDelegate::TfLiteArmnnDelegateDelete);
- CHECK(theArmnnDelegate != nullptr);
-
- // Modify armnnDelegateInterpreter to use armnnDelegate
- CHECK(armnnDelegate->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
- // Set input data
- armnnDelegate::FillInput<T>(tfLiteDelegate, 0, inputValues);
- armnnDelegate::FillInput<T>(armnnDelegate, 0, inputValues);
-
- // Run EnqueWorkload
- CHECK(tfLiteDelegate->Invoke() == kTfLiteOk);
- CHECK(armnnDelegate->Invoke() == kTfLiteOk);
+
+ // Setup interpreter with just TFLite Runtime.
+ auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
+ CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
+ CHECK(tfLiteInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
+ CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
+
+ // Setup interpreter with Arm NN Delegate applied.
+ auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+ CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
+ CHECK(armnnInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
+ CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
// Compare output data
for (unsigned int i = 0; i < expectedOutputValues.size(); ++i)
{
- armnnDelegate::CompareOutputData<T>(tfLiteDelegate,
- armnnDelegate,
- outputTensorShapes[i],
- expectedOutputValues[i],
- i);
+ std::vector<T> tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<T>(i);
+ std::vector<int32_t> tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(i);
+
+ std::vector<T> armnnOutputValues = armnnInterpreter.GetOutputResult<T>(i);
+ std::vector<int32_t> armnnOutputShape = armnnInterpreter.GetOutputShape(i);
+
+ armnnDelegate::CompareOutputData<T>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues[i]);
+ armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, outputTensorShapes[i]);
}
- tfLiteDelegate.reset(nullptr);
- armnnDelegate.reset(nullptr);
+ tfLiteInterpreter.Cleanup();
+ armnnInterpreter.Cleanup();
} // End of SPLIT_V Test
} // anonymous namespace
\ No newline at end of file
diff --git a/delegate/test/StridedSliceTestHelper.hpp b/delegate/test/StridedSliceTestHelper.hpp
index fde7e16c72..d3d160158b 100644
--- a/delegate/test/StridedSliceTestHelper.hpp
+++ b/delegate/test/StridedSliceTestHelper.hpp
@@ -8,18 +8,15 @@
#include "TestUtils.hpp"
#include <armnn_delegate.hpp>
-#include <armnn/DescriptorsFwd.hpp>
+#include <DelegateTestInterpreter.hpp>
#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
#include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <schema_generated.h>
#include <tensorflow/lite/version.h>
-#include <doctest/doctest.h>
+#include <schema_generated.h>
-#include <string>
+#include <doctest/doctest.h>
namespace
{
@@ -132,7 +129,7 @@ std::vector<char> CreateStridedSliceTfLiteModel(tflite::TensorType tensorType,
modelDescription,
flatBufferBuilder.CreateVector(buffers, 6));
- flatBufferBuilder.Finish(flatbufferModel);
+ flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);
return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
@@ -157,7 +154,7 @@ void StridedSliceTestImpl(std::vector<armnn::BackendId>& backends,
const int32_t ShrinkAxisMask = 0,
const armnn::DataLayout& dataLayout = armnn::DataLayout::NHWC)
{
- using namespace tflite;
+ using namespace delegateTestInterpreter;
std::vector<char> modelBuffer = CreateStridedSliceTfLiteModel(
::tflite::TensorType_FLOAT32,
inputTensorShape,
@@ -175,47 +172,27 @@ void StridedSliceTestImpl(std::vector<armnn::BackendId>& backends,
ShrinkAxisMask,
dataLayout);
- auto tfLiteModel = GetModel(modelBuffer.data());
-
- // Create TfLite Interpreters
- std::unique_ptr<Interpreter> armnnDelegate;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&armnnDelegate) == kTfLiteOk);
- CHECK(armnnDelegate != nullptr);
- CHECK(armnnDelegate->AllocateTensors() == kTfLiteOk);
-
- std::unique_ptr<Interpreter> tfLiteDelegate;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&tfLiteDelegate) == kTfLiteOk);
- CHECK(tfLiteDelegate != nullptr);
- CHECK(tfLiteDelegate->AllocateTensors() == kTfLiteOk);
-
- // Create the ArmNN Delegate
- armnnDelegate::DelegateOptions delegateOptions(backends);
- std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
- theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
- armnnDelegate::TfLiteArmnnDelegateDelete);
- CHECK(theArmnnDelegate != nullptr);
-
- // Modify armnnDelegateInterpreter to use armnnDelegate
- CHECK(armnnDelegate->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
- // Set input data
- armnnDelegate::FillInput<T>(tfLiteDelegate, 0, inputValues);
- armnnDelegate::FillInput<T>(armnnDelegate, 0, inputValues);
-
- // Run EnqueWorkload
- CHECK(tfLiteDelegate->Invoke() == kTfLiteOk);
- CHECK(armnnDelegate->Invoke() == kTfLiteOk);
-
- // Compare output data
- armnnDelegate::CompareOutputData<T>(tfLiteDelegate,
- armnnDelegate,
- outputTensorShape,
- expectedOutputValues);
-
- tfLiteDelegate.reset(nullptr);
- armnnDelegate.reset(nullptr);
+ // Setup interpreter with just TFLite Runtime.
+ auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
+ CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
+ CHECK(tfLiteInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
+ CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
+ std::vector<T> tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<T>(0);
+ std::vector<int32_t> tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0);
+
+ // Setup interpreter with Arm NN Delegate applied.
+ auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+ CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
+ CHECK(armnnInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
+ CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
+ std::vector<T> armnnOutputValues = armnnInterpreter.GetOutputResult<T>(0);
+ std::vector<int32_t> armnnOutputShape = armnnInterpreter.GetOutputShape(0);
+
+ armnnDelegate::CompareOutputData<T>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
+ armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, outputTensorShape);
+
+ tfLiteInterpreter.Cleanup();
+ armnnInterpreter.Cleanup();
} // End of StridedSlice Test
} // anonymous namespace
\ No newline at end of file
diff --git a/delegate/test/TestUtils.cpp b/delegate/test/TestUtils.cpp
index 2689c2eaa3..0d53d9492b 100644
--- a/delegate/test/TestUtils.cpp
+++ b/delegate/test/TestUtils.cpp
@@ -17,7 +17,7 @@ void CompareData(bool tensor1[], bool tensor2[], size_t tensorSize)
}
}
-void CompareData(std::vector<bool>& tensor1, bool tensor2[], size_t tensorSize)
+void CompareData(std::vector<bool>& tensor1, std::vector<bool>& tensor2, size_t tensorSize)
{
auto compareBool = [](auto a, auto b) {return (((a == 0) && (b == 0)) || ((a != 0) && (b != 0)));};
for (size_t i = 0; i < tensorSize; i++)
@@ -108,44 +108,18 @@ void CompareData(TfLiteFloat16 tensor1[], Half tensor2[], size_t tensorSize) {
}
}
-template <>
-void CompareOutputData(std::unique_ptr<tflite::Interpreter>& tfLiteInterpreter,
- std::unique_ptr<tflite::Interpreter>& armnnDelegateInterpreter,
- std::vector<int32_t>& expectedOutputShape,
- std::vector<Half>& expectedOutputValues,
- unsigned int outputIndex)
+void CompareOutputShape(const std::vector<int32_t>& tfLiteDelegateShape,
+ const std::vector<int32_t>& armnnDelegateShape,
+ const std::vector<int32_t>& expectedOutputShape)
{
- auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[outputIndex];
- auto tfLiteDelegateOutputTensor = tfLiteInterpreter->tensor(tfLiteDelegateOutputId);
- auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<TfLiteFloat16>(tfLiteDelegateOutputId);
- auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[outputIndex];
- auto armnnDelegateOutputTensor = armnnDelegateInterpreter->tensor(armnnDelegateOutputId);
- auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<TfLiteFloat16>(armnnDelegateOutputId);
-
- CHECK(expectedOutputShape.size() == tfLiteDelegateOutputTensor->dims->size);
- CHECK(expectedOutputShape.size() == armnnDelegateOutputTensor->dims->size);
+ CHECK(expectedOutputShape.size() == tfLiteDelegateShape.size());
+ CHECK(expectedOutputShape.size() == armnnDelegateShape.size());
for (size_t i = 0; i < expectedOutputShape.size(); i++)
{
- CHECK(armnnDelegateOutputTensor->dims->data[i] == expectedOutputShape[i]);
- CHECK(tfLiteDelegateOutputTensor->dims->data[i] == expectedOutputShape[i]);
- CHECK(tfLiteDelegateOutputTensor->dims->data[i] == armnnDelegateOutputTensor->dims->data[i]);
- }
-
- armnnDelegate::CompareData(armnnDelegateOutputData, expectedOutputValues.data(), expectedOutputValues.size());
- armnnDelegate::CompareData(tfLiteDelegateOutputData, expectedOutputValues.data(), expectedOutputValues.size());
- armnnDelegate::CompareData(tfLiteDelegateOutputData, armnnDelegateOutputData, expectedOutputValues.size());
-}
-
-template <>
-void FillInput<Half>(std::unique_ptr<tflite::Interpreter>& interpreter, int inputIndex, std::vector<Half>& inputValues)
-{
- auto tfLiteDelegateInputId = interpreter->inputs()[inputIndex];
- auto tfLiteDelageInputData = interpreter->typed_tensor<TfLiteFloat16>(tfLiteDelegateInputId);
- for (unsigned int i = 0; i < inputValues.size(); ++i)
- {
- tfLiteDelageInputData[i].data = half_float::detail::float2half<std::round_indeterminate, float>(inputValues[i]);
-
+ CHECK(expectedOutputShape[i] == armnnDelegateShape[i]);
+ CHECK(tfLiteDelegateShape[i] == expectedOutputShape[i]);
+ CHECK(tfLiteDelegateShape[i] == armnnDelegateShape[i]);
}
}
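A small, hedged illustration of what the reworked CompareOutputShape checks, using the { 1, 5, 5, 1 } output from the bilinear resize test above as example data (the three vectors here are hypothetical stand-ins for GetOutputShape(0) results):

    std::vector<int32_t> tfLiteOutputShape   = { 1, 5, 5, 1 };
    std::vector<int32_t> armnnOutputShape    = { 1, 5, 5, 1 };
    std::vector<int32_t> expectedOutputShape = { 1, 5, 5, 1 };

    // Sizes are checked first, then each dimension is compared pairwise:
    // expected vs Arm NN, TFLite vs expected, and TFLite vs Arm NN.
    armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, expectedOutputShape);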
diff --git a/delegate/test/TestUtils.hpp b/delegate/test/TestUtils.hpp
index 95dd257c92..ba81cd8d56 100644
--- a/delegate/test/TestUtils.hpp
+++ b/delegate/test/TestUtils.hpp
@@ -17,26 +17,12 @@ using Half = half_float::half;
namespace armnnDelegate
{
-/// Can be used to assign input data from a vector to a model input.
-/// Example usage can be found in ResizeTesthelper.hpp
-template <typename T>
-void FillInput(std::unique_ptr<tflite::Interpreter>& interpreter, int inputIndex, std::vector<T>& inputValues)
-{
- auto tfLiteDelegateInputId = interpreter->inputs()[inputIndex];
- auto tfLiteDelageInputData = interpreter->typed_tensor<T>(tfLiteDelegateInputId);
- for (unsigned int i = 0; i < inputValues.size(); ++i)
- {
- tfLiteDelageInputData[i] = inputValues[i];
- }
-}
-
-template <>
-void FillInput(std::unique_ptr<tflite::Interpreter>& interpreter, int inputIndex, std::vector<Half>& inputValues);
+constexpr const char* FILE_IDENTIFIER = "TFL3";
/// Can be used to compare bool data coming from a tflite interpreter
/// Boolean types get converted to a bit representation in a vector. vector.data() returns a void pointer
/// instead of a pointer to bool. Therefore a special function to compare to vector of bool is required
-void CompareData(std::vector<bool>& tensor1, bool tensor2[], size_t tensorSize);
+void CompareData(std::vector<bool>& tensor1, std::vector<bool>& tensor2, size_t tensorSize);
void CompareData(bool tensor1[], bool tensor2[], size_t tensorSize);
/// Can be used to compare float data coming from a tflite interpreter with a tolerance of limit_of_float*100
@@ -66,36 +52,22 @@ void CompareData(TfLiteFloat16 tensor1[], TfLiteFloat16 tensor2[], size_t tensor
/// Can be used to compare Half (Float16) data and TfLiteFloat16 data coming from a tflite interpreter
void CompareData(TfLiteFloat16 tensor1[], Half tensor2[], size_t tensorSize);
-/// Can be used to compare the output tensor shape and values
-/// from armnnDelegateInterpreter and tfLiteInterpreter.
+/// Can be used to compare the output tensor shape
+/// Example usage can be found in ControlTestHelper.hpp
+void CompareOutputShape(const std::vector<int32_t>& tfLiteDelegateShape,
+ const std::vector<int32_t>& armnnDelegateShape,
+ const std::vector<int32_t>& expectedOutputShape);
+
+/// Can be used to compare the output tensor values
/// Example usage can be found in ControlTestHelper.hpp
template <typename T>
-void CompareOutputData(std::unique_ptr<tflite::Interpreter>& tfLiteInterpreter,
- std::unique_ptr<tflite::Interpreter>& armnnDelegateInterpreter,
- std::vector<int32_t>& expectedOutputShape,
- std::vector<T>& expectedOutputValues,
- unsigned int outputIndex = 0)
+void CompareOutputData(std::vector<T>& tfLiteDelegateOutputs,
+ std::vector<T>& armnnDelegateOutputs,
+ std::vector<T>& expectedOutputValues)
{
- auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[outputIndex];
- auto tfLiteDelegateOutputTensor = tfLiteInterpreter->tensor(tfLiteDelegateOutputId);
- auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<T>(tfLiteDelegateOutputId);
- auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[outputIndex];
- auto armnnDelegateOutputTensor = armnnDelegateInterpreter->tensor(armnnDelegateOutputId);
- auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<T>(armnnDelegateOutputId);
-
- CHECK(expectedOutputShape.size() == tfLiteDelegateOutputTensor->dims->size);
- CHECK(expectedOutputShape.size() == armnnDelegateOutputTensor->dims->size);
-
- for (size_t i = 0; i < expectedOutputShape.size(); i++)
- {
- CHECK(expectedOutputShape[i] == armnnDelegateOutputTensor->dims->data[i]);
- CHECK(tfLiteDelegateOutputTensor->dims->data[i] == expectedOutputShape[i]);
- CHECK(tfLiteDelegateOutputTensor->dims->data[i] == armnnDelegateOutputTensor->dims->data[i]);
- }
-
- armnnDelegate::CompareData(expectedOutputValues.data(), armnnDelegateOutputData , expectedOutputValues.size());
- armnnDelegate::CompareData(tfLiteDelegateOutputData , expectedOutputValues.data(), expectedOutputValues.size());
- armnnDelegate::CompareData(tfLiteDelegateOutputData , armnnDelegateOutputData , expectedOutputValues.size());
+ armnnDelegate::CompareData(expectedOutputValues.data(), armnnDelegateOutputs.data(), expectedOutputValues.size());
+ armnnDelegate::CompareData(tfLiteDelegateOutputs.data(), expectedOutputValues.data(), expectedOutputValues.size());
+ armnnDelegate::CompareData(tfLiteDelegateOutputs.data(), armnnDelegateOutputs.data(), expectedOutputValues.size());
}
} // namespace armnnDelegate
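The FILE_IDENTIFIER constant introduced above is "TFL3", the file identifier used by the TensorFlow Lite flatbuffer schema; every model-building helper in this patch now passes it to Finish. A minimal sketch of that call sequence, assuming a flatbuffers::FlatBufferBuilder named flatBufferBuilder and a completed Model offset flatbufferModel as in the helpers:

    // Finish the buffer with the TFLite file identifier so the serialised
    // model bytes carry the "TFL3" tag.
    flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);

    std::vector<char> modelBuffer(flatBufferBuilder.GetBufferPointer(),
                                  flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());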
diff --git a/delegate/test/TransposeTest.cpp b/delegate/test/TransposeTest.cpp
index c210128ac8..cb3b327b13 100644
--- a/delegate/test/TransposeTest.cpp
+++ b/delegate/test/TransposeTest.cpp
@@ -13,6 +13,28 @@
namespace armnnDelegate
{
+void TransposeFP32Test(std::vector<armnn::BackendId>& backends)
+{
+ // set test input data
+ std::vector<int32_t> input0Shape {4, 2, 3};
+ std::vector<int32_t> inputPermVecShape {3};
+ std::vector<int32_t> outputShape {3, 4, 2};
+
+ std::vector<float> input0Values = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
+ 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23};
+ std::vector<int32_t> inputPermVec = {2, 0, 1};
+ std::vector<float> expectedOutputValues = {0, 3, 6, 9, 12, 15, 18, 21, 1, 4, 7, 10,
+ 13, 16, 19, 22, 2, 5, 8, 11, 14, 17, 20, 23};
+
+ TransposeTest<float>(backends,
+ input0Shape,
+ inputPermVecShape,
+ outputShape,
+ input0Values,
+ inputPermVec,
+ expectedOutputValues);
+}
+
TEST_SUITE ("Transpose_GpuAccTests")
{
@@ -37,10 +59,13 @@ TEST_CASE ("Transpose_Float32_CpuAcc_Test")
TEST_SUITE ("Transpose_CpuRefTests")
{
+
TEST_CASE ("Transpose_Float32_CpuRef_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- TransposeFP32Test(backends);
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+ TransposeFP32Test(backends);
}
+
}
+
} // namespace armnnDelegate
diff --git a/delegate/test/TransposeTestHelper.hpp b/delegate/test/TransposeTestHelper.hpp
index 99bb60b91a..57f4e291bf 100644
--- a/delegate/test/TransposeTestHelper.hpp
+++ b/delegate/test/TransposeTestHelper.hpp
@@ -5,15 +5,17 @@
#pragma once
+#include "TestUtils.hpp"
+
#include <armnn_delegate.hpp>
+#include <DelegateTestInterpreter.hpp>
#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
#include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <schema_generated.h>
#include <tensorflow/lite/version.h>
+#include <schema_generated.h>
+
#include <doctest/doctest.h>
namespace
@@ -76,102 +78,51 @@ std::vector<char> CreateTransposeTfLiteModel(tflite::TensorType tensorType,
flatBufferBuilder.CreateVector(&subgraph, 1),
modelDescription,
flatBufferBuilder.CreateVector(buffers, 4));
- flatBufferBuilder.Finish(flatbufferModel);
+ flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);
return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
}
-void TransposeFP32Test(std::vector<armnn::BackendId>& backends)
+template <typename T>
+void TransposeTest(std::vector<armnn::BackendId>& backends,
+ std::vector<int32_t>& inputShape,
+ std::vector<int32_t>& inputPermVecShape,
+ std::vector<int32_t>& outputShape,
+ std::vector<T>& inputValues,
+ std::vector<int32_t>& inputPermVec,
+ std::vector<T>& expectedOutputValues)
{
- using namespace tflite;
-
- // set test input data
- std::vector<int32_t> input0Shape {4, 2, 3};
- std::vector<int32_t> inputPermVecShape {3};
- std::vector<int32_t> outputShape {2, 3, 4};
+ using namespace delegateTestInterpreter;
- std::vector<float> input0Values = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
- 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23};
- std::vector<int32_t> inputPermVec = {2, 0, 1};
- std::vector<float> expectedOutputValues = {0, 3, 6, 9, 12, 15, 18, 21, 1, 4, 7, 10,
- 13, 16, 19, 22, 2, 5, 8, 11, 14, 17, 20, 23};
-
- // create model
+ // Create model
std::vector<char> modelBuffer = CreateTransposeTfLiteModel(::tflite::TensorType_FLOAT32,
- input0Shape,
+ inputShape,
inputPermVecShape,
outputShape,
inputPermVec);
- const Model* tfLiteModel = GetModel(modelBuffer.data());
- // Create TfLite Interpreters
- std::unique_ptr<Interpreter> armnnDelegateInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&armnnDelegateInterpreter) == kTfLiteOk);
- CHECK(armnnDelegateInterpreter != nullptr);
- CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
-
- std::unique_ptr<Interpreter> tfLiteInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&tfLiteInterpreter) == kTfLiteOk);
- CHECK(tfLiteInterpreter != nullptr);
- CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
-
- // Create the ArmNN Delegate
- armnnDelegate::DelegateOptions delegateOptions(backends);
- std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
- theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
- armnnDelegate::TfLiteArmnnDelegateDelete);
- CHECK(theArmnnDelegate != nullptr);
- // Modify armnnDelegateInterpreter to use armnnDelegate
- CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
- // Set input data for tflite
- auto tfLiteInterpreterInput0Id = tfLiteInterpreter->inputs()[0];
- auto tfLiteInterpreterInput0Data = tfLiteInterpreter->typed_tensor<float>(tfLiteInterpreterInput0Id);
- for (unsigned int i = 0; i < input0Values.size(); ++i)
- {
- tfLiteInterpreterInput0Data[i] = input0Values[i];
- }
-
- auto tfLiteInterpreterInput1Id = tfLiteInterpreter->inputs()[1];
- auto tfLiteInterpreterInput1Data = tfLiteInterpreter->typed_tensor<int32_t>(tfLiteInterpreterInput1Id);
- for (unsigned int i = 0; i < inputPermVec.size(); ++i)
- {
- tfLiteInterpreterInput1Data[i] = inputPermVec[i];
- }
-
- //Set input data for armnn delegate
- auto armnnDelegateInput0Id = armnnDelegateInterpreter->inputs()[0];
- auto armnnDelegateInput0Data = armnnDelegateInterpreter->typed_tensor<float>(armnnDelegateInput0Id);
- for (unsigned int i = 0; i < input0Values.size(); ++i)
- {
- armnnDelegateInput0Data[i] = input0Values[i];
- }
-
- auto armnnDelegateInput1Id = armnnDelegateInterpreter->inputs()[1];
- auto armnnDelegateInput1Data = armnnDelegateInterpreter->typed_tensor<int32_t>(armnnDelegateInput1Id);
- for (unsigned int i = 0; i < inputPermVec.size(); ++i)
- {
- armnnDelegateInput1Data[i] = inputPermVec[i];
- }
-
- // Run EnqueWorkload
- CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
- CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
-
- // Compare output data
- auto tfLiteInterpreterOutputId = tfLiteInterpreter->outputs()[0];
- auto tfLiteInterpreterOutputData = tfLiteInterpreter->typed_tensor<float>(tfLiteInterpreterOutputId);
- auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0];
- auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<float>(armnnDelegateOutputId);
- for (size_t i = 0; i < expectedOutputValues.size(); ++i)
- {
- CHECK(expectedOutputValues[i] == armnnDelegateOutputData[i]);
- CHECK(tfLiteInterpreterOutputData[i] == expectedOutputValues[i]);
- CHECK(tfLiteInterpreterOutputData[i] == armnnDelegateOutputData[i]);
- }
-
- armnnDelegateInterpreter.reset(nullptr);
+ // Setup interpreter with just TFLite Runtime.
+ auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
+ CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
+ CHECK(tfLiteInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
+ CHECK(tfLiteInterpreter.FillInputTensor<int32_t>(inputPermVec, 1) == kTfLiteOk);
+ CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
+ std::vector<T> tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<T>(0);
+ std::vector<int32_t> tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0);
+
+ // Setup interpreter with Arm NN Delegate applied.
+ auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+ CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
+ CHECK(armnnInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
+ CHECK(armnnInterpreter.FillInputTensor<int32_t>(inputPermVec, 1) == kTfLiteOk);
+ CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
+ std::vector<T> armnnOutputValues = armnnInterpreter.GetOutputResult<T>(0);
+ std::vector<int32_t> armnnOutputShape = armnnInterpreter.GetOutputShape(0);
+
+ armnnDelegate::CompareOutputData<T>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
+ armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, outputShape);
+
+ tfLiteInterpreter.Cleanup();
+ armnnInterpreter.Cleanup();
}
}
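For reference, the reworked helpers above all drive the same small test-interpreter interface (brought in via "using namespace delegateTestInterpreter"). The declaration below is only a sketch inferred from the call sites in this diff; the real class is added elsewhere in this patch and its exact signatures (references, const-ness, defaults) may differ.

// Sketch only: inferred from the call sites in this diff, not copied from the
// real DelegateTestInterpreter header; names come from the diff, signatures are assumptions.
#include <armnn/BackendId.hpp>
#include <tensorflow/lite/c/common.h>
#include <cstdint>
#include <vector>

class DelegateTestInterpreter
{
public:
    // Runs the model on the reference TFLite runtime only.
    explicit DelegateTestInterpreter(std::vector<char>& modelBuffer);

    // Runs the model with the Arm NN delegate applied for the given backends.
    DelegateTestInterpreter(std::vector<char>& modelBuffer,
                            const std::vector<armnn::BackendId>& backends);

    TfLiteStatus AllocateTensors();
    TfLiteStatus Invoke();
    void Cleanup();

    // Copies inputValues into the input tensor at inputIndex.
    template <typename T>
    TfLiteStatus FillInputTensor(std::vector<T>& inputValues, int inputIndex);

    // Copies the output tensor at outputIndex into a vector.
    template <typename T>
    std::vector<T> GetOutputResult(int outputIndex);

    // Returns the dimensions of the output tensor at outputIndex.
    std::vector<int32_t> GetOutputShape(int outputIndex);
};

Each helper builds one model buffer, runs it through two instances of this class (TFLite runtime only, then with the delegate), and compares the two sets of outputs against the expected values, as the hunks above show.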
diff --git a/delegate/test/UnidirectionalSequenceLstmTestHelper.hpp b/delegate/test/UnidirectionalSequenceLstmTestHelper.hpp
index 0ff04e7949..c058d83bc6 100644
--- a/delegate/test/UnidirectionalSequenceLstmTestHelper.hpp
+++ b/delegate/test/UnidirectionalSequenceLstmTestHelper.hpp
@@ -8,14 +8,13 @@
#include "TestUtils.hpp"
#include <armnn_delegate.hpp>
+#include <DelegateTestInterpreter.hpp>
#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
#include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <schema_generated.h>
#include <tensorflow/lite/version.h>
-#include <tensorflow/lite/c/common.h>
+
+#include <schema_generated.h>
#include <doctest/doctest.h>
@@ -569,7 +568,7 @@ std::vector<char> CreateUnidirectionalSequenceLstmTfLiteModel(tflite::TensorType
modelDescription,
flatBufferBuilder.CreateVector(buffers));
- flatBufferBuilder.Finish(flatbufferModel);
+ flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);
return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
@@ -624,7 +623,7 @@ void UnidirectionalSequenceLstmTestImpl(std::vector<armnn::BackendId>& backends,
bool isTimeMajor,
float quantScale = 0.1f)
{
- using namespace tflite;
+ using namespace delegateTestInterpreter;
std::vector<char> modelBuffer = CreateUnidirectionalSequenceLstmTfLiteModel(tensorType,
batchSize,
@@ -671,72 +670,51 @@ void UnidirectionalSequenceLstmTestImpl(std::vector<armnn::BackendId>& backends,
isTimeMajor,
quantScale);
- const Model* tfLiteModel = GetModel(modelBuffer.data());
- // Create TfLite Interpreters
- std::unique_ptr<Interpreter> armnnDelegateInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&armnnDelegateInterpreter) == kTfLiteOk);
- CHECK(armnnDelegateInterpreter != nullptr);
- CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
-
- std::unique_ptr<Interpreter> tfLiteInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&tfLiteInterpreter) == kTfLiteOk);
- CHECK(tfLiteInterpreter != nullptr);
- CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
-
- // Create the ArmNN Delegate
- armnnDelegate::DelegateOptions delegateOptions(backends);
- std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
- theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
- armnnDelegate::TfLiteArmnnDelegateDelete);
- CHECK(theArmnnDelegate != nullptr);
- // Modify armnnDelegateInterpreter to use armnnDelegate
- CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
- // Set input data
- auto tfLiteDelegateInputId = tfLiteInterpreter->inputs()[0];
- auto tfLiteDelageInputData = tfLiteInterpreter->typed_tensor<float>(tfLiteDelegateInputId);
- for (unsigned int i = 0; i < inputValues.size(); ++i)
+ std::vector<int32_t> outputShape;
+ if (isTimeMajor)
{
- tfLiteDelageInputData[i] = inputValues[i];
+ outputShape = {timeSize, batchSize, outputSize};
}
-
- auto armnnDelegateInputId = armnnDelegateInterpreter->inputs()[0];
- auto armnnDelegateInputData = armnnDelegateInterpreter->typed_tensor<float>(armnnDelegateInputId);
- for (unsigned int i = 0; i < inputValues.size(); ++i)
+ else
{
- armnnDelegateInputData[i] = inputValues[i];
+ outputShape = {batchSize, timeSize, outputSize};
}
- // Run EnqueueWorkload
- CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
- CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
+ // Setup interpreter with just TFLite Runtime.
+ auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
+ CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
+ CHECK(tfLiteInterpreter.FillInputTensor<float>(inputValues, 0) == kTfLiteOk);
+ CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
+ std::vector<float> tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<float>(0);
+ std::vector<int32_t> tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0);
+
+ // Setup interpreter with Arm NN Delegate applied.
+ auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+ CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
+ CHECK(armnnInterpreter.FillInputTensor<float>(inputValues, 0) == kTfLiteOk);
+ CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
+ std::vector<float> armnnOutputValues = armnnInterpreter.GetOutputResult<float>(0);
+ std::vector<int32_t> armnnOutputShape = armnnInterpreter.GetOutputShape(0);
- // Compare output data
- auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[0];
- auto tfLiteDelagateOutputData = tfLiteInterpreter->typed_tensor<float>(tfLiteDelegateOutputId);
- auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0];
- auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<float>(armnnDelegateOutputId);
+ armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, outputShape);
if (tensorType == ::tflite::TensorType_INT8)
{
// Allow 2% tolerance for Quantized weights
- armnnDelegate::CompareData(expectedOutputValues.data(), armnnDelegateOutputData,
+ armnnDelegate::CompareData(expectedOutputValues.data(), armnnOutputValues.data(),
expectedOutputValues.size(), 2);
- armnnDelegate::CompareData(expectedOutputValues.data(), tfLiteDelagateOutputData,
+ armnnDelegate::CompareData(expectedOutputValues.data(), tfLiteOutputValues.data(),
expectedOutputValues.size(), 2);
- armnnDelegate::CompareData(tfLiteDelagateOutputData, armnnDelegateOutputData,
+ armnnDelegate::CompareData(tfLiteOutputValues.data(), armnnOutputValues.data(),
expectedOutputValues.size(), 2);
}
else
{
- armnnDelegate::CompareData(expectedOutputValues.data(), armnnDelegateOutputData,
- expectedOutputValues.size());
- armnnDelegate::CompareData(expectedOutputValues.data(), tfLiteDelagateOutputData,
- expectedOutputValues.size());
- armnnDelegate::CompareData(tfLiteDelagateOutputData, armnnDelegateOutputData, expectedOutputValues.size());
+ armnnDelegate::CompareOutputData<float>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
}
+
+ tfLiteInterpreter.Cleanup();
+ armnnInterpreter.Cleanup();
}
} // anonymous namespace
\ No newline at end of file
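The flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER) changes in this helper and in the Unpack helper below matter because finishing a FlatBuffer without a file identifier leaves the 4-byte identifier slot empty, so identifier checks on the buffer fail. The snippet below is an illustrative sketch, not code from this patch; it assumes armnnDelegate::FILE_IDENTIFIER matches the identifier declared in the TFLite schema, i.e. the value returned by tflite::ModelIdentifier().

// Hedged sketch: shows why the second argument to Finish() is needed, using only
// stock FlatBuffers and TFLite-schema-generated calls.
#include <flatbuffers/flatbuffers.h>
#include <schema_generated.h>   // generated from the TFLite schema
#include <cstdint>
#include <vector>

std::vector<char> FinishWithIdentifier(flatbuffers::FlatBufferBuilder& builder,
                                       flatbuffers::Offset<tflite::Model> model)
{
    // Without the identifier argument, tflite::ModelBufferHasIdentifier() returns false
    // and verification of the finished buffer as a TFLite model is rejected.
    builder.Finish(model, tflite::ModelIdentifier());   // writes "TFL3" after the root offset

    const uint8_t* buf = builder.GetBufferPointer();
    flatbuffers::Verifier verifier(buf, builder.GetSize());
    if (!tflite::VerifyModelBuffer(verifier) || !tflite::ModelBufferHasIdentifier(buf))
    {
        return {};
    }
    return { reinterpret_cast<const char*>(buf),
             reinterpret_cast<const char*>(buf) + builder.GetSize() };
}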
diff --git a/delegate/test/UnpackTestHelper.hpp b/delegate/test/UnpackTestHelper.hpp
index a4c6bc01f3..2d6565f883 100644
--- a/delegate/test/UnpackTestHelper.hpp
+++ b/delegate/test/UnpackTestHelper.hpp
@@ -8,17 +8,15 @@
#include "TestUtils.hpp"
#include <armnn_delegate.hpp>
+#include <DelegateTestInterpreter.hpp>
#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
#include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <schema_generated.h>
#include <tensorflow/lite/version.h>
-#include <doctest/doctest.h>
+#include <schema_generated.h>
-#include <string>
+#include <doctest/doctest.h>
namespace
{
@@ -110,7 +108,7 @@ std::vector<char> CreateUnpackTfLiteModel(tflite::BuiltinOperator unpackOperator
modelDescription,
flatBufferBuilder.CreateVector(buffers));
- flatBufferBuilder.Finish(flatbufferModel);
+ flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);
return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
@@ -128,7 +126,7 @@ void UnpackTest(tflite::BuiltinOperator unpackOperatorCode,
float quantScale = 1.0f,
int quantOffset = 0)
{
- using namespace tflite;
+ using namespace delegateTestInterpreter;
std::vector<char> modelBuffer = CreateUnpackTfLiteModel(unpackOperatorCode,
tensorType,
inputShape,
@@ -138,51 +136,33 @@ void UnpackTest(tflite::BuiltinOperator unpackOperatorCode,
quantScale,
quantOffset);
- const Model* tfLiteModel = GetModel(modelBuffer.data());
-
- // Create TfLite Interpreters
- std::unique_ptr<Interpreter> armnnDelegateInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&armnnDelegateInterpreter) == kTfLiteOk);
- CHECK(armnnDelegateInterpreter != nullptr);
- CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
-
- std::unique_ptr<Interpreter> tfLiteInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&tfLiteInterpreter) == kTfLiteOk);
- CHECK(tfLiteInterpreter != nullptr);
- CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
+ // Setup interpreter with just TFLite Runtime.
+ auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
+ CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
+ CHECK(tfLiteInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
+ CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
- // Create the ArmNN Delegate
- armnnDelegate::DelegateOptions delegateOptions(backends);
- std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
- theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
- armnnDelegate::TfLiteArmnnDelegateDelete);
- CHECK(theArmnnDelegate != nullptr);
-
- // Modify armnnDelegateInterpreter to use armnnDelegate
- CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
- // Set input data
- armnnDelegate::FillInput<T>(tfLiteInterpreter, 0, inputValues);
- armnnDelegate::FillInput<T>(armnnDelegateInterpreter, 0, inputValues);
-
-
- // Run EnqueueWorkload
- CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
- CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
+ // Setup interpreter with Arm NN Delegate applied.
+ auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+ CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
+ CHECK(armnnInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
+ CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
// Compare output data
for (unsigned int i = 0; i < expectedOutputValues.size(); ++i)
{
- armnnDelegate::CompareOutputData<T>(tfLiteInterpreter,
- armnnDelegateInterpreter,
- expectedOutputShape,
- expectedOutputValues[i],
- i);
+ std::vector<T> tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<T>(i);
+ std::vector<int32_t> tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(i);
+
+ std::vector<T> armnnOutputValues = armnnInterpreter.GetOutputResult<T>(i);
+ std::vector<int32_t> armnnOutputShape = armnnInterpreter.GetOutputShape(i);
+
+ armnnDelegate::CompareOutputData<T>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues[i]);
+ armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, expectedOutputShape);
}
- armnnDelegateInterpreter.reset(nullptr);
+ tfLiteInterpreter.Cleanup();
+ armnnInterpreter.Cleanup();
}
} // anonymous namespace
\ No newline at end of file
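The per-output loop in the Unpack helper, like the other helpers above, hands the comparison work to armnnDelegate::CompareOutputData and armnnDelegate::CompareOutputShape. Their real implementations live in the test utilities updated by this patch and are not shown in this section; the sketch below only illustrates the three-way checks the old inline code performed, under the assumption that exact equality is acceptable (quantised outputs may instead need a tolerance, as the LSTM helper's CompareData calls show).

// Hedged sketch of the comparison helpers; hypothetical namespace to avoid any
// confusion with the real armnnDelegate:: helpers.
#include <doctest/doctest.h>
#include <cstdint>
#include <vector>

namespace armnnDelegateSketch
{

inline void CompareOutputShape(const std::vector<int32_t>& tfLiteShape,
                               const std::vector<int32_t>& armnnShape,
                               const std::vector<int32_t>& expectedShape)
{
    CHECK(tfLiteShape == expectedShape);
    CHECK(armnnShape == expectedShape);
    CHECK(tfLiteShape == armnnShape);
}

template <typename T>
void CompareOutputData(const std::vector<T>& tfLiteValues,
                       const std::vector<T>& armnnValues,
                       const std::vector<T>& expectedValues)
{
    REQUIRE(tfLiteValues.size() == expectedValues.size());
    REQUIRE(armnnValues.size() == expectedValues.size());
    for (size_t i = 0; i < expectedValues.size(); ++i)
    {
        // Mirrors the three-way element checks the removed inline test code performed.
        CHECK(tfLiteValues[i] == expectedValues[i]);
        CHECK(armnnValues[i] == expectedValues[i]);
        CHECK(tfLiteValues[i] == armnnValues[i]);
    }
}

} // namespace armnnDelegateSketch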