path: root/delegate/test/SliceTestHelper.hpp
Diffstat (limited to 'delegate/test/SliceTestHelper.hpp')
-rw-r--r--  delegate/test/SliceTestHelper.hpp | 14 +++++---------
1 file changed, 5 insertions(+), 9 deletions(-)
diff --git a/delegate/test/SliceTestHelper.hpp b/delegate/test/SliceTestHelper.hpp
index 2e84d8de9f..90ce8e4fd6 100644
--- a/delegate/test/SliceTestHelper.hpp
+++ b/delegate/test/SliceTestHelper.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -10,12 +10,8 @@
#include <armnn_delegate.hpp>
#include <DelegateTestInterpreter.hpp>
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/kernels/register.h>
#include <tensorflow/lite/version.h>
-#include <doctest/doctest.h>
-
namespace
{
@@ -112,15 +108,15 @@ std::vector<char> CreateSliceTfLiteModel(tflite::TensorType tensorType,
}
template <typename T>
-void SliceTestImpl(std::vector<armnn::BackendId>& backends,
- std::vector<T>& inputValues,
+void SliceTestImpl(std::vector<T>& inputValues,
std::vector<T>& expectedOutputValues,
std::vector<int32_t>& beginTensorData,
std::vector<int32_t>& sizeTensorData,
std::vector<int32_t>& inputTensorShape,
std::vector<int32_t>& beginTensorShape,
std::vector<int32_t>& sizeTensorShape,
- std::vector<int32_t>& outputTensorShape)
+ std::vector<int32_t>& outputTensorShape,
+ const std::vector<armnn::BackendId>& backends = {})
{
using namespace delegateTestInterpreter;
std::vector<char> modelBuffer = CreateSliceTfLiteModel(
@@ -141,7 +137,7 @@ void SliceTestImpl(std::vector<armnn::BackendId>& backends,
std::vector<int32_t> tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0);
// Setup interpreter with Arm NN Delegate applied.
- auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+ auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, CaptureAvailableBackends(backends));
CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
CHECK(armnnInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
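
For context, a hedged sketch of what a call site might look like after this change (the tensor values, shapes, and the armnn::Compute::CpuRef backend below are illustrative assumptions, not taken from this diff): because `backends` is now the trailing parameter with a default of `{}`, a test can omit it and let CaptureAvailableBackends() resolve whichever backends are present, or still pass an explicit list as the final argument.

    // Hypothetical call site (illustrative values only, not from this diff).
    std::vector<float>   inputValues          { 1.0f, 2.0f, 3.0f, 4.0f };
    std::vector<float>   expectedOutputValues { 2.0f, 3.0f };
    std::vector<int32_t> beginTensorData      { 1 };
    std::vector<int32_t> sizeTensorData       { 2 };
    std::vector<int32_t> inputTensorShape     { 4 };
    std::vector<int32_t> beginTensorShape     { 1 };
    std::vector<int32_t> sizeTensorShape      { 1 };
    std::vector<int32_t> outputTensorShape    { 2 };

    // Backends argument omitted: it defaults to {} and the helper resolves
    // available backends via CaptureAvailableBackends().
    SliceTestImpl<float>(inputValues, expectedOutputValues,
                         beginTensorData, sizeTensorData,
                         inputTensorShape, beginTensorShape, sizeTensorShape,
                         outputTensorShape);

    // Or pin the test to a specific backend by passing it as the last argument:
    SliceTestImpl<float>(inputValues, expectedOutputValues,
                         beginTensorData, sizeTensorData,
                         inputTensorShape, beginTensorShape, sizeTensorShape,
                         outputTensorShape,
                         { armnn::Compute::CpuRef });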