aboutsummaryrefslogtreecommitdiff
path: root/delegate/test/StridedSliceTestHelper.hpp
diff options
context:
space:
mode:
Diffstat (limited to 'delegate/test/StridedSliceTestHelper.hpp')
-rw-r--r--delegate/test/StridedSliceTestHelper.hpp12
1 file changed, 4 insertions(+), 8 deletions(-)
diff --git a/delegate/test/StridedSliceTestHelper.hpp b/delegate/test/StridedSliceTestHelper.hpp
index 0ae6384dab..740dafe878 100644
--- a/delegate/test/StridedSliceTestHelper.hpp
+++ b/delegate/test/StridedSliceTestHelper.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -10,12 +10,8 @@
#include <armnn_delegate.hpp>
#include <DelegateTestInterpreter.hpp>
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/kernels/register.h>
#include <tensorflow/lite/version.h>
-#include <doctest/doctest.h>
-
namespace
{
@@ -134,8 +130,7 @@ std::vector<char> CreateStridedSliceTfLiteModel(tflite::TensorType tensorType,
}
template <typename T>
-void StridedSliceTestImpl(std::vector<armnn::BackendId>& backends,
- std::vector<T>& inputValues,
+void StridedSliceTestImpl(std::vector<T>& inputValues,
std::vector<T>& expectedOutputValues,
std::vector<int32_t>& beginTensorData,
std::vector<int32_t>& endTensorData,
@@ -145,6 +140,7 @@ void StridedSliceTestImpl(std::vector<armnn::BackendId>& backends,
std::vector<int32_t>& endTensorShape,
std::vector<int32_t>& strideTensorShape,
std::vector<int32_t>& outputTensorShape,
+ const std::vector<armnn::BackendId>& backends = {},
const int32_t beginMask = 0,
const int32_t endMask = 0,
const int32_t ellipsisMask = 0,
@@ -179,7 +175,7 @@ void StridedSliceTestImpl(std::vector<armnn::BackendId>& backends,
std::vector<int32_t> tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0);
// Setup interpreter with Arm NN Delegate applied.
- auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+ auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, CaptureAvailableBackends(backends));
CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
CHECK(armnnInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
CHECK(armnnInterpreter.Invoke() == kTfLiteOk);