aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorColm Donelan <colm.donelan@arm.com>2024-01-22 10:07:14 +0000
committerTeresaARM <teresa.charlinreyes@arm.com>2024-02-07 15:37:05 +0000
commit7bcae3c835468d9b0770514dc7127f02d47cec5f (patch)
tree03fe2da7324715be89c1c0ba476b083558c158ed
parenta52bca23d225144e92f521341718a70489d5c217 (diff)
downloadarmnn-7bcae3c835468d9b0770514dc7127f02d47cec5f.tar.gz
IVGCVSW-7675 Rework more DelegateUnitTests so backends are subcases.
The intent of this change is to remove the per backend test cases in the delegate unit tests. They will be replaced by using DocTest SUBCASES. The sub cases are parameterized by the available backends. The list of available backends is determined by the compilation flags. Signed-off-by: Colm Donelan <colm.donelan@arm.com> Change-Id: I6dd0369491c4582b8e2467b911dfd085dddcf576
-rw-r--r--delegate/test/ActivationTest.cpp7
-rw-r--r--delegate/test/ActivationTestHelper.hpp6
-rw-r--r--delegate/test/ArgMinMaxTest.cpp6
-rw-r--r--delegate/test/ArgMinMaxTestHelper.hpp6
-rw-r--r--delegate/test/BatchMatMulTestHelper.hpp6
-rw-r--r--delegate/test/BatchSpaceTestHelper.hpp6
-rw-r--r--delegate/test/BroadcastToTestHelper.hpp6
-rw-r--r--delegate/test/CastTestHelper.hpp6
-rw-r--r--delegate/test/ComparisonTestHelper.hpp6
-rw-r--r--delegate/test/ControlTestHelper.hpp6
-rw-r--r--delegate/test/ConvolutionTestHelper.hpp8
-rw-r--r--delegate/test/DelegateOptionsTest.cpp12
-rw-r--r--delegate/test/DelegateOptionsTestHelper.hpp6
-rw-r--r--delegate/test/ElementwiseBinaryTestHelper.hpp6
-rw-r--r--delegate/test/ElementwiseUnaryTestHelper.hpp6
-rw-r--r--delegate/test/ExpandDimsTest.cpp64
-rw-r--r--delegate/test/FillTest.cpp130
-rw-r--r--delegate/test/FillTestHelper.hpp12
-rw-r--r--delegate/test/FullyConnectedTest.cpp103
-rw-r--r--delegate/test/FullyConnectedTestHelper.hpp12
-rw-r--r--delegate/test/GatherNdTest.cpp63
-rw-r--r--delegate/test/GatherNdTestHelper.hpp10
-rw-r--r--delegate/test/GatherTest.cpp64
-rw-r--r--delegate/test/GatherTestHelper.hpp10
-rw-r--r--delegate/test/LogicalTestHelper.hpp6
-rw-r--r--delegate/test/LstmTest.cpp30
-rw-r--r--delegate/test/LstmTestHelper.hpp14
-rw-r--r--delegate/test/MirrorPadTest.cpp18
-rw-r--r--delegate/test/NeonDelegateTests_NDK_Issue.cpp20
-rw-r--r--delegate/test/NormalizationTest.cpp57
-rw-r--r--delegate/test/NormalizationTestHelper.hpp24
-rw-r--r--delegate/test/PackTest.cpp197
-rw-r--r--delegate/test/PackTestHelper.hpp10
-rw-r--r--delegate/test/PadTest.cpp212
-rw-r--r--delegate/test/PadTestHelper.hpp10
-rw-r--r--delegate/test/Pooling2dTest.cpp553
-rw-r--r--delegate/test/Pooling2dTestHelper.hpp10
-rw-r--r--delegate/test/Pooling3dTest.cpp176
-rw-r--r--delegate/test/Pooling3dTestHelper.hpp11
-rw-r--r--delegate/test/PreluTest.cpp96
-rw-r--r--delegate/test/PreluTestHelper.hpp12
-rw-r--r--delegate/test/QuantizationTest.cpp226
-rw-r--r--delegate/test/QuantizationTestHelper.hpp10
-rw-r--r--delegate/test/RedefineTestHelper.hpp10
-rw-r--r--delegate/test/ReduceTest.cpp294
-rw-r--r--delegate/test/ReduceTestHelper.hpp10
-rw-r--r--delegate/test/ReshapeTest.cpp310
-rw-r--r--delegate/test/ResizeTest.cpp67
-rw-r--r--delegate/test/ResizeTestHelper.hpp12
-rw-r--r--delegate/test/ReverseV2Test.cpp65
-rw-r--r--delegate/test/ReverseV2TestHelper.hpp12
-rw-r--r--delegate/test/RoundTest.cpp38
-rw-r--r--delegate/test/RoundTestHelper.hpp10
-rw-r--r--delegate/test/ShapeTest.cpp8
-rw-r--r--delegate/test/ShapeTestHelper.hpp10
-rw-r--r--delegate/test/SliceTest.cpp60
-rw-r--r--delegate/test/SliceTestHelper.hpp14
-rw-r--r--delegate/test/SoftmaxTest.cpp58
-rw-r--r--delegate/test/SoftmaxTestHelper.hpp18
-rw-r--r--delegate/test/SpaceDepthTest.cpp118
-rw-r--r--delegate/test/SpaceDepthTestHelper.hpp10
-rw-r--r--delegate/test/SplitTest.cpp113
-rw-r--r--delegate/test/SplitTestHelper.hpp14
-rw-r--r--delegate/test/SqueezeTest.cpp58
-rw-r--r--delegate/test/StridedSliceTest.cpp105
-rw-r--r--delegate/test/StridedSliceTestHelper.hpp12
-rw-r--r--delegate/test/TestUtils.hpp7
-rw-r--r--delegate/test/TileTest.cpp42
-rw-r--r--delegate/test/TileTestHelper.hpp12
-rw-r--r--delegate/test/TransposeConvolution2dTest.cpp10
-rw-r--r--delegate/test/TransposeTest.cpp39
-rw-r--r--delegate/test/TransposeTestHelper.hpp14
-rw-r--r--delegate/test/UnidirectionalSequenceLstmTest.cpp123
-rw-r--r--delegate/test/UnidirectionalSequenceLstmTestHelper.hpp13
-rw-r--r--delegate/test/UnpackTest.cpp98
-rw-r--r--delegate/test/UnpackTestHelper.hpp10
76 files changed, 817 insertions, 3236 deletions
diff --git a/delegate/test/ActivationTest.cpp b/delegate/test/ActivationTest.cpp
index 113e645f82..f07660fa6e 100644
--- a/delegate/test/ActivationTest.cpp
+++ b/delegate/test/ActivationTest.cpp
@@ -1,15 +1,10 @@
//
-// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020, 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "ActivationTestHelper.hpp"
-#include <armnn_delegate.hpp>
-
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
-
#include <doctest/doctest.h>
namespace armnnDelegate
diff --git a/delegate/test/ActivationTestHelper.hpp b/delegate/test/ActivationTestHelper.hpp
index c023696bab..ff03127d93 100644
--- a/delegate/test/ActivationTestHelper.hpp
+++ b/delegate/test/ActivationTestHelper.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020, 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -10,12 +10,8 @@
#include <armnn_delegate.hpp>
#include <DelegateTestInterpreter.hpp>
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/kernels/register.h>
#include <tensorflow/lite/version.h>
-#include <doctest/doctest.h>
-
namespace
{
diff --git a/delegate/test/ArgMinMaxTest.cpp b/delegate/test/ArgMinMaxTest.cpp
index 73889863f0..8ae4e7e0d1 100644
--- a/delegate/test/ArgMinMaxTest.cpp
+++ b/delegate/test/ArgMinMaxTest.cpp
@@ -1,14 +1,10 @@
//
-// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021, 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "ArgMinMaxTestHelper.hpp"
-#include <armnn_delegate.hpp>
-
-#include <flatbuffers/flatbuffers.h>
-
#include <doctest/doctest.h>
namespace armnnDelegate
diff --git a/delegate/test/ArgMinMaxTestHelper.hpp b/delegate/test/ArgMinMaxTestHelper.hpp
index 9c6ac8dccb..9707ab2089 100644
--- a/delegate/test/ArgMinMaxTestHelper.hpp
+++ b/delegate/test/ArgMinMaxTestHelper.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021, 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -10,12 +10,8 @@
#include <armnn_delegate.hpp>
#include <DelegateTestInterpreter.hpp>
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/kernels/register.h>
#include <tensorflow/lite/version.h>
-#include <doctest/doctest.h>
-
namespace
{
diff --git a/delegate/test/BatchMatMulTestHelper.hpp b/delegate/test/BatchMatMulTestHelper.hpp
index f2fb581a62..28a8ca2e1b 100644
--- a/delegate/test/BatchMatMulTestHelper.hpp
+++ b/delegate/test/BatchMatMulTestHelper.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -10,12 +10,8 @@
#include <armnn_delegate.hpp>
#include <DelegateTestInterpreter.hpp>
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/kernels/register.h>
#include <tensorflow/lite/version.h>
-#include <doctest/doctest.h>
-
namespace
{
std::vector<char> CreateBatchMatMulTfLiteModel(
diff --git a/delegate/test/BatchSpaceTestHelper.hpp b/delegate/test/BatchSpaceTestHelper.hpp
index 9c39c30f1f..d19c4e7b5a 100644
--- a/delegate/test/BatchSpaceTestHelper.hpp
+++ b/delegate/test/BatchSpaceTestHelper.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021, 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -10,12 +10,8 @@
#include <armnn_delegate.hpp>
#include <DelegateTestInterpreter.hpp>
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/kernels/register.h>
#include <tensorflow/lite/version.h>
-#include <doctest/doctest.h>
-
namespace
{
diff --git a/delegate/test/BroadcastToTestHelper.hpp b/delegate/test/BroadcastToTestHelper.hpp
index 8fcb762474..d37ffe1620 100644
--- a/delegate/test/BroadcastToTestHelper.hpp
+++ b/delegate/test/BroadcastToTestHelper.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -10,12 +10,8 @@
#include <armnn_delegate.hpp>
#include <DelegateTestInterpreter.hpp>
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/kernels/register.h>
#include <tensorflow/lite/version.h>
-#include <doctest/doctest.h>
-
namespace
{
std::vector<char> CreateBroadcastToTfLiteModel(tflite::BuiltinOperator operatorCode,
diff --git a/delegate/test/CastTestHelper.hpp b/delegate/test/CastTestHelper.hpp
index 47c822c4b3..a22800c6c8 100644
--- a/delegate/test/CastTestHelper.hpp
+++ b/delegate/test/CastTestHelper.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021, 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -10,12 +10,8 @@
#include <armnn_delegate.hpp>
#include <DelegateTestInterpreter.hpp>
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/kernels/register.h>
#include <tensorflow/lite/version.h>
-#include <doctest/doctest.h>
-
namespace
{
std::vector<char> CreateCastTfLiteModel(tflite::TensorType inputTensorType,
diff --git a/delegate/test/ComparisonTestHelper.hpp b/delegate/test/ComparisonTestHelper.hpp
index 436790d6ff..0170eb405e 100644
--- a/delegate/test/ComparisonTestHelper.hpp
+++ b/delegate/test/ComparisonTestHelper.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020, 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -10,12 +10,8 @@
#include <armnn_delegate.hpp>
#include <DelegateTestInterpreter.hpp>
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/kernels/register.h>
#include <tensorflow/lite/version.h>
-#include <doctest/doctest.h>
-
namespace
{
diff --git a/delegate/test/ControlTestHelper.hpp b/delegate/test/ControlTestHelper.hpp
index 7c2efc855d..cde69c2f90 100644
--- a/delegate/test/ControlTestHelper.hpp
+++ b/delegate/test/ControlTestHelper.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020, 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -10,12 +10,8 @@
#include <armnn_delegate.hpp>
#include <DelegateTestInterpreter.hpp>
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/kernels/register.h>
#include <tensorflow/lite/version.h>
-#include <doctest/doctest.h>
-
namespace
{
diff --git a/delegate/test/ConvolutionTestHelper.hpp b/delegate/test/ConvolutionTestHelper.hpp
index f651ad5e7e..ededbc7bac 100644
--- a/delegate/test/ConvolutionTestHelper.hpp
+++ b/delegate/test/ConvolutionTestHelper.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020, 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -10,14 +10,8 @@
#include <armnn_delegate.hpp>
#include <DelegateTestInterpreter.hpp>
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
-#include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
#include <tensorflow/lite/version.h>
-#include <doctest/doctest.h>
-
namespace
{
diff --git a/delegate/test/DelegateOptionsTest.cpp b/delegate/test/DelegateOptionsTest.cpp
index 349e5d0692..7724f916d4 100644
--- a/delegate/test/DelegateOptionsTest.cpp
+++ b/delegate/test/DelegateOptionsTest.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021, 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -7,6 +7,8 @@
#include <common/include/ProfilingGuid.hpp>
#include <armnnUtils/Filesystem.hpp>
+#include <doctest/doctest.h>
+
namespace armnnDelegate
{
@@ -229,7 +231,13 @@ TEST_CASE ("ArmnnDelegateModelOptions_CpuAcc_Test")
armnn::OptimizerOptionsOpaque optimizerOptions(false, false, false,
false, modelOptions, false);
- armnnDelegate::DelegateOptions delegateOptions(backends, optimizerOptions);
+ std::vector<armnn::BackendId> availableBackends = CaptureAvailableBackends(backends);
+ // It's possible that CpuAcc isn't supported. In that case availableBackends will be empty.
+ if (availableBackends.empty())
+ {
+ return;
+ }
+ armnnDelegate::DelegateOptions delegateOptions(availableBackends, optimizerOptions);
DelegateOptionTest<float>(::tflite::TensorType_FLOAT32,
tensorShape,
diff --git a/delegate/test/DelegateOptionsTestHelper.hpp b/delegate/test/DelegateOptionsTestHelper.hpp
index 76d127237c..87a01e71ad 100644
--- a/delegate/test/DelegateOptionsTestHelper.hpp
+++ b/delegate/test/DelegateOptionsTestHelper.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021, 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -10,12 +10,8 @@
#include <armnn_delegate.hpp>
#include <DelegateTestInterpreter.hpp>
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/kernels/register.h>
#include <tensorflow/lite/version.h>
-#include <doctest/doctest.h>
-
namespace
{
diff --git a/delegate/test/ElementwiseBinaryTestHelper.hpp b/delegate/test/ElementwiseBinaryTestHelper.hpp
index b3766134b9..007ee1c49e 100644
--- a/delegate/test/ElementwiseBinaryTestHelper.hpp
+++ b/delegate/test/ElementwiseBinaryTestHelper.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020, 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -10,12 +10,8 @@
#include <armnn_delegate.hpp>
#include <DelegateTestInterpreter.hpp>
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/kernels/register.h>
#include <tensorflow/lite/version.h>
-#include <doctest/doctest.h>
-
namespace
{
diff --git a/delegate/test/ElementwiseUnaryTestHelper.hpp b/delegate/test/ElementwiseUnaryTestHelper.hpp
index c62b9cc45d..fc78b73553 100644
--- a/delegate/test/ElementwiseUnaryTestHelper.hpp
+++ b/delegate/test/ElementwiseUnaryTestHelper.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020, 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -10,12 +10,8 @@
#include <armnn_delegate.hpp>
#include <DelegateTestInterpreter.hpp>
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/kernels/register.h>
#include <tensorflow/lite/version.h>
-#include <doctest/doctest.h>
-
namespace
{
diff --git a/delegate/test/ExpandDimsTest.cpp b/delegate/test/ExpandDimsTest.cpp
index 8c21f731cc..25d911db8a 100644
--- a/delegate/test/ExpandDimsTest.cpp
+++ b/delegate/test/ExpandDimsTest.cpp
@@ -1,14 +1,16 @@
//
-// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "RedefineTestHelper.hpp"
+#include <doctest/doctest.h>
+
namespace armnnDelegate
{
-void ExpandDimsSimpleTest(std::vector<armnn::BackendId>& backends)
+void ExpandDimsSimpleTest()
{
// Set input data
std::vector<int32_t> inputShape { 2, 2, 1 };
@@ -20,15 +22,15 @@ void ExpandDimsSimpleTest(std::vector<armnn::BackendId>& backends)
RedefineTest<float>(tflite::BuiltinOperator_EXPAND_DIMS,
::tflite::TensorType_FLOAT32,
- backends,
inputShape,
outputShape,
inputValues,
expectedOutputValues,
- axis);
+ axis,
+ true);
}
-void ExpandDimsWithNegativeAxisTest(std::vector<armnn::BackendId>& backends)
+void ExpandDimsWithNegativeAxisTest()
{
// Set input data
std::vector<int32_t> inputShape { 1, 2, 2 };
@@ -40,63 +42,27 @@ void ExpandDimsWithNegativeAxisTest(std::vector<armnn::BackendId>& backends)
RedefineTest<float>(tflite::BuiltinOperator_EXPAND_DIMS,
::tflite::TensorType_FLOAT32,
- backends,
inputShape,
outputShape,
inputValues,
expectedOutputValues,
- axis);
-}
-
-TEST_SUITE("ExpandDims_GpuAccTests")
-{
-
-TEST_CASE ("ExpandDims_Simple_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- ExpandDimsSimpleTest(backends);
+ axis,
+ true);
}
-TEST_CASE ("ExpandDims_With_Negative_Axis_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- ExpandDimsWithNegativeAxisTest(backends);
-}
-
-} // TEST_SUITE("ExpandDims_GpuAccTests")
-
-TEST_SUITE("ExpandDims_CpuAccTests")
-{
-
-TEST_CASE ("ExpandDims_Simple_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- ExpandDimsSimpleTest(backends);
-}
-
-TEST_CASE ("ExpandDims_With_Negative_Axis_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- ExpandDimsWithNegativeAxisTest(backends);
-}
-
-} // TEST_SUITE("ExpandDims_CpuAccTests")
-
-TEST_SUITE("ExpandDims_CpuRefTests")
+TEST_SUITE("ExpandDimsTests")
{
-TEST_CASE ("ExpandDims_Simple_CpuRef_Test")
+TEST_CASE ("ExpandDims_Simple_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- ExpandDimsSimpleTest(backends);
+ ExpandDimsSimpleTest();
}
-TEST_CASE ("ExpandDims_With_Negative_Axis_CpuRef_Test")
+TEST_CASE ("ExpandDims_With_Negative_Axis_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- ExpandDimsWithNegativeAxisTest(backends);
+ ExpandDimsWithNegativeAxisTest();
}
-} // TEST_SUITE("ExpandDims_CpuRefTests")
+} // TEST_SUITE("ExpandDimsTests")
} // namespace armnnDelegate \ No newline at end of file
diff --git a/delegate/test/FillTest.cpp b/delegate/test/FillTest.cpp
index e9def8de41..5cde6dd8eb 100644
--- a/delegate/test/FillTest.cpp
+++ b/delegate/test/FillTest.cpp
@@ -1,22 +1,17 @@
//
-// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021, 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "FillTestHelper.hpp"
-#include <armnn_delegate.hpp>
-
-#include <flatbuffers/flatbuffers.h>
-
#include <doctest/doctest.h>
namespace armnnDelegate
{
-void Fill2dTest(std::vector<armnn::BackendId>& backends,
- tflite::BuiltinOperator fillOperatorCode = tflite::BuiltinOperator_FILL,
- float fill = 2.0f )
+void Fill2dTest(tflite::BuiltinOperator fillOperatorCode = tflite::BuiltinOperator_FILL,
+ float fill = 2.0f )
{
std::vector<int32_t> inputShape { 2 };
std::vector<int32_t> tensorShape { 2, 2 };
@@ -25,16 +20,14 @@ void Fill2dTest(std::vector<armnn::BackendId>& backends,
FillTest<float>(fillOperatorCode,
::tflite::TensorType_FLOAT32,
- backends,
inputShape,
tensorShape,
expectedOutputValues,
fill);
}
-void Fill3dTest(std::vector<armnn::BackendId>& backends,
- tflite::BuiltinOperator fillOperatorCode = tflite::BuiltinOperator_FILL,
- float fill = 5.0f )
+void Fill3dTest(tflite::BuiltinOperator fillOperatorCode = tflite::BuiltinOperator_FILL,
+ float fill = 5.0f )
{
std::vector<int32_t> inputShape { 3 };
std::vector<int32_t> tensorShape { 3, 3, 3 };
@@ -52,16 +45,14 @@ void Fill3dTest(std::vector<armnn::BackendId>& backends,
FillTest<float>(fillOperatorCode,
::tflite::TensorType_FLOAT32,
- backends,
inputShape,
tensorShape,
expectedOutputValues,
fill);
}
-void Fill4dTest(std::vector<armnn::BackendId>& backends,
- tflite::BuiltinOperator fillOperatorCode = tflite::BuiltinOperator_FILL,
- float fill = 3.0f )
+void Fill4dTest(tflite::BuiltinOperator fillOperatorCode = tflite::BuiltinOperator_FILL,
+ float fill = 3.0f )
{
std::vector<int32_t> inputShape { 4 };
std::vector<int32_t> tensorShape { 2, 2, 4, 4 };
@@ -87,16 +78,14 @@ void Fill4dTest(std::vector<armnn::BackendId>& backends,
FillTest<float>(fillOperatorCode,
::tflite::TensorType_FLOAT32,
- backends,
inputShape,
tensorShape,
expectedOutputValues,
fill);
}
-void FillInt32Test(std::vector<armnn::BackendId>& backends,
- tflite::BuiltinOperator fillOperatorCode = tflite::BuiltinOperator_FILL,
- int32_t fill = 2 )
+void FillInt32Test(tflite::BuiltinOperator fillOperatorCode = tflite::BuiltinOperator_FILL,
+ int32_t fill = 2 )
{
std::vector<int32_t> inputShape { 2 };
std::vector<int32_t> tensorShape { 2, 2 };
@@ -105,116 +94,39 @@ void FillInt32Test(std::vector<armnn::BackendId>& backends,
FillTest<int32_t>(fillOperatorCode,
::tflite::TensorType_INT32,
- backends,
inputShape,
tensorShape,
expectedOutputValues,
fill);
}
-TEST_SUITE("Fill_CpuRefTests")
-{
-
-TEST_CASE ("Fill2d_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- Fill2dTest(backends);
-}
-
-TEST_CASE ("Fill3d_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- Fill3dTest(backends);
-}
-
-TEST_CASE ("Fill3d_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- Fill3dTest(backends);
-}
-
-TEST_CASE ("Fill4d_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- Fill4dTest(backends);
-}
-
-TEST_CASE ("FillInt32_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- FillInt32Test(backends);
-}
-
-}
-
-TEST_SUITE("Fill_CpuAccTests")
+TEST_SUITE("FillTests")
{
-TEST_CASE ("Fill2d_CpuAcc_Test")
+TEST_CASE ("Fill2d_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- Fill2dTest(backends);
+ Fill2dTest();
}
-TEST_CASE ("Fill3d_CpuAcc_Test")
+TEST_CASE ("Fill3d_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- Fill3dTest(backends);
+ Fill3dTest();
}
-TEST_CASE ("Fill3d_CpuAcc_Test")
+TEST_CASE ("Fill3d_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- Fill3dTest(backends);
+ Fill3dTest();
}
-TEST_CASE ("Fill4d_CpuAcc_Test")
+TEST_CASE ("Fill4d_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- Fill4dTest(backends);
+ Fill4dTest();
}
-TEST_CASE ("FillInt32_CpuAcc_Test")
+TEST_CASE ("FillInt32_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- FillInt32Test(backends);
-}
-
-}
-
-TEST_SUITE("Fill_GpuAccTests")
-{
-
-TEST_CASE ("Fill2d_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- Fill2dTest(backends);
-}
-
-TEST_CASE ("Fill3d_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- Fill3dTest(backends);
-}
-
-TEST_CASE ("Fill3d_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- Fill3dTest(backends);
-}
-
-TEST_CASE ("Fill4d_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- Fill4dTest(backends);
-}
-
-TEST_CASE ("FillInt32_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- FillInt32Test(backends);
-}
-
+ FillInt32Test();
}
+} // End of FillTests suite.
} // namespace armnnDelegate \ No newline at end of file
diff --git a/delegate/test/FillTestHelper.hpp b/delegate/test/FillTestHelper.hpp
index 100aee7cba..51f9120376 100644
--- a/delegate/test/FillTestHelper.hpp
+++ b/delegate/test/FillTestHelper.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021, 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -10,12 +10,8 @@
#include <armnn_delegate.hpp>
#include <DelegateTestInterpreter.hpp>
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/kernels/register.h>
#include <tensorflow/lite/version.h>
-#include <doctest/doctest.h>
-
namespace
{
@@ -110,11 +106,11 @@ std::vector<char> CreateFillTfLiteModel(tflite::BuiltinOperator fillOperatorCode
template <typename T>
void FillTest(tflite::BuiltinOperator fillOperatorCode,
tflite::TensorType tensorType,
- const std::vector<armnn::BackendId>& backends,
std::vector<int32_t >& inputShape,
std::vector<int32_t >& tensorShape,
std::vector<T>& expectedOutputValues,
- T fillValue)
+ T fillValue,
+ const std::vector<armnn::BackendId>& backends = {})
{
using namespace delegateTestInterpreter;
std::vector<char> modelBuffer = CreateFillTfLiteModel<T>(fillOperatorCode,
@@ -131,7 +127,7 @@ void FillTest(tflite::BuiltinOperator fillOperatorCode,
std::vector<int32_t> tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0);
// Setup interpreter with Arm NN Delegate applied.
- auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+ auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, CaptureAvailableBackends(backends));
CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
std::vector<T> armnnOutputValues = armnnInterpreter.GetOutputResult<T>(0);
diff --git a/delegate/test/FullyConnectedTest.cpp b/delegate/test/FullyConnectedTest.cpp
index 3ef5cedbd7..38669a68ac 100644
--- a/delegate/test/FullyConnectedTest.cpp
+++ b/delegate/test/FullyConnectedTest.cpp
@@ -1,14 +1,16 @@
//
-// Copyright © 2020-2021,2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020-2021,2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "FullyConnectedTestHelper.hpp"
+#include <doctest/doctest.h>
+
namespace
{
-void FullyConnectedFp32Test(std::vector<armnn::BackendId>& backends, bool constantWeights = true)
+void FullyConnectedFp32Test(const std::vector<armnn::BackendId>& backends = {}, bool constantWeights = true)
{
std::vector<int32_t> inputTensorShape { 1, 4, 1, 1 };
std::vector<int32_t> weightsTensorShape { 1, 4 };
@@ -21,8 +23,7 @@ void FullyConnectedFp32Test(std::vector<armnn::BackendId>& backends, bool consta
std::vector<float> expectedOutputValues = { (400 + 10) };
// bias is set std::vector<float> biasData = { 10 } in the model
- FullyConnectedTest<float>(backends,
- ::tflite::TensorType_FLOAT32,
+ FullyConnectedTest<float>(::tflite::TensorType_FLOAT32,
tflite::ActivationFunctionType_NONE,
inputTensorShape,
weightsTensorShape,
@@ -31,10 +32,11 @@ void FullyConnectedFp32Test(std::vector<armnn::BackendId>& backends, bool consta
inputValues,
expectedOutputValues,
weightsData,
+ backends,
constantWeights);
}
-void FullyConnectedActivationTest(std::vector<armnn::BackendId>& backends, bool constantWeights = true)
+void FullyConnectedActivationTest(const std::vector<armnn::BackendId>& backends = {}, bool constantWeights = true)
{
std::vector<int32_t> inputTensorShape { 1, 4, 1, 1 };
std::vector<int32_t> weightsTensorShape { 1, 4 };
@@ -47,8 +49,7 @@ void FullyConnectedActivationTest(std::vector<armnn::BackendId>& backends, bool
std::vector<float> expectedOutputValues = { 0 };
// bias is set std::vector<float> biasData = { 10 } in the model
- FullyConnectedTest<float>(backends,
- ::tflite::TensorType_FLOAT32,
+ FullyConnectedTest<float>(::tflite::TensorType_FLOAT32,
tflite::ActivationFunctionType_RELU,
inputTensorShape,
weightsTensorShape,
@@ -57,10 +58,11 @@ void FullyConnectedActivationTest(std::vector<armnn::BackendId>& backends, bool
inputValues,
expectedOutputValues,
weightsData,
+ backends,
constantWeights);
}
-void FullyConnectedInt8Test(std::vector<armnn::BackendId>& backends, bool constantWeights = true)
+void FullyConnectedInt8Test(const std::vector<armnn::BackendId>& backends = {}, bool constantWeights = true)
{
std::vector<int32_t> inputTensorShape { 1, 4, 2, 1 };
std::vector<int32_t> weightsTensorShape { 1, 4 };
@@ -75,104 +77,55 @@ void FullyConnectedInt8Test(std::vector<armnn::BackendId>& backends, bool consta
// bias is set std::vector<int32_t> biasData = { 10 } in the model
// input and weights quantization scale 1.0f and offset 0 in the model
// output quantization scale 2.0f and offset 0 in the model
- FullyConnectedTest<int8_t>(backends,
- ::tflite::TensorType_INT8,
- tflite::ActivationFunctionType_NONE,
+ FullyConnectedTest<int8_t>(::tflite::TensorType_INT8,
+ tflite::ActivationFunctionType_NONE,
inputTensorShape,
weightsTensorShape,
biasTensorShape,
- outputTensorShape,
- inputValues,
- expectedOutputValues,
- weightsData,
+ outputTensorShape,
+ inputValues,
+ expectedOutputValues,
+ weightsData,
+ backends,
constantWeights);
}
-TEST_SUITE("FullyConnected_GpuAccTests")
-{
-
-TEST_CASE ("FullyConnected_FP32_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- FullyConnectedFp32Test(backends);
-}
-
-TEST_CASE ("FullyConnected_Int8_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- FullyConnectedInt8Test(backends);
-}
-
-TEST_CASE ("FullyConnected_Activation_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- FullyConnectedActivationTest(backends);
-}
-
-} // End of TEST_SUITE("FullyConnected_GpuAccTests")
-
-TEST_SUITE("FullyConnected_CpuAccTests")
-{
-
-TEST_CASE ("FullyConnected_FP32_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- FullyConnectedFp32Test(backends);
-}
-
-TEST_CASE ("FullyConnected_Int8_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- FullyConnectedInt8Test(backends);
-}
-
-TEST_CASE ("FullyConnected_Activation_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- FullyConnectedActivationTest(backends);
-}
-
-} // End of TEST_SUITE("FullyConnected_CpuAccTests")
-
-TEST_SUITE("FullyConnected_CpuRefTests")
+TEST_SUITE("FullyConnectedTests")
{
-TEST_CASE ("FullyConnected_FP32_CpuRef_Test")
+TEST_CASE ("FullyConnected_FP32_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- FullyConnectedFp32Test(backends);
+ FullyConnectedFp32Test();
}
-TEST_CASE ("FullyConnected_Int8_CpuRef_Test")
+TEST_CASE ("FullyConnected_Int8_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- FullyConnectedInt8Test(backends);
+ FullyConnectedInt8Test();
}
-TEST_CASE ("FullyConnected_Activation_CpuRef_Test")
+TEST_CASE ("FullyConnected_Activation_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- FullyConnectedActivationTest(backends);
+ FullyConnectedActivationTest();
}
-TEST_CASE ("FullyConnected_Weights_As_Inputs_FP32_CpuRef_Test")
+TEST_CASE ("FullyConnected_Weights_As_Inputs_FP32_Test")
{
std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
FullyConnectedFp32Test(backends, false);
}
-TEST_CASE ("FullyConnected_Weights_As_Inputs_Int8_CpuRef_Test")
+TEST_CASE ("FullyConnected_Weights_As_Inputs_Int8_Test")
{
std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
FullyConnectedInt8Test(backends, false);
}
-TEST_CASE ("FullyConnected_Weights_As_Inputs_Activation_CpuRef_Test")
+TEST_CASE ("FullyConnected_Weights_As_Inputs_Activation_Test")
{
std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
FullyConnectedActivationTest(backends, false);
}
-} // End of TEST_SUITE("FullyConnected_CpuRefTests")
+} // End of TEST_SUITE("FullyConnectedTests")
} // anonymous namespace \ No newline at end of file
diff --git a/delegate/test/FullyConnectedTestHelper.hpp b/delegate/test/FullyConnectedTestHelper.hpp
index 20c1102bd9..517d932f29 100644
--- a/delegate/test/FullyConnectedTestHelper.hpp
+++ b/delegate/test/FullyConnectedTestHelper.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020, 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -10,12 +10,8 @@
#include <armnn_delegate.hpp>
#include <DelegateTestInterpreter.hpp>
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/kernels/register.h>
#include <tensorflow/lite/version.h>
-#include <doctest/doctest.h>
-
namespace
{
@@ -164,8 +160,7 @@ std::vector<char> CreateFullyConnectedTfLiteModel(tflite::TensorType tensorType,
}
template <typename T>
-void FullyConnectedTest(std::vector<armnn::BackendId>& backends,
- tflite::TensorType tensorType,
+void FullyConnectedTest(tflite::TensorType tensorType,
tflite::ActivationFunctionType activationType,
const std::vector <int32_t>& inputTensorShape,
const std::vector <int32_t>& weightsTensorShape,
@@ -174,6 +169,7 @@ void FullyConnectedTest(std::vector<armnn::BackendId>& backends,
std::vector <T>& inputValues,
std::vector <T>& expectedOutputValues,
std::vector <T>& weightsData,
+ const std::vector<armnn::BackendId>& backends = {},
bool constantWeights = true,
float quantScale = 1.0f,
int quantOffset = 0)
@@ -196,7 +192,7 @@ void FullyConnectedTest(std::vector<armnn::BackendId>& backends,
CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
// Setup interpreter with Arm NN Delegate applied.
- auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+ auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, CaptureAvailableBackends(backends));
CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
CHECK(tfLiteInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
diff --git a/delegate/test/GatherNdTest.cpp b/delegate/test/GatherNdTest.cpp
index 26dc332239..10e7969992 100644
--- a/delegate/test/GatherNdTest.cpp
+++ b/delegate/test/GatherNdTest.cpp
@@ -1,21 +1,17 @@
//
-// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "GatherNdTestHelper.hpp"
-#include <armnn_delegate.hpp>
-
-#include <flatbuffers/flatbuffers.h>
-
#include <doctest/doctest.h>
namespace armnnDelegate
{
-// GATHER_ND Operator
-void GatherNdUint8Test(std::vector<armnn::BackendId>& backends)
+// Gather_Nd Operator
+void GatherNdUint8Test()
{
std::vector<int32_t> paramsShape{ 5, 2 };
@@ -27,7 +23,6 @@ void GatherNdUint8Test(std::vector<armnn::BackendId>& backends)
std::vector<uint8_t> expectedOutputValues{ 3, 4, 1, 2, 9, 10 };
GatherNdTest<uint8_t>(::tflite::TensorType_UINT8,
- backends,
paramsShape,
indicesShape,
expectedOutputShape,
@@ -36,7 +31,7 @@ void GatherNdUint8Test(std::vector<armnn::BackendId>& backends)
expectedOutputValues);
}
-void GatherNdFp32Test(std::vector<armnn::BackendId>& backends)
+void GatherNdFp32Test()
{
std::vector<int32_t> paramsShape{ 5, 2 };
std::vector<int32_t> indicesShape{ 3, 1 };
@@ -47,7 +42,6 @@ void GatherNdFp32Test(std::vector<armnn::BackendId>& backends)
std::vector<float> expectedOutputValues{ 3.3f, 4.4f, 1.1f, 2.2f, 9.9f, 10.10f };
GatherNdTest<float>(::tflite::TensorType_FLOAT32,
- backends,
paramsShape,
indicesShape,
expectedOutputShape,
@@ -56,57 +50,22 @@ void GatherNdFp32Test(std::vector<armnn::BackendId>& backends)
expectedOutputValues);
}
-// GATHER_ND Test Suite
-TEST_SUITE("GATHER_ND_CpuRefTests")
+// Gather_Nd Test Suite
+TEST_SUITE("Gather_NdTests")
{
-TEST_CASE ("GATHER_ND_Uint8_CpuRef_Test")
+TEST_CASE ("Gather_Nd_Uint8_Test")
{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
- GatherNdUint8Test(backends);
+ GatherNdUint8Test();
}
-TEST_CASE ("GATHER_ND_Fp32_CpuRef_Test")
+TEST_CASE ("Gather_Nd_Fp32_Test")
{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
- GatherNdFp32Test(backends);
+ GatherNdFp32Test();
}
}
-TEST_SUITE("GATHER_ND_CpuAccTests")
-{
-
-TEST_CASE ("GATHER_ND_Uint8_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
- GatherNdUint8Test(backends);
-}
-
-TEST_CASE ("GATHER_ND_Fp32_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
- GatherNdFp32Test(backends);
-}
-
-}
-
-TEST_SUITE("GATHER_ND_GpuAccTests")
-{
-
-TEST_CASE ("GATHER_ND_Uint8_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
- GatherNdUint8Test(backends);
-}
-
-TEST_CASE ("GATHER_ND_Fp32_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
- GatherNdFp32Test(backends);
-}
-
-}
-// End of GATHER_ND Test Suite
+// End of Gather_Nd Test Suite
} // namespace armnnDelegate
\ No newline at end of file
diff --git a/delegate/test/GatherNdTestHelper.hpp b/delegate/test/GatherNdTestHelper.hpp
index c8e9a72b97..ff6f514130 100644
--- a/delegate/test/GatherNdTestHelper.hpp
+++ b/delegate/test/GatherNdTestHelper.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -10,12 +10,8 @@
#include <armnn_delegate.hpp>
#include <DelegateTestInterpreter.hpp>
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/kernels/register.h>
#include <tensorflow/lite/version.h>
-#include <doctest/doctest.h>
-
namespace
{
@@ -114,13 +110,13 @@ std::vector<char> CreateGatherNdTfLiteModel(tflite::TensorType tensorType,
template<typename T>
void GatherNdTest(tflite::TensorType tensorType,
- std::vector<armnn::BackendId>& backends,
std::vector<int32_t>& paramsShape,
std::vector<int32_t>& indicesShape,
std::vector<int32_t>& expectedOutputShape,
std::vector<T>& paramsValues,
std::vector<int32_t>& indicesValues,
std::vector<T>& expectedOutputValues,
+ const std::vector<armnn::BackendId>& backends = {},
float quantScale = 1.0f,
int quantOffset = 0)
{
@@ -141,7 +137,7 @@ void GatherNdTest(tflite::TensorType tensorType,
std::vector<int32_t> tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0);
// Setup interpreter with Arm NN Delegate applied.
- auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+ auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, CaptureAvailableBackends(backends));
CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
CHECK(armnnInterpreter.FillInputTensor<T>(paramsValues, 0) == kTfLiteOk);
CHECK(armnnInterpreter.FillInputTensor<int32_t>(indicesValues, 1) == kTfLiteOk);
diff --git a/delegate/test/GatherTest.cpp b/delegate/test/GatherTest.cpp
index c49c10d054..be9123a5ff 100644
--- a/delegate/test/GatherTest.cpp
+++ b/delegate/test/GatherTest.cpp
@@ -1,21 +1,17 @@
//
-// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020, 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "GatherTestHelper.hpp"
-#include <armnn_delegate.hpp>
-
-#include <flatbuffers/flatbuffers.h>
-
#include <doctest/doctest.h>
namespace armnnDelegate
{
-// GATHER Operator
-void GatherUint8Test(std::vector<armnn::BackendId>& backends)
+// Gather Operator
+void GatherUint8Test()
{
std::vector<int32_t> paramsShape{8};
@@ -28,7 +24,6 @@ void GatherUint8Test(std::vector<armnn::BackendId>& backends)
std::vector<uint8_t> expectedOutputValues{8, 7, 6};
GatherTest<uint8_t>(::tflite::TensorType_UINT8,
- backends,
paramsShape,
indicesShape,
expectedOutputShape,
@@ -38,7 +33,7 @@ void GatherUint8Test(std::vector<armnn::BackendId>& backends)
expectedOutputValues);
}
-void GatherFp32Test(std::vector<armnn::BackendId>& backends)
+void GatherFp32Test()
{
std::vector<int32_t> paramsShape{8};
std::vector<int32_t> indicesShape{3};
@@ -50,7 +45,6 @@ void GatherFp32Test(std::vector<armnn::BackendId>& backends)
std::vector<float> expectedOutputValues{8.8f, 7.7f, 6.6f};
GatherTest<float>(::tflite::TensorType_FLOAT32,
- backends,
paramsShape,
indicesShape,
expectedOutputShape,
@@ -60,57 +54,21 @@ void GatherFp32Test(std::vector<armnn::BackendId>& backends)
expectedOutputValues);
}
-// GATHER Test Suite
-TEST_SUITE("GATHER_CpuRefTests")
-{
-
-TEST_CASE ("GATHER_Uint8_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
- GatherUint8Test(backends);
-}
-
-TEST_CASE ("GATHER_Fp32_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
- GatherFp32Test(backends);
-}
-
-}
-
-TEST_SUITE("GATHER_CpuAccTests")
-{
-
-TEST_CASE ("GATHER_Uint8_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
- GatherUint8Test(backends);
-}
-
-TEST_CASE ("GATHER_Fp32_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
- GatherFp32Test(backends);
-}
-
-}
-
-TEST_SUITE("GATHER_GpuAccTests")
+// Gather Test Suite
+TEST_SUITE("GatherTests")
{
-TEST_CASE ("GATHER_Uint8_GpuAcc_Test")
+TEST_CASE ("Gather_Uint8_Test")
{
- std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
- GatherUint8Test(backends);
+ GatherUint8Test();
}
-TEST_CASE ("GATHER_Fp32_GpuAcc_Test")
+TEST_CASE ("Gather_Fp32_Test")
{
- std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
- GatherFp32Test(backends);
+ GatherFp32Test();
}
}
-// End of GATHER Test Suite
+// End of Gather Test Suite
} // namespace armnnDelegate
\ No newline at end of file
diff --git a/delegate/test/GatherTestHelper.hpp b/delegate/test/GatherTestHelper.hpp
index 5d24cdabce..e38b9a5557 100644
--- a/delegate/test/GatherTestHelper.hpp
+++ b/delegate/test/GatherTestHelper.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020, 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -10,12 +10,8 @@
#include <armnn_delegate.hpp>
#include <DelegateTestInterpreter.hpp>
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/kernels/register.h>
#include <tensorflow/lite/version.h>
-#include <doctest/doctest.h>
-
namespace
{
@@ -115,7 +111,6 @@ std::vector<char> CreateGatherTfLiteModel(tflite::TensorType tensorType,
template<typename T>
void GatherTest(tflite::TensorType tensorType,
- std::vector<armnn::BackendId>& backends,
std::vector<int32_t>& paramsShape,
std::vector<int32_t>& indicesShape,
std::vector<int32_t>& expectedOutputShape,
@@ -123,6 +118,7 @@ void GatherTest(tflite::TensorType tensorType,
std::vector<T>& paramsValues,
std::vector<int32_t>& indicesValues,
std::vector<T>& expectedOutputValues,
+ const std::vector<armnn::BackendId>& backends = {},
float quantScale = 1.0f,
int quantOffset = 0)
{
@@ -144,7 +140,7 @@ void GatherTest(tflite::TensorType tensorType,
std::vector<int32_t> tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0);
// Setup interpreter with Arm NN Delegate applied.
- auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+ auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, CaptureAvailableBackends(backends));
CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
CHECK(armnnInterpreter.FillInputTensor<T>(paramsValues, 0) == kTfLiteOk);
CHECK(armnnInterpreter.FillInputTensor<int32_t>(indicesValues, 1) == kTfLiteOk);
diff --git a/delegate/test/LogicalTestHelper.hpp b/delegate/test/LogicalTestHelper.hpp
index 763bb49adb..bd862d2445 100644
--- a/delegate/test/LogicalTestHelper.hpp
+++ b/delegate/test/LogicalTestHelper.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020, 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -10,12 +10,8 @@
#include <armnn_delegate.hpp>
#include <DelegateTestInterpreter.hpp>
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/kernels/register.h>
#include <tensorflow/lite/version.h>
-#include <doctest/doctest.h>
-
namespace
{
diff --git a/delegate/test/LstmTest.cpp b/delegate/test/LstmTest.cpp
index 87b87bead6..309f3cd3fe 100644
--- a/delegate/test/LstmTest.cpp
+++ b/delegate/test/LstmTest.cpp
@@ -1,14 +1,10 @@
//
-// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021, 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "LstmTestHelper.hpp"
-#include <armnn_delegate.hpp>
-
-#include <flatbuffers/flatbuffers.h>
-
#include <doctest/doctest.h>
namespace armnnDelegate
@@ -118,8 +114,7 @@ void LstmTest(std::vector<armnn::BackendId>& backends)
float clippingThresCell = 0.f;
float clippingThresProj = 0.f;
- LstmTestImpl<float>(backends,
- ::tflite::TensorType_FLOAT32,
+ LstmTestImpl<float>(::tflite::TensorType_FLOAT32,
batchSize,
inputSize,
outputSize,
@@ -161,29 +156,18 @@ void LstmTest(std::vector<armnn::BackendId>& backends)
expectedOutputValues,
activationFunction,
clippingThresCell,
- clippingThresProj);
+ clippingThresProj,
+ backends);
}
-TEST_SUITE("LstmTest_CpuRefTests")
+TEST_SUITE("LstmTest_Tests")
{
-TEST_CASE ("LstmTest_CpuRef_Test")
+TEST_CASE ("LstmTest_Test")
{
- std::vector <armnn::BackendId> backends = {armnn::Compute::CpuRef};
+ std::vector <armnn::BackendId> backends = {armnn::Compute::CpuRef, armnn::Compute::CpuAcc};
LstmTest(backends);
}
-} //End of TEST_SUITE("Convolution2dTest_CpuRef")
-
-TEST_SUITE("LstmTest_CpuAccTests")
-{
-
-TEST_CASE ("LstmTest_CpuAcc_Test")
-{
- std::vector <armnn::BackendId> backends = {armnn::Compute::CpuAcc};
- LstmTest(backends);
}
-
-} //End of TEST_SUITE("Convolution2dTest_CpuAcc")
-
} // namespace armnnDelegate
\ No newline at end of file
diff --git a/delegate/test/LstmTestHelper.hpp b/delegate/test/LstmTestHelper.hpp
index ce1efe0b47..d111445e80 100644
--- a/delegate/test/LstmTestHelper.hpp
+++ b/delegate/test/LstmTestHelper.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021, 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -10,12 +10,8 @@
#include <armnn_delegate.hpp>
#include <DelegateTestInterpreter.hpp>
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/kernels/register.h>
#include <tensorflow/lite/version.h>
-#include <doctest/doctest.h>
-
namespace
{
@@ -543,8 +539,7 @@ std::vector<char> CreateLstmTfLiteModel(tflite::TensorType tensorType,
}
template <typename T>
-void LstmTestImpl(std::vector<armnn::BackendId>& backends,
- tflite::TensorType tensorType,
+void LstmTestImpl(tflite::TensorType tensorType,
int32_t batchSize,
int32_t inputSize,
int32_t outputSize,
@@ -586,7 +581,8 @@ void LstmTestImpl(std::vector<armnn::BackendId>& backends,
std::vector<T>& expectedOutputValues,
tflite::ActivationFunctionType activationFunction,
float clippingThresCell,
- float clippingThresProj)
+ float clippingThresProj,
+ const std::vector<armnn::BackendId>& backends = {})
{
using namespace delegateTestInterpreter;
@@ -643,7 +639,7 @@ void LstmTestImpl(std::vector<armnn::BackendId>& backends,
std::vector<int32_t> tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0);
// Setup interpreter with Arm NN Delegate applied.
- auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+ auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, CaptureAvailableBackends(backends));
CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
CHECK(armnnInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
diff --git a/delegate/test/MirrorPadTest.cpp b/delegate/test/MirrorPadTest.cpp
index 5b459861c6..09c773c930 100644
--- a/delegate/test/MirrorPadTest.cpp
+++ b/delegate/test/MirrorPadTest.cpp
@@ -1,14 +1,10 @@
//
-// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021, 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "PadTestHelper.hpp"
-#include <armnn_delegate.hpp>
-
-#include <flatbuffers/flatbuffers.h>
-
#include <doctest/doctest.h>
namespace armnnDelegate
@@ -43,7 +39,6 @@ void MirrorPadSymmetric2dTest(std::vector<armnn::BackendId>& backends)
PadTest<float>(tflite::BuiltinOperator_MIRROR_PAD,
::tflite::TensorType_FLOAT32,
- backends,
inputShape,
paddingShape,
outputShape,
@@ -51,6 +46,7 @@ void MirrorPadSymmetric2dTest(std::vector<armnn::BackendId>& backends)
paddingDim,
expectedOutputValues,
0, // Padding value - Not used in these tests.
+ backends,
1.0f, // Scale
0, // Offset
tflite::MirrorPadMode_SYMMETRIC);
@@ -85,7 +81,6 @@ void MirrorPadReflect2dTest(std::vector<armnn::BackendId>& backends)
PadTest<float>(tflite::BuiltinOperator_MIRROR_PAD,
::tflite::TensorType_FLOAT32,
- backends,
inputShape,
paddingShape,
outputShape,
@@ -93,6 +88,7 @@ void MirrorPadReflect2dTest(std::vector<armnn::BackendId>& backends)
paddingDim,
expectedOutputValues,
0, // Padding value - Not used in these tests.
+ backends,
1.0f, // Scale
0, // Offset
tflite::MirrorPadMode_REFLECT);
@@ -143,7 +139,6 @@ void MirrorPadSymmetric3dTest(std::vector<armnn::BackendId>& backends)
PadTest<float>(tflite::BuiltinOperator_MIRROR_PAD,
::tflite::TensorType_FLOAT32,
- backends,
inputShape,
paddingShape,
outputShape,
@@ -151,6 +146,7 @@ void MirrorPadSymmetric3dTest(std::vector<armnn::BackendId>& backends)
paddingDim,
expectedOutputValues,
0, // Padding value - Not used in these tests.
+ backends,
1.0f, // Scale
0, // Offset
tflite::MirrorPadMode_SYMMETRIC);
@@ -201,7 +197,6 @@ void MirrorPadReflect3dTest(std::vector<armnn::BackendId>& backends)
PadTest<float>(tflite::BuiltinOperator_MIRROR_PAD,
::tflite::TensorType_FLOAT32,
- backends,
inputShape,
paddingShape,
outputShape,
@@ -209,6 +204,7 @@ void MirrorPadReflect3dTest(std::vector<armnn::BackendId>& backends)
paddingDim,
expectedOutputValues,
0, // Padding value - Not used in these tests.
+ backends,
1.0f, // Scale
0, // Offset
tflite::MirrorPadMode_REFLECT);
@@ -241,7 +237,6 @@ void MirrorPadSymmetricUint8Test(std::vector<armnn::BackendId>& backends)
PadTest<uint8_t>(tflite::BuiltinOperator_MIRROR_PAD,
::tflite::TensorType_UINT8,
- backends,
inputShape,
paddingShape,
outputShape,
@@ -249,6 +244,7 @@ void MirrorPadSymmetricUint8Test(std::vector<armnn::BackendId>& backends)
paddingDim,
expectedOutputValues,
0, // Padding value - Not used in these tests.
+ backends,
1.0f, // Scale
1, // Offset
tflite::MirrorPadMode_SYMMETRIC);
@@ -283,7 +279,6 @@ void MirrorPadReflectInt8Test(std::vector<armnn::BackendId>& backends)
PadTest<int8_t>(tflite::BuiltinOperator_MIRROR_PAD,
::tflite::TensorType_INT8,
- backends,
inputShape,
paddingShape,
outputShape,
@@ -291,6 +286,7 @@ void MirrorPadReflectInt8Test(std::vector<armnn::BackendId>& backends)
paddingDim,
expectedOutputValues,
0, // Padding value - Not used in these tests.
+ backends,
1.0f, // Scale
1, // Offset
tflite::MirrorPadMode_REFLECT);
diff --git a/delegate/test/NeonDelegateTests_NDK_Issue.cpp b/delegate/test/NeonDelegateTests_NDK_Issue.cpp
index b2651c4249..250d783ca4 100644
--- a/delegate/test/NeonDelegateTests_NDK_Issue.cpp
+++ b/delegate/test/NeonDelegateTests_NDK_Issue.cpp
@@ -1,15 +1,11 @@
//
-// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021, 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "NormalizationTestHelper.hpp"
#include "SoftmaxTestHelper.hpp"
-#include <armnn_delegate.hpp>
-
-#include <flatbuffers/flatbuffers.h>
-
#include <doctest/doctest.h>
namespace armnnDelegate
@@ -25,28 +21,28 @@ TEST_SUITE ("Softmax_CpuAccTests")
TEST_CASE ("Softmax_Standard_Beta_CpuAcc_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef, armnn::Compute::CpuAcc };
std::vector<float> expectedOutput = {0.00994190481, 0.0445565246, 0.0734612942, 0.329230666, 0.542809606,
0.710742831, 0.158588171, 0.0961885825, 0.0214625746, 0.0130177103};
- SoftmaxTestCase(tflite::BuiltinOperator_SOFTMAX, backends, 1, expectedOutput);
+ SoftmaxTestCase(tflite::BuiltinOperator_SOFTMAX, 1, expectedOutput, backends);
}
TEST_CASE ("Softmax_Different_Beta_CpuAcc_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef, armnn::Compute::CpuAcc };
std::vector<float> expectedOutput = {
0.0946234912, 0.148399189, 0.172415257, 0.270400971, 0.314161092,
0.352414012, 0.224709094, 0.193408906, 0.123322964, 0.106145054};
- SoftmaxTestCase(tflite::BuiltinOperator_SOFTMAX, backends, 0.3, expectedOutput);
+ SoftmaxTestCase(tflite::BuiltinOperator_SOFTMAX, 0.3, expectedOutput, backends);
}
TEST_CASE ("Log_Softmax_CpuAcc_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef, armnn::Compute::CpuAcc };
std::vector<float> expectedOutput =
{-4.61099672, -3.11099672, -2.61099672, -1.11099672, -0.610996664,
-0.341444582, -1.84144461, -2.34144449, -3.84144449, -4.34144449};
- SoftmaxTestCase(tflite::BuiltinOperator_LOG_SOFTMAX, backends, 0, expectedOutput);
+ SoftmaxTestCase(tflite::BuiltinOperator_LOG_SOFTMAX, 0, expectedOutput, backends);
}
} // TEST_SUITE ("Softmax_CpuAccTests")
@@ -55,7 +51,7 @@ TEST_SUITE("L2Normalization_CpuAccTests")
TEST_CASE ("L2NormalizationFp32Test_CpuAcc_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef, armnn::Compute::CpuAcc };
L2NormalizationTest(backends);
}
} // TEST_SUITE("L2NormalizationFp32Test_CpuAcc_Test")
diff --git a/delegate/test/NormalizationTest.cpp b/delegate/test/NormalizationTest.cpp
index b3a6f4b81b..3fc3141f08 100644
--- a/delegate/test/NormalizationTest.cpp
+++ b/delegate/test/NormalizationTest.cpp
@@ -1,72 +1,33 @@
//
-// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021, 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "NormalizationTestHelper.hpp"
-#include <armnn_delegate.hpp>
-
-#include <flatbuffers/flatbuffers.h>
-
#include <doctest/doctest.h>
namespace armnnDelegate
{
-TEST_SUITE("L2Normalization_CpuRefTests")
-{
-
-TEST_CASE ("L2NormalizationFp32Test_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- L2NormalizationTest(backends);
-}
-
-} // TEST_SUITE("L2Normalization_CpuRefTests")
-
-TEST_SUITE("L2Normalization_GpuAccTests")
-{
-
-TEST_CASE ("L2NormalizationFp32Test_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- L2NormalizationTest(backends);
-}
-
-} // TEST_SUITE("L2Normalization_GpuAccTests")
-
-TEST_SUITE("LocalResponseNormalization_CpuRefTests")
-{
-
-TEST_CASE ("LocalResponseNormalizationTest_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- LocalResponseNormalizationTest(backends, 3, 1.f, 1.f, 1.f);
-}
-
-} // TEST_SUITE("LocalResponseNormalization_CpuRefTests")
-
-TEST_SUITE("LocalResponseNormalization_CpuAccTests")
+TEST_SUITE("L2NormalizationTests")
{
-TEST_CASE ("LocalResponseNormalizationTest_CpuAcc_Test")
+TEST_CASE ("L2NormalizationFp32Test_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- LocalResponseNormalizationTest(backends, 3, 1.f, 1.f, 1.f);
+ L2NormalizationTest();
}
-} // TEST_SUITE("LocalResponseNormalization_CpuAccTests")
+} // TEST_SUITE("L2NormalizationTests")
-TEST_SUITE("LocalResponseNormalization_GpuAccTests")
+TEST_SUITE("LocalResponseNormalizationTests")
{
-TEST_CASE ("LocalResponseNormalizationTest_GpuAcc_Test")
+TEST_CASE ("LocalResponseNormalizationTest_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- LocalResponseNormalizationTest(backends, 3, 1.f, 1.f, 1.f);
+ LocalResponseNormalizationTest(3, 1.f, 1.f, 1.f);
}
-} // TEST_SUITE("LocalResponseNormalization_GpuAccTests")
+} // TEST_SUITE("LocalResponseNormalizationTests")
} // namespace armnnDelegate
\ No newline at end of file
diff --git a/delegate/test/NormalizationTestHelper.hpp b/delegate/test/NormalizationTestHelper.hpp
index d7930d91b9..1306fd199a 100644
--- a/delegate/test/NormalizationTestHelper.hpp
+++ b/delegate/test/NormalizationTestHelper.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021, 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -10,12 +10,8 @@
#include <armnn_delegate.hpp>
#include <DelegateTestInterpreter.hpp>
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/kernels/register.h>
#include <tensorflow/lite/version.h>
-#include <doctest/doctest.h>
-
namespace
{
@@ -117,11 +113,11 @@ std::vector<char> CreateNormalizationTfLiteModel(tflite::BuiltinOperator normali
template <typename T>
void NormalizationTest(tflite::BuiltinOperator normalizationOperatorCode,
tflite::TensorType tensorType,
- const std::vector<armnn::BackendId>& backends,
const std::vector<int32_t>& inputShape,
std::vector<int32_t>& outputShape,
std::vector<T>& inputValues,
std::vector<T>& expectedOutputValues,
+ const std::vector<armnn::BackendId>& backends = {},
int32_t radius = 0,
float bias = 0.f,
float alpha = 0.f,
@@ -150,7 +146,7 @@ void NormalizationTest(tflite::BuiltinOperator normalizationOperatorCode,
std::vector<int32_t> tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0);
// Setup interpreter with Arm NN Delegate applied.
- auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+ auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, CaptureAvailableBackends(backends));
CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
CHECK(armnnInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
@@ -164,7 +160,7 @@ void NormalizationTest(tflite::BuiltinOperator normalizationOperatorCode,
armnnInterpreter.Cleanup();
}
-void L2NormalizationTest(std::vector<armnn::BackendId>& backends)
+void L2NormalizationTest(const std::vector<armnn::BackendId>& backends = {})
{
// Set input data
std::vector<int32_t> inputShape { 1, 1, 1, 10 };
@@ -201,18 +197,18 @@ void L2NormalizationTest(std::vector<armnn::BackendId>& backends)
NormalizationTest<float>(tflite::BuiltinOperator_L2_NORMALIZATION,
::tflite::TensorType_FLOAT32,
- backends,
inputShape,
outputShape,
inputValues,
- expectedOutputValues);
+ expectedOutputValues,
+ backends);
}
-void LocalResponseNormalizationTest(std::vector<armnn::BackendId>& backends,
- int32_t radius,
+void LocalResponseNormalizationTest(int32_t radius,
float bias,
float alpha,
- float beta)
+ float beta,
+ const std::vector<armnn::BackendId>& backends = {})
{
// Set input data
std::vector<int32_t> inputShape { 2, 2, 2, 1 };
@@ -234,11 +230,11 @@ void LocalResponseNormalizationTest(std::vector<armnn::BackendId>& backends,
NormalizationTest<float>(tflite::BuiltinOperator_LOCAL_RESPONSE_NORMALIZATION,
::tflite::TensorType_FLOAT32,
- backends,
inputShape,
outputShape,
inputValues,
expectedOutputValues,
+ backends,
radius,
bias,
alpha,
diff --git a/delegate/test/PackTest.cpp b/delegate/test/PackTest.cpp
index 51f545ce5e..4f826d9175 100644
--- a/delegate/test/PackTest.cpp
+++ b/delegate/test/PackTest.cpp
@@ -1,21 +1,17 @@
//
-// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021, 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "PackTestHelper.hpp"
-#include <armnn_delegate.hpp>
-
-#include <flatbuffers/flatbuffers.h>
-
#include <doctest/doctest.h>
namespace armnnDelegate
{
template <typename T>
-void PackFp32Axis0Test(tflite::TensorType tensorType, std::vector<armnn::BackendId>& backends)
+void PackFp32Axis0Test(tflite::TensorType tensorType)
{
std::vector<int32_t> inputShape { 3, 2, 3 };
std::vector<int32_t> expectedOutputShape { 2, 3, 2, 3 };
@@ -69,16 +65,16 @@ void PackFp32Axis0Test(tflite::TensorType tensorType, std::vector<armnn::Backend
PackTest<T>(tflite::BuiltinOperator_PACK,
tensorType,
- backends,
inputShape,
expectedOutputShape,
inputValues,
expectedOutputValues,
+ {},
0);
}
template <typename T>
-void PackFp32Axis1Test(tflite::TensorType tensorType, std::vector<armnn::BackendId>& backends)
+void PackFp32Axis1Test(tflite::TensorType tensorType)
{
std::vector<int32_t> inputShape { 3, 2, 3 };
std::vector<int32_t> expectedOutputShape { 3, 2, 2, 3 };
@@ -133,16 +129,16 @@ void PackFp32Axis1Test(tflite::TensorType tensorType, std::vector<armnn::Backend
PackTest<T>(tflite::BuiltinOperator_PACK,
tensorType,
- backends,
inputShape,
expectedOutputShape,
inputValues,
expectedOutputValues,
+ {},
1);
}
template <typename T>
-void PackFp32Axis2Test(tflite::TensorType tensorType, std::vector<armnn::BackendId>& backends)
+void PackFp32Axis2Test(tflite::TensorType tensorType)
{
std::vector<int32_t> inputShape { 3, 2, 3 };
std::vector<int32_t> expectedOutputShape { 3, 2, 2, 3 };
@@ -195,16 +191,16 @@ void PackFp32Axis2Test(tflite::TensorType tensorType, std::vector<armnn::Backend
PackTest<T>(tflite::BuiltinOperator_PACK,
tensorType,
- backends,
inputShape,
expectedOutputShape,
inputValues,
expectedOutputValues,
+ {},
2);
}
template <typename T>
-void PackFp32Axis3Test(tflite::TensorType tensorType, std::vector<armnn::BackendId>& backends)
+void PackFp32Axis3Test(tflite::TensorType tensorType)
{
std::vector<int32_t> inputShape { 3, 2, 3 };
std::vector<int32_t> expectedOutputShape { 3, 2, 3, 2 };
@@ -265,16 +261,16 @@ void PackFp32Axis3Test(tflite::TensorType tensorType, std::vector<armnn::Backend
PackTest<T>(tflite::BuiltinOperator_PACK,
tflite::TensorType_FLOAT32,
- backends,
inputShape,
expectedOutputShape,
inputValues,
expectedOutputValues,
+ {},
3);
}
template <typename T>
-void PackFp32Inputs3Test(tflite::TensorType tensorType, std::vector<armnn::BackendId>& backends)
+void PackFp32Inputs3Test(tflite::TensorType tensorType)
{
std::vector<int32_t> inputShape { 3, 3 };
std::vector<int32_t> expectedOutputShape { 3, 3, 3 };
@@ -318,196 +314,63 @@ void PackFp32Inputs3Test(tflite::TensorType tensorType, std::vector<armnn::Backe
PackTest<T>(tflite::BuiltinOperator_PACK,
tensorType,
- backends,
inputShape,
expectedOutputShape,
inputValues,
expectedOutputValues,
+ {},
1);
}
-TEST_SUITE("Pack_CpuAccTests")
-{
-
-// Fp32
-TEST_CASE ("Pack_Fp32_Axis0_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
- PackFp32Axis0Test<float>(tflite::TensorType_FLOAT32, backends);
-}
-
-TEST_CASE ("Pack_Fp32_Axis1_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
- PackFp32Axis1Test<float>(tflite::TensorType_FLOAT32, backends);
-}
-
-TEST_CASE ("Pack_Fp32_Axis2_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
- PackFp32Axis2Test<float>(tflite::TensorType_FLOAT32, backends);
-}
-
-TEST_CASE ("Pack_Fp32_Axis3_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
- PackFp32Axis3Test<float>(tflite::TensorType_FLOAT32, backends);
-}
-
-TEST_CASE ("Pack_Fp32_Inputs3_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
- PackFp32Inputs3Test<float>(tflite::TensorType_FLOAT32, backends);
-}
-
-// Uint8
-TEST_CASE ("Pack_Uint8_Axis0_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
- PackFp32Axis0Test<uint8_t>(tflite::TensorType_UINT8, backends);
-}
-
-TEST_CASE ("Pack_Uint8_Inputs3_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
- PackFp32Inputs3Test<uint8_t>(tflite::TensorType_UINT8, backends);
-}
-
-// Uint8
-TEST_CASE ("Pack_Int8_Axis0_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
- PackFp32Axis0Test<int8_t>(tflite::TensorType_INT8, backends);
-}
-
-TEST_CASE ("Pack_Int8_Inputs3_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
- PackFp32Inputs3Test<int8_t>(tflite::TensorType_INT8, backends);
-}
-
-}
-
-TEST_SUITE("Pack_GpuAccTests")
-{
-
-// Fp32
-TEST_CASE ("Pack_Fp32_Axis0_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
- PackFp32Axis0Test<float>(tflite::TensorType_FLOAT32, backends);
-}
-
-TEST_CASE ("Pack_Fp32_Axis1_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
- PackFp32Axis1Test<float>(tflite::TensorType_FLOAT32, backends);
-}
-
-TEST_CASE ("Pack_Fp32_Axis2_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
- PackFp32Axis2Test<float>(tflite::TensorType_FLOAT32, backends);
-}
-
-TEST_CASE ("Pack_Fp32_Axis3_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
- PackFp32Axis3Test<float>(tflite::TensorType_FLOAT32, backends);
-}
-
-TEST_CASE ("Pack_Fp32_Inputs3_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
- PackFp32Inputs3Test<float>(tflite::TensorType_FLOAT32, backends);
-}
-
-// Uint8
-TEST_CASE ("Pack_Uint8_Axis0_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
- PackFp32Axis0Test<uint8_t>(tflite::TensorType_UINT8, backends);
-}
-
-TEST_CASE ("Pack_Uint8_Inputs3_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
- PackFp32Inputs3Test<uint8_t>(tflite::TensorType_UINT8, backends);
-}
-
-// Int8
-TEST_CASE ("Pack_Int8_Axis0_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
- PackFp32Axis0Test<int8_t>(tflite::TensorType_INT8, backends);
-}
-
-TEST_CASE ("Pack_Int8_Inputs3_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
- PackFp32Inputs3Test<int8_t>(tflite::TensorType_INT8, backends);
-}
-
-}
-
-TEST_SUITE("Pack_CpuRefTests")
+TEST_SUITE("PackTests")
{
// Fp32
-TEST_CASE ("Pack_Fp32_Axis0_CpuRef_Test")
+TEST_CASE ("Pack_Fp32_Axis0_Test")
{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
- PackFp32Axis0Test<float>(tflite::TensorType_FLOAT32, backends);
+ PackFp32Axis0Test<float>(tflite::TensorType_FLOAT32);
}
-TEST_CASE ("Pack_Fp32_Axis1_CpuRef_Test")
+TEST_CASE ("Pack_Fp32_Axis1_Test")
{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
- PackFp32Axis1Test<float>(tflite::TensorType_FLOAT32, backends);
+ PackFp32Axis1Test<float>(tflite::TensorType_FLOAT32);
}
-TEST_CASE ("Pack_Fp32_Axis2_CpuRef_Test")
+TEST_CASE ("Pack_Fp32_Axis2_Test")
{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
- PackFp32Axis2Test<float>(tflite::TensorType_FLOAT32, backends);
+ PackFp32Axis2Test<float>(tflite::TensorType_FLOAT32);
}
-TEST_CASE ("Pack_Fp32_Axis3_CpuRef_Test")
+TEST_CASE ("Pack_Fp32_Axis3_Test")
{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
- PackFp32Axis3Test<float>(tflite::TensorType_FLOAT32, backends);
+ PackFp32Axis3Test<float>(tflite::TensorType_FLOAT32);
}
-TEST_CASE ("Pack_Fp32_Inputs3_CpuRef_Test")
+TEST_CASE ("Pack_Fp32_Inputs3_Test")
{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
- PackFp32Inputs3Test<float>(tflite::TensorType_FLOAT32, backends);
+ PackFp32Inputs3Test<float>(tflite::TensorType_FLOAT32);
}
// Uint8
-TEST_CASE ("Pack_Uint8_Axis0_CpuRef_Test")
+TEST_CASE ("Pack_Uint8_Axis0_Test")
{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
- PackFp32Axis0Test<uint8_t>(tflite::TensorType_UINT8, backends);
+ PackFp32Axis0Test<uint8_t>(tflite::TensorType_UINT8);
}
-TEST_CASE ("Pack_Uint8_Inputs3_CpuRef_Test")
+TEST_CASE ("Pack_Uint8_Inputs3_Test")
{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
- PackFp32Inputs3Test<uint8_t>(tflite::TensorType_UINT8, backends);
+ PackFp32Inputs3Test<uint8_t>(tflite::TensorType_UINT8);
}
// Int8
-TEST_CASE ("Pack_Int8_Axis0_CpuRef_Test")
+TEST_CASE ("Pack_Int8_Axis0_Test")
{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
- PackFp32Axis0Test<int8_t>(tflite::TensorType_INT8, backends);
+ PackFp32Axis0Test<int8_t>(tflite::TensorType_INT8);
}
-TEST_CASE ("Pack_Int8_Inputs3_CpuRef_Test")
+TEST_CASE ("Pack_Int8_Inputs3_Test")
{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
- PackFp32Inputs3Test<int8_t>(tflite::TensorType_INT8, backends);
+ PackFp32Inputs3Test<int8_t>(tflite::TensorType_INT8);
}
}
diff --git a/delegate/test/PackTestHelper.hpp b/delegate/test/PackTestHelper.hpp
index 1d032a3c49..f7f3fcfd87 100644
--- a/delegate/test/PackTestHelper.hpp
+++ b/delegate/test/PackTestHelper.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021, 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -10,12 +10,8 @@
#include <armnn_delegate.hpp>
#include <DelegateTestInterpreter.hpp>
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/kernels/register.h>
#include <tensorflow/lite/version.h>
-#include <doctest/doctest.h>
-
namespace
{
@@ -113,11 +109,11 @@ std::vector<char> CreatePackTfLiteModel(tflite::BuiltinOperator packOperatorCode
template <typename T>
void PackTest(tflite::BuiltinOperator packOperatorCode,
tflite::TensorType tensorType,
- std::vector<armnn::BackendId>& backends,
std::vector<int32_t>& inputShape,
std::vector<int32_t>& expectedOutputShape,
std::vector<std::vector<T>>& inputValues,
std::vector<T>& expectedOutputValues,
+ const std::vector<armnn::BackendId>& backends = {},
unsigned int axis = 0,
float quantScale = 1.0f,
int quantOffset = 0)
@@ -137,7 +133,7 @@ void PackTest(tflite::BuiltinOperator packOperatorCode,
CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
// Setup interpreter with Arm NN Delegate applied.
- auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+ auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, CaptureAvailableBackends(backends));
CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
// Set input data for all input tensors.
diff --git a/delegate/test/PadTest.cpp b/delegate/test/PadTest.cpp
index 17fabe65e8..f9af26bd43 100644
--- a/delegate/test/PadTest.cpp
+++ b/delegate/test/PadTest.cpp
@@ -1,21 +1,16 @@
//
-// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020, 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "PadTestHelper.hpp"
-#include <armnn_delegate.hpp>
-
-#include <flatbuffers/flatbuffers.h>
-
#include <doctest/doctest.h>
namespace armnnDelegate
{
-void Pad2dTest(std::vector<armnn::BackendId>& backends,
- tflite::BuiltinOperator padOperatorCode = tflite::BuiltinOperator_PAD,
+void Pad2dTest(tflite::BuiltinOperator padOperatorCode = tflite::BuiltinOperator_PAD,
float pad = 0.0f)
{
// Set input data
@@ -50,7 +45,6 @@ void Pad2dTest(std::vector<armnn::BackendId>& backends,
PadTest<float>(padOperatorCode,
::tflite::TensorType_FLOAT32,
- backends,
inputShape,
paddingShape,
outputShape,
@@ -60,8 +54,7 @@ void Pad2dTest(std::vector<armnn::BackendId>& backends,
pad);
}
-void Pad3dTest(std::vector<armnn::BackendId>& backends,
- tflite::BuiltinOperator padOperatorCode = tflite::BuiltinOperator_PAD,
+void Pad3dTest(tflite::BuiltinOperator padOperatorCode = tflite::BuiltinOperator_PAD,
float pad = 0.0f)
{
// Set input data
@@ -96,7 +89,6 @@ void Pad3dTest(std::vector<armnn::BackendId>& backends,
PadTest<float>(padOperatorCode,
::tflite::TensorType_FLOAT32,
- backends,
inputShape,
paddingShape,
outputShape,
@@ -106,8 +98,7 @@ void Pad3dTest(std::vector<armnn::BackendId>& backends,
pad);
}
-void Pad4dTest(std::vector<armnn::BackendId>& backends,
- tflite::BuiltinOperator padOperatorCode = tflite::BuiltinOperator_PAD,
+void Pad4dTest(tflite::BuiltinOperator padOperatorCode = tflite::BuiltinOperator_PAD,
float pad = 0.0f)
{
// Set input data
@@ -295,7 +286,6 @@ void Pad4dTest(std::vector<armnn::BackendId>& backends,
PadTest<float>(padOperatorCode,
::tflite::TensorType_FLOAT32,
- backends,
inputShape,
paddingShape,
outputShape,
@@ -305,8 +295,7 @@ void Pad4dTest(std::vector<armnn::BackendId>& backends,
pad);
}
-void PadInt8Test(std::vector<armnn::BackendId>& backends,
- tflite::BuiltinOperator padOperatorCode = tflite::BuiltinOperator_PAD,
+void PadInt8Test(tflite::BuiltinOperator padOperatorCode = tflite::BuiltinOperator_PAD,
int8_t paddingValue = 0,
int8_t p = 3,
float quantizationScale = -2.0f,
@@ -344,7 +333,6 @@ void PadInt8Test(std::vector<armnn::BackendId>& backends,
PadTest<int8_t>(padOperatorCode,
::tflite::TensorType_INT8,
- backends,
inputShape,
paddingShape,
outputShape,
@@ -352,12 +340,12 @@ void PadInt8Test(std::vector<armnn::BackendId>& backends,
paddingDim,
expectedOutputValues,
paddingValue,
+ {},
quantizationScale,
quantizationOffset);
}
-void PadUint8Test(std::vector<armnn::BackendId>& backends,
- tflite::BuiltinOperator padOperatorCode = tflite::BuiltinOperator_PAD,
+void PadUint8Test(tflite::BuiltinOperator padOperatorCode = tflite::BuiltinOperator_PAD,
uint8_t paddingValue = 0,
uint8_t p = 3,
float quantizationScale = -2.0f,
@@ -395,7 +383,6 @@ void PadUint8Test(std::vector<armnn::BackendId>& backends,
PadTest<uint8_t>(padOperatorCode,
::tflite::TensorType_UINT8,
- backends,
inputShape,
paddingShape,
outputShape,
@@ -403,203 +390,64 @@ void PadUint8Test(std::vector<armnn::BackendId>& backends,
paddingDim,
expectedOutputValues,
paddingValue,
+ {},
quantizationScale,
quantizationOffset);
}
-TEST_SUITE("Pad_CpuRefTests")
-{
-
-TEST_CASE ("Pad2d_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- Pad2dTest(backends);
-}
-
-TEST_CASE ("Pad3d_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- Pad3dTest(backends);
-}
-
-TEST_CASE ("Pad4d_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- Pad4dTest(backends);
-}
-
-TEST_CASE ("Pad_Int8_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- PadInt8Test(backends);
-}
-
-TEST_CASE ("Pad_Uint8_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- PadUint8Test(backends);
-}
-
-TEST_CASE ("PadV22d_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- Pad2dTest(backends, tflite::BuiltinOperator_PADV2, -2.5);
-}
-
-TEST_CASE ("PadV23d_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- Pad3dTest(backends, tflite::BuiltinOperator_PADV2, 2.0);
-}
-
-TEST_CASE ("PadV24d_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- Pad4dTest(backends, tflite::BuiltinOperator_PADV2, -1.33);
-}
-
-TEST_CASE ("PadV2_Int8_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- PadInt8Test(backends, tflite::BuiltinOperator_PADV2, -1, -1);
-}
-
-TEST_CASE ("PadV2_Uint8_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- PadUint8Test(backends, tflite::BuiltinOperator_PADV2, -1, -1);
-}
-
-} // TEST_SUITE("Pad_CpuRefTests")
-
-TEST_SUITE("Pad_CpuAccTests")
-{
-
-TEST_CASE ("Pad2d_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- Pad2dTest(backends);
-}
-
-TEST_CASE ("Pad3d_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- Pad3dTest(backends);
-}
-
-TEST_CASE ("Pad4d_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- Pad4dTest(backends);
-}
-
-TEST_CASE ("Pad_Int8_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- PadInt8Test(backends);
-}
-
-TEST_CASE ("Pad_Uint8_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- PadUint8Test(backends);
-}
-
-TEST_CASE ("PadV22d_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- Pad2dTest(backends, tflite::BuiltinOperator_PADV2, -2.5);
-}
-
-TEST_CASE ("PadV23d_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- Pad3dTest(backends, tflite::BuiltinOperator_PADV2, 2.0);
-}
-
-TEST_CASE ("PadV24d_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- Pad4dTest(backends, tflite::BuiltinOperator_PADV2, -1.33);
-}
-
-TEST_CASE ("PadV2_Int8_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- PadInt8Test(backends, tflite::BuiltinOperator_PADV2, -1, -1);
-}
-
-TEST_CASE ("PadV2_Uint8_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- PadUint8Test(backends, tflite::BuiltinOperator_PADV2, -1, -1);
-}
-
-} // TEST_SUITE("Pad_CpuAccTests")
-
-TEST_SUITE("Pad_GpuAccTests")
+TEST_SUITE("PadTests")
{
-TEST_CASE ("Pad2d_GpuAcc_Test")
+TEST_CASE ("Pad2d_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- Pad2dTest(backends);
+ Pad2dTest();
}
-TEST_CASE ("Pad3d_GpuAcc_Test")
+TEST_CASE ("Pad3d_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- Pad3dTest(backends);
+ Pad3dTest();
}
-TEST_CASE ("Pad4d_GpuAcc_Test")
+TEST_CASE ("Pad4d_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- Pad4dTest(backends);
+ Pad4dTest();
}
-TEST_CASE ("Pad_Int8_GpuAcc_Test")
+TEST_CASE ("Pad_Int8_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- PadInt8Test(backends);
+ PadInt8Test();
}
-TEST_CASE ("Pad_Uint8_GpuAcc_Test")
+TEST_CASE ("Pad_Uint8_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- PadUint8Test(backends);
+ PadUint8Test();
}
-TEST_CASE ("PadV22d_GpuAcc_Test")
+TEST_CASE ("PadV22d_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- Pad2dTest(backends, tflite::BuiltinOperator_PADV2, -2.5);
+ Pad2dTest(tflite::BuiltinOperator_PADV2, -2.5);
}
-TEST_CASE ("PadV23d_GpuAcc_Test")
+TEST_CASE ("PadV23d_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- Pad3dTest(backends, tflite::BuiltinOperator_PADV2, 2.0);
+ Pad3dTest(tflite::BuiltinOperator_PADV2, 2.0);
}
-TEST_CASE ("PadV24d_GpuAcc_Test")
+TEST_CASE ("PadV24d_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- Pad4dTest(backends, tflite::BuiltinOperator_PADV2, -1.33);
+ Pad4dTest(tflite::BuiltinOperator_PADV2, -1.33);
}
-TEST_CASE ("PadV2_Int8_GpuAcc_Test")
+TEST_CASE ("PadV2_Int8_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- PadInt8Test(backends, tflite::BuiltinOperator_PADV2, -1, -1);
+ PadInt8Test(tflite::BuiltinOperator_PADV2, -1, -1);
}
-TEST_CASE ("PadV2_Uint8_GpuAcc_Test")
+TEST_CASE ("PadV2_Uint8_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- PadUint8Test(backends, tflite::BuiltinOperator_PADV2, -1, -1);
+ PadUint8Test(tflite::BuiltinOperator_PADV2, -1, -1);
}
-} // TEST_SUITE("Pad_GpuAccTests")
+} // TEST_SUITE("PadTests")
} // namespace armnnDelegate \ No newline at end of file
diff --git a/delegate/test/PadTestHelper.hpp b/delegate/test/PadTestHelper.hpp
index fefcec60a7..f5434740f3 100644
--- a/delegate/test/PadTestHelper.hpp
+++ b/delegate/test/PadTestHelper.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020, 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -10,12 +10,8 @@
#include <armnn_delegate.hpp>
#include <DelegateTestInterpreter.hpp>
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/kernels/register.h>
#include <tensorflow/lite/version.h>
-#include <doctest/doctest.h>
-
namespace
{
@@ -160,7 +156,6 @@ std::vector<char> CreatePadTfLiteModel(
template <typename T>
void PadTest(tflite::BuiltinOperator padOperatorCode,
tflite::TensorType tensorType,
- const std::vector<armnn::BackendId>& backends,
const std::vector<int32_t>& inputShape,
const std::vector<int32_t>& paddingShape,
std::vector<int32_t>& outputShape,
@@ -168,6 +163,7 @@ void PadTest(tflite::BuiltinOperator padOperatorCode,
std::vector<int32_t>& paddingDim,
std::vector<T>& expectedOutputValues,
T paddingValue,
+ const std::vector<armnn::BackendId>& backends = {},
float quantScale = 1.0f,
int quantOffset = 0,
tflite::MirrorPadMode paddingMode = tflite::MirrorPadMode_SYMMETRIC)
@@ -193,7 +189,7 @@ void PadTest(tflite::BuiltinOperator padOperatorCode,
std::vector<int32_t> tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0);
// Setup interpreter with Arm NN Delegate applied.
- auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+ auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, CaptureAvailableBackends(backends));
CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
CHECK(armnnInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
diff --git a/delegate/test/Pooling2dTest.cpp b/delegate/test/Pooling2dTest.cpp
index c202a956e4..46f8a5ad82 100644
--- a/delegate/test/Pooling2dTest.cpp
+++ b/delegate/test/Pooling2dTest.cpp
@@ -1,25 +1,16 @@
//
-// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020, 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "Pooling2dTestHelper.hpp"
-#include <armnn_delegate.hpp>
-
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
-#include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-
-#include <tensorflow/lite/version.h>
-
#include <doctest/doctest.h>
namespace armnnDelegate
{
-void MaxPool2dFP32PaddingValidTest(std::vector<armnn::BackendId>& backends)
+void MaxPool2dFP32PaddingValidTest(const std::vector<armnn::BackendId>& backends = {})
{
// Set input data
std::vector<int32_t> inputShape { 1, 3, 4, 1 };
@@ -33,11 +24,11 @@ void MaxPool2dFP32PaddingValidTest(std::vector<armnn::BackendId>& backends)
Pooling2dTest<float>(tflite::BuiltinOperator_MAX_POOL_2D,
::tflite::TensorType_FLOAT32,
- backends,
inputShape,
outputShape,
inputValues,
expectedOutputValues,
+ backends,
::tflite::Padding_VALID,
2,
2,
@@ -45,7 +36,7 @@ void MaxPool2dFP32PaddingValidTest(std::vector<armnn::BackendId>& backends)
2);
}
-void MaxPool2dInt8PaddingValidTest(std::vector<armnn::BackendId>& backends)
+void MaxPool2dInt8PaddingValidTest(const std::vector<armnn::BackendId>& backends = {})
{
// Set input data
std::vector<int32_t> inputShape { 1, 3, 4, 1 };
@@ -59,11 +50,11 @@ void MaxPool2dInt8PaddingValidTest(std::vector<armnn::BackendId>& backends)
Pooling2dTest<int8_t>(tflite::BuiltinOperator_MAX_POOL_2D,
::tflite::TensorType_INT8,
- backends,
inputShape,
outputShape,
inputValues,
expectedOutputValues,
+ backends,
::tflite::Padding_VALID,
2,
2,
@@ -74,7 +65,7 @@ void MaxPool2dInt8PaddingValidTest(std::vector<armnn::BackendId>& backends)
1);
}
-void MaxPool2dFP32PaddingSameTest(std::vector<armnn::BackendId>& backends)
+void MaxPool2dFP32PaddingSameTest(const std::vector<armnn::BackendId>& backends = {})
{
// Set input data
std::vector<int32_t> inputShape { 1, 3, 4, 1 };
@@ -88,11 +79,11 @@ void MaxPool2dFP32PaddingSameTest(std::vector<armnn::BackendId>& backends)
Pooling2dTest<float>(tflite::BuiltinOperator_MAX_POOL_2D,
::tflite::TensorType_FLOAT32,
- backends,
inputShape,
outputShape,
inputValues,
expectedOutputValues,
+ backends,
::tflite::Padding_SAME,
2,
2,
@@ -100,7 +91,7 @@ void MaxPool2dFP32PaddingSameTest(std::vector<armnn::BackendId>& backends)
2);
}
-void MaxPool2dInt8PaddingSameTest(std::vector<armnn::BackendId>& backends)
+void MaxPool2dInt8PaddingSameTest(const std::vector<armnn::BackendId>& backends = {})
{
// Set input data
std::vector<int32_t> inputShape { 1, 3, 4, 1 };
@@ -114,11 +105,11 @@ void MaxPool2dInt8PaddingSameTest(std::vector<armnn::BackendId>& backends)
Pooling2dTest<int8_t>(tflite::BuiltinOperator_MAX_POOL_2D,
::tflite::TensorType_INT8,
- backends,
inputShape,
outputShape,
inputValues,
expectedOutputValues,
+ backends,
::tflite::Padding_SAME,
2,
2,
@@ -129,7 +120,7 @@ void MaxPool2dInt8PaddingSameTest(std::vector<armnn::BackendId>& backends)
1);
}
-void MaxPool2dFP32ReluTest(std::vector<armnn::BackendId>& backends)
+void MaxPool2dFP32ReluTest(const std::vector<armnn::BackendId>& backends = {})
{
// Set input data
std::vector<int32_t> inputShape { 1, 3, 4, 1 };
@@ -143,11 +134,11 @@ void MaxPool2dFP32ReluTest(std::vector<armnn::BackendId>& backends)
Pooling2dTest<float>(tflite::BuiltinOperator_MAX_POOL_2D,
::tflite::TensorType_FLOAT32,
- backends,
inputShape,
outputShape,
inputValues,
expectedOutputValues,
+ backends,
::tflite::Padding_VALID,
1,
1,
@@ -156,7 +147,7 @@ void MaxPool2dFP32ReluTest(std::vector<armnn::BackendId>& backends)
::tflite::ActivationFunctionType_RELU);
}
-void MaxPool2dInt8ReluTest(std::vector<armnn::BackendId>& backends)
+void MaxPool2dInt8ReluTest(const std::vector<armnn::BackendId>& backends = {})
{
// Set input data
std::vector<int32_t> inputShape { 1, 3, 4, 1 };
@@ -170,11 +161,11 @@ void MaxPool2dInt8ReluTest(std::vector<armnn::BackendId>& backends)
Pooling2dTest<int8_t>(tflite::BuiltinOperator_MAX_POOL_2D,
::tflite::TensorType_INT8,
- backends,
inputShape,
outputShape,
inputValues,
expectedOutputValues,
+ backends,
::tflite::Padding_VALID,
1,
1,
@@ -185,7 +176,7 @@ void MaxPool2dInt8ReluTest(std::vector<armnn::BackendId>& backends)
1);
}
-void MaxPool2dFP32Relu6Test(std::vector<armnn::BackendId>& backends)
+void MaxPool2dFP32Relu6Test(const std::vector<armnn::BackendId>& backends = {})
{
// Set input data
std::vector<int32_t> inputShape { 1, 3, 4, 1 };
@@ -199,11 +190,11 @@ void MaxPool2dFP32Relu6Test(std::vector<armnn::BackendId>& backends)
Pooling2dTest<float>(tflite::BuiltinOperator_MAX_POOL_2D,
::tflite::TensorType_FLOAT32,
- backends,
inputShape,
outputShape,
inputValues,
expectedOutputValues,
+ backends,
::tflite::Padding_SAME,
2,
2,
@@ -212,7 +203,7 @@ void MaxPool2dFP32Relu6Test(std::vector<armnn::BackendId>& backends)
::tflite::ActivationFunctionType_RELU6);
}
-void MaxPool2dInt8Relu6Test(std::vector<armnn::BackendId>& backends)
+void MaxPool2dInt8Relu6Test(const std::vector<armnn::BackendId>& backends = {})
{
// Set input data
std::vector<int32_t> inputShape { 1, 3, 4, 1 };
@@ -226,11 +217,11 @@ void MaxPool2dInt8Relu6Test(std::vector<armnn::BackendId>& backends)
Pooling2dTest<int8_t>(tflite::BuiltinOperator_MAX_POOL_2D,
::tflite::TensorType_INT8,
- backends,
inputShape,
outputShape,
inputValues,
expectedOutputValues,
+ backends,
::tflite::Padding_SAME,
2,
2,
@@ -241,7 +232,7 @@ void MaxPool2dInt8Relu6Test(std::vector<armnn::BackendId>& backends)
1);
}
-void MaxPool2dUint8PaddingSameTest(std::vector<armnn::BackendId>& backends)
+void MaxPool2dUint8PaddingSameTest(const std::vector<armnn::BackendId>& backends = {})
{
// Set input data
std::vector<int32_t> inputShape { 1, 3, 4, 1 };
@@ -255,11 +246,11 @@ void MaxPool2dUint8PaddingSameTest(std::vector<armnn::BackendId>& backends)
Pooling2dTest<uint8_t>(tflite::BuiltinOperator_MAX_POOL_2D,
::tflite::TensorType_UINT8,
- backends,
inputShape,
outputShape,
inputValues,
expectedOutputValues,
+ backends,
::tflite::Padding_SAME,
2,
2,
@@ -270,7 +261,7 @@ void MaxPool2dUint8PaddingSameTest(std::vector<armnn::BackendId>& backends)
1);
}
-void MaxPool2dUint8ReluTest(std::vector<armnn::BackendId>& backends)
+void MaxPool2dUint8ReluTest(const std::vector<armnn::BackendId>& backends = {})
{
// Set input data
std::vector<int32_t> inputShape { 1, 3, 4, 1 };
@@ -284,11 +275,11 @@ void MaxPool2dUint8ReluTest(std::vector<armnn::BackendId>& backends)
Pooling2dTest<uint8_t>(tflite::BuiltinOperator_MAX_POOL_2D,
::tflite::TensorType_UINT8,
- backends,
inputShape,
outputShape,
inputValues,
expectedOutputValues,
+ backends,
::tflite::Padding_VALID,
1,
1,
@@ -299,7 +290,7 @@ void MaxPool2dUint8ReluTest(std::vector<armnn::BackendId>& backends)
1);
}
-void MaxPool2dInt16PaddingSameTest(std::vector<armnn::BackendId>& backends)
+void MaxPool2dInt16PaddingSameTest(const std::vector<armnn::BackendId>& backends = {})
{
// Set input data
std::vector<int32_t> inputShape { 1, 3, 4, 1 };
@@ -313,11 +304,11 @@ void MaxPool2dInt16PaddingSameTest(std::vector<armnn::BackendId>& backends)
Pooling2dTest<int16_t>(tflite::BuiltinOperator_MAX_POOL_2D,
::tflite::TensorType_INT16,
- backends,
inputShape,
outputShape,
inputValues,
expectedOutputValues,
+ backends,
::tflite::Padding_SAME,
2,
2,
@@ -328,7 +319,7 @@ void MaxPool2dInt16PaddingSameTest(std::vector<armnn::BackendId>& backends)
0);
}
-void MaxPool2dInt16ReluTest(std::vector<armnn::BackendId>& backends)
+void MaxPool2dInt16ReluTest(const std::vector<armnn::BackendId>& backends = {})
{
// Set input data
std::vector<int32_t> inputShape { 1, 3, 4, 1 };
@@ -342,11 +333,11 @@ void MaxPool2dInt16ReluTest(std::vector<armnn::BackendId>& backends)
Pooling2dTest<int16_t>(tflite::BuiltinOperator_MAX_POOL_2D,
::tflite::TensorType_INT16,
- backends,
inputShape,
outputShape,
inputValues,
expectedOutputValues,
+ backends,
::tflite::Padding_VALID,
1,
1,
@@ -357,7 +348,7 @@ void MaxPool2dInt16ReluTest(std::vector<armnn::BackendId>& backends)
0);
}
-void AveragePool2dFP32PaddingValidTest(std::vector<armnn::BackendId>& backends)
+void AveragePool2dFP32PaddingValidTest(const std::vector<armnn::BackendId>& backends = {})
{
// Set input data
std::vector<int32_t> inputShape { 1, 3, 4, 1 };
@@ -371,11 +362,11 @@ void AveragePool2dFP32PaddingValidTest(std::vector<armnn::BackendId>& backends)
Pooling2dTest<float>(tflite::BuiltinOperator_AVERAGE_POOL_2D,
::tflite::TensorType_FLOAT32,
- backends,
inputShape,
outputShape,
inputValues,
expectedOutputValues,
+ backends,
::tflite::Padding_VALID,
2,
2,
@@ -383,7 +374,7 @@ void AveragePool2dFP32PaddingValidTest(std::vector<armnn::BackendId>& backends)
2);
}
-void AveragePool2dInt8PaddingValidTest(std::vector<armnn::BackendId>& backends)
+void AveragePool2dInt8PaddingValidTest(const std::vector<armnn::BackendId>& backends = {})
{
// Set input data
std::vector<int32_t> inputShape { 1, 3, 4, 1 };
@@ -397,11 +388,11 @@ void AveragePool2dInt8PaddingValidTest(std::vector<armnn::BackendId>& backends)
Pooling2dTest<int8_t>(tflite::BuiltinOperator_AVERAGE_POOL_2D,
::tflite::TensorType_INT8,
- backends,
inputShape,
outputShape,
inputValues,
expectedOutputValues,
+ backends,
::tflite::Padding_VALID,
2,
2,
@@ -412,7 +403,7 @@ void AveragePool2dInt8PaddingValidTest(std::vector<armnn::BackendId>& backends)
1);
}
-void AveragePool2dFP32PaddingSameTest(std::vector<armnn::BackendId>& backends)
+void AveragePool2dFP32PaddingSameTest(const std::vector<armnn::BackendId>& backends = {})
{
// Set input data
std::vector<int32_t> inputShape { 1, 3, 4, 1 };
@@ -426,11 +417,11 @@ void AveragePool2dFP32PaddingSameTest(std::vector<armnn::BackendId>& backends)
Pooling2dTest<float>(tflite::BuiltinOperator_AVERAGE_POOL_2D,
::tflite::TensorType_FLOAT32,
- backends,
inputShape,
outputShape,
inputValues,
expectedOutputValues,
+ backends,
::tflite::Padding_SAME,
2,
2,
@@ -438,7 +429,7 @@ void AveragePool2dFP32PaddingSameTest(std::vector<armnn::BackendId>& backends)
2);
}
-void AveragePool2dInt8PaddingSameTest(std::vector<armnn::BackendId>& backends)
+void AveragePool2dInt8PaddingSameTest(const std::vector<armnn::BackendId>& backends = {})
{
// Set input data
std::vector<int32_t> inputShape { 1, 3, 4, 1 };
@@ -452,11 +443,11 @@ void AveragePool2dInt8PaddingSameTest(std::vector<armnn::BackendId>& backends)
Pooling2dTest<int8_t>(tflite::BuiltinOperator_AVERAGE_POOL_2D,
::tflite::TensorType_INT8,
- backends,
inputShape,
outputShape,
inputValues,
expectedOutputValues,
+ backends,
::tflite::Padding_SAME,
2,
2,
@@ -467,7 +458,7 @@ void AveragePool2dInt8PaddingSameTest(std::vector<armnn::BackendId>& backends)
1);
}
-void AveragePool2dFP32ReluTest(std::vector<armnn::BackendId>& backends)
+void AveragePool2dFP32ReluTest(const std::vector<armnn::BackendId>& backends = {})
{
// Set input data
std::vector<int32_t> inputShape { 1, 3, 4, 1 };
@@ -481,11 +472,11 @@ void AveragePool2dFP32ReluTest(std::vector<armnn::BackendId>& backends)
Pooling2dTest<float>(tflite::BuiltinOperator_AVERAGE_POOL_2D,
::tflite::TensorType_FLOAT32,
- backends,
inputShape,
outputShape,
inputValues,
expectedOutputValues,
+ backends,
::tflite::Padding_VALID,
1,
1,
@@ -494,7 +485,7 @@ void AveragePool2dFP32ReluTest(std::vector<armnn::BackendId>& backends)
::tflite::ActivationFunctionType_RELU);
}
-void AveragePool2dInt8ReluTest(std::vector<armnn::BackendId>& backends)
+void AveragePool2dInt8ReluTest(const std::vector<armnn::BackendId>& backends = {})
{
// Set input data
std::vector<int32_t> inputShape { 1, 3, 4, 1 };
@@ -508,11 +499,11 @@ void AveragePool2dInt8ReluTest(std::vector<armnn::BackendId>& backends)
Pooling2dTest<int8_t>(tflite::BuiltinOperator_AVERAGE_POOL_2D,
::tflite::TensorType_INT8,
- backends,
inputShape,
outputShape,
inputValues,
expectedOutputValues,
+ backends,
::tflite::Padding_VALID,
1,
1,
@@ -523,7 +514,7 @@ void AveragePool2dInt8ReluTest(std::vector<armnn::BackendId>& backends)
1);
}
-void AveragePool2dFP32Relu6Test(std::vector<armnn::BackendId>& backends)
+void AveragePool2dFP32Relu6Test(const std::vector<armnn::BackendId>& backends = {})
{
// Set input data
std::vector<int32_t> inputShape { 1, 3, 4, 1 };
@@ -537,11 +528,11 @@ void AveragePool2dFP32Relu6Test(std::vector<armnn::BackendId>& backends)
Pooling2dTest<float>(tflite::BuiltinOperator_AVERAGE_POOL_2D,
::tflite::TensorType_FLOAT32,
- backends,
inputShape,
outputShape,
inputValues,
expectedOutputValues,
+ backends,
::tflite::Padding_SAME,
2,
2,
@@ -550,7 +541,7 @@ void AveragePool2dFP32Relu6Test(std::vector<armnn::BackendId>& backends)
::tflite::ActivationFunctionType_RELU6);
}
-void AveragePool2dInt8Relu6Test(std::vector<armnn::BackendId>& backends)
+void AveragePool2dInt8Relu6Test(const std::vector<armnn::BackendId>& backends = {})
{
// Set input data
std::vector<int32_t> inputShape { 1, 3, 4, 1 };
@@ -564,11 +555,11 @@ void AveragePool2dInt8Relu6Test(std::vector<armnn::BackendId>& backends)
Pooling2dTest<int8_t>(tflite::BuiltinOperator_AVERAGE_POOL_2D,
::tflite::TensorType_INT8,
- backends,
inputShape,
outputShape,
inputValues,
expectedOutputValues,
+ backends,
::tflite::Padding_SAME,
2,
2,
@@ -579,7 +570,7 @@ void AveragePool2dInt8Relu6Test(std::vector<armnn::BackendId>& backends)
1);
}
-void AveragePool2dUint8PaddingSameTest(std::vector<armnn::BackendId>& backends)
+void AveragePool2dUint8PaddingSameTest(const std::vector<armnn::BackendId>& backends = {})
{
// Set input data
std::vector<int32_t> inputShape { 1, 3, 4, 1 };
@@ -593,11 +584,11 @@ void AveragePool2dUint8PaddingSameTest(std::vector<armnn::BackendId>& backends)
Pooling2dTest<uint8_t>(tflite::BuiltinOperator_AVERAGE_POOL_2D,
::tflite::TensorType_UINT8,
- backends,
inputShape,
outputShape,
inputValues,
expectedOutputValues,
+ backends,
::tflite::Padding_SAME,
2,
2,
@@ -608,7 +599,7 @@ void AveragePool2dUint8PaddingSameTest(std::vector<armnn::BackendId>& backends)
1);
}
-void AveragePool2dUint8ReluTest(std::vector<armnn::BackendId>& backends)
+void AveragePool2dUint8ReluTest(const std::vector<armnn::BackendId>& backends = {})
{
// Set input data
std::vector<int32_t> inputShape { 1, 3, 4, 1 };
@@ -622,11 +613,11 @@ void AveragePool2dUint8ReluTest(std::vector<armnn::BackendId>& backends)
Pooling2dTest<uint8_t>(tflite::BuiltinOperator_AVERAGE_POOL_2D,
::tflite::TensorType_UINT8,
- backends,
inputShape,
outputShape,
inputValues,
expectedOutputValues,
+ backends,
::tflite::Padding_VALID,
1,
1,
@@ -637,7 +628,7 @@ void AveragePool2dUint8ReluTest(std::vector<armnn::BackendId>& backends)
1);
}
-void AveragePool2dInt16PaddingSameTest(std::vector<armnn::BackendId>& backends)
+void AveragePool2dInt16PaddingSameTest(const std::vector<armnn::BackendId>& backends = {})
{
// Set input data
std::vector<int32_t> inputShape { 1, 3, 4, 1 };
@@ -651,11 +642,11 @@ void AveragePool2dInt16PaddingSameTest(std::vector<armnn::BackendId>& backends)
Pooling2dTest<int16_t>(tflite::BuiltinOperator_AVERAGE_POOL_2D,
::tflite::TensorType_INT16,
- backends,
inputShape,
outputShape,
inputValues,
expectedOutputValues,
+ backends,
::tflite::Padding_SAME,
2,
2,
@@ -666,7 +657,7 @@ void AveragePool2dInt16PaddingSameTest(std::vector<armnn::BackendId>& backends)
0);
}
-void AveragePool2dInt16ReluTest(std::vector<armnn::BackendId>& backends)
+void AveragePool2dInt16ReluTest(const std::vector<armnn::BackendId>& backends = {})
{
// Set input data
std::vector<int32_t> inputShape { 1, 3, 4, 1 };
@@ -680,11 +671,11 @@ void AveragePool2dInt16ReluTest(std::vector<armnn::BackendId>& backends)
Pooling2dTest<int16_t>(tflite::BuiltinOperator_AVERAGE_POOL_2D,
::tflite::TensorType_INT16,
- backends,
inputShape,
outputShape,
inputValues,
expectedOutputValues,
+ backends,
::tflite::Padding_VALID,
1,
1,
@@ -695,7 +686,7 @@ void AveragePool2dInt16ReluTest(std::vector<armnn::BackendId>& backends)
0);
}
-void L2Pool2dFP32PaddingValidTest(std::vector<armnn::BackendId>& backends)
+void L2Pool2dFP32PaddingValidTest(const std::vector<armnn::BackendId>& backends = {})
{
// Set input data
std::vector<int32_t> inputShape { 1, 3, 4, 1 };
@@ -709,11 +700,11 @@ void L2Pool2dFP32PaddingValidTest(std::vector<armnn::BackendId>& backends)
Pooling2dTest<float>(tflite::BuiltinOperator_L2_POOL_2D,
::tflite::TensorType_FLOAT32,
- backends,
inputShape,
outputShape,
inputValues,
expectedOutputValues,
+ backends,
::tflite::Padding_VALID,
2,
2,
@@ -721,7 +712,7 @@ void L2Pool2dFP32PaddingValidTest(std::vector<armnn::BackendId>& backends)
2);
}
-void L2Pool2dFP32PaddingSameTest(std::vector<armnn::BackendId>& backends)
+void L2Pool2dFP32PaddingSameTest(const std::vector<armnn::BackendId>& backends = {})
{
// Set input data
std::vector<int32_t> inputShape { 1, 3, 4, 1 };
@@ -735,11 +726,11 @@ void L2Pool2dFP32PaddingSameTest(std::vector<armnn::BackendId>& backends)
Pooling2dTest<float>(tflite::BuiltinOperator_L2_POOL_2D,
::tflite::TensorType_FLOAT32,
- backends,
inputShape,
outputShape,
inputValues,
expectedOutputValues,
+ backends,
::tflite::Padding_SAME,
2,
2,
@@ -747,7 +738,7 @@ void L2Pool2dFP32PaddingSameTest(std::vector<armnn::BackendId>& backends)
2);
}
-void L2Pool2dFP32ReluTest(std::vector<armnn::BackendId>& backends)
+void L2Pool2dFP32ReluTest(const std::vector<armnn::BackendId>& backends = {})
{
// Set input data
std::vector<int32_t> inputShape { 1, 3, 4, 1 };
@@ -761,11 +752,11 @@ void L2Pool2dFP32ReluTest(std::vector<armnn::BackendId>& backends)
Pooling2dTest<float>(tflite::BuiltinOperator_L2_POOL_2D,
::tflite::TensorType_FLOAT32,
- backends,
inputShape,
outputShape,
inputValues,
expectedOutputValues,
+ backends,
::tflite::Padding_VALID,
1,
1,
@@ -774,7 +765,7 @@ void L2Pool2dFP32ReluTest(std::vector<armnn::BackendId>& backends)
::tflite::ActivationFunctionType_RELU);
}
-void L2Pool2dFP32Relu6Test(std::vector<armnn::BackendId>& backends)
+void L2Pool2dFP32Relu6Test(const std::vector<armnn::BackendId>& backends = {})
{
// Set input data
std::vector<int32_t> inputShape { 1, 3, 4, 1 };
@@ -788,11 +779,11 @@ void L2Pool2dFP32Relu6Test(std::vector<armnn::BackendId>& backends)
Pooling2dTest<float>(tflite::BuiltinOperator_L2_POOL_2D,
::tflite::TensorType_FLOAT32,
- backends,
inputShape,
outputShape,
inputValues,
expectedOutputValues,
+ backends,
::tflite::Padding_SAME,
2,
2,
@@ -801,475 +792,153 @@ void L2Pool2dFP32Relu6Test(std::vector<armnn::BackendId>& backends)
::tflite::ActivationFunctionType_RELU6);
}
-TEST_SUITE("Pooling2d_GpuAccTests")
+TEST_SUITE("Pooling2dTests")
{
-TEST_CASE ("MaxPooling2d_FP32_PaddingValid_GpuAcc_Test")
+TEST_CASE ("MaxPooling2d_FP32_PaddingValid_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- MaxPool2dFP32PaddingValidTest(backends);
+ MaxPool2dFP32PaddingValidTest();
}
-TEST_CASE ("MaxPooling2d_Int8_PaddingValid_GpuAcc_Test")
+TEST_CASE ("MaxPooling2d_Int8_PaddingValid_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- MaxPool2dInt8PaddingValidTest(backends);
+ MaxPool2dInt8PaddingValidTest();
}
-TEST_CASE ("MaxPooling2d_FP32_PaddingSame_GpuAcc_Test")
+TEST_CASE ("MaxPooling2d_FP32_PaddingSame_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- MaxPool2dFP32PaddingSameTest(backends);
+ MaxPool2dFP32PaddingSameTest();
}
-TEST_CASE ("MaxPooling2d_Int8_PaddingSame_GpuAcc_Test")
+TEST_CASE ("MaxPooling2d_Int8_PaddingSame_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- MaxPool2dInt8PaddingSameTest(backends);
+ MaxPool2dInt8PaddingSameTest();
}
-TEST_CASE ("MaxPooling2d_FP32_Relu_GpuAcc_Test")
+TEST_CASE ("MaxPooling2d_FP32_Relu_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- MaxPool2dFP32ReluTest(backends);
+ MaxPool2dFP32ReluTest();
}
-TEST_CASE ("MaxPooling2d_Int8_Relu_GpuAcc_Test")
+TEST_CASE ("MaxPooling2d_Int8_Relu_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- MaxPool2dInt8ReluTest(backends);
+ MaxPool2dInt8ReluTest();
}
-TEST_CASE ("MaxPooling2d_FP32_Relu6_GpuAcc_Test")
+TEST_CASE ("MaxPooling2d_FP32_Relu6_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- MaxPool2dFP32Relu6Test(backends);
+ MaxPool2dFP32Relu6Test();
}
-TEST_CASE ("MaxPooling2d_Int8_Relu6_GpuAcc_Test")
+TEST_CASE ("MaxPooling2d_Int8_Relu6_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- MaxPool2dInt8Relu6Test(backends);
+ MaxPool2dInt8Relu6Test();
}
-TEST_CASE ("MaxPooling2d_Uint8_PaddingSame_GpuAcc_Test")
+TEST_CASE ("MaxPooling2d_Uint8_PaddingSame_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- MaxPool2dUint8PaddingSameTest(backends);
+ MaxPool2dUint8PaddingSameTest();
}
-TEST_CASE ("MaxPooling2d_Uint8_Relu_GpuAcc_Test")
+TEST_CASE ("MaxPooling2d_Uint8_Relu_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- MaxPool2dUint8ReluTest(backends);
+ MaxPool2dUint8ReluTest();
}
-TEST_CASE ("AveragePooling2d_FP32_PaddingValid_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- AveragePool2dFP32PaddingValidTest(backends);
-}
-
-TEST_CASE ("AveragePooling2d_Int8_PaddingValid_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- AveragePool2dInt8PaddingValidTest(backends);
-}
-
-TEST_CASE ("AveragePooling2d_FP32_PaddingSame_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- AveragePool2dFP32PaddingSameTest(backends);
-}
-
-TEST_CASE ("AveragePooling2d_Int8_PaddingSame_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- AveragePool2dInt8PaddingSameTest(backends);
-}
-
-TEST_CASE ("AveragePooling2d_FP32_Relu_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- AveragePool2dFP32ReluTest(backends);
-}
-
-TEST_CASE ("AveragePooling2d_FP32_Relu6_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- AveragePool2dFP32Relu6Test(backends);
-}
-
-TEST_CASE ("AveragePooling2d_Int8_Relu_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- AveragePool2dInt8ReluTest(backends);
-}
-
-TEST_CASE ("AveragePooling2d_Int8_Relu6_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- AveragePool2dInt8Relu6Test(backends);
-}
-
-TEST_CASE ("AveragePooling2d_Uint8_PaddingSame_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- AveragePool2dUint8PaddingSameTest(backends);
-}
-
-TEST_CASE ("AveragePooling2d_Uint8_Relu_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- AveragePool2dUint8ReluTest(backends);
-}
-
-TEST_CASE ("L2Pooling2d_FP32_PaddingValid_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- L2Pool2dFP32PaddingValidTest(backends);
-}
-
-TEST_CASE ("L2Pooling2d_FP32_PaddingSame_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- L2Pool2dFP32PaddingSameTest(backends);
-}
-
-TEST_CASE ("L2Pooling2d_FP32_Relu_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- L2Pool2dFP32ReluTest(backends);
-}
-
-TEST_CASE ("L2Pooling2d_FP32_Relu6_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- L2Pool2dFP32Relu6Test(backends);
-}
-
-} // TEST_SUITE("Pooling2d_GpuAccTests")
-
-TEST_SUITE("Pooling2d_CpuAccTests")
-{
-
-TEST_CASE ("MaxPooling2d_FP32_PaddingValid_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- MaxPool2dFP32PaddingValidTest(backends);
-}
-
-TEST_CASE ("MaxPooling2d_Int8_PaddingValid_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- MaxPool2dInt8PaddingValidTest(backends);
-}
-
-TEST_CASE ("MaxPooling2d_FP32_PaddingSame_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- MaxPool2dFP32PaddingSameTest(backends);
-}
-
-TEST_CASE ("MaxPooling2d_Int8_PaddingSame_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- MaxPool2dInt8PaddingSameTest(backends);
-}
-
-TEST_CASE ("MaxPooling2d_FP32_Relu_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- MaxPool2dFP32ReluTest(backends);
-}
-
-TEST_CASE ("MaxPooling2d_Int8_Relu_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- MaxPool2dInt8ReluTest(backends);
-}
-
-TEST_CASE ("MaxPooling2d_FP32_Relu6_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- MaxPool2dFP32Relu6Test(backends);
-}
-
-TEST_CASE ("MaxPooling2d_Int8_Relu6_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- MaxPool2dInt8Relu6Test(backends);
-}
-
-TEST_CASE ("MaxPooling2d_Uint8_PaddingSame_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- MaxPool2dUint8PaddingSameTest(backends);
-}
-
-TEST_CASE ("MaxPooling2d_Uint8_Relu_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- MaxPool2dUint8ReluTest(backends);
-}
-
-TEST_CASE ("AveragePooling2d_FP32_PaddingValid_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- AveragePool2dFP32PaddingValidTest(backends);
-}
-
-TEST_CASE ("AveragePooling2d_Int8_PaddingValid_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- AveragePool2dInt8PaddingValidTest(backends);
-}
-
-TEST_CASE ("AveragePooling2d_FP32_PaddingSame_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- AveragePool2dFP32PaddingSameTest(backends);
-}
-
-TEST_CASE ("AveragePooling2d_Int8_PaddingSame_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- AveragePool2dInt8PaddingSameTest(backends);
-}
-
-TEST_CASE ("AveragePooling2d_FP32_Relu_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- AveragePool2dFP32ReluTest(backends);
-}
-
-TEST_CASE ("AveragePooling2d_FP32_Relu6_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- AveragePool2dFP32Relu6Test(backends);
-}
-
-TEST_CASE ("AveragePooling2d_Int8_Relu_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- AveragePool2dInt8ReluTest(backends);
-}
-
-TEST_CASE ("AveragePooling2d_Int8_Relu6_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- AveragePool2dInt8Relu6Test(backends);
-}
-
-TEST_CASE ("AveragePooling2d_Uint8_PaddingSame_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- AveragePool2dUint8PaddingSameTest(backends);
-}
-
-TEST_CASE ("AveragePooling2d_Uint8_Relu_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- AveragePool2dUint8ReluTest(backends);
-}
-
-TEST_CASE ("L2Pooling2d_FP32_PaddingValid_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- L2Pool2dFP32PaddingValidTest(backends);
-}
-
-TEST_CASE ("L2Pooling2d_FP32_PaddingSame_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- L2Pool2dFP32PaddingSameTest(backends);
-}
-
-TEST_CASE ("L2Pooling2d_FP32_Relu_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- L2Pool2dFP32ReluTest(backends);
-}
-
-TEST_CASE ("L2Pooling2d_FP32_Relu6_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- L2Pool2dFP32Relu6Test(backends);
-}
-
-} // TEST_SUITE("Pooling2d_CpuAccTests")
-
-TEST_SUITE("Pooling2d_CpuRefTests")
-{
-
-TEST_CASE ("MaxPooling2d_FP32_PaddingValid_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- MaxPool2dFP32PaddingValidTest(backends);
-}
-
-TEST_CASE ("MaxPooling2d_Int8_PaddingValid_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- MaxPool2dInt8PaddingValidTest(backends);
-}
-
-TEST_CASE ("MaxPooling2d_FP32_PaddingSame_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- MaxPool2dFP32PaddingSameTest(backends);
-}
-
-TEST_CASE ("MaxPooling2d_Int8_PaddingSame_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- MaxPool2dInt8PaddingSameTest(backends);
-}
-
-TEST_CASE ("MaxPooling2d_FP32_Relu_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- MaxPool2dFP32ReluTest(backends);
-}
-
-TEST_CASE ("MaxPooling2d_Int8_Relu_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- MaxPool2dInt8ReluTest(backends);
-}
-
-TEST_CASE ("MaxPooling2d_FP32_Relu6_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- MaxPool2dFP32Relu6Test(backends);
-}
-
-TEST_CASE ("MaxPooling2d_Int8_Relu6_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- MaxPool2dInt8Relu6Test(backends);
-}
-
-TEST_CASE ("MaxPooling2d_Uint8_PaddingSame_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- MaxPool2dUint8PaddingSameTest(backends);
-}
-
-TEST_CASE ("MaxPooling2d_Uint8_Relu_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- MaxPool2dUint8ReluTest(backends);
-}
-
-TEST_CASE ("MaxPooling2d_Int16_PaddingSame_CpuRef_Test")
+TEST_CASE ("MaxPooling2d_Int16_PaddingSame_Test")
{
std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
MaxPool2dInt16PaddingSameTest(backends);
}
-TEST_CASE ("MaxPooling2d_Int16_Relu_CpuRef_Test")
+TEST_CASE ("MaxPooling2d_Int16_Relu_Test")
{
std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
MaxPool2dInt16ReluTest(backends);
}
-TEST_CASE ("AveragePooling2d_FP32_PaddingValid_CpuRef_Test")
+TEST_CASE ("AveragePooling2d_FP32_PaddingValid_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- AveragePool2dFP32PaddingValidTest(backends);
+ AveragePool2dFP32PaddingValidTest();
}
-TEST_CASE ("AveragePooling2d_Int8_PaddingValid_CpuRef_Test")
+TEST_CASE ("AveragePooling2d_Int8_PaddingValid_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- AveragePool2dInt8PaddingValidTest(backends);
+ AveragePool2dInt8PaddingValidTest();
}
-TEST_CASE ("AveragePooling2d_FP32_PaddingSame_CpuRef_Test")
+TEST_CASE ("AveragePooling2d_FP32_PaddingSame_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- AveragePool2dFP32PaddingSameTest(backends);
+ AveragePool2dFP32PaddingSameTest();
}
-TEST_CASE ("AveragePooling2d_Int8_PaddingSame_CpuRef_Test")
+TEST_CASE ("AveragePooling2d_Int8_PaddingSame_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- AveragePool2dInt8PaddingSameTest(backends);
+ AveragePool2dInt8PaddingSameTest();
}
-TEST_CASE ("AveragePooling2d_FP32_Relu_CpuRef_Test")
+TEST_CASE ("AveragePooling2d_FP32_Relu_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- AveragePool2dFP32ReluTest(backends);
+ AveragePool2dFP32ReluTest();
}
-TEST_CASE ("AveragePooling2d_FP32_Relu6_CpuRef_Test")
+TEST_CASE ("AveragePooling2d_FP32_Relu6_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- AveragePool2dFP32Relu6Test(backends);
+ AveragePool2dFP32Relu6Test();
}
-TEST_CASE ("AveragePooling2d_Int8_Relu_CpuRef_Test")
+TEST_CASE ("AveragePooling2d_Int8_Relu_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- AveragePool2dInt8ReluTest(backends);
+ AveragePool2dInt8ReluTest();
}
-TEST_CASE ("AveragePooling2d_Int8_Relu6_CpuRef_Test")
+TEST_CASE ("AveragePooling2d_Int8_Relu6_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- AveragePool2dInt8Relu6Test(backends);
+ AveragePool2dInt8Relu6Test();
}
-TEST_CASE ("AveragePooling2d_Uint8_PaddingSame_CpuRef_Test")
+TEST_CASE ("AveragePooling2d_Uint8_PaddingSame_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- AveragePool2dUint8PaddingSameTest(backends);
+ AveragePool2dUint8PaddingSameTest();
}
-TEST_CASE ("AveragePooling2d_Uint8_Relu_CpuRef_Test")
+TEST_CASE ("AveragePooling2d_Uint8_Relu_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- AveragePool2dUint8ReluTest(backends);
+ AveragePool2dUint8ReluTest();
}
-TEST_CASE ("AveragePooling2d_Int16_PaddingSame_CpuRef_Test")
+TEST_CASE ("AveragePooling2d_Int16_PaddingSame_Test")
{
std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
AveragePool2dInt16PaddingSameTest(backends);
}
-TEST_CASE ("AveragePooling2d_Int16_Relu_CpuRef_Test")
+TEST_CASE ("AveragePooling2d_Int16_Relu_Test")
{
std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
AveragePool2dInt16ReluTest(backends);
}
-TEST_CASE ("L2Pooling2d_FP32_PaddingValid_CpuRef_Test")
+TEST_CASE ("L2Pooling2d_FP32_PaddingValid_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- L2Pool2dFP32PaddingValidTest(backends);
+ L2Pool2dFP32PaddingValidTest();
}
-TEST_CASE ("L2Pooling2d_FP32_PaddingSame_CpuRef_Test")
+TEST_CASE ("L2Pooling2d_FP32_PaddingSame_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- L2Pool2dFP32PaddingSameTest(backends);
+ L2Pool2dFP32PaddingSameTest();
}
-TEST_CASE ("L2Pooling2d_FP32_Relu_CpuRef_Test")
+TEST_CASE ("L2Pooling2d_FP32_Relu_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- L2Pool2dFP32ReluTest(backends);
+ L2Pool2dFP32ReluTest();
}
-TEST_CASE ("L2Pooling2d_FP32_Relu6_CpuRef_Test")
+TEST_CASE ("L2Pooling2d_FP32_Relu6_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- L2Pool2dFP32Relu6Test(backends);
+ L2Pool2dFP32Relu6Test();
}
-} // TEST_SUITE("Pooling2d_CpuRefTests")
+} // TEST_SUITE("Pooling2dTests")
} // namespace armnnDelegate \ No newline at end of file
diff --git a/delegate/test/Pooling2dTestHelper.hpp b/delegate/test/Pooling2dTestHelper.hpp
index 3059f3b7a2..291bbb51e8 100644
--- a/delegate/test/Pooling2dTestHelper.hpp
+++ b/delegate/test/Pooling2dTestHelper.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020, 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -10,12 +10,8 @@
#include <armnn_delegate.hpp>
#include <DelegateTestInterpreter.hpp>
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/kernels/register.h>
#include <tensorflow/lite/version.h>
-#include <doctest/doctest.h>
-
namespace
{
@@ -113,11 +109,11 @@ std::vector<char> CreatePooling2dTfLiteModel(
template <typename T>
void Pooling2dTest(tflite::BuiltinOperator poolingOperatorCode,
tflite::TensorType tensorType,
- std::vector<armnn::BackendId>& backends,
std::vector<int32_t>& inputShape,
std::vector<int32_t>& outputShape,
std::vector<T>& inputValues,
std::vector<T>& expectedOutputValues,
+ const std::vector<armnn::BackendId>& backends = {},
tflite::Padding padding = tflite::Padding_SAME,
int32_t strideWidth = 0,
int32_t strideHeight = 0,
@@ -150,7 +146,7 @@ void Pooling2dTest(tflite::BuiltinOperator poolingOperatorCode,
std::vector<int32_t> tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0);
// Setup interpreter with Arm NN Delegate applied.
- auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+ auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, CaptureAvailableBackends(backends));
CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
CHECK(armnnInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
diff --git a/delegate/test/Pooling3dTest.cpp b/delegate/test/Pooling3dTest.cpp
index a79ee33b0a..04a31f8ee5 100644
--- a/delegate/test/Pooling3dTest.cpp
+++ b/delegate/test/Pooling3dTest.cpp
@@ -1,19 +1,10 @@
//
-// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "Pooling3dTestHelper.hpp"
-#include <armnn_delegate.hpp>
-
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
-#include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-
-#include <tensorflow/lite/version.h>
-
#include <doctest/doctest.h>
namespace armnnDelegate
@@ -22,7 +13,7 @@ namespace armnnDelegate
// Pool3D custom op was only added in tflite r2.6.
#if defined(ARMNN_POST_TFLITE_2_5)
-void MaxPool3dFP32PaddingValidTest(std::vector<armnn::BackendId>& backends)
+void MaxPool3dFP32PaddingValidTest(const std::vector<armnn::BackendId>& backends = {})
{
// Set input and expected output data
std::vector<int32_t> inputShape = { 1, 2, 3, 4, 1 };
@@ -41,11 +32,11 @@ void MaxPool3dFP32PaddingValidTest(std::vector<armnn::BackendId>& backends)
Pooling3dTest<float>(poolType,
::tflite::TensorType_FLOAT32,
- backends,
inputShape,
outputShape,
inputValues,
expectedOutputValues,
+ backends,
padding,
1,
1,
@@ -55,7 +46,7 @@ void MaxPool3dFP32PaddingValidTest(std::vector<armnn::BackendId>& backends)
2);
}
-void MaxPool3dFP32PaddingSameTest(std::vector<armnn::BackendId>& backends)
+void MaxPool3dFP32PaddingSameTest(const std::vector<armnn::BackendId>& backends = {})
{
// Set input data and expected output data
std::vector<int32_t> inputShape = { 1, 2, 3, 4, 1 };
@@ -74,11 +65,11 @@ void MaxPool3dFP32PaddingSameTest(std::vector<armnn::BackendId>& backends)
Pooling3dTest<float>(poolType,
::tflite::TensorType_FLOAT32,
- backends,
inputShape,
outputShape,
inputValues,
expectedOutputValues,
+ backends,
padding,
1,
1,
@@ -88,7 +79,7 @@ void MaxPool3dFP32PaddingSameTest(std::vector<armnn::BackendId>& backends)
2);
}
-void MaxPool3dFP32H1Test(std::vector<armnn::BackendId>& backends)
+void MaxPool3dFP32H1Test(const std::vector<armnn::BackendId>& backends = {})
{
// Set input data and expected output data
std::vector<int32_t> inputShape = { 1, 2, 3, 4, 1 };
@@ -107,11 +98,11 @@ void MaxPool3dFP32H1Test(std::vector<armnn::BackendId>& backends)
Pooling3dTest<float>(poolType,
::tflite::TensorType_FLOAT32,
- backends,
inputShape,
outputShape,
inputValues,
expectedOutputValues,
+ backends,
padding,
1,
1,
@@ -121,7 +112,7 @@ void MaxPool3dFP32H1Test(std::vector<armnn::BackendId>& backends)
2);
}
-void MaxPool3dFP32Test(std::vector<armnn::BackendId>& backends)
+void MaxPool3dFP32Test(const std::vector<armnn::BackendId>& backends = {})
{
// Set input data and expected output data
std::vector<int32_t> inputShape = { 1, 2, 3, 4, 1 };
@@ -140,11 +131,11 @@ void MaxPool3dFP32Test(std::vector<armnn::BackendId>& backends)
Pooling3dTest<float>(poolType,
::tflite::TensorType_FLOAT32,
- backends,
inputShape,
outputShape,
inputValues,
expectedOutputValues,
+ backends,
padding,
1,
1,
@@ -154,7 +145,7 @@ void MaxPool3dFP32Test(std::vector<armnn::BackendId>& backends)
2);
}
-void AveragePool3dFP32PaddingValidTest(std::vector<armnn::BackendId>& backends)
+void AveragePool3dFP32PaddingValidTest(const std::vector<armnn::BackendId>& backends = {})
{
// Set input data and expected output data.
std::vector<int32_t> inputShape = { 1, 2, 3, 4, 1 };
@@ -173,11 +164,11 @@ void AveragePool3dFP32PaddingValidTest(std::vector<armnn::BackendId>& backends)
Pooling3dTest<float>(poolType,
::tflite::TensorType_FLOAT32,
- backends,
inputShape,
outputShape,
inputValues,
expectedOutputValues,
+ backends,
padding,
1,
1,
@@ -187,7 +178,7 @@ void AveragePool3dFP32PaddingValidTest(std::vector<armnn::BackendId>& backends)
2);
}
-void AveragePool3dFP32PaddingSameTest(std::vector<armnn::BackendId>& backends)
+void AveragePool3dFP32PaddingSameTest(const std::vector<armnn::BackendId>& backends = {})
{
// Set input data and expected output data
std::vector<int32_t> inputShape = { 4, 2, 3, 1, 1 };
@@ -206,11 +197,11 @@ void AveragePool3dFP32PaddingSameTest(std::vector<armnn::BackendId>& backends)
Pooling3dTest<float>(poolType,
::tflite::TensorType_FLOAT32,
- backends,
inputShape,
outputShape,
inputValues,
expectedOutputValues,
+ backends,
padding,
1,
1,
@@ -220,7 +211,7 @@ void AveragePool3dFP32PaddingSameTest(std::vector<armnn::BackendId>& backends)
2);
}
-void AveragePool3dFP32H1Test(std::vector<armnn::BackendId>& backends)
+void AveragePool3dFP32H1Test(const std::vector<armnn::BackendId>& backends = {})
{
// Set input data and expected output data
std::vector<int32_t> inputShape = { 1, 2, 3, 4, 1 };
@@ -239,11 +230,11 @@ void AveragePool3dFP32H1Test(std::vector<armnn::BackendId>& backends)
Pooling3dTest<float>(poolType,
::tflite::TensorType_FLOAT32,
- backends,
inputShape,
outputShape,
inputValues,
expectedOutputValues,
+ backends,
padding,
2,
2,
@@ -253,7 +244,7 @@ void AveragePool3dFP32H1Test(std::vector<armnn::BackendId>& backends)
2);
}
-void AveragePool3dFP32Test(std::vector<armnn::BackendId>& backends)
+void AveragePool3dFP32Test(const std::vector<armnn::BackendId>& backends = {})
{
// Set input data and expected output data
std::vector<int32_t> inputShape = { 4, 3, 2, 1, 1 };
@@ -272,11 +263,11 @@ void AveragePool3dFP32Test(std::vector<armnn::BackendId>& backends)
Pooling3dTest<float>(poolType,
::tflite::TensorType_FLOAT32,
- backends,
inputShape,
outputShape,
inputValues,
expectedOutputValues,
+ backends,
padding,
2,
2,
@@ -286,145 +277,44 @@ void AveragePool3dFP32Test(std::vector<armnn::BackendId>& backends)
2);
}
-TEST_SUITE("Pooling3d_GpuAccTests")
-{
-
-TEST_CASE ("MaxPooling3d_FP32_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- MaxPool3dFP32Test(backends);
-}
-
-TEST_CASE ("MaxPooling3d_FP32_PaddingValid_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- MaxPool3dFP32PaddingValidTest(backends);
-}
-
-TEST_CASE ("MaxPooling3d_FP32_PaddingSame_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- MaxPool3dFP32PaddingSameTest(backends);
-}
-
-TEST_CASE ("MaxPooling3d_FP32_H1_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- MaxPool3dFP32H1Test(backends);
-}
-
-TEST_CASE ("AveragePooling3d_FP32_PaddingValid_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- AveragePool3dFP32PaddingValidTest(backends);
-}
-
-TEST_CASE ("AveragePooling3d_FP32_PaddingSame_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- AveragePool3dFP32PaddingSameTest(backends);
-}
-
-TEST_CASE ("AveragePooling3d_FP32_H1_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- AveragePool3dFP32H1Test(backends);
-}
-
-} // TEST_SUITE("Pooling3d_GpuAccTests")
-
-TEST_SUITE("Pooling3d_CpuAccTests")
-{
-
-TEST_CASE ("MaxPooling3d_FP32_PaddingValid_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- MaxPool3dFP32PaddingValidTest(backends);
-}
-
-TEST_CASE ("MaxPooling3d_FP32_PaddingSame_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- MaxPool3dFP32PaddingSameTest(backends);
-}
-
-TEST_CASE ("MaxPooling3d_FP32_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- MaxPool3dFP32Test(backends);
-}
-
-TEST_CASE ("MaxPooling3d_FP32_H1_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- MaxPool3dFP32H1Test(backends);
-}
-
-TEST_CASE ("AveragePooling3d_FP32_PaddingValid_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- AveragePool3dFP32PaddingValidTest(backends);
-}
-
-TEST_CASE ("AveragePooling3d_FP32_PaddingSame_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- AveragePool3dFP32PaddingSameTest(backends);
-}
-
-TEST_CASE ("AveragePooling3d_FP32_H1_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- AveragePool3dFP32H1Test(backends);
-}
-
-} // TEST_SUITE("Pooling3d_CpuAccTests")
-
-TEST_SUITE("Pooling3d_CpuRefTests")
+TEST_SUITE("Pooling3dTests")
{
-TEST_CASE ("MaxPooling3d_FP32_CpuRef_Test")
+TEST_CASE ("MaxPooling3d_FP32_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- MaxPool3dFP32Test(backends);
+ MaxPool3dFP32Test();
}
-TEST_CASE ("MaxPooling3d_FP32_PaddingValid_CpuRef_Test")
+TEST_CASE ("MaxPooling3d_FP32_PaddingValid_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- MaxPool3dFP32PaddingValidTest(backends);
+ MaxPool3dFP32PaddingValidTest();
}
-TEST_CASE ("MaxPooling3d_FP32_PaddingSame_CpuRef_Test")
+TEST_CASE ("MaxPooling3d_FP32_PaddingSame_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- MaxPool3dFP32PaddingSameTest(backends);
+ MaxPool3dFP32PaddingSameTest();
}
-TEST_CASE ("MaxPooling3d_FP32_H1_CpuRef_Test")
+TEST_CASE ("MaxPooling3d_FP32_H1_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- MaxPool3dFP32H1Test(backends);
+ MaxPool3dFP32H1Test();
}
-TEST_CASE ("AveragePooling3d_FP32_PaddingValid_CpuRef_Test")
+TEST_CASE ("AveragePooling3d_FP32_PaddingValid_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- AveragePool3dFP32PaddingValidTest(backends);
+ AveragePool3dFP32PaddingValidTest();
}
-TEST_CASE ("AveragePooling3d_FP32_PaddingSame_CpuRef_Test")
+TEST_CASE ("AveragePooling3d_FP32_PaddingSame_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- AveragePool3dFP32PaddingSameTest(backends);
+ AveragePool3dFP32PaddingSameTest();
}
-TEST_CASE ("AveragePooling3d_FP32_H1_CpuRef_Test")
+TEST_CASE ("AveragePooling3d_FP32_H1_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- AveragePool3dFP32H1Test(backends);
+ AveragePool3dFP32H1Test();
}
-} // TEST_SUITE("Pooling3d_CpuRefTests")
+} // TEST_SUITE("Pooling3dTests")
#endif
diff --git a/delegate/test/Pooling3dTestHelper.hpp b/delegate/test/Pooling3dTestHelper.hpp
index a23413e2fe..365939850d 100644
--- a/delegate/test/Pooling3dTestHelper.hpp
+++ b/delegate/test/Pooling3dTestHelper.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -10,14 +10,9 @@
#include <armnn_delegate.hpp>
#include <DelegateTestInterpreter.hpp>
-#include <flatbuffers/flatbuffers.h>
#include <flatbuffers/flexbuffers.h>
-#include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/kernels/custom_ops_register.h>
#include <tensorflow/lite/version.h>
-#include <doctest/doctest.h>
-
namespace
{
#if defined(ARMNN_POST_TFLITE_2_5)
@@ -138,11 +133,11 @@ std::vector<char> CreatePooling3dTfLiteModel(
template<typename T>
void Pooling3dTest(std::string poolType,
tflite::TensorType tensorType,
- std::vector<armnn::BackendId>& backends,
std::vector<int32_t>& inputShape,
std::vector<int32_t>& outputShape,
std::vector<T>& inputValues,
std::vector<T>& expectedOutputValues,
+ const std::vector<armnn::BackendId>& backends = {},
TfLitePadding padding = kTfLitePaddingSame,
int32_t strideWidth = 0,
int32_t strideHeight = 0,
@@ -190,7 +185,7 @@ void Pooling3dTest(std::string poolType,
std::vector<int32_t> tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0);
// Setup interpreter with Arm NN Delegate applied.
- auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends, opType);
+ auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, CaptureAvailableBackends(backends), opType);
CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
CHECK(armnnInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
diff --git a/delegate/test/PreluTest.cpp b/delegate/test/PreluTest.cpp
index 3240eafe2b..990d91a33f 100644
--- a/delegate/test/PreluTest.cpp
+++ b/delegate/test/PreluTest.cpp
@@ -1,24 +1,15 @@
//
-// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021, 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "PreluTestHelper.hpp"
-#include <armnn_delegate.hpp>
-
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
-#include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-
-#include <tensorflow/lite/version.h>
-
#include <doctest/doctest.h>
namespace armnnDelegate {
-void PreluFloatSimpleTest(std::vector <armnn::BackendId>& backends, bool isAlphaConst, bool isDynamicOutput = false)
+void PreluFloatSimpleTest(bool isAlphaConst, bool isDynamicOutput = false)
{
std::vector<int32_t> inputShape { 1, 2, 3 };
std::vector<int32_t> alphaShape { 1 };
@@ -35,7 +26,6 @@ void PreluFloatSimpleTest(std::vector <armnn::BackendId>& backends, bool isAlpha
PreluTest(tflite::BuiltinOperator_PRELU,
::tflite::TensorType_FLOAT32,
- backends,
inputShape,
alphaShape,
outputShape,
@@ -45,91 +35,29 @@ void PreluFloatSimpleTest(std::vector <armnn::BackendId>& backends, bool isAlpha
isAlphaConst);
}
-TEST_SUITE("Prelu_CpuRefTests")
-{
-
-TEST_CASE ("PreluFp32SimpleConstTest_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- PreluFloatSimpleTest(backends, true);
-}
-
-TEST_CASE ("PreluFp32SimpleTest_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- PreluFloatSimpleTest(backends, false);
-}
-
-TEST_CASE ("PreluFp32SimpleConstDynamicTest_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- PreluFloatSimpleTest(backends, true, true);
-}
-
-TEST_CASE ("PreluFp32SimpleDynamicTest_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- PreluFloatSimpleTest(backends, false, true);
-}
-
-} // TEST_SUITE("Prelu_CpuRefTests")
-
-TEST_SUITE("Prelu_CpuAccTests")
-{
-
-TEST_CASE ("PreluFp32SimpleConstTest_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- PreluFloatSimpleTest(backends, true);
-}
-
-TEST_CASE ("PreluFp32SimpleTest_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- PreluFloatSimpleTest(backends, false);
-}
-
-TEST_CASE ("PreluFp32SimpleConstDynamicTest_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- PreluFloatSimpleTest(backends, true, true);
-}
-
-TEST_CASE ("PreluFp32SimpleDynamicTest_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- PreluFloatSimpleTest(backends, false, true);
-}
-
-} // TEST_SUITE("Prelu_CpuAccTests")
-
-TEST_SUITE("Prelu_GpuAccTests")
+TEST_SUITE("PreluTests")
{
-TEST_CASE ("PreluFp32SimpleConstTest_GpuAcc_Test")
+TEST_CASE ("PreluFp32SimpleConstTest_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- PreluFloatSimpleTest(backends, true);
+ PreluFloatSimpleTest(true);
}
-TEST_CASE ("PreluFp32SimpleTest_GpuAcc_Test")
+TEST_CASE ("PreluFp32SimpleTest_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- PreluFloatSimpleTest(backends, false);
+ PreluFloatSimpleTest(false);
}
-TEST_CASE ("PreluFp32SimpleConstDynamicTest_GpuAcc_Test")
+TEST_CASE ("PreluFp32SimpleConstDynamicTest_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- PreluFloatSimpleTest(backends, true, true);
+ PreluFloatSimpleTest(true, true);
}
-TEST_CASE ("PreluFp32SimpleDynamicTest_GpuAcc_Test")
+TEST_CASE ("PreluFp32SimpleDynamicTest_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- PreluFloatSimpleTest(backends, false, true);
+ PreluFloatSimpleTest(false, true);
}
-} // TEST_SUITE("Prelu_GpuAccTests")
+} // TEST_SUITE("PreluTests")
} \ No newline at end of file
diff --git a/delegate/test/PreluTestHelper.hpp b/delegate/test/PreluTestHelper.hpp
index fa6122fa1f..dbd41cf776 100644
--- a/delegate/test/PreluTestHelper.hpp
+++ b/delegate/test/PreluTestHelper.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021, 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -10,12 +10,8 @@
#include <armnn_delegate.hpp>
#include <DelegateTestInterpreter.hpp>
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/kernels/register.h>
#include <tensorflow/lite/version.h>
-#include <doctest/doctest.h>
-
namespace
{
@@ -113,14 +109,14 @@ std::vector<char> CreatePreluTfLiteModel(tflite::BuiltinOperator preluOperatorCo
void PreluTest(tflite::BuiltinOperator preluOperatorCode,
tflite::TensorType tensorType,
- const std::vector<armnn::BackendId>& backends,
const std::vector<int32_t>& inputShape,
const std::vector<int32_t>& alphaShape,
std::vector<int32_t>& outputShape,
std::vector<float>& inputData,
std::vector<float>& alphaData,
std::vector<float>& expectedOutput,
- bool alphaIsConstant)
+ bool alphaIsConstant,
+ const std::vector<armnn::BackendId>& backends = {})
{
using namespace delegateTestInterpreter;
@@ -138,7 +134,7 @@ void PreluTest(tflite::BuiltinOperator preluOperatorCode,
CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
// Setup interpreter with Arm NN Delegate applied.
- auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+ auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, CaptureAvailableBackends(backends));
CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
CHECK(armnnInterpreter.FillInputTensor<float>(inputData, 0) == kTfLiteOk);
diff --git a/delegate/test/QuantizationTest.cpp b/delegate/test/QuantizationTest.cpp
index 8f9fbff019..75fe20dd41 100644
--- a/delegate/test/QuantizationTest.cpp
+++ b/delegate/test/QuantizationTest.cpp
@@ -1,22 +1,17 @@
//
-// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020, 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "QuantizationTestHelper.hpp"
-#include <armnn_delegate.hpp>
-
-#include <flatbuffers/flatbuffers.h>
-
-
#include <doctest/doctest.h>
namespace armnnDelegate
{
// Dequantize operator test functions.
-void DequantizeUint8Test(std::vector<armnn::BackendId>& backends)
+void DequantizeUint8Test(const std::vector<armnn::BackendId>& backends = {})
{
std::vector<int32_t> inputShape { 2, 4 };
std::vector<int32_t> outputShape { 2, 4 };
@@ -36,14 +31,14 @@ void DequantizeUint8Test(std::vector<armnn::BackendId>& backends)
QuantizationTest<uint8_t, float>(tflite::BuiltinOperator_DEQUANTIZE,
::tflite::TensorType_UINT8,
::tflite::TensorType_FLOAT32,
- backends,
inputShape,
outputShape,
inputValues,
- expectedOutputValues);
+ expectedOutputValues,
+ backends);
}
-void DequantizeInt8Test(std::vector<armnn::BackendId>& backends)
+void DequantizeInt8Test(const std::vector<armnn::BackendId>& backends = {})
{
std::vector<int32_t> inputShape { 2, 4 };
std::vector<int32_t> outputShape { 2, 4 };
@@ -62,14 +57,14 @@ void DequantizeInt8Test(std::vector<armnn::BackendId>& backends)
QuantizationTest<int8_t , float>(tflite::BuiltinOperator_DEQUANTIZE,
::tflite::TensorType_INT8,
::tflite::TensorType_FLOAT32,
- backends,
inputShape,
outputShape,
inputValues,
- expectedOutputValues);
+ expectedOutputValues,
+ backends);
}
-void DequantizeInt16Test(std::vector<armnn::BackendId>& backends)
+void DequantizeInt16Test(const std::vector<armnn::BackendId>& backends = {})
{
std::vector<int32_t> inputShape { 2, 5 };
std::vector<int32_t> outputShape { 2, 5 };
@@ -88,15 +83,15 @@ void DequantizeInt16Test(std::vector<armnn::BackendId>& backends)
QuantizationTest<int16_t, float>(tflite::BuiltinOperator_DEQUANTIZE,
::tflite::TensorType_INT16,
::tflite::TensorType_FLOAT32,
- backends,
inputShape,
outputShape,
inputValues,
- expectedOutputValues);
+ expectedOutputValues,
+ backends);
}
// Quantize operator test functions.
-void QuantizeFloat32Uint8Test(std::vector<armnn::BackendId>& backends)
+void QuantizeFloat32Uint8Test(const std::vector<armnn::BackendId>& backends = {})
{
std::vector<int32_t> inputShape { 2, 4 };
std::vector<int32_t> outputShape { 2, 4 };
@@ -116,14 +111,14 @@ void QuantizeFloat32Uint8Test(std::vector<armnn::BackendId>& backends)
QuantizationTest<float, uint8_t>(tflite::BuiltinOperator_QUANTIZE,
::tflite::TensorType_FLOAT32,
::tflite::TensorType_UINT8,
- backends,
inputShape,
outputShape,
inputValues,
- expectedOutputValues);
+ expectedOutputValues,
+ backends);
}
-void QuantizeFloat32Int8Test(std::vector<armnn::BackendId>& backends)
+void QuantizeFloat32Int8Test(const std::vector<armnn::BackendId>& backends = {})
{
std::vector<int32_t> inputShape { 2, 4 };
std::vector<int32_t> outputShape { 2, 4 };
@@ -140,16 +135,16 @@ void QuantizeFloat32Int8Test(std::vector<armnn::BackendId>& backends)
};
QuantizationTest<float, int8_t>(tflite::BuiltinOperator_QUANTIZE,
- ::tflite::TensorType_FLOAT32,
- ::tflite::TensorType_INT8,
- backends,
- inputShape,
- outputShape,
- inputValues,
- expectedOutputValues);
+ ::tflite::TensorType_FLOAT32,
+ ::tflite::TensorType_INT8,
+ inputShape,
+ outputShape,
+ inputValues,
+ expectedOutputValues,
+ backends);
}
-void QuantizeFloat32Int16Test(std::vector<armnn::BackendId>& backends)
+void QuantizeFloat32Int16Test(const std::vector<armnn::BackendId>& backends = {})
{
std::vector<int32_t> inputShape { 2, 4 };
std::vector<int32_t> outputShape { 2, 4 };
@@ -168,14 +163,14 @@ void QuantizeFloat32Int16Test(std::vector<armnn::BackendId>& backends)
QuantizationTest<float, int16_t>(tflite::BuiltinOperator_QUANTIZE,
::tflite::TensorType_FLOAT32,
::tflite::TensorType_INT16,
- backends,
inputShape,
outputShape,
inputValues,
- expectedOutputValues);
+ expectedOutputValues,
+ backends);
}
-void QuantizeInt16Int16Test(std::vector<armnn::BackendId>& backends)
+void QuantizeInt16Int16Test(const std::vector<armnn::BackendId>& backends = {})
{
std::vector<int32_t> inputShape { 2, 4 };
std::vector<int32_t> outputShape { 2, 4 };
@@ -194,14 +189,14 @@ void QuantizeInt16Int16Test(std::vector<armnn::BackendId>& backends)
QuantizationTest<int16_t, int16_t>(tflite::BuiltinOperator_QUANTIZE,
::tflite::TensorType_INT16,
::tflite::TensorType_INT16,
- backends,
inputShape,
outputShape,
inputValues,
- expectedOutputValues);
+ expectedOutputValues,
+ backends);
}
-void QuantizeInt16Int8Test(std::vector<armnn::BackendId>& backends)
+void QuantizeInt16Int8Test(const std::vector<armnn::BackendId>& backends = {})
{
std::vector<int32_t> inputShape { 2, 4 };
std::vector<int32_t> outputShape { 2, 4 };
@@ -218,16 +213,16 @@ void QuantizeInt16Int8Test(std::vector<armnn::BackendId>& backends)
};
QuantizationTest<int16_t, int8_t>(tflite::BuiltinOperator_QUANTIZE,
- ::tflite::TensorType_INT16,
- ::tflite::TensorType_INT8,
- backends,
- inputShape,
- outputShape,
- inputValues,
- expectedOutputValues);
+ ::tflite::TensorType_INT16,
+ ::tflite::TensorType_INT8,
+ inputShape,
+ outputShape,
+ inputValues,
+ expectedOutputValues,
+ backends);
}
-void QuantizeInt8Uint8Test(std::vector<armnn::BackendId>& backends)
+void QuantizeInt8Uint8Test(const std::vector<armnn::BackendId>& backends = {})
{
std::vector<int32_t> inputShape { 2, 4 };
std::vector<int32_t> outputShape { 2, 4 };
@@ -246,14 +241,14 @@ void QuantizeInt8Uint8Test(std::vector<armnn::BackendId>& backends)
QuantizationTest<int8_t, uint8_t>(tflite::BuiltinOperator_QUANTIZE,
::tflite::TensorType_INT8,
::tflite::TensorType_UINT8,
- backends,
inputShape,
outputShape,
inputValues,
- expectedOutputValues);
+ expectedOutputValues,
+ backends);
}
-void QuantizeUint8Int8Test(std::vector<armnn::BackendId>& backends)
+void QuantizeUint8Int8Test(const std::vector<armnn::BackendId>& backends = {})
{
std::vector<int32_t> inputShape { 2, 4 };
std::vector<int32_t> outputShape { 2, 4 };
@@ -272,66 +267,61 @@ void QuantizeUint8Int8Test(std::vector<armnn::BackendId>& backends)
QuantizationTest<uint8_t, int8_t>(tflite::BuiltinOperator_QUANTIZE,
::tflite::TensorType_UINT8,
::tflite::TensorType_INT8,
- backends,
inputShape,
outputShape,
inputValues,
- expectedOutputValues);
+ expectedOutputValues,
+ backends);
}
TEST_SUITE("CpuRef_QuantizationTests")
{
-TEST_CASE ("DEQUANTIZE_UINT8_CpuRef_Test")
+TEST_CASE ("DEQUANTIZE_UINT8_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- DequantizeUint8Test(backends);
+ DequantizeUint8Test();
}
-TEST_CASE ("DEQUANTIZE_INT8_CpuRef_Test")
+TEST_CASE ("DEQUANTIZE_INT8_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- DequantizeInt8Test(backends);
+ DequantizeInt8Test();
}
-TEST_CASE ("DEQUANTIZE_INT16_CpuRef_Test")
+TEST_CASE ("DEQUANTIZE_INT16_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- DequantizeInt16Test(backends);
+ DequantizeInt16Test();
}
-TEST_CASE ("QUANTIZE_FLOAT32_UINT8_CpuRef_Test")
+TEST_CASE ("QUANTIZE_FLOAT32_UINT8_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- QuantizeFloat32Uint8Test(backends);
+ QuantizeFloat32Uint8Test();
}
-TEST_CASE ("QUANTIZE_FLOAT32_INT8_CpuRef_Test")
+TEST_CASE ("QUANTIZE_FLOAT32_INT8_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- QuantizeFloat32Int8Test(backends);
+ QuantizeFloat32Int8Test();
}
-TEST_CASE ("QUANTIZE_FLOAT32_INT16_CpuRef_Test")
+TEST_CASE ("QUANTIZE_FLOAT32_INT16_Test")
{
std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
QuantizeFloat32Int16Test(backends);
}
-TEST_CASE ("QUANTIZE_INT16_INT16_CpuRef_Test")
+TEST_CASE ("QUANTIZE_INT16_INT16_Test")
{
std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
QuantizeInt16Int16Test(backends);
}
-TEST_CASE ("QUANTIZE_INT16_INT8_CpuRef_Test")
+TEST_CASE ("QUANTIZE_INT16_INT8_Test")
{
std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
QuantizeInt16Int8Test(backends);
@@ -339,115 +329,15 @@ TEST_CASE ("QUANTIZE_INT16_INT8_CpuRef_Test")
-TEST_CASE ("QUANTIZE_INT8_UINT8_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- QuantizeInt8Uint8Test(backends);
-}
-
-
-TEST_CASE ("QUANTIZE_UINT8_INT8_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- QuantizeUint8Int8Test(backends);
-}
-
-}
-
-TEST_SUITE("CpuAcc_QuantizationTests")
-{
-
-// Dequantize Operator Tests
-TEST_CASE ("DEQUANTIZE_UINT8_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- DequantizeUint8Test(backends);
-}
-
-TEST_CASE ("DEQUANTIZE_INT8_CpuAcc_Test")
+TEST_CASE ("QUANTIZE_INT8_UINT8_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- DequantizeInt8Test(backends);
+ QuantizeInt8Uint8Test();
}
-TEST_CASE ("DEQUANTIZE_INT16_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- DequantizeInt16Test(backends);
-}
-
-// Quantize Operator Tests
-TEST_CASE ("QUANTIZE_FLOAT32_UINT8_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- QuantizeFloat32Uint8Test(backends);
-}
-
-TEST_CASE ("QUANTIZE_FLOAT32_INT8_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- QuantizeFloat32Int8Test(backends);
-}
-
-TEST_CASE ("QUANTIZE_INT8_UINT8_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- QuantizeInt8Uint8Test(backends);
-}
-
-TEST_CASE ("QUANTIZE_UINT8_INT8_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- QuantizeUint8Int8Test(backends);
-}
-
-}
-
-TEST_SUITE("GpuAcc_QuantizationTests")
-{
-
-// Dequantize Operator Tests
-TEST_CASE ("DEQUANTIZE_UINT8_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- DequantizeUint8Test(backends);
-}
-
-TEST_CASE ("DEQUANTIZE_INT8_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- DequantizeInt8Test(backends);
-}
-
-TEST_CASE ("DEQUANTIZE_INT16_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- DequantizeInt16Test(backends);
-}
-
-// Quantize Operator Tests
-TEST_CASE ("QUANTIZE_FLOAT32_UINT8_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- QuantizeFloat32Uint8Test(backends);
-}
-
-TEST_CASE ("QUANTIZE_FLOAT32_INT8_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- QuantizeFloat32Int8Test(backends);
-}
-
-TEST_CASE ("QUANTIZE_INT8_UINT8_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- QuantizeInt8Uint8Test(backends);
-}
-TEST_CASE ("QUANTIZE_UINT8_INT8_GpuAcc_Test")
+TEST_CASE ("QUANTIZE_UINT8_INT8_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- QuantizeUint8Int8Test(backends);
+ QuantizeUint8Int8Test();
}
}
diff --git a/delegate/test/QuantizationTestHelper.hpp b/delegate/test/QuantizationTestHelper.hpp
index 94290288b2..f8a6f8b4ea 100644
--- a/delegate/test/QuantizationTestHelper.hpp
+++ b/delegate/test/QuantizationTestHelper.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020, 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -10,12 +10,8 @@
#include <armnn_delegate.hpp>
#include <DelegateTestInterpreter.hpp>
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/kernels/register.h>
#include <tensorflow/lite/version.h>
-#include <doctest/doctest.h>
-
namespace
{
@@ -122,11 +118,11 @@ template <typename InputT, typename OutputT>
void QuantizationTest(tflite::BuiltinOperator quantizeOperatorCode,
tflite::TensorType inputTensorType,
tflite::TensorType outputTensorType,
- std::vector<armnn::BackendId>& backends,
std::vector<int32_t>& inputShape,
std::vector<int32_t>& outputShape,
std::vector<InputT>& inputValues,
std::vector<OutputT>& expectedOutputValues,
+ const std::vector<armnn::BackendId>& backends = {},
float quantScale = 1.0f,
int quantOffset = 0)
{
@@ -148,7 +144,7 @@ void QuantizationTest(tflite::BuiltinOperator quantizeOperatorCode,
std::vector<int32_t> tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0);
// Setup interpreter with Arm NN Delegate applied.
- auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+ auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, CaptureAvailableBackends(backends));
CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
CHECK(armnnInterpreter.FillInputTensor(inputValues, 0) == kTfLiteOk);
CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
diff --git a/delegate/test/RedefineTestHelper.hpp b/delegate/test/RedefineTestHelper.hpp
index c1838e01b1..076925afac 100644
--- a/delegate/test/RedefineTestHelper.hpp
+++ b/delegate/test/RedefineTestHelper.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020, 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -10,12 +10,8 @@
#include <armnn_delegate.hpp>
#include <DelegateTestInterpreter.hpp>
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/kernels/register.h>
#include <tensorflow/lite/version.h>
-#include <doctest/doctest.h>
-
namespace
{
@@ -263,13 +259,13 @@ std::vector<char> CreateRedefineTfLiteModel(
template <typename T>
void RedefineTest(tflite::BuiltinOperator redefineOperatorCode,
tflite::TensorType tensorType,
- const std::vector<armnn::BackendId>& backends,
const std::vector<int32_t>& inputShape,
std::vector<int32_t>& outputShape,
std::vector<T>& inputValues,
std::vector<T>& expectedOutputValues,
std::vector<int32_t>& additionalData,
bool useOption = true,
+ const std::vector<armnn::BackendId>& backends = {},
float quantScale = 1.0f,
int quantOffset = 0)
{
@@ -317,7 +313,7 @@ void RedefineTest(tflite::BuiltinOperator redefineOperatorCode,
std::vector<int32_t> tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0);
// Setup interpreter with Arm NN Delegate applied.
- auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+ auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, CaptureAvailableBackends(backends));
CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
CHECK(armnnInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
diff --git a/delegate/test/ReduceTest.cpp b/delegate/test/ReduceTest.cpp
index f8606a8189..424f15ed64 100644
--- a/delegate/test/ReduceTest.cpp
+++ b/delegate/test/ReduceTest.cpp
@@ -1,22 +1,18 @@
//
-// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021, 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "ReduceTestHelper.hpp"
-#include <armnn_delegate.hpp>
-
-#include <flatbuffers/flatbuffers.h>
-
#include <doctest/doctest.h>
namespace armnnDelegate
{
void ReduceUint8KeepDimsTest(tflite::BuiltinOperator reduceOperatorCode,
- std::vector<armnn::BackendId>& backends,
- std::vector<uint8_t>& expectedOutputValues)
+ std::vector<uint8_t>& expectedOutputValues,
+ const std::vector<armnn::BackendId>& backends = {})
{
std::vector<int32_t> input0Shape { 1, 1, 2, 3 };
std::vector<int32_t> input1Shape { 1 };
@@ -28,19 +24,19 @@ void ReduceUint8KeepDimsTest(tflite::BuiltinOperator reduceOperatorCode,
ReduceTest<uint8_t>(reduceOperatorCode,
::tflite::TensorType_UINT8,
- backends,
input0Shape,
input1Shape,
expectedOutputShape,
input0Values,
input1Values,
expectedOutputValues,
- true);
+ true,
+ backends);
}
void ReduceUint8Test(tflite::BuiltinOperator reduceOperatorCode,
- std::vector<armnn::BackendId>& backends,
- std::vector<uint8_t>& expectedOutputValues)
+ std::vector<uint8_t>& expectedOutputValues,
+ const std::vector<armnn::BackendId>& backends = {})
{
std::vector<int32_t> input0Shape { 1, 1, 2, 3 };
std::vector<int32_t> input1Shape { 1 };
@@ -52,19 +48,19 @@ void ReduceUint8Test(tflite::BuiltinOperator reduceOperatorCode,
ReduceTest<uint8_t>(reduceOperatorCode,
::tflite::TensorType_UINT8,
- backends,
input0Shape,
input1Shape,
expectedOutputShape,
input0Values,
input1Values,
expectedOutputValues,
- false);
+ false,
+ backends);
}
void ReduceFp32KeepDimsTest(tflite::BuiltinOperator reduceOperatorCode,
- std::vector<armnn::BackendId>& backends,
- std::vector<float>& expectedOutputValues)
+ std::vector<float>& expectedOutputValues,
+ const std::vector<armnn::BackendId>& backends = {})
{
std::vector<int32_t> input0Shape { 1, 1, 2, 3 };
std::vector<int32_t> input1Shape { 1 };
@@ -76,19 +72,19 @@ void ReduceFp32KeepDimsTest(tflite::BuiltinOperator reduceOperatorCode,
ReduceTest<float>(reduceOperatorCode,
::tflite::TensorType_FLOAT32,
- backends,
input0Shape,
input1Shape,
expectedOutputShape,
input0Values,
input1Values,
expectedOutputValues,
- true);
+ true,
+ backends);
}
void ReduceFp32Test(tflite::BuiltinOperator reduceOperatorCode,
- std::vector<armnn::BackendId>& backends,
- std::vector<float>& expectedOutputValues)
+ std::vector<float>& expectedOutputValues,
+ const std::vector<armnn::BackendId>& backends = {})
{
std::vector<int32_t> input0Shape { 1, 1, 2, 3 };
std::vector<int32_t> input1Shape { 1 };
@@ -100,323 +96,101 @@ void ReduceFp32Test(tflite::BuiltinOperator reduceOperatorCode,
ReduceTest<float>(reduceOperatorCode,
::tflite::TensorType_FLOAT32,
- backends,
input0Shape,
input1Shape,
expectedOutputShape,
input0Values,
input1Values,
expectedOutputValues,
- false);
+ false,
+ backends);
}
// REDUCE_MAX Tests
-TEST_SUITE("ReduceMax_CpuRefTests")
-{
-
-TEST_CASE ("ReduceMax_Uint8_KeepDims_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
- std::vector<uint8_t> expectedOutputValues { 4, 3, 3 };
- ReduceUint8KeepDimsTest(tflite::BuiltinOperator_REDUCE_MAX,
- backends,
- expectedOutputValues);
-}
-
-TEST_CASE ("ReduceMax_Uint8_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
- std::vector<uint8_t> expectedOutputValues { 4, 3, 3 };
- ReduceUint8Test(tflite::BuiltinOperator_REDUCE_MAX,
- backends,
- expectedOutputValues);
-}
-
-TEST_CASE ("ReduceMax_Fp32_KeepDims_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
- std::vector<float> expectedOutputValues { 1001.0f, 1002.0f, 1003.0f };
- ReduceFp32KeepDimsTest(tflite::BuiltinOperator_REDUCE_MAX,
- backends,
- expectedOutputValues);
-}
-
-TEST_CASE ("ReduceMax_Fp32_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
- std::vector<float> expectedOutputValues { 1001.0f, 1002.0f, 1003.0f };
- ReduceFp32Test(tflite::BuiltinOperator_REDUCE_MAX,
- backends,
- expectedOutputValues);
-}
-
-} // End of ReduceMax_CpuRefTests
-
-TEST_SUITE("ReduceMax_CpuAccTests")
-{
-
-TEST_CASE ("ReduceMax_Uint8_KeepDims_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
- std::vector<uint8_t> expectedOutputValues { 4, 3, 3 };
- ReduceUint8KeepDimsTest(tflite::BuiltinOperator_REDUCE_MAX,
- backends,
- expectedOutputValues);
-}
-
-TEST_CASE ("ReduceMax_Uint8_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
- std::vector<uint8_t> expectedOutputValues { 4, 3, 3 };
- ReduceUint8Test(tflite::BuiltinOperator_REDUCE_MAX,
- backends,
- expectedOutputValues);
-}
-
-
-TEST_CASE ("ReduceMax_Fp32_KeepDims_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
- std::vector<float> expectedOutputValues { 1001.0f, 1002.0f, 1003.0f };
- ReduceFp32KeepDimsTest(tflite::BuiltinOperator_REDUCE_MAX,
- backends,
- expectedOutputValues);
-}
-
-TEST_CASE ("ReduceMax_Fp32_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
- std::vector<float> expectedOutputValues { 1001.0f, 1002.0f, 1003.0f };
- ReduceFp32Test(tflite::BuiltinOperator_REDUCE_MAX,
- backends,
- expectedOutputValues);
-}
-
-} // End of ReduceMax_CpuAccTests
-
-TEST_SUITE("ReduceMax_GpuAccTests")
+TEST_SUITE("ReduceMaxTests")
{
-TEST_CASE ("ReduceMax_Uint8_KeepDims_GpuAcc_Test")
+TEST_CASE ("ReduceMax_Uint8_KeepDims_Test")
{
- std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
std::vector<uint8_t> expectedOutputValues { 4, 3, 3 };
ReduceUint8KeepDimsTest(tflite::BuiltinOperator_REDUCE_MAX,
- backends,
expectedOutputValues);
}
-TEST_CASE ("ReduceMax_Uint8_GpuAcc_Test")
+TEST_CASE ("ReduceMax_Uint8_Test")
{
- std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
std::vector<uint8_t> expectedOutputValues { 4, 3, 3 };
ReduceUint8Test(tflite::BuiltinOperator_REDUCE_MAX,
- backends,
expectedOutputValues);
}
-
-TEST_CASE ("ReduceMax_Fp32_KeepDims_GpuAcc_Test")
+TEST_CASE ("ReduceMax_Fp32_KeepDims_Test")
{
- std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
std::vector<float> expectedOutputValues { 1001.0f, 1002.0f, 1003.0f };
ReduceFp32KeepDimsTest(tflite::BuiltinOperator_REDUCE_MAX,
- backends,
expectedOutputValues);
}
-TEST_CASE ("ReduceMax_Fp32_GpuAcc_Test")
+TEST_CASE ("ReduceMax_Fp32_Test")
{
- std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
std::vector<float> expectedOutputValues { 1001.0f, 1002.0f, 1003.0f };
ReduceFp32Test(tflite::BuiltinOperator_REDUCE_MAX,
- backends,
expectedOutputValues);
}
-} // End of ReduceMax_GpuAccTests
+} // End of ReduceMaxTests
// REDUCE_MIN Tests
-TEST_SUITE("ReduceMin_CpuRefTests")
-{
-
-TEST_CASE ("ReduceMin_Fp32_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
- std::vector<float> expectedOutputValues { 10.0f, 11.0f, 12.0f };
- ReduceFp32Test(tflite::BuiltinOperator_REDUCE_MIN,
- backends,
- expectedOutputValues);
-}
-
-} // End of ReduceMin_CpuRefTests
-
-TEST_SUITE("ReduceMin_CpuAccTests")
-{
-
-TEST_CASE ("ReduceMin_Fp32_CpuAcc_Test")
+TEST_SUITE("ReduceMinTests")
{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
- std::vector<float> expectedOutputValues { 10.0f, 11.0f, 12.0f };
- ReduceFp32Test(tflite::BuiltinOperator_REDUCE_MIN,
- backends,
- expectedOutputValues);
-}
-} // End of ReduceMin_CpuAccTests
-
-TEST_SUITE("ReduceMin_GpuAccTests")
-{
-
-TEST_CASE ("ReduceMin_Fp32_GpuAcc_Test")
+TEST_CASE ("ReduceMin_Fp32_Test")
{
- std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
std::vector<float> expectedOutputValues { 10.0f, 11.0f, 12.0f };
ReduceFp32Test(tflite::BuiltinOperator_REDUCE_MIN,
- backends,
expectedOutputValues);
}
-} // End of ReduceMin_GpuAccTests
+} // End of ReduceMinTests
// SUM Tests
-TEST_SUITE("Sum_CpuRefTests")
-{
-
-TEST_CASE ("Sum_Uint8_KeepDims_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
- std::vector<uint8_t> expectedOutputValues { 5, 5, 4 };
- ReduceUint8KeepDimsTest(tflite::BuiltinOperator_SUM,
- backends,
- expectedOutputValues);
-}
-
-TEST_CASE ("Sum_Fp32_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
- std::vector<float> expectedOutputValues { 1011.0f, 1013.0f, 1015.0f };
- ReduceFp32Test(tflite::BuiltinOperator_SUM,
- backends,
- expectedOutputValues);
-}
-
-} // End of Sum_CpuRefTests
-
-TEST_SUITE("Sum_CpuAccTests")
-{
-
-TEST_CASE ("Sum_Uint8_KeepDims_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
- std::vector<uint8_t> expectedOutputValues { 5, 5, 4 };
- ReduceUint8KeepDimsTest(tflite::BuiltinOperator_SUM,
- backends,
- expectedOutputValues);
-}
-
-TEST_CASE ("Sum_Fp32_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
- std::vector<float> expectedOutputValues { 1011.0f, 1013.0f, 1015.0f };
- ReduceFp32Test(tflite::BuiltinOperator_SUM,
- backends,
- expectedOutputValues);
-}
-
-} // End of Sum_CpuAccTests
-
-TEST_SUITE("Sum_GpuAccTests")
+TEST_SUITE("SumTests")
{
-TEST_CASE ("Sum_Uint8_KeepDims_GpuAcc_Test")
+TEST_CASE ("Sum_Uint8_KeepDims_Test")
{
- std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
std::vector<uint8_t> expectedOutputValues { 5, 5, 4 };
ReduceUint8KeepDimsTest(tflite::BuiltinOperator_SUM,
- backends,
expectedOutputValues);
}
-TEST_CASE ("Sum_Fp32_GpuAcc_Test")
+TEST_CASE ("Sum_Fp32_Test")
{
- std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
std::vector<float> expectedOutputValues { 1011.0f, 1013.0f, 1015.0f };
ReduceFp32Test(tflite::BuiltinOperator_SUM,
- backends,
expectedOutputValues);
}
-} // End of Sum_GpuAccTests
+} // End of SumTests
// PROD Tests
-TEST_SUITE("Prod_CpuRefTests")
-{
-
-TEST_CASE ("Prod_Uint8_KeepDims_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
- std::vector<uint8_t> expectedOutputValues { 4, 6, 3 };
- ReduceUint8KeepDimsTest(tflite::BuiltinOperator_REDUCE_PROD,
- backends,
- expectedOutputValues);
-}
-
-TEST_CASE ("Prod_Fp32_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
- std::vector<float> expectedOutputValues { 10010.0f, 11022.0f, 12036.0f };
- ReduceFp32Test(tflite::BuiltinOperator_REDUCE_PROD,
- backends,
- expectedOutputValues);
-}
-
-} // End of Prod_CpuRefTests
-
-TEST_SUITE("Prod_CpuAccTests")
-{
-
-TEST_CASE ("Prod_Uint8_KeepDims_CpuAcc_Test" )
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
- std::vector<uint8_t> expectedOutputValues { 4, 6, 3 };
- ReduceUint8KeepDimsTest(tflite::BuiltinOperator_REDUCE_PROD,
- backends,
- expectedOutputValues);
-}
-
-TEST_CASE ("Prod_Fp32_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
- std::vector<float> expectedOutputValues { 10010.0f, 11022.0f, 12036.0f };
- ReduceFp32Test(tflite::BuiltinOperator_REDUCE_PROD,
- backends,
- expectedOutputValues);
-}
-
-} // End of Prod_CpuAccTests
-
-TEST_SUITE("Prod_GpuAccTests")
+TEST_SUITE("ProdTests")
{
-TEST_CASE ("Prod_Uint8_KeepDims_GpuAcc_Test")
+TEST_CASE ("Prod_Uint8_KeepDims_Test")
{
- std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
std::vector<uint8_t> expectedOutputValues { 4, 6, 3 };
ReduceUint8KeepDimsTest(tflite::BuiltinOperator_REDUCE_PROD,
- backends,
expectedOutputValues);
}
-TEST_CASE ("Prod_Fp32_GpuAcc_Test")
+TEST_CASE ("Prod_Fp32_Test")
{
- std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
std::vector<float> expectedOutputValues { 10010.0f, 11022.0f, 12036.0f };
ReduceFp32Test(tflite::BuiltinOperator_REDUCE_PROD,
- backends,
expectedOutputValues);
}
-} // End of Prod_GpuAccTests
+} // End of ProdTests
} // namespace armnnDelegate \ No newline at end of file
diff --git a/delegate/test/ReduceTestHelper.hpp b/delegate/test/ReduceTestHelper.hpp
index f6acdbc84f..5ec145e9e0 100644
--- a/delegate/test/ReduceTestHelper.hpp
+++ b/delegate/test/ReduceTestHelper.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021, 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -10,12 +10,8 @@
#include <armnn_delegate.hpp>
#include <DelegateTestInterpreter.hpp>
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/kernels/register.h>
#include <tensorflow/lite/version.h>
-#include <doctest/doctest.h>
-
namespace
{
@@ -145,7 +141,6 @@ std::vector<char> CreateReduceTfLiteModel(tflite::BuiltinOperator reduceOperator
template <typename T>
void ReduceTest(tflite::BuiltinOperator reduceOperatorCode,
tflite::TensorType tensorType,
- std::vector<armnn::BackendId>& backends,
std::vector<int32_t>& input0Shape,
std::vector<int32_t>& input1Shape,
std::vector<int32_t>& expectedOutputShape,
@@ -153,6 +148,7 @@ void ReduceTest(tflite::BuiltinOperator reduceOperatorCode,
std::vector<int32_t>& input1Values,
std::vector<T>& expectedOutputValues,
const bool keepDims,
+ const std::vector<armnn::BackendId>& backends = {},
float quantScale = 1.0f,
int quantOffset = 0)
{
@@ -187,7 +183,7 @@ void ReduceTest(tflite::BuiltinOperator reduceOperatorCode,
std::vector<int32_t> tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0);
// Setup interpreter with Arm NN Delegate applied.
- auto armnnInterpreter = DelegateTestInterpreter(modelBufferArmNN, backends);
+ auto armnnInterpreter = DelegateTestInterpreter(modelBufferArmNN, CaptureAvailableBackends(backends));
CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
CHECK(armnnInterpreter.FillInputTensor<T>(input0Values, 0) == kTfLiteOk);
CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
diff --git a/delegate/test/ReshapeTest.cpp b/delegate/test/ReshapeTest.cpp
index 9a34173e3c..ed6ee80d8d 100644
--- a/delegate/test/ReshapeTest.cpp
+++ b/delegate/test/ReshapeTest.cpp
@@ -1,14 +1,10 @@
//
-// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020, 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "RedefineTestHelper.hpp"
-#include <armnn_delegate.hpp>
-
-#include <flatbuffers/flatbuffers.h>
-
#include <doctest/doctest.h>
#include <half/half.hpp>
@@ -18,7 +14,7 @@ using Half = half_float::half;
namespace armnnDelegate
{
-void ReshapeSimpleTest(std::vector<armnn::BackendId>& backends, bool useOption = true)
+void ReshapeSimpleTest(bool useOption = true, const std::vector<armnn::BackendId>& backends = {})
{
// Set input data
std::vector<int32_t> inputShape { 1, 3, 4, 1 };
@@ -35,18 +31,18 @@ void ReshapeSimpleTest(std::vector<armnn::BackendId>& backends, bool useOption =
RedefineTest<float>(tflite::BuiltinOperator_RESHAPE,
::tflite::TensorType_FLOAT32,
- backends,
inputShape,
outputShape,
inputValues,
expectedOutputValues,
targetShape,
- useOption);
+ useOption,
+ backends);
}
using namespace half_float::literal;
-void ReshapeSimpleFloat16Test(std::vector<armnn::BackendId>& backends, bool useOption = true)
+void ReshapeSimpleFloat16Test(bool useOption = true, const std::vector<armnn::BackendId>& backends = {})
{
// Set input data
std::vector<int32_t> inputShape { 1, 3, 4, 1 };
@@ -63,16 +59,16 @@ void ReshapeSimpleFloat16Test(std::vector<armnn::BackendId>& backends, bool useO
RedefineTest<Half>(tflite::BuiltinOperator_RESHAPE,
::tflite::TensorType_FLOAT16,
- backends,
inputShape,
outputShape,
inputValues,
expectedOutputValues,
targetShape,
- useOption);
+ useOption,
+ backends);
}
-void ReshapeReduceDimTest(std::vector<armnn::BackendId>& backends, bool useOption = true)
+void ReshapeReduceDimTest(bool useOption = true, const std::vector<armnn::BackendId>& backends = {})
{
// Set input data
std::vector<int32_t> inputShape { 1, 3, 4, 1 };
@@ -89,16 +85,16 @@ void ReshapeReduceDimTest(std::vector<armnn::BackendId>& backends, bool useOptio
RedefineTest<float>(tflite::BuiltinOperator_RESHAPE,
::tflite::TensorType_FLOAT32,
- backends,
inputShape,
outputShape,
inputValues,
expectedOutputValues,
targetShape,
- useOption);
+ useOption,
+ backends);
}
-void ReshapeFlattenTest(std::vector<armnn::BackendId>& backends, bool useOption = true)
+void ReshapeFlattenTest(bool useOption = true, const std::vector<armnn::BackendId>& backends = {})
{
// Set input data
std::vector<int32_t> inputShape { 1, 3, 4, 1 };
@@ -115,16 +111,16 @@ void ReshapeFlattenTest(std::vector<armnn::BackendId>& backends, bool useOption
RedefineTest<float>(tflite::BuiltinOperator_RESHAPE,
::tflite::TensorType_FLOAT32,
- backends,
inputShape,
outputShape,
inputValues,
expectedOutputValues,
targetShape,
- useOption);
+ useOption,
+ backends);
}
-void ReshapeFlattenAllTest(std::vector<armnn::BackendId>& backends, bool useOption = true)
+void ReshapeFlattenAllTest(bool useOption = true, const std::vector<armnn::BackendId>& backends = {})
{
// Set input data
std::vector<int32_t> inputShape { 1, 3, 4, 1 };
@@ -141,16 +137,16 @@ void ReshapeFlattenAllTest(std::vector<armnn::BackendId>& backends, bool useOpti
RedefineTest<float>(tflite::BuiltinOperator_RESHAPE,
::tflite::TensorType_FLOAT32,
- backends,
inputShape,
outputShape,
inputValues,
expectedOutputValues,
targetShape,
- useOption);
+ useOption,
+ backends);
}
-void ReshapeInt8Test(std::vector<armnn::BackendId>& backends, bool useOption = true)
+void ReshapeInt8Test(bool useOption = true, const std::vector<armnn::BackendId>& backends = {})
{
// Set input data
std::vector<int32_t> inputShape { 1, 3, 4, 1 };
@@ -167,18 +163,18 @@ void ReshapeInt8Test(std::vector<armnn::BackendId>& backends, bool useOption = t
RedefineTest<int8_t>(tflite::BuiltinOperator_RESHAPE,
::tflite::TensorType_INT8,
- backends,
inputShape,
outputShape,
inputValues,
expectedOutputValues,
targetShape,
useOption,
+ backends,
2.5f,
1);
}
-void ReshapeUint8Test(std::vector<armnn::BackendId>& backends, bool useOption = true)
+void ReshapeUint8Test(bool useOption = true, const std::vector<armnn::BackendId>& backends = {})
{
// Set input data
std::vector<int32_t> inputShape { 1, 3, 4, 1 };
@@ -195,18 +191,18 @@ void ReshapeUint8Test(std::vector<armnn::BackendId>& backends, bool useOption =
RedefineTest<uint8_t>(tflite::BuiltinOperator_RESHAPE,
::tflite::TensorType_UINT8,
- backends,
inputShape,
outputShape,
inputValues,
expectedOutputValues,
targetShape,
useOption,
+ backends,
2.5f,
1);
}
-void ReshapeInt16Test(std::vector<armnn::BackendId>& backends, bool useOption = true)
+void ReshapeInt16Test(bool useOption = true, const std::vector<armnn::BackendId>& backends = {})
{
// Set input data
std::vector<int32_t> inputShape { 1, 3, 4, 1 };
@@ -223,294 +219,100 @@ void ReshapeInt16Test(std::vector<armnn::BackendId>& backends, bool useOption =
RedefineTest<int16_t>(tflite::BuiltinOperator_RESHAPE,
::tflite::TensorType_INT16,
- backends,
inputShape,
outputShape,
inputValues,
expectedOutputValues,
targetShape,
useOption,
+ backends,
2.5f,
0);
}
-TEST_SUITE("Reshape_GpuAccTests")
-{
-
-TEST_CASE ("Reshape_Simple_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- ReshapeSimpleTest(backends);
-}
-
-TEST_CASE ("Reshape_ReduceDimension_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- ReshapeReduceDimTest(backends);
-}
-
-TEST_CASE ("Reshape_Flatten_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- ReshapeFlattenTest(backends);
-}
-
-TEST_CASE ("Reshape_FlattenAll_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- ReshapeFlattenAllTest(backends);
-}
-
-TEST_CASE ("Reshape_Int8_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- ReshapeInt8Test(backends);
-}
-
-TEST_CASE ("Reshape_Uint8_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- ReshapeUint8Test(backends);
-}
-
-TEST_CASE ("Reshape_Float16_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- ReshapeSimpleFloat16Test(backends);
-}
-
-TEST_CASE ("Reshape_Simple_ShapeTensor_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- ReshapeSimpleTest(backends, false);
-}
-
-TEST_CASE ("Reshape_ReduceDimension_ShapeTensor_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- ReshapeReduceDimTest(backends, false);
-}
-
-TEST_CASE ("Reshape_Flatten_ShapeTensor_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- ReshapeFlattenTest(backends, false);
-}
-
-TEST_CASE ("Reshape_FlattenAll_ShapeTensor_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- ReshapeFlattenAllTest(backends, false);
-}
-
-TEST_CASE ("Reshape_Int8_ShapeTensor_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- ReshapeInt8Test(backends, false);
-}
-
-TEST_CASE ("Reshape_Uint8_ShapeTensor_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- ReshapeUint8Test(backends, false);
-}
-
-TEST_CASE ("Reshape_Float16_ShapeTensor_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- ReshapeSimpleFloat16Test(backends, false);
-}
-
-} // TEST_SUITE("Reshape_GpuAccTests")
-
-TEST_SUITE("Reshape_CpuAccTests")
-{
-
-TEST_CASE ("Reshape_Simple_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- ReshapeSimpleTest(backends);
-}
-
-TEST_CASE ("Reshape_ReduceDimension_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- ReshapeReduceDimTest(backends);
-}
-
-TEST_CASE ("Reshape_Flatten_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- ReshapeFlattenTest(backends);
-}
-
-TEST_CASE ("Reshape_FlattenAll_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- ReshapeFlattenAllTest(backends);
-}
-
-TEST_CASE ("Reshape_Int8_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- ReshapeInt8Test(backends);
-}
-
-TEST_CASE ("Reshape_Uint8_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- ReshapeUint8Test(backends);
-}
-
-TEST_CASE ("Reshape_Float16_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- ReshapeSimpleFloat16Test(backends);
-}
-
-TEST_CASE ("Reshape_Simple_ShapeTensor_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- ReshapeSimpleTest(backends, false);
-}
-
-TEST_CASE ("Reshape_ReduceDimension_ShapeTensor_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- ReshapeReduceDimTest(backends, false);
-}
-
-TEST_CASE ("Reshape_Flatten_ShapeTensor_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- ReshapeFlattenTest(backends, false);
-}
-
-TEST_CASE ("Reshape_FlattenAll_ShapeTensor_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- ReshapeFlattenAllTest(backends, false);
-}
-
-TEST_CASE ("Reshape_Int8_ShapeTensor_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- ReshapeInt8Test(backends, false);
-}
-
-TEST_CASE ("Reshape_Uint8_ShapeTensor_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- ReshapeUint8Test(backends, false);
-}
-
-TEST_CASE ("Reshape_Float16_ShapeTensor_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- ReshapeSimpleFloat16Test(backends, false);
-}
-
-} // TEST_SUITE("Reshape_CpuAccTests")
-
-TEST_SUITE("Reshape_CpuRefTests")
+TEST_SUITE("ReshapeTests")
{
-TEST_CASE ("Reshape_Simple_CpuRef_Test")
+TEST_CASE ("Reshape_Simple_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- ReshapeSimpleTest(backends);
+ ReshapeSimpleTest();
}
-TEST_CASE ("Reshape_ReduceDimension_CpuRef_Test")
+TEST_CASE ("Reshape_ReduceDimension_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- ReshapeReduceDimTest(backends);
+ ReshapeReduceDimTest();
}
-TEST_CASE ("Reshape_Flatten_CpuRef_Test")
+TEST_CASE ("Reshape_Flatten_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- ReshapeFlattenTest(backends);
+ ReshapeFlattenTest();
}
-TEST_CASE ("Reshape_FlattenAll_CpuRef_Test")
+TEST_CASE ("Reshape_FlattenAll_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- ReshapeFlattenAllTest(backends);
+ ReshapeFlattenAllTest();
}
-TEST_CASE ("Reshape_Int8_CpuRef_Test")
+TEST_CASE ("Reshape_Int8_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- ReshapeInt8Test(backends);
+ ReshapeInt8Test();
}
-TEST_CASE ("Reshape_Uint8_CpuRef_Test")
+TEST_CASE ("Reshape_Uint8_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- ReshapeUint8Test(backends);
+ ReshapeUint8Test();
}
-TEST_CASE ("Reshape_Int16_CpuRef_Test")
+TEST_CASE ("Reshape_Int16_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- ReshapeInt16Test(backends);
+ ReshapeInt16Test();
}
-TEST_CASE ("Reshape_Float16_CpuRef_Test")
+TEST_CASE ("Reshape_Float16_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- ReshapeSimpleFloat16Test(backends);
+ ReshapeSimpleFloat16Test();
}
-TEST_CASE ("Reshape_Simple_ShapeTensor_CpuRef_Test")
+TEST_CASE ("Reshape_Simple_ShapeTensor_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- ReshapeSimpleTest(backends, false);
+ ReshapeSimpleTest(false);
}
-TEST_CASE ("Reshape_ReduceDimension_ShapeTensor_CpuRef_Test")
+TEST_CASE ("Reshape_ReduceDimension_ShapeTensor_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- ReshapeReduceDimTest(backends, false);
+ ReshapeReduceDimTest(false);
}
-TEST_CASE ("Reshape_Flatten_ShapeTensor_CpuRef_Test")
+TEST_CASE ("Reshape_Flatten_ShapeTensor_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- ReshapeFlattenTest(backends, false);
+ ReshapeFlattenTest(false);
}
-TEST_CASE ("Reshape_FlattenAll_ShapeTensor_CpuRef_Test")
+TEST_CASE ("Reshape_FlattenAll_ShapeTensor_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- ReshapeFlattenAllTest(backends, false);
+ ReshapeFlattenAllTest(false);
}
-TEST_CASE ("Reshape_Int8_ShapeTensor_CpuRef_Test")
+TEST_CASE ("Reshape_Int8_ShapeTensor_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- ReshapeInt8Test(backends, false);
+ ReshapeInt8Test(false);
}
-TEST_CASE ("Reshape_Uint8_ShapeTensor_CpuRef_Test")
+TEST_CASE ("Reshape_Uint8_ShapeTensor_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- ReshapeUint8Test(backends, false);
+ ReshapeUint8Test(false);
}
-TEST_CASE ("Reshape_Int16_ShapeTensor_CpuRef_Test")
+TEST_CASE ("Reshape_Int16_ShapeTensor_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- ReshapeInt16Test(backends, false);
+ ReshapeInt16Test(false);
}
-TEST_CASE ("Reshape_Float16_ShapeTensor_CpuRef_Test")
+TEST_CASE ("Reshape_Float16_ShapeTensor_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- ReshapeSimpleFloat16Test(backends, false);
+ ReshapeSimpleFloat16Test(false);
}
-} // TEST_SUITE("Reshape_CpuRefTests")
+} // TEST_SUITE("ReshapeTests")
} // namespace armnnDelegate \ No newline at end of file
diff --git a/delegate/test/ResizeTest.cpp b/delegate/test/ResizeTest.cpp
index 95f4d93e59..de042db512 100644
--- a/delegate/test/ResizeTest.cpp
+++ b/delegate/test/ResizeTest.cpp
@@ -1,25 +1,16 @@
//
-// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020, 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "ResizeTestHelper.hpp"
-#include <armnn_delegate.hpp>
-
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
-#include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-
-#include <tensorflow/lite/version.h>
-
#include <doctest/doctest.h>
namespace armnnDelegate
{
-void ResizeBiliniarFloat32Test(std::vector<armnn::BackendId>& backends)
+void ResizeBiliniarFloat32Test(const std::vector<armnn::BackendId>& backends = {})
{
// Set input data
std::vector<float> input1Values
@@ -45,7 +36,6 @@ void ResizeBiliniarFloat32Test(std::vector<armnn::BackendId>& backends)
const std::vector<int32_t> expectedOutputShape = { 1, 5, 5, 1 };
ResizeFP32TestImpl(tflite::BuiltinOperator_RESIZE_BILINEAR,
- backends,
input1Values,
input1Shape,
input2NewShape,
@@ -54,7 +44,7 @@ void ResizeBiliniarFloat32Test(std::vector<armnn::BackendId>& backends)
expectedOutputShape);
}
-void ResizeNearestNeighbourFloat32Test(std::vector<armnn::BackendId>& backends)
+void ResizeNearestNeighbourFloat32Test(const std::vector<armnn::BackendId>& backends = {})
{
// Set input data
std::vector<float> input1Values { 1.0f, 2.0f, 3.0f, 4.0f }
@@ -69,7 +59,6 @@ void ResizeNearestNeighbourFloat32Test(std::vector<armnn::BackendId>& backends)
const std::vector<int32_t> expectedOutputShape = { 1, 1, 1, 1 };
ResizeFP32TestImpl(tflite::BuiltinOperator_RESIZE_NEAREST_NEIGHBOR,
- backends,
input1Values,
input1Shape,
input2NewShape,
@@ -78,57 +67,19 @@ void ResizeNearestNeighbourFloat32Test(std::vector<armnn::BackendId>& backends)
expectedOutputShape);
}
-TEST_SUITE("ResizeTests_GpuAccTests")
-{
-
-TEST_CASE ("Resize_Biliniar_Float32_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- ResizeBiliniarFloat32Test(backends);
-}
-
-TEST_CASE ("Resize_NearestNeighbour_Float32_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- ResizeNearestNeighbourFloat32Test(backends);
-}
-
-} // TEST_SUITE("ResizeTests_GpuAccTests")
-
-
-TEST_SUITE("ResizeTests_CpuAccTests")
-{
-
-TEST_CASE ("Resize_Biliniar_Float32_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- ResizeBiliniarFloat32Test(backends);
-}
-
-TEST_CASE ("Resize_NearestNeighbour_Float32_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- ResizeNearestNeighbourFloat32Test(backends);
-}
-
-} // TEST_SUITE("ResizeTests_CpuAccTests")
-
-
-TEST_SUITE("ResizeTests_CpuRefTests")
+TEST_SUITE("ResizeTestsTests")
{
-TEST_CASE ("Resize_Biliniar_Float32_CpuRef_Test")
+TEST_CASE ("Resize_Biliniar_Float32_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- ResizeBiliniarFloat32Test(backends);
+ ResizeBiliniarFloat32Test();
}
-TEST_CASE ("Resize_NearestNeighbour_Float32_CpuRef_Test")
+TEST_CASE ("Resize_NearestNeighbour_Float32_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- ResizeNearestNeighbourFloat32Test(backends);
+ ResizeNearestNeighbourFloat32Test();
}
-} // TEST_SUITE("ResizeTests_CpuRefTests")
+} // TEST_SUITE("ResizeTestsTests")
} // namespace armnnDelegate
diff --git a/delegate/test/ResizeTestHelper.hpp b/delegate/test/ResizeTestHelper.hpp
index 5421082a39..6fc333769a 100644
--- a/delegate/test/ResizeTestHelper.hpp
+++ b/delegate/test/ResizeTestHelper.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020, 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -10,12 +10,8 @@
#include <armnn_delegate.hpp>
#include <DelegateTestInterpreter.hpp>
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/kernels/register.h>
#include <tensorflow/lite/version.h>
-#include <doctest/doctest.h>
-
namespace
{
@@ -118,13 +114,13 @@ std::vector<char> CreateResizeTfLiteModel(tflite::BuiltinOperator operatorCode,
}
void ResizeFP32TestImpl(tflite::BuiltinOperator operatorCode,
- std::vector<armnn::BackendId>& backends,
std::vector<float>& input1Values,
std::vector<int32_t> input1Shape,
std::vector<int32_t> input2NewShape,
std::vector<int32_t> input2Shape,
std::vector<float>& expectedOutputValues,
- std::vector<int32_t> expectedOutputShape)
+ std::vector<int32_t> expectedOutputShape,
+ const std::vector<armnn::BackendId>& backends = {})
{
using namespace delegateTestInterpreter;
@@ -145,7 +141,7 @@ void ResizeFP32TestImpl(tflite::BuiltinOperator operatorCode,
std::vector<int32_t> tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0);
// Setup interpreter with Arm NN Delegate applied.
- auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+ auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, CaptureAvailableBackends(backends));
CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
CHECK(armnnInterpreter.FillInputTensor<float>(input1Values, 0) == kTfLiteOk);
CHECK(armnnInterpreter.FillInputTensor<int32_t>(input2NewShape, 1) == kTfLiteOk);
diff --git a/delegate/test/ReverseV2Test.cpp b/delegate/test/ReverseV2Test.cpp
index e842217d2a..bd35347b69 100644
--- a/delegate/test/ReverseV2Test.cpp
+++ b/delegate/test/ReverseV2Test.cpp
@@ -1,25 +1,16 @@
//
-// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "ReverseV2TestHelper.hpp"
-#include <armnn_delegate.hpp>
-
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
-#include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-
-#include <tensorflow/lite/version.h>
-
#include <doctest/doctest.h>
namespace armnnDelegate
{
-void ReverseV2Float32Test(std::vector<armnn::BackendId>& backends)
+void ReverseV2Float32Test(const std::vector<armnn::BackendId>& backends = {})
{
// Set input data
std::vector<float> inputValues =
@@ -62,7 +53,6 @@ void ReverseV2Float32Test(std::vector<armnn::BackendId>& backends)
const std::vector<int32_t> expectedOutputShape = {3, 3, 3};
ReverseV2FP32TestImpl(tflite::BuiltinOperator_REVERSE_V2,
- backends,
inputValues,
inputShape,
axisValues,
@@ -71,7 +61,7 @@ void ReverseV2Float32Test(std::vector<armnn::BackendId>& backends)
expectedOutputShape);
}
-void ReverseV2NegativeAxisFloat32Test(std::vector<armnn::BackendId>& backends)
+void ReverseV2NegativeAxisFloat32Test(const std::vector<armnn::BackendId>& backends = {})
{
// Set input data
std::vector<float> inputValues =
@@ -114,7 +104,6 @@ void ReverseV2NegativeAxisFloat32Test(std::vector<armnn::BackendId>& backends)
const std::vector<int32_t> expectedOutputShape = {3, 3, 3};
ReverseV2FP32TestImpl(tflite::BuiltinOperator_REVERSE_V2,
- backends,
inputValues,
inputShape,
axisValues,
@@ -123,55 +112,19 @@ void ReverseV2NegativeAxisFloat32Test(std::vector<armnn::BackendId>& backends)
expectedOutputShape);
}
-TEST_SUITE("ReverseV2Tests_GpuAccTests")
-{
-
- TEST_CASE ("ReverseV2_Float32_GpuAcc_Test")
- {
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- ReverseV2Float32Test(backends);
- }
-
- TEST_CASE ("ReverseV2_NegativeAxis_Float32_GpuAcc_Test")
- {
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- ReverseV2NegativeAxisFloat32Test(backends);
- }
-
-} // TEST_SUITE("ReverseV2Tests_GpuAccTests")
-
-TEST_SUITE("ReverseV2Tests_CpuAccTests")
-{
-
- TEST_CASE ("ReverseV2_Float32_CpuAcc_Test")
- {
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- ReverseV2Float32Test(backends);
- }
-
- TEST_CASE ("ReverseV2_NegativeAxis_Float32_CpuAcc_Test")
- {
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- ReverseV2NegativeAxisFloat32Test(backends);
- }
-
-} // TEST_SUITE("ReverseV2Tests_CpuAccTests")
-
-TEST_SUITE("ReverseV2Tests_CpuRefTests")
+TEST_SUITE("ReverseV2TestsTests")
{
- TEST_CASE ("ReverseV2_Float32_CpuRef_Test")
+ TEST_CASE ("ReverseV2_Float32_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- ReverseV2Float32Test(backends);
+ ReverseV2Float32Test();
}
- TEST_CASE ("ReverseV2_NegativeAxis_Float32_CpuRef_Test")
+ TEST_CASE ("ReverseV2_NegativeAxis_Float32_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- ReverseV2NegativeAxisFloat32Test(backends);
+ ReverseV2NegativeAxisFloat32Test();
}
-} // TEST_SUITE("ReverseV2Tests_CpuRefTests")
+} // TEST_SUITE("ReverseV2TestsTests")
} // namespace armnnDelegate
diff --git a/delegate/test/ReverseV2TestHelper.hpp b/delegate/test/ReverseV2TestHelper.hpp
index 82f0bd700c..eeb0920b25 100644
--- a/delegate/test/ReverseV2TestHelper.hpp
+++ b/delegate/test/ReverseV2TestHelper.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -10,12 +10,8 @@
#include <armnn_delegate.hpp>
#include <DelegateTestInterpreter.hpp>
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/kernels/register.h>
#include <tensorflow/lite/version.h>
-
-
namespace
{
std::vector<char> CreateReverseV2TfLiteModel(tflite::BuiltinOperator operatorCode,
@@ -101,13 +97,13 @@ namespace
}
void ReverseV2FP32TestImpl(tflite::BuiltinOperator operatorCode,
- std::vector<armnn::BackendId>& backends,
std::vector<float>& inputValues,
std::vector<int32_t> inputShape,
std::vector<int32_t> axisValues,
std::vector<int32_t> axisShapeDims,
std::vector<float>& expectedOutputValues,
- std::vector<int32_t> expectedOutputShape)
+ std::vector<int32_t> expectedOutputShape,
+ const std::vector<armnn::BackendId>& backends = {})
{
using namespace delegateTestInterpreter;
@@ -128,7 +124,7 @@ namespace
std::vector<int32_t> tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0);
// Setup interpreter with Arm NN Delegate applied.
- auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+ auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, CaptureAvailableBackends(backends));
CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
CHECK(armnnInterpreter.FillInputTensor<float>(inputValues, 0) == kTfLiteOk);
CHECK(armnnInterpreter.FillInputTensor<int32_t>(axisValues, 1) == kTfLiteOk);
diff --git a/delegate/test/RoundTest.cpp b/delegate/test/RoundTest.cpp
index 4aac1a77b3..90eb16bc05 100644
--- a/delegate/test/RoundTest.cpp
+++ b/delegate/test/RoundTest.cpp
@@ -1,20 +1,16 @@
//
-// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021, 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "RoundTestHelper.hpp"
-#include <armnn_delegate.hpp>
-
-#include <flatbuffers/flatbuffers.h>
-
#include <doctest/doctest.h>
namespace armnnDelegate
{
-void FloorFp32Test(std::vector<armnn::BackendId>& backends)
+void FloorFp32Test(const std::vector<armnn::BackendId>& backends = {})
{
std::vector<int32_t> inputShape {1, 3, 2, 3};
std::vector<int32_t> outputShape {1, 3, 2, 3};
@@ -27,42 +23,18 @@ void FloorFp32Test(std::vector<armnn::BackendId>& backends)
RoundTest<float>(tflite::BuiltinOperator_FLOOR,
::tflite::TensorType_FLOAT32,
- backends,
inputShape,
inputValues,
expectedOutputValues);
}
// FLOOR Test Suite
-TEST_SUITE("FLOOR_CpuRefTests")
-{
-
-TEST_CASE ("FLOOR_Fp32_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
- FloorFp32Test(backends);
-}
-
-}
-
-TEST_SUITE("FLOOR_CpuAccTests")
-{
-
-TEST_CASE ("FLOOR_Fp32_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
- FloorFp32Test(backends);
-}
-
-}
-
-TEST_SUITE("FLOOR_GpuAccTests")
+TEST_SUITE("FLOORTests")
{
-TEST_CASE ("FLOOR_Fp32_GpuAcc_Test")
+TEST_CASE ("FLOOR_Fp32_Test")
{
- std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
- FloorFp32Test(backends);
+ FloorFp32Test();
}
}
diff --git a/delegate/test/RoundTestHelper.hpp b/delegate/test/RoundTestHelper.hpp
index b7bd32fbc4..8dc53ef594 100644
--- a/delegate/test/RoundTestHelper.hpp
+++ b/delegate/test/RoundTestHelper.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021, 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -10,12 +10,8 @@
#include <armnn_delegate.hpp>
#include <DelegateTestInterpreter.hpp>
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/kernels/register.h>
#include <tensorflow/lite/version.h>
-#include <doctest/doctest.h>
-
namespace
{
std::vector<char> CreateRoundTfLiteModel(tflite::BuiltinOperator roundOperatorCode,
@@ -100,10 +96,10 @@ std::vector<char> CreateRoundTfLiteModel(tflite::BuiltinOperator roundOperatorCo
template<typename T>
void RoundTest(tflite::BuiltinOperator roundOperatorCode,
tflite::TensorType tensorType,
- std::vector<armnn::BackendId>& backends,
std::vector<int32_t>& shape,
std::vector<T>& inputValues,
std::vector<T>& expectedOutputValues,
+ const std::vector<armnn::BackendId>& backends = {},
float quantScale = 1.0f,
int quantOffset = 0)
{
@@ -123,7 +119,7 @@ void RoundTest(tflite::BuiltinOperator roundOperatorCode,
std::vector<int32_t> tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0);
// Setup interpreter with Arm NN Delegate applied.
- auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+ auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, CaptureAvailableBackends(backends));
CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
CHECK(armnnInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
diff --git a/delegate/test/ShapeTest.cpp b/delegate/test/ShapeTest.cpp
index 309b071d9a..3a3fdf7472 100644
--- a/delegate/test/ShapeTest.cpp
+++ b/delegate/test/ShapeTest.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021, 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -10,7 +10,7 @@
namespace armnnDelegate
{
-void ShapeSimpleTest(std::vector<armnn::BackendId>& backends)
+void ShapeSimpleTest(const std::vector<armnn::BackendId>& backends = {})
{
std::vector<int32_t> inputShape{ 1, 3, 2, 3 };
@@ -22,11 +22,11 @@ void ShapeSimpleTest(std::vector<armnn::BackendId>& backends)
ShapeTest<int32_t, int32_t>(::tflite::TensorType_INT32,
::tflite::TensorType_INT32,
- backends,
inputShape,
inputValues,
expectedOutputValues,
- expectedOutputShape);
+ expectedOutputShape,
+ backends);
}
// SHAPE Test Suite
diff --git a/delegate/test/ShapeTestHelper.hpp b/delegate/test/ShapeTestHelper.hpp
index 1d1da2fa51..b9ab58813c 100644
--- a/delegate/test/ShapeTestHelper.hpp
+++ b/delegate/test/ShapeTestHelper.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021, 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -10,12 +10,8 @@
#include <armnn_delegate.hpp>
#include <DelegateTestInterpreter.hpp>
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/kernels/register.h>
#include <tensorflow/lite/version.h>
-#include <doctest/doctest.h>
-
namespace
{
std::vector<char> CreateShapeTfLiteModel(tflite::TensorType inputTensorType,
@@ -104,11 +100,11 @@ std::vector<char> CreateShapeTfLiteModel(tflite::TensorType inputTensorType,
template<typename T, typename K>
void ShapeTest(tflite::TensorType inputTensorType,
tflite::TensorType outputTensorType,
- std::vector<armnn::BackendId>& backends,
std::vector<int32_t>& inputShape,
std::vector<T>& inputValues,
std::vector<K>& expectedOutputValues,
std::vector<int32_t>& expectedOutputShape,
+ const std::vector<armnn::BackendId>& backends = {},
float quantScale = 1.0f,
int quantOffset = 0)
{
@@ -128,7 +124,7 @@ void ShapeTest(tflite::TensorType inputTensorType,
std::vector<int32_t> tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0);
// Setup interpreter with Arm NN Delegate applied.
- auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+ auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, CaptureAvailableBackends(backends));
CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
std::vector<K> armnnOutputValues = armnnInterpreter.GetOutputResult<K>(0);
diff --git a/delegate/test/SliceTest.cpp b/delegate/test/SliceTest.cpp
index 9e54f735f8..cc584a49a7 100644
--- a/delegate/test/SliceTest.cpp
+++ b/delegate/test/SliceTest.cpp
@@ -1,20 +1,16 @@
//
-// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "SliceTestHelper.hpp"
-#include <armnn_delegate.hpp>
-
-#include <flatbuffers/flatbuffers.h>
-
#include <doctest/doctest.h>
namespace armnnDelegate
{
-void SliceFixtureSimpleTest(std::vector<armnn::BackendId>& backends)
+void SliceFixtureSimpleTest(const std::vector<armnn::BackendId>& backends = {})
{
std::vector<int32_t> inputShape { 3, 2, 3 };
std::vector<int32_t> outputShape { 2, 1, 3 };
@@ -30,7 +26,6 @@ void SliceFixtureSimpleTest(std::vector<armnn::BackendId>& backends)
5.0f, 5.0f, 5.0f };
SliceTestImpl<float>(
- backends,
inputData,
outputData,
beginData,
@@ -41,7 +36,7 @@ void SliceFixtureSimpleTest(std::vector<armnn::BackendId>& backends)
outputShape);
}
-void SliceFixtureSizeTest(std::vector<armnn::BackendId>& backends)
+void SliceFixtureSizeTest(const std::vector<armnn::BackendId>& backends = {})
{
std::vector<int32_t> inputShape { 3, 2, 3 };
std::vector<int32_t> outputShape { 2, 1, 3 };
@@ -57,7 +52,6 @@ void SliceFixtureSizeTest(std::vector<armnn::BackendId>& backends)
5.0f, 5.0f, 5.0f };
SliceTestImpl<float>(
- backends,
inputData,
outputData,
beginData,
@@ -68,55 +62,19 @@ void SliceFixtureSizeTest(std::vector<armnn::BackendId>& backends)
outputShape);
}
-TEST_SUITE("Slice_CpuRefTests")
-{
-
-TEST_CASE ("Slice_Simple_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
- SliceFixtureSimpleTest(backends);
-}
-
-TEST_CASE ("Slice_Size_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
- SliceFixtureSizeTest(backends);
-}
-
-} // Slice_CpuRefTests TestSuite
-
-TEST_SUITE("Slice_CpuAccTests")
-{
-
-TEST_CASE ("Slice_Simple_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
- SliceFixtureSimpleTest(backends);
-}
-
-TEST_CASE ("Slice_Size_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
- SliceFixtureSizeTest(backends);
-}
-
-} // Slice_CpuAccTests TestSuite
-
-TEST_SUITE("StridedSlice_GpuAccTests")
+TEST_SUITE("SliceTests")
{
-TEST_CASE ("Slice_Simple_GpuAcc_Test")
+TEST_CASE ("Slice_Simple_Test")
{
- std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
- SliceFixtureSimpleTest(backends);
+ SliceFixtureSimpleTest();
}
-TEST_CASE ("Slice_Size_GpuAcc_Test")
+TEST_CASE ("Slice_Size_Test")
{
- std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
- SliceFixtureSizeTest(backends);
+ SliceFixtureSizeTest();
}
-} // Slice_GpuAccTests TestSuite
+} // SliceTests TestSuite
} // namespace armnnDelegate \ No newline at end of file
diff --git a/delegate/test/SliceTestHelper.hpp b/delegate/test/SliceTestHelper.hpp
index 2e84d8de9f..90ce8e4fd6 100644
--- a/delegate/test/SliceTestHelper.hpp
+++ b/delegate/test/SliceTestHelper.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -10,12 +10,8 @@
#include <armnn_delegate.hpp>
#include <DelegateTestInterpreter.hpp>
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/kernels/register.h>
#include <tensorflow/lite/version.h>
-#include <doctest/doctest.h>
-
namespace
{
@@ -112,15 +108,15 @@ std::vector<char> CreateSliceTfLiteModel(tflite::TensorType tensorType,
}
template <typename T>
-void SliceTestImpl(std::vector<armnn::BackendId>& backends,
- std::vector<T>& inputValues,
+void SliceTestImpl(std::vector<T>& inputValues,
std::vector<T>& expectedOutputValues,
std::vector<int32_t>& beginTensorData,
std::vector<int32_t>& sizeTensorData,
std::vector<int32_t>& inputTensorShape,
std::vector<int32_t>& beginTensorShape,
std::vector<int32_t>& sizeTensorShape,
- std::vector<int32_t>& outputTensorShape)
+ std::vector<int32_t>& outputTensorShape,
+ const std::vector<armnn::BackendId>& backends = {})
{
using namespace delegateTestInterpreter;
std::vector<char> modelBuffer = CreateSliceTfLiteModel(
@@ -141,7 +137,7 @@ void SliceTestImpl(std::vector<armnn::BackendId>& backends,
std::vector<int32_t> tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0);
// Setup interpreter with Arm NN Delegate applied.
- auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+ auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, CaptureAvailableBackends(backends));
CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
CHECK(armnnInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
diff --git a/delegate/test/SoftmaxTest.cpp b/delegate/test/SoftmaxTest.cpp
index 35840533f9..5abfc88f21 100644
--- a/delegate/test/SoftmaxTest.cpp
+++ b/delegate/test/SoftmaxTest.cpp
@@ -1,76 +1,42 @@
//
-// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020, 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "SoftmaxTestHelper.hpp"
-#include <armnn_delegate.hpp>
-
-#include <flatbuffers/flatbuffers.h>
-
#include <doctest/doctest.h>
namespace armnnDelegate
{
-TEST_SUITE ("Softmax_GpuAccTests")
+TEST_SUITE ("SoftmaxTests")
{
-TEST_CASE ("Softmax_Standard_Beta_GpuAcc_Test")
+TEST_CASE ("Softmax_Standard_Beta_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef, armnn::Compute::GpuAcc };
std::vector<float> expectedOutput = {0.00994190481, 0.0445565246, 0.0734612942, 0.329230666, 0.542809606,
0.710742831, 0.158588171, 0.0961885825, 0.0214625746, 0.0130177103};
- SoftmaxTestCase(tflite::BuiltinOperator_SOFTMAX, backends, 1, expectedOutput);
+ SoftmaxTestCase(tflite::BuiltinOperator_SOFTMAX, 1, expectedOutput, backends);
}
-TEST_CASE ("Softmax_Different_Beta_GpuAcc_Test")
+TEST_CASE ("Softmax_Different_Beta_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef, armnn::Compute::GpuAcc };
std::vector<float> expectedOutput = {0.0946234912, 0.148399189, 0.172415257, 0.270400971, 0.314161092, 0.352414012,
0.224709094, 0.193408906, 0.123322964, 0.106145054};
- SoftmaxTestCase(tflite::BuiltinOperator_SOFTMAX, backends, 0.3, expectedOutput);
+ SoftmaxTestCase(tflite::BuiltinOperator_SOFTMAX, 0.3, expectedOutput, backends);
}
-TEST_CASE ("Log_Softmax_GpuAcc_Test")
+TEST_CASE ("Log_Softmax_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef, armnn::Compute::GpuAcc };
std::vector<float> expectedOutput =
{-4.61099672, -3.11099672, -2.61099672, -1.11099672, -0.610996664,
-0.341444582, -1.84144461, -2.34144449, -3.84144449, -4.34144449};
- SoftmaxTestCase(tflite::BuiltinOperator_LOG_SOFTMAX, backends, 0, expectedOutput);
-}
-} // TEST_SUITE ("Softmax_GpuAccTests")
-
-TEST_SUITE ("Softmax_CpuRefTests")
-{
-
-TEST_CASE ("Softmax_Standard_Beta_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- std::vector<float> expectedOutput = {
- 0.00994190481, 0.0445565246, 0.0734612942, 0.329230666, 0.542809606,
- 0.710742831, 0.158588171, 0.0961885825, 0.0214625746, 0.0130177103};
- SoftmaxTestCase(tflite::BuiltinOperator_SOFTMAX, backends, 1, expectedOutput);
+ SoftmaxTestCase(tflite::BuiltinOperator_LOG_SOFTMAX, 0, expectedOutput, backends);
}
+} // TEST_SUITE ("SoftmaxTests")
-TEST_CASE ("Softmax_Different_Beta_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- std::vector<float> expectedOutput = {
- 0.0946234912, 0.148399189, 0.172415257, 0.270400971, 0.314161092,
- 0.352414012, 0.224709094, 0.193408906, 0.123322964, 0.106145054};
- SoftmaxTestCase(tflite::BuiltinOperator_SOFTMAX, backends, 0.3, expectedOutput);
-}
-
-TEST_CASE ("Log_Softmax_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- std::vector<float> expectedOutput =
- {-4.61099672, -3.11099672, -2.61099672, -1.11099672, -0.610996664,
- -0.341444582, -1.84144461, -2.34144449, -3.84144449, -4.34144449};
- SoftmaxTestCase(tflite::BuiltinOperator_LOG_SOFTMAX, backends, 0, expectedOutput);
-}
-} // TEST_SUITE ("Softmax_CpuRefTests")
} // namespace armnnDelegate
diff --git a/delegate/test/SoftmaxTestHelper.hpp b/delegate/test/SoftmaxTestHelper.hpp
index 609882d90c..f8525d151f 100644
--- a/delegate/test/SoftmaxTestHelper.hpp
+++ b/delegate/test/SoftmaxTestHelper.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020, 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -9,14 +9,9 @@
#include <armnn_delegate.hpp>
#include <DelegateTestInterpreter.hpp>
-#include <armnnUtils/FloatingPointComparison.hpp>
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/kernels/register.h>
#include <tensorflow/lite/version.h>
-#include <doctest/doctest.h>
-
namespace
{
std::vector<char> CreateSoftmaxTfLiteModel(tflite::BuiltinOperator softmaxOperatorCode,
@@ -102,10 +97,10 @@ std::vector<char> CreateSoftmaxTfLiteModel(tflite::BuiltinOperator softmaxOperat
void SoftmaxTest(tflite::BuiltinOperator softmaxOperatorCode,
tflite::TensorType tensorType,
- std::vector<armnn::BackendId>& backends,
std::vector<int32_t>& shape,
std::vector<float>& inputValues,
std::vector<float>& expectedOutputValues,
+ const std::vector<armnn::BackendId>& backends = {},
float beta = 0)
{
using namespace delegateTestInterpreter;
@@ -123,7 +118,7 @@ void SoftmaxTest(tflite::BuiltinOperator softmaxOperatorCode,
std::vector<int32_t> tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0);
// Setup interpreter with Arm NN Delegate applied.
- auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+ auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, CaptureAvailableBackends(backends));
CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
CHECK(armnnInterpreter.FillInputTensor<float>(inputValues, 0) == kTfLiteOk);
CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
@@ -143,8 +138,9 @@ void SoftmaxTest(tflite::BuiltinOperator softmaxOperatorCode,
/// \param backends armnn backends to target
/// \param beta multiplicative parameter to the softmax function
/// \param expectedOutput to be checked against transformed input
-void SoftmaxTestCase(tflite::BuiltinOperator operatorCode,
- std::vector<armnn::BackendId> backends, float beta, std::vector<float> expectedOutput) {
+void SoftmaxTestCase(tflite::BuiltinOperator operatorCode, float beta,
+ std::vector<float> expectedOutput, const std::vector<armnn::BackendId> backends = {})
+{
std::vector<float> input = {
1.0, 2.5, 3.0, 4.5, 5.0,
-1.0, -2.5, -3.0, -4.5, -5.0};
@@ -152,10 +148,10 @@ void SoftmaxTestCase(tflite::BuiltinOperator operatorCode,
SoftmaxTest(operatorCode,
tflite::TensorType_FLOAT32,
- backends,
shape,
input,
expectedOutput,
+ backends,
beta);
}
diff --git a/delegate/test/SpaceDepthTest.cpp b/delegate/test/SpaceDepthTest.cpp
index 6cffba33eb..add51b4a21 100644
--- a/delegate/test/SpaceDepthTest.cpp
+++ b/delegate/test/SpaceDepthTest.cpp
@@ -1,20 +1,16 @@
//
-// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021, 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "SpaceDepthTestHelper.hpp"
-#include <armnn_delegate.hpp>
-
-#include <flatbuffers/flatbuffers.h>
-
#include <doctest/doctest.h>
namespace armnnDelegate
{
-void DepthToSpaceFp32Test(std::vector<armnn::BackendId>& backends, int blockSize)
+void DepthToSpaceFp32Test(int blockSize)
{
// Set input data
std::vector<int32_t> inputShape { 1, 2, 2, 4 };
@@ -32,15 +28,15 @@ void DepthToSpaceFp32Test(std::vector<armnn::BackendId>& backends, int blockSize
SpaceDepthTest<float>(tflite::BuiltinOperator_DEPTH_TO_SPACE,
::tflite::TensorType_FLOAT32,
- backends,
inputShape,
outputShape,
inputValues,
expectedOutputValues,
+ {},
blockSize);
}
-void DepthToSpaceUint8Test(std::vector<armnn::BackendId>& backends, int blockSize)
+void DepthToSpaceUint8Test(int blockSize)
{
// Set input data
std::vector<int32_t> inputShape { 2, 1, 1, 4 };
@@ -54,15 +50,15 @@ void DepthToSpaceUint8Test(std::vector<armnn::BackendId>& backends, int blockSiz
SpaceDepthTest<uint8_t>(tflite::BuiltinOperator_DEPTH_TO_SPACE,
::tflite::TensorType_UINT8,
- backends,
inputShape,
outputShape,
inputValues,
expectedOutputValues,
+ {},
blockSize);
}
-void SpaceToDepthFp32Test(std::vector<armnn::BackendId>& backends, int blockSize)
+void SpaceToDepthFp32Test(int blockSize)
{
// Set input data
std::vector<int32_t> inputShape { 1, 2, 2, 2 };
@@ -73,15 +69,15 @@ void SpaceToDepthFp32Test(std::vector<armnn::BackendId>& backends, int blockSize
SpaceDepthTest<float>(tflite::BuiltinOperator_SPACE_TO_DEPTH,
::tflite::TensorType_FLOAT32,
- backends,
inputShape,
outputShape,
inputValues,
expectedOutputValues,
+ {},
blockSize);
}
-void SpaceToDepthUint8Test(std::vector<armnn::BackendId>& backends, int blockSize)
+void SpaceToDepthUint8Test(int blockSize)
{
// Set input data
std::vector<int32_t> inputShape { 1, 2, 2, 1 };
@@ -92,115 +88,43 @@ void SpaceToDepthUint8Test(std::vector<armnn::BackendId>& backends, int blockSiz
SpaceDepthTest<uint8_t>(tflite::BuiltinOperator_SPACE_TO_DEPTH,
::tflite::TensorType_UINT8,
- backends,
inputShape,
outputShape,
inputValues,
expectedOutputValues,
+ {},
blockSize);
}
-TEST_SUITE("DepthToSpace_CpuRefTests")
-{
-
-TEST_CASE ("DepthToSpaceFp32Test_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- DepthToSpaceFp32Test(backends, 2);
-}
-
-TEST_CASE ("DepthToSpaceUint8Test_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- DepthToSpaceUint8Test(backends, 2);
-}
-
-} // TEST_SUITE("DepthToSpace_CpuRefTests")
-
-
-TEST_SUITE("DepthToSpace_CpuAccTests")
-{
-
-TEST_CASE ("DepthToSpaceFp32Test_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- DepthToSpaceFp32Test(backends, 2);
-}
-
-TEST_CASE ("DepthToSpaceUint8Test_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- DepthToSpaceUint8Test(backends, 2);
-}
-
-} // TEST_SUITE("DepthToSpace_CpuAccTests")
-
-TEST_SUITE("DepthToSpace_GpuAccTests")
+TEST_SUITE("DepthToSpaceTests")
{
-TEST_CASE ("DepthToSpaceFp32Test_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- DepthToSpaceFp32Test(backends, 2);
-}
-
-TEST_CASE ("DepthToSpaceUint8Test_GpuAcc_Test")
+TEST_CASE ("DepthToSpaceFp32Test_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- DepthToSpaceUint8Test(backends, 2);
+ DepthToSpaceFp32Test(2);
}
-} // TEST_SUITE("DepthToSpace_GpuAccTests")
-
-TEST_SUITE("SpaceToDepth_CpuRefTests")
-{
-
-TEST_CASE ("SpaceToDepthFp32Test_CpuRef_Test")
+TEST_CASE ("DepthToSpaceUint8Test_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- SpaceToDepthFp32Test(backends, 2);
+ DepthToSpaceUint8Test(2);
}
-TEST_CASE ("SpaceToDepthUint8Test_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- SpaceToDepthUint8Test(backends, 2);
-}
-
-} // TEST_SUITE("SpaceToDepth_CpuRefTests")
-
-TEST_SUITE("SpaceToDepth_CpuAccTests")
-{
-
-TEST_CASE ("SpaceToDepthFp32Test_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- SpaceToDepthFp32Test(backends, 2);
-}
-
-TEST_CASE ("SpaceToDepthUint8Test_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- SpaceToDepthUint8Test(backends, 2);
-}
+} // TEST_SUITE("DepthToSpaceTests")
-} // TEST_SUITE("SpaceToDepth_CpuAccTests")
-TEST_SUITE("SpaceToDepth_GpuAccTests")
+TEST_SUITE("SpaceToDepthTests")
{
-TEST_CASE ("SpaceToDepthFp32Test_GpuAcc_Test")
+TEST_CASE ("SpaceToDepthFp32Test_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- SpaceToDepthFp32Test(backends, 2);
+ SpaceToDepthFp32Test(2);
}
-TEST_CASE ("SpaceToDepthUint8Test_GpuAcc_Test")
+TEST_CASE ("SpaceToDepthUint8Test_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- SpaceToDepthUint8Test(backends, 2);
+ SpaceToDepthUint8Test(2);
}
-} // TEST_SUITE("SpaceToDepth_GpuAccTests")
+} // TEST_SUITE("SpaceToDepthTests")
} // namespace armnnDelegate
diff --git a/delegate/test/SpaceDepthTestHelper.hpp b/delegate/test/SpaceDepthTestHelper.hpp
index 6d566a61b4..5b8b0a94aa 100644
--- a/delegate/test/SpaceDepthTestHelper.hpp
+++ b/delegate/test/SpaceDepthTestHelper.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021, 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -10,12 +10,8 @@
#include <armnn_delegate.hpp>
#include <DelegateTestInterpreter.hpp>
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/kernels/register.h>
#include <tensorflow/lite/version.h>
-#include <doctest/doctest.h>
-
namespace
{
std::vector<char> CreateSpaceDepthTfLiteModel(tflite::BuiltinOperator spaceDepthOperatorCode,
@@ -114,11 +110,11 @@ std::vector<char> CreateSpaceDepthTfLiteModel(tflite::BuiltinOperator spaceDepth
template <typename T>
void SpaceDepthTest(tflite::BuiltinOperator spaceDepthOperatorCode,
tflite::TensorType tensorType,
- std::vector<armnn::BackendId>& backends,
std::vector<int32_t>& inputShape,
std::vector<int32_t>& outputShape,
std::vector<T>& inputValues,
std::vector<T>& expectedOutputValues,
+ const std::vector<armnn::BackendId>& backends = {},
int32_t blockSize = 2)
{
using namespace delegateTestInterpreter;
@@ -137,7 +133,7 @@ void SpaceDepthTest(tflite::BuiltinOperator spaceDepthOperatorCode,
std::vector<int32_t> tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0);
// Setup interpreter with Arm NN Delegate applied.
- auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+ auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, CaptureAvailableBackends(backends));
CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
CHECK(armnnInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
diff --git a/delegate/test/SplitTest.cpp b/delegate/test/SplitTest.cpp
index 1d14b6d9f3..73dd4a53ba 100644
--- a/delegate/test/SplitTest.cpp
+++ b/delegate/test/SplitTest.cpp
@@ -1,21 +1,17 @@
//
-// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020, 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "SplitTestHelper.hpp"
-#include <armnn_delegate.hpp>
-
-#include <flatbuffers/flatbuffers.h>
-
#include <doctest/doctest.h>
namespace armnnDelegate
{
// SPLIT Operator
-void SplitUint8Test(std::vector<armnn::BackendId>& backends)
+void SplitUint8Test()
{
std::vector<int32_t> axisShape { 1 };
std::vector<int32_t> inputShape { 2, 2, 2, 2} ;
@@ -35,7 +31,6 @@ void SplitUint8Test(std::vector<armnn::BackendId>& backends)
int32_t numSplits = 2;
SplitTest<uint8_t>(::tflite::TensorType_UINT8,
- backends,
axisShape,
inputShape,
outputShapes,
@@ -45,7 +40,7 @@ void SplitUint8Test(std::vector<armnn::BackendId>& backends)
numSplits);
}
-void SplitFp32Test(std::vector<armnn::BackendId>& backends)
+void SplitFp32Test()
{
std::vector<int32_t> axisShape { 1 };
std::vector<int32_t> inputShape { 2, 2, 2, 2 };
@@ -65,7 +60,6 @@ void SplitFp32Test(std::vector<armnn::BackendId>& backends)
int32_t numSplits = 2;
SplitTest<float>(::tflite::TensorType_FLOAT32,
- backends,
axisShape,
inputShape,
outputShapes,
@@ -76,60 +70,24 @@ void SplitFp32Test(std::vector<armnn::BackendId>& backends)
}
// SPLIT Test Suite
-TEST_SUITE("SPLIT_CpuRefTests")
-{
-
-TEST_CASE ("SPLIT_Uint8_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
- SplitUint8Test(backends);
-}
-
-TEST_CASE ("SPLIT_Fp32_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
- SplitFp32Test(backends);
-}
-
-}
-
-TEST_SUITE("SPLIT_CpuAccTests")
-{
-
-TEST_CASE ("SPLIT_Uint8_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
- SplitUint8Test(backends);
-}
-
-TEST_CASE ("SPLIT_Fp32_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
- SplitFp32Test(backends);
-}
-
-}
-
-TEST_SUITE("SPLIT_GpuAccTests")
+TEST_SUITE("SPLITTests")
{
-TEST_CASE ("SPLIT_Uint8_GpuAcc_Test")
+TEST_CASE ("SPLIT_Uint8_Test")
{
- std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
- SplitUint8Test(backends);
+ SplitUint8Test();
}
-TEST_CASE ("SPLIT_Fp32_GpuAcc_Test")
+TEST_CASE ("SPLIT_Fp32_Test")
{
- std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
- SplitFp32Test(backends);
+ SplitFp32Test();
}
}
// End of SPLIT Test Suite
// SPLIT_V Operator
-void SplitVUint8Test(std::vector<armnn::BackendId>& backends)
+void SplitVUint8Test()
{
std::vector<int32_t> axisShape { 1 };
std::vector<int32_t> inputShape { 2, 4, 2, 2 };
@@ -155,7 +113,6 @@ void SplitVUint8Test(std::vector<armnn::BackendId>& backends)
int32_t numSplits = 2;
SplitVTest<uint8_t>(::tflite::TensorType_UINT8,
- backends,
inputShape,
splitsShape,
axisShape,
@@ -167,7 +124,7 @@ void SplitVUint8Test(std::vector<armnn::BackendId>& backends)
numSplits);
}
-void SplitVFp32Test(std::vector<armnn::BackendId>& backends)
+void SplitVFp32Test()
{
std::vector<int32_t> axisShape { 1 };
std::vector<int32_t> inputShape { 2, 4, 2, 2 };
@@ -193,7 +150,6 @@ void SplitVFp32Test(std::vector<armnn::BackendId>& backends)
int32_t numSplits = 2;
SplitVTest<float>(::tflite::TensorType_FLOAT32,
- backends,
inputShape,
splitsShape,
axisShape,
@@ -206,56 +162,19 @@ void SplitVFp32Test(std::vector<armnn::BackendId>& backends)
}
// SPLIT_V Test Suite
-TEST_SUITE("SPLIT_V_CpuRefTests")
+TEST_SUITE("SPLIT_VTests")
{
-TEST_CASE ("SPLIT_V_Uint8_CpuRef_Test")
+TEST_CASE ("SPLIT_V_Uint8_Test")
{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
- SplitVUint8Test(backends);
+ SplitVUint8Test();
}
-TEST_CASE ("SPLIT_V_Fp32_CpuRef_Test")
+TEST_CASE ("SPLIT_V_Fp32_Test")
{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
- SplitVFp32Test(backends);
+ SplitVFp32Test();
}
-}
-
-TEST_SUITE("SPLIT_V_CpuAccTests")
-{
-
-TEST_CASE ("SPLIT_V_Uint8_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
- SplitVUint8Test(backends);
-}
-
-TEST_CASE ("SPLIT_V_Fp32_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
- SplitVFp32Test(backends);
-}
-
-}
-
-TEST_SUITE("SPLIT_V_GpuAccTests")
-{
-
-TEST_CASE ("SPLIT_V_Uint8_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
- SplitVUint8Test(backends);
-}
-
-TEST_CASE ("SPLIT_V_Fp32_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
- SplitVFp32Test(backends);
-}
-
-}
-// End of SPLIT_V Test Suite
+} // End of SPLIT_V Test Suite
} // namespace armnnDelegate \ No newline at end of file
diff --git a/delegate/test/SplitTestHelper.hpp b/delegate/test/SplitTestHelper.hpp
index 54c859c764..d46721577e 100644
--- a/delegate/test/SplitTestHelper.hpp
+++ b/delegate/test/SplitTestHelper.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020, 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -10,12 +10,8 @@
#include <armnn_delegate.hpp>
#include <DelegateTestInterpreter.hpp>
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/kernels/register.h>
#include <tensorflow/lite/version.h>
-#include <doctest/doctest.h>
-
namespace
{
@@ -117,7 +113,6 @@ std::vector<char> CreateSplitTfLiteModel(tflite::TensorType tensorType,
template <typename T>
void SplitTest(tflite::TensorType tensorType,
- std::vector<armnn::BackendId>& backends,
std::vector<int32_t>& axisTensorShape,
std::vector<int32_t>& inputTensorShape,
std::vector<std::vector<int32_t>>& outputTensorShapes,
@@ -125,6 +120,7 @@ void SplitTest(tflite::TensorType tensorType,
std::vector<T>& inputValues,
std::vector<std::vector<T>>& expectedOutputValues,
const int32_t numSplits,
+ const std::vector<armnn::BackendId>& backends = {},
float quantScale = 1.0f,
int quantOffset = 0)
{
@@ -144,7 +140,7 @@ void SplitTest(tflite::TensorType tensorType,
CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
// Setup interpreter with Arm NN Delegate applied.
- auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+ auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, CaptureAvailableBackends(backends));
CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
CHECK(armnnInterpreter.FillInputTensor<T>(inputValues, 1) == kTfLiteOk);
CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
@@ -275,7 +271,6 @@ std::vector<char> CreateSplitVTfLiteModel(tflite::TensorType tensorType,
template <typename T>
void SplitVTest(tflite::TensorType tensorType,
- std::vector<armnn::BackendId>& backends,
std::vector<int32_t>& inputTensorShape,
std::vector<int32_t>& splitsTensorShape,
std::vector<int32_t>& axisTensorShape,
@@ -285,6 +280,7 @@ void SplitVTest(tflite::TensorType tensorType,
std::vector<int32_t>& axisData,
std::vector<std::vector<T>>& expectedOutputValues,
const int32_t numSplits,
+ const std::vector<armnn::BackendId>& backends = {},
float quantScale = 1.0f,
int quantOffset = 0)
{
@@ -307,7 +303,7 @@ void SplitVTest(tflite::TensorType tensorType,
CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
// Setup interpreter with Arm NN Delegate applied.
- auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+ auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, CaptureAvailableBackends(backends));
CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
CHECK(armnnInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
diff --git a/delegate/test/SqueezeTest.cpp b/delegate/test/SqueezeTest.cpp
index 01122c95e6..9d0e748ce0 100644
--- a/delegate/test/SqueezeTest.cpp
+++ b/delegate/test/SqueezeTest.cpp
@@ -1,14 +1,16 @@
//
-// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "RedefineTestHelper.hpp"
+#include <doctest/doctest.h>
+
namespace armnnDelegate
{
-void SqueezeSimpleTest(std::vector<armnn::BackendId>& backends)
+void SqueezeSimpleTest()
{
// Set input data
std::vector<int32_t> inputShape { 1, 2, 2, 1 };
@@ -20,7 +22,6 @@ void SqueezeSimpleTest(std::vector<armnn::BackendId>& backends)
RedefineTest<float>(tflite::BuiltinOperator_SQUEEZE,
::tflite::TensorType_FLOAT32,
- backends,
inputShape,
outputShape,
inputValues,
@@ -28,7 +29,7 @@ void SqueezeSimpleTest(std::vector<armnn::BackendId>& backends)
squeezeDims);
}
-void SqueezeWithDimsTest(std::vector<armnn::BackendId>& backends)
+void SqueezeWithDimsTest()
{
// Set input data
std::vector<int32_t> inputShape { 1, 2, 2, 1 };
@@ -40,7 +41,6 @@ void SqueezeWithDimsTest(std::vector<armnn::BackendId>& backends)
RedefineTest<float>(tflite::BuiltinOperator_SQUEEZE,
::tflite::TensorType_FLOAT32,
- backends,
inputShape,
outputShape,
inputValues,
@@ -48,55 +48,19 @@ void SqueezeWithDimsTest(std::vector<armnn::BackendId>& backends)
squeezeDims);
}
-TEST_SUITE("Squeeze_GpuAccTests")
-{
-
-TEST_CASE ("Squeeze_Simple_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- SqueezeSimpleTest(backends);
-}
-
-TEST_CASE ("Squeeze_With_Dims_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- SqueezeWithDimsTest(backends);
-}
-
-} // TEST_SUITE("Squeeze_GpuAccTests")
-
-TEST_SUITE("Squeeze_CpuAccTests")
-{
-
-TEST_CASE ("Squeeze_Simple_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- SqueezeSimpleTest(backends);
-}
-
-TEST_CASE ("Squeeze_With_Dims_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- SqueezeWithDimsTest(backends);
-}
-
-} // TEST_SUITE("Squeeze_CpuAccTests")
-
-TEST_SUITE("Squeeze_CpuRefTests")
+TEST_SUITE("SqueezeTests")
{
-TEST_CASE ("Squeeze_Simple_CpuRef_Test")
+TEST_CASE ("Squeeze_Simple_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- SqueezeSimpleTest(backends);
+ SqueezeSimpleTest();
}
-TEST_CASE ("Squeeze_With_Dims_CpuRef_Test")
+TEST_CASE ("Squeeze_With_Dims_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- SqueezeWithDimsTest(backends);
+ SqueezeWithDimsTest();
}
-} // TEST_SUITE("Squeeze_CpuRefTests")
+} // TEST_SUITE("SqueezeTests")
} // namespace armnnDelegate \ No newline at end of file
diff --git a/delegate/test/StridedSliceTest.cpp b/delegate/test/StridedSliceTest.cpp
index 5b6d7efca0..9619ca2e98 100644
--- a/delegate/test/StridedSliceTest.cpp
+++ b/delegate/test/StridedSliceTest.cpp
@@ -1,20 +1,16 @@
//
-// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "StridedSliceTestHelper.hpp"
-#include <armnn_delegate.hpp>
-
-#include <flatbuffers/flatbuffers.h>
-
#include <doctest/doctest.h>
namespace armnnDelegate
{
-void StridedSlice4DTest(std::vector<armnn::BackendId>& backends)
+void StridedSlice4DTest()
{
std::vector<int32_t> inputShape { 3, 2, 3, 1 };
std::vector<int32_t> outputShape { 1, 2, 3, 1 };
@@ -31,7 +27,6 @@ void StridedSlice4DTest(std::vector<armnn::BackendId>& backends)
std::vector<float> outputData { 3.0f, 3.0f, 3.0f, 4.0f, 4.0f, 4.0f };
StridedSliceTestImpl<float>(
- backends,
inputData,
outputData,
beginData,
@@ -45,7 +40,7 @@ void StridedSlice4DTest(std::vector<armnn::BackendId>& backends)
);
}
-void StridedSlice4DReverseTest(std::vector<armnn::BackendId>& backends)
+void StridedSlice4DReverseTest()
{
std::vector<int32_t> inputShape { 3, 2, 3, 1 };
std::vector<int32_t> outputShape { 1, 2, 3, 1 };
@@ -62,7 +57,6 @@ void StridedSlice4DReverseTest(std::vector<armnn::BackendId>& backends)
std::vector<float> outputData { 4.0f, 4.0f, 4.0f, 3.0f, 3.0f, 3.0f };
StridedSliceTestImpl<float>(
- backends,
inputData,
outputData,
beginData,
@@ -76,7 +70,7 @@ void StridedSlice4DReverseTest(std::vector<armnn::BackendId>& backends)
);
}
-void StridedSliceSimpleStrideTest(std::vector<armnn::BackendId>& backends)
+void StridedSliceSimpleStrideTest()
{
std::vector<int32_t> inputShape { 3, 2, 3, 1 };
std::vector<int32_t> outputShape { 2, 1, 2, 1 };
@@ -94,7 +88,6 @@ void StridedSliceSimpleStrideTest(std::vector<armnn::BackendId>& backends)
5.0f, 5.0f };
StridedSliceTestImpl<float>(
- backends,
inputData,
outputData,
beginData,
@@ -108,7 +101,7 @@ void StridedSliceSimpleStrideTest(std::vector<armnn::BackendId>& backends)
);
}
-void StridedSliceSimpleRangeMaskTest(std::vector<armnn::BackendId>& backends)
+void StridedSliceSimpleRangeMaskTest()
{
std::vector<int32_t> inputShape { 3, 2, 3, 1 };
std::vector<int32_t> outputShape { 3, 2, 3, 1 };
@@ -131,7 +124,6 @@ void StridedSliceSimpleRangeMaskTest(std::vector<armnn::BackendId>& backends)
5.0f, 5.0f, 5.0f, 6.0f, 6.0f, 6.0f };
StridedSliceTestImpl<float>(
- backends,
inputData,
outputData,
beginData,
@@ -142,100 +134,35 @@ void StridedSliceSimpleRangeMaskTest(std::vector<armnn::BackendId>& backends)
endShape,
strideShape,
outputShape,
+ {},
beginMask,
endMask
);
}
-TEST_SUITE("StridedSlice_CpuRefTests")
-{
-
-TEST_CASE ("StridedSlice_4D_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
- StridedSlice4DTest(backends);
-}
-
-TEST_CASE ("StridedSlice_4D_Reverse_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
- StridedSlice4DReverseTest(backends);
-}
-
-TEST_CASE ("StridedSlice_SimpleStride_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
- StridedSliceSimpleStrideTest(backends);
-}
-
-TEST_CASE ("StridedSlice_SimpleRange_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
- StridedSliceSimpleRangeMaskTest(backends);
-}
-
-} // StridedSlice_CpuRefTests TestSuite
-
-
-
-TEST_SUITE("StridedSlice_CpuAccTests")
-{
-
-TEST_CASE ("StridedSlice_4D_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
- StridedSlice4DTest(backends);
-}
-
-TEST_CASE ("StridedSlice_4D_Reverse_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
- StridedSlice4DReverseTest(backends);
-}
-
-TEST_CASE ("StridedSlice_SimpleStride_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
- StridedSliceSimpleStrideTest(backends);
-}
-
-TEST_CASE ("StridedSlice_SimpleRange_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
- StridedSliceSimpleRangeMaskTest(backends);
-}
-
-} // StridedSlice_CpuAccTests TestSuite
-
-
-
-TEST_SUITE("StridedSlice_GpuAccTests")
+TEST_SUITE("StridedSliceTests")
{
-TEST_CASE ("StridedSlice_4D_GpuAcc_Test")
+TEST_CASE ("StridedSlice_4D_Test")
{
- std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
- StridedSlice4DTest(backends);
+ StridedSlice4DTest();
}
-TEST_CASE ("StridedSlice_4D_Reverse_GpuAcc_Test")
+TEST_CASE ("StridedSlice_4D_Reverse_Test")
{
- std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
- StridedSlice4DReverseTest(backends);
+ StridedSlice4DReverseTest();
}
-TEST_CASE ("StridedSlice_SimpleStride_GpuAcc_Test")
+TEST_CASE ("StridedSlice_SimpleStride_Test")
{
- std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
- StridedSliceSimpleStrideTest(backends);
+ StridedSliceSimpleStrideTest();
}
-TEST_CASE ("StridedSlice_SimpleRange_GpuAcc_Test")
+TEST_CASE ("StridedSlice_SimpleRange_Test")
{
- std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
- StridedSliceSimpleRangeMaskTest(backends);
+ StridedSliceSimpleRangeMaskTest();
}
-} // StridedSlice_GpuAccTests TestSuite
+} // StridedSliceTests TestSuite
} // namespace armnnDelegate \ No newline at end of file
diff --git a/delegate/test/StridedSliceTestHelper.hpp b/delegate/test/StridedSliceTestHelper.hpp
index 0ae6384dab..740dafe878 100644
--- a/delegate/test/StridedSliceTestHelper.hpp
+++ b/delegate/test/StridedSliceTestHelper.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -10,12 +10,8 @@
#include <armnn_delegate.hpp>
#include <DelegateTestInterpreter.hpp>
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/kernels/register.h>
#include <tensorflow/lite/version.h>
-#include <doctest/doctest.h>
-
namespace
{
@@ -134,8 +130,7 @@ std::vector<char> CreateStridedSliceTfLiteModel(tflite::TensorType tensorType,
}
template <typename T>
-void StridedSliceTestImpl(std::vector<armnn::BackendId>& backends,
- std::vector<T>& inputValues,
+void StridedSliceTestImpl(std::vector<T>& inputValues,
std::vector<T>& expectedOutputValues,
std::vector<int32_t>& beginTensorData,
std::vector<int32_t>& endTensorData,
@@ -145,6 +140,7 @@ void StridedSliceTestImpl(std::vector<armnn::BackendId>& backends,
std::vector<int32_t>& endTensorShape,
std::vector<int32_t>& strideTensorShape,
std::vector<int32_t>& outputTensorShape,
+ const std::vector<armnn::BackendId>& backends = {},
const int32_t beginMask = 0,
const int32_t endMask = 0,
const int32_t ellipsisMask = 0,
@@ -179,7 +175,7 @@ void StridedSliceTestImpl(std::vector<armnn::BackendId>& backends,
std::vector<int32_t> tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0);
// Setup interpreter with Arm NN Delegate applied.
- auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+ auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, CaptureAvailableBackends(backends));
CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
CHECK(armnnInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
diff --git a/delegate/test/TestUtils.hpp b/delegate/test/TestUtils.hpp
index 0932f229cc..05db4899b6 100644
--- a/delegate/test/TestUtils.hpp
+++ b/delegate/test/TestUtils.hpp
@@ -1,18 +1,17 @@
//
-// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020, 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
#include <tensorflow/lite/c/common.h>
-#include <tensorflow/lite/interpreter.h>
-
-#include <doctest/doctest.h>
#include <armnn/BackendId.hpp>
#include <half/half.hpp>
+#include <doctest/doctest.h>
+
using Half = half_float::half;
namespace
diff --git a/delegate/test/TileTest.cpp b/delegate/test/TileTest.cpp
index ef803964fd..545ceeb5d5 100644
--- a/delegate/test/TileTest.cpp
+++ b/delegate/test/TileTest.cpp
@@ -1,21 +1,15 @@
//
-// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "TileTestHelper.hpp"
-#include <armnn_delegate.hpp>
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
-#include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <tensorflow/lite/version.h>
#include <doctest/doctest.h>
namespace armnnDelegate
{
-void TileFloat32Test(std::vector<armnn::BackendId>& backends)
+void TileFloat32Test()
{
// Set input data
std::vector<float> inputValues =
@@ -43,7 +37,6 @@ void TileFloat32Test(std::vector<armnn::BackendId>& backends)
const std::vector<int32_t> expectedOutputShape = { 4, 6 };
TileFP32TestImpl(tflite::BuiltinOperator_TILE,
- backends,
inputValues,
inputShape,
multiplesValues,
@@ -52,37 +45,14 @@ void TileFloat32Test(std::vector<armnn::BackendId>& backends)
expectedOutputShape);
}
-TEST_SUITE("TileTests_GpuAccTests")
+TEST_SUITE("TileTestsTests")
{
- TEST_CASE ("Tile_Float32_GpuAcc_Test")
+ TEST_CASE ("Tile_Float32_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- TileFloat32Test(backends);
+ TileFloat32Test();
}
-} // TEST_SUITE("Tile_Float32_GpuAcc_Test")
-
-TEST_SUITE("TileTests_CpuAccTests")
-{
-
- TEST_CASE ("Tile_Float32_CpuAcc_Test")
- {
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- TileFloat32Test(backends);
- }
-
-} // TEST_SUITE("Tile_Float32_CpuAcc_Test")
-
-TEST_SUITE("TileTests_CpuRefTests")
-{
-
- TEST_CASE ("Tile_Float32_CpuRef_Test")
- {
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- TileFloat32Test(backends);
- }
-
-} // TEST_SUITE("Tile_Float32_CpuRef_Test")
+} // TEST_SUITE("TileTestsTests")
} // namespace armnnDelegate \ No newline at end of file
diff --git a/delegate/test/TileTestHelper.hpp b/delegate/test/TileTestHelper.hpp
index 0bdee9d7d4..880d01b41c 100644
--- a/delegate/test/TileTestHelper.hpp
+++ b/delegate/test/TileTestHelper.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -10,12 +10,8 @@
#include <armnn_delegate.hpp>
#include <DelegateTestInterpreter.hpp>
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/kernels/register.h>
#include <tensorflow/lite/version.h>
-#include <doctest/doctest.h>
-
namespace
{
std::vector<char> CreateTileTfLiteModel(tflite::BuiltinOperator operatorCode,
@@ -102,13 +98,13 @@ std::vector<char> CreateTileTfLiteModel(tflite::BuiltinOperator operatorCode,
}
void TileFP32TestImpl(tflite::BuiltinOperator operatorCode,
- std::vector<armnn::BackendId>& backends,
std::vector<float>& inputValues,
std::vector<int32_t> inputShape,
std::vector<int32_t> multiplesValues,
std::vector<int32_t> multiplesShapes,
std::vector<float>& expectedOutputValues,
- std::vector<int32_t> expectedOutputShape)
+ std::vector<int32_t> expectedOutputShape,
+ const std::vector<armnn::BackendId>& backends = {})
{
using namespace delegateTestInterpreter;
@@ -129,7 +125,7 @@ void TileFP32TestImpl(tflite::BuiltinOperator operatorCode,
std::vector<int32_t> tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0);
// Setup interpreter with Arm NN Delegate applied.
- auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+ auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, CaptureAvailableBackends(backends));
CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
CHECK(armnnInterpreter.FillInputTensor<float>(inputValues, 0) == kTfLiteOk);
CHECK(armnnInterpreter.FillInputTensor<int32_t>(multiplesValues, 1) == kTfLiteOk);
diff --git a/delegate/test/TransposeConvolution2dTest.cpp b/delegate/test/TransposeConvolution2dTest.cpp
index 7c3728cedf..b98255acec 100644
--- a/delegate/test/TransposeConvolution2dTest.cpp
+++ b/delegate/test/TransposeConvolution2dTest.cpp
@@ -1,18 +1,10 @@
//
-// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "ConvolutionTestHelper.hpp"
-#include <armnn_delegate.hpp>
-
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
-#include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <tensorflow/lite/version.h>
-
#include <doctest/doctest.h>
namespace armnnDelegate
diff --git a/delegate/test/TransposeTest.cpp b/delegate/test/TransposeTest.cpp
index cb3b327b13..894e054265 100644
--- a/delegate/test/TransposeTest.cpp
+++ b/delegate/test/TransposeTest.cpp
@@ -1,19 +1,16 @@
//
-// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020, 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "TransposeTestHelper.hpp"
-#include <armnn_delegate.hpp>
-
#include <doctest/doctest.h>
-#include <flatbuffers/flatbuffers.h>
namespace armnnDelegate
{
-void TransposeFP32Test(std::vector<armnn::BackendId>& backends)
+void TransposeFP32Test()
{
// set test input data
std::vector<int32_t> input0Shape {4, 2, 3};
@@ -26,8 +23,7 @@ void TransposeFP32Test(std::vector<armnn::BackendId>& backends)
std::vector<float> expectedOutputValues = {0, 3, 6, 9, 12, 15, 18, 21, 1, 4, 7, 10,
13, 16, 19, 22, 2, 5, 8, 11, 14, 17, 20, 23};
- TransposeTest<float>(backends,
- input0Shape,
+ TransposeTest<float>(input0Shape,
inputPermVecShape,
outputShape,
input0Values,
@@ -35,35 +31,12 @@ void TransposeFP32Test(std::vector<armnn::BackendId>& backends)
expectedOutputValues);
}
-TEST_SUITE ("Transpose_GpuAccTests")
-{
-
-TEST_CASE ("Transpose_Float32_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
- TransposeFP32Test(backends);
-}
-
-}
-
-TEST_SUITE ("Transpose_CpuAccTests")
-{
-
-TEST_CASE ("Transpose_Float32_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
- TransposeFP32Test(backends);
-}
-
-}
-
-TEST_SUITE ("Transpose_CpuRefTests")
+TEST_SUITE ("TransposeTests")
{
-TEST_CASE ("Transpose_Float32_CpuRef_Test")
+TEST_CASE ("Transpose_Float32_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- TransposeFP32Test(backends);
+ TransposeFP32Test();
}
}
diff --git a/delegate/test/TransposeTestHelper.hpp b/delegate/test/TransposeTestHelper.hpp
index d72bb7662e..c4c8ba718f 100644
--- a/delegate/test/TransposeTestHelper.hpp
+++ b/delegate/test/TransposeTestHelper.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020, 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -10,12 +10,8 @@
#include <armnn_delegate.hpp>
#include <DelegateTestInterpreter.hpp>
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/kernels/register.h>
#include <tensorflow/lite/version.h>
-#include <doctest/doctest.h>
-
namespace
{
std::vector<char> CreateTransposeTfLiteModel(tflite::TensorType tensorType,
@@ -82,13 +78,13 @@ std::vector<char> CreateTransposeTfLiteModel(tflite::TensorType tensorType,
}
template <typename T>
-void TransposeTest(std::vector<armnn::BackendId>& backends,
- std::vector<int32_t>& inputShape,
+void TransposeTest(std::vector<int32_t>& inputShape,
std::vector<int32_t>& inputPermVecShape,
std::vector<int32_t>& outputShape,
std::vector<T>& inputValues,
std::vector<int32_t>& inputPermVec,
- std::vector<T>& expectedOutputValues)
+ std::vector<T>& expectedOutputValues,
+ const std::vector<armnn::BackendId>& backends = {})
{
using namespace delegateTestInterpreter;
@@ -109,7 +105,7 @@ void TransposeTest(std::vector<armnn::BackendId>& backends,
std::vector<int32_t> tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0);
// Setup interpreter with Arm NN Delegate applied.
- auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+ auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, CaptureAvailableBackends(backends));
CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
CHECK(armnnInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
CHECK(armnnInterpreter.FillInputTensor<int32_t>(inputPermVec, 1) == kTfLiteOk);
diff --git a/delegate/test/UnidirectionalSequenceLstmTest.cpp b/delegate/test/UnidirectionalSequenceLstmTest.cpp
index d4716e665f..7af2f271d7 100644
--- a/delegate/test/UnidirectionalSequenceLstmTest.cpp
+++ b/delegate/test/UnidirectionalSequenceLstmTest.cpp
@@ -1,19 +1,16 @@
//
-// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021, 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "UnidirectionalSequenceLstmTestHelper.hpp"
-#include <armnn_delegate.hpp>
-
-#include <flatbuffers/flatbuffers.h>
#include <doctest/doctest.h>
namespace armnnDelegate
{
-void UnidirectionalSequenceLstmTest(std::vector<armnn::BackendId>& backends)
+void UnidirectionalSequenceLstmTest(const std::vector<armnn::BackendId>& backends = {})
{
int32_t batchSize = 3;
int32_t timeSize = 2;
@@ -108,8 +105,7 @@ void UnidirectionalSequenceLstmTest(std::vector<armnn::BackendId>& backends)
float clippingThresProj = 0.f;
bool isTimeMajor = false;
- UnidirectionalSequenceLstmTestImpl<float>(backends,
- ::tflite::TensorType_FLOAT32,
+ UnidirectionalSequenceLstmTestImpl<float>(::tflite::TensorType_FLOAT32,
batchSize,
timeSize,
inputSize,
@@ -153,13 +149,14 @@ void UnidirectionalSequenceLstmTest(std::vector<armnn::BackendId>& backends)
activationFunction,
clippingThresCell,
clippingThresProj,
- isTimeMajor);
+ isTimeMajor,
+ backends);
}
-void UnidirectionalSequenceLstmTimeMajorTestImpl(std::vector<armnn::BackendId>& backends,
- int32_t timeSize,
+void UnidirectionalSequenceLstmTimeMajorTestImpl(int32_t timeSize,
std::vector<float>& inputValues,
- std::vector<float>& expectedOutputValues)
+ std::vector<float>& expectedOutputValues,
+ const std::vector<armnn::BackendId>& backends = {})
{
int32_t batchSize = 3;
int32_t inputSize = 3;
@@ -249,8 +246,7 @@ void UnidirectionalSequenceLstmTimeMajorTestImpl(std::vector<armnn::BackendId>&
float clippingThresProj = 0.f;
bool isTimeMajor = true;
- UnidirectionalSequenceLstmTestImpl<float>(backends,
- ::tflite::TensorType_FLOAT32,
+ UnidirectionalSequenceLstmTestImpl<float>(::tflite::TensorType_FLOAT32,
batchSize,
timeSize,
inputSize,
@@ -294,9 +290,10 @@ void UnidirectionalSequenceLstmTimeMajorTestImpl(std::vector<armnn::BackendId>&
activationFunction,
clippingThresCell,
clippingThresProj,
- isTimeMajor);}
+ isTimeMajor,
+ backends);}
-void UnidirectionalSequenceLstmTimeMajorTest(std::vector<armnn::BackendId>& backends)
+void UnidirectionalSequenceLstmTimeMajorTest(const std::vector<armnn::BackendId>& backends = {})
{
int32_t timeSize = 2;
@@ -311,13 +308,13 @@ void UnidirectionalSequenceLstmTimeMajorTest(std::vector<armnn::BackendId>& back
0.111716f, 0.043119f, 0.0762981f, -0.0122854f,
0.104397f, 0.2144f, 0.119192f, -0.0839058f };
- UnidirectionalSequenceLstmTimeMajorTestImpl(backends,
- timeSize,
+ UnidirectionalSequenceLstmTimeMajorTestImpl(timeSize,
inputValues,
- expectedOutputValues);
+ expectedOutputValues,
+ backends);
}
-void UnidirectionalSequenceLstmTimeMajorSingleTimeTest(std::vector<armnn::BackendId>& backends)
+void UnidirectionalSequenceLstmTimeMajorSingleTimeTest(const std::vector<armnn::BackendId>& backends = {})
{
int32_t timeSize = 1;
@@ -329,13 +326,13 @@ void UnidirectionalSequenceLstmTimeMajorSingleTimeTest(std::vector<armnn::Backen
0.1053334f, 0.08508634f, 0.00667238f, -0.00356043f,
0.05638668f, 0.02924093f, 0.00119751f, -0.00017249f };
- UnidirectionalSequenceLstmTimeMajorTestImpl(backends,
- timeSize,
+ UnidirectionalSequenceLstmTimeMajorTestImpl(timeSize,
inputValues,
- expectedOutputValues);
+ expectedOutputValues,
+ backends);
}
-void UnidirectionalSequenceLstmNoCifgWithPeepholeWithProjectionTest(std::vector<armnn::BackendId>& backends)
+void UnidirectionalSequenceLstmNoCifgWithPeepholeWithProjectionTest(const std::vector<armnn::BackendId>& backends = {})
{
int32_t batchSize = 2;
int32_t timeSize = 3;
@@ -475,8 +472,7 @@ void UnidirectionalSequenceLstmNoCifgWithPeepholeWithProjectionTest(std::vector<
float clippingThresProj = 0.f;
bool isTimeMajor = false;
- UnidirectionalSequenceLstmTestImpl<float>(backends,
- ::tflite::TensorType_FLOAT32,
+ UnidirectionalSequenceLstmTestImpl<float>(::tflite::TensorType_FLOAT32,
batchSize,
timeSize,
inputSize,
@@ -520,10 +516,11 @@ void UnidirectionalSequenceLstmNoCifgWithPeepholeWithProjectionTest(std::vector<
activationFunction,
clippingThresCell,
clippingThresProj,
- isTimeMajor);
+ isTimeMajor,
+ backends);
}
-void UnidirectionalSequenceLstmWithCifgWithPeepholeNoProjectionTest(std::vector<armnn::BackendId>& backends)
+void UnidirectionalSequenceLstmWithCifgWithPeepholeNoProjectionTest(const std::vector<armnn::BackendId>& backends = {})
{
int32_t batchSize = 3;
int32_t timeSize = 2;
@@ -612,8 +609,7 @@ void UnidirectionalSequenceLstmWithCifgWithPeepholeNoProjectionTest(std::vector<
float clippingThresProj = 0.f;
bool isTimeMajor = false;
- UnidirectionalSequenceLstmTestImpl<float>(backends,
- ::tflite::TensorType_FLOAT32,
+ UnidirectionalSequenceLstmTestImpl<float>(::tflite::TensorType_FLOAT32,
batchSize,
timeSize,
inputSize,
@@ -657,11 +653,12 @@ void UnidirectionalSequenceLstmWithCifgWithPeepholeNoProjectionTest(std::vector<
activationFunction,
clippingThresCell,
clippingThresProj,
- isTimeMajor);
+ isTimeMajor,
+ backends);
}
void UnidirectionalSequenceLstmNoCifgWithPeepholeWithProjectionWithLayerNormTest(
- std::vector<armnn::BackendId>& backends)
+ const std::vector<armnn::BackendId>& backends = {})
{
int32_t batchSize = 3;
int32_t timeSize = 2;
@@ -767,8 +764,7 @@ void UnidirectionalSequenceLstmNoCifgWithPeepholeWithProjectionWithLayerNormTest
float clippingThresProj = 0.f;
bool isTimeMajor = false;
- UnidirectionalSequenceLstmTestImpl<float>(backends,
- ::tflite::TensorType_FLOAT32,
+ UnidirectionalSequenceLstmTestImpl<float>(::tflite::TensorType_FLOAT32,
batchSize,
timeSize,
inputSize,
@@ -812,10 +808,11 @@ void UnidirectionalSequenceLstmNoCifgWithPeepholeWithProjectionWithLayerNormTest
activationFunction,
clippingThresCell,
clippingThresProj,
- isTimeMajor);
+ isTimeMajor,
+ backends);
}
-void UnidirectionalSequenceLstmInt8Test(std::vector<armnn::BackendId>& backends)
+void UnidirectionalSequenceLstmInt8Test(const std::vector<armnn::BackendId>& backends = {})
{
int32_t batchSize = 3;
int32_t timeSize = 2;
@@ -888,8 +885,7 @@ void UnidirectionalSequenceLstmInt8Test(std::vector<armnn::BackendId>& backends)
float clippingThresProj = 0.f;
bool isTimeMajor = false;
- UnidirectionalSequenceLstmTestImpl<int8_t>(backends,
- ::tflite::TensorType_INT8,
+ UnidirectionalSequenceLstmTestImpl<int8_t>(::tflite::TensorType_INT8,
batchSize,
timeSize,
inputSize,
@@ -934,10 +930,11 @@ void UnidirectionalSequenceLstmInt8Test(std::vector<armnn::BackendId>& backends)
clippingThresCell,
clippingThresProj,
isTimeMajor,
+ backends,
0.1f);
}
-void UnidirectionalSequenceLstmInt8TimeMajorTest(std::vector<armnn::BackendId>& backends)
+void UnidirectionalSequenceLstmInt8TimeMajorTest(const std::vector<armnn::BackendId>& backends = {})
{
int32_t batchSize = 3;
int32_t timeSize = 2;
@@ -1010,8 +1007,7 @@ void UnidirectionalSequenceLstmInt8TimeMajorTest(std::vector<armnn::BackendId>&
float clippingThresProj = 0.f;
bool isTimeMajor = true;
- UnidirectionalSequenceLstmTestImpl<int8_t>(backends,
- ::tflite::TensorType_INT8,
+ UnidirectionalSequenceLstmTestImpl<int8_t>(::tflite::TensorType_INT8,
batchSize,
timeSize,
inputSize,
@@ -1056,10 +1052,12 @@ void UnidirectionalSequenceLstmInt8TimeMajorTest(std::vector<armnn::BackendId>&
clippingThresCell,
clippingThresProj,
isTimeMajor,
+ backends,
0.1);
}
-void UnidirectionalSequenceLstmInt8NoCifgWithPeepholeWithProjectionTest(std::vector<armnn::BackendId>& backends)
+void UnidirectionalSequenceLstmInt8NoCifgWithPeepholeWithProjectionTest(
+ const std::vector<armnn::BackendId>& backends = {})
{
int32_t batchSize = 3;
int32_t timeSize = 2;
@@ -1130,8 +1128,7 @@ void UnidirectionalSequenceLstmInt8NoCifgWithPeepholeWithProjectionTest(std::vec
float clippingThresProj = 0.f;
bool isTimeMajor = false;
- UnidirectionalSequenceLstmTestImpl<int8_t>(backends,
- ::tflite::TensorType_INT8,
+ UnidirectionalSequenceLstmTestImpl<int8_t>(::tflite::TensorType_INT8,
batchSize,
timeSize,
inputSize,
@@ -1176,10 +1173,12 @@ void UnidirectionalSequenceLstmInt8NoCifgWithPeepholeWithProjectionTest(std::vec
clippingThresCell,
clippingThresProj,
isTimeMajor,
+ backends,
0.1f);
}
-void UnidirectionalSequenceLstmInt8WithCifgWithPeepholeNoProjectionTest(std::vector<armnn::BackendId>& backends)
+void UnidirectionalSequenceLstmInt8WithCifgWithPeepholeNoProjectionTest(
+ const std::vector<armnn::BackendId>& backends = {})
{
int32_t batchSize = 3;
int32_t timeSize = 2;
@@ -1251,8 +1250,7 @@ void UnidirectionalSequenceLstmInt8WithCifgWithPeepholeNoProjectionTest(std::vec
float clippingThresProj = 0.f;
bool isTimeMajor = false;
- UnidirectionalSequenceLstmTestImpl<int8_t>(backends,
- ::tflite::TensorType_INT8,
+ UnidirectionalSequenceLstmTestImpl<int8_t>(::tflite::TensorType_INT8,
batchSize,
timeSize,
inputSize,
@@ -1297,11 +1295,12 @@ void UnidirectionalSequenceLstmInt8WithCifgWithPeepholeNoProjectionTest(std::vec
clippingThresCell,
clippingThresProj,
isTimeMajor,
+ backends,
0.1);
}
void UnidirectionalSequenceLstmInt8NoCifgWithPeepholeWithProjectionWithLayerNormTest(
- std::vector<armnn::BackendId>& backends)
+ const std::vector<armnn::BackendId>& backends = {})
{
int32_t batchSize = 3;
int32_t timeSize = 2;
@@ -1376,8 +1375,7 @@ void UnidirectionalSequenceLstmInt8NoCifgWithPeepholeWithProjectionWithLayerNorm
float clippingThresProj = 0.f;
bool isTimeMajor = false;
- UnidirectionalSequenceLstmTestImpl<int8_t>(backends,
- ::tflite::TensorType_INT8,
+ UnidirectionalSequenceLstmTestImpl<int8_t>(::tflite::TensorType_INT8,
batchSize,
timeSize,
inputSize,
@@ -1422,78 +1420,79 @@ void UnidirectionalSequenceLstmInt8NoCifgWithPeepholeWithProjectionWithLayerNorm
clippingThresCell,
clippingThresProj,
isTimeMajor,
+ backends,
0.1);
}
-TEST_SUITE("UnidirectionalSequenceLstmTest_CpuRefTests")
+TEST_SUITE("UnidirectionalSequenceLstmTestTests")
{
-TEST_CASE ("UnidirectionalSequenceLstmTest_CpuRef_Test")
+TEST_CASE ("UnidirectionalSequenceLstmTest_Test")
{
std::vector <armnn::BackendId> backends = {armnn::Compute::CpuRef};
UnidirectionalSequenceLstmTest(backends);
}
-TEST_CASE ("UnidirectionalSequenceLstmTimeMajorTest_CpuRef_Test")
+TEST_CASE ("UnidirectionalSequenceLstmTimeMajorTest_Test")
{
std::vector <armnn::BackendId> backends = {armnn::Compute::CpuRef};
UnidirectionalSequenceLstmTimeMajorTest(backends);
}
-TEST_CASE ("UnidirectionalSequenceLstmTimeMajorSingleTimeTest_CpuRef_Test")
+TEST_CASE ("UnidirectionalSequenceLstmTimeMajorSingleTimeTest_Test")
{
std::vector <armnn::BackendId> backends = {armnn::Compute::CpuRef};
UnidirectionalSequenceLstmTimeMajorSingleTimeTest(backends);
}
-TEST_CASE ("UnidirectionalSequenceLstmNoCifgWithPeepholeWithProjectionTest_CpuRef_Test")
+TEST_CASE ("UnidirectionalSequenceLstmNoCifgWithPeepholeWithProjectionTest_Test")
{
std::vector <armnn::BackendId> backends = {armnn::Compute::CpuRef};
UnidirectionalSequenceLstmNoCifgWithPeepholeWithProjectionTest(backends);
}
-TEST_CASE ("UnidirectionalSequenceLstmWithCifgWithPeepholeNoProjectionTest_CpuRef_Test")
+TEST_CASE ("UnidirectionalSequenceLstmWithCifgWithPeepholeNoProjectionTest_Test")
{
std::vector <armnn::BackendId> backends = {armnn::Compute::CpuRef};
UnidirectionalSequenceLstmWithCifgWithPeepholeNoProjectionTest(backends);
}
-TEST_CASE ("UnidirectionalSequenceLstmNoCifgWithPeepholeWithProjectionWithLayerNormTest_CpuRef_Test")
+TEST_CASE ("UnidirectionalSequenceLstmNoCifgWithPeepholeWithProjectionWithLayerNormTest_Test")
{
std::vector <armnn::BackendId> backends = {armnn::Compute::CpuRef};
UnidirectionalSequenceLstmNoCifgWithPeepholeWithProjectionWithLayerNormTest(backends);
}
-TEST_CASE ("UnidirectionalSequenceLstmInt8Test_CpuRef_Test")
+TEST_CASE ("UnidirectionalSequenceLstmInt8Test_Test")
{
std::vector <armnn::BackendId> backends = {armnn::Compute::CpuRef};
UnidirectionalSequenceLstmInt8Test(backends);
}
-TEST_CASE ("UnidirectionalSequenceLstmTimeInt8TimeMajorTest_CpuRef_Test")
+TEST_CASE ("UnidirectionalSequenceLstmTimeInt8TimeMajorTest_Test")
{
std::vector <armnn::BackendId> backends = {armnn::Compute::CpuRef};
UnidirectionalSequenceLstmInt8TimeMajorTest(backends);
}
-TEST_CASE ("UnidirectionalSequenceLstmInt8NoCifgWithPeepholeWithProjectionTest_CpuRef_Test")
+TEST_CASE ("UnidirectionalSequenceLstmInt8NoCifgWithPeepholeWithProjectionTest_Test")
{
std::vector <armnn::BackendId> backends = {armnn::Compute::CpuRef};
UnidirectionalSequenceLstmInt8NoCifgWithPeepholeWithProjectionTest(backends);
}
-TEST_CASE ("UnidirectionalSequenceLstmInt8WithCifgWithPeepholeNoProjectionTest_CpuRef_Test")
+TEST_CASE ("UnidirectionalSequenceLstmInt8WithCifgWithPeepholeNoProjectionTest_Test")
{
std::vector <armnn::BackendId> backends = {armnn::Compute::CpuRef};
UnidirectionalSequenceLstmInt8WithCifgWithPeepholeNoProjectionTest(backends);
}
-TEST_CASE ("UnidirectionalSequenceLstmInt8NoCifgWithPeepholeWithProjectionWithLayerNormTest_CpuRef_Test")
+TEST_CASE ("UnidirectionalSequenceLstmInt8NoCifgWithPeepholeWithProjectionWithLayerNormTest_Test")
{
std::vector <armnn::BackendId> backends = {armnn::Compute::CpuRef};
UnidirectionalSequenceLstmInt8NoCifgWithPeepholeWithProjectionWithLayerNormTest(backends);
}
-} //End of TEST_SUITE("UnidirectionalSequenceLstmTest_CpuRef")
+} //End of TEST_SUITE("UnidirectionalSequenceLstmTestTests")
} // namespace armnnDelegate \ No newline at end of file
diff --git a/delegate/test/UnidirectionalSequenceLstmTestHelper.hpp b/delegate/test/UnidirectionalSequenceLstmTestHelper.hpp
index c27f8d854b..a47d80ea5d 100644
--- a/delegate/test/UnidirectionalSequenceLstmTestHelper.hpp
+++ b/delegate/test/UnidirectionalSequenceLstmTestHelper.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021, 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -10,13 +10,8 @@
#include <armnn_delegate.hpp>
#include <DelegateTestInterpreter.hpp>
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/kernels/register.h>
#include <tensorflow/lite/version.h>
-#include <doctest/doctest.h>
-
-#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/utility/NumericCast.hpp>
#include <armnn/TypesUtils.hpp>
@@ -573,8 +568,7 @@ std::vector<char> CreateUnidirectionalSequenceLstmTfLiteModel(tflite::TensorType
}
template<typename T>
-void UnidirectionalSequenceLstmTestImpl(std::vector<armnn::BackendId>& backends,
- tflite::TensorType tensorType,
+void UnidirectionalSequenceLstmTestImpl(tflite::TensorType tensorType,
int32_t batchSize,
int32_t timeSize,
int32_t inputSize,
@@ -619,6 +613,7 @@ void UnidirectionalSequenceLstmTestImpl(std::vector<armnn::BackendId>& backends,
float clippingThresCell,
float clippingThresProj,
bool isTimeMajor,
+ const std::vector<armnn::BackendId>& backends = {},
float quantScale = 0.1f)
{
using namespace delegateTestInterpreter;
@@ -687,7 +682,7 @@ void UnidirectionalSequenceLstmTestImpl(std::vector<armnn::BackendId>& backends,
std::vector<int32_t> tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0);
// Setup interpreter with Arm NN Delegate applied.
- auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+ auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, CaptureAvailableBackends(backends));
CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
CHECK(armnnInterpreter.FillInputTensor<float>(inputValues, 0) == kTfLiteOk);
CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
diff --git a/delegate/test/UnpackTest.cpp b/delegate/test/UnpackTest.cpp
index 4caba84494..64256bcfe0 100644
--- a/delegate/test/UnpackTest.cpp
+++ b/delegate/test/UnpackTest.cpp
@@ -1,21 +1,17 @@
//
-// Copyright © 2021,2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021, 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "UnpackTestHelper.hpp"
-#include <armnn_delegate.hpp>
-
-#include <flatbuffers/flatbuffers.h>
-
#include <doctest/doctest.h>
namespace armnnDelegate
{
template <typename T>
-void UnpackAxis0Num4Test(tflite::TensorType tensorType, std::vector<armnn::BackendId>& backends)
+void UnpackAxis0Num4Test(tflite::TensorType tensorType)
{
std::vector<int32_t> inputShape { 4, 1, 6 };
std::vector<int32_t> expectedOutputShape { 1, 6 };
@@ -37,16 +33,16 @@ void UnpackAxis0Num4Test(tflite::TensorType tensorType, std::vector<armnn::Backe
UnpackTest<T>(tflite::BuiltinOperator_UNPACK,
tensorType,
- backends,
inputShape,
expectedOutputShape,
inputValues,
expectedOutputValues,
+ {},
0);
}
template <typename T>
-void UnpackAxis2Num6Test(tflite::TensorType tensorType, std::vector<armnn::BackendId>& backends)
+void UnpackAxis2Num6Test(tflite::TensorType tensorType)
{
std::vector<int32_t> inputShape { 4, 1, 6 };
std::vector<int32_t> expectedOutputShape { 4, 1 };
@@ -72,107 +68,41 @@ void UnpackAxis2Num6Test(tflite::TensorType tensorType, std::vector<armnn::Backe
UnpackTest<T>(tflite::BuiltinOperator_UNPACK,
tensorType,
- backends,
inputShape,
expectedOutputShape,
inputValues,
expectedOutputValues,
+ {},
2);
}
-TEST_SUITE("Unpack_CpuRefTests")
+TEST_SUITE("UnpackTests")
{
// Fp32
-TEST_CASE ("Unpack_Fp32_Axis0_Num4_CpuRef_Test")
+TEST_CASE ("Unpack_Fp32_Axis0_Num4_Test")
{
-std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
-UnpackAxis0Num4Test<float>(tflite::TensorType_FLOAT32, backends);
+UnpackAxis0Num4Test<float>(tflite::TensorType_FLOAT32);
}
-TEST_CASE ("Unpack_Fp32_Axis2_Num6_CpuRef_Test")
+TEST_CASE ("Unpack_Fp32_Axis2_Num6_Test")
{
-std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
-UnpackAxis2Num6Test<float>(tflite::TensorType_FLOAT32, backends);
+UnpackAxis2Num6Test<float>(tflite::TensorType_FLOAT32);
}
// Uint8
-TEST_CASE ("Unpack_Uint8_Axis0_Num4_CpuRef_Test")
-{
-std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
-UnpackAxis0Num4Test<uint8_t>(tflite::TensorType_UINT8, backends);
-}
-
-TEST_CASE ("Unpack_Uint8_Axis2_Num6_CpuRef_Test")
-{
-std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
-UnpackAxis2Num6Test<uint8_t>(tflite::TensorType_UINT8, backends);
-}
-
-} // End of Unpack_CpuRefTests
-
-TEST_SUITE("Unpack_CpuAccTests")
+TEST_CASE ("Unpack_Uint8_Axis0_Num4_Test")
{
-
-// Fp32
-TEST_CASE ("Unpack_Fp32_Axis0_Num4_CpuAcc_Test")
-{
-std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
-UnpackAxis0Num4Test<float>(tflite::TensorType_FLOAT32, backends);
+UnpackAxis0Num4Test<uint8_t>(tflite::TensorType_UINT8);
}
-TEST_CASE ("Unpack_Fp32_Axis2_Num6_CpuAcc_Test")
+TEST_CASE ("Unpack_Uint8_Axis2_Num6_Test")
{
-std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
-UnpackAxis2Num6Test<float>(tflite::TensorType_FLOAT32, backends);
+UnpackAxis2Num6Test<uint8_t>(tflite::TensorType_UINT8);
}
-// Uint8
-TEST_CASE ("Unpack_Uint8_Axis0_Num4_CpuAcc_Test")
-{
-std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
-UnpackAxis0Num4Test<uint8_t>(tflite::TensorType_UINT8, backends);
}
-TEST_CASE ("Unpack_Uint8_Axis2_Num6_CpuAcc_Test")
-{
-std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
-UnpackAxis2Num6Test<uint8_t>(tflite::TensorType_UINT8, backends);
-}
-
-} // End of Unpack_CpuAccTests
-
-TEST_SUITE("Unpack_GpuAccTests")
-{
-
-// Fp32
-TEST_CASE ("Unpack_Fp32_Axis0_Num4_GpuAcc_Test")
-{
-std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
-UnpackAxis0Num4Test<float>(tflite::TensorType_FLOAT32, backends);
-}
-
-TEST_CASE ("Unpack_Fp32_Axis2_Num6_GpuAcc_Test")
-{
-std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
-UnpackAxis2Num6Test<float>(tflite::TensorType_FLOAT32, backends);
-}
-
-// Uint8
-TEST_CASE ("Unpack_Uint8_Axis0_Num4_GpuAcc_Test")
-{
-std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
-UnpackAxis0Num4Test<uint8_t>(tflite::TensorType_UINT8, backends);
-}
-
-TEST_CASE ("Unpack_Uint8_Axis2_Num6_GpuAcc_Test")
-{
-std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
-UnpackAxis2Num6Test<uint8_t>(tflite::TensorType_UINT8, backends);
-}
-
-} // End of Unpack_GpuAccTests
-
// End of Unpack Test Suite
} // namespace armnnDelegate
\ No newline at end of file
diff --git a/delegate/test/UnpackTestHelper.hpp b/delegate/test/UnpackTestHelper.hpp
index 1b1ab496a1..584699d905 100644
--- a/delegate/test/UnpackTestHelper.hpp
+++ b/delegate/test/UnpackTestHelper.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021, 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -10,12 +10,8 @@
#include <armnn_delegate.hpp>
#include <DelegateTestInterpreter.hpp>
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/kernels/register.h>
#include <tensorflow/lite/version.h>
-#include <doctest/doctest.h>
-
namespace
{
@@ -115,11 +111,11 @@ std::vector<char> CreateUnpackTfLiteModel(tflite::BuiltinOperator unpackOperator
template <typename T>
void UnpackTest(tflite::BuiltinOperator unpackOperatorCode,
tflite::TensorType tensorType,
- std::vector<armnn::BackendId>& backends,
std::vector<int32_t>& inputShape,
std::vector<int32_t>& expectedOutputShape,
std::vector<T>& inputValues,
std::vector<std::vector<T>>& expectedOutputValues,
+ const std::vector<armnn::BackendId>& backends = {},
unsigned int axis = 0,
float quantScale = 1.0f,
int quantOffset = 0)
@@ -141,7 +137,7 @@ void UnpackTest(tflite::BuiltinOperator unpackOperatorCode,
CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
// Setup interpreter with Arm NN Delegate applied.
- auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+ auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, CaptureAvailableBackends(backends));
CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
CHECK(armnnInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
CHECK(armnnInterpreter.Invoke() == kTfLiteOk);