author    arovir01 <Aron.Virginas-Tar@arm.com>  2018-10-09 18:04:24 +0100
committer Matthew Bentham <matthew.bentham@arm.com>  2018-10-22 16:57:53 +0100
commit    43095f31edf103d71a8e2420b549d21fd349b49e (patch)
tree      1414f25d8fadd4aa84fbed008743c4672b606b26 /src/backends/cl
parent    3b72db05d6f8df28728b292c8bbd27c402dc8704 (diff)
download  armnn-43095f31edf103d71a8e2420b549d21fd349b49e.tar.gz
IVGCVSW-1988: Refactor backend-specific unit tests
Change-Id: I8eca81d2e0780390eaa837c186ffe1c7d41fdebe
Diffstat (limited to 'src/backends/cl')
-rw-r--r--  src/backends/cl/CMakeLists.txt                      3
-rw-r--r--  src/backends/cl/backend.cmake                       1
-rw-r--r--  src/backends/cl/test/CMakeLists.txt                16
-rw-r--r--  src/backends/cl/test/ClContextControlFixture.hpp   34
-rw-r--r--  src/backends/cl/test/ClCreateWorkloadTests.cpp    678
-rw-r--r--  src/backends/cl/test/ClLayerSupportTests.cpp      104
-rw-r--r--  src/backends/cl/test/ClLayerTests.cpp             338
7 files changed, 1173 insertions, 1 deletion
diff --git a/src/backends/cl/CMakeLists.txt b/src/backends/cl/CMakeLists.txt
index a7ce893f20..04da6ddcff 100644
--- a/src/backends/cl/CMakeLists.txt
+++ b/src/backends/cl/CMakeLists.txt
@@ -15,7 +15,8 @@ list(APPEND armnnClBackend_sources
)
if(ARMCOMPUTECL)
- add_subdirectory(workloads test)
+ add_subdirectory(workloads)
+ add_subdirectory(test)
endif()
add_library(armnnClBackend STATIC ${armnnClBackend_sources})
diff --git a/src/backends/cl/backend.cmake b/src/backends/cl/backend.cmake
index 1af88e3c9b..f8a1c1c62a 100644
--- a/src/backends/cl/backend.cmake
+++ b/src/backends/cl/backend.cmake
@@ -6,6 +6,7 @@
if(ARMCOMPUTECL)
add_subdirectory(${PROJECT_SOURCE_DIR}/src/backends/cl)
list(APPEND armnnLibraries armnnClBackend armnnClBackendWorkloads)
+ list(APPEND armnnUnitTestLibraries armnnClBackendUnitTests)
else()
message("CL backend is disabled")
add_subdirectory(${PROJECT_SOURCE_DIR}/src/backends/cl)
diff --git a/src/backends/cl/test/CMakeLists.txt b/src/backends/cl/test/CMakeLists.txt
index e69de29bb2..795a7dc9d2 100644
--- a/src/backends/cl/test/CMakeLists.txt
+++ b/src/backends/cl/test/CMakeLists.txt
@@ -0,0 +1,16 @@
+#
+# Copyright © 2017 Arm Ltd. All rights reserved.
+# SPDX-License-Identifier: MIT
+#
+
+list(APPEND armnnClBackendUnitTests_sources
+ ClContextControlFixture.hpp
+ ClCreateWorkloadTests.cpp
+ ClLayerSupportTests.cpp
+ ClLayerTests.cpp
+)
+
+add_library(armnnClBackendUnitTests STATIC ${armnnClBackendUnitTests_sources})
+target_include_directories(armnnClBackendUnitTests PRIVATE ${PROJECT_SOURCE_DIR}/src)
+target_include_directories(armnnClBackendUnitTests PRIVATE ${PROJECT_SOURCE_DIR}/src/armnn)
+target_include_directories(armnnClBackendUnitTests PRIVATE ${PROJECT_SOURCE_DIR}/src/armnnUtils)
\ No newline at end of file
diff --git a/src/backends/cl/test/ClContextControlFixture.hpp b/src/backends/cl/test/ClContextControlFixture.hpp
new file mode 100644
index 0000000000..fd53e3fcf3
--- /dev/null
+++ b/src/backends/cl/test/ClContextControlFixture.hpp
@@ -0,0 +1,34 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <backends/cl/ClContextControl.hpp>
+
+template<bool ProfilingEnabled>
+struct ClContextControlFixtureBase
+{
+ static ClContextControlFixtureBase*& Instance()
+ {
+ static ClContextControlFixtureBase* s_Instance = nullptr;
+ return s_Instance;
+ }
+
+ // Initialising ClContextControl to ensure OpenCL is loaded correctly for each test case
+ ClContextControlFixtureBase()
+ : m_ClContextControl(nullptr, ProfilingEnabled)
+ {
+ Instance() = this;
+ }
+ ~ClContextControlFixtureBase()
+ {
+ Instance() = nullptr;
+ }
+
+ armnn::ClContextControl m_ClContextControl;
+};
+
+using ClContextControlFixture = ClContextControlFixtureBase<false>;
+using ClProfilingContextControlFixture = ClContextControlFixtureBase<true>;
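
For readers unfamiliar with Boost.Test fixtures: with BOOST_FIXTURE_TEST_SUITE, the fixture above is constructed before and destroyed after every test case in the suite, so each test runs against a freshly initialised OpenCL context. A minimal usage sketch, assuming only Boost.Test and the header above (the suite and test names here are illustrative, not part of this change):

#include "ClContextControlFixture.hpp"

#include <boost/test/unit_test.hpp>

// Each test case in the suite gets its own ClContextControlFixture instance,
// so OpenCL is loaded before the test body runs and released afterwards.
BOOST_FIXTURE_TEST_SUITE(ExampleClSuite, ClContextControlFixture)

BOOST_AUTO_TEST_CASE(FixtureInstanceIsLiveDuringTests)
{
    // Instance() exposes the currently running fixture to helper code;
    // it is non-null only while a test case is executing.
    BOOST_TEST(ClContextControlFixture::Instance() != nullptr);
}

BOOST_AUTO_TEST_SUITE_END()
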
diff --git a/src/backends/cl/test/ClCreateWorkloadTests.cpp b/src/backends/cl/test/ClCreateWorkloadTests.cpp
new file mode 100644
index 0000000000..659ba82e8a
--- /dev/null
+++ b/src/backends/cl/test/ClCreateWorkloadTests.cpp
@@ -0,0 +1,678 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "ClContextControlFixture.hpp"
+
+#include <backends/MemCopyWorkload.hpp>
+#include <backends/cl/ClTensorHandle.hpp>
+#include <backends/cl/ClWorkloadFactory.hpp>
+#include <backends/cl/workloads/ClWorkloads.hpp>
+#include <backends/cl/workloads/ClWorkloadUtils.hpp>
+#include <backends/reference/RefWorkloadFactory.hpp>
+
+#include <test/CreateWorkloadClNeon.hpp>
+
+boost::test_tools::predicate_result CompareIClTensorHandleShape(IClTensorHandle* tensorHandle,
+ std::initializer_list<unsigned int> expectedDimensions)
+{
+ return CompareTensorHandleShape<IClTensorHandle>(tensorHandle, expectedDimensions);
+}
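
As an aside, CompareTensorHandleShape (from test/CreateWorkloadClNeon.hpp) is the predicate doing the real work above. A reduced, hypothetical version of such a shape predicate, sketched here only to make the wrapper concrete, might look like:

// Hypothetical sketch of a Boost.Test shape predicate; 'shape' stands in for
// whatever shape the tensor handle reports at runtime.
boost::test_tools::predicate_result CompareShape(const armnn::TensorShape& shape,
                                                 std::initializer_list<unsigned int> expected)
{
    if (shape.GetNumDimensions() != expected.size())
    {
        boost::test_tools::predicate_result res(false);
        res.message() << "Different number of dimensions [" << shape.GetNumDimensions()
                      << " != " << expected.size() << "]";
        return res;
    }
    unsigned int i = 0;
    for (unsigned int expectedDimension : expected)
    {
        if (shape[i] != expectedDimension)
        {
            boost::test_tools::predicate_result res(false);
            res.message() << "Different size at dimension " << i
                          << " [" << shape[i] << " != " << expectedDimension << "]";
            return res;
        }
        ++i;
    }
    return true;
}
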
+
+BOOST_FIXTURE_TEST_SUITE(CreateWorkloadCl, ClContextControlFixture)
+
+template <typename ActivationWorkloadType, armnn::DataType DataType>
+static void ClCreateActivationWorkloadTest()
+{
+ Graph graph;
+ ClWorkloadFactory factory;
+
+ auto workload = CreateActivationWorkloadTest<ActivationWorkloadType, DataType>(factory, graph);
+
+ // Checks that inputs/outputs are as we expect them (see definition of CreateActivationWorkloadTest).
+ ActivationQueueDescriptor queueDescriptor = workload->GetData();
+ auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
+ auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
+
+ BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {1}));
+ BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {1}));
+}
+
+BOOST_AUTO_TEST_CASE(CreateActivationFloatWorkload)
+{
+ ClCreateActivationWorkloadTest<ClActivationFloatWorkload, armnn::DataType::Float32>();
+}
+
+BOOST_AUTO_TEST_CASE(CreateActivationFloat16Workload)
+{
+ ClCreateActivationWorkloadTest<ClActivationFloatWorkload, armnn::DataType::Float16>();
+}
+
+template <typename WorkloadType,
+ typename DescriptorType,
+ typename LayerType,
+ armnn::DataType DataType>
+static void ClCreateArithmeticWorkloadTest()
+{
+ Graph graph;
+ ClWorkloadFactory factory;
+ auto workload = CreateArithmeticWorkloadTest<WorkloadType, DescriptorType, LayerType, DataType>(factory, graph);
+
+ // Checks that inputs/outputs are as we expect them (see definition of CreateArithmeticWorkloadTest).
+ DescriptorType queueDescriptor = workload->GetData();
+ auto inputHandle1 = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
+ auto inputHandle2 = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[1]);
+ auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
+ BOOST_TEST(CompareIClTensorHandleShape(inputHandle1, {2, 3}));
+ BOOST_TEST(CompareIClTensorHandleShape(inputHandle2, {2, 3}));
+ BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {2, 3}));
+}
+
+BOOST_AUTO_TEST_CASE(CreateAdditionFloatWorkload)
+{
+    ClCreateArithmeticWorkloadTest<ClAdditionWorkload,
+                                   AdditionQueueDescriptor,
+                                   AdditionLayer,
+                                   armnn::DataType::Float32>();
+}
+
+BOOST_AUTO_TEST_CASE(CreateAdditionFloat16Workload)
+{
+    ClCreateArithmeticWorkloadTest<ClAdditionWorkload,
+                                   AdditionQueueDescriptor,
+                                   AdditionLayer,
+                                   armnn::DataType::Float16>();
+}
+
+BOOST_AUTO_TEST_CASE(CreateSubtractionFloatWorkload)
+{
+    ClCreateArithmeticWorkloadTest<ClSubtractionWorkload,
+                                   SubtractionQueueDescriptor,
+                                   SubtractionLayer,
+                                   armnn::DataType::Float32>();
+}
+
+BOOST_AUTO_TEST_CASE(CreateSubtractionFloat16Workload)
+{
+    ClCreateArithmeticWorkloadTest<ClSubtractionWorkload,
+                                   SubtractionQueueDescriptor,
+                                   SubtractionLayer,
+                                   armnn::DataType::Float16>();
+}
+
+BOOST_AUTO_TEST_CASE(CreateMultiplicationFloatWorkloadTest)
+{
+    ClCreateArithmeticWorkloadTest<ClMultiplicationWorkload,
+                                   MultiplicationQueueDescriptor,
+                                   MultiplicationLayer,
+                                   armnn::DataType::Float32>();
+}
+
+BOOST_AUTO_TEST_CASE(CreateMultiplicationFloat16WorkloadTest)
+{
+    ClCreateArithmeticWorkloadTest<ClMultiplicationWorkload,
+                                   MultiplicationQueueDescriptor,
+                                   MultiplicationLayer,
+                                   armnn::DataType::Float16>();
+}
+
+BOOST_AUTO_TEST_CASE(CreateMultiplicationUint8WorkloadTest)
+{
+    ClCreateArithmeticWorkloadTest<ClMultiplicationWorkload,
+                                   MultiplicationQueueDescriptor,
+                                   MultiplicationLayer,
+                                   armnn::DataType::QuantisedAsymm8>();
+}
+
+BOOST_AUTO_TEST_CASE(CreateDivisionFloatWorkloadTest)
+{
+    ClCreateArithmeticWorkloadTest<ClDivisionFloatWorkload,
+                                   DivisionQueueDescriptor,
+                                   DivisionLayer,
+                                   armnn::DataType::Float32>();
+}
+
+BOOST_AUTO_TEST_CASE(CreateDivisionFloat16WorkloadTest)
+{
+    ClCreateArithmeticWorkloadTest<ClDivisionFloatWorkload,
+                                   DivisionQueueDescriptor,
+                                   DivisionLayer,
+                                   armnn::DataType::Float16>();
+}
+
+template <typename BatchNormalizationWorkloadType, armnn::DataType DataType>
+static void ClCreateBatchNormalizationWorkloadTest()
+{
+ Graph graph;
+ ClWorkloadFactory factory;
+
+ auto workload = CreateBatchNormalizationWorkloadTest<BatchNormalizationWorkloadType, DataType>
+ (factory, graph);
+
+ // Checks that inputs/outputs are as we expect them (see definition of CreateBatchNormalizationWorkloadTest).
+ BatchNormalizationQueueDescriptor queueDescriptor = workload->GetData();
+ auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
+ auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
+
+ BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {2, 3, 1, 1}));
+ BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {2, 3, 1, 1}));
+}
+
+BOOST_AUTO_TEST_CASE(CreateBatchNormalizationFloatWorkload)
+{
+ ClCreateBatchNormalizationWorkloadTest<ClBatchNormalizationFloatWorkload, armnn::DataType::Float32>();
+}
+
+BOOST_AUTO_TEST_CASE(CreateBatchNormalizationFloat16Workload)
+{
+ ClCreateBatchNormalizationWorkloadTest<ClBatchNormalizationFloatWorkload, armnn::DataType::Float16>();
+}
+
+BOOST_AUTO_TEST_CASE(CreateConvertFp16ToFp32Workload)
+{
+ Graph graph;
+ ClWorkloadFactory factory;
+ auto workload = CreateConvertFp16ToFp32WorkloadTest<ClConvertFp16ToFp32Workload>(factory, graph);
+
+ ConvertFp16ToFp32QueueDescriptor queueDescriptor = workload->GetData();
+ auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
+ auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
+
+ BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {3, 2, 3}));
+ BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {3, 2, 3}));
+ BOOST_TEST((inputHandle->GetTensor().info()->data_type() == arm_compute::DataType::F16));
+ BOOST_TEST((outputHandle->GetTensor().info()->data_type() == arm_compute::DataType::F32));
+}
+
+BOOST_AUTO_TEST_CASE(CreateConvertFp32ToFp16Workload)
+{
+ Graph graph;
+ ClWorkloadFactory factory;
+ auto workload = CreateConvertFp32ToFp16WorkloadTest<ClConvertFp32ToFp16Workload>(factory, graph);
+
+ ConvertFp32ToFp16QueueDescriptor queueDescriptor = workload->GetData();
+ auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
+ auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
+
+ BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {3, 2, 3}));
+ BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {3, 2, 3}));
+ BOOST_TEST((inputHandle->GetTensor().info()->data_type() == arm_compute::DataType::F32));
+ BOOST_TEST((outputHandle->GetTensor().info()->data_type() == arm_compute::DataType::F16));
+}
+
+template <typename Convolution2dWorkloadType, typename armnn::DataType DataType>
+static void ClConvolution2dWorkloadTest(DataLayout dataLayout)
+{
+ Graph graph;
+ ClWorkloadFactory factory;
+ auto workload = CreateConvolution2dWorkloadTest<ClConvolution2dWorkload, DataType>(factory,
+ graph,
+ dataLayout);
+
+ std::initializer_list<unsigned int> inputShape = (dataLayout == DataLayout::NCHW) ?
+ std::initializer_list<unsigned int>({2, 3, 8, 16}) : std::initializer_list<unsigned int>({2, 8, 16, 3});
+ std::initializer_list<unsigned int> outputShape = (dataLayout == DataLayout::NCHW) ?
+ std::initializer_list<unsigned int>({2, 2, 2, 10}) : std::initializer_list<unsigned int>({2, 2, 10, 2});
+
+ // Checks that outputs and inputs are as we expect them (see definition of CreateConvolution2dWorkloadTest).
+ Convolution2dQueueDescriptor queueDescriptor = workload->GetData();
+ auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
+ auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
+ BOOST_TEST(CompareIClTensorHandleShape(inputHandle, inputShape));
+ BOOST_TEST(CompareIClTensorHandleShape(outputHandle, outputShape));
+}
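
The NCHW and NHWC shapes selected above are permutations of one another. A small hypothetical helper, shown purely for illustration (the tests deliberately spell both shapes out in full), makes the relationship explicit:

#include <vector>

// Illustrative only: the NHWC shape used in these tests is the NCHW shape with
// the channel dimension moved to the end, e.g. {2, 3, 8, 16} -> {2, 8, 16, 3}.
std::vector<unsigned int> NchwToNhwc(const std::vector<unsigned int>& nchw)
{
    // nchw = {N, C, H, W}  ->  nhwc = {N, H, W, C}
    return { nchw[0], nchw[2], nchw[3], nchw[1] };
}
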
+
+BOOST_AUTO_TEST_CASE(CreateConvolution2dFloatNchwWorkload)
+{
+ ClConvolution2dWorkloadTest<ClConvolution2dWorkload, armnn::DataType::Float32>(DataLayout::NCHW);
+}
+
+BOOST_AUTO_TEST_CASE(CreateConvolution2dFloatNhwcWorkload)
+{
+ ClConvolution2dWorkloadTest<ClConvolution2dWorkload, armnn::DataType::Float32>(DataLayout::NHWC);
+}
+
+BOOST_AUTO_TEST_CASE(CreateConvolution2dFloat16NchwWorkload)
+{
+ ClConvolution2dWorkloadTest<ClConvolution2dWorkload, armnn::DataType::Float16>(DataLayout::NCHW);
+}
+
+BOOST_AUTO_TEST_CASE(CreateConvolution2dFloat16NhwcWorkload)
+{
+ ClConvolution2dWorkloadTest<ClConvolution2dWorkload, armnn::DataType::Float16>(DataLayout::NHWC);
+}
+
+template <typename Convolution2dWorkloadType, typename armnn::DataType DataType>
+static void ClDirectConvolution2dWorkloadTest()
+{
+ Graph graph;
+ ClWorkloadFactory factory;
+ auto workload = CreateDirectConvolution2dWorkloadTest<ClConvolution2dWorkload, DataType>(factory, graph);
+
+ // Checks that outputs and inputs are as we expect them (see definition of CreateDirectConvolution2dWorkloadTest).
+ Convolution2dQueueDescriptor queueDescriptor = workload->GetData();
+ auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
+ auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
+ BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {2, 3, 6, 6}));
+ BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {2, 2, 6, 6}));
+}
+
+BOOST_AUTO_TEST_CASE(CreateDirectConvolution2dFloatWorkload)
+{
+ ClDirectConvolution2dWorkloadTest<ClConvolution2dWorkload, armnn::DataType::Float32>();
+}
+
+BOOST_AUTO_TEST_CASE(CreateDirectConvolution2dFloat16Workload)
+{
+ ClDirectConvolution2dWorkloadTest<ClConvolution2dWorkload, armnn::DataType::Float16>();
+}
+
+BOOST_AUTO_TEST_CASE(CreateDirectConvolution2dUint8Workload)
+{
+ ClDirectConvolution2dWorkloadTest<ClConvolution2dWorkload, armnn::DataType::QuantisedAsymm8>();
+}
+
+template <typename FullyConnectedWorkloadType, typename armnn::DataType DataType>
+static void ClCreateFullyConnectedWorkloadTest()
+{
+ Graph graph;
+ ClWorkloadFactory factory;
+ auto workload =
+ CreateFullyConnectedWorkloadTest<FullyConnectedWorkloadType, DataType>(factory, graph);
+
+ // Checks that outputs and inputs are as we expect them (see definition of CreateFullyConnectedWorkloadTest).
+ FullyConnectedQueueDescriptor queueDescriptor = workload->GetData();
+ auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
+ auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
+ BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {3, 1, 4, 5}));
+ BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {3, 7}));
+}
+
+
+BOOST_AUTO_TEST_CASE(CreateFullyConnectedFloatWorkloadTest)
+{
+ ClCreateFullyConnectedWorkloadTest<ClFullyConnectedWorkload, armnn::DataType::Float32>();
+}
+
+BOOST_AUTO_TEST_CASE(CreateFullyConnectedFloat16WorkloadTest)
+{
+ ClCreateFullyConnectedWorkloadTest<ClFullyConnectedWorkload, armnn::DataType::Float16>();
+}
+
+template <typename NormalizationWorkloadType, typename armnn::DataType DataType>
+static void ClNormalizationWorkloadTest(DataLayout dataLayout)
+{
+ Graph graph;
+ ClWorkloadFactory factory;
+
+ auto workload = CreateNormalizationWorkloadTest<NormalizationWorkloadType, DataType>
+ (factory, graph, dataLayout);
+
+ // Checks that inputs/outputs are as we expect them (see definition of CreateNormalizationWorkloadTest).
+ NormalizationQueueDescriptor queueDescriptor = workload->GetData();
+ auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
+ auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
+
+ BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {3, 5, 5, 1}));
+ BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {3, 5, 5, 1}));
+}
+
+BOOST_AUTO_TEST_CASE(CreateNormalizationFloat32NchwWorkload)
+{
+ ClNormalizationWorkloadTest<ClNormalizationFloatWorkload, armnn::DataType::Float32>(DataLayout::NCHW);
+}
+
+BOOST_AUTO_TEST_CASE(CreateNormalizationFloat16NchwWorkload)
+{
+ ClNormalizationWorkloadTest<ClNormalizationFloatWorkload, armnn::DataType::Float16>(DataLayout::NCHW);
+}
+
+BOOST_AUTO_TEST_CASE(CreateNormalizationFloat32NhwcWorkload)
+{
+ ClNormalizationWorkloadTest<ClNormalizationFloatWorkload, armnn::DataType::Float32>(DataLayout::NHWC);
+}
+
+BOOST_AUTO_TEST_CASE(CreateNormalizationFloat16NhwcWorkload)
+{
+ ClNormalizationWorkloadTest<ClNormalizationFloatWorkload, armnn::DataType::Float16>(DataLayout::NHWC);
+}
+
+template <typename Pooling2dWorkloadType, typename armnn::DataType DataType>
+static void ClPooling2dWorkloadTest(DataLayout dataLayout)
+{
+ Graph graph;
+ ClWorkloadFactory factory;
+
+ auto workload = CreatePooling2dWorkloadTest<Pooling2dWorkloadType, DataType>(factory, graph, dataLayout);
+
+ std::initializer_list<unsigned int> inputShape = (dataLayout == DataLayout::NCHW) ?
+ std::initializer_list<unsigned int>({3, 2, 5, 5}) : std::initializer_list<unsigned int>({3, 5, 5, 2});
+ std::initializer_list<unsigned int> outputShape = (dataLayout == DataLayout::NCHW) ?
+ std::initializer_list<unsigned int>({3, 2, 2, 4}) : std::initializer_list<unsigned int>({3, 2, 4, 2});
+
+ // Check that inputs/outputs are as we expect them (see definition of CreatePooling2dWorkloadTest).
+ Pooling2dQueueDescriptor queueDescriptor = workload->GetData();
+ auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
+ auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
+
+ BOOST_TEST(CompareIClTensorHandleShape(inputHandle, inputShape));
+ BOOST_TEST(CompareIClTensorHandleShape(outputHandle, outputShape));
+}
+
+BOOST_AUTO_TEST_CASE(CreatePooling2dFloatNchwWorkload)
+{
+ ClPooling2dWorkloadTest<ClPooling2dFloatWorkload, armnn::DataType::Float32>(DataLayout::NCHW);
+}
+
+BOOST_AUTO_TEST_CASE(CreatePooling2dFloatNhwcWorkload)
+{
+ ClPooling2dWorkloadTest<ClPooling2dFloatWorkload, armnn::DataType::Float32>(DataLayout::NHWC);
+}
+
+BOOST_AUTO_TEST_CASE(CreatePooling2dFloat16NchwWorkload)
+{
+ ClPooling2dWorkloadTest<ClPooling2dFloatWorkload, armnn::DataType::Float16>(DataLayout::NCHW);
+}
+
+BOOST_AUTO_TEST_CASE(CreatePooling2dFloat16NhwcWorkload)
+{
+ ClPooling2dWorkloadTest<ClPooling2dFloatWorkload, armnn::DataType::Float16>(DataLayout::NHWC);
+}
+
+template <typename ReshapeWorkloadType, typename armnn::DataType DataType>
+static void ClCreateReshapeWorkloadTest()
+{
+ Graph graph;
+ ClWorkloadFactory factory;
+
+ auto workload = CreateReshapeWorkloadTest<ReshapeWorkloadType, DataType>(factory, graph);
+
+ // Checks that outputs and inputs are as we expect them (see definition of CreateReshapeWorkloadTest).
+ ReshapeQueueDescriptor queueDescriptor = workload->GetData();
+ auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
+ auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
+
+ BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {4, 1}));
+ BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {4})); // Leading size 1 dimensions are collapsed by ACL.
+}
+
+BOOST_AUTO_TEST_CASE(CreateReshapeFloatWorkload)
+{
+ ClCreateReshapeWorkloadTest<ClReshapeFloatWorkload, armnn::DataType::Float32>();
+}
+
+BOOST_AUTO_TEST_CASE(CreateReshapeFloat16Workload)
+{
+ ClCreateReshapeWorkloadTest<ClReshapeFloatWorkload, armnn::DataType::Float16>();
+}
+
+BOOST_AUTO_TEST_CASE(CreateReshapeUint8Workload)
+{
+ ClCreateReshapeWorkloadTest<ClReshapeUint8Workload, armnn::DataType::QuantisedAsymm8>();
+}
+
+template <typename SoftmaxWorkloadType, typename armnn::DataType DataType>
+static void ClSoftmaxWorkloadTest()
+{
+ Graph graph;
+ ClWorkloadFactory factory;
+
+ auto workload = CreateSoftmaxWorkloadTest<SoftmaxWorkloadType, DataType>(factory, graph);
+
+    // Checks that inputs/outputs are as we expect them (see definition of CreateSoftmaxWorkloadTest).
+ SoftmaxQueueDescriptor queueDescriptor = workload->GetData();
+ auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
+ auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
+
+ BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {4, 1}));
+ BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {4, 1}));
+}
+
+
+BOOST_AUTO_TEST_CASE(CreateSoftmaxFloatWorkloadTest)
+{
+ ClSoftmaxWorkloadTest<ClSoftmaxFloatWorkload, armnn::DataType::Float32>();
+}
+
+BOOST_AUTO_TEST_CASE(CreateSoftmaxFloat16WorkloadTest)
+{
+ ClSoftmaxWorkloadTest<ClSoftmaxFloatWorkload, armnn::DataType::Float16>();
+}
+
+template <typename armnn::DataType DataType>
+static void ClSplitterWorkloadTest()
+{
+ Graph graph;
+ ClWorkloadFactory factory;
+
+ auto workload = CreateSplitterWorkloadTest<ClSplitterWorkload, DataType>(factory, graph);
+
+ // Checks that outputs are as we expect them (see definition of CreateSplitterWorkloadTest).
+ SplitterQueueDescriptor queueDescriptor = workload->GetData();
+ auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
+ BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {5, 7, 7}));
+
+ auto outputHandle1 = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[1]);
+ BOOST_TEST(CompareIClTensorHandleShape(outputHandle1, {2, 7, 7}));
+
+ auto outputHandle2 = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[2]);
+ BOOST_TEST(CompareIClTensorHandleShape(outputHandle2, {2, 7, 7}));
+
+ auto outputHandle0 = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
+    // NOTE: At the moment the CL backend collapses the tensor to 2 dimensions when dimension zero is 1.
+    // We are raising this difference between the NEON and CL libraries as an issue with the Compute Library team.
+ BOOST_TEST(CompareIClTensorHandleShape(outputHandle0, {7, 7}));
+}
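
To make the NOTE above concrete: the 0th output is requested with shape {1, 7, 7}, but ACL reports it back with the leading size-1 dimension removed. A minimal sketch of the behaviour being asserted, assuming only armnn::TensorShape and Boost.Test:

// Illustrative only: what the CL dimension collapse means for shape checks.
void DimensionCollapseSketch()
{
    armnn::TensorShape requested({1, 7, 7});
    armnn::TensorShape reportedByCl({7, 7}); // leading size-1 dimension collapsed by ACL

    // A naive comparison against the requested shape would fail, which is why
    // the test above asserts on the collapsed {7, 7} shape instead.
    BOOST_TEST(reportedByCl.GetNumDimensions() == requested.GetNumDimensions() - 1);
}
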
+
+BOOST_AUTO_TEST_CASE(CreateSplitterFloatWorkload)
+{
+ ClSplitterWorkloadTest<armnn::DataType::Float32>();
+}
+
+BOOST_AUTO_TEST_CASE(CreateSplitterFloat16Workload)
+{
+ ClSplitterWorkloadTest<armnn::DataType::Float16>();
+}
+
+template <typename MergerWorkloadType, typename armnn::DataType DataType>
+static void ClSplitterMergerTest()
+{
+    // Tests that it is possible to decide which output of the splitter layer
+    // should be linked to which input of the merger layer.
+    // We test that it is possible to specify the 0th output of the splitter
+    // as the 1st input to the merger, and the 1st output of the splitter as
+    // the 0th input of the merger.
+
+ Graph graph;
+ ClWorkloadFactory factory;
+
+ auto workloads =
+ CreateSplitterMergerWorkloadTest<ClSplitterWorkload, MergerWorkloadType, DataType>
+ (factory, graph);
+
+ auto wlSplitter = std::move(workloads.first);
+ auto wlMerger = std::move(workloads.second);
+
+    // Checks that the index of inputs/outputs matches what we declared on InputDescriptor construction.
+ armnn::ClSubTensorHandle* sOut0 = dynamic_cast<armnn::ClSubTensorHandle*>(wlSplitter->GetData().m_Outputs[0]);
+ armnn::ClSubTensorHandle* sOut1 = dynamic_cast<armnn::ClSubTensorHandle*>(wlSplitter->GetData().m_Outputs[1]);
+ armnn::ClSubTensorHandle* mIn0 = dynamic_cast<armnn::ClSubTensorHandle*>(wlMerger->GetData().m_Inputs[0]);
+ armnn::ClSubTensorHandle* mIn1 = dynamic_cast<armnn::ClSubTensorHandle*>(wlMerger->GetData().m_Inputs[1]);
+
+ BOOST_TEST(sOut0);
+ BOOST_TEST(sOut1);
+ BOOST_TEST(mIn0);
+ BOOST_TEST(mIn1);
+
+    // Flipped order of inputs/outputs.
+ bool validDataPointers = (sOut0 == mIn1) && (sOut1 == mIn0);
+ BOOST_TEST(validDataPointers);
+
+
+    // Also makes sure that the inputs are sub-tensors of one tensor and the outputs are sub-tensors of another tensor.
+ bool validSubTensorParents = (mIn0->GetTensor().parent() == mIn1->GetTensor().parent())
+ && (sOut0->GetTensor().parent() == sOut1->GetTensor().parent());
+
+ BOOST_TEST(validSubTensorParents);
+}
+
+BOOST_AUTO_TEST_CASE(CreateSplitterMergerFloatWorkload)
+{
+ ClSplitterMergerTest<ClMergerFloatWorkload, armnn::DataType::Float32>();
+}
+
+BOOST_AUTO_TEST_CASE(CreateSplitterMergerFloat16Workload)
+{
+ ClSplitterMergerTest<ClMergerFloatWorkload, armnn::DataType::Float16>();
+}
+
+
+BOOST_AUTO_TEST_CASE(CreateSingleOutputMultipleInputs)
+{
+    // Tests that it is possible to assign multiple (two) different layers to each of the outputs of a splitter layer.
+    // We create a splitter with two outputs, and each of those outputs is used by two different activation layers.
+
+ Graph graph;
+ ClWorkloadFactory factory;
+ std::unique_ptr<ClSplitterWorkload> wlSplitter;
+ std::unique_ptr<ClActivationFloatWorkload> wlActiv0_0;
+ std::unique_ptr<ClActivationFloatWorkload> wlActiv0_1;
+ std::unique_ptr<ClActivationFloatWorkload> wlActiv1_0;
+ std::unique_ptr<ClActivationFloatWorkload> wlActiv1_1;
+
+ CreateSplitterMultipleInputsOneOutputWorkloadTest<ClSplitterWorkload,
+ ClActivationFloatWorkload, armnn::DataType::Float32>(factory, graph, wlSplitter, wlActiv0_0, wlActiv0_1,
+ wlActiv1_0, wlActiv1_1);
+
+    // Checks that the index of inputs/outputs matches what we declared on InputDescriptor construction.
+ armnn::ClSubTensorHandle* sOut0 = dynamic_cast<armnn::ClSubTensorHandle*>(wlSplitter->GetData().m_Outputs[0]);
+ armnn::ClSubTensorHandle* sOut1 = dynamic_cast<armnn::ClSubTensorHandle*>(wlSplitter->GetData().m_Outputs[1]);
+ armnn::ClSubTensorHandle* activ0_0Im = dynamic_cast<armnn::ClSubTensorHandle*>(wlActiv0_0->GetData().m_Inputs[0]);
+ armnn::ClSubTensorHandle* activ0_1Im = dynamic_cast<armnn::ClSubTensorHandle*>(wlActiv0_1->GetData().m_Inputs[0]);
+ armnn::ClSubTensorHandle* activ1_0Im = dynamic_cast<armnn::ClSubTensorHandle*>(wlActiv1_0->GetData().m_Inputs[0]);
+ armnn::ClSubTensorHandle* activ1_1Im = dynamic_cast<armnn::ClSubTensorHandle*>(wlActiv1_1->GetData().m_Inputs[0]);
+
+
+ BOOST_TEST(sOut0);
+ BOOST_TEST(sOut1);
+ BOOST_TEST(activ0_0Im);
+ BOOST_TEST(activ0_1Im);
+ BOOST_TEST(activ1_0Im);
+ BOOST_TEST(activ1_1Im);
+
+ bool validDataPointers = (sOut0 == activ0_0Im) && (sOut0 == activ0_1Im) &&
+ (sOut1 == activ1_0Im) && (sOut1 == activ1_1Im);
+
+ BOOST_TEST(validDataPointers);
+}
+
+BOOST_AUTO_TEST_CASE(CreateMemCopyWorkloadsCl)
+{
+ ClWorkloadFactory factory;
+ CreateMemCopyWorkloads<IClTensorHandle>(factory);
+}
+
+template <typename L2NormalizationWorkloadType, typename armnn::DataType DataType>
+static void ClL2NormalizationWorkloadTest(DataLayout dataLayout)
+{
+ Graph graph;
+ ClWorkloadFactory factory;
+
+ auto workload = CreateL2NormalizationWorkloadTest<L2NormalizationWorkloadType, DataType>
+ (factory, graph, dataLayout);
+
+    // Checks that inputs/outputs are as we expect them (see definition of CreateL2NormalizationWorkloadTest).
+ L2NormalizationQueueDescriptor queueDescriptor = workload->GetData();
+ auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
+ auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
+
+ BOOST_TEST(CompareIClTensorHandleShape(inputHandle, { 5, 20, 50, 67 }));
+ BOOST_TEST(CompareIClTensorHandleShape(outputHandle, { 5, 20, 50, 67 }));
+}
+
+BOOST_AUTO_TEST_CASE(CreateL2NormalizationFloatNchwWorkload)
+{
+ ClL2NormalizationWorkloadTest<ClL2NormalizationFloatWorkload, armnn::DataType::Float32>(DataLayout::NCHW);
+}
+
+BOOST_AUTO_TEST_CASE(CreateL2NormalizationFloatNhwcWorkload)
+{
+ ClL2NormalizationWorkloadTest<ClL2NormalizationFloatWorkload, armnn::DataType::Float32>(DataLayout::NHWC);
+}
+
+BOOST_AUTO_TEST_CASE(CreateL2NormalizationFloat16NchwWorkload)
+{
+ ClL2NormalizationWorkloadTest<ClL2NormalizationFloatWorkload, armnn::DataType::Float16>(DataLayout::NCHW);
+}
+
+BOOST_AUTO_TEST_CASE(CreateL2NormalizationFloat16NhwcWorkload)
+{
+ ClL2NormalizationWorkloadTest<ClL2NormalizationFloatWorkload, armnn::DataType::Float16>(DataLayout::NHWC);
+}
+
+template <typename LstmWorkloadType>
+static void ClCreateLstmWorkloadTest()
+{
+ Graph graph;
+ ClWorkloadFactory factory;
+ auto workload = CreateLstmWorkloadTest<LstmWorkloadType>(factory, graph);
+
+ LstmQueueDescriptor queueDescriptor = workload->GetData();
+ auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
+ auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[1]);
+ BOOST_TEST(CompareIClTensorHandleShape(inputHandle, { 2, 2 }));
+ BOOST_TEST(CompareIClTensorHandleShape(outputHandle, { 2, 4 }));
+}
+
+BOOST_AUTO_TEST_CASE(CreateLSTMWorkloadFloatWorkload)
+{
+ ClCreateLstmWorkloadTest<ClLstmFloatWorkload>();
+}
+
+template <typename ResizeBilinearWorkloadType, typename armnn::DataType DataType>
+static void ClResizeBilinearWorkloadTest(DataLayout dataLayout)
+{
+ Graph graph;
+ ClWorkloadFactory factory;
+
+ auto workload = CreateResizeBilinearWorkloadTest<ResizeBilinearWorkloadType, DataType>(factory, graph, dataLayout);
+
+ // Checks that inputs/outputs are as we expect them (see definition of CreateResizeBilinearWorkloadTest).
+ ResizeBilinearQueueDescriptor queueDescriptor = workload->GetData();
+ auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
+ auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
+
+ switch (dataLayout)
+ {
+ case DataLayout::NHWC:
+ BOOST_TEST(CompareIClTensorHandleShape(inputHandle, { 2, 4, 4, 3 }));
+ BOOST_TEST(CompareIClTensorHandleShape(outputHandle, { 2, 2, 2, 3 }));
+ break;
+ default: // NCHW
+ BOOST_TEST(CompareIClTensorHandleShape(inputHandle, { 2, 3, 4, 4 }));
+ BOOST_TEST(CompareIClTensorHandleShape(outputHandle, { 2, 3, 2, 2 }));
+ }
+}
+
+BOOST_AUTO_TEST_CASE(CreateResizeBilinearFloat32NchwWorkload)
+{
+ ClResizeBilinearWorkloadTest<ClResizeBilinearFloatWorkload, armnn::DataType::Float32>(DataLayout::NCHW);
+}
+
+BOOST_AUTO_TEST_CASE(CreateResizeBilinearFloat16NchwWorkload)
+{
+ ClResizeBilinearWorkloadTest<ClResizeBilinearFloatWorkload, armnn::DataType::Float16>(DataLayout::NCHW);
+}
+
+BOOST_AUTO_TEST_CASE(CreateResizeBilinearFloat32NhwcWorkload)
+{
+ ClResizeBilinearWorkloadTest<ClResizeBilinearFloatWorkload, armnn::DataType::Float32>(DataLayout::NHWC);
+}
+
+BOOST_AUTO_TEST_CASE(CreateResizeBilinearFloat16NhwcWorkload)
+{
+ ClResizeBilinearWorkloadTest<ClResizeBilinearFloatWorkload, armnn::DataType::Float16>(DataLayout::NHWC);
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/backends/cl/test/ClLayerSupportTests.cpp b/src/backends/cl/test/ClLayerSupportTests.cpp
new file mode 100644
index 0000000000..513366e8dc
--- /dev/null
+++ b/src/backends/cl/test/ClLayerSupportTests.cpp
@@ -0,0 +1,104 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include <armnn/layers/ConvertFp16ToFp32Layer.hpp>
+#include <armnn/layers/ConvertFp32ToFp16Layer.hpp>
+#include <armnn/test/TensorHelpers.hpp>
+
+#include <backends/CpuTensorHandle.hpp>
+#include <backends/cl/ClWorkloadFactory.hpp>
+#include <backends/cl/test/ClContextControlFixture.hpp>
+#include <backends/test/IsLayerSupportedTestImpl.hpp>
+#include <backends/test/LayerTests.hpp>
+
+#include <boost/test/unit_test.hpp>
+
+#include <string>
+
+BOOST_AUTO_TEST_SUITE(ClLayerSupport)
+
+BOOST_FIXTURE_TEST_CASE(IsLayerSupportedFloat16Cl, ClContextControlFixture)
+{
+ armnn::ClWorkloadFactory factory;
+ IsLayerSupportedTests<armnn::ClWorkloadFactory, armnn::DataType::Float16>(&factory);
+}
+
+BOOST_FIXTURE_TEST_CASE(IsLayerSupportedFloat32Cl, ClContextControlFixture)
+{
+ armnn::ClWorkloadFactory factory;
+ IsLayerSupportedTests<armnn::ClWorkloadFactory, armnn::DataType::Float32>(&factory);
+}
+
+BOOST_FIXTURE_TEST_CASE(IsLayerSupportedUint8Cl, ClContextControlFixture)
+{
+ armnn::ClWorkloadFactory factory;
+ IsLayerSupportedTests<armnn::ClWorkloadFactory, armnn::DataType::QuantisedAsymm8>(&factory);
+}
+
+BOOST_FIXTURE_TEST_CASE(IsConvertFp16ToFp32SupportedCl, ClContextControlFixture)
+{
+ std::string reasonIfUnsupported;
+
+ bool result = IsConvertLayerSupportedTests<armnn::ClWorkloadFactory, armnn::ConvertFp16ToFp32Layer,
+ armnn::DataType::Float16, armnn::DataType::Float32>(reasonIfUnsupported);
+
+ BOOST_CHECK(result);
+}
+
+BOOST_FIXTURE_TEST_CASE(IsConvertFp16ToFp32SupportedFp32InputCl, ClContextControlFixture)
+{
+ std::string reasonIfUnsupported;
+
+ bool result = IsConvertLayerSupportedTests<armnn::ClWorkloadFactory, armnn::ConvertFp16ToFp32Layer,
+ armnn::DataType::Float32, armnn::DataType::Float32>(reasonIfUnsupported);
+
+ BOOST_CHECK(!result);
+ BOOST_CHECK_EQUAL(reasonIfUnsupported, "Input should be Float16");
+}
+
+BOOST_FIXTURE_TEST_CASE(IsConvertFp16ToFp32SupportedFp16OutputCl, ClContextControlFixture)
+{
+ std::string reasonIfUnsupported;
+
+ bool result = IsConvertLayerSupportedTests<armnn::ClWorkloadFactory, armnn::ConvertFp16ToFp32Layer,
+ armnn::DataType::Float16, armnn::DataType::Float16>(reasonIfUnsupported);
+
+ BOOST_CHECK(!result);
+ BOOST_CHECK_EQUAL(reasonIfUnsupported, "Output should be Float32");
+}
+
+BOOST_FIXTURE_TEST_CASE(IsConvertFp32ToFp16SupportedCl, ClContextControlFixture)
+{
+ std::string reasonIfUnsupported;
+
+ bool result = IsConvertLayerSupportedTests<armnn::ClWorkloadFactory, armnn::ConvertFp32ToFp16Layer,
+ armnn::DataType::Float32, armnn::DataType::Float16>(reasonIfUnsupported);
+
+ BOOST_CHECK(result);
+}
+
+BOOST_FIXTURE_TEST_CASE(IsConvertFp32ToFp16SupportedFp16InputCl, ClContextControlFixture)
+{
+ std::string reasonIfUnsupported;
+
+ bool result = IsConvertLayerSupportedTests<armnn::ClWorkloadFactory, armnn::ConvertFp32ToFp16Layer,
+ armnn::DataType::Float16, armnn::DataType::Float16>(reasonIfUnsupported);
+
+ BOOST_CHECK(!result);
+ BOOST_CHECK_EQUAL(reasonIfUnsupported, "Input should be Float32");
+}
+
+BOOST_FIXTURE_TEST_CASE(IsConvertFp32ToFp16SupportedFp32OutputCl, ClContextControlFixture)
+{
+ std::string reasonIfUnsupported;
+
+ bool result = IsConvertLayerSupportedTests<armnn::ClWorkloadFactory, armnn::ConvertFp32ToFp16Layer,
+ armnn::DataType::Float32, armnn::DataType::Float32>(reasonIfUnsupported);
+
+ BOOST_CHECK(!result);
+ BOOST_CHECK_EQUAL(reasonIfUnsupported, "Output should be Float16");
+}
+
+BOOST_AUTO_TEST_SUITE_END()
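
The convert-layer cases above all follow the same pattern: query whether a layer configuration is supported and, when it is not, assert on the human-readable reason string. Distilled into a hypothetical helper (the real plumbing lives in backends/test/IsLayerSupportedTestImpl.hpp, and the helper below is only a sketch of the pattern):

#include <boost/test/unit_test.hpp>

#include <string>

// Hypothetical sketch of the support-query pattern; 'isSupported' stands in
// for whichever backend query IsConvertLayerSupportedTests ultimately invokes.
template <typename QueryFn>
void CheckUnsupportedWithReason(QueryFn isSupported, const std::string& expectedReason)
{
    std::string reasonIfUnsupported;
    const bool supported = isSupported(reasonIfUnsupported);

    BOOST_CHECK(!supported);
    BOOST_CHECK_EQUAL(reasonIfUnsupported, expectedReason);
}
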
diff --git a/src/backends/cl/test/ClLayerTests.cpp b/src/backends/cl/test/ClLayerTests.cpp
new file mode 100644
index 0000000000..ab63ba4abb
--- /dev/null
+++ b/src/backends/cl/test/ClLayerTests.cpp
@@ -0,0 +1,338 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "ClContextControlFixture.hpp"
+
+#include "test/TensorHelpers.hpp"
+#include "test/UnitTests.hpp"
+
+#include <backends/CpuTensorHandle.hpp>
+#include <backends/cl/ClLayerSupport.hpp>
+#include <backends/cl/ClWorkloadFactory.hpp>
+#include <backends/cl/workloads/ClWorkloadUtils.hpp>
+#include <backends/reference/RefWorkloadFactory.hpp>
+#include <backends/test/ActivationFixture.hpp>
+#include <backends/test/LayerTests.hpp>
+
+#include <arm_compute/core/CL/CLKernelLibrary.h>
+#include <arm_compute/runtime/CL/CLScheduler.h>
+
+#include <boost/test/unit_test.hpp>
+
+#include <iostream>
+#include <string>
+
+BOOST_FIXTURE_TEST_SUITE(Compute_ArmComputeCl, ClContextControlFixture)
+using FactoryType = armnn::ClWorkloadFactory;
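
Most of what follows uses the ARMNN_AUTO_TEST_CASE macro from test/UnitTests.hpp, which picks up the suite-level FactoryType alias declared above. Roughly speaking (a hedged sketch, not the literal macro, which also checks backend support before comparing), it wraps a layer-test function in a Boost test case and compares its output against the expected reference:

// Hedged sketch of an ARMNN_AUTO_TEST_CASE-style expansion; the layer-test
// functions return a LayerTestResult whose 'output' and 'outputExpected'
// fields are compared with CompareTensors from test/TensorHelpers.hpp.
#define EXAMPLE_AUTO_TEST_CASE(TestName, TestFunction, ...)               \
    BOOST_AUTO_TEST_CASE(TestName)                                        \
    {                                                                     \
        FactoryType factory;                                              \
        auto result = TestFunction(factory, ##__VA_ARGS__);               \
        BOOST_TEST(CompareTensors(result.output, result.outputExpected)); \
    }
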
+
+// ============================================================================
+// UNIT tests
+
+// Activation
+ARMNN_AUTO_TEST_CASE(ConstantLinearActivation, ConstantLinearActivationTest)
+
+ARMNN_AUTO_TEST_CASE(SimpleSoftmaxBeta1, SimpleSoftmaxTest, 1.0f)
+ARMNN_AUTO_TEST_CASE(SimpleSoftmaxBeta2, SimpleSoftmaxTest, 2.0f)
+ARMNN_AUTO_TEST_CASE(SimpleSoftmaxBeta1Uint8, SimpleSoftmaxUint8Test, 1.0f)
+ARMNN_AUTO_TEST_CASE(SimpleSoftmaxBeta2Uint8, SimpleSoftmaxUint8Test, 2.0f)
+
+ARMNN_AUTO_TEST_CASE(ReLu1Uint8, BoundedReLuUint8UpperAndLowerBoundTest)
+ARMNN_AUTO_TEST_CASE(ReLu6Uint8, BoundedReLuUint8UpperBoundOnlyTest)
+
+// Fully Connected
+ARMNN_AUTO_TEST_CASE(SimpleFullyConnected, FullyConnectedFloat32Test, false, false)
+ARMNN_AUTO_TEST_CASE(SimpleFullyConnectedWithBias, FullyConnectedFloat32Test, true, false)
+ARMNN_AUTO_TEST_CASE(SimpleFullyConnectedWithTranspose, FullyConnectedFloat32Test, false, true)
+ARMNN_AUTO_TEST_CASE(FullyConnectedUint8, FullyConnectedUint8Test, false)
+ARMNN_AUTO_TEST_CASE(FullyConnectedBiasedUint8, FullyConnectedUint8Test, true)
+
+ARMNN_AUTO_TEST_CASE(FullyConnectedLarge, FullyConnectedLargeTest, false)
+ARMNN_AUTO_TEST_CASE(FullyConnectedLargeTransposed, FullyConnectedLargeTest, true)
+
+// Convolution
+ARMNN_AUTO_TEST_CASE(SimpleConvolution1d, Convolution1dTest, true)
+
+ARMNN_AUTO_TEST_CASE(SimpleConvolution2d, SimpleConvolution2d3x5Test, true)
+ARMNN_AUTO_TEST_CASE(SimpleConvolution2dSquare, SimpleConvolution2d3x3Test, true)
+ARMNN_AUTO_TEST_CASE(SimpleConvolution2d3x3Uint8, SimpleConvolution2d3x3Uint8Test, true)
+ARMNN_AUTO_TEST_CASE(UnbiasedConvolution2d, SimpleConvolution2d3x5Test, false)
+ARMNN_AUTO_TEST_CASE(UnbiasedConvolution2dSquare, SimpleConvolution2d3x3Test, false)
+ARMNN_AUTO_TEST_CASE(SimpleConvolution2dAsymmetricPadding, Convolution2dAsymmetricPaddingTest)
+
+ARMNN_AUTO_TEST_CASE(SimpleConvolution2dSquareNhwc, SimpleConvolution2d3x3NhwcTest, false)
+
+// Depthwise Convolution
+ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2dDepthMul1, DepthwiseConvolution2dDepthMul1Test, true)
+ARMNN_AUTO_TEST_CASE(UnbiasedDepthwiseConvolution2dDepthMul1, DepthwiseConvolution2dDepthMul1Test, false)
+ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2dDepthMul1Uint8, DepthwiseConvolution2dDepthMul1Uint8Test, true)
+ARMNN_AUTO_TEST_CASE(UnbiasedDepthwiseConvolution2dDepthMul1Uint8, DepthwiseConvolution2dDepthMul1Uint8Test, false)
+
+ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2dAsymmetric, DepthwiseConvolution2dAsymmetricTest, true)
+ARMNN_AUTO_TEST_CASE(UnbiasedDepthwiseConvolution2dAsymmetric, DepthwiseConvolution2dAsymmetricTest, false)
+
+// Softmax
+BOOST_AUTO_TEST_CASE(Softmax4dSupport)
+{
+ const unsigned int numDimensions = 4u;
+ std::array<unsigned int, numDimensions> dimensionSizes;
+ dimensionSizes.fill(1u);
+
+ const armnn::TensorInfo inputInfo(numDimensions, &dimensionSizes.front(), armnn::DataType::Float32);
+ const armnn::TensorInfo outputInfo(numDimensions, &dimensionSizes.front(), armnn::DataType::Float32);
+
+ // 4D Softmax should be reported as unsupported on the CL backend
+ BOOST_TEST(!armnn::IsSoftmaxSupportedCl(inputInfo, outputInfo, armnn::SoftmaxDescriptor()));
+}
+
+// Splitter
+ARMNN_AUTO_TEST_CASE(SimpleSplitter, SplitterTest)
+ARMNN_AUTO_TEST_CASE(SimpleSplitterUint8, SplitterUint8Test)
+
+ARMNN_AUTO_TEST_CASE(CopyViaSplitter, CopyViaSplitterTest)
+ARMNN_AUTO_TEST_CASE(CopyViaSplitterUint8, CopyViaSplitterUint8Test)
+
+// Merger
+ARMNN_AUTO_TEST_CASE(SimpleMerger, MergerTest)
+ARMNN_AUTO_TEST_CASE(MergerUint8, MergerUint8Test)
+
+// Normalization
+ARMNN_AUTO_TEST_CASE(SimpleNormalizationAcross, SimpleNormalizationAcrossTest)
+ARMNN_AUTO_TEST_CASE(SimpleNormalizationWithin, SimpleNormalizationWithinTest)
+ARMNN_AUTO_TEST_CASE(SimpleNormalizationAcrossNhwc, SimpleNormalizationAcrossNhwcTest)
+
+// Pooling
+ARMNN_AUTO_TEST_CASE(SimpleMaxPooling2dSize3x3Stride2x4, SimpleMaxPooling2dSize3x3Stride2x4Test, true)
+ARMNN_AUTO_TEST_CASE(SimpleMaxPooling2dSize3x3Stride2x4Uint8, SimpleMaxPooling2dSize3x3Stride2x4Uint8Test, true)
+
+ARMNN_AUTO_TEST_CASE(IgnorePaddingSimpleMaxPooling2d, IgnorePaddingSimpleMaxPooling2dTest)
+ARMNN_AUTO_TEST_CASE(IgnorePaddingSimpleMaxPooling2dUint8, IgnorePaddingSimpleMaxPooling2dUint8Test)
+ARMNN_AUTO_TEST_CASE(IgnorePaddingMaxPooling2dSize3, IgnorePaddingMaxPooling2dSize3Test)
+ARMNN_AUTO_TEST_CASE(IgnorePaddingMaxPooling2dSize3Uint8, IgnorePaddingMaxPooling2dSize3Uint8Test)
+
+ARMNN_AUTO_TEST_CASE(IgnorePaddingSimpleAveragePooling2d, IgnorePaddingSimpleAveragePooling2dTest)
+ARMNN_AUTO_TEST_CASE(IgnorePaddingSimpleAveragePooling2dUint8, IgnorePaddingSimpleAveragePooling2dUint8Test)
+ARMNN_AUTO_TEST_CASE(IgnorePaddingSimpleAveragePooling2dNoPadding, IgnorePaddingSimpleAveragePooling2dNoPaddingTest)
+ARMNN_AUTO_TEST_CASE(IgnorePaddingSimpleAveragePooling2dNoPaddingUint8,
+ IgnorePaddingSimpleAveragePooling2dNoPaddingUint8Test)
+ARMNN_AUTO_TEST_CASE(IgnorePaddingAveragePooling2dSize3, IgnorePaddingAveragePooling2dSize3Test)
+ARMNN_AUTO_TEST_CASE(IgnorePaddingAveragePooling2dSize3Uint8, IgnorePaddingAveragePooling2dSize3Uint8Test)
+
+ARMNN_AUTO_TEST_CASE(IgnorePaddingSimpleL2Pooling2d, IgnorePaddingSimpleL2Pooling2dTest)
+ARMNN_AUTO_TEST_CASE(UNSUPPORTED_IgnorePaddingSimpleL2Pooling2dUint8, IgnorePaddingSimpleL2Pooling2dUint8Test)
+ARMNN_AUTO_TEST_CASE(IgnorePaddingL2Pooling2dSize3, IgnorePaddingL2Pooling2dSize3Test)
+ARMNN_AUTO_TEST_CASE(UNSUPPORTED_IgnorePaddingL2Pooling2dSize3Uint8, IgnorePaddingL2Pooling2dSize3Uint8Test)
+
+ARMNN_AUTO_TEST_CASE(SimpleAveragePooling2d, SimpleAveragePooling2dTest)
+ARMNN_AUTO_TEST_CASE(SimpleAveragePooling2dNhwc, SimpleAveragePooling2dNhwcTest)
+ARMNN_AUTO_TEST_CASE(SimpleAveragePooling2dUint8, SimpleAveragePooling2dUint8Test)
+ARMNN_AUTO_TEST_CASE(IgnorePaddingAveragePooling2dSize3x2Stride2x2,
+ IgnorePaddingAveragePooling2dSize3x2Stride2x2Test,
+ false)
+ARMNN_AUTO_TEST_CASE(IgnorePaddingAveragePooling2dSize3x2Stride2x2NoPadding,
+ IgnorePaddingAveragePooling2dSize3x2Stride2x2Test,
+ true)
+ARMNN_AUTO_TEST_CASE(LargeTensorsAveragePooling2d, LargeTensorsAveragePooling2dTest)
+ARMNN_AUTO_TEST_CASE(LargeTensorsAveragePooling2dUint8, LargeTensorsAveragePooling2dUint8Test)
+
+ARMNN_AUTO_TEST_CASE(SimpleL2Pooling2d, SimpleL2Pooling2dTest)
+ARMNN_AUTO_TEST_CASE(UNSUPPORTED_SimpleL2Pooling2dUint8, SimpleL2Pooling2dUint8Test)
+ARMNN_AUTO_TEST_CASE(L2Pooling2dSize3Stride1, L2Pooling2dSize3Stride1Test)
+ARMNN_AUTO_TEST_CASE(UNSUPPORTED_L2Pooling2dSize3Stride1Uint8, L2Pooling2dSize3Stride1Uint8Test)
+ARMNN_AUTO_TEST_CASE(L2Pooling2dSize3Stride3, L2Pooling2dSize3Stride3Test)
+ARMNN_AUTO_TEST_CASE(UNSUPPORTED_L2Pooling2dSize3Stride3Uint8, L2Pooling2dSize3Stride3Uint8Test)
+ARMNN_AUTO_TEST_CASE(L2Pooling2dSize3Stride4, L2Pooling2dSize3Stride4Test)
+ARMNN_AUTO_TEST_CASE(UNSUPPORTED_L2Pooling2dSize3Stride4Uint8, L2Pooling2dSize3Stride4Uint8Test)
+ARMNN_AUTO_TEST_CASE(L2Pooling2dSize7, L2Pooling2dSize7Test)
+ARMNN_AUTO_TEST_CASE(UNSUPPORTED_L2Pooling2dSize7Uint8, L2Pooling2dSize7Uint8Test)
+ARMNN_AUTO_TEST_CASE(L2Pooling2dSize9, L2Pooling2dSize9Test)
+ARMNN_AUTO_TEST_CASE(UNSUPPORTED_L2Pooling2dSize9Uint8, L2Pooling2dSize9Uint8Test)
+
+// Add
+ARMNN_AUTO_TEST_CASE(SimpleAdd, AdditionTest)
+ARMNN_AUTO_TEST_CASE(AddBroadcast1Element, AdditionBroadcast1ElementTest)
+ARMNN_AUTO_TEST_CASE(AddBroadcast, AdditionBroadcastTest)
+
+ARMNN_AUTO_TEST_CASE(AdditionUint8, AdditionUint8Test)
+ARMNN_AUTO_TEST_CASE(AddBroadcastUint8, AdditionBroadcastUint8Test)
+ARMNN_AUTO_TEST_CASE(AddBroadcast1ElementUint8, AdditionBroadcast1ElementUint8Test)
+
+// Sub
+ARMNN_AUTO_TEST_CASE(SimpleSub, SubtractionTest)
+
+// Div
+ARMNN_AUTO_TEST_CASE(SimpleDivision, DivisionTest)
+ARMNN_AUTO_TEST_CASE(DivisionByZero, DivisionByZeroTest)
+ARMNN_AUTO_TEST_CASE(DivisionBroadcast1Element, DivisionBroadcast1ElementTest)
+ARMNN_AUTO_TEST_CASE(DivisionBroadcast1DVector, DivisionBroadcast1DVectorTest)
+// NOTE: Quantized division is not supported by CL and is not required by the
+// Android NN API.
+
+// Mul
+ARMNN_AUTO_TEST_CASE(SimpleMultiplication, MultiplicationTest)
+ARMNN_AUTO_TEST_CASE(MultiplicationBroadcast1Element, MultiplicationBroadcast1ElementTest)
+ARMNN_AUTO_TEST_CASE(MultiplicationBroadcast1DVector, MultiplicationBroadcast1DVectorTest)
+ARMNN_AUTO_TEST_CASE(MultiplicationUint8, MultiplicationUint8Test)
+ARMNN_AUTO_TEST_CASE(MultiplicationBroadcast1ElementUint8, MultiplicationBroadcast1ElementUint8Test)
+ARMNN_AUTO_TEST_CASE(MultiplicationBroadcast1DVectorUint8, MultiplicationBroadcast1DVectorUint8Test)
+
+// Batch Norm
+ARMNN_AUTO_TEST_CASE(BatchNorm, BatchNormTest)
+
+// L2 Normalization
+ARMNN_AUTO_TEST_CASE(L2Normalization1d, L2Normalization1dTest)
+ARMNN_AUTO_TEST_CASE(L2Normalization2d, L2Normalization2dTest)
+ARMNN_AUTO_TEST_CASE(L2Normalization3d, L2Normalization3dTest)
+ARMNN_AUTO_TEST_CASE(L2Normalization4d, L2Normalization4dTest)
+
+ARMNN_AUTO_TEST_CASE(L2Normalization1dNhwc, L2Normalization1dNhwcTest)
+ARMNN_AUTO_TEST_CASE(L2Normalization2dNhwc, L2Normalization2dNhwcTest)
+ARMNN_AUTO_TEST_CASE(L2Normalization3dNhwc, L2Normalization3dNhwcTest)
+ARMNN_AUTO_TEST_CASE(L2Normalization4dNhwc, L2Normalization4dNhwcTest)
+
+// Resize Bilinear - NCHW data layout
+ARMNN_AUTO_TEST_CASE(ResizeBilinearNop, ResizeBilinearNopTest)
+ARMNN_AUTO_TEST_CASE(SimpleResizeBilinear, SimpleResizeBilinearTest)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMin, ResizeBilinearSqMinTest)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearMin, ResizeBilinearMinTest)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearMag, ResizeBilinearMagTest)
+
+// Resize Bilinear - NHWC data layout
+ARMNN_AUTO_TEST_CASE(ResizeBilinearNopNhwc, ResizeBilinearNopNhwcTest)
+ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearNhwc, SimpleResizeBilinearNhwcTest)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinNhwc, ResizeBilinearSqMinNhwcTest)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearMinNhwc, ResizeBilinearMinNhwcTest)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearMagNhwc, ResizeBilinearMagNhwcTest)
+
+// Constant
+ARMNN_AUTO_TEST_CASE(Constant, ConstantTest)
+ARMNN_AUTO_TEST_CASE(ConstantUint8, ConstantTestUint8)
+
+// Concat
+ARMNN_AUTO_TEST_CASE(Concatenation1d, Concatenation1dTest)
+ARMNN_AUTO_TEST_CASE(Concatenation1dUint8, Concatenation1dUint8Test)
+
+ARMNN_AUTO_TEST_CASE(Concatenation2dDim0, Concatenation2dDim0Test)
+ARMNN_AUTO_TEST_CASE(Concatenation2dDim0Uint8, Concatenation2dDim0Uint8Test)
+ARMNN_AUTO_TEST_CASE(Concatenation2dDim1, Concatenation2dDim1Test)
+ARMNN_AUTO_TEST_CASE(Concatenation2dDim1Uint8, Concatenation2dDim1Uint8Test)
+
+ARMNN_AUTO_TEST_CASE(Concatenation2dDim0DiffInputDims, Concatenation2dDim0DiffInputDimsTest)
+ARMNN_AUTO_TEST_CASE(Concatenation2dDim0DiffInputDimsUint8, Concatenation2dDim0DiffInputDimsUint8Test)
+ARMNN_AUTO_TEST_CASE(Concatenation2dDim1DiffInputDims, Concatenation2dDim1DiffInputDimsTest)
+ARMNN_AUTO_TEST_CASE(Concatenation2dDim1DiffInputDimsUint8, Concatenation2dDim1DiffInputDimsUint8Test)
+
+ARMNN_AUTO_TEST_CASE(Concatenation3dDim0, Concatenation3dDim0Test)
+ARMNN_AUTO_TEST_CASE(Concatenation3dDim0Uint8, Concatenation3dDim0Uint8Test)
+ARMNN_AUTO_TEST_CASE(Concatenation3dDim1, Concatenation3dDim1Test)
+ARMNN_AUTO_TEST_CASE(Concatenation3dDim1Uint8, Concatenation3dDim1Uint8Test)
+ARMNN_AUTO_TEST_CASE(Concatenation3dDim2, Concatenation3dDim2Test)
+ARMNN_AUTO_TEST_CASE(Concatenation3dDim2Uint8, Concatenation3dDim2Uint8Test)
+
+ARMNN_AUTO_TEST_CASE(Concatenation3dDim0DiffInputDims, Concatenation3dDim0DiffInputDimsTest)
+ARMNN_AUTO_TEST_CASE(Concatenation3dDim0DiffInputDimsUint8, Concatenation3dDim0DiffInputDimsUint8Test)
+ARMNN_AUTO_TEST_CASE(Concatenation3dDim1DiffInputDims, Concatenation3dDim1DiffInputDimsTest)
+ARMNN_AUTO_TEST_CASE(Concatenation3dDim1DiffInputDimsUint8, Concatenation3dDim1DiffInputDimsUint8Test)
+ARMNN_AUTO_TEST_CASE(Concatenation3dDim2DiffInputDims, Concatenation3dDim2DiffInputDimsTest)
+ARMNN_AUTO_TEST_CASE(Concatenation3dDim2DiffInputDimsUint8, Concatenation3dDim2DiffInputDimsUint8Test)
+
+// Floor
+ARMNN_AUTO_TEST_CASE(SimpleFloor, SimpleFloorTest)
+
+// Reshape
+ARMNN_AUTO_TEST_CASE(SimpleReshapeFloat32, SimpleReshapeFloat32Test)
+ARMNN_AUTO_TEST_CASE(SimpleReshapeUint8, SimpleReshapeUint8Test)
+
+// Permute
+ARMNN_AUTO_TEST_CASE(SimplePermuteFloat32, SimplePermuteFloat32Test)
+ARMNN_AUTO_TEST_CASE(SimplePermuteUint8, SimplePermuteUint8Test)
+ARMNN_AUTO_TEST_CASE(PermuteFloat32ValueSet1, PermuteFloat32ValueSet1Test)
+ARMNN_AUTO_TEST_CASE(PermuteFloat32ValueSet2, PermuteFloat32ValueSet2Test)
+ARMNN_AUTO_TEST_CASE(PermuteFloat32ValueSet3, PermuteFloat32ValueSet3Test)
+
+// Lstm
+ARMNN_AUTO_TEST_CASE(LstmLayerFloat32WithCifgWithPeepholeNoProjection,
+ LstmLayerFloat32WithCifgWithPeepholeNoProjectionTest)
+ARMNN_AUTO_TEST_CASE(LstmLayerFloat32NoCifgNoPeepholeNoProjection,
+ LstmLayerFloat32NoCifgNoPeepholeNoProjectionTest)
+ARMNN_AUTO_TEST_CASE(LstmLayerFloat32NoCifgWithPeepholeWithProjection,
+ LstmLayerFloat32NoCifgWithPeepholeWithProjectionTest)
+
+// Convert from Float16 to Float32
+ARMNN_AUTO_TEST_CASE(SimpleConvertFp16ToFp32, SimpleConvertFp16ToFp32Test)
+// Convert from Float32 to Float16
+ARMNN_AUTO_TEST_CASE(SimpleConvertFp32ToFp16, SimpleConvertFp32ToFp16Test)
+
+// ============================================================================
+// COMPARE tests
+
+ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareConv2dWithReference, CompareConvolution2dTest)
+
+ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareDepthwiseConv2dWithReferenceFloat32, CompareDepthwiseConvolution2dTest<float>)
+ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareDepthwiseConv2dWithReferenceUint8, CompareDepthwiseConvolution2dTest<uint8_t>)
+
+ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareNormalizationWithinWithReference, CompareNormalizationTest,
+ armnn::NormalizationAlgorithmChannel::Within,
+ armnn::NormalizationAlgorithmMethod::LocalBrightness)
+ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareNormalizationAcrossWithReference, CompareNormalizationTest,
+ armnn::NormalizationAlgorithmChannel::Across,
+ armnn::NormalizationAlgorithmMethod::LocalBrightness)
+
+ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareSoftmaxBeta1WithReference, CompareSoftmaxTest, 1.0f)
+ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareSoftmaxBeta2WithReference, CompareSoftmaxTest, 2.0f)
+ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareSoftmaxUint8, CompareSoftmaxUint8Test, 1.0f)
+
+ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareMaxPooling2dWithRef, ComparePooling2dTest, armnn::PoolingAlgorithm::Max)
+
+ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareAveragePooling2dWithRef, ComparePooling2dTest, armnn::PoolingAlgorithm::Average)
+ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareAveragePooling2dWithRefUint8, ComparePooling2dUint8Test,
+ armnn::PoolingAlgorithm::Average)
+
+ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareL2Pooling2dWithRef, ComparePooling2dTest, armnn::PoolingAlgorithm::L2)
+
+ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareAddition, CompareAdditionTest)
+
+ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareMultiplicationWithRef, CompareMultiplicationTest)
+
+ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareBatchNorm, CompareBatchNormTest)
+
+ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareReLu1, CompareBoundedReLuTest, 1.0f, -1.0f)
+ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareReLu6, CompareBoundedReLuTest, 6.0f, 0.0f)
+
+// ============================================================================
+// FIXTURE tests
+
+ARMNN_COMPARE_REF_FIXTURE_TEST_CASE(CompareSigmoidActivationWithReference, ActivationFixture,
+ CompareActivationTest, armnn::ActivationFunction::Sigmoid, 5u)
+
+ARMNN_COMPARE_REF_FIXTURE_TEST_CASE(CompareTanhActivationWithReference, ActivationFixture,
+ CompareActivationTest, armnn::ActivationFunction::TanH, 5u)
+
+ARMNN_COMPARE_REF_FIXTURE_TEST_CASE(CompareLinearActivationWithReference, ActivationFixture,
+ CompareActivationTest, armnn::ActivationFunction::Linear, 5u)
+
+ARMNN_COMPARE_REF_FIXTURE_TEST_CASE(CompareReLuActivationWithReference, ActivationFixture,
+ CompareActivationTest, armnn::ActivationFunction::ReLu, 5u)
+
+ARMNN_COMPARE_REF_FIXTURE_TEST_CASE(CompareBoundedReLuActivationWithReference, ActivationFixture,
+ CompareActivationTest, armnn::ActivationFunction::BoundedReLu, 5u)
+ARMNN_COMPARE_REF_FIXTURE_TEST_CASE(CompareBoundedReLuActivationWithReferenceUint8, ActivationFixture,
+ CompareActivationUint8Test, armnn::ActivationFunction::BoundedReLu)
+
+ARMNN_COMPARE_REF_FIXTURE_TEST_CASE(CompareSoftReLuActivationWithReference, ActivationFixture,
+ CompareActivationTest, armnn::ActivationFunction::SoftReLu, 5u)
+
+ARMNN_COMPARE_REF_FIXTURE_TEST_CASE(CompareLeakyReLuActivationWithReference, ActivationFixture,
+ CompareActivationTest, armnn::ActivationFunction::LeakyReLu, 5u)
+
+ARMNN_COMPARE_REF_FIXTURE_TEST_CASE(CompareAbsActivationWithReference, ActivationFixture,
+ CompareActivationTest, armnn::ActivationFunction::Abs, 5u)
+
+ARMNN_COMPARE_REF_FIXTURE_TEST_CASE(CompareSqrtActivationWithReference, PositiveActivationFixture,
+ CompareActivationTest, armnn::ActivationFunction::Sqrt, 5u)
+
+ARMNN_COMPARE_REF_FIXTURE_TEST_CASE(CompareSquareActivationWithReference, ActivationFixture,
+ CompareActivationTest, armnn::ActivationFunction::Square, 5u)
+
+BOOST_AUTO_TEST_SUITE_END()