From 43095f31edf103d71a8e2420b549d21fd349b49e Mon Sep 17 00:00:00 2001 From: arovir01 Date: Tue, 9 Oct 2018 18:04:24 +0100 Subject: IVGCVSW-1988: Refactor backend-specific unit tests Change-Id: I8eca81d2e0780390eaa837c186ffe1c7d41fdebe --- Android.mk | 18 +- CMakeLists.txt | 19 +- src/armnn/test/CreateWorkload.hpp | 1 + src/armnn/test/CreateWorkloadClNeon.hpp | 2 + src/armnn/test/JsonPrinterTests.cpp | 2 +- src/backends/cl/CMakeLists.txt | 3 +- src/backends/cl/backend.cmake | 1 + src/backends/cl/test/CMakeLists.txt | 16 + src/backends/cl/test/ClContextControlFixture.hpp | 34 ++ src/backends/cl/test/ClCreateWorkloadTests.cpp | 678 +++++++++++++++++++++ src/backends/cl/test/ClLayerSupportTests.cpp | 104 ++++ src/backends/cl/test/ClLayerTests.cpp | 338 ++++++++++ src/backends/neon/CMakeLists.txt | 3 +- src/backends/neon/backend.cmake | 1 + src/backends/neon/test/CMakeLists.txt | 11 + src/backends/neon/test/NeonCreateWorkloadTests.cpp | 531 ++++++++++++++++ src/backends/neon/test/NeonLayerSupportTests.cpp | 59 ++ src/backends/neon/test/NeonLayerTests.cpp | 484 +++++++++++++++ src/backends/reference/CMakeLists.txt | 4 +- src/backends/reference/backend.cmake | 1 + src/backends/reference/test/CMakeLists.txt | 11 + .../reference/test/RefCreateWorkloadTests.cpp | 484 +++++++++++++++ .../reference/test/RefLayerSupportTests.cpp | 118 ++++ src/backends/reference/test/RefLayerTests.cpp | 273 +++++++++ src/backends/test/ActivationFixture.hpp | 5 + src/backends/test/ArmComputeCl.cpp | 335 ---------- src/backends/test/ArmComputeNeon.cpp | 485 --------------- src/backends/test/ClContextControlFixture.hpp | 34 -- src/backends/test/CreateWorkloadCl.cpp | 676 -------------------- src/backends/test/CreateWorkloadNeon.cpp | 530 ---------------- src/backends/test/CreateWorkloadRef.cpp | 483 --------------- src/backends/test/IsLayerSupportedTest.cpp | 239 -------- src/backends/test/IsLayerSupportedTestImpl.hpp | 5 - src/backends/test/LayerTests.cpp | 2 +- src/backends/test/Reference.cpp | 
273 --------- 35 files changed, 3179 insertions(+), 3084 deletions(-) create mode 100644 src/backends/cl/test/ClContextControlFixture.hpp create mode 100644 src/backends/cl/test/ClCreateWorkloadTests.cpp create mode 100644 src/backends/cl/test/ClLayerSupportTests.cpp create mode 100644 src/backends/cl/test/ClLayerTests.cpp create mode 100644 src/backends/neon/test/NeonCreateWorkloadTests.cpp create mode 100644 src/backends/neon/test/NeonLayerSupportTests.cpp create mode 100644 src/backends/neon/test/NeonLayerTests.cpp create mode 100644 src/backends/reference/test/RefCreateWorkloadTests.cpp create mode 100644 src/backends/reference/test/RefLayerSupportTests.cpp create mode 100644 src/backends/reference/test/RefLayerTests.cpp delete mode 100644 src/backends/test/ArmComputeCl.cpp delete mode 100644 src/backends/test/ArmComputeNeon.cpp delete mode 100644 src/backends/test/ClContextControlFixture.hpp delete mode 100644 src/backends/test/CreateWorkloadCl.cpp delete mode 100644 src/backends/test/CreateWorkloadNeon.cpp delete mode 100644 src/backends/test/CreateWorkloadRef.cpp delete mode 100644 src/backends/test/IsLayerSupportedTest.cpp delete mode 100644 src/backends/test/Reference.cpp diff --git a/Android.mk b/Android.mk index 95b10b9533..1155153e3f 100644 --- a/Android.mk +++ b/Android.mk @@ -194,17 +194,19 @@ LOCAL_SRC_FILES := \ src/armnn/test/ProfilingEventTest.cpp \ src/armnn/test/ObservableTest.cpp \ src/armnn/test/OptionalTest.cpp \ - src/backends/test/IsLayerSupportedTest.cpp \ - src/backends/test/Reference.cpp \ src/backends/test/WorkloadDataValidation.cpp \ src/backends/test/TensorCopyUtils.cpp \ src/backends/test/LayerTests.cpp \ - src/backends/test/CreateWorkloadRef.cpp \ - src/backends/test/ArmComputeCl.cpp \ - src/backends/test/ArmComputeNeon.cpp \ - src/backends/test/CreateWorkloadCl.cpp \ - src/backends/test/CreateWorkloadNeon.cpp \ - src/backends/test/MemCopyTests.cpp + src/backends/test/MemCopyTests.cpp \ + 
src/backends/cl/test/ClCreateWorkloadTests.cpp \ + src/backends/cl/test/ClLayerSupportTests.cpp \ + src/backends/cl/test/ClLayerTests.cpp \ + src/backends/neon/test/NeonCreateWorkloadTests.cpp \ + src/backends/neon/test/NeonLayerSupportTests.cpp \ + src/backends/neon/test/NeonLayerTests.cpp \ + src/backends/reference/test/RefCreateWorkloadTests.cpp \ + src/backends/reference/test/RefLayerSupportTests.cpp \ + src/backends/reference/test/RefLayerTests.cpp LOCAL_STATIC_LIBRARIES := \ libneuralnetworks_common \ diff --git a/CMakeLists.txt b/CMakeLists.txt index f0fe5a1525..669c92fd3f 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -378,9 +378,7 @@ if(BUILD_UNIT_TESTS) src/armnn/test/InstrumentTests.cpp src/armnn/test/ObservableTest.cpp src/armnn/test/OptionalTest.cpp - src/backends/test/IsLayerSupportedTest.cpp src/backends/test/IsLayerSupportedTestImpl.hpp - src/backends/test/Reference.cpp src/backends/test/WorkloadDataValidation.cpp src/backends/test/TensorCopyUtils.hpp src/backends/test/TensorCopyUtils.cpp @@ -398,22 +396,16 @@ if(BUILD_UNIT_TESTS) src/backends/test/NormTestImpl.hpp src/backends/test/BatchNormTestImpl.hpp src/backends/test/WorkloadTestUtils.hpp - src/backends/test/CreateWorkloadRef.cpp src/backends/test/QuantizeHelper.hpp) if(ARMCOMPUTENEON) list(APPEND unittest_sources - src/backends/test/ArmComputeNeon.cpp - src/backends/test/CreateWorkloadNeon.cpp src/armnn/test/CreateWorkloadClNeon.hpp src/armnn/test/NeonTimerTest.cpp) endif() if(ARMCOMPUTECL) list(APPEND unittest_sources - src/backends/test/ArmComputeCl.cpp - src/backends/test/ClContextControlFixture.hpp - src/backends/test/CreateWorkloadCl.cpp src/armnn/test/CreateWorkloadClNeon.hpp src/armnn/test/OpenClTimerTest.cpp src/armnn/test/FP16SupportTest.cpp) @@ -522,8 +514,15 @@ if(BUILD_UNIT_TESTS) endif() endif() - target_link_libraries(UnitTests armnn) - target_link_libraries(UnitTests armnnUtils) + # the backends under src/backends extend the list of + # static libs UnitTests to link against 
+ list(APPEND armnnUnitTestLibraries armnn armnnUtils) + + foreach(lib ${armnnUnitTestLibraries}) + target_link_libraries(UnitTests ${lib}) + message("Adding library dependency to UnitTests: ${lib}") + endforeach() + target_link_libraries(UnitTests ${CMAKE_THREAD_LIBS_INIT}) target_link_libraries(UnitTests ${Boost_UNIT_TEST_FRAMEWORK_LIBRARY} ${Boost_SYSTEM_LIBRARY} ${Boost_FILESYSTEM_LIBRARY}) diff --git a/src/armnn/test/CreateWorkload.hpp b/src/armnn/test/CreateWorkload.hpp index ada6027756..fed9dd6b33 100644 --- a/src/armnn/test/CreateWorkload.hpp +++ b/src/armnn/test/CreateWorkload.hpp @@ -9,6 +9,7 @@ #include #include +#include #include #include diff --git a/src/armnn/test/CreateWorkloadClNeon.hpp b/src/armnn/test/CreateWorkloadClNeon.hpp index a18e85d5f0..56de085f8e 100644 --- a/src/armnn/test/CreateWorkloadClNeon.hpp +++ b/src/armnn/test/CreateWorkloadClNeon.hpp @@ -5,6 +5,8 @@ #pragma once #include "CreateWorkload.hpp" + +#include #include #if ARMCOMPUTECL_ENABLED diff --git a/src/armnn/test/JsonPrinterTests.cpp b/src/armnn/test/JsonPrinterTests.cpp index 9c24fdaa7b..44609ea6f9 100644 --- a/src/armnn/test/JsonPrinterTests.cpp +++ b/src/armnn/test/JsonPrinterTests.cpp @@ -14,7 +14,7 @@ #include #include #include -#include +#include #include BOOST_FIXTURE_TEST_SUITE(JsonPrinterTests, ClProfilingContextControlFixture) diff --git a/src/backends/cl/CMakeLists.txt b/src/backends/cl/CMakeLists.txt index a7ce893f20..04da6ddcff 100644 --- a/src/backends/cl/CMakeLists.txt +++ b/src/backends/cl/CMakeLists.txt @@ -15,7 +15,8 @@ list(APPEND armnnClBackend_sources ) if(ARMCOMPUTECL) - add_subdirectory(workloads test) + add_subdirectory(workloads) + add_subdirectory(test) endif() add_library(armnnClBackend STATIC ${armnnClBackend_sources}) diff --git a/src/backends/cl/backend.cmake b/src/backends/cl/backend.cmake index 1af88e3c9b..f8a1c1c62a 100644 --- a/src/backends/cl/backend.cmake +++ b/src/backends/cl/backend.cmake @@ -6,6 +6,7 @@ if(ARMCOMPUTECL) 
add_subdirectory(${PROJECT_SOURCE_DIR}/src/backends/cl) list(APPEND armnnLibraries armnnClBackend armnnClBackendWorkloads) + list(APPEND armnnUnitTestLibraries armnnClBackendUnitTests) else() message("CL backend is disabled") add_subdirectory(${PROJECT_SOURCE_DIR}/src/backends/cl) diff --git a/src/backends/cl/test/CMakeLists.txt b/src/backends/cl/test/CMakeLists.txt index e69de29bb2..795a7dc9d2 100644 --- a/src/backends/cl/test/CMakeLists.txt +++ b/src/backends/cl/test/CMakeLists.txt @@ -0,0 +1,16 @@ +# +# Copyright © 2017 Arm Ltd. All rights reserved. +# SPDX-License-Identifier: MIT +# + +list(APPEND armnnClBackendUnitTests_sources + ClContextControlFixture.hpp + ClCreateWorkloadTests.cpp + ClLayerSupportTests.cpp + ClLayerTests.cpp +) + +add_library(armnnClBackendUnitTests STATIC ${armnnClBackendUnitTests_sources}) +target_include_directories(armnnClBackendUnitTests PRIVATE ${PROJECT_SOURCE_DIR}/src) +target_include_directories(armnnClBackendUnitTests PRIVATE ${PROJECT_SOURCE_DIR}/src/armnn) +target_include_directories(armnnClBackendUnitTests PRIVATE ${PROJECT_SOURCE_DIR}/src/armnnUtils) \ No newline at end of file diff --git a/src/backends/cl/test/ClContextControlFixture.hpp b/src/backends/cl/test/ClContextControlFixture.hpp new file mode 100644 index 0000000000..fd53e3fcf3 --- /dev/null +++ b/src/backends/cl/test/ClContextControlFixture.hpp @@ -0,0 +1,34 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. 
+// SPDX-License-Identifier: MIT +// + +#pragma once + +#include + +template +struct ClContextControlFixtureBase +{ + static ClContextControlFixtureBase*& Instance() + { + static ClContextControlFixtureBase* s_Instance = nullptr; + return s_Instance; + } + + // Initialising ClContextControl to ensure OpenCL is loaded correctly for each test case + ClContextControlFixtureBase() + : m_ClContextControl(nullptr, ProfilingEnabled) + { + Instance() = this; + } + ~ClContextControlFixtureBase() + { + Instance() = nullptr; + } + + armnn::ClContextControl m_ClContextControl; +}; + +using ClContextControlFixture = ClContextControlFixtureBase; +using ClProfilingContextControlFixture = ClContextControlFixtureBase; diff --git a/src/backends/cl/test/ClCreateWorkloadTests.cpp b/src/backends/cl/test/ClCreateWorkloadTests.cpp new file mode 100644 index 0000000000..659ba82e8a --- /dev/null +++ b/src/backends/cl/test/ClCreateWorkloadTests.cpp @@ -0,0 +1,678 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// SPDX-License-Identifier: MIT +// + +#include "ClContextControlFixture.hpp" + +#include +#include +#include +#include +#include +#include + +#include + +boost::test_tools::predicate_result CompareIClTensorHandleShape(IClTensorHandle* tensorHandle, + std::initializer_list expectedDimensions) +{ + return CompareTensorHandleShape(tensorHandle, expectedDimensions); +} + +BOOST_FIXTURE_TEST_SUITE(CreateWorkloadCl, ClContextControlFixture) + +template +static void ClCreateActivationWorkloadTest() +{ + Graph graph; + ClWorkloadFactory factory; + + auto workload = CreateActivationWorkloadTest(factory, graph); + + // Checks that inputs/outputs are as we expect them (see definition of CreateActivationWorkloadTest). 
+ ActivationQueueDescriptor queueDescriptor = workload->GetData(); + auto inputHandle = boost::polymorphic_downcast(queueDescriptor.m_Inputs[0]); + auto outputHandle = boost::polymorphic_downcast(queueDescriptor.m_Outputs[0]); + + BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {1})); + BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {1})); +} + +BOOST_AUTO_TEST_CASE(CreateActivationFloatWorkload) +{ + ClCreateActivationWorkloadTest(); +} + +BOOST_AUTO_TEST_CASE(CreateActivationFloat16Workload) +{ + ClCreateActivationWorkloadTest(); +} + +template +static void ClCreateArithmethicWorkloadTest() +{ + Graph graph; + ClWorkloadFactory factory; + auto workload = CreateArithmeticWorkloadTest(factory, graph); + + // Checks that inputs/outputs are as we expect them (see definition of CreateArithmeticWorkloadTest). + DescriptorType queueDescriptor = workload->GetData(); + auto inputHandle1 = boost::polymorphic_downcast(queueDescriptor.m_Inputs[0]); + auto inputHandle2 = boost::polymorphic_downcast(queueDescriptor.m_Inputs[1]); + auto outputHandle = boost::polymorphic_downcast(queueDescriptor.m_Outputs[0]); + BOOST_TEST(CompareIClTensorHandleShape(inputHandle1, {2, 3})); + BOOST_TEST(CompareIClTensorHandleShape(inputHandle2, {2, 3})); + BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {2, 3})); +} + +BOOST_AUTO_TEST_CASE(CreateAdditionFloatWorkload) +{ + ClCreateArithmethicWorkloadTest(); +} + +BOOST_AUTO_TEST_CASE(CreateAdditionFloat16Workload) +{ + ClCreateArithmethicWorkloadTest(); +} + +BOOST_AUTO_TEST_CASE(CreateSubtractionFloatWorkload) +{ + ClCreateArithmethicWorkloadTest(); +} + +BOOST_AUTO_TEST_CASE(CreateSubtractionFloat16Workload) +{ + ClCreateArithmethicWorkloadTest(); +} + +BOOST_AUTO_TEST_CASE(CreateMultiplicationFloatWorkloadTest) +{ + ClCreateArithmethicWorkloadTest(); +} + +BOOST_AUTO_TEST_CASE(CreateMultiplicationFloat16WorkloadTest) +{ + ClCreateArithmethicWorkloadTest(); +} + +BOOST_AUTO_TEST_CASE(CreateMultiplicationUint8WorkloadTest) +{ + 
ClCreateArithmethicWorkloadTest(); +} + +BOOST_AUTO_TEST_CASE(CreateDivisionFloatWorkloadTest) +{ + ClCreateArithmethicWorkloadTest(); +} + +BOOST_AUTO_TEST_CASE(CreateDivisionFloat16WorkloadTest) +{ + ClCreateArithmethicWorkloadTest(); +} + +template +static void ClCreateBatchNormalizationWorkloadTest() +{ + Graph graph; + ClWorkloadFactory factory; + + auto workload = CreateBatchNormalizationWorkloadTest + (factory, graph); + + // Checks that inputs/outputs are as we expect them (see definition of CreateBatchNormalizationWorkloadTest). + BatchNormalizationQueueDescriptor queueDescriptor = workload->GetData(); + auto inputHandle = boost::polymorphic_downcast(queueDescriptor.m_Inputs[0]); + auto outputHandle = boost::polymorphic_downcast(queueDescriptor.m_Outputs[0]); + + BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {2, 3, 1, 1})); + BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {2, 3, 1, 1})); +} + +BOOST_AUTO_TEST_CASE(CreateBatchNormalizationFloatWorkload) +{ + ClCreateBatchNormalizationWorkloadTest(); +} + +BOOST_AUTO_TEST_CASE(CreateBatchNormalizationFloat16Workload) +{ + ClCreateBatchNormalizationWorkloadTest(); +} + +BOOST_AUTO_TEST_CASE(CreateConvertFp16ToFp32Workload) +{ + Graph graph; + ClWorkloadFactory factory; + auto workload = CreateConvertFp16ToFp32WorkloadTest(factory, graph); + + ConvertFp16ToFp32QueueDescriptor queueDescriptor = workload->GetData(); + auto inputHandle = boost::polymorphic_downcast(queueDescriptor.m_Inputs[0]); + auto outputHandle = boost::polymorphic_downcast(queueDescriptor.m_Outputs[0]); + + BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {3, 2, 3})); + BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {3, 2, 3})); + BOOST_TEST((inputHandle->GetTensor().info()->data_type() == arm_compute::DataType::F16)); + BOOST_TEST((outputHandle->GetTensor().info()->data_type() == arm_compute::DataType::F32)); +} + +BOOST_AUTO_TEST_CASE(CreateConvertFp32ToFp16Workload) +{ + Graph graph; + ClWorkloadFactory factory; + auto 
workload = CreateConvertFp32ToFp16WorkloadTest(factory, graph); + + ConvertFp32ToFp16QueueDescriptor queueDescriptor = workload->GetData(); + auto inputHandle = boost::polymorphic_downcast(queueDescriptor.m_Inputs[0]); + auto outputHandle = boost::polymorphic_downcast(queueDescriptor.m_Outputs[0]); + + BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {3, 2, 3})); + BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {3, 2, 3})); + BOOST_TEST((inputHandle->GetTensor().info()->data_type() == arm_compute::DataType::F32)); + BOOST_TEST((outputHandle->GetTensor().info()->data_type() == arm_compute::DataType::F16)); +} + +template +static void ClConvolution2dWorkloadTest(DataLayout dataLayout) +{ + Graph graph; + ClWorkloadFactory factory; + auto workload = CreateConvolution2dWorkloadTest(factory, + graph, + dataLayout); + + std::initializer_list inputShape = (dataLayout == DataLayout::NCHW) ? + std::initializer_list({2, 3, 8, 16}) : std::initializer_list({2, 8, 16, 3}); + std::initializer_list outputShape = (dataLayout == DataLayout::NCHW) ? + std::initializer_list({2, 2, 2, 10}) : std::initializer_list({2, 2, 10, 2}); + + // Checks that outputs and inputs are as we expect them (see definition of CreateConvolution2dWorkloadTest). 
+ Convolution2dQueueDescriptor queueDescriptor = workload->GetData(); + auto inputHandle = boost::polymorphic_downcast(queueDescriptor.m_Inputs[0]); + auto outputHandle = boost::polymorphic_downcast(queueDescriptor.m_Outputs[0]); + BOOST_TEST(CompareIClTensorHandleShape(inputHandle, inputShape)); + BOOST_TEST(CompareIClTensorHandleShape(outputHandle, outputShape)); +} + +BOOST_AUTO_TEST_CASE(CreateConvolution2dFloatNchwWorkload) +{ + ClConvolution2dWorkloadTest(DataLayout::NCHW); +} + +BOOST_AUTO_TEST_CASE(CreateConvolution2dFloatNhwcWorkload) +{ + ClConvolution2dWorkloadTest(DataLayout::NHWC); +} + +BOOST_AUTO_TEST_CASE(CreateConvolution2dFloat16NchwWorkload) +{ + ClConvolution2dWorkloadTest(DataLayout::NCHW); +} + +BOOST_AUTO_TEST_CASE(CreateConvolution2dFloat16NhwcWorkload) +{ + ClConvolution2dWorkloadTest(DataLayout::NHWC); +} + +template +static void ClDirectConvolution2dWorkloadTest() +{ + Graph graph; + ClWorkloadFactory factory; + auto workload = CreateDirectConvolution2dWorkloadTest(factory, graph); + + // Checks that outputs and inputs are as we expect them (see definition of CreateDirectConvolution2dWorkloadTest). 
+ Convolution2dQueueDescriptor queueDescriptor = workload->GetData(); + auto inputHandle = boost::polymorphic_downcast(queueDescriptor.m_Inputs[0]); + auto outputHandle = boost::polymorphic_downcast(queueDescriptor.m_Outputs[0]); + BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {2, 3, 6, 6})); + BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {2, 2, 6, 6})); +} + +BOOST_AUTO_TEST_CASE(CreateDirectConvolution2dFloatWorkload) +{ + ClDirectConvolution2dWorkloadTest(); +} + +BOOST_AUTO_TEST_CASE(CreateDirectConvolution2dFloat16Workload) +{ + ClDirectConvolution2dWorkloadTest(); +} + +BOOST_AUTO_TEST_CASE(CreateDirectConvolution2dUint8Workload) +{ + ClDirectConvolution2dWorkloadTest(); +} + +template +static void ClCreateFullyConnectedWorkloadTest() +{ + Graph graph; + ClWorkloadFactory factory; + auto workload = + CreateFullyConnectedWorkloadTest(factory, graph); + + // Checks that outputs and inputs are as we expect them (see definition of CreateFullyConnectedWorkloadTest). + FullyConnectedQueueDescriptor queueDescriptor = workload->GetData(); + auto inputHandle = boost::polymorphic_downcast(queueDescriptor.m_Inputs[0]); + auto outputHandle = boost::polymorphic_downcast(queueDescriptor.m_Outputs[0]); + BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {3, 1, 4, 5})); + BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {3, 7})); +} + + +BOOST_AUTO_TEST_CASE(CreateFullyConnectedFloatWorkloadTest) +{ + ClCreateFullyConnectedWorkloadTest(); +} + +BOOST_AUTO_TEST_CASE(CreateFullyConnectedFloat16WorkloadTest) +{ + ClCreateFullyConnectedWorkloadTest(); +} + +template +static void ClNormalizationWorkloadTest(DataLayout dataLayout) +{ + Graph graph; + ClWorkloadFactory factory; + + auto workload = CreateNormalizationWorkloadTest + (factory, graph, dataLayout); + + // Checks that inputs/outputs are as we expect them (see definition of CreateNormalizationWorkloadTest). 
+ NormalizationQueueDescriptor queueDescriptor = workload->GetData(); + auto inputHandle = boost::polymorphic_downcast(queueDescriptor.m_Inputs[0]); + auto outputHandle = boost::polymorphic_downcast(queueDescriptor.m_Outputs[0]); + + BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {3, 5, 5, 1})); + BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {3, 5, 5, 1})); +} + +BOOST_AUTO_TEST_CASE(CreateNormalizationFloat32NchwWorkload) +{ + ClNormalizationWorkloadTest(DataLayout::NCHW); +} + +BOOST_AUTO_TEST_CASE(CreateNormalizationFloat16NchwWorkload) +{ + ClNormalizationWorkloadTest(DataLayout::NCHW); +} + +BOOST_AUTO_TEST_CASE(CreateNormalizationFloat32NhwcWorkload) +{ + ClNormalizationWorkloadTest(DataLayout::NHWC); +} + +BOOST_AUTO_TEST_CASE(CreateNormalizationFloat16NhwcWorkload) +{ + ClNormalizationWorkloadTest(DataLayout::NHWC); +} + +template +static void ClPooling2dWorkloadTest(DataLayout dataLayout) +{ + Graph graph; + ClWorkloadFactory factory; + + auto workload = CreatePooling2dWorkloadTest(factory, graph, dataLayout); + + std::initializer_list inputShape = (dataLayout == DataLayout::NCHW) ? + std::initializer_list({3, 2, 5, 5}) : std::initializer_list({3, 5, 5, 2}); + std::initializer_list outputShape = (dataLayout == DataLayout::NCHW) ? + std::initializer_list({3, 2, 2, 4}) : std::initializer_list({3, 2, 4, 2}); + + // Check that inputs/outputs are as we expect them (see definition of CreatePooling2dWorkloadTest). 
+ Pooling2dQueueDescriptor queueDescriptor = workload->GetData(); + auto inputHandle = boost::polymorphic_downcast(queueDescriptor.m_Inputs[0]); + auto outputHandle = boost::polymorphic_downcast(queueDescriptor.m_Outputs[0]); + + BOOST_TEST(CompareIClTensorHandleShape(inputHandle, inputShape)); + BOOST_TEST(CompareIClTensorHandleShape(outputHandle, outputShape)); +} + +BOOST_AUTO_TEST_CASE(CreatePooling2dFloatNchwWorkload) +{ + ClPooling2dWorkloadTest(DataLayout::NCHW); +} + +BOOST_AUTO_TEST_CASE(CreatePooling2dFloatNhwcWorkload) +{ + ClPooling2dWorkloadTest(DataLayout::NHWC); +} + +BOOST_AUTO_TEST_CASE(CreatePooling2dFloat16NchwWorkload) +{ + ClPooling2dWorkloadTest(DataLayout::NCHW); +} + +BOOST_AUTO_TEST_CASE(CreatePooling2dFloat16NhwcWorkload) +{ + ClPooling2dWorkloadTest(DataLayout::NHWC); +} + +template +static void ClCreateReshapeWorkloadTest() +{ + Graph graph; + ClWorkloadFactory factory; + + auto workload = CreateReshapeWorkloadTest(factory, graph); + + // Checks that outputs and inputs are as we expect them (see definition of CreateReshapeWorkloadTest). + ReshapeQueueDescriptor queueDescriptor = workload->GetData(); + auto inputHandle = boost::polymorphic_downcast(queueDescriptor.m_Inputs[0]); + auto outputHandle = boost::polymorphic_downcast(queueDescriptor.m_Outputs[0]); + + BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {4, 1})); + BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {4})); // Leading size 1 dimensions are collapsed by ACL. 
+} + +BOOST_AUTO_TEST_CASE(CreateReshapeFloatWorkload) +{ + ClCreateReshapeWorkloadTest(); +} + +BOOST_AUTO_TEST_CASE(CreateReshapeFloat16Workload) +{ + ClCreateReshapeWorkloadTest(); +} + +BOOST_AUTO_TEST_CASE(CreateReshapeUint8Workload) +{ + ClCreateReshapeWorkloadTest(); +} + +template +static void ClSoftmaxWorkloadTest() +{ + Graph graph; + ClWorkloadFactory factory; + + auto workload = CreateSoftmaxWorkloadTest(factory, graph); + + // Checks that inputs/outputs are as we expect them (see definition of ClSoftmaxFloatWorkload). + SoftmaxQueueDescriptor queueDescriptor = workload->GetData(); + auto inputHandle = boost::polymorphic_downcast(queueDescriptor.m_Inputs[0]); + auto outputHandle = boost::polymorphic_downcast(queueDescriptor.m_Outputs[0]); + + BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {4, 1})); + BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {4, 1})); +} + + +BOOST_AUTO_TEST_CASE(CreateSoftmaxFloatWorkloadTest) +{ + ClSoftmaxWorkloadTest(); +} + +BOOST_AUTO_TEST_CASE(CreateSoftmaxFloat16WorkloadTest) +{ + ClSoftmaxWorkloadTest(); +} + +template +static void ClSplitterWorkloadTest() +{ + Graph graph; + ClWorkloadFactory factory; + + auto workload = CreateSplitterWorkloadTest(factory, graph); + + // Checks that outputs are as we expect them (see definition of CreateSplitterWorkloadTest). 
+ SplitterQueueDescriptor queueDescriptor = workload->GetData(); + auto inputHandle = boost::polymorphic_downcast(queueDescriptor.m_Inputs[0]); + BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {5, 7, 7})); + + auto outputHandle1 = boost::polymorphic_downcast(queueDescriptor.m_Outputs[1]); + BOOST_TEST(CompareIClTensorHandleShape(outputHandle1, {2, 7, 7})); + + auto outputHandle2 = boost::polymorphic_downcast(queueDescriptor.m_Outputs[2]); + BOOST_TEST(CompareIClTensorHandleShape(outputHandle2, {2, 7, 7})); + + auto outputHandle0 = boost::polymorphic_downcast(queueDescriptor.m_Outputs[0]); + // NOTE: At the moment the CL collapses the tensor to a 2 dim when dimension zero = 1 + // we are raising this difference between the NEON and CL libs as an issue with the compute library team. + BOOST_TEST(CompareIClTensorHandleShape(outputHandle0, {7, 7})); +} + +BOOST_AUTO_TEST_CASE(CreateSplitterFloatWorkload) +{ + ClSplitterWorkloadTest(); +} + +BOOST_AUTO_TEST_CASE(CreateSplitterFloat16Workload) +{ + ClSplitterWorkloadTest(); +} + +template +static void ClSplitterMergerTest() +{ + // Tests that it is possible to decide which output of the splitter layer + // should be lined to which input of the merger layer. + // We test that is is possible to specify 0th output + // of the splitter to be the 1st input to the merger and the 1st output of the splitter to be 0th input + // of the merger. + + Graph graph; + ClWorkloadFactory factory; + + auto workloads = + CreateSplitterMergerWorkloadTest + (factory, graph); + + auto wlSplitter = std::move(workloads.first); + auto wlMerger = std::move(workloads.second); + + //Checks that the index of inputs/outputs matches what we declared on InputDescriptor construction. 
+ armnn::ClSubTensorHandle* sOut0 = dynamic_cast(wlSplitter->GetData().m_Outputs[0]); + armnn::ClSubTensorHandle* sOut1 = dynamic_cast(wlSplitter->GetData().m_Outputs[1]); + armnn::ClSubTensorHandle* mIn0 = dynamic_cast(wlMerger->GetData().m_Inputs[0]); + armnn::ClSubTensorHandle* mIn1 = dynamic_cast(wlMerger->GetData().m_Inputs[1]); + + BOOST_TEST(sOut0); + BOOST_TEST(sOut1); + BOOST_TEST(mIn0); + BOOST_TEST(mIn1); + + //Fliped order of inputs/outputs. + bool validDataPointers = (sOut0 == mIn1) && (sOut1 == mIn0); + BOOST_TEST(validDataPointers); + + + //Also make sure that the inputs are subtensors of one tensor and outputs are sub tensors of another tensor. + bool validSubTensorParents = (mIn0->GetTensor().parent() == mIn1->GetTensor().parent()) + && (sOut0->GetTensor().parent() == sOut1->GetTensor().parent()); + + BOOST_TEST(validSubTensorParents); +} + +BOOST_AUTO_TEST_CASE(CreateSplitterMergerFloatWorkload) +{ + ClSplitterMergerTest(); +} + +BOOST_AUTO_TEST_CASE(CreateSplitterMergerFloat16Workload) +{ + ClSplitterMergerTest(); +} + + +BOOST_AUTO_TEST_CASE(CreateSingleOutputMultipleInputs) +{ + // Test that it is possible to assign multiple (two) different layers to each of the outputs of a splitter layer. + // We create a splitter with two outputs. That each of those outputs is used by two different activation layers. + + Graph graph; + ClWorkloadFactory factory; + std::unique_ptr wlSplitter; + std::unique_ptr wlActiv0_0; + std::unique_ptr wlActiv0_1; + std::unique_ptr wlActiv1_0; + std::unique_ptr wlActiv1_1; + + CreateSplitterMultipleInputsOneOutputWorkloadTest(factory, graph, wlSplitter, wlActiv0_0, wlActiv0_1, + wlActiv1_0, wlActiv1_1); + + //Checks that the index of inputs/outputs matches what we declared on InputDescriptor construction. 
+ armnn::ClSubTensorHandle* sOut0 = dynamic_cast(wlSplitter->GetData().m_Outputs[0]); + armnn::ClSubTensorHandle* sOut1 = dynamic_cast(wlSplitter->GetData().m_Outputs[1]); + armnn::ClSubTensorHandle* activ0_0Im = dynamic_cast(wlActiv0_0->GetData().m_Inputs[0]); + armnn::ClSubTensorHandle* activ0_1Im = dynamic_cast(wlActiv0_1->GetData().m_Inputs[0]); + armnn::ClSubTensorHandle* activ1_0Im = dynamic_cast(wlActiv1_0->GetData().m_Inputs[0]); + armnn::ClSubTensorHandle* activ1_1Im = dynamic_cast(wlActiv1_1->GetData().m_Inputs[0]); + + + BOOST_TEST(sOut0); + BOOST_TEST(sOut1); + BOOST_TEST(activ0_0Im); + BOOST_TEST(activ0_1Im); + BOOST_TEST(activ1_0Im); + BOOST_TEST(activ1_1Im); + + bool validDataPointers = (sOut0 == activ0_0Im) && (sOut0 == activ0_1Im) && + (sOut1 == activ1_0Im) && (sOut1 == activ1_1Im); + + BOOST_TEST(validDataPointers); +} + +BOOST_AUTO_TEST_CASE(CreateMemCopyWorkloadsCl) +{ + ClWorkloadFactory factory; + CreateMemCopyWorkloads(factory); +} + +template +static void ClL2NormalizationWorkloadTest(DataLayout dataLayout) +{ + Graph graph; + ClWorkloadFactory factory; + + auto workload = CreateL2NormalizationWorkloadTest + (factory, graph, dataLayout); + + // Checks that inputs/outputs are as we expect them (see definition of CreateNormalizationWorkloadTest). 
+ L2NormalizationQueueDescriptor queueDescriptor = workload->GetData(); + auto inputHandle = boost::polymorphic_downcast(queueDescriptor.m_Inputs[0]); + auto outputHandle = boost::polymorphic_downcast(queueDescriptor.m_Outputs[0]); + + BOOST_TEST(CompareIClTensorHandleShape(inputHandle, { 5, 20, 50, 67 })); + BOOST_TEST(CompareIClTensorHandleShape(outputHandle, { 5, 20, 50, 67 })); +} + +BOOST_AUTO_TEST_CASE(CreateL2NormalizationFloatNchwWorkload) +{ + ClL2NormalizationWorkloadTest(DataLayout::NCHW); +} + +BOOST_AUTO_TEST_CASE(CreateL2NormalizationFloatNhwcWorkload) +{ + ClL2NormalizationWorkloadTest(DataLayout::NHWC); +} + +BOOST_AUTO_TEST_CASE(CreateL2NormalizationFloat16NchwWorkload) +{ + ClL2NormalizationWorkloadTest(DataLayout::NCHW); +} + +BOOST_AUTO_TEST_CASE(CreateL2NormalizationFloat16NhwcWorkload) +{ + ClL2NormalizationWorkloadTest(DataLayout::NHWC); +} + +template +static void ClCreateLstmWorkloadTest() +{ + Graph graph; + ClWorkloadFactory factory; + auto workload = CreateLstmWorkloadTest(factory, graph); + + LstmQueueDescriptor queueDescriptor = workload->GetData(); + auto inputHandle = boost::polymorphic_downcast(queueDescriptor.m_Inputs[0]); + auto outputHandle = boost::polymorphic_downcast(queueDescriptor.m_Outputs[1]); + BOOST_TEST(CompareIClTensorHandleShape(inputHandle, { 2, 2 })); + BOOST_TEST(CompareIClTensorHandleShape(outputHandle, { 2, 4 })); +} + +BOOST_AUTO_TEST_CASE(CreateLSTMWorkloadFloatWorkload) +{ + ClCreateLstmWorkloadTest(); +} + +template +static void ClResizeBilinearWorkloadTest(DataLayout dataLayout) +{ + Graph graph; + ClWorkloadFactory factory; + + auto workload = CreateResizeBilinearWorkloadTest(factory, graph, dataLayout); + + // Checks that inputs/outputs are as we expect them (see definition of CreateResizeBilinearWorkloadTest). 
+ ResizeBilinearQueueDescriptor queueDescriptor = workload->GetData(); + auto inputHandle = boost::polymorphic_downcast(queueDescriptor.m_Inputs[0]); + auto outputHandle = boost::polymorphic_downcast(queueDescriptor.m_Outputs[0]); + + switch (dataLayout) + { + case DataLayout::NHWC: + BOOST_TEST(CompareIClTensorHandleShape(inputHandle, { 2, 4, 4, 3 })); + BOOST_TEST(CompareIClTensorHandleShape(outputHandle, { 2, 2, 2, 3 })); + break; + default: // NCHW + BOOST_TEST(CompareIClTensorHandleShape(inputHandle, { 2, 3, 4, 4 })); + BOOST_TEST(CompareIClTensorHandleShape(outputHandle, { 2, 3, 2, 2 })); + } +} + +BOOST_AUTO_TEST_CASE(CreateResizeBilinearFloat32NchwWorkload) +{ + ClResizeBilinearWorkloadTest(DataLayout::NCHW); +} + +BOOST_AUTO_TEST_CASE(CreateResizeBilinearFloat16NchwWorkload) +{ + ClResizeBilinearWorkloadTest(DataLayout::NCHW); +} + +BOOST_AUTO_TEST_CASE(CreateResizeBilinearFloat32NhwcWorkload) +{ + ClResizeBilinearWorkloadTest(DataLayout::NHWC); +} + +BOOST_AUTO_TEST_CASE(CreateResizeBilinearFloat16NhwcWorkload) +{ + ClResizeBilinearWorkloadTest(DataLayout::NHWC); +} + +BOOST_AUTO_TEST_SUITE_END() diff --git a/src/backends/cl/test/ClLayerSupportTests.cpp b/src/backends/cl/test/ClLayerSupportTests.cpp new file mode 100644 index 0000000000..513366e8dc --- /dev/null +++ b/src/backends/cl/test/ClLayerSupportTests.cpp @@ -0,0 +1,104 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. 
+// SPDX-License-Identifier: MIT +// + +#include +#include +#include + +#include +#include +#include +#include +#include + +#include + +#include + +BOOST_AUTO_TEST_SUITE(ClLayerSupport) + +BOOST_FIXTURE_TEST_CASE(IsLayerSupportedFloat16Cl, ClContextControlFixture) +{ + armnn::ClWorkloadFactory factory; + IsLayerSupportedTests(&factory); +} + +BOOST_FIXTURE_TEST_CASE(IsLayerSupportedFloat32Cl, ClContextControlFixture) +{ + armnn::ClWorkloadFactory factory; + IsLayerSupportedTests(&factory); +} + +BOOST_FIXTURE_TEST_CASE(IsLayerSupportedUint8Cl, ClContextControlFixture) +{ + armnn::ClWorkloadFactory factory; + IsLayerSupportedTests(&factory); +} + +BOOST_FIXTURE_TEST_CASE(IsConvertFp16ToFp32SupportedCl, ClContextControlFixture) +{ + std::string reasonIfUnsupported; + + bool result = IsConvertLayerSupportedTests(reasonIfUnsupported); + + BOOST_CHECK(result); +} + +BOOST_FIXTURE_TEST_CASE(IsConvertFp16ToFp32SupportedFp32InputCl, ClContextControlFixture) +{ + std::string reasonIfUnsupported; + + bool result = IsConvertLayerSupportedTests(reasonIfUnsupported); + + BOOST_CHECK(!result); + BOOST_CHECK_EQUAL(reasonIfUnsupported, "Input should be Float16"); +} + +BOOST_FIXTURE_TEST_CASE(IsConvertFp16ToFp32SupportedFp16OutputCl, ClContextControlFixture) +{ + std::string reasonIfUnsupported; + + bool result = IsConvertLayerSupportedTests(reasonIfUnsupported); + + BOOST_CHECK(!result); + BOOST_CHECK_EQUAL(reasonIfUnsupported, "Output should be Float32"); +} + +BOOST_FIXTURE_TEST_CASE(IsConvertFp32ToFp16SupportedCl, ClContextControlFixture) +{ + std::string reasonIfUnsupported; + + bool result = IsConvertLayerSupportedTests(reasonIfUnsupported); + + BOOST_CHECK(result); +} + +BOOST_FIXTURE_TEST_CASE(IsConvertFp32ToFp16SupportedFp16InputCl, ClContextControlFixture) +{ + std::string reasonIfUnsupported; + + bool result = IsConvertLayerSupportedTests(reasonIfUnsupported); + + BOOST_CHECK(!result); + BOOST_CHECK_EQUAL(reasonIfUnsupported, "Input should be Float32"); +} + 
+BOOST_FIXTURE_TEST_CASE(IsConvertFp32ToFp16SupportedFp32OutputCl, ClContextControlFixture) +{ + std::string reasonIfUnsupported; + + bool result = IsConvertLayerSupportedTests(reasonIfUnsupported); + + BOOST_CHECK(!result); + BOOST_CHECK_EQUAL(reasonIfUnsupported, "Output should be Float16"); +} + +BOOST_AUTO_TEST_SUITE_END() diff --git a/src/backends/cl/test/ClLayerTests.cpp b/src/backends/cl/test/ClLayerTests.cpp new file mode 100644 index 0000000000..ab63ba4abb --- /dev/null +++ b/src/backends/cl/test/ClLayerTests.cpp @@ -0,0 +1,338 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// SPDX-License-Identifier: MIT +// + +#include "ClContextControlFixture.hpp" + +#include "test/TensorHelpers.hpp" +#include "test/UnitTests.hpp" + +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include + +#include +#include + +BOOST_FIXTURE_TEST_SUITE(Compute_ArmComputeCl, ClContextControlFixture) +using FactoryType = armnn::ClWorkloadFactory; + +// ============================================================================ +// UNIT tests + +// Activation +ARMNN_AUTO_TEST_CASE(ConstantLinearActivation, ConstantLinearActivationTest) + +ARMNN_AUTO_TEST_CASE(SimpleSoftmaxBeta1, SimpleSoftmaxTest, 1.0f) +ARMNN_AUTO_TEST_CASE(SimpleSoftmaxBeta2, SimpleSoftmaxTest, 2.0f) +ARMNN_AUTO_TEST_CASE(SimpleSoftmaxBeta1Uint8, SimpleSoftmaxUint8Test, 1.0f) +ARMNN_AUTO_TEST_CASE(SimpleSoftmaxBeta2Uint8, SimpleSoftmaxUint8Test, 2.0f) + +ARMNN_AUTO_TEST_CASE(ReLu1Uint8, BoundedReLuUint8UpperAndLowerBoundTest) +ARMNN_AUTO_TEST_CASE(ReLu6Uint8, BoundedReLuUint8UpperBoundOnlyTest) + +// Fully Connected +ARMNN_AUTO_TEST_CASE(SimpleFullyConnected, FullyConnectedFloat32Test, false, false) +ARMNN_AUTO_TEST_CASE(SimpleFullyConnectedWithBias, FullyConnectedFloat32Test, true, false) +ARMNN_AUTO_TEST_CASE(SimpleFullyConnectedWithTranspose, FullyConnectedFloat32Test, false, true) +ARMNN_AUTO_TEST_CASE(FullyConnectedUint8, FullyConnectedUint8Test, false) 
+ARMNN_AUTO_TEST_CASE(FullyConnectedBiasedUint8, FullyConnectedUint8Test, true) + +ARMNN_AUTO_TEST_CASE(FullyConnectedLarge, FullyConnectedLargeTest, false) +ARMNN_AUTO_TEST_CASE(FullyConnectedLargeTransposed, FullyConnectedLargeTest, true) + +// Convolution +ARMNN_AUTO_TEST_CASE(SimpleConvolution1d, Convolution1dTest, true) + +ARMNN_AUTO_TEST_CASE(SimpleConvolution2d, SimpleConvolution2d3x5Test, true) +ARMNN_AUTO_TEST_CASE(SimpleConvolution2dSquare, SimpleConvolution2d3x3Test, true) +ARMNN_AUTO_TEST_CASE(SimpleConvolution2d3x3Uint8, SimpleConvolution2d3x3Uint8Test, true) +ARMNN_AUTO_TEST_CASE(UnbiasedConvolution2d, SimpleConvolution2d3x5Test, false) +ARMNN_AUTO_TEST_CASE(UnbiasedConvolution2dSquare, SimpleConvolution2d3x3Test, false) +ARMNN_AUTO_TEST_CASE(SimpleConvolution2dAsymmetricPadding, Convolution2dAsymmetricPaddingTest) + +ARMNN_AUTO_TEST_CASE(SimpleConvolution2dSquareNhwc, SimpleConvolution2d3x3NhwcTest, false) + +// Depthwise Convolution +ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2dDepthMul1, DepthwiseConvolution2dDepthMul1Test, true) +ARMNN_AUTO_TEST_CASE(UnbiasedDepthwiseConvolution2dDepthMul1, DepthwiseConvolution2dDepthMul1Test, false) +ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2dDepthMul1Uint8, DepthwiseConvolution2dDepthMul1Uint8Test, true) +ARMNN_AUTO_TEST_CASE(UnbiasedDepthwiseConvolution2dDepthMul1Uint8, DepthwiseConvolution2dDepthMul1Uint8Test, false) + +ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2dAsymmetric, DepthwiseConvolution2dAsymmetricTest, true) +ARMNN_AUTO_TEST_CASE(UnbiasedDepthwiseConvolution2dAsymmetric, DepthwiseConvolution2dAsymmetricTest, false) + +// Softmax +BOOST_AUTO_TEST_CASE(Softmax4dSupport) +{ + const unsigned int numDimensions = 4u; + std::array dimensionSizes; + dimensionSizes.fill(1u); + + const armnn::TensorInfo inputInfo(numDimensions, &dimensionSizes.front(), armnn::DataType::Float32); + const armnn::TensorInfo outputInfo(numDimensions, &dimensionSizes.front(), armnn::DataType::Float32); + + // 4D Softmax should be 
reported as unsupported on the CL backend + BOOST_TEST(!armnn::IsSoftmaxSupportedCl(inputInfo, outputInfo, armnn::SoftmaxDescriptor())); +} + +// Splitter +ARMNN_AUTO_TEST_CASE(SimpleSplitter, SplitterTest) +ARMNN_AUTO_TEST_CASE(SimpleSplitterUint8, SplitterUint8Test) + +ARMNN_AUTO_TEST_CASE(CopyViaSplitter, CopyViaSplitterTest) +ARMNN_AUTO_TEST_CASE(CopyViaSplitterUint8, CopyViaSplitterUint8Test) + +// Merger +ARMNN_AUTO_TEST_CASE(SimpleMerger, MergerTest) +ARMNN_AUTO_TEST_CASE(MergerUint8, MergerUint8Test) + +// Normalization +ARMNN_AUTO_TEST_CASE(SimpleNormalizationAcross, SimpleNormalizationAcrossTest) +ARMNN_AUTO_TEST_CASE(SimpleNormalizationWithin, SimpleNormalizationWithinTest) +ARMNN_AUTO_TEST_CASE(SimpleNormalizationAcrossNhwc, SimpleNormalizationAcrossNhwcTest) + +// Pooling +ARMNN_AUTO_TEST_CASE(SimpleMaxPooling2dSize3x3Stride2x4, SimpleMaxPooling2dSize3x3Stride2x4Test, true) +ARMNN_AUTO_TEST_CASE(SimpleMaxPooling2dSize3x3Stride2x4Uint8, SimpleMaxPooling2dSize3x3Stride2x4Uint8Test, true) + +ARMNN_AUTO_TEST_CASE(IgnorePaddingSimpleMaxPooling2d, IgnorePaddingSimpleMaxPooling2dTest) +ARMNN_AUTO_TEST_CASE(IgnorePaddingSimpleMaxPooling2dUint8, IgnorePaddingSimpleMaxPooling2dUint8Test) +ARMNN_AUTO_TEST_CASE(IgnorePaddingMaxPooling2dSize3, IgnorePaddingMaxPooling2dSize3Test) +ARMNN_AUTO_TEST_CASE(IgnorePaddingMaxPooling2dSize3Uint8, IgnorePaddingMaxPooling2dSize3Uint8Test) + +ARMNN_AUTO_TEST_CASE(IgnorePaddingSimpleAveragePooling2d, IgnorePaddingSimpleAveragePooling2dTest) +ARMNN_AUTO_TEST_CASE(IgnorePaddingSimpleAveragePooling2dUint8, IgnorePaddingSimpleAveragePooling2dUint8Test) +ARMNN_AUTO_TEST_CASE(IgnorePaddingSimpleAveragePooling2dNoPadding, IgnorePaddingSimpleAveragePooling2dNoPaddingTest) +ARMNN_AUTO_TEST_CASE(IgnorePaddingSimpleAveragePooling2dNoPaddingUint8, + IgnorePaddingSimpleAveragePooling2dNoPaddingUint8Test) +ARMNN_AUTO_TEST_CASE(IgnorePaddingAveragePooling2dSize3, IgnorePaddingAveragePooling2dSize3Test) 
+ARMNN_AUTO_TEST_CASE(IgnorePaddingAveragePooling2dSize3Uint8, IgnorePaddingAveragePooling2dSize3Uint8Test) + +ARMNN_AUTO_TEST_CASE(IgnorePaddingSimpleL2Pooling2d, IgnorePaddingSimpleL2Pooling2dTest) +ARMNN_AUTO_TEST_CASE(UNSUPPORTED_IgnorePaddingSimpleL2Pooling2dUint8, IgnorePaddingSimpleL2Pooling2dUint8Test) +ARMNN_AUTO_TEST_CASE(IgnorePaddingL2Pooling2dSize3, IgnorePaddingL2Pooling2dSize3Test) +ARMNN_AUTO_TEST_CASE(UNSUPPORTED_IgnorePaddingL2Pooling2dSize3Uint8, IgnorePaddingL2Pooling2dSize3Uint8Test) + +ARMNN_AUTO_TEST_CASE(SimpleAveragePooling2d, SimpleAveragePooling2dTest) +ARMNN_AUTO_TEST_CASE(SimpleAveragePooling2dNhwc, SimpleAveragePooling2dNhwcTest) +ARMNN_AUTO_TEST_CASE(SimpleAveragePooling2dUint8, SimpleAveragePooling2dUint8Test) +ARMNN_AUTO_TEST_CASE(IgnorePaddingAveragePooling2dSize3x2Stride2x2, + IgnorePaddingAveragePooling2dSize3x2Stride2x2Test, + false) +ARMNN_AUTO_TEST_CASE(IgnorePaddingAveragePooling2dSize3x2Stride2x2NoPadding, + IgnorePaddingAveragePooling2dSize3x2Stride2x2Test, + true) +ARMNN_AUTO_TEST_CASE(LargeTensorsAveragePooling2d, LargeTensorsAveragePooling2dTest) +ARMNN_AUTO_TEST_CASE(LargeTensorsAveragePooling2dUint8, LargeTensorsAveragePooling2dUint8Test) + +ARMNN_AUTO_TEST_CASE(SimpleL2Pooling2d, SimpleL2Pooling2dTest) +ARMNN_AUTO_TEST_CASE(UNSUPPORTED_SimpleL2Pooling2dUint8, SimpleL2Pooling2dUint8Test) +ARMNN_AUTO_TEST_CASE(L2Pooling2dSize3Stride1, L2Pooling2dSize3Stride1Test) +ARMNN_AUTO_TEST_CASE(UNSUPPORTED_L2Pooling2dSize3Stride1Uint8, L2Pooling2dSize3Stride1Uint8Test) +ARMNN_AUTO_TEST_CASE(L2Pooling2dSize3Stride3, L2Pooling2dSize3Stride3Test) +ARMNN_AUTO_TEST_CASE(UNSUPPORTED_L2Pooling2dSize3Stride3Uint8, L2Pooling2dSize3Stride3Uint8Test) +ARMNN_AUTO_TEST_CASE(L2Pooling2dSize3Stride4, L2Pooling2dSize3Stride4Test) +ARMNN_AUTO_TEST_CASE(UNSUPPORTED_L2Pooling2dSize3Stride4Uint8, L2Pooling2dSize3Stride4Uint8Test) +ARMNN_AUTO_TEST_CASE(L2Pooling2dSize7, L2Pooling2dSize7Test) +ARMNN_AUTO_TEST_CASE(UNSUPPORTED_L2Pooling2dSize7Uint8, 
L2Pooling2dSize7Uint8Test) +ARMNN_AUTO_TEST_CASE(L2Pooling2dSize9, L2Pooling2dSize9Test) +ARMNN_AUTO_TEST_CASE(UNSUPPORTED_L2Pooling2dSize9Uint8, L2Pooling2dSize9Uint8Test) + +// Add +ARMNN_AUTO_TEST_CASE(SimpleAdd, AdditionTest) +ARMNN_AUTO_TEST_CASE(AddBroadcast1Element, AdditionBroadcast1ElementTest) +ARMNN_AUTO_TEST_CASE(AddBroadcast, AdditionBroadcastTest) + +ARMNN_AUTO_TEST_CASE(AdditionUint8, AdditionUint8Test) +ARMNN_AUTO_TEST_CASE(AddBroadcastUint8, AdditionBroadcastUint8Test) +ARMNN_AUTO_TEST_CASE(AddBroadcast1ElementUint8, AdditionBroadcast1ElementUint8Test) + +// Sub +ARMNN_AUTO_TEST_CASE(SimpleSub, SubtractionTest) + +// Div +ARMNN_AUTO_TEST_CASE(SimpleDivision, DivisionTest) +ARMNN_AUTO_TEST_CASE(DivisionByZero, DivisionByZeroTest) +ARMNN_AUTO_TEST_CASE(DivisionBroadcast1Element, DivisionBroadcast1ElementTest) +ARMNN_AUTO_TEST_CASE(DivisionBroadcast1DVector, DivisionBroadcast1DVectorTest) +// NOTE: quantized division is not supported by CL and not required by the +// android NN api + +// Mul +ARMNN_AUTO_TEST_CASE(SimpleMultiplication, MultiplicationTest) +ARMNN_AUTO_TEST_CASE(MultiplicationBroadcast1Element, MultiplicationBroadcast1ElementTest) +ARMNN_AUTO_TEST_CASE(MultiplicationBroadcast1DVector, MultiplicationBroadcast1DVectorTest) +ARMNN_AUTO_TEST_CASE(MultiplicationUint8, MultiplicationUint8Test) +ARMNN_AUTO_TEST_CASE(MultiplicationBroadcast1ElementUint8, MultiplicationBroadcast1ElementUint8Test) +ARMNN_AUTO_TEST_CASE(MultiplicationBroadcast1DVectorUint8, MultiplicationBroadcast1DVectorUint8Test) + +// Batch Norm +ARMNN_AUTO_TEST_CASE(BatchNorm, BatchNormTest) + +// L2 Normalization +ARMNN_AUTO_TEST_CASE(L2Normalization1d, L2Normalization1dTest) +ARMNN_AUTO_TEST_CASE(L2Normalization2d, L2Normalization2dTest) +ARMNN_AUTO_TEST_CASE(L2Normalization3d, L2Normalization3dTest) +ARMNN_AUTO_TEST_CASE(L2Normalization4d, L2Normalization4dTest) + +ARMNN_AUTO_TEST_CASE(L2Normalization1dNhwc, L2Normalization1dNhwcTest) 
+ARMNN_AUTO_TEST_CASE(L2Normalization2dNhwc, L2Normalization2dNhwcTest) +ARMNN_AUTO_TEST_CASE(L2Normalization3dNhwc, L2Normalization3dNhwcTest) +ARMNN_AUTO_TEST_CASE(L2Normalization4dNhwc, L2Normalization4dNhwcTest) + +// Resize Bilinear - NCHW data layout +ARMNN_AUTO_TEST_CASE(ResizeBilinearNop, ResizeBilinearNopTest) +ARMNN_AUTO_TEST_CASE(SimpleResizeBilinear, SimpleResizeBilinearTest) +ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMin, ResizeBilinearSqMinTest) +ARMNN_AUTO_TEST_CASE(ResizeBilinearMin, ResizeBilinearMinTest) +ARMNN_AUTO_TEST_CASE(ResizeBilinearMag, ResizeBilinearMagTest) + +// Resize Bilinear - NHWC data layout +ARMNN_AUTO_TEST_CASE(ResizeBilinearNopNhwc, ResizeBilinearNopNhwcTest) +ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearNhwc, SimpleResizeBilinearNhwcTest) +ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinNhwc, ResizeBilinearSqMinNhwcTest) +ARMNN_AUTO_TEST_CASE(ResizeBilinearMinNhwc, ResizeBilinearMinNhwcTest) +ARMNN_AUTO_TEST_CASE(ResizeBilinearMagNhwc, ResizeBilinearMagNhwcTest) + +// Constant +ARMNN_AUTO_TEST_CASE(Constant, ConstantTest) +ARMNN_AUTO_TEST_CASE(ConstantUint8, ConstantTestUint8) + +// Concat +ARMNN_AUTO_TEST_CASE(Concatenation1d, Concatenation1dTest) +ARMNN_AUTO_TEST_CASE(Concatenation1dUint8, Concatenation1dUint8Test) + +ARMNN_AUTO_TEST_CASE(Concatenation2dDim0, Concatenation2dDim0Test) +ARMNN_AUTO_TEST_CASE(Concatenation2dDim0Uint8, Concatenation2dDim0Uint8Test) +ARMNN_AUTO_TEST_CASE(Concatenation2dDim1, Concatenation2dDim1Test) +ARMNN_AUTO_TEST_CASE(Concatenation2dDim1Uint8, Concatenation2dDim1Uint8Test) + +ARMNN_AUTO_TEST_CASE(Concatenation2dDim0DiffInputDims, Concatenation2dDim0DiffInputDimsTest) +ARMNN_AUTO_TEST_CASE(Concatenation2dDim0DiffInputDimsUint8, Concatenation2dDim0DiffInputDimsUint8Test) +ARMNN_AUTO_TEST_CASE(Concatenation2dDim1DiffInputDims, Concatenation2dDim1DiffInputDimsTest) +ARMNN_AUTO_TEST_CASE(Concatenation2dDim1DiffInputDimsUint8, Concatenation2dDim1DiffInputDimsUint8Test) + +ARMNN_AUTO_TEST_CASE(Concatenation3dDim0, 
Concatenation3dDim0Test) +ARMNN_AUTO_TEST_CASE(Concatenation3dDim0Uint8, Concatenation3dDim0Uint8Test) +ARMNN_AUTO_TEST_CASE(Concatenation3dDim1, Concatenation3dDim1Test) +ARMNN_AUTO_TEST_CASE(Concatenation3dDim1Uint8, Concatenation3dDim1Uint8Test) +ARMNN_AUTO_TEST_CASE(Concatenation3dDim2, Concatenation3dDim2Test) +ARMNN_AUTO_TEST_CASE(Concatenation3dDim2Uint8, Concatenation3dDim2Uint8Test) + +ARMNN_AUTO_TEST_CASE(Concatenation3dDim0DiffInputDims, Concatenation3dDim0DiffInputDimsTest) +ARMNN_AUTO_TEST_CASE(Concatenation3dDim0DiffInputDimsUint8, Concatenation3dDim0DiffInputDimsUint8Test) +ARMNN_AUTO_TEST_CASE(Concatenation3dDim1DiffInputDims, Concatenation3dDim1DiffInputDimsTest) +ARMNN_AUTO_TEST_CASE(Concatenation3dDim1DiffInputDimsUint8, Concatenation3dDim1DiffInputDimsUint8Test) +ARMNN_AUTO_TEST_CASE(Concatenation3dDim2DiffInputDims, Concatenation3dDim2DiffInputDimsTest) +ARMNN_AUTO_TEST_CASE(Concatenation3dDim2DiffInputDimsUint8, Concatenation3dDim2DiffInputDimsUint8Test) + +// Floor +ARMNN_AUTO_TEST_CASE(SimpleFloor, SimpleFloorTest) + +// Reshape +ARMNN_AUTO_TEST_CASE(SimpleReshapeFloat32, SimpleReshapeFloat32Test) +ARMNN_AUTO_TEST_CASE(SimpleReshapeUint8, SimpleReshapeUint8Test) + +// Permute +ARMNN_AUTO_TEST_CASE(SimplePermuteFloat32, SimplePermuteFloat32Test) +ARMNN_AUTO_TEST_CASE(SimplePermuteUint8, SimplePermuteUint8Test) +ARMNN_AUTO_TEST_CASE(PermuteFloat32ValueSet1, PermuteFloat32ValueSet1Test) +ARMNN_AUTO_TEST_CASE(PermuteFloat32ValueSet2, PermuteFloat32ValueSet2Test) +ARMNN_AUTO_TEST_CASE(PermuteFloat32ValueSet3, PermuteFloat32ValueSet3Test) + +// Lstm +ARMNN_AUTO_TEST_CASE(LstmLayerFloat32WithCifgWithPeepholeNoProjection, + LstmLayerFloat32WithCifgWithPeepholeNoProjectionTest) +ARMNN_AUTO_TEST_CASE(LstmLayerFloat32NoCifgNoPeepholeNoProjection, + LstmLayerFloat32NoCifgNoPeepholeNoProjectionTest) +ARMNN_AUTO_TEST_CASE(LstmLayerFloat32NoCifgWithPeepholeWithProjection, + LstmLayerFloat32NoCifgWithPeepholeWithProjectionTest) + +// Convert from Float16 to 
Float32 +ARMNN_AUTO_TEST_CASE(SimpleConvertFp16ToFp32, SimpleConvertFp16ToFp32Test) +// Convert from Float32 to Float16 +ARMNN_AUTO_TEST_CASE(SimpleConvertFp32ToFp16, SimpleConvertFp32ToFp16Test) + +// ============================================================================ +// COMPARE tests + +ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareConv2dWithReference, CompareConvolution2dTest) + +ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareDepthwiseConv2dWithReferenceFloat32, CompareDepthwiseConvolution2dTest) +ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareDepthwiseConv2dWithReferenceUint8, CompareDepthwiseConvolution2dTest) + +ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareNormalizationWithinWithReference, CompareNormalizationTest, + armnn::NormalizationAlgorithmChannel::Within, + armnn::NormalizationAlgorithmMethod::LocalBrightness) +ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareNormalizationAcrossWithReference, CompareNormalizationTest, + armnn::NormalizationAlgorithmChannel::Across, + armnn::NormalizationAlgorithmMethod::LocalBrightness) + +ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareSoftmaxBeta1WithReference, CompareSoftmaxTest, 1.0f) +ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareSoftmaxBeta2WithReference, CompareSoftmaxTest, 2.0f) +ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareSoftmaxUint8, CompareSoftmaxUint8Test, 1.0f) + +ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareMaxPooling2dWithRef, ComparePooling2dTest, armnn::PoolingAlgorithm::Max) + +ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareAveragePooling2dWithRef, ComparePooling2dTest, armnn::PoolingAlgorithm::Average) +ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareAveragePooling2dWithRefUint8, ComparePooling2dUint8Test, + armnn::PoolingAlgorithm::Average) + +ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareL2Pooling2dWithRef, ComparePooling2dTest, armnn::PoolingAlgorithm::L2) + +ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareAddition, CompareAdditionTest) + +ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareMultiplicationWithRef, CompareMultiplicationTest) + 
+ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareBatchNorm, CompareBatchNormTest) + +ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareReLu1, CompareBoundedReLuTest, 1.0f, -1.0f) +ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareReLu6, CompareBoundedReLuTest, 6.0f, 0.0f) + +// ============================================================================ +// FIXTURE tests + +ARMNN_COMPARE_REF_FIXTURE_TEST_CASE(CompareSigmoidActivationWithReference, ActivationFixture, + CompareActivationTest, armnn::ActivationFunction::Sigmoid, 5u) + +ARMNN_COMPARE_REF_FIXTURE_TEST_CASE(CompareTanhActivationWithReference, ActivationFixture, + CompareActivationTest, armnn::ActivationFunction::TanH, 5u) + +ARMNN_COMPARE_REF_FIXTURE_TEST_CASE(CompareLinearActivationWithReference, ActivationFixture, + CompareActivationTest, armnn::ActivationFunction::Linear, 5u) + +ARMNN_COMPARE_REF_FIXTURE_TEST_CASE(CompareReLuActivationWithReference, ActivationFixture, + CompareActivationTest, armnn::ActivationFunction::ReLu, 5u) + +ARMNN_COMPARE_REF_FIXTURE_TEST_CASE(CompareBoundedReLuActivationWithReference, ActivationFixture, + CompareActivationTest, armnn::ActivationFunction::BoundedReLu, 5u) +ARMNN_COMPARE_REF_FIXTURE_TEST_CASE(CompareBoundedReLuActivationWithReferenceUint8, ActivationFixture, + CompareActivationUint8Test, armnn::ActivationFunction::BoundedReLu) + +ARMNN_COMPARE_REF_FIXTURE_TEST_CASE(CompareSoftReLuActivationWithReference, ActivationFixture, + CompareActivationTest, armnn::ActivationFunction::SoftReLu, 5u) + +ARMNN_COMPARE_REF_FIXTURE_TEST_CASE(CompareLeakyReLuActivationWithReference, ActivationFixture, + CompareActivationTest, armnn::ActivationFunction::LeakyReLu, 5u) + +ARMNN_COMPARE_REF_FIXTURE_TEST_CASE(CompareAbsActivationWithReference, ActivationFixture, + CompareActivationTest, armnn::ActivationFunction::Abs, 5u) + +ARMNN_COMPARE_REF_FIXTURE_TEST_CASE(CompareSqrtActivationWithReference, PositiveActivationFixture, + CompareActivationTest, armnn::ActivationFunction::Sqrt, 5u) + 
+ARMNN_COMPARE_REF_FIXTURE_TEST_CASE(CompareSquareActivationWithReference, ActivationFixture, + CompareActivationTest, armnn::ActivationFunction::Square, 5u) + +BOOST_AUTO_TEST_SUITE_END() diff --git a/src/backends/neon/CMakeLists.txt b/src/backends/neon/CMakeLists.txt index c6492bc076..93c7955a5f 100644 --- a/src/backends/neon/CMakeLists.txt +++ b/src/backends/neon/CMakeLists.txt @@ -14,7 +14,8 @@ if(ARMCOMPUTENEON) NeonTensorHandle.hpp ) - add_subdirectory(workloads test) + add_subdirectory(workloads) + add_subdirectory(test) else() list(APPEND armnnNeonBackend_sources NeonLayerSupport.cpp diff --git a/src/backends/neon/backend.cmake b/src/backends/neon/backend.cmake index 5f02c845ed..0240d527b3 100644 --- a/src/backends/neon/backend.cmake +++ b/src/backends/neon/backend.cmake @@ -6,6 +6,7 @@ if(ARMCOMPUTENEON) add_subdirectory(${PROJECT_SOURCE_DIR}/src/backends/neon) list(APPEND armnnLibraries armnnNeonBackend armnnNeonBackendWorkloads) + list(APPEND armnnUnitTestLibraries armnnNeonBackendUnitTests) else() message("NEON backend is disabled") add_subdirectory(${PROJECT_SOURCE_DIR}/src/backends/neon) diff --git a/src/backends/neon/test/CMakeLists.txt b/src/backends/neon/test/CMakeLists.txt index f41a074999..82156f380b 100644 --- a/src/backends/neon/test/CMakeLists.txt +++ b/src/backends/neon/test/CMakeLists.txt @@ -2,3 +2,14 @@ # Copyright © 2017 Arm Ltd. All rights reserved. 
# SPDX-License-Identifier: MIT # + +list(APPEND armnnNeonBackendUnitTests_sources + NeonCreateWorkloadTests.cpp + NeonLayerSupportTests.cpp + NeonLayerTests.cpp +) + +add_library(armnnNeonBackendUnitTests STATIC ${armnnNeonBackendUnitTests_sources}) +target_include_directories(armnnNeonBackendUnitTests PRIVATE ${PROJECT_SOURCE_DIR}/src) +target_include_directories(armnnNeonBackendUnitTests PRIVATE ${PROJECT_SOURCE_DIR}/src/armnn) +target_include_directories(armnnNeonBackendUnitTests PRIVATE ${PROJECT_SOURCE_DIR}/src/armnnUtils) \ No newline at end of file diff --git a/src/backends/neon/test/NeonCreateWorkloadTests.cpp b/src/backends/neon/test/NeonCreateWorkloadTests.cpp new file mode 100644 index 0000000000..d1a5b2a5f2 --- /dev/null +++ b/src/backends/neon/test/NeonCreateWorkloadTests.cpp @@ -0,0 +1,531 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// SPDX-License-Identifier: MIT +// + +#include + +#include +#include +#include +#include +#include + +BOOST_AUTO_TEST_SUITE(CreateWorkloadNeon) + +namespace +{ + +bool TestNeonTensorHandleInfo(armnn::INeonTensorHandle* handle, const armnn::TensorInfo& expectedInfo) +{ + using namespace armnn::armcomputetensorutils; + + const arm_compute::ITensorInfo* handleInfo = handle->GetTensor().info(); + const arm_compute::TensorInfo expectedAclInfo = BuildArmComputeTensorInfo(expectedInfo); + + if (handleInfo->data_type() != expectedAclInfo.data_type()) + { + return false; + } + + if (handleInfo->num_dimensions() != expectedAclInfo.num_dimensions()) + { + return false; + } + + if (handleInfo->quantization_info() != expectedAclInfo.quantization_info()) + { + return false; + } + + for (std::size_t d = 0; d < expectedAclInfo.num_dimensions(); ++d) + { + if (handleInfo->dimension(d) != expectedAclInfo.dimension(d)) + { + return false; + } + } + + return true; +} + +} // namespace + +template +static void NeonCreateActivationWorkloadTest() +{ + Graph graph; + NeonWorkloadFactory factory; + auto workload = 
CreateActivationWorkloadTest + (factory, graph); + + // Checks that inputs/outputs are as we expect them (see definition of CreateActivationWorkloadTest). + ActivationQueueDescriptor queueDescriptor = workload->GetData(); + auto inputHandle = boost::polymorphic_downcast(queueDescriptor.m_Inputs[0]); + auto outputHandle = boost::polymorphic_downcast(queueDescriptor.m_Outputs[0]); + BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo({1, 1}, DataType))); + BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo({1, 1}, DataType))); +} + +#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC +BOOST_AUTO_TEST_CASE(CreateActivationFloat16Workload) +{ + NeonCreateActivationWorkloadTest(); +} +#endif + +BOOST_AUTO_TEST_CASE(CreateActivationFloatWorkload) +{ + NeonCreateActivationWorkloadTest(); +} + +template +static void NeonCreateArithmethicWorkloadTest() +{ + Graph graph; + NeonWorkloadFactory factory; + auto workload = CreateArithmeticWorkloadTest(factory, graph); + + DescriptorType queueDescriptor = workload->GetData(); + auto inputHandle1 = boost::polymorphic_downcast(queueDescriptor.m_Inputs[0]); + auto inputHandle2 = boost::polymorphic_downcast(queueDescriptor.m_Inputs[1]); + auto outputHandle = boost::polymorphic_downcast(queueDescriptor.m_Outputs[0]); + BOOST_TEST(TestNeonTensorHandleInfo(inputHandle1, TensorInfo({2, 3}, DataType))); + BOOST_TEST(TestNeonTensorHandleInfo(inputHandle2, TensorInfo({2, 3}, DataType))); + BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo({2, 3}, DataType))); +} + +#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC +BOOST_AUTO_TEST_CASE(CreateAdditionFloat16Workload) +{ + NeonCreateArithmethicWorkloadTest(); +} +#endif + +BOOST_AUTO_TEST_CASE(CreateAdditionFloatWorkload) +{ + NeonCreateArithmethicWorkloadTest(); +} + +#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC +BOOST_AUTO_TEST_CASE(CreateSubtractionFloat16Workload) +{ + NeonCreateArithmethicWorkloadTest(); +} +#endif + +BOOST_AUTO_TEST_CASE(CreateSubtractionFloatWorkload) +{ 
+ NeonCreateArithmethicWorkloadTest(); +} + +#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC +BOOST_AUTO_TEST_CASE(CreateMultiplicationFloat16Workload) +{ + NeonCreateArithmethicWorkloadTest(); +} +#endif + +BOOST_AUTO_TEST_CASE(CreateMultiplicationFloatWorkload) +{ + NeonCreateArithmethicWorkloadTest(); +} + +template +static void NeonCreateBatchNormalizationWorkloadTest() +{ + Graph graph; + NeonWorkloadFactory factory; + auto workload = CreateBatchNormalizationWorkloadTest(factory, graph); + + // Checks that outputs and inputs are as we expect them (see definition of CreateBatchNormalizationWorkloadTest). + BatchNormalizationQueueDescriptor queueDescriptor = workload->GetData(); + auto inputHandle = boost::polymorphic_downcast(queueDescriptor.m_Inputs[0]); + auto outputHandle = boost::polymorphic_downcast(queueDescriptor.m_Outputs[0]); + BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo({2, 3, 1, 1}, DataType))); + BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo({2, 3, 1, 1}, DataType))); +} + +#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC +BOOST_AUTO_TEST_CASE(CreateBatchNormalizationFloat16Workload) +{ + NeonCreateBatchNormalizationWorkloadTest(); +} +#endif + +BOOST_AUTO_TEST_CASE(CreateBatchNormalizationFloatWorkload) +{ + NeonCreateBatchNormalizationWorkloadTest(); +} + +template +static void NeonCreateConvolution2dWorkloadTest(DataLayout dataLayout = DataLayout::NCHW) +{ + Graph graph; + NeonWorkloadFactory factory; + auto workload = CreateConvolution2dWorkloadTest(factory, graph, dataLayout); + + TensorShape inputShape = (dataLayout == DataLayout::NCHW) ? TensorShape{2, 3, 8, 16} : TensorShape{2, 8, 16, 3}; + TensorShape outputShape = (dataLayout == DataLayout::NCHW) ? TensorShape{2, 2, 2, 10} : TensorShape{2, 2, 10, 2}; + + // Checks that outputs and inputs are as we expect them (see definition of CreateConvolution2dWorkloadTest). 
+ Convolution2dQueueDescriptor queueDescriptor = workload->GetData(); + auto inputHandle = boost::polymorphic_downcast(queueDescriptor.m_Inputs[0]); + auto outputHandle = boost::polymorphic_downcast(queueDescriptor.m_Outputs[0]); + BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo(inputShape, DataType))); + BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo(outputShape, DataType))); +} + +#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC +BOOST_AUTO_TEST_CASE(CreateConvolution2dFloat16NchwWorkload) +{ + NeonCreateConvolution2dWorkloadTest(); +} + +BOOST_AUTO_TEST_CASE(CreateConvolution2dFloat16NhwcWorkload) +{ + NeonCreateConvolution2dWorkloadTest(DataLayout::NHWC); +} + +#endif +BOOST_AUTO_TEST_CASE(CreateConvolution2dFloatNchwWorkload) +{ + NeonCreateConvolution2dWorkloadTest(); +} + +BOOST_AUTO_TEST_CASE(CreateConvolution2dFloatNhwcWorkload) +{ + NeonCreateConvolution2dWorkloadTest(DataLayout::NHWC); +} + +template +static void NeonCreateFullyConnectedWorkloadTest() +{ + Graph graph; + NeonWorkloadFactory factory; + auto workload = CreateFullyConnectedWorkloadTest(factory, graph); + + // Checks that outputs and inputs are as we expect them (see definition of CreateFullyConnectedWorkloadTest). 
+ FullyConnectedQueueDescriptor queueDescriptor = workload->GetData(); + auto inputHandle = boost::polymorphic_downcast(queueDescriptor.m_Inputs[0]); + auto outputHandle = boost::polymorphic_downcast(queueDescriptor.m_Outputs[0]); + BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo({3, 1, 4, 5}, DataType))); + BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo({3, 7}, DataType))); +} + +#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC +BOOST_AUTO_TEST_CASE(CreateFullyConnectedFloat16Workload) +{ + NeonCreateFullyConnectedWorkloadTest(); +} +#endif + +BOOST_AUTO_TEST_CASE(CreateFullyConnectedFloatWorkload) +{ + NeonCreateFullyConnectedWorkloadTest(); +} + +template +static void NeonCreateNormalizationWorkloadTest(DataLayout dataLayout) +{ + Graph graph; + NeonWorkloadFactory factory; + auto workload = CreateNormalizationWorkloadTest(factory, graph, dataLayout); + + // Checks that outputs and inputs are as we expect them (see definition of CreateNormalizationWorkloadTest). + NormalizationQueueDescriptor queueDescriptor = workload->GetData(); + auto inputHandle = boost::polymorphic_downcast(queueDescriptor.m_Inputs[0]); + auto outputHandle = boost::polymorphic_downcast(queueDescriptor.m_Outputs[0]); + BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo({3, 5, 5, 1}, DataType))); + BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo({3, 5, 5, 1}, DataType))); +} + +#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC +BOOST_AUTO_TEST_CASE(CreateNormalizationFloat16NchwWorkload) +{ + NeonCreateNormalizationWorkloadTest(DataLayout::NCHW); +} + +BOOST_AUTO_TEST_CASE(CreateNormalizationFloat16NhwcWorkload) +{ + NeonCreateNormalizationWorkloadTest(DataLayout::NHWC); +} +#endif + +BOOST_AUTO_TEST_CASE(CreateNormalizationFloatNchwWorkload) +{ + NeonCreateNormalizationWorkloadTest(DataLayout::NCHW); +} + +BOOST_AUTO_TEST_CASE(CreateNormalizationFloatNhwcWorkload) +{ + NeonCreateNormalizationWorkloadTest(DataLayout::NHWC); +} + + +template +static void 
NeonCreatePooling2dWorkloadTest(DataLayout dataLayout = DataLayout::NCHW) +{ + Graph graph; + NeonWorkloadFactory factory; + auto workload = CreatePooling2dWorkloadTest + (factory, graph, dataLayout); + + TensorShape inputShape = (dataLayout == DataLayout::NCHW) ? TensorShape{3, 2, 5, 5} : TensorShape{3, 5, 5, 2}; + TensorShape outputShape = (dataLayout == DataLayout::NCHW) ? TensorShape{3, 2, 2, 4} : TensorShape{3, 2, 4, 2}; + + // Checks that outputs and inputs are as we expect them (see definition of CreatePooling2dWorkloadTest). + Pooling2dQueueDescriptor queueDescriptor = workload->GetData(); + auto inputHandle = boost::polymorphic_downcast(queueDescriptor.m_Inputs[0]); + auto outputHandle = boost::polymorphic_downcast(queueDescriptor.m_Outputs[0]); + BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo(inputShape, DataType))); + BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo(outputShape, DataType))); +} + +#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC +BOOST_AUTO_TEST_CASE(CreatePooling2dFloat16Workload) +{ + NeonCreatePooling2dWorkloadTest(); +} +#endif + +BOOST_AUTO_TEST_CASE(CreatePooling2dFloatNchwWorkload) +{ + NeonCreatePooling2dWorkloadTest(DataLayout::NCHW); +} + +BOOST_AUTO_TEST_CASE(CreatePooling2dFloatNhwcWorkload) +{ + NeonCreatePooling2dWorkloadTest(DataLayout::NHWC); +} + +BOOST_AUTO_TEST_CASE(CreatePooling2dUint8NchwWorkload) +{ + NeonCreatePooling2dWorkloadTest(DataLayout::NCHW); +} + +BOOST_AUTO_TEST_CASE(CreatePooling2dUint8NhwcWorkload) +{ + NeonCreatePooling2dWorkloadTest(DataLayout::NHWC); +} + +template +static void NeonCreateReshapeWorkloadTest() +{ + Graph graph; + NeonWorkloadFactory factory; + auto workload = CreateReshapeWorkloadTest(factory, graph); + + // Checks that outputs and inputs are as we expect them (see definition of CreateReshapeWorkloadTest). 
+ ReshapeQueueDescriptor queueDescriptor = workload->GetData(); + auto inputHandle = boost::polymorphic_downcast(queueDescriptor.m_Inputs[0]); + auto outputHandle = boost::polymorphic_downcast(queueDescriptor.m_Outputs[0]); + BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo({4, 1}, DataType))); + BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo({1, 4}, DataType))); +} + +#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC +BOOST_AUTO_TEST_CASE(CreateReshapeFloat16Workload) +{ + NeonCreateReshapeWorkloadTest(); +} +#endif + +BOOST_AUTO_TEST_CASE(CreateReshapeFloatWorkload) +{ + NeonCreateReshapeWorkloadTest(); +} + +BOOST_AUTO_TEST_CASE(CreateReshapeUint8Workload) +{ + NeonCreateReshapeWorkloadTest(); +} + +template +static void NeonCreateSoftmaxWorkloadTest() +{ + Graph graph; + NeonWorkloadFactory factory; + auto workload = CreateSoftmaxWorkloadTest(factory, graph); + + // Checks that outputs and inputs are as we expect them (see definition of CreateSoftmaxWorkloadTest). + SoftmaxQueueDescriptor queueDescriptor = workload->GetData(); + auto inputHandle = boost::polymorphic_downcast(queueDescriptor.m_Inputs[0]); + auto outputHandle = boost::polymorphic_downcast(queueDescriptor.m_Outputs[0]); + BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo({4, 1}, DataType))); + BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo({4, 1}, DataType))); +} + +#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC +BOOST_AUTO_TEST_CASE(CreateSoftmaxFloat16Workload) +{ + NeonCreateSoftmaxWorkloadTest(); +} +#endif + +BOOST_AUTO_TEST_CASE(CreateSoftmaxFloatWorkload) +{ + NeonCreateSoftmaxWorkloadTest(); +} + +BOOST_AUTO_TEST_CASE(CreateSplitterWorkload) +{ + Graph graph; + NeonWorkloadFactory factory; + auto workload = CreateSplitterWorkloadTest(factory, graph); + + // Checks that outputs are as we expect them (see definition of CreateSplitterWorkloadTest). 
+ SplitterQueueDescriptor queueDescriptor = workload->GetData(); + auto inputHandle = boost::polymorphic_downcast(queueDescriptor.m_Inputs[0]); + BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo({5, 7, 7}, DataType::Float32))); + + auto outputHandle0 = boost::polymorphic_downcast(queueDescriptor.m_Outputs[0]); + BOOST_TEST(TestNeonTensorHandleInfo(outputHandle0, TensorInfo({1, 7, 7}, DataType::Float32))); + + auto outputHandle1 = boost::polymorphic_downcast(queueDescriptor.m_Outputs[1]); + BOOST_TEST(TestNeonTensorHandleInfo(outputHandle1, TensorInfo({2, 7, 7}, DataType::Float32))); + + auto outputHandle2 = boost::polymorphic_downcast(queueDescriptor.m_Outputs[2]); + BOOST_TEST(TestNeonTensorHandleInfo(outputHandle2, TensorInfo({2, 7, 7}, DataType::Float32))); +} + +BOOST_AUTO_TEST_CASE(CreateSplitterMerger) +{ + // Tests that it is possible to decide which output of the splitter layer + // should be lined to which input of the merger layer. + // We tested that is is possible to specify 0th output + // of the splitter to be the 1st input to the merger, and the 1st output of the splitter to be 0th input + // of the merger. + + Graph graph; + NeonWorkloadFactory factory; + + auto workloads = + CreateSplitterMergerWorkloadTest(factory, graph); + + auto wlSplitter = std::move(workloads.first); + auto wlMerger = std::move(workloads.second); + + //Checks that the index of inputs/outputs matches what we declared on InputDescriptor construction. 
+ armnn::INeonTensorHandle* sOut0 = dynamic_cast(wlSplitter->GetData().m_Outputs[0]); + armnn::INeonTensorHandle* sOut1 = dynamic_cast(wlSplitter->GetData().m_Outputs[1]); + armnn::INeonTensorHandle* mIn0 = dynamic_cast(wlMerger->GetData().m_Inputs[0]); + armnn::INeonTensorHandle* mIn1 = dynamic_cast(wlMerger->GetData().m_Inputs[1]); + + BOOST_TEST(sOut0); + BOOST_TEST(sOut1); + BOOST_TEST(mIn0); + BOOST_TEST(mIn1); + + bool validDataPointers = (sOut0 == mIn1) && (sOut1 == mIn0); + + BOOST_TEST(validDataPointers); +} + +BOOST_AUTO_TEST_CASE(CreateSingleOutputMultipleInputs) +{ + // Tests that it is possible to assign multiple (two) different layers to each of the outputs of a splitter layer. + // We created a splitter with two outputs. That each of those outputs is used by two different activation layers + + Graph graph; + NeonWorkloadFactory factory; + std::unique_ptr wlSplitter; + std::unique_ptr wlActiv0_0; + std::unique_ptr wlActiv0_1; + std::unique_ptr wlActiv1_0; + std::unique_ptr wlActiv1_1; + + CreateSplitterMultipleInputsOneOutputWorkloadTest(factory, graph, wlSplitter, wlActiv0_0, wlActiv0_1, + wlActiv1_0, wlActiv1_1); + + armnn::INeonTensorHandle* sOut0 = dynamic_cast(wlSplitter->GetData().m_Outputs[0]); + armnn::INeonTensorHandle* sOut1 = dynamic_cast(wlSplitter->GetData().m_Outputs[1]); + armnn::INeonTensorHandle* activ0_0Im = dynamic_cast(wlActiv0_0->GetData().m_Inputs[0]); + armnn::INeonTensorHandle* activ0_1Im = dynamic_cast(wlActiv0_1->GetData().m_Inputs[0]); + armnn::INeonTensorHandle* activ1_0Im = dynamic_cast(wlActiv1_0->GetData().m_Inputs[0]); + armnn::INeonTensorHandle* activ1_1Im = dynamic_cast(wlActiv1_1->GetData().m_Inputs[0]); + + + BOOST_TEST(sOut0); + BOOST_TEST(sOut1); + BOOST_TEST(activ0_0Im); + BOOST_TEST(activ0_1Im); + BOOST_TEST(activ1_0Im); + BOOST_TEST(activ1_1Im); + + bool validDataPointers = (sOut0 == activ0_0Im) && (sOut0 == activ0_1Im) && + (sOut1 == activ1_0Im) && (sOut1 == activ1_1Im); + + BOOST_TEST(validDataPointers); +} + 
+BOOST_AUTO_TEST_CASE(CreateMemCopyWorkloadsNeon) +{ + NeonWorkloadFactory factory; + CreateMemCopyWorkloads(factory); +} + +template +static void NeonCreateL2NormalizationWorkloadTest(DataLayout dataLayout) +{ + Graph graph; + NeonWorkloadFactory factory; + auto workload = CreateL2NormalizationWorkloadTest(factory, graph, dataLayout); + + // Checks that inputs/outputs are as we expect them (see definition of CreateNormalizationWorkloadTest). + L2NormalizationQueueDescriptor queueDescriptor = workload->GetData(); + auto inputHandle = boost::polymorphic_downcast(queueDescriptor.m_Inputs[0]); + auto outputHandle = boost::polymorphic_downcast(queueDescriptor.m_Outputs[0]); + BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo({ 5, 20, 50, 67 }, DataType))); + BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo({ 5, 20, 50, 67 }, DataType))); +} + +#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC +BOOST_AUTO_TEST_CASE(CreateL2NormalizationFloat16NchwWorkload) +{ + NeonCreateL2NormalizationWorkloadTest(DataLayout::NCHW); +} + +BOOST_AUTO_TEST_CASE(CreateL2NormalizationFloat16NhwcWorkload) +{ + NeonCreateL2NormalizationWorkloadTest(DataLayout::NHWC); +} +#endif + +BOOST_AUTO_TEST_CASE(CreateL2NormalizationNchwWorkload) +{ + NeonCreateL2NormalizationWorkloadTest(DataLayout::NCHW); +} + +BOOST_AUTO_TEST_CASE(CreateL2NormalizationNhwcWorkload) +{ + NeonCreateL2NormalizationWorkloadTest(DataLayout::NHWC); +} + +BOOST_AUTO_TEST_SUITE_END() diff --git a/src/backends/neon/test/NeonLayerSupportTests.cpp b/src/backends/neon/test/NeonLayerSupportTests.cpp new file mode 100644 index 0000000000..db7897fc28 --- /dev/null +++ b/src/backends/neon/test/NeonLayerSupportTests.cpp @@ -0,0 +1,59 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. 
+// SPDX-License-Identifier: MIT +// + +#include +#include +#include + +#include +#include +#include +#include + +#include + +#include + +BOOST_AUTO_TEST_SUITE(NeonLayerSupport) + +BOOST_AUTO_TEST_CASE(IsLayerSupportedFloat16Neon) +{ + armnn::NeonWorkloadFactory factory; + IsLayerSupportedTests(&factory); +} + +BOOST_AUTO_TEST_CASE(IsLayerSupportedFloat32Neon) +{ + armnn::NeonWorkloadFactory factory; + IsLayerSupportedTests(&factory); +} + +BOOST_AUTO_TEST_CASE(IsLayerSupportedUint8Neon) +{ + armnn::NeonWorkloadFactory factory; + IsLayerSupportedTests(&factory); +} + +BOOST_AUTO_TEST_CASE(IsConvertFp16ToFp32SupportedNeon) +{ + std::string reasonIfUnsupported; + + bool result = IsConvertLayerSupportedTests(reasonIfUnsupported); + + BOOST_CHECK(result); +} + +BOOST_AUTO_TEST_CASE(IsConvertFp32ToFp16SupportedNeon) +{ + std::string reasonIfUnsupported; + + bool result = IsConvertLayerSupportedTests(reasonIfUnsupported); + + BOOST_CHECK(result); +} + +BOOST_AUTO_TEST_SUITE_END() diff --git a/src/backends/neon/test/NeonLayerTests.cpp b/src/backends/neon/test/NeonLayerTests.cpp new file mode 100644 index 0000000000..2d4ee996a4 --- /dev/null +++ b/src/backends/neon/test/NeonLayerTests.cpp @@ -0,0 +1,484 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. 
+// SPDX-License-Identifier: MIT +// + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +BOOST_AUTO_TEST_SUITE(Compute_ArmComputeNeon) +using FactoryType = armnn::NeonWorkloadFactory; + +// ============================================================================ +// UNIT tests + +// Convolution +ARMNN_AUTO_TEST_CASE(SimpleConvolution1d, Convolution1dTest, true) + +ARMNN_AUTO_TEST_CASE(SimpleConvolution2d, SimpleConvolution2d3x5Test, true) +ARMNN_AUTO_TEST_CASE(SimpleConvolution2dSquare, SimpleConvolution2d3x3Test, true) +ARMNN_AUTO_TEST_CASE(UnbiasedConvolution2d, SimpleConvolution2d3x5Test, false) +ARMNN_AUTO_TEST_CASE(UnbiasedConvolution2dSquare, SimpleConvolution2d3x3Test, false) +ARMNN_AUTO_TEST_CASE(SimpleConvolution2dAsymmetricPadding, Convolution2dAsymmetricPaddingTest) + +ARMNN_AUTO_TEST_CASE(SimpleConvolution2dSquareNhwc, SimpleConvolution2d3x3NhwcTest, false) +namespace +{ + +armnn::Convolution2dDescriptor MakeConv2dDesc(uint32_t strideX, uint32_t strideY, + uint32_t padLeft = 0, uint32_t padRight = 0, uint32_t padTop = 0, uint32_t padBottom = 0) +{ + armnn::Convolution2dDescriptor result; + result.m_StrideX = strideX; + result.m_StrideY = strideY; + result.m_PadLeft = padLeft; + result.m_PadRight = padRight; + result.m_PadTop = padTop; + result.m_PadBottom = padBottom; + result.m_BiasEnabled = true; + return result; +} + +} + +BOOST_AUTO_TEST_CASE(Conv2dUtils) +{ + // The only preferred Neon convolution is 1x1 with padding=0 and stride size {1,2,3}. 
+ armnn::TensorShape shape1x1({ 1,1,1,1 }); + armnn::TensorInfo info1x1(shape1x1, armnn::DataType::Float32); + BOOST_TEST(armnn::IsNeonDirectConvolutionPreferred(info1x1, MakeConv2dDesc(1, 1))); + BOOST_TEST(armnn::IsNeonDirectConvolutionPreferred(info1x1, MakeConv2dDesc(1, 2))); + BOOST_TEST(armnn::IsNeonDirectConvolutionPreferred(info1x1, MakeConv2dDesc(1, 3))); + BOOST_TEST(armnn::IsNeonDirectConvolutionPreferred(info1x1, MakeConv2dDesc(2, 1))); + BOOST_TEST(armnn::IsNeonDirectConvolutionPreferred(info1x1, MakeConv2dDesc(2, 2))); + BOOST_TEST(armnn::IsNeonDirectConvolutionPreferred(info1x1, MakeConv2dDesc(2, 3))); + BOOST_TEST(armnn::IsNeonDirectConvolutionPreferred(info1x1, MakeConv2dDesc(3, 1))); + BOOST_TEST(armnn::IsNeonDirectConvolutionPreferred(info1x1, MakeConv2dDesc(3, 2))); + BOOST_TEST(armnn::IsNeonDirectConvolutionPreferred(info1x1, MakeConv2dDesc(3, 3))); + + BOOST_TEST(!armnn::IsNeonDirectConvolutionPreferred(info1x1, MakeConv2dDesc(4, 1))); + BOOST_TEST(!armnn::IsNeonDirectConvolutionPreferred(info1x1, MakeConv2dDesc(4, 5))); + BOOST_TEST(!armnn::IsNeonDirectConvolutionPreferred(info1x1, MakeConv2dDesc(3, 6))); + + // non zero padding is not preferred for direct convolution + BOOST_TEST(!armnn::IsNeonDirectConvolutionPreferred(info1x1, MakeConv2dDesc(1, 1, 1, 0))); + BOOST_TEST(!armnn::IsNeonDirectConvolutionPreferred(info1x1, MakeConv2dDesc(1, 1, 0, 1))); + BOOST_TEST(!armnn::IsNeonDirectConvolutionPreferred(info1x1, MakeConv2dDesc(1, 1, 1, 1))); + + // 2x2 filter not preferred for direct convolution + armnn::TensorShape shape2x2({ 1,1,2,2 }); + armnn::TensorInfo info2x2(shape2x2, armnn::DataType::Float32); + BOOST_TEST(!armnn::IsNeonDirectConvolutionPreferred(info2x2, MakeConv2dDesc(1, 1))); +} + +// Depthwise Convolution +ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2dDepthMul1, DepthwiseConvolution2dDepthMul1Test, true) +ARMNN_AUTO_TEST_CASE(UnbiasedDepthwiseConvolution2dDepthMul1, DepthwiseConvolution2dDepthMul1Test, false) 
+ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2dDepthMul1Uint8, DepthwiseConvolution2dDepthMul1Uint8Test, true) +ARMNN_AUTO_TEST_CASE(UnbiasedDepthwiseConvolution2dDepthMul1Uint8, DepthwiseConvolution2dDepthMul1Uint8Test, false) + +ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2dAsymmetric, DepthwiseConvolution2dAsymmetricTest, true) +ARMNN_AUTO_TEST_CASE(UnbiasedDepthwiseConvolution2dAsymmetric, DepthwiseConvolution2dAsymmetricTest, false) + +namespace +{ + +armnn::DepthwiseConvolution2dDescriptor MakeDepthwiseConv2dDesc(uint32_t strideX, uint32_t strideY, + uint32_t depthMultiplier = 1, uint32_t padLeft = 0, uint32_t padRight = 0, + uint32_t padTop = 0, uint32_t padBottom = 0) +{ + boost::ignore_unused(depthMultiplier); + + armnn::DepthwiseConvolution2dDescriptor desc; + + desc.m_PadLeft = padLeft; + desc.m_PadRight = padRight; + + desc.m_PadTop = padTop; + desc.m_PadBottom = padBottom; + desc.m_StrideX = strideX; + desc.m_StrideY = strideY; + desc.m_BiasEnabled = false; + + return desc; +} + +armnn::TensorInfo CreateOutputTensorInfo(const armnn::TensorInfo& inputInfo, + const armnn::TensorInfo& weightsInfo, + const armnn::DepthwiseConvolution2dDescriptor& descriptor, + armnn::DataType dataType) +{ + const armnn::TensorShape& inputShape = inputInfo.GetShape(); + const armnn::TensorShape& filterShape = weightsInfo.GetShape(); + + unsigned int inWidth = inputShape[3]; + unsigned int inHeight = inputShape[2]; + unsigned int inBatchSize = inputShape[0]; + + unsigned int filterWidth = filterShape[3]; + unsigned int readWidth = (inWidth + descriptor.m_PadLeft + descriptor.m_PadRight) - (filterWidth); + unsigned int outWidth = 1u + (readWidth / descriptor.m_StrideX); + + unsigned int filterHeight = filterShape[2]; + unsigned int readHeight = (inHeight + descriptor.m_PadTop + descriptor.m_PadBottom) - (filterHeight); + unsigned int outHeight = 1u + (readHeight / descriptor.m_StrideY); + unsigned int depthMultiplier = filterShape[0]; + + unsigned int outChannels = filterShape[1] * 
depthMultiplier; + unsigned int outBatchSize = inBatchSize; + + armnn::TensorShape outputShape({outBatchSize, outChannels, outHeight, outWidth}); + return armnn::TensorInfo(outputShape, dataType); +} +} + +BOOST_AUTO_TEST_CASE(DepthwiseConv2dUtils) +{ + const armnn::DataType dataType = armnn::DataType::Float32; + + armnn::TensorInfo inputInfo({1, 1, 10, 10 }, dataType); + armnn::TensorInfo outputInfo; + armnn::TensorInfo weightsInfo3x3({ 1, 1, 3, 3 }, dataType); + armnn::TensorInfo biasesInfo; + + armnn::DepthwiseConvolution2dDescriptor descriptor; + + // Strides supported: 1,2,3 + descriptor = MakeDepthwiseConv2dDesc(1, 1); + outputInfo = CreateOutputTensorInfo(inputInfo, weightsInfo3x3, descriptor, dataType); + BOOST_TEST(armnn::IsDepthwiseConvolutionSupportedNeon(inputInfo, outputInfo, descriptor, + weightsInfo3x3, biasesInfo)); + + descriptor = MakeDepthwiseConv2dDesc(1, 2); + outputInfo = CreateOutputTensorInfo(inputInfo, weightsInfo3x3, descriptor, dataType); + BOOST_TEST(armnn::IsDepthwiseConvolutionSupportedNeon(inputInfo, outputInfo, descriptor, + weightsInfo3x3, biasesInfo)); + + descriptor = MakeDepthwiseConv2dDesc(1, 3); + outputInfo = CreateOutputTensorInfo(inputInfo, weightsInfo3x3, descriptor, dataType); + BOOST_TEST(armnn::IsDepthwiseConvolutionSupportedNeon(inputInfo, outputInfo, descriptor, + weightsInfo3x3, biasesInfo)); + + descriptor = MakeDepthwiseConv2dDesc(2, 1); + outputInfo = CreateOutputTensorInfo(inputInfo, weightsInfo3x3, descriptor, dataType); + BOOST_TEST(armnn::IsDepthwiseConvolutionSupportedNeon(inputInfo, outputInfo, descriptor, + weightsInfo3x3, biasesInfo)); + + descriptor = MakeDepthwiseConv2dDesc(2, 2); + outputInfo = CreateOutputTensorInfo(inputInfo, weightsInfo3x3, descriptor, dataType); + BOOST_TEST(armnn::IsDepthwiseConvolutionSupportedNeon(inputInfo, outputInfo, descriptor, + weightsInfo3x3, biasesInfo)); + + descriptor = MakeDepthwiseConv2dDesc(2, 3); + outputInfo = CreateOutputTensorInfo(inputInfo, weightsInfo3x3, 
descriptor, dataType); + BOOST_TEST(armnn::IsDepthwiseConvolutionSupportedNeon(inputInfo, outputInfo, descriptor, + weightsInfo3x3, biasesInfo)); + + descriptor = MakeDepthwiseConv2dDesc(3, 1); + outputInfo = CreateOutputTensorInfo(inputInfo, weightsInfo3x3, descriptor, dataType); + BOOST_TEST(armnn::IsDepthwiseConvolutionSupportedNeon(inputInfo, outputInfo, descriptor, + weightsInfo3x3, biasesInfo)); + + descriptor = MakeDepthwiseConv2dDesc(3, 2); + outputInfo = CreateOutputTensorInfo(inputInfo, weightsInfo3x3, descriptor, dataType); + BOOST_TEST(armnn::IsDepthwiseConvolutionSupportedNeon(inputInfo, outputInfo, descriptor, + weightsInfo3x3, biasesInfo)); + + descriptor = MakeDepthwiseConv2dDesc(3, 3); + outputInfo = CreateOutputTensorInfo(inputInfo, weightsInfo3x3, descriptor, dataType); + BOOST_TEST(armnn::IsDepthwiseConvolutionSupportedNeon(inputInfo, outputInfo, descriptor, + weightsInfo3x3, biasesInfo)); + + // Supported stride 4 + descriptor = MakeDepthwiseConv2dDesc(4, 1); + outputInfo = CreateOutputTensorInfo(inputInfo, weightsInfo3x3, descriptor, dataType); + BOOST_TEST(armnn::IsDepthwiseConvolutionSupportedNeon(inputInfo, outputInfo, descriptor, + weightsInfo3x3, biasesInfo)); + + // Supported weights shape 1x1 + armnn::TensorInfo weightsInfo1x1({ 1, 1, 1, 1 }, armnn::DataType::Float32); + descriptor = MakeDepthwiseConv2dDesc(1, 1); + outputInfo = CreateOutputTensorInfo(inputInfo, weightsInfo1x1, descriptor, dataType); + BOOST_TEST(armnn::IsDepthwiseConvolutionSupportedNeon(inputInfo, outputInfo, descriptor, + weightsInfo1x1, biasesInfo)); + + // Supported shape 2x2 + armnn::TensorInfo weightsInfo2x2({ 1, 1, 2, 2 }, armnn::DataType::Float32); + descriptor = MakeDepthwiseConv2dDesc(1, 1); + outputInfo = CreateOutputTensorInfo(inputInfo, weightsInfo2x2, descriptor, dataType); + BOOST_TEST(armnn::IsDepthwiseConvolutionSupportedNeon(inputInfo, outputInfo, descriptor, + weightsInfo2x2, biasesInfo)); + + // Asymmetric padding + descriptor = 
MakeDepthwiseConv2dDesc(1, 1, 1, 1, 2, 1, 2); + outputInfo = CreateOutputTensorInfo(inputInfo, weightsInfo3x3, descriptor, dataType); + BOOST_TEST(armnn::IsDepthwiseConvolutionSupportedNeon(inputInfo, outputInfo, descriptor, + weightsInfo3x3, biasesInfo)); +} + +// Pooling +ARMNN_AUTO_TEST_CASE(SimpleMaxPooling2dSize3x3Stride2x4, SimpleMaxPooling2dSize3x3Stride2x4Test, true) +ARMNN_AUTO_TEST_CASE(SimpleMaxPooling2dSize3x3Stride2x4Uint8, SimpleMaxPooling2dSize3x3Stride2x4Uint8Test, true) +ARMNN_AUTO_TEST_CASE(SimpleAveragePooling2d, SimpleAveragePooling2dTest) +ARMNN_AUTO_TEST_CASE(SimpleAveragePooling2dNhwc, SimpleAveragePooling2dNhwcTest) +ARMNN_AUTO_TEST_CASE(SimpleAveragePooling2dUint8, SimpleAveragePooling2dUint8Test) + +ARMNN_AUTO_TEST_CASE(LargeTensorsAveragePooling2d, LargeTensorsAveragePooling2dTest) +ARMNN_AUTO_TEST_CASE(LargeTensorsAveragePooling2dUint8, LargeTensorsAveragePooling2dUint8Test) + +ARMNN_AUTO_TEST_CASE(SimpleL2Pooling2d, SimpleL2Pooling2dTest) +ARMNN_AUTO_TEST_CASE(UNSUPPORTED_SimpleL2Pooling2dUint8, SimpleL2Pooling2dUint8Test) +ARMNN_AUTO_TEST_CASE(L2Pooling2dSize3Stride1, L2Pooling2dSize3Stride1Test) +ARMNN_AUTO_TEST_CASE(UNSUPPORTED_L2Pooling2dSize3Stride1Uint8, L2Pooling2dSize3Stride1Uint8Test) +ARMNN_AUTO_TEST_CASE(L2Pooling2dSize3Stride3, L2Pooling2dSize3Stride3Test) +ARMNN_AUTO_TEST_CASE(UNSUPPORTED_L2Pooling2dSize3Stride3Uint8, L2Pooling2dSize3Stride3Uint8Test) +ARMNN_AUTO_TEST_CASE(L2Pooling2dSize3Stride4, L2Pooling2dSize3Stride4Test) +ARMNN_AUTO_TEST_CASE(UNSUPPORTED_L2Pooling2dSize3Stride4Uint8, L2Pooling2dSize3Stride4Uint8Test) +ARMNN_AUTO_TEST_CASE(L2Pooling2dSize7, L2Pooling2dSize7Test) +ARMNN_AUTO_TEST_CASE(UNSUPPORTED_L2Pooling2dSize7Uint8, L2Pooling2dSize7Uint8Test) +ARMNN_AUTO_TEST_CASE(L2Pooling2dSize9, L2Pooling2dSize9Test) +ARMNN_AUTO_TEST_CASE(UNSUPPORTED_L2Pooling2dSize9Uint8, L2Pooling2dSize9Uint8Test) + +// Ignore padding values for pooling but count padding fields into the divisor 
+ARMNN_AUTO_TEST_CASE(IgnorePaddingSimpleMaxPooling2d, IgnorePaddingSimpleMaxPooling2dTest) +ARMNN_AUTO_TEST_CASE(IgnorePaddingSimpleMaxPooling2dUint8, IgnorePaddingSimpleMaxPooling2dUint8Test) +ARMNN_AUTO_TEST_CASE(IgnorePaddingMaxPooling2dSize3, IgnorePaddingMaxPooling2dSize3Test) +ARMNN_AUTO_TEST_CASE(IgnorePaddingMaxPooling2dSize3Uint8, IgnorePaddingMaxPooling2dSize3Uint8Test) + +ARMNN_AUTO_TEST_CASE(IgnorePaddingSimpleAveragePooling2d, IgnorePaddingSimpleAveragePooling2dTest) +ARMNN_AUTO_TEST_CASE(IgnorePaddingSimpleAveragePooling2dUint8, IgnorePaddingSimpleAveragePooling2dUint8Test) +ARMNN_AUTO_TEST_CASE(IgnorePaddingSimpleAveragePooling2dNoPadding, IgnorePaddingSimpleAveragePooling2dNoPaddingTest) +ARMNN_AUTO_TEST_CASE(IgnorePaddingSimpleAveragePooling2dNoPaddingUint8, + IgnorePaddingSimpleAveragePooling2dNoPaddingUint8Test) +ARMNN_AUTO_TEST_CASE(IgnorePaddingAveragePooling2dSize3, IgnorePaddingAveragePooling2dSize3Test) +ARMNN_AUTO_TEST_CASE(IgnorePaddingAveragePooling2dSize3Uint8, IgnorePaddingAveragePooling2dSize3Uint8Test) +ARMNN_AUTO_TEST_CASE(IgnorePaddingAveragePooling2dSize3x2Stride2x2, + IgnorePaddingAveragePooling2dSize3x2Stride2x2Test, false) +ARMNN_AUTO_TEST_CASE(IgnorePaddingAveragePooling2dSize3x2Stride2x2NoPadding, + IgnorePaddingAveragePooling2dSize3x2Stride2x2Test, + true) + +ARMNN_AUTO_TEST_CASE(IgnorePaddingSimpleL2Pooling2d, IgnorePaddingSimpleL2Pooling2dTest) +ARMNN_AUTO_TEST_CASE(UNSUPPORTED_IgnorePaddingSimpleL2Pooling2dUint8, IgnorePaddingSimpleL2Pooling2dUint8Test) +ARMNN_AUTO_TEST_CASE(IgnorePaddingL2Pooling2dSize3, IgnorePaddingL2Pooling2dSize3Test) +ARMNN_AUTO_TEST_CASE(UNSUPPORTED_IgnorePaddingL2Pooling2dSize3Uint8, IgnorePaddingL2Pooling2dSize3Uint8Test) + +// Activation +ARMNN_AUTO_TEST_CASE(ConstantLinearActivation, ConstantLinearActivationTest) + +ARMNN_AUTO_TEST_CASE(SimpleSoftmaxBeta1, SimpleSoftmaxTest, 1.0f) +ARMNN_AUTO_TEST_CASE(SimpleSoftmaxBeta2, SimpleSoftmaxTest, 2.0f) + +ARMNN_AUTO_TEST_CASE(SimpleSoftmaxBeta1Uint8, 
SimpleSoftmaxUint8Test, 1.0f) +ARMNN_AUTO_TEST_CASE(SimpleSoftmaxBeta2Uint8, SimpleSoftmaxUint8Test, 2.0f) + +ARMNN_AUTO_TEST_CASE(ReLu1Uint8, BoundedReLuUint8UpperAndLowerBoundTest) +ARMNN_AUTO_TEST_CASE(ReLu6Uint8, BoundedReLuUint8UpperBoundOnlyTest) + +// Softmax +BOOST_AUTO_TEST_CASE(Softmax4dSupport) +{ + const unsigned int numDimensions = 4u; + std::array dimensionSizes; + dimensionSizes.fill(1u); + + const armnn::TensorInfo inputInfo(numDimensions, &dimensionSizes.front(), armnn::DataType::Float32); + const armnn::TensorInfo outputInfo(numDimensions, &dimensionSizes.front(), armnn::DataType::Float32); + + // 4D Softmax should be reported as unsupported on the NEON backend + BOOST_TEST(!armnn::IsSoftmaxSupportedNeon(inputInfo, outputInfo, armnn::SoftmaxDescriptor())); +} + +// Splitter +ARMNN_AUTO_TEST_CASE(SimpleSplitter, SplitterTest) +ARMNN_AUTO_TEST_CASE(SimpleSplitterUint8, SplitterUint8Test) + +ARMNN_AUTO_TEST_CASE(CopyViaSplitter, CopyViaSplitterTest) +ARMNN_AUTO_TEST_CASE(CopyViaSplitterUint8, CopyViaSplitterUint8Test) + +// Merger +ARMNN_AUTO_TEST_CASE(SimpleMerger, MergerTest) +ARMNN_AUTO_TEST_CASE(MergerUint8, MergerUint8Test) + +// Fully Connected +ARMNN_AUTO_TEST_CASE(SimpleFullyConnected, FullyConnectedFloat32Test, false, false) +ARMNN_AUTO_TEST_CASE(SimpleFullyConnectedWithBias, FullyConnectedFloat32Test, true, false) +ARMNN_AUTO_TEST_CASE(SimpleFullyConnectedWithTranspose, FullyConnectedFloat32Test, false, true) +ARMNN_AUTO_TEST_CASE(FullyConnectedLarge, FullyConnectedLargeTest, false) +ARMNN_AUTO_TEST_CASE(FullyConnectedLargeTransposed, FullyConnectedLargeTest, true) +ARMNN_AUTO_TEST_CASE(FullyConnectedUint8, FullyConnectedUint8Test, false) +ARMNN_AUTO_TEST_CASE(FullyConnectedBiasedUint8, FullyConnectedUint8Test, true) + +// Add +ARMNN_AUTO_TEST_CASE(SimpleAdd, AdditionTest) +ARMNN_AUTO_TEST_CASE(AddBroadcast, AdditionBroadcastTest) +ARMNN_AUTO_TEST_CASE(AddBroadcast1Element, AdditionBroadcast1ElementTest) + +// Sub 
+ARMNN_AUTO_TEST_CASE(SimpleSub, SubtractionTest) + +// Mul +ARMNN_AUTO_TEST_CASE(SimpleMultiplication, MultiplicationTest) +ARMNN_AUTO_TEST_CASE(MultiplicationBroadcast1Element, MultiplicationBroadcast1ElementTest) +ARMNN_AUTO_TEST_CASE(MultiplicationBroadcast1DVector, MultiplicationBroadcast1DVectorTest) + +// Batch Norm +ARMNN_AUTO_TEST_CASE(BatchNorm, BatchNormTest) + +// Constant +ARMNN_AUTO_TEST_CASE(Constant, ConstantTest) +ARMNN_AUTO_TEST_CASE(ConstantUint8, ConstantTestUint8) + +// Concatenation +ARMNN_AUTO_TEST_CASE(Concatenation1d, Concatenation1dTest) +ARMNN_AUTO_TEST_CASE(Concatenation1dUint8, Concatenation1dUint8Test) + +ARMNN_AUTO_TEST_CASE(Concatenation2dDim0, Concatenation2dDim0Test) +ARMNN_AUTO_TEST_CASE(Concatenation2dDim0Uint8, Concatenation2dDim0Uint8Test) +ARMNN_AUTO_TEST_CASE(Concatenation2dDim1, Concatenation2dDim1Test) +ARMNN_AUTO_TEST_CASE(Concatenation2dDim1Uint8, Concatenation2dDim1Uint8Test) + +ARMNN_AUTO_TEST_CASE(Concatenation2dDim0DiffInputDims, Concatenation2dDim0DiffInputDimsTest) +ARMNN_AUTO_TEST_CASE(Concatenation2dDim0DiffInputDimsUint8, Concatenation2dDim0DiffInputDimsUint8Test) +ARMNN_AUTO_TEST_CASE(Concatenation2dDim1DiffInputDims, Concatenation2dDim1DiffInputDimsTest) +ARMNN_AUTO_TEST_CASE(Concatenation2dDim1DiffInputDimsUint8, Concatenation2dDim1DiffInputDimsUint8Test) + +ARMNN_AUTO_TEST_CASE(Concatenation3dDim0, Concatenation3dDim0Test) +ARMNN_AUTO_TEST_CASE(Concatenation3dDim0Uint8, Concatenation3dDim0Uint8Test) +ARMNN_AUTO_TEST_CASE(Concatenation3dDim1, Concatenation3dDim1Test) +ARMNN_AUTO_TEST_CASE(Concatenation3dDim1Uint8, Concatenation3dDim1Uint8Test) +ARMNN_AUTO_TEST_CASE(Concatenation3dDim2, Concatenation3dDim2Test) +ARMNN_AUTO_TEST_CASE(Concatenation3dDim2Uint8, Concatenation3dDim2Uint8Test) + +ARMNN_AUTO_TEST_CASE(Concatenation3dDim0DiffInputDims, Concatenation3dDim0DiffInputDimsTest) +ARMNN_AUTO_TEST_CASE(Concatenation3dDim0DiffInputDimsUint8, Concatenation3dDim0DiffInputDimsUint8Test) 
+ARMNN_AUTO_TEST_CASE(Concatenation3dDim1DiffInputDims, Concatenation3dDim1DiffInputDimsTest) +ARMNN_AUTO_TEST_CASE(Concatenation3dDim1DiffInputDimsUint8, Concatenation3dDim1DiffInputDimsUint8Test) +ARMNN_AUTO_TEST_CASE(Concatenation3dDim2DiffInputDims, Concatenation3dDim2DiffInputDimsTest) +ARMNN_AUTO_TEST_CASE(Concatenation3dDim2DiffInputDimsUint8, Concatenation3dDim2DiffInputDimsUint8Test) + +// L2 Normalization +ARMNN_AUTO_TEST_CASE(L2Normalization1d, L2Normalization1dTest) +ARMNN_AUTO_TEST_CASE(L2Normalization2d, L2Normalization2dTest) +ARMNN_AUTO_TEST_CASE(L2Normalization3d, L2Normalization3dTest) +ARMNN_AUTO_TEST_CASE(L2Normalization4d, L2Normalization4dTest) + +ARMNN_AUTO_TEST_CASE(L2Normalization1dNhwc, L2Normalization1dNhwcTest) +ARMNN_AUTO_TEST_CASE(L2Normalization2dNhwc, L2Normalization2dNhwcTest) +ARMNN_AUTO_TEST_CASE(L2Normalization3dNhwc, L2Normalization3dNhwcTest) +ARMNN_AUTO_TEST_CASE(L2Normalization4dNhwc, L2Normalization4dNhwcTest) + +// Floor +ARMNN_AUTO_TEST_CASE(SimpleFloor, SimpleFloorTest) + +// Reshape +ARMNN_AUTO_TEST_CASE(SimpleReshapeFloat32, SimpleReshapeFloat32Test) +ARMNN_AUTO_TEST_CASE(SimpleReshapeUint8, SimpleReshapeUint8Test) + +// Permute +ARMNN_AUTO_TEST_CASE(SimplePermuteFloat32, SimplePermuteFloat32Test) +ARMNN_AUTO_TEST_CASE(SimplePermuteUint8, SimplePermuteUint8Test) +ARMNN_AUTO_TEST_CASE(PermuteFloat32ValueSet1, PermuteFloat32ValueSet1Test) +ARMNN_AUTO_TEST_CASE(PermuteFloat32ValueSet2, PermuteFloat32ValueSet2Test) +ARMNN_AUTO_TEST_CASE(PermuteFloat32ValueSet3, PermuteFloat32ValueSet3Test) + +// Lstm +ARMNN_AUTO_TEST_CASE(LstmLayerFloat32WithCifgWithPeepholeNoProjection, + LstmLayerFloat32WithCifgWithPeepholeNoProjectionTest) +ARMNN_AUTO_TEST_CASE(LstmLayerFloat32NoCifgNoPeepholeNoProjection, + LstmLayerFloat32NoCifgNoPeepholeNoProjectionTest) +ARMNN_AUTO_TEST_CASE(LstmLayerFloat32NoCifgWithPeepholeWithProjection, + LstmLayerFloat32NoCifgWithPeepholeWithProjectionTest) + +// Normalization 
+ARMNN_AUTO_TEST_CASE(SimpleNormalizationAcross, SimpleNormalizationAcrossTest) +ARMNN_AUTO_TEST_CASE(SimpleNormalizationWithin, SimpleNormalizationWithinTest) +ARMNN_AUTO_TEST_CASE(SimpleNormalizationAcrossNhwc, SimpleNormalizationAcrossNhwcTest) + +// ============================================================================ +// COMPARE tests + +ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareConv2dWithReference, CompareConvolution2dTest) + +ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareDepthwiseConv2dWithReferenceFloat32, CompareDepthwiseConvolution2dTest) +ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareDepthwiseConv2dWithReferenceUint8, CompareDepthwiseConvolution2dTest) + +ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareNormalizationWithinWithReference, CompareNormalizationTest, + armnn::NormalizationAlgorithmChannel::Within, + armnn::NormalizationAlgorithmMethod::LocalBrightness) +ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareNormalizationAcrossWithReference, CompareNormalizationTest, + armnn::NormalizationAlgorithmChannel::Across, + armnn::NormalizationAlgorithmMethod::LocalBrightness) + +ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareMaxPooling2dWithReference, ComparePooling2dTest, armnn::PoolingAlgorithm::Max) +ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareMaxPooling2dWithReferenceUint8, ComparePooling2dUint8Test, + armnn::PoolingAlgorithm::Max) +ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareAveragePooling2dWithReference, ComparePooling2dTest, + armnn::PoolingAlgorithm::Average) +ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareAveragePooling2dWithReferenceUint8, ComparePooling2dUint8Test, + armnn::PoolingAlgorithm::Average) +ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareL2Pooling2dWithReference, ComparePooling2dTest, armnn::PoolingAlgorithm::L2) +ARMNN_COMPARE_REF_AUTO_TEST_CASE(UNSUPPORTED_CompareL2Pooling2dWithReferenceUint8, ComparePooling2dUint8Test, + armnn::PoolingAlgorithm::L2) + +ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareSoftmaxBeta1WithReference, CompareSoftmaxTest, 1.0f) 
+ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareSoftmaxBeta2WithReference, CompareSoftmaxTest, 2.0f) + +ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareSoftmaxUint8Beta1WithReference, CompareSoftmaxUint8Test, 1.0f) +ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareSoftmaxUint8Beta2WithReference, CompareSoftmaxUint8Test, 2.0f) + +ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareAddition, CompareAdditionTest) + +ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareMultiplicationWithReference, CompareMultiplicationTest) + +ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareBatchNorm, CompareBatchNormTest) + +ARMNN_COMPARE_REF_AUTO_TEST_CASE(ReLu1, CompareBoundedReLuTest, 1.0f, -1.0f) +ARMNN_COMPARE_REF_AUTO_TEST_CASE(ReLu6, CompareBoundedReLuTest, 6.0f, 0.0f) + +// ============================================================================ +// FIXTURE tests + +ARMNN_COMPARE_REF_FIXTURE_TEST_CASE(CompareSigmoidActivationWithReference, ActivationFixture, + CompareActivationTest, armnn::ActivationFunction::Sigmoid, 5u) + +ARMNN_COMPARE_REF_FIXTURE_TEST_CASE(CompareTanhActivationWithReference, ActivationFixture, + CompareActivationTest, armnn::ActivationFunction::TanH, 5u) + +ARMNN_COMPARE_REF_FIXTURE_TEST_CASE(CompareLinearActivationWithReference, ActivationFixture, + CompareActivationTest, armnn::ActivationFunction::Linear, 5u) + +ARMNN_COMPARE_REF_FIXTURE_TEST_CASE(CompareReLuActivationWithReference, ActivationFixture, + CompareActivationTest, armnn::ActivationFunction::ReLu, 5u) + +ARMNN_COMPARE_REF_FIXTURE_TEST_CASE(CompareBoundedReLuActivationWithReference, ActivationFixture, + CompareActivationTest, armnn::ActivationFunction::BoundedReLu, 5u) +ARMNN_COMPARE_REF_FIXTURE_TEST_CASE(CompareBoundedReLuActivationWithReferenceUint8, ActivationFixture, + CompareActivationUint8Test, armnn::ActivationFunction::BoundedReLu) + +ARMNN_COMPARE_REF_FIXTURE_TEST_CASE(CompareSoftReLuActivationWithReference, ActivationFixture, + CompareActivationTest, armnn::ActivationFunction::SoftReLu, 1u) + 
+ARMNN_COMPARE_REF_FIXTURE_TEST_CASE(CompareLeakyReLuActivationWithReference, ActivationFixture, + CompareActivationTest, armnn::ActivationFunction::LeakyReLu, 5u) + +ARMNN_COMPARE_REF_FIXTURE_TEST_CASE(CompareAbsActivationWithReference, ActivationFixture, + CompareActivationTest, armnn::ActivationFunction::Abs, 5u) + +ARMNN_COMPARE_REF_FIXTURE_TEST_CASE(CompareSqrtActivationWithReference, PositiveActivationFixture, + CompareActivationTest, armnn::ActivationFunction::Sqrt, 5u) + +ARMNN_COMPARE_REF_FIXTURE_TEST_CASE(CompareSquareActivationWithReference, ActivationFixture, + CompareActivationTest, armnn::ActivationFunction::Square, 5u) +BOOST_AUTO_TEST_SUITE_END() diff --git a/src/backends/reference/CMakeLists.txt b/src/backends/reference/CMakeLists.txt index 9810cf86e2..5aa3fc27f5 100644 --- a/src/backends/reference/CMakeLists.txt +++ b/src/backends/reference/CMakeLists.txt @@ -17,5 +17,5 @@ target_include_directories(armnnRefBackend PRIVATE ${PROJECT_SOURCE_DIR}/src) target_include_directories(armnnRefBackend PRIVATE ${PROJECT_SOURCE_DIR}/src/armnn) target_include_directories(armnnRefBackend PRIVATE ${PROJECT_SOURCE_DIR}/src/armnnUtils) -add_subdirectory(workloads test) - +add_subdirectory(workloads) +add_subdirectory(test) diff --git a/src/backends/reference/backend.cmake b/src/backends/reference/backend.cmake index 95e72a438d..5ae088a124 100644 --- a/src/backends/reference/backend.cmake +++ b/src/backends/reference/backend.cmake @@ -5,3 +5,4 @@ add_subdirectory(${PROJECT_SOURCE_DIR}/src/backends/reference) list(APPEND armnnLibraries armnnRefBackend armnnRefBackendWorkloads) +list(APPEND armnnUnitTestLibraries armnnRefBackendUnitTests) diff --git a/src/backends/reference/test/CMakeLists.txt b/src/backends/reference/test/CMakeLists.txt index f41a074999..8f86f86d39 100644 --- a/src/backends/reference/test/CMakeLists.txt +++ b/src/backends/reference/test/CMakeLists.txt @@ -2,3 +2,14 @@ # Copyright © 2017 Arm Ltd. All rights reserved. 
# SPDX-License-Identifier: MIT # + +list(APPEND armnnRefBackendUnitTests_sources + RefCreateWorkloadTests.cpp + RefLayerSupportTests.cpp + RefLayerTests.cpp +) + +add_library(armnnRefBackendUnitTests STATIC ${armnnRefBackendUnitTests_sources}) +target_include_directories(armnnRefBackendUnitTests PRIVATE ${PROJECT_SOURCE_DIR}/src) +target_include_directories(armnnRefBackendUnitTests PRIVATE ${PROJECT_SOURCE_DIR}/src/armnn) +target_include_directories(armnnRefBackendUnitTests PRIVATE ${PROJECT_SOURCE_DIR}/src/armnnUtils) \ No newline at end of file diff --git a/src/backends/reference/test/RefCreateWorkloadTests.cpp b/src/backends/reference/test/RefCreateWorkloadTests.cpp new file mode 100644 index 0000000000..e88fbed014 --- /dev/null +++ b/src/backends/reference/test/RefCreateWorkloadTests.cpp @@ -0,0 +1,484 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// SPDX-License-Identifier: MIT +// + +#include + +#include +#include +#include + +namespace +{ + +template +void CheckInputOutput(std::unique_ptr workload, const TensorInfo& inputInfo, const TensorInfo& outputInfo) +{ + auto queueDescriptor = workload->GetData(); + auto inputHandle = boost::polymorphic_downcast(queueDescriptor.m_Inputs[0]); + auto outputHandle = boost::polymorphic_downcast(queueDescriptor.m_Outputs[0]); + BOOST_TEST((inputHandle->GetTensorInfo() == inputInfo)); + BOOST_TEST((outputHandle->GetTensorInfo() == outputInfo)); +} + +template +void CheckInputsOutput(std::unique_ptr workload, + const TensorInfo& inputInfo0, + const TensorInfo& inputInfo1, + const TensorInfo& outputInfo) +{ + auto queueDescriptor = workload->GetData(); + auto inputHandle0 = boost::polymorphic_downcast(queueDescriptor.m_Inputs[0]); + auto inputHandle1 = boost::polymorphic_downcast(queueDescriptor.m_Inputs[1]); + auto outputHandle = boost::polymorphic_downcast(queueDescriptor.m_Outputs[0]); + BOOST_TEST((inputHandle0->GetTensorInfo() == inputInfo0)); + BOOST_TEST((inputHandle1->GetTensorInfo() == inputInfo1)); + 
BOOST_TEST((outputHandle->GetTensorInfo() == outputInfo)); +} +} + +BOOST_AUTO_TEST_SUITE(CreateWorkloadRef) + +template +static void RefCreateActivationWorkloadTest() +{ + Graph graph; + RefWorkloadFactory factory; + auto workload = CreateActivationWorkloadTest(factory, graph); + + // Checks that outputs are as we expect them (see definition of CreateActivationWorkloadTest). + CheckInputOutput(std::move(workload), + TensorInfo({ 1, 1 }, DataType), + TensorInfo({ 1, 1 }, DataType)); +} + +BOOST_AUTO_TEST_CASE(CreateActivationFloat32Workload) +{ + RefCreateActivationWorkloadTest(); +} + +BOOST_AUTO_TEST_CASE(CreateActivationUint8Workload) +{ + RefCreateActivationWorkloadTest(); +} + +template +static void RefCreateArithmethicWorkloadTest() +{ + Graph graph; + RefWorkloadFactory factory; + auto workload = CreateArithmeticWorkloadTest(factory, graph); + + CheckInputsOutput(std::move(workload), + TensorInfo({ 2, 3 }, DataType), + TensorInfo({ 2, 3 }, DataType), + TensorInfo({ 2, 3 }, DataType)); +} + +BOOST_AUTO_TEST_CASE(CreateAdditionFloatWorkload) +{ + RefCreateArithmethicWorkloadTest(); +} + +BOOST_AUTO_TEST_CASE(CreateAdditionUint8Workload) +{ + RefCreateArithmethicWorkloadTest(); +} + +BOOST_AUTO_TEST_CASE(CreateSubtractionFloatWorkload) +{ + RefCreateArithmethicWorkloadTest(); +} + +BOOST_AUTO_TEST_CASE(CreateSubtractionUint8Workload) +{ + RefCreateArithmethicWorkloadTest(); +} + +BOOST_AUTO_TEST_CASE(CreateMultiplicationFloatWorkload) +{ + RefCreateArithmethicWorkloadTest(); +} + +BOOST_AUTO_TEST_CASE(CreateMultiplicationUint8Workload) +{ + RefCreateArithmethicWorkloadTest(); +} + +BOOST_AUTO_TEST_CASE(CreateDivisionFloatWorkload) +{ + RefCreateArithmethicWorkloadTest(); +} + +BOOST_AUTO_TEST_CASE(CreateDivisionUint8Workload) +{ + RefCreateArithmethicWorkloadTest(); +} + +BOOST_AUTO_TEST_CASE(CreateBatchNormalizationWorkload) +{ + Graph graph; + RefWorkloadFactory factory; + auto workload = CreateBatchNormalizationWorkloadTest + (factory, graph); + + // Checks 
that outputs and inputs are as we expect them (see definition of CreateBatchNormalizationWorkloadTest). + CheckInputOutput( + std::move(workload), TensorInfo({2, 3, 1, 1}, DataType::Float32), TensorInfo({2, 3, 1, 1}, DataType::Float32)); +} + +BOOST_AUTO_TEST_CASE(CreateConvertFp16ToFp32Float32Workload) +{ + Graph graph; + RefWorkloadFactory factory; + auto workload = CreateConvertFp16ToFp32WorkloadTest(factory, graph); + + // Checks that outputs and inputs are as we expect them + CheckInputOutput( + std::move(workload), TensorInfo({1, 3, 2, 3}, DataType::Float16), TensorInfo({1, 3, 2, 3}, DataType::Float32)); +} + +BOOST_AUTO_TEST_CASE(CreateConvertFp32ToFp16Float16Workload) +{ + Graph graph; + RefWorkloadFactory factory; + auto workload = CreateConvertFp32ToFp16WorkloadTest(factory, graph); + + // Checks that outputs and inputs are as we expect them + CheckInputOutput( + std::move(workload), TensorInfo({1, 3, 2, 3}, DataType::Float32), TensorInfo({1, 3, 2, 3}, DataType::Float16)); +} + +BOOST_AUTO_TEST_CASE(CreateConvolution2dWorkload) +{ + Graph graph; + RefWorkloadFactory factory; + auto workload = CreateConvolution2dWorkloadTest(factory, graph); + + // Checks that outputs and inputs are as we expect them (see definition of CreateConvolution2dWorkloadTest). + CheckInputOutput(std::move(workload), + TensorInfo({2, 3, 8, 16}, DataType::Float32), + TensorInfo({2, 2, 2, 10}, DataType::Float32)); +} + +BOOST_AUTO_TEST_CASE(CreateDepthwiseConvolution2dWorkload) +{ + Graph graph; + RefWorkloadFactory factory; + auto workload = + CreateDepthwiseConvolution2dWorkloadTest(factory, graph); + + // Checks that outputs and inputs are as we expect them (see definition of CreateConvolution2dWorkloadTest). 
+ CheckInputOutput(std::move(workload), + TensorInfo({2, 3, 8, 16}, DataType::Float32), + TensorInfo({2, 9, 2, 10}, DataType::Float32)); +} + +template +static void RefCreateFullyConnectedWorkloadTest() +{ + Graph graph; + RefWorkloadFactory factory; + auto workload = CreateFullyConnectedWorkloadTest(factory, graph); + + // Checks that outputs and inputs are as we expect them (see definition of CreateFullyConnectedWorkloadTest). + float inputsQScale = DataType == armnn::DataType::QuantisedAsymm8 ? 1.0f : 0.0; + float outputQScale = DataType == armnn::DataType::QuantisedAsymm8 ? 2.0f : 0.0; + CheckInputOutput(std::move(workload), + TensorInfo({ 3, 1, 4, 5 }, DataType, inputsQScale), + TensorInfo({ 3, 7 }, DataType, outputQScale)); +} + +BOOST_AUTO_TEST_CASE(CreateFullyConnectedFloat32Workload) +{ + RefCreateFullyConnectedWorkloadTest(); +} + +BOOST_AUTO_TEST_CASE(CreateFullyConnectedUint8Workload) +{ + RefCreateFullyConnectedWorkloadTest(); +} + +template +static void RefCreateNormalizationWorkloadTest() +{ + Graph graph; + RefWorkloadFactory factory; + auto workload = CreateNormalizationWorkloadTest(factory, graph); + + // Checks that outputs and inputs are as we expect them (see definition of CreateNormalizationWorkloadTest). + CheckInputOutput(std::move(workload), + TensorInfo({3, 5, 5, 1}, DataType), + TensorInfo({3, 5, 5, 1}, DataType)); +} + +BOOST_AUTO_TEST_CASE(CreateRefNormalizationNchwWorkload) +{ + RefCreateNormalizationWorkloadTest(); +} + +template +static void RefCreatePooling2dWorkloadTest() +{ + Graph graph; + RefWorkloadFactory factory; + auto workload = CreatePooling2dWorkloadTest(factory, graph); + + // Checks that outputs and inputs are as we expect them (see definition of CreatePooling2dWorkloadTest). 
+ CheckInputOutput( + std::move(workload), + TensorInfo({3, 2, 5, 5}, DataType), + TensorInfo({3, 2, 2, 4}, DataType)); +} + +BOOST_AUTO_TEST_CASE(CreatePooling2dFloat32Workload) +{ + RefCreatePooling2dWorkloadTest(); +} + +BOOST_AUTO_TEST_CASE(CreatePooling2dUint8Workload) +{ + RefCreatePooling2dWorkloadTest(); +} + +template +static void RefCreateSoftmaxWorkloadTest() +{ + Graph graph; + RefWorkloadFactory factory; + auto workload = CreateSoftmaxWorkloadTest(factory, graph); + + // Checks that outputs and inputs are as we expect them (see definition of CreateSoftmaxWorkloadTest). + CheckInputOutput( + std::move(workload), + TensorInfo({4, 1}, DataType), + TensorInfo({4, 1}, DataType)); +} + +BOOST_AUTO_TEST_CASE(CreateSoftmaxFloat32Workload) +{ + RefCreateSoftmaxWorkloadTest(); +} + +BOOST_AUTO_TEST_CASE(CreateSoftmaxUint8Workload) +{ + RefCreateSoftmaxWorkloadTest(); +} + +template +static void RefCreateSplitterWorkloadTest() +{ + Graph graph; + RefWorkloadFactory factory; + auto workload = CreateSplitterWorkloadTest(factory, graph); + + // Checks that outputs are as we expect them (see definition of CreateSplitterWorkloadTest). 
+ SplitterQueueDescriptor queueDescriptor = workload->GetData(); + auto inputHandle = boost::polymorphic_downcast(queueDescriptor.m_Inputs[0]); + BOOST_TEST((inputHandle->GetTensorInfo() == TensorInfo({ 5, 7, 7 }, DataType))); + + auto outputHandle0 = boost::polymorphic_downcast(queueDescriptor.m_Outputs[0]); + BOOST_TEST((outputHandle0->GetTensorInfo() == TensorInfo({ 1, 7, 7 }, DataType))); + + auto outputHandle1 = boost::polymorphic_downcast(queueDescriptor.m_Outputs[1]); + BOOST_TEST((outputHandle1->GetTensorInfo() == TensorInfo({ 2, 7, 7 }, DataType))); + + auto outputHandle2 = boost::polymorphic_downcast(queueDescriptor.m_Outputs[2]); + BOOST_TEST((outputHandle2->GetTensorInfo() == TensorInfo({ 2, 7, 7 }, DataType))); +} + +BOOST_AUTO_TEST_CASE(CreateSplitterFloat32Workload) +{ + RefCreateSplitterWorkloadTest(); +} + +BOOST_AUTO_TEST_CASE(CreateSplitterUint8Workload) +{ + RefCreateSplitterWorkloadTest(); +} + +template +static void RefCreateSplitterMergerWorkloadTest() +{ + // Tests that it is possible to decide which output of the splitter layer + // should be linked to which input of the merger layer. + // We tested that it is possible to specify 0th output + // of the splitter to be the 1st input to the merger and the 1st output of the splitter to be 0th input + // of the merger. + + Graph graph; + RefWorkloadFactory factory; + auto workloads = CreateSplitterMergerWorkloadTest + (factory, graph); + + auto wlSplitter = std::move(workloads.first); + auto wlMerger = std::move(workloads.second); + + //Checks that the index of inputs/outputs matches what we declared on InputDescriptor construction.
+ armnn::CpuTensorHandle* sOut0 = dynamic_cast(wlSplitter->GetData().m_Outputs[0]); + armnn::CpuTensorHandle* sOut1 = dynamic_cast(wlSplitter->GetData().m_Outputs[1]); + armnn::CpuTensorHandle* mIn0 = dynamic_cast(wlMerger->GetData().m_Inputs[0]); + armnn::CpuTensorHandle* mIn1 = dynamic_cast(wlMerger->GetData().m_Inputs[1]); + + BOOST_TEST(sOut0); + BOOST_TEST(sOut1); + BOOST_TEST(mIn0); + BOOST_TEST(mIn1); + + bool validDataPointers = (sOut0 == mIn1) && (sOut1 == mIn0); + + BOOST_TEST(validDataPointers); +} + +BOOST_AUTO_TEST_CASE(CreateSplitterMergerFloat32) +{ + RefCreateSplitterMergerWorkloadTest(); +} + +BOOST_AUTO_TEST_CASE(CreateSplitterMergerUint8) +{ + RefCreateSplitterMergerWorkloadTest(); +} + +template +static void RefCreateSingleOutputMultipleInputsTest() +{ + // Tests that it is possible to assign multiple (two) different layers to each of the outputs of a splitter layer. + // We created a splitter with two outputs. Each of those outputs is used by two different activation layers.
+ + Graph graph; + RefWorkloadFactory factory; + std::unique_ptr wlSplitter; + std::unique_ptr wlActiv0_0; + std::unique_ptr wlActiv0_1; + std::unique_ptr wlActiv1_0; + std::unique_ptr wlActiv1_1; + + CreateSplitterMultipleInputsOneOutputWorkloadTest(factory, graph, wlSplitter, wlActiv0_0, wlActiv0_1, wlActiv1_0, wlActiv1_1); + + armnn::CpuTensorHandle* sOut0 = dynamic_cast(wlSplitter->GetData().m_Outputs[0]); + armnn::CpuTensorHandle* sOut1 = dynamic_cast(wlSplitter->GetData().m_Outputs[1]); + armnn::CpuTensorHandle* activ0_0Im = dynamic_cast(wlActiv0_0->GetData().m_Inputs[0]); + armnn::CpuTensorHandle* activ0_1Im = dynamic_cast(wlActiv0_1->GetData().m_Inputs[0]); + armnn::CpuTensorHandle* activ1_0Im = dynamic_cast(wlActiv1_0->GetData().m_Inputs[0]); + armnn::CpuTensorHandle* activ1_1Im = dynamic_cast(wlActiv1_1->GetData().m_Inputs[0]); + + + BOOST_TEST(sOut0); + BOOST_TEST(sOut1); + BOOST_TEST(activ0_0Im); + BOOST_TEST(activ0_1Im); + BOOST_TEST(activ1_0Im); + BOOST_TEST(activ1_1Im); + + bool validDataPointers = (sOut0 == activ0_0Im) && (sOut0 == activ0_1Im) && + (sOut1 == activ1_0Im) && (sOut1 == activ1_1Im); + + BOOST_TEST(validDataPointers); +} + +BOOST_AUTO_TEST_CASE(CreateSingleOutputMultipleInputsFloat32) +{ + RefCreateSingleOutputMultipleInputsTest(); +} + +BOOST_AUTO_TEST_CASE(CreateSingleOutputMultipleInputsUint8) +{ + RefCreateSingleOutputMultipleInputsTest(); +} + +template +static void RefCreateResizeBilinearTest() +{ + Graph graph; + RefWorkloadFactory factory; + auto workload = CreateResizeBilinearWorkloadTest(factory, graph); + + // Checks that outputs and inputs are as we expect them (see definition of CreateResizeBilinearWorkloadTest). 
+ CheckInputOutput( + std::move(workload), + TensorInfo({ 2, 3, 4, 4 }, DataType), + TensorInfo({ 2, 3, 2, 2 }, DataType)); +} + +BOOST_AUTO_TEST_CASE(CreateResizeBilinearFloat32) +{ + RefCreateResizeBilinearTest(); +} + +BOOST_AUTO_TEST_CASE(CreateResizeBilinearUint8) +{ + RefCreateResizeBilinearTest(); +} + +BOOST_AUTO_TEST_CASE(CreateL2NormalizationFloat32) +{ + Graph graph; + RefWorkloadFactory factory; + auto workload = CreateL2NormalizationWorkloadTest + (factory, graph); + + // Checks that outputs and inputs are as we expect them (see definition of CreateL2NormalizationWorkloadTest). + CheckInputOutput( + std::move(workload), + TensorInfo({ 5, 20, 50, 67 }, armnn::DataType::Float32), + TensorInfo({ 5, 20, 50, 67 }, armnn::DataType::Float32)); +} + +template +static void RefCreateReshapeWorkloadTest() +{ + Graph graph; + RefWorkloadFactory factory; + auto workload = CreateReshapeWorkloadTest(factory, graph); + + // Checks that outputs and inputs are as we expect them (see definition of CreateReshapeWorkloadTest). + CheckInputOutput( + std::move(workload), + TensorInfo({ 4, 1 }, DataType), + TensorInfo({ 1, 4 }, DataType)); +} + +BOOST_AUTO_TEST_CASE(CreateReshapeFloat32Workload) +{ + RefCreateReshapeWorkloadTest(); +} + +BOOST_AUTO_TEST_CASE(CreateReshapeUint8Workload) +{ + RefCreateReshapeWorkloadTest(); +} + +BOOST_AUTO_TEST_SUITE_END() diff --git a/src/backends/reference/test/RefLayerSupportTests.cpp b/src/backends/reference/test/RefLayerSupportTests.cpp new file mode 100644 index 0000000000..be3f3f8f97 --- /dev/null +++ b/src/backends/reference/test/RefLayerSupportTests.cpp @@ -0,0 +1,118 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. 
+// SPDX-License-Identifier: MIT +// + +#include +#include +#include + +#include +#include +#include +#include + +#include + +#include + +namespace +{ + +bool LayerTypeMatchesTest() +{ + return LayerTypeMatchesTestImpl(Tag()); +}; + +} // anonymous namespace + +BOOST_AUTO_TEST_SUITE(RefLayerSupported) + +BOOST_AUTO_TEST_CASE(IsLayerSupportedLayerTypeMatches) +{ + LayerTypeMatchesTest(); +} + +BOOST_AUTO_TEST_CASE(IsLayerSupportedFloat16Reference) +{ + armnn::RefWorkloadFactory factory; + IsLayerSupportedTests(&factory); +} + +BOOST_AUTO_TEST_CASE(IsLayerSupportedFloat32Reference) +{ + armnn::RefWorkloadFactory factory; + IsLayerSupportedTests(&factory); +} + +BOOST_AUTO_TEST_CASE(IsLayerSupportedUint8Reference) +{ + armnn::RefWorkloadFactory factory; + IsLayerSupportedTests(&factory); +} + +BOOST_AUTO_TEST_CASE(IsConvertFp16ToFp32SupportedReference) +{ + std::string reasonIfUnsupported; + + bool result = IsConvertLayerSupportedTests(reasonIfUnsupported); + + BOOST_CHECK(result); +} + +BOOST_AUTO_TEST_CASE(IsConvertFp16ToFp32SupportedFp32InputReference) +{ + std::string reasonIfUnsupported; + + bool result = IsConvertLayerSupportedTests(reasonIfUnsupported); + + BOOST_CHECK(!result); + BOOST_CHECK_EQUAL(reasonIfUnsupported, "Layer is not supported with float32 data type input"); +} + +BOOST_AUTO_TEST_CASE(IsConvertFp16ToFp32SupportedFp16OutputReference) +{ + std::string reasonIfUnsupported; + + bool result = IsConvertLayerSupportedTests(reasonIfUnsupported); + + BOOST_CHECK(!result); + BOOST_CHECK_EQUAL(reasonIfUnsupported, "Layer is not supported with float16 data type output"); +} + +BOOST_AUTO_TEST_CASE(IsConvertFp32ToFp16SupportedReference) +{ + std::string reasonIfUnsupported; + + bool result = IsConvertLayerSupportedTests(reasonIfUnsupported); + + BOOST_CHECK(result); +} + +BOOST_AUTO_TEST_CASE(IsConvertFp32ToFp16SupportedFp16InputReference) +{ + std::string reasonIfUnsupported; + + bool result = IsConvertLayerSupportedTests(reasonIfUnsupported); + + 
BOOST_CHECK(!result); + BOOST_CHECK_EQUAL(reasonIfUnsupported, "Layer is not supported with float16 data type input"); +} + +BOOST_AUTO_TEST_CASE(IsConvertFp32ToFp16SupportedFp32OutputReference) +{ + std::string reasonIfUnsupported; + + bool result = IsConvertLayerSupportedTests(reasonIfUnsupported); + + BOOST_CHECK(!result); + BOOST_CHECK_EQUAL(reasonIfUnsupported, "Layer is not supported with float32 data type output"); +} + +BOOST_AUTO_TEST_SUITE_END() diff --git a/src/backends/reference/test/RefLayerTests.cpp b/src/backends/reference/test/RefLayerTests.cpp new file mode 100644 index 0000000000..de2c2fe332 --- /dev/null +++ b/src/backends/reference/test/RefLayerTests.cpp @@ -0,0 +1,273 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// SPDX-License-Identifier: MIT +// + +#include "test/TensorHelpers.hpp" +#include "test/UnitTests.hpp" + +#include +#include + +#include + +BOOST_AUTO_TEST_SUITE(Compute_Reference) +using FactoryType = armnn::RefWorkloadFactory; + +// ============================================================================ +// UNIT tests + +// Convolution +ARMNN_AUTO_TEST_CASE(SimpleConvolution2d3x5, SimpleConvolution2d3x5Test, true) +ARMNN_AUTO_TEST_CASE(SimpleConvolution2d3x5Uint8, SimpleConvolution2d3x5Uint8Test, true) + +ARMNN_AUTO_TEST_CASE(UnbiasedConvolution2d, SimpleConvolution2d3x5Test, false) +ARMNN_AUTO_TEST_CASE(UnbiasedConvolutionUint8, SimpleConvolution2d3x5Uint8Test, false) + +ARMNN_AUTO_TEST_CASE(SimpleConvolution1d, Convolution1dTest, true) +ARMNN_AUTO_TEST_CASE(SimpleConvolution1dUint8, Convolution1dUint8Test, true) + +ARMNN_AUTO_TEST_CASE(SimpleConvolution2d3x3, SimpleConvolution2d3x3Test, true) +ARMNN_AUTO_TEST_CASE(SimpleConvolution2d3x3Uint8, SimpleConvolution2d3x3Uint8Test, true) + +ARMNN_AUTO_TEST_CASE(UnbiasedConvolution2dSquare, SimpleConvolution2d3x3Test, false) + +ARMNN_AUTO_TEST_CASE(SimpleConvolution2dAsymmetricPaddingLargerThanHalfKernelSize, + Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTest) 
+ARMNN_AUTO_TEST_CASE(SimpleConvolution2dAsymmetricPadding, Convolution2dAsymmetricPaddingTest) + +// Depthwise Convolution +ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d, DepthwiseConvolution2dTest, true) +ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2dUint8, DepthwiseConvolution2dUint8Test, true) + +ARMNN_AUTO_TEST_CASE(UnbiasedDepthwiseConvolution2d, DepthwiseConvolution2dTest, false) +ARMNN_AUTO_TEST_CASE(UnbiasedDepthwiseConvolution2dUint8, DepthwiseConvolution2dUint8Test, false) + +ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2dDepthMul1, DepthwiseConvolution2dDepthMul1Test, true) +ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2dDepthMul1Uint8, DepthwiseConvolution2dDepthMul1Uint8Test, true) + +ARMNN_AUTO_TEST_CASE(UnbiasedDepthwiseConvolution2dDepthMul1, DepthwiseConvolution2dDepthMul1Test, false) +ARMNN_AUTO_TEST_CASE(UnbiasedDepthwiseConvolution2dDepthMul1Uint8, DepthwiseConvolution2dDepthMul1Uint8Test, false) + +ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2dAsymmetric, DepthwiseConvolution2dAsymmetricTest, true) +ARMNN_AUTO_TEST_CASE(UnbiasedDepthwiseConvolution2dAsymmetric, DepthwiseConvolution2dAsymmetricTest, false) + +// Pooling +ARMNN_AUTO_TEST_CASE(SimpleMaxPooling2dSize2x2Stride2x2, SimpleMaxPooling2dSize2x2Stride2x2Test, false) +ARMNN_AUTO_TEST_CASE(SimpleMaxPooling2dSize2x2Stride2x2Uint8, SimpleMaxPooling2dSize2x2Stride2x2Uint8Test, false) + +ARMNN_AUTO_TEST_CASE(SimpleMaxPooling2dSize3x3Stride2x4, SimpleMaxPooling2dSize3x3Stride2x4Test, false) +ARMNN_AUTO_TEST_CASE(SimpleMaxPooling2dSize3x3Stride2x4Uint8, SimpleMaxPooling2dSize3x3Stride2x4Uint8Test, false) + +ARMNN_AUTO_TEST_CASE(IgnorePaddingSimpleMaxPooling2d, IgnorePaddingSimpleMaxPooling2dTest) +ARMNN_AUTO_TEST_CASE(IgnorePaddingSimpleMaxPooling2dUint8, IgnorePaddingSimpleMaxPooling2dUint8Test) +ARMNN_AUTO_TEST_CASE(IgnorePaddingMaxPooling2dSize3, IgnorePaddingMaxPooling2dSize3Test) +ARMNN_AUTO_TEST_CASE(IgnorePaddingMaxPooling2dSize3Uint8, IgnorePaddingMaxPooling2dSize3Uint8Test) + 
+ARMNN_AUTO_TEST_CASE(IgnorePaddingSimpleAveragePooling2d, IgnorePaddingSimpleAveragePooling2dTest) +ARMNN_AUTO_TEST_CASE(IgnorePaddingSimpleAveragePooling2dUint8, IgnorePaddingSimpleAveragePooling2dUint8Test) +ARMNN_AUTO_TEST_CASE(IgnorePaddingSimpleAveragePooling2dNoPadding, IgnorePaddingSimpleAveragePooling2dNoPaddingTest) +ARMNN_AUTO_TEST_CASE(IgnorePaddingSimpleAveragePooling2dNoPaddingUint8, + IgnorePaddingSimpleAveragePooling2dNoPaddingUint8Test) +ARMNN_AUTO_TEST_CASE(IgnorePaddingAveragePooling2dSize3, IgnorePaddingAveragePooling2dSize3Test) +ARMNN_AUTO_TEST_CASE(IgnorePaddingAveragePooling2dSize3Uint8, IgnorePaddingAveragePooling2dSize3Uint8Test) + +ARMNN_AUTO_TEST_CASE(IgnorePaddingSimpleL2Pooling2d, IgnorePaddingSimpleL2Pooling2dTest) +ARMNN_AUTO_TEST_CASE(IgnorePaddingSimpleL2Pooling2dUint8, IgnorePaddingSimpleL2Pooling2dUint8Test) +ARMNN_AUTO_TEST_CASE(IgnorePaddingL2Pooling2dSize3, IgnorePaddingL2Pooling2dSize3Test) +ARMNN_AUTO_TEST_CASE(IgnorePaddingL2Pooling2dSize3Uint8, IgnorePaddingL2Pooling2dSize3Uint8Test) + +ARMNN_AUTO_TEST_CASE(SimpleAveragePooling2d, SimpleAveragePooling2dTest) +ARMNN_AUTO_TEST_CASE(SimpleAveragePooling2dUint8, SimpleAveragePooling2dUint8Test) +ARMNN_AUTO_TEST_CASE(IgnorePaddingAveragePooling2dSize3x2Stride2x2, + IgnorePaddingAveragePooling2dSize3x2Stride2x2Test, false) +ARMNN_AUTO_TEST_CASE(IgnorePaddingAveragePooling2dSize3x2Stride2x2NoPadding, + IgnorePaddingAveragePooling2dSize3x2Stride2x2Test, true) + +ARMNN_AUTO_TEST_CASE(LargeTensorsAveragePooling2d, LargeTensorsAveragePooling2dTest) +ARMNN_AUTO_TEST_CASE(LargeTensorsAveragePooling2dUint8, LargeTensorsAveragePooling2dUint8Test) + +ARMNN_AUTO_TEST_CASE(SimpleL2Pooling2d, SimpleL2Pooling2dTest) +ARMNN_AUTO_TEST_CASE(SimpleL2Pooling2dUint8, SimpleL2Pooling2dUint8Test) + +ARMNN_AUTO_TEST_CASE(L2Pooling2dSize7, L2Pooling2dSize7Test) +ARMNN_AUTO_TEST_CASE(L2Pooling2dSize7Uint8, L2Pooling2dSize7Uint8Test) + +ARMNN_AUTO_TEST_CASE(AsymmNonSquarePooling2d, 
AsymmetricNonSquarePooling2dTest) +ARMNN_AUTO_TEST_CASE(AsymmNonSquarePooling2dUint8, AsymmetricNonSquarePooling2dUint8Test) + +// Activation +ARMNN_AUTO_TEST_CASE(ConstantLinearActivation, ConstantLinearActivationTest) +ARMNN_AUTO_TEST_CASE(ConstantLinearActivationUint8, ConstantLinearActivationUint8Test) + +ARMNN_AUTO_TEST_CASE(SimpleNormalizationAcross, SimpleNormalizationAcrossTest) +ARMNN_AUTO_TEST_CASE(SimpleNormalizationWithin, SimpleNormalizationWithinTest) + +ARMNN_AUTO_TEST_CASE(SimpleSoftmaxBeta1, SimpleSoftmaxTest, 1.0f) +ARMNN_AUTO_TEST_CASE(SimpleSoftmaxBeta2, SimpleSoftmaxTest, 2.0f) +ARMNN_AUTO_TEST_CASE(SimpleSoftmaxBeta1Uint8, SimpleSoftmaxUint8Test, 1.0f) +ARMNN_AUTO_TEST_CASE(SimpleSoftmaxBeta2Uint8, SimpleSoftmaxUint8Test, 2.0f) + +ARMNN_AUTO_TEST_CASE(SimpleSigmoid, SimpleSigmoidTest) +ARMNN_AUTO_TEST_CASE(SimpleSigmoidUint8, SimpleSigmoidUint8Test) + +ARMNN_AUTO_TEST_CASE(ReLu1, BoundedReLuUpperAndLowerBoundTest) +ARMNN_AUTO_TEST_CASE(ReLu6, BoundedReLuUpperBoundOnlyTest) +ARMNN_AUTO_TEST_CASE(ReLu1Uint8, BoundedReLuUint8UpperAndLowerBoundTest) +ARMNN_AUTO_TEST_CASE(ReLu6Uint8, BoundedReLuUint8UpperBoundOnlyTest) + +// Fully Connected +ARMNN_AUTO_TEST_CASE(SimpleFullyConnected, FullyConnectedFloat32Test, false, false) +ARMNN_AUTO_TEST_CASE(FullyConnectedUint8, FullyConnectedUint8Test, false) +ARMNN_AUTO_TEST_CASE(SimpleFullyConnectedWithBias, FullyConnectedFloat32Test, true, false) +ARMNN_AUTO_TEST_CASE(FullyConnectedBiasedUint8, FullyConnectedUint8Test, true) +ARMNN_AUTO_TEST_CASE(SimpleFullyConnectedWithTranspose, FullyConnectedFloat32Test, false, true) + +ARMNN_AUTO_TEST_CASE(FullyConnectedLarge, FullyConnectedLargeTest, false) +ARMNN_AUTO_TEST_CASE(FullyConnectedLargeTransposed, FullyConnectedLargeTest, true) + +// Splitter +ARMNN_AUTO_TEST_CASE(SimpleSplitter, SplitterTest) +ARMNN_AUTO_TEST_CASE(SimpleSplitterUint8, SplitterUint8Test) + +ARMNN_AUTO_TEST_CASE(CopyViaSplitter, CopyViaSplitterTest) +ARMNN_AUTO_TEST_CASE(CopyViaSplitterUint8,
CopyViaSplitterUint8Test) + +// Merger +ARMNN_AUTO_TEST_CASE(SimpleMerger, MergerTest) +ARMNN_AUTO_TEST_CASE(MergerUint8, MergerUint8Test) + +// Add +ARMNN_AUTO_TEST_CASE(SimpleAdd, AdditionTest) +ARMNN_AUTO_TEST_CASE(AddBroadcast1Element, AdditionBroadcast1ElementTest) +ARMNN_AUTO_TEST_CASE(AddBroadcast, AdditionBroadcastTest) + +ARMNN_AUTO_TEST_CASE(AdditionUint8, AdditionUint8Test) +ARMNN_AUTO_TEST_CASE(AddBroadcastUint8, AdditionBroadcastUint8Test) +ARMNN_AUTO_TEST_CASE(AddBroadcast1ElementUint8, AdditionBroadcast1ElementUint8Test) + +// Sub +ARMNN_AUTO_TEST_CASE(SimpleSub, SubtractionTest) +ARMNN_AUTO_TEST_CASE(SubBroadcast1Element, SubtractionBroadcast1ElementTest) +ARMNN_AUTO_TEST_CASE(SubBroadcast, SubtractionBroadcastTest) + +ARMNN_AUTO_TEST_CASE(SubtractionUint8, SubtractionUint8Test) +ARMNN_AUTO_TEST_CASE(SubBroadcastUint8, SubtractionBroadcastUint8Test) +ARMNN_AUTO_TEST_CASE(SubBroadcast1ElementUint8, SubtractionBroadcast1ElementUint8Test) + +// Div +ARMNN_AUTO_TEST_CASE(SimpleDivision, DivisionTest) +ARMNN_AUTO_TEST_CASE(DivisionByZero, DivisionByZeroTest) +ARMNN_AUTO_TEST_CASE(DivisionBroadcast1Element, DivisionBroadcast1ElementTest) +ARMNN_AUTO_TEST_CASE(DivisionBroadcast1DVector, DivisionBroadcast1DVectorTest) +// NOTE: division by zero for quantized div needs more attention +// see IVGCVSW-1849 +ARMNN_AUTO_TEST_CASE(DivisionUint8, DivisionUint8Test) +ARMNN_AUTO_TEST_CASE(DivisionUint8Broadcast1Element, DivisionBroadcast1ElementUint8Test) +ARMNN_AUTO_TEST_CASE(DivisionUint8Broadcast1DVector, DivisionBroadcast1DVectorUint8Test) + +// Mul +ARMNN_AUTO_TEST_CASE(SimpleMultiplication, MultiplicationTest) +ARMNN_AUTO_TEST_CASE(MultiplicationBroadcast1Element, MultiplicationBroadcast1ElementTest) +ARMNN_AUTO_TEST_CASE(MultiplicationBroadcast1DVector, MultiplicationBroadcast1DVectorTest) +ARMNN_AUTO_TEST_CASE(MultiplicationUint8, MultiplicationUint8Test) +ARMNN_AUTO_TEST_CASE(MultiplicationBroadcast1ElementUint8, MultiplicationBroadcast1ElementUint8Test) 
+ARMNN_AUTO_TEST_CASE(MultiplicationBroadcast1DVectorUint8, MultiplicationBroadcast1DVectorUint8Test) + +// Batch Norm +ARMNN_AUTO_TEST_CASE(BatchNorm, BatchNormTest) +ARMNN_AUTO_TEST_CASE(BatchNormUint8, BatchNormUint8Test) + +// Resize Bilinear +ARMNN_AUTO_TEST_CASE(SimpleResizeBilinear, SimpleResizeBilinearTest) +ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearUint8, SimpleResizeBilinearUint8Test) +ARMNN_AUTO_TEST_CASE(ResizeBilinearNop, ResizeBilinearNopTest) +ARMNN_AUTO_TEST_CASE(ResizeBilinearNopUint8, ResizeBilinearNopUint8Test) +ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMin, ResizeBilinearSqMinTest) +ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinUint8, ResizeBilinearSqMinUint8Test) +ARMNN_AUTO_TEST_CASE(ResizeBilinearMin, ResizeBilinearMinTest) +ARMNN_AUTO_TEST_CASE(ResizeBilinearMinUint8, ResizeBilinearMinUint8Test) +ARMNN_AUTO_TEST_CASE(ResizeBilinearMag, ResizeBilinearMagTest) +ARMNN_AUTO_TEST_CASE(ResizeBilinearMagUint8, ResizeBilinearMagUint8Test) + +// Fake Quantization +ARMNN_AUTO_TEST_CASE(FakeQuantization, FakeQuantizationTest) + +// L2 Normalization +ARMNN_AUTO_TEST_CASE(L2Normalization1d, L2Normalization1dTest) +ARMNN_AUTO_TEST_CASE(L2Normalization2d, L2Normalization2dTest) +ARMNN_AUTO_TEST_CASE(L2Normalization3d, L2Normalization3dTest) +ARMNN_AUTO_TEST_CASE(L2Normalization4d, L2Normalization4dTest) + +// NOTE: These tests are disabled until NHWC is supported by the reference L2Normalization implementation. 
+//ARMNN_AUTO_TEST_CASE(L2Normalization1dNhwc, L2Normalization1dNhwcTest); +//ARMNN_AUTO_TEST_CASE(L2Normalization2dNhwc, L2Normalization2dNhwcTest); +//ARMNN_AUTO_TEST_CASE(L2Normalization3dNhwc, L2Normalization3dNhwcTest); +//ARMNN_AUTO_TEST_CASE(L2Normalization4dNhwc, L2Normalization4dNhwcTest); + +// Constant +ARMNN_AUTO_TEST_CASE(Constant, ConstantTest) +ARMNN_AUTO_TEST_CASE(ConstantUint8, ConstantUint8Test) + +// Concat +ARMNN_AUTO_TEST_CASE(Concatenation1d, Concatenation1dTest) +ARMNN_AUTO_TEST_CASE(Concatenation1dUint8, Concatenation1dUint8Test) + +ARMNN_AUTO_TEST_CASE(Concatenation2dDim0, Concatenation2dDim0Test) +ARMNN_AUTO_TEST_CASE(Concatenation2dDim0Uint8, Concatenation2dDim0Uint8Test) +ARMNN_AUTO_TEST_CASE(Concatenation2dDim1, Concatenation2dDim1Test) +ARMNN_AUTO_TEST_CASE(Concatenation2dDim1Uint8, Concatenation2dDim1Uint8Test) + +ARMNN_AUTO_TEST_CASE(Concatenation2dDim0DiffInputDims, Concatenation2dDim0DiffInputDimsTest) +ARMNN_AUTO_TEST_CASE(Concatenation2dDim0DiffInputDimsUint8, Concatenation2dDim0DiffInputDimsUint8Test) +ARMNN_AUTO_TEST_CASE(Concatenation2dDim1DiffInputDims, Concatenation2dDim1DiffInputDimsTest) +ARMNN_AUTO_TEST_CASE(Concatenation2dDim1DiffInputDimsUint8, Concatenation2dDim1DiffInputDimsUint8Test) + +ARMNN_AUTO_TEST_CASE(Concatenation3dDim0, Concatenation3dDim0Test) +ARMNN_AUTO_TEST_CASE(Concatenation3dDim0Uint8, Concatenation3dDim0Uint8Test) +ARMNN_AUTO_TEST_CASE(Concatenation3dDim1, Concatenation3dDim1Test) +ARMNN_AUTO_TEST_CASE(Concatenation3dDim1Uint8, Concatenation3dDim1Uint8Test) +ARMNN_AUTO_TEST_CASE(Concatenation3dDim2, Concatenation3dDim2Test) +ARMNN_AUTO_TEST_CASE(Concatenation3dDim2Uint8, Concatenation3dDim2Uint8Test) + +ARMNN_AUTO_TEST_CASE(Concatenation3dDim0DiffInputDims, Concatenation3dDim0DiffInputDimsTest) +ARMNN_AUTO_TEST_CASE(Concatenation3dDim0DiffInputDimsUint8, Concatenation3dDim0DiffInputDimsUint8Test) +ARMNN_AUTO_TEST_CASE(Concatenation3dDim1DiffInputDims, Concatenation3dDim1DiffInputDimsTest) 
+ARMNN_AUTO_TEST_CASE(Concatenation3dDim1DiffInputDimsUint8, Concatenation3dDim1DiffInputDimsUint8Test) +ARMNN_AUTO_TEST_CASE(Concatenation3dDim2DiffInputDims, Concatenation3dDim2DiffInputDimsTest) +ARMNN_AUTO_TEST_CASE(Concatenation3dDim2DiffInputDimsUint8, Concatenation3dDim2DiffInputDimsUint8Test) + +// Floor +ARMNN_AUTO_TEST_CASE(SimpleFloor, SimpleFloorTest) + +// Reshape +ARMNN_AUTO_TEST_CASE(SimpleReshapeFloat32, SimpleReshapeFloat32Test) +ARMNN_AUTO_TEST_CASE(SimpleReshapeUint8, SimpleReshapeUint8Test) + +// Permute +ARMNN_AUTO_TEST_CASE(SimplePermuteFloat32, SimplePermuteFloat32Test) +ARMNN_AUTO_TEST_CASE(SimplePermuteUint8, SimplePermuteUint8Test) +ARMNN_AUTO_TEST_CASE(PermuteFloat32ValueSet1, PermuteFloat32ValueSet1Test) +ARMNN_AUTO_TEST_CASE(PermuteFloat32ValueSet2, PermuteFloat32ValueSet2Test) +ARMNN_AUTO_TEST_CASE(PermuteFloat32ValueSet3, PermuteFloat32ValueSet3Test) + +// Convert from Float16 to Float32 +ARMNN_AUTO_TEST_CASE(SimpleConvertFp16ToFp32, SimpleConvertFp16ToFp32Test) +// Convert from Float32 to Float16 +ARMNN_AUTO_TEST_CASE(SimpleConvertFp32ToFp16, SimpleConvertFp32ToFp16Test) + +// Mean +ARMNN_AUTO_TEST_CASE(MeanUint8Simple, MeanUint8SimpleTest) +ARMNN_AUTO_TEST_CASE(MeanUint8SimpleAxis, MeanUint8SimpleAxisTest) +ARMNN_AUTO_TEST_CASE(MeanUint8KeepDims, MeanUint8KeepDimsTest) +ARMNN_AUTO_TEST_CASE(MeanUint8MultipleDims, MeanUint8MultipleDimsTest) +ARMNN_AUTO_TEST_CASE(MeanVtsUint8, MeanVtsUint8Test) + +ARMNN_AUTO_TEST_CASE(MeanFloatSimple, MeanFloatSimpleTest) +ARMNN_AUTO_TEST_CASE(MeanFloatSimpleAxis, MeanFloatSimpleAxisTest) +ARMNN_AUTO_TEST_CASE(MeanFloatKeepDims, MeanFloatKeepDimsTest) +ARMNN_AUTO_TEST_CASE(MeanFloatMultipleDims, MeanFloatMultipleDimsTest) +ARMNN_AUTO_TEST_CASE(MeanVtsFloat1, MeanVtsFloat1Test) +ARMNN_AUTO_TEST_CASE(MeanVtsFloat2, MeanVtsFloat2Test) + +BOOST_AUTO_TEST_SUITE_END() diff --git a/src/backends/test/ActivationFixture.hpp b/src/backends/test/ActivationFixture.hpp index d9d4ca7470..5028b252e1 100644 --- 
a/src/backends/test/ActivationFixture.hpp +++ b/src/backends/test/ActivationFixture.hpp @@ -7,6 +7,11 @@ #include "TensorCopyUtils.hpp" #include "WorkloadTestUtils.hpp" +#include + +#include +#include + struct ActivationFixture { ActivationFixture() diff --git a/src/backends/test/ArmComputeCl.cpp b/src/backends/test/ArmComputeCl.cpp deleted file mode 100644 index f4ead34c58..0000000000 --- a/src/backends/test/ArmComputeCl.cpp +++ /dev/null @@ -1,335 +0,0 @@ -// -// Copyright © 2017 Arm Ltd. All rights reserved. -// SPDX-License-Identifier: MIT -// -#include -#include "test/TensorHelpers.hpp" -#include "LayerTests.hpp" - -#include -#include -#include -#include -#include -#include "ActivationFixture.hpp" -#include "ClContextControlFixture.hpp" - -#include -#include -#include -#include - -#include "test/UnitTests.hpp" - -BOOST_FIXTURE_TEST_SUITE(Compute_ArmComputeCl, ClContextControlFixture) -using FactoryType = armnn::ClWorkloadFactory; - -// ============================================================================ -// UNIT tests - -// Activation -ARMNN_AUTO_TEST_CASE(ConstantLinearActivation, ConstantLinearActivationTest) - -ARMNN_AUTO_TEST_CASE(SimpleSoftmaxBeta1, SimpleSoftmaxTest, 1.0f) -ARMNN_AUTO_TEST_CASE(SimpleSoftmaxBeta2, SimpleSoftmaxTest, 2.0f) -ARMNN_AUTO_TEST_CASE(SimpleSoftmaxBeta1Uint8, SimpleSoftmaxUint8Test, 1.0f) -ARMNN_AUTO_TEST_CASE(SimpleSoftmaxBeta2Uint8, SimpleSoftmaxUint8Test, 2.0f) - -ARMNN_AUTO_TEST_CASE(ReLu1Uint8, BoundedReLuUint8UpperAndLowerBoundTest) -ARMNN_AUTO_TEST_CASE(ReLu6Uint8, BoundedReLuUint8UpperBoundOnlyTest) - -// Fully Connected -ARMNN_AUTO_TEST_CASE(SimpleFullyConnected, FullyConnectedFloat32Test, false, false) -ARMNN_AUTO_TEST_CASE(SimpleFullyConnectedWithBias, FullyConnectedFloat32Test, true, false) -ARMNN_AUTO_TEST_CASE(SimpleFullyConnectedWithTranspose, FullyConnectedFloat32Test, false, true) -ARMNN_AUTO_TEST_CASE(FullyConnectedUint8, FullyConnectedUint8Test, false) -ARMNN_AUTO_TEST_CASE(FullyConnectedBiasedUint8, 
FullyConnectedUint8Test, true) - -ARMNN_AUTO_TEST_CASE(FullyConnectedLarge, FullyConnectedLargeTest, false) -ARMNN_AUTO_TEST_CASE(FullyConnectedLargeTransposed, FullyConnectedLargeTest, true) - -// Convolution -ARMNN_AUTO_TEST_CASE(SimpleConvolution1d, Convolution1dTest, true) - -ARMNN_AUTO_TEST_CASE(SimpleConvolution2d, SimpleConvolution2d3x5Test, true) -ARMNN_AUTO_TEST_CASE(SimpleConvolution2dSquare, SimpleConvolution2d3x3Test, true) -ARMNN_AUTO_TEST_CASE(SimpleConvolution2d3x3Uint8, SimpleConvolution2d3x3Uint8Test, true) -ARMNN_AUTO_TEST_CASE(UnbiasedConvolution2d, SimpleConvolution2d3x5Test, false) -ARMNN_AUTO_TEST_CASE(UnbiasedConvolution2dSquare, SimpleConvolution2d3x3Test, false) -ARMNN_AUTO_TEST_CASE(SimpleConvolution2dAsymmetricPadding, Convolution2dAsymmetricPaddingTest) - -ARMNN_AUTO_TEST_CASE(SimpleConvolution2dSquareNhwc, SimpleConvolution2d3x3NhwcTest, false) - -// Depthwise Convolution -ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2dDepthMul1, DepthwiseConvolution2dDepthMul1Test, true) -ARMNN_AUTO_TEST_CASE(UnbiasedDepthwiseConvolution2dDepthMul1, DepthwiseConvolution2dDepthMul1Test, false) -ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2dDepthMul1Uint8, DepthwiseConvolution2dDepthMul1Uint8Test, true) -ARMNN_AUTO_TEST_CASE(UnbiasedDepthwiseConvolution2dDepthMul1Uint8, DepthwiseConvolution2dDepthMul1Uint8Test, false) - -ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2dAsymmetric, DepthwiseConvolution2dAsymmetricTest, true) -ARMNN_AUTO_TEST_CASE(UnbiasedDepthwiseConvolution2dAsymmetric, DepthwiseConvolution2dAsymmetricTest, false) - -// Softmax -BOOST_AUTO_TEST_CASE(Softmax4dSupport) -{ - const unsigned int numDimensions = 4u; - std::array dimensionSizes; - dimensionSizes.fill(1u); - - const armnn::TensorInfo inputInfo(numDimensions, &dimensionSizes.front(), armnn::DataType::Float32); - const armnn::TensorInfo outputInfo(numDimensions, &dimensionSizes.front(), armnn::DataType::Float32); - - // 4D Softmax should be reported as unsupported on the CL backend - 
BOOST_TEST(!armnn::IsSoftmaxSupportedCl(inputInfo, outputInfo, armnn::SoftmaxDescriptor())); -} - -// Splitter -ARMNN_AUTO_TEST_CASE(SimpleSplitter, SplitterTest) -ARMNN_AUTO_TEST_CASE(SimpleSplitterUint8, SplitterUint8Test) - -ARMNN_AUTO_TEST_CASE(CopyViaSplitter, CopyViaSplitterTest) -ARMNN_AUTO_TEST_CASE(CopyViaSplitterUint8, CopyViaSplitterUint8Test) - -// Merger -ARMNN_AUTO_TEST_CASE(SimpleMerger, MergerTest) -ARMNN_AUTO_TEST_CASE(MergerUint8, MergerUint8Test) - -// Normalization -ARMNN_AUTO_TEST_CASE(SimpleNormalizationAcross, SimpleNormalizationAcrossTest) -ARMNN_AUTO_TEST_CASE(SimpleNormalizationWithin, SimpleNormalizationWithinTest) -ARMNN_AUTO_TEST_CASE(SimpleNormalizationAcrossNhwc, SimpleNormalizationAcrossNhwcTest) - -// Pooling -ARMNN_AUTO_TEST_CASE(SimpleMaxPooling2dSize3x3Stride2x4, SimpleMaxPooling2dSize3x3Stride2x4Test, true) -ARMNN_AUTO_TEST_CASE(SimpleMaxPooling2dSize3x3Stride2x4Uint8, SimpleMaxPooling2dSize3x3Stride2x4Uint8Test, true) - -ARMNN_AUTO_TEST_CASE(IgnorePaddingSimpleMaxPooling2d, IgnorePaddingSimpleMaxPooling2dTest) -ARMNN_AUTO_TEST_CASE(IgnorePaddingSimpleMaxPooling2dUint8, IgnorePaddingSimpleMaxPooling2dUint8Test) -ARMNN_AUTO_TEST_CASE(IgnorePaddingMaxPooling2dSize3, IgnorePaddingMaxPooling2dSize3Test) -ARMNN_AUTO_TEST_CASE(IgnorePaddingMaxPooling2dSize3Uint8, IgnorePaddingMaxPooling2dSize3Uint8Test) - -ARMNN_AUTO_TEST_CASE(IgnorePaddingSimpleAveragePooling2d, IgnorePaddingSimpleAveragePooling2dTest) -ARMNN_AUTO_TEST_CASE(IgnorePaddingSimpleAveragePooling2dUint8, IgnorePaddingSimpleAveragePooling2dUint8Test) -ARMNN_AUTO_TEST_CASE(IgnorePaddingSimpleAveragePooling2dNoPadding, IgnorePaddingSimpleAveragePooling2dNoPaddingTest) -ARMNN_AUTO_TEST_CASE(IgnorePaddingSimpleAveragePooling2dNoPaddingUint8, - IgnorePaddingSimpleAveragePooling2dNoPaddingUint8Test) -ARMNN_AUTO_TEST_CASE(IgnorePaddingAveragePooling2dSize3, IgnorePaddingAveragePooling2dSize3Test) -ARMNN_AUTO_TEST_CASE(IgnorePaddingAveragePooling2dSize3Uint8, 
IgnorePaddingAveragePooling2dSize3Uint8Test) - -ARMNN_AUTO_TEST_CASE(IgnorePaddingSimpleL2Pooling2d, IgnorePaddingSimpleL2Pooling2dTest) -ARMNN_AUTO_TEST_CASE(UNSUPPORTED_IgnorePaddingSimpleL2Pooling2dUint8, IgnorePaddingSimpleL2Pooling2dUint8Test) -ARMNN_AUTO_TEST_CASE(IgnorePaddingL2Pooling2dSize3, IgnorePaddingL2Pooling2dSize3Test) -ARMNN_AUTO_TEST_CASE(UNSUPPORTED_IgnorePaddingL2Pooling2dSize3Uint8, IgnorePaddingL2Pooling2dSize3Uint8Test) - -ARMNN_AUTO_TEST_CASE(SimpleAveragePooling2d, SimpleAveragePooling2dTest) -ARMNN_AUTO_TEST_CASE(SimpleAveragePooling2dNhwc, SimpleAveragePooling2dNhwcTest) -ARMNN_AUTO_TEST_CASE(SimpleAveragePooling2dUint8, SimpleAveragePooling2dUint8Test) -ARMNN_AUTO_TEST_CASE(IgnorePaddingAveragePooling2dSize3x2Stride2x2, - IgnorePaddingAveragePooling2dSize3x2Stride2x2Test, - false) -ARMNN_AUTO_TEST_CASE(IgnorePaddingAveragePooling2dSize3x2Stride2x2NoPadding, - IgnorePaddingAveragePooling2dSize3x2Stride2x2Test, - true) -ARMNN_AUTO_TEST_CASE(LargeTensorsAveragePooling2d, LargeTensorsAveragePooling2dTest) -ARMNN_AUTO_TEST_CASE(LargeTensorsAveragePooling2dUint8, LargeTensorsAveragePooling2dUint8Test) - -ARMNN_AUTO_TEST_CASE(SimpleL2Pooling2d, SimpleL2Pooling2dTest) -ARMNN_AUTO_TEST_CASE(UNSUPPORTED_SimpleL2Pooling2dUint8, SimpleL2Pooling2dUint8Test) -ARMNN_AUTO_TEST_CASE(L2Pooling2dSize3Stride1, L2Pooling2dSize3Stride1Test) -ARMNN_AUTO_TEST_CASE(UNSUPPORTED_L2Pooling2dSize3Stride1Uint8, L2Pooling2dSize3Stride1Uint8Test) -ARMNN_AUTO_TEST_CASE(L2Pooling2dSize3Stride3, L2Pooling2dSize3Stride3Test) -ARMNN_AUTO_TEST_CASE(UNSUPPORTED_L2Pooling2dSize3Stride3Uint8, L2Pooling2dSize3Stride3Uint8Test) -ARMNN_AUTO_TEST_CASE(L2Pooling2dSize3Stride4, L2Pooling2dSize3Stride4Test) -ARMNN_AUTO_TEST_CASE(UNSUPPORTED_L2Pooling2dSize3Stride4Uint8, L2Pooling2dSize3Stride4Uint8Test) -ARMNN_AUTO_TEST_CASE(L2Pooling2dSize7, L2Pooling2dSize7Test) -ARMNN_AUTO_TEST_CASE(UNSUPPORTED_L2Pooling2dSize7Uint8, L2Pooling2dSize7Uint8Test) 
-ARMNN_AUTO_TEST_CASE(L2Pooling2dSize9, L2Pooling2dSize9Test) -ARMNN_AUTO_TEST_CASE(UNSUPPORTED_L2Pooling2dSize9Uint8, L2Pooling2dSize9Uint8Test) - -// Add -ARMNN_AUTO_TEST_CASE(SimpleAdd, AdditionTest) -ARMNN_AUTO_TEST_CASE(AddBroadcast1Element, AdditionBroadcast1ElementTest) -ARMNN_AUTO_TEST_CASE(AddBroadcast, AdditionBroadcastTest) - -ARMNN_AUTO_TEST_CASE(AdditionUint8, AdditionUint8Test) -ARMNN_AUTO_TEST_CASE(AddBroadcastUint8, AdditionBroadcastUint8Test) -ARMNN_AUTO_TEST_CASE(AddBroadcast1ElementUint8, AdditionBroadcast1ElementUint8Test) - -// Sub -ARMNN_AUTO_TEST_CASE(SimpleSub, SubtractionTest) - -// Div -ARMNN_AUTO_TEST_CASE(SimpleDivision, DivisionTest) -ARMNN_AUTO_TEST_CASE(DivisionByZero, DivisionByZeroTest) -ARMNN_AUTO_TEST_CASE(DivisionBroadcast1Element, DivisionBroadcast1ElementTest) -ARMNN_AUTO_TEST_CASE(DivisionBroadcast1DVector, DivisionBroadcast1DVectorTest) -// NOTE: quantized division is not supported by CL and not required by the -// android NN api - -// Mul -ARMNN_AUTO_TEST_CASE(SimpleMultiplication, MultiplicationTest) -ARMNN_AUTO_TEST_CASE(MultiplicationBroadcast1Element, MultiplicationBroadcast1ElementTest) -ARMNN_AUTO_TEST_CASE(MultiplicationBroadcast1DVector, MultiplicationBroadcast1DVectorTest) -ARMNN_AUTO_TEST_CASE(MultiplicationUint8, MultiplicationUint8Test) -ARMNN_AUTO_TEST_CASE(MultiplicationBroadcast1ElementUint8, MultiplicationBroadcast1ElementUint8Test) -ARMNN_AUTO_TEST_CASE(MultiplicationBroadcast1DVectorUint8, MultiplicationBroadcast1DVectorUint8Test) - -// Batch Norm -ARMNN_AUTO_TEST_CASE(BatchNorm, BatchNormTest) - -// L2 Normalization -ARMNN_AUTO_TEST_CASE(L2Normalization1d, L2Normalization1dTest) -ARMNN_AUTO_TEST_CASE(L2Normalization2d, L2Normalization2dTest) -ARMNN_AUTO_TEST_CASE(L2Normalization3d, L2Normalization3dTest) -ARMNN_AUTO_TEST_CASE(L2Normalization4d, L2Normalization4dTest) - -ARMNN_AUTO_TEST_CASE(L2Normalization1dNhwc, L2Normalization1dNhwcTest) -ARMNN_AUTO_TEST_CASE(L2Normalization2dNhwc, 
L2Normalization2dNhwcTest) -ARMNN_AUTO_TEST_CASE(L2Normalization3dNhwc, L2Normalization3dNhwcTest) -ARMNN_AUTO_TEST_CASE(L2Normalization4dNhwc, L2Normalization4dNhwcTest) - -// Resize Bilinear - NCHW data layout -ARMNN_AUTO_TEST_CASE(ResizeBilinearNop, ResizeBilinearNopTest) -ARMNN_AUTO_TEST_CASE(SimpleResizeBilinear, SimpleResizeBilinearTest) -ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMin, ResizeBilinearSqMinTest) -ARMNN_AUTO_TEST_CASE(ResizeBilinearMin, ResizeBilinearMinTest) -ARMNN_AUTO_TEST_CASE(ResizeBilinearMag, ResizeBilinearMagTest) - -// Resize Bilinear - NHWC data layout -ARMNN_AUTO_TEST_CASE(ResizeBilinearNopNhwc, ResizeBilinearNopNhwcTest) -ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearNhwc, SimpleResizeBilinearNhwcTest) -ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinNhwc, ResizeBilinearSqMinNhwcTest) -ARMNN_AUTO_TEST_CASE(ResizeBilinearMinNhwc, ResizeBilinearMinNhwcTest) -ARMNN_AUTO_TEST_CASE(ResizeBilinearMagNhwc, ResizeBilinearMagNhwcTest) - -// Constant -ARMNN_AUTO_TEST_CASE(Constant, ConstantTest) -ARMNN_AUTO_TEST_CASE(ConstantUint8, ConstantTestUint8) - -// Concat -ARMNN_AUTO_TEST_CASE(Concatenation1d, Concatenation1dTest) -ARMNN_AUTO_TEST_CASE(Concatenation1dUint8, Concatenation1dUint8Test) - -ARMNN_AUTO_TEST_CASE(Concatenation2dDim0, Concatenation2dDim0Test) -ARMNN_AUTO_TEST_CASE(Concatenation2dDim0Uint8, Concatenation2dDim0Uint8Test) -ARMNN_AUTO_TEST_CASE(Concatenation2dDim1, Concatenation2dDim1Test) -ARMNN_AUTO_TEST_CASE(Concatenation2dDim1Uint8, Concatenation2dDim1Uint8Test) - -ARMNN_AUTO_TEST_CASE(Concatenation2dDim0DiffInputDims, Concatenation2dDim0DiffInputDimsTest) -ARMNN_AUTO_TEST_CASE(Concatenation2dDim0DiffInputDimsUint8, Concatenation2dDim0DiffInputDimsUint8Test) -ARMNN_AUTO_TEST_CASE(Concatenation2dDim1DiffInputDims, Concatenation2dDim1DiffInputDimsTest) -ARMNN_AUTO_TEST_CASE(Concatenation2dDim1DiffInputDimsUint8, Concatenation2dDim1DiffInputDimsUint8Test) - -ARMNN_AUTO_TEST_CASE(Concatenation3dDim0, Concatenation3dDim0Test) 
-ARMNN_AUTO_TEST_CASE(Concatenation3dDim0Uint8, Concatenation3dDim0Uint8Test) -ARMNN_AUTO_TEST_CASE(Concatenation3dDim1, Concatenation3dDim1Test) -ARMNN_AUTO_TEST_CASE(Concatenation3dDim1Uint8, Concatenation3dDim1Uint8Test) -ARMNN_AUTO_TEST_CASE(Concatenation3dDim2, Concatenation3dDim2Test) -ARMNN_AUTO_TEST_CASE(Concatenation3dDim2Uint8, Concatenation3dDim2Uint8Test) - -ARMNN_AUTO_TEST_CASE(Concatenation3dDim0DiffInputDims, Concatenation3dDim0DiffInputDimsTest) -ARMNN_AUTO_TEST_CASE(Concatenation3dDim0DiffInputDimsUint8, Concatenation3dDim0DiffInputDimsUint8Test) -ARMNN_AUTO_TEST_CASE(Concatenation3dDim1DiffInputDims, Concatenation3dDim1DiffInputDimsTest) -ARMNN_AUTO_TEST_CASE(Concatenation3dDim1DiffInputDimsUint8, Concatenation3dDim1DiffInputDimsUint8Test) -ARMNN_AUTO_TEST_CASE(Concatenation3dDim2DiffInputDims, Concatenation3dDim2DiffInputDimsTest) -ARMNN_AUTO_TEST_CASE(Concatenation3dDim2DiffInputDimsUint8, Concatenation3dDim2DiffInputDimsUint8Test) - -// Floor -ARMNN_AUTO_TEST_CASE(SimpleFloor, SimpleFloorTest) - -// Reshape -ARMNN_AUTO_TEST_CASE(SimpleReshapeFloat32, SimpleReshapeFloat32Test) -ARMNN_AUTO_TEST_CASE(SimpleReshapeUint8, SimpleReshapeUint8Test) - -// Permute -ARMNN_AUTO_TEST_CASE(SimplePermuteFloat32, SimplePermuteFloat32Test) -ARMNN_AUTO_TEST_CASE(SimplePermuteUint8, SimplePermuteUint8Test) -ARMNN_AUTO_TEST_CASE(PermuteFloat32ValueSet1, PermuteFloat32ValueSet1Test) -ARMNN_AUTO_TEST_CASE(PermuteFloat32ValueSet2, PermuteFloat32ValueSet2Test) -ARMNN_AUTO_TEST_CASE(PermuteFloat32ValueSet3, PermuteFloat32ValueSet3Test) - -// Lstm -ARMNN_AUTO_TEST_CASE(LstmLayerFloat32WithCifgWithPeepholeNoProjection, - LstmLayerFloat32WithCifgWithPeepholeNoProjectionTest) -ARMNN_AUTO_TEST_CASE(LstmLayerFloat32NoCifgNoPeepholeNoProjection, - LstmLayerFloat32NoCifgNoPeepholeNoProjectionTest) -ARMNN_AUTO_TEST_CASE(LstmLayerFloat32NoCifgWithPeepholeWithProjection, - LstmLayerFloat32NoCifgWithPeepholeWithProjectionTest) - -// Convert from Float16 to Float32 
-ARMNN_AUTO_TEST_CASE(SimpleConvertFp16ToFp32, SimpleConvertFp16ToFp32Test) -// Convert from Float32 to Float16 -ARMNN_AUTO_TEST_CASE(SimpleConvertFp32ToFp16, SimpleConvertFp32ToFp16Test) - -// ============================================================================ -// COMPARE tests - -ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareConv2dWithReference, CompareConvolution2dTest) - -ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareDepthwiseConv2dWithReferenceFloat32, CompareDepthwiseConvolution2dTest) -ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareDepthwiseConv2dWithReferenceUint8, CompareDepthwiseConvolution2dTest) - -ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareNormalizationWithinWithReference, CompareNormalizationTest, - armnn::NormalizationAlgorithmChannel::Within, - armnn::NormalizationAlgorithmMethod::LocalBrightness) -ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareNormalizationAcrossWithReference, CompareNormalizationTest, - armnn::NormalizationAlgorithmChannel::Across, - armnn::NormalizationAlgorithmMethod::LocalBrightness) - -ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareSoftmaxBeta1WithReference, CompareSoftmaxTest, 1.0f) -ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareSoftmaxBeta2WithReference, CompareSoftmaxTest, 2.0f) -ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareSoftmaxUint8, CompareSoftmaxUint8Test, 1.0f) - -ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareMaxPooling2dWithRef, ComparePooling2dTest, armnn::PoolingAlgorithm::Max) - -ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareAveragePooling2dWithRef, ComparePooling2dTest, armnn::PoolingAlgorithm::Average) -ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareAveragePooling2dWithRefUint8, ComparePooling2dUint8Test, - armnn::PoolingAlgorithm::Average) - -ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareL2Pooling2dWithRef, ComparePooling2dTest, armnn::PoolingAlgorithm::L2) - -ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareAddition, CompareAdditionTest) - -ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareMultiplicationWithRef, CompareMultiplicationTest) - 
-ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareBatchNorm, CompareBatchNormTest) - -ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareReLu1, CompareBoundedReLuTest, 1.0f, -1.0f) -ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareReLu6, CompareBoundedReLuTest, 6.0f, 0.0f) - -// ============================================================================ -// FIXTURE tests - -ARMNN_COMPARE_REF_FIXTURE_TEST_CASE(CompareSigmoidActivationWithReference, ActivationFixture, - CompareActivationTest, armnn::ActivationFunction::Sigmoid, 5u) - -ARMNN_COMPARE_REF_FIXTURE_TEST_CASE(CompareTanhActivationWithReference, ActivationFixture, - CompareActivationTest, armnn::ActivationFunction::TanH, 5u) - -ARMNN_COMPARE_REF_FIXTURE_TEST_CASE(CompareLinearActivationWithReference, ActivationFixture, - CompareActivationTest, armnn::ActivationFunction::Linear, 5u) - -ARMNN_COMPARE_REF_FIXTURE_TEST_CASE(CompareReLuActivationWithReference, ActivationFixture, - CompareActivationTest, armnn::ActivationFunction::ReLu, 5u) - -ARMNN_COMPARE_REF_FIXTURE_TEST_CASE(CompareBoundedReLuActivationWithReference, ActivationFixture, - CompareActivationTest, armnn::ActivationFunction::BoundedReLu, 5u) -ARMNN_COMPARE_REF_FIXTURE_TEST_CASE(CompareBoundedReLuActivationWithReferenceUint8, ActivationFixture, - CompareActivationUint8Test, armnn::ActivationFunction::BoundedReLu) - -ARMNN_COMPARE_REF_FIXTURE_TEST_CASE(CompareSoftReLuActivationWithReference, ActivationFixture, - CompareActivationTest, armnn::ActivationFunction::SoftReLu, 5u) - -ARMNN_COMPARE_REF_FIXTURE_TEST_CASE(CompareLeakyReLuActivationWithReference, ActivationFixture, - CompareActivationTest, armnn::ActivationFunction::LeakyReLu, 5u) - -ARMNN_COMPARE_REF_FIXTURE_TEST_CASE(CompareAbsActivationWithReference, ActivationFixture, - CompareActivationTest, armnn::ActivationFunction::Abs, 5u) - -ARMNN_COMPARE_REF_FIXTURE_TEST_CASE(CompareSqrtActivationWithReference, PositiveActivationFixture, - CompareActivationTest, armnn::ActivationFunction::Sqrt, 5u) - 
-ARMNN_COMPARE_REF_FIXTURE_TEST_CASE(CompareSquareActivationWithReference, ActivationFixture, - CompareActivationTest, armnn::ActivationFunction::Square, 5u) - -BOOST_AUTO_TEST_SUITE_END() diff --git a/src/backends/test/ArmComputeNeon.cpp b/src/backends/test/ArmComputeNeon.cpp deleted file mode 100644 index 045aa30889..0000000000 --- a/src/backends/test/ArmComputeNeon.cpp +++ /dev/null @@ -1,485 +0,0 @@ -// -// Copyright © 2017 Arm Ltd. All rights reserved. -// SPDX-License-Identifier: MIT -// -#include - -#include "test/TensorHelpers.hpp" -#include "LayerTests.hpp" - -#include -#include -#include -#include -#include -#include "ActivationFixture.hpp" - -#include "WorkloadTestUtils.hpp" - -#include "test/UnitTests.hpp" - -BOOST_AUTO_TEST_SUITE(Compute_ArmComputeNeon) -using FactoryType = armnn::NeonWorkloadFactory; - -// ============================================================================ -// UNIT tests - -// Convolution -ARMNN_AUTO_TEST_CASE(SimpleConvolution1d, Convolution1dTest, true) - -ARMNN_AUTO_TEST_CASE(SimpleConvolution2d, SimpleConvolution2d3x5Test, true) -ARMNN_AUTO_TEST_CASE(SimpleConvolution2dSquare, SimpleConvolution2d3x3Test, true) -ARMNN_AUTO_TEST_CASE(UnbiasedConvolution2d, SimpleConvolution2d3x5Test, false) -ARMNN_AUTO_TEST_CASE(UnbiasedConvolution2dSquare, SimpleConvolution2d3x3Test, false) -ARMNN_AUTO_TEST_CASE(SimpleConvolution2dAsymmetricPadding, Convolution2dAsymmetricPaddingTest) - -ARMNN_AUTO_TEST_CASE(SimpleConvolution2dSquareNhwc, SimpleConvolution2d3x3NhwcTest, false) -namespace -{ - -armnn::Convolution2dDescriptor MakeConv2dDesc(uint32_t strideX, uint32_t strideY, - uint32_t padLeft = 0, uint32_t padRight = 0, uint32_t padTop = 0, uint32_t padBottom = 0) -{ - armnn::Convolution2dDescriptor result; - result.m_StrideX = strideX; - result.m_StrideY = strideY; - result.m_PadLeft = padLeft; - result.m_PadRight = padRight; - result.m_PadTop = padTop; - result.m_PadBottom = padBottom; - result.m_BiasEnabled = true; - return result; -} - 
-} - -BOOST_AUTO_TEST_CASE(Conv2dUtils) -{ - // The only preferred Neon convolution is 1x1 with padding=0 and stride size {1,2,3}. - armnn::TensorShape shape1x1({ 1,1,1,1 }); - armnn::TensorInfo info1x1(shape1x1, armnn::DataType::Float32); - BOOST_TEST(armnn::IsNeonDirectConvolutionPreferred(info1x1, MakeConv2dDesc(1, 1))); - BOOST_TEST(armnn::IsNeonDirectConvolutionPreferred(info1x1, MakeConv2dDesc(1, 2))); - BOOST_TEST(armnn::IsNeonDirectConvolutionPreferred(info1x1, MakeConv2dDesc(1, 3))); - BOOST_TEST(armnn::IsNeonDirectConvolutionPreferred(info1x1, MakeConv2dDesc(2, 1))); - BOOST_TEST(armnn::IsNeonDirectConvolutionPreferred(info1x1, MakeConv2dDesc(2, 2))); - BOOST_TEST(armnn::IsNeonDirectConvolutionPreferred(info1x1, MakeConv2dDesc(2, 3))); - BOOST_TEST(armnn::IsNeonDirectConvolutionPreferred(info1x1, MakeConv2dDesc(3, 1))); - BOOST_TEST(armnn::IsNeonDirectConvolutionPreferred(info1x1, MakeConv2dDesc(3, 2))); - BOOST_TEST(armnn::IsNeonDirectConvolutionPreferred(info1x1, MakeConv2dDesc(3, 3))); - - BOOST_TEST(!armnn::IsNeonDirectConvolutionPreferred(info1x1, MakeConv2dDesc(4, 1))); - BOOST_TEST(!armnn::IsNeonDirectConvolutionPreferred(info1x1, MakeConv2dDesc(4, 5))); - BOOST_TEST(!armnn::IsNeonDirectConvolutionPreferred(info1x1, MakeConv2dDesc(3, 6))); - - // non zero padding is not preferred for direct convolution - BOOST_TEST(!armnn::IsNeonDirectConvolutionPreferred(info1x1, MakeConv2dDesc(1, 1, 1, 0))); - BOOST_TEST(!armnn::IsNeonDirectConvolutionPreferred(info1x1, MakeConv2dDesc(1, 1, 0, 1))); - BOOST_TEST(!armnn::IsNeonDirectConvolutionPreferred(info1x1, MakeConv2dDesc(1, 1, 1, 1))); - - // 2x2 filter not preferred for direct convolution - armnn::TensorShape shape2x2({ 1,1,2,2 }); - armnn::TensorInfo info2x2(shape2x2, armnn::DataType::Float32); - BOOST_TEST(!armnn::IsNeonDirectConvolutionPreferred(info2x2, MakeConv2dDesc(1, 1))); -} - -// Depthwise Convolution -ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2dDepthMul1, DepthwiseConvolution2dDepthMul1Test, true) 
-ARMNN_AUTO_TEST_CASE(UnbiasedDepthwiseConvolution2dDepthMul1, DepthwiseConvolution2dDepthMul1Test, false) -ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2dDepthMul1Uint8, DepthwiseConvolution2dDepthMul1Uint8Test, true) -ARMNN_AUTO_TEST_CASE(UnbiasedDepthwiseConvolution2dDepthMul1Uint8, DepthwiseConvolution2dDepthMul1Uint8Test, false) - -ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2dAsymmetric, DepthwiseConvolution2dAsymmetricTest, true) -ARMNN_AUTO_TEST_CASE(UnbiasedDepthwiseConvolution2dAsymmetric, DepthwiseConvolution2dAsymmetricTest, false) - -namespace -{ - -armnn::DepthwiseConvolution2dDescriptor MakeDepthwiseConv2dDesc(uint32_t strideX, uint32_t strideY, - uint32_t depthMultiplier = 1, uint32_t padLeft = 0, uint32_t padRight = 0, - uint32_t padTop = 0, uint32_t padBottom = 0) -{ - boost::ignore_unused(depthMultiplier); - - armnn::DepthwiseConvolution2dDescriptor desc; - - desc.m_PadLeft = padLeft; - desc.m_PadRight = padRight; - - desc.m_PadTop = padTop; - desc.m_PadBottom = padBottom; - desc.m_StrideX = strideX; - desc.m_StrideY = strideY; - desc.m_BiasEnabled = false; - - return desc; -} - -armnn::TensorInfo CreateOutputTensorInfo(const armnn::TensorInfo& inputInfo, - const armnn::TensorInfo& weightsInfo, - const armnn::DepthwiseConvolution2dDescriptor& descriptor, - armnn::DataType dataType) -{ - const armnn::TensorShape& inputShape = inputInfo.GetShape(); - const armnn::TensorShape& filterShape = weightsInfo.GetShape(); - - unsigned int inWidth = inputShape[3]; - unsigned int inHeight = inputShape[2]; - unsigned int inBatchSize = inputShape[0]; - - unsigned int filterWidth = filterShape[3]; - unsigned int readWidth = (inWidth + descriptor.m_PadLeft + descriptor.m_PadRight) - (filterWidth); - unsigned int outWidth = 1u + (readWidth / descriptor.m_StrideX); - - unsigned int filterHeight = filterShape[2]; - unsigned int readHeight = (inHeight + descriptor.m_PadTop + descriptor.m_PadBottom) - (filterHeight); - unsigned int outHeight = 1u + (readHeight / 
descriptor.m_StrideY); - unsigned int depthMultiplier = filterShape[0]; - - unsigned int outChannels = filterShape[1] * depthMultiplier; - unsigned int outBatchSize = inBatchSize; - - armnn::TensorShape outputShape({outBatchSize, outChannels, outHeight, outWidth}); - return armnn::TensorInfo(outputShape, dataType); -} -} - -BOOST_AUTO_TEST_CASE(DepthwiseConv2dUtils) -{ - const armnn::DataType dataType = armnn::DataType::Float32; - - armnn::TensorInfo inputInfo({1, 1, 10, 10 }, dataType); - armnn::TensorInfo outputInfo; - armnn::TensorInfo weightsInfo3x3({ 1, 1, 3, 3 }, dataType); - armnn::TensorInfo biasesInfo; - - armnn::DepthwiseConvolution2dDescriptor descriptor; - - // Strides supported: 1,2,3 - descriptor = MakeDepthwiseConv2dDesc(1, 1); - outputInfo = CreateOutputTensorInfo(inputInfo, weightsInfo3x3, descriptor, dataType); - BOOST_TEST(armnn::IsDepthwiseConvolutionSupportedNeon(inputInfo, outputInfo, descriptor, - weightsInfo3x3, biasesInfo)); - - descriptor = MakeDepthwiseConv2dDesc(1, 2); - outputInfo = CreateOutputTensorInfo(inputInfo, weightsInfo3x3, descriptor, dataType); - BOOST_TEST(armnn::IsDepthwiseConvolutionSupportedNeon(inputInfo, outputInfo, descriptor, - weightsInfo3x3, biasesInfo)); - - descriptor = MakeDepthwiseConv2dDesc(1, 3); - outputInfo = CreateOutputTensorInfo(inputInfo, weightsInfo3x3, descriptor, dataType); - BOOST_TEST(armnn::IsDepthwiseConvolutionSupportedNeon(inputInfo, outputInfo, descriptor, - weightsInfo3x3, biasesInfo)); - - descriptor = MakeDepthwiseConv2dDesc(2, 1); - outputInfo = CreateOutputTensorInfo(inputInfo, weightsInfo3x3, descriptor, dataType); - BOOST_TEST(armnn::IsDepthwiseConvolutionSupportedNeon(inputInfo, outputInfo, descriptor, - weightsInfo3x3, biasesInfo)); - - descriptor = MakeDepthwiseConv2dDesc(2, 2); - outputInfo = CreateOutputTensorInfo(inputInfo, weightsInfo3x3, descriptor, dataType); - BOOST_TEST(armnn::IsDepthwiseConvolutionSupportedNeon(inputInfo, outputInfo, descriptor, - weightsInfo3x3, biasesInfo)); 
- - descriptor = MakeDepthwiseConv2dDesc(2, 3); - outputInfo = CreateOutputTensorInfo(inputInfo, weightsInfo3x3, descriptor, dataType); - BOOST_TEST(armnn::IsDepthwiseConvolutionSupportedNeon(inputInfo, outputInfo, descriptor, - weightsInfo3x3, biasesInfo)); - - descriptor = MakeDepthwiseConv2dDesc(3, 1); - outputInfo = CreateOutputTensorInfo(inputInfo, weightsInfo3x3, descriptor, dataType); - BOOST_TEST(armnn::IsDepthwiseConvolutionSupportedNeon(inputInfo, outputInfo, descriptor, - weightsInfo3x3, biasesInfo)); - - descriptor = MakeDepthwiseConv2dDesc(3, 2); - outputInfo = CreateOutputTensorInfo(inputInfo, weightsInfo3x3, descriptor, dataType); - BOOST_TEST(armnn::IsDepthwiseConvolutionSupportedNeon(inputInfo, outputInfo, descriptor, - weightsInfo3x3, biasesInfo)); - - descriptor = MakeDepthwiseConv2dDesc(3, 3); - outputInfo = CreateOutputTensorInfo(inputInfo, weightsInfo3x3, descriptor, dataType); - BOOST_TEST(armnn::IsDepthwiseConvolutionSupportedNeon(inputInfo, outputInfo, descriptor, - weightsInfo3x3, biasesInfo)); - - // Supported stride 4 - descriptor = MakeDepthwiseConv2dDesc(4, 1); - outputInfo = CreateOutputTensorInfo(inputInfo, weightsInfo3x3, descriptor, dataType); - BOOST_TEST(armnn::IsDepthwiseConvolutionSupportedNeon(inputInfo, outputInfo, descriptor, - weightsInfo3x3, biasesInfo)); - - // Supported weights shape 1x1 - armnn::TensorInfo weightsInfo1x1({ 1, 1, 1, 1 }, armnn::DataType::Float32); - descriptor = MakeDepthwiseConv2dDesc(1, 1); - outputInfo = CreateOutputTensorInfo(inputInfo, weightsInfo1x1, descriptor, dataType); - BOOST_TEST(armnn::IsDepthwiseConvolutionSupportedNeon(inputInfo, outputInfo, descriptor, - weightsInfo1x1, biasesInfo)); - - // Supported shape 2x2 - armnn::TensorInfo weightsInfo2x2({ 1, 1, 2, 2 }, armnn::DataType::Float32); - descriptor = MakeDepthwiseConv2dDesc(1, 1); - outputInfo = CreateOutputTensorInfo(inputInfo, weightsInfo2x2, descriptor, dataType); - BOOST_TEST(armnn::IsDepthwiseConvolutionSupportedNeon(inputInfo, 
outputInfo, descriptor, - weightsInfo2x2, biasesInfo)); - - // Asymmetric padding - descriptor = MakeDepthwiseConv2dDesc(1, 1, 1, 1, 2, 1, 2); - outputInfo = CreateOutputTensorInfo(inputInfo, weightsInfo3x3, descriptor, dataType); - BOOST_TEST(armnn::IsDepthwiseConvolutionSupportedNeon(inputInfo, outputInfo, descriptor, - weightsInfo3x3, biasesInfo)); -} - -// Pooling -ARMNN_AUTO_TEST_CASE(SimpleMaxPooling2dSize3x3Stride2x4, SimpleMaxPooling2dSize3x3Stride2x4Test, true) -ARMNN_AUTO_TEST_CASE(SimpleMaxPooling2dSize3x3Stride2x4Uint8, SimpleMaxPooling2dSize3x3Stride2x4Uint8Test, true) -ARMNN_AUTO_TEST_CASE(SimpleAveragePooling2d, SimpleAveragePooling2dTest) -ARMNN_AUTO_TEST_CASE(SimpleAveragePooling2dNhwc, SimpleAveragePooling2dNhwcTest) -ARMNN_AUTO_TEST_CASE(SimpleAveragePooling2dUint8, SimpleAveragePooling2dUint8Test) - -ARMNN_AUTO_TEST_CASE(LargeTensorsAveragePooling2d, LargeTensorsAveragePooling2dTest) -ARMNN_AUTO_TEST_CASE(LargeTensorsAveragePooling2dUint8, LargeTensorsAveragePooling2dUint8Test) - -ARMNN_AUTO_TEST_CASE(SimpleL2Pooling2d, SimpleL2Pooling2dTest) -ARMNN_AUTO_TEST_CASE(UNSUPPORTED_SimpleL2Pooling2dUint8, SimpleL2Pooling2dUint8Test) -ARMNN_AUTO_TEST_CASE(L2Pooling2dSize3Stride1, L2Pooling2dSize3Stride1Test) -ARMNN_AUTO_TEST_CASE(UNSUPPORTED_L2Pooling2dSize3Stride1Uint8, L2Pooling2dSize3Stride1Uint8Test) -ARMNN_AUTO_TEST_CASE(L2Pooling2dSize3Stride3, L2Pooling2dSize3Stride3Test) -ARMNN_AUTO_TEST_CASE(UNSUPPORTED_L2Pooling2dSize3Stride3Uint8, L2Pooling2dSize3Stride3Uint8Test) -ARMNN_AUTO_TEST_CASE(L2Pooling2dSize3Stride4, L2Pooling2dSize3Stride4Test) -ARMNN_AUTO_TEST_CASE(UNSUPPORTED_L2Pooling2dSize3Stride4Uint8, L2Pooling2dSize3Stride4Uint8Test) -ARMNN_AUTO_TEST_CASE(L2Pooling2dSize7, L2Pooling2dSize7Test) -ARMNN_AUTO_TEST_CASE(UNSUPPORTED_L2Pooling2dSize7Uint8, L2Pooling2dSize7Uint8Test) -ARMNN_AUTO_TEST_CASE(L2Pooling2dSize9, L2Pooling2dSize9Test) -ARMNN_AUTO_TEST_CASE(UNSUPPORTED_L2Pooling2dSize9Uint8, L2Pooling2dSize9Uint8Test) - -// Ignore padding 
values for pooling but count padding fields into the divisor -ARMNN_AUTO_TEST_CASE(IgnorePaddingSimpleMaxPooling2d, IgnorePaddingSimpleMaxPooling2dTest) -ARMNN_AUTO_TEST_CASE(IgnorePaddingSimpleMaxPooling2dUint8, IgnorePaddingSimpleMaxPooling2dUint8Test) -ARMNN_AUTO_TEST_CASE(IgnorePaddingMaxPooling2dSize3, IgnorePaddingMaxPooling2dSize3Test) -ARMNN_AUTO_TEST_CASE(IgnorePaddingMaxPooling2dSize3Uint8, IgnorePaddingMaxPooling2dSize3Uint8Test) - -ARMNN_AUTO_TEST_CASE(IgnorePaddingSimpleAveragePooling2d, IgnorePaddingSimpleAveragePooling2dTest) -ARMNN_AUTO_TEST_CASE(IgnorePaddingSimpleAveragePooling2dUint8, IgnorePaddingSimpleAveragePooling2dUint8Test) -ARMNN_AUTO_TEST_CASE(IgnorePaddingSimpleAveragePooling2dNoPadding, IgnorePaddingSimpleAveragePooling2dNoPaddingTest) -ARMNN_AUTO_TEST_CASE(IgnorePaddingSimpleAveragePooling2dNoPaddingUint8, - IgnorePaddingSimpleAveragePooling2dNoPaddingUint8Test) -ARMNN_AUTO_TEST_CASE(IgnorePaddingAveragePooling2dSize3, IgnorePaddingAveragePooling2dSize3Test) -ARMNN_AUTO_TEST_CASE(IgnorePaddingAveragePooling2dSize3Uint8, IgnorePaddingAveragePooling2dSize3Uint8Test) -ARMNN_AUTO_TEST_CASE(IgnorePaddingAveragePooling2dSize3x2Stride2x2, - IgnorePaddingAveragePooling2dSize3x2Stride2x2Test, false) -ARMNN_AUTO_TEST_CASE(IgnorePaddingAveragePooling2dSize3x2Stride2x2NoPadding, - IgnorePaddingAveragePooling2dSize3x2Stride2x2Test, - true) - -ARMNN_AUTO_TEST_CASE(IgnorePaddingSimpleL2Pooling2d, IgnorePaddingSimpleL2Pooling2dTest) -ARMNN_AUTO_TEST_CASE(UNSUPPORTED_IgnorePaddingSimpleL2Pooling2dUint8, IgnorePaddingSimpleL2Pooling2dUint8Test) -ARMNN_AUTO_TEST_CASE(IgnorePaddingL2Pooling2dSize3, IgnorePaddingL2Pooling2dSize3Test) -ARMNN_AUTO_TEST_CASE(UNSUPPORTED_IgnorePaddingL2Pooling2dSize3Uint8, IgnorePaddingL2Pooling2dSize3Uint8Test) - -// Activation -ARMNN_AUTO_TEST_CASE(ConstantLinearActivation, ConstantLinearActivationTest) - -ARMNN_AUTO_TEST_CASE(SimpleSoftmaxBeta1, SimpleSoftmaxTest, 1.0f) -ARMNN_AUTO_TEST_CASE(SimpleSoftmaxBeta2, 
SimpleSoftmaxTest, 2.0f) - -ARMNN_AUTO_TEST_CASE(SimpleSoftmaxBeta1Uint8, SimpleSoftmaxUint8Test, 1.0f) -ARMNN_AUTO_TEST_CASE(SimpleSoftmaxBeta2Uint8, SimpleSoftmaxUint8Test, 2.0f) - -ARMNN_AUTO_TEST_CASE(ReLu1Uint8, BoundedReLuUint8UpperAndLowerBoundTest) -ARMNN_AUTO_TEST_CASE(ReLu6Uint8, BoundedReLuUint8UpperBoundOnlyTest) - -// Softmax -BOOST_AUTO_TEST_CASE(Softmax4dSupport) -{ - const unsigned int numDimensions = 4u; - std::array dimensionSizes; - dimensionSizes.fill(1u); - - const armnn::TensorInfo inputInfo(numDimensions, &dimensionSizes.front(), armnn::DataType::Float32); - const armnn::TensorInfo outputInfo(numDimensions, &dimensionSizes.front(), armnn::DataType::Float32); - - // 4D Softmax should be reported as unsupported on the NEON backend - BOOST_TEST(!armnn::IsSoftmaxSupportedNeon(inputInfo, outputInfo, armnn::SoftmaxDescriptor())); -} - -// Splitter -ARMNN_AUTO_TEST_CASE(SimpleSplitter, SplitterTest) -ARMNN_AUTO_TEST_CASE(SimpleSplitterUint8, SplitterUint8Test) - -ARMNN_AUTO_TEST_CASE(CopyViaSplitter, CopyViaSplitterTest) -ARMNN_AUTO_TEST_CASE(CopyViaSplitterUint8, CopyViaSplitterUint8Test) - -// Merger -ARMNN_AUTO_TEST_CASE(SimpleMerger, MergerTest) -ARMNN_AUTO_TEST_CASE(MergerUint8, MergerUint8Test) - -// Fully Connected -ARMNN_AUTO_TEST_CASE(SimpleFullyConnected, FullyConnectedFloat32Test, false, false) -ARMNN_AUTO_TEST_CASE(SimpleFullyConnectedWithBias, FullyConnectedFloat32Test, true, false) -ARMNN_AUTO_TEST_CASE(SimpleFullyConnectedWithTranspose, FullyConnectedFloat32Test, false, true) -ARMNN_AUTO_TEST_CASE(FullyConnectedLarge, FullyConnectedLargeTest, false) -ARMNN_AUTO_TEST_CASE(FullyConnectedLargeTransposed, FullyConnectedLargeTest, true) -ARMNN_AUTO_TEST_CASE(FullyConnectedUint8, FullyConnectedUint8Test, false) -ARMNN_AUTO_TEST_CASE(FullyConnectedBiasedUint8, FullyConnectedUint8Test, true) - -// Add -ARMNN_AUTO_TEST_CASE(SimpleAdd, AdditionTest) -ARMNN_AUTO_TEST_CASE(AddBroadcast, AdditionBroadcastTest) 
-ARMNN_AUTO_TEST_CASE(AddBroadcast1Element, AdditionBroadcast1ElementTest) - -// Sub -ARMNN_AUTO_TEST_CASE(SimpleSub, SubtractionTest) - -// Mul -ARMNN_AUTO_TEST_CASE(SimpleMultiplication, MultiplicationTest) -ARMNN_AUTO_TEST_CASE(MultiplicationBroadcast1Element, MultiplicationBroadcast1ElementTest) -ARMNN_AUTO_TEST_CASE(MultiplicationBroadcast1DVector, MultiplicationBroadcast1DVectorTest) - -// Batch Norm -ARMNN_AUTO_TEST_CASE(BatchNorm, BatchNormTest) - -// Constant -ARMNN_AUTO_TEST_CASE(Constant, ConstantTest) -ARMNN_AUTO_TEST_CASE(ConstantUint8, ConstantTestUint8) - -// Concatenation -ARMNN_AUTO_TEST_CASE(Concatenation1d, Concatenation1dTest) -ARMNN_AUTO_TEST_CASE(Concatenation1dUint8, Concatenation1dUint8Test) - -ARMNN_AUTO_TEST_CASE(Concatenation2dDim0, Concatenation2dDim0Test) -ARMNN_AUTO_TEST_CASE(Concatenation2dDim0Uint8, Concatenation2dDim0Uint8Test) -ARMNN_AUTO_TEST_CASE(Concatenation2dDim1, Concatenation2dDim1Test) -ARMNN_AUTO_TEST_CASE(Concatenation2dDim1Uint8, Concatenation2dDim1Uint8Test) - -ARMNN_AUTO_TEST_CASE(Concatenation2dDim0DiffInputDims, Concatenation2dDim0DiffInputDimsTest) -ARMNN_AUTO_TEST_CASE(Concatenation2dDim0DiffInputDimsUint8, Concatenation2dDim0DiffInputDimsUint8Test) -ARMNN_AUTO_TEST_CASE(Concatenation2dDim1DiffInputDims, Concatenation2dDim1DiffInputDimsTest) -ARMNN_AUTO_TEST_CASE(Concatenation2dDim1DiffInputDimsUint8, Concatenation2dDim1DiffInputDimsUint8Test) - -ARMNN_AUTO_TEST_CASE(Concatenation3dDim0, Concatenation3dDim0Test) -ARMNN_AUTO_TEST_CASE(Concatenation3dDim0Uint8, Concatenation3dDim0Uint8Test) -ARMNN_AUTO_TEST_CASE(Concatenation3dDim1, Concatenation3dDim1Test) -ARMNN_AUTO_TEST_CASE(Concatenation3dDim1Uint8, Concatenation3dDim1Uint8Test) -ARMNN_AUTO_TEST_CASE(Concatenation3dDim2, Concatenation3dDim2Test) -ARMNN_AUTO_TEST_CASE(Concatenation3dDim2Uint8, Concatenation3dDim2Uint8Test) - -ARMNN_AUTO_TEST_CASE(Concatenation3dDim0DiffInputDims, Concatenation3dDim0DiffInputDimsTest) 
-ARMNN_AUTO_TEST_CASE(Concatenation3dDim0DiffInputDimsUint8, Concatenation3dDim0DiffInputDimsUint8Test) -ARMNN_AUTO_TEST_CASE(Concatenation3dDim1DiffInputDims, Concatenation3dDim1DiffInputDimsTest) -ARMNN_AUTO_TEST_CASE(Concatenation3dDim1DiffInputDimsUint8, Concatenation3dDim1DiffInputDimsUint8Test) -ARMNN_AUTO_TEST_CASE(Concatenation3dDim2DiffInputDims, Concatenation3dDim2DiffInputDimsTest) -ARMNN_AUTO_TEST_CASE(Concatenation3dDim2DiffInputDimsUint8, Concatenation3dDim2DiffInputDimsUint8Test) - -// L2 Normalization -ARMNN_AUTO_TEST_CASE(L2Normalization1d, L2Normalization1dTest) -ARMNN_AUTO_TEST_CASE(L2Normalization2d, L2Normalization2dTest) -ARMNN_AUTO_TEST_CASE(L2Normalization3d, L2Normalization3dTest) -ARMNN_AUTO_TEST_CASE(L2Normalization4d, L2Normalization4dTest) - -ARMNN_AUTO_TEST_CASE(L2Normalization1dNhwc, L2Normalization1dNhwcTest) -ARMNN_AUTO_TEST_CASE(L2Normalization2dNhwc, L2Normalization2dNhwcTest) -ARMNN_AUTO_TEST_CASE(L2Normalization3dNhwc, L2Normalization3dNhwcTest) -ARMNN_AUTO_TEST_CASE(L2Normalization4dNhwc, L2Normalization4dNhwcTest) - -// Floor -ARMNN_AUTO_TEST_CASE(SimpleFloor, SimpleFloorTest) - -// Reshape -ARMNN_AUTO_TEST_CASE(SimpleReshapeFloat32, SimpleReshapeFloat32Test) -ARMNN_AUTO_TEST_CASE(SimpleReshapeUint8, SimpleReshapeUint8Test) - -// Permute -ARMNN_AUTO_TEST_CASE(SimplePermuteFloat32, SimplePermuteFloat32Test) -ARMNN_AUTO_TEST_CASE(SimplePermuteUint8, SimplePermuteUint8Test) -ARMNN_AUTO_TEST_CASE(PermuteFloat32ValueSet1, PermuteFloat32ValueSet1Test) -ARMNN_AUTO_TEST_CASE(PermuteFloat32ValueSet2, PermuteFloat32ValueSet2Test) -ARMNN_AUTO_TEST_CASE(PermuteFloat32ValueSet3, PermuteFloat32ValueSet3Test) - -// Lstm -ARMNN_AUTO_TEST_CASE(LstmLayerFloat32WithCifgWithPeepholeNoProjection, - LstmLayerFloat32WithCifgWithPeepholeNoProjectionTest) -ARMNN_AUTO_TEST_CASE(LstmLayerFloat32NoCifgNoPeepholeNoProjection, - LstmLayerFloat32NoCifgNoPeepholeNoProjectionTest) -ARMNN_AUTO_TEST_CASE(LstmLayerFloat32NoCifgWithPeepholeWithProjection, - 
LstmLayerFloat32NoCifgWithPeepholeWithProjectionTest) - -// Normalization -ARMNN_AUTO_TEST_CASE(SimpleNormalizationAcross, SimpleNormalizationAcrossTest) -ARMNN_AUTO_TEST_CASE(SimpleNormalizationWithin, SimpleNormalizationWithinTest) -ARMNN_AUTO_TEST_CASE(SimpleNormalizationAcrossNhwc, SimpleNormalizationAcrossNhwcTest) - -// ============================================================================ -// COMPARE tests - -ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareConv2dWithReference, CompareConvolution2dTest) - -ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareDepthwiseConv2dWithReferenceFloat32, CompareDepthwiseConvolution2dTest) -ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareDepthwiseConv2dWithReferenceUint8, CompareDepthwiseConvolution2dTest) - -ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareNormalizationWithinWithReference, CompareNormalizationTest, - armnn::NormalizationAlgorithmChannel::Within, - armnn::NormalizationAlgorithmMethod::LocalBrightness) -ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareNormalizationAcrossWithReference, CompareNormalizationTest, - armnn::NormalizationAlgorithmChannel::Across, - armnn::NormalizationAlgorithmMethod::LocalBrightness) - -ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareMaxPooling2dWithReference, ComparePooling2dTest, armnn::PoolingAlgorithm::Max) -ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareMaxPooling2dWithReferenceUint8, ComparePooling2dUint8Test, - armnn::PoolingAlgorithm::Max) -ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareAveragePooling2dWithReference, ComparePooling2dTest, - armnn::PoolingAlgorithm::Average) -ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareAveragePooling2dWithReferenceUint8, ComparePooling2dUint8Test, - armnn::PoolingAlgorithm::Average) -ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareL2Pooling2dWithReference, ComparePooling2dTest, armnn::PoolingAlgorithm::L2) -ARMNN_COMPARE_REF_AUTO_TEST_CASE(UNSUPPORTED_CompareL2Pooling2dWithReferenceUint8, ComparePooling2dUint8Test, - armnn::PoolingAlgorithm::L2) - 
-ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareSoftmaxBeta1WithReference, CompareSoftmaxTest, 1.0f) -ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareSoftmaxBeta2WithReference, CompareSoftmaxTest, 2.0f) - -ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareSoftmaxUint8Beta1WithReference, CompareSoftmaxUint8Test, 1.0f) -ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareSoftmaxUint8Beta2WithReference, CompareSoftmaxUint8Test, 2.0f) - -ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareAddition, CompareAdditionTest) - -ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareMultiplicationWithReference, CompareMultiplicationTest) - -ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareBatchNorm, CompareBatchNormTest) - -ARMNN_COMPARE_REF_AUTO_TEST_CASE(ReLu1, CompareBoundedReLuTest, 1.0f, -1.0f) -ARMNN_COMPARE_REF_AUTO_TEST_CASE(ReLu6, CompareBoundedReLuTest, 6.0f, 0.0f) - -// ============================================================================ -// FIXTURE tests - -ARMNN_COMPARE_REF_FIXTURE_TEST_CASE(CompareSigmoidActivationWithReference, ActivationFixture, - CompareActivationTest, armnn::ActivationFunction::Sigmoid, 5u) - -ARMNN_COMPARE_REF_FIXTURE_TEST_CASE(CompareTanhActivationWithReference, ActivationFixture, - CompareActivationTest, armnn::ActivationFunction::TanH, 5u) - -ARMNN_COMPARE_REF_FIXTURE_TEST_CASE(CompareLinearActivationWithReference, ActivationFixture, - CompareActivationTest, armnn::ActivationFunction::Linear, 5u) - -ARMNN_COMPARE_REF_FIXTURE_TEST_CASE(CompareReLuActivationWithReference, ActivationFixture, - CompareActivationTest, armnn::ActivationFunction::ReLu, 5u) - -ARMNN_COMPARE_REF_FIXTURE_TEST_CASE(CompareBoundedReLuActivationWithReference, ActivationFixture, - CompareActivationTest, armnn::ActivationFunction::BoundedReLu, 5u) -ARMNN_COMPARE_REF_FIXTURE_TEST_CASE(CompareBoundedReLuActivationWithReferenceUint8, ActivationFixture, - CompareActivationUint8Test, armnn::ActivationFunction::BoundedReLu) - -ARMNN_COMPARE_REF_FIXTURE_TEST_CASE(CompareSoftReLuActivationWithReference, ActivationFixture, - 
CompareActivationTest, armnn::ActivationFunction::SoftReLu, 1u) - -ARMNN_COMPARE_REF_FIXTURE_TEST_CASE(CompareLeakyReLuActivationWithReference, ActivationFixture, - CompareActivationTest, armnn::ActivationFunction::LeakyReLu, 5u) - -ARMNN_COMPARE_REF_FIXTURE_TEST_CASE(CompareAbsActivationWithReference, ActivationFixture, - CompareActivationTest, armnn::ActivationFunction::Abs, 5u) - -ARMNN_COMPARE_REF_FIXTURE_TEST_CASE(CompareSqrtActivationWithReference, PositiveActivationFixture, - CompareActivationTest, armnn::ActivationFunction::Sqrt, 5u) - -ARMNN_COMPARE_REF_FIXTURE_TEST_CASE(CompareSquareActivationWithReference, ActivationFixture, - CompareActivationTest, armnn::ActivationFunction::Square, 5u) -BOOST_AUTO_TEST_SUITE_END() diff --git a/src/backends/test/ClContextControlFixture.hpp b/src/backends/test/ClContextControlFixture.hpp deleted file mode 100644 index fd53e3fcf3..0000000000 --- a/src/backends/test/ClContextControlFixture.hpp +++ /dev/null @@ -1,34 +0,0 @@ -// -// Copyright © 2017 Arm Ltd. All rights reserved. 
-// SPDX-License-Identifier: MIT -// - -#pragma once - -#include - -template -struct ClContextControlFixtureBase -{ - static ClContextControlFixtureBase*& Instance() - { - static ClContextControlFixtureBase* s_Instance = nullptr; - return s_Instance; - } - - // Initialising ClContextControl to ensure OpenCL is loaded correctly for each test case - ClContextControlFixtureBase() - : m_ClContextControl(nullptr, ProfilingEnabled) - { - Instance() = this; - } - ~ClContextControlFixtureBase() - { - Instance() = nullptr; - } - - armnn::ClContextControl m_ClContextControl; -}; - -using ClContextControlFixture = ClContextControlFixtureBase; -using ClProfilingContextControlFixture = ClContextControlFixtureBase; diff --git a/src/backends/test/CreateWorkloadCl.cpp b/src/backends/test/CreateWorkloadCl.cpp deleted file mode 100644 index d56bad2bb9..0000000000 --- a/src/backends/test/CreateWorkloadCl.cpp +++ /dev/null @@ -1,676 +0,0 @@ -// -// Copyright © 2017 Arm Ltd. All rights reserved. -// SPDX-License-Identifier: MIT -// -#include -#include -#include -#include -#include -#include -#include "ClContextControlFixture.hpp" - -#include - -boost::test_tools::predicate_result CompareIClTensorHandleShape(IClTensorHandle* tensorHandle, - std::initializer_list expectedDimensions) -{ - return CompareTensorHandleShape(tensorHandle, expectedDimensions); -} - -BOOST_FIXTURE_TEST_SUITE(CreateWorkloadCl, ClContextControlFixture) - -template -static void ClCreateActivationWorkloadTest() -{ - Graph graph; - ClWorkloadFactory factory; - - auto workload = CreateActivationWorkloadTest(factory, graph); - - // Checks that inputs/outputs are as we expect them (see definition of CreateActivationWorkloadTest). 
- ActivationQueueDescriptor queueDescriptor = workload->GetData(); - auto inputHandle = boost::polymorphic_downcast(queueDescriptor.m_Inputs[0]); - auto outputHandle = boost::polymorphic_downcast(queueDescriptor.m_Outputs[0]); - - BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {1})); - BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {1})); -} - -BOOST_AUTO_TEST_CASE(CreateActivationFloatWorkload) -{ - ClCreateActivationWorkloadTest(); -} - -BOOST_AUTO_TEST_CASE(CreateActivationFloat16Workload) -{ - ClCreateActivationWorkloadTest(); -} - -template -static void ClCreateArithmethicWorkloadTest() -{ - Graph graph; - ClWorkloadFactory factory; - auto workload = CreateArithmeticWorkloadTest(factory, graph); - - // Checks that inputs/outputs are as we expect them (see definition of CreateArithmeticWorkloadTest). - DescriptorType queueDescriptor = workload->GetData(); - auto inputHandle1 = boost::polymorphic_downcast(queueDescriptor.m_Inputs[0]); - auto inputHandle2 = boost::polymorphic_downcast(queueDescriptor.m_Inputs[1]); - auto outputHandle = boost::polymorphic_downcast(queueDescriptor.m_Outputs[0]); - BOOST_TEST(CompareIClTensorHandleShape(inputHandle1, {2, 3})); - BOOST_TEST(CompareIClTensorHandleShape(inputHandle2, {2, 3})); - BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {2, 3})); -} - -BOOST_AUTO_TEST_CASE(CreateAdditionFloatWorkload) -{ - ClCreateArithmethicWorkloadTest(); -} - -BOOST_AUTO_TEST_CASE(CreateAdditionFloat16Workload) -{ - ClCreateArithmethicWorkloadTest(); -} - -BOOST_AUTO_TEST_CASE(CreateSubtractionFloatWorkload) -{ - ClCreateArithmethicWorkloadTest(); -} - -BOOST_AUTO_TEST_CASE(CreateSubtractionFloat16Workload) -{ - ClCreateArithmethicWorkloadTest(); -} - -BOOST_AUTO_TEST_CASE(CreateMultiplicationFloatWorkloadTest) -{ - ClCreateArithmethicWorkloadTest(); -} - -BOOST_AUTO_TEST_CASE(CreateMultiplicationFloat16WorkloadTest) -{ - ClCreateArithmethicWorkloadTest(); -} - -BOOST_AUTO_TEST_CASE(CreateMultiplicationUint8WorkloadTest) -{ - 
ClCreateArithmethicWorkloadTest(); -} - -BOOST_AUTO_TEST_CASE(CreateDivisionFloatWorkloadTest) -{ - ClCreateArithmethicWorkloadTest(); -} - -BOOST_AUTO_TEST_CASE(CreateDivisionFloat16WorkloadTest) -{ - ClCreateArithmethicWorkloadTest(); -} - -template -static void ClCreateBatchNormalizationWorkloadTest() -{ - Graph graph; - ClWorkloadFactory factory; - - auto workload = CreateBatchNormalizationWorkloadTest - (factory, graph); - - // Checks that inputs/outputs are as we expect them (see definition of CreateBatchNormalizationWorkloadTest). - BatchNormalizationQueueDescriptor queueDescriptor = workload->GetData(); - auto inputHandle = boost::polymorphic_downcast(queueDescriptor.m_Inputs[0]); - auto outputHandle = boost::polymorphic_downcast(queueDescriptor.m_Outputs[0]); - - BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {2, 3, 1, 1})); - BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {2, 3, 1, 1})); -} - -BOOST_AUTO_TEST_CASE(CreateBatchNormalizationFloatWorkload) -{ - ClCreateBatchNormalizationWorkloadTest(); -} - -BOOST_AUTO_TEST_CASE(CreateBatchNormalizationFloat16Workload) -{ - ClCreateBatchNormalizationWorkloadTest(); -} - -BOOST_AUTO_TEST_CASE(CreateConvertFp16ToFp32Workload) -{ - Graph graph; - ClWorkloadFactory factory; - auto workload = CreateConvertFp16ToFp32WorkloadTest(factory, graph); - - ConvertFp16ToFp32QueueDescriptor queueDescriptor = workload->GetData(); - auto inputHandle = boost::polymorphic_downcast(queueDescriptor.m_Inputs[0]); - auto outputHandle = boost::polymorphic_downcast(queueDescriptor.m_Outputs[0]); - - BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {3, 2, 3})); - BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {3, 2, 3})); - BOOST_TEST((inputHandle->GetTensor().info()->data_type() == arm_compute::DataType::F16)); - BOOST_TEST((outputHandle->GetTensor().info()->data_type() == arm_compute::DataType::F32)); -} - -BOOST_AUTO_TEST_CASE(CreateConvertFp32ToFp16Workload) -{ - Graph graph; - ClWorkloadFactory factory; - auto 
workload = CreateConvertFp32ToFp16WorkloadTest(factory, graph); - - ConvertFp32ToFp16QueueDescriptor queueDescriptor = workload->GetData(); - auto inputHandle = boost::polymorphic_downcast(queueDescriptor.m_Inputs[0]); - auto outputHandle = boost::polymorphic_downcast(queueDescriptor.m_Outputs[0]); - - BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {3, 2, 3})); - BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {3, 2, 3})); - BOOST_TEST((inputHandle->GetTensor().info()->data_type() == arm_compute::DataType::F32)); - BOOST_TEST((outputHandle->GetTensor().info()->data_type() == arm_compute::DataType::F16)); -} - -template -static void ClConvolution2dWorkloadTest(DataLayout dataLayout) -{ - Graph graph; - ClWorkloadFactory factory; - auto workload = CreateConvolution2dWorkloadTest(factory, - graph, - dataLayout); - - std::initializer_list inputShape = (dataLayout == DataLayout::NCHW) ? - std::initializer_list({2, 3, 8, 16}) : std::initializer_list({2, 8, 16, 3}); - std::initializer_list outputShape = (dataLayout == DataLayout::NCHW) ? - std::initializer_list({2, 2, 2, 10}) : std::initializer_list({2, 2, 10, 2}); - - // Checks that outputs and inputs are as we expect them (see definition of CreateConvolution2dWorkloadTest). 
- Convolution2dQueueDescriptor queueDescriptor = workload->GetData(); - auto inputHandle = boost::polymorphic_downcast(queueDescriptor.m_Inputs[0]); - auto outputHandle = boost::polymorphic_downcast(queueDescriptor.m_Outputs[0]); - BOOST_TEST(CompareIClTensorHandleShape(inputHandle, inputShape)); - BOOST_TEST(CompareIClTensorHandleShape(outputHandle, outputShape)); -} - -BOOST_AUTO_TEST_CASE(CreateConvolution2dFloatNchwWorkload) -{ - ClConvolution2dWorkloadTest(DataLayout::NCHW); -} - -BOOST_AUTO_TEST_CASE(CreateConvolution2dFloatNhwcWorkload) -{ - ClConvolution2dWorkloadTest(DataLayout::NHWC); -} - -BOOST_AUTO_TEST_CASE(CreateConvolution2dFloat16NchwWorkload) -{ - ClConvolution2dWorkloadTest(DataLayout::NCHW); -} - -BOOST_AUTO_TEST_CASE(CreateConvolution2dFloat16NhwcWorkload) -{ - ClConvolution2dWorkloadTest(DataLayout::NHWC); -} - -template -static void ClDirectConvolution2dWorkloadTest() -{ - Graph graph; - ClWorkloadFactory factory; - auto workload = CreateDirectConvolution2dWorkloadTest(factory, graph); - - // Checks that outputs and inputs are as we expect them (see definition of CreateDirectConvolution2dWorkloadTest). 
- Convolution2dQueueDescriptor queueDescriptor = workload->GetData(); - auto inputHandle = boost::polymorphic_downcast(queueDescriptor.m_Inputs[0]); - auto outputHandle = boost::polymorphic_downcast(queueDescriptor.m_Outputs[0]); - BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {2, 3, 6, 6})); - BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {2, 2, 6, 6})); -} - -BOOST_AUTO_TEST_CASE(CreateDirectConvolution2dFloatWorkload) -{ - ClDirectConvolution2dWorkloadTest(); -} - -BOOST_AUTO_TEST_CASE(CreateDirectConvolution2dFloat16Workload) -{ - ClDirectConvolution2dWorkloadTest(); -} - -BOOST_AUTO_TEST_CASE(CreateDirectConvolution2dUint8Workload) -{ - ClDirectConvolution2dWorkloadTest(); -} - -template -static void ClCreateFullyConnectedWorkloadTest() -{ - Graph graph; - ClWorkloadFactory factory; - auto workload = - CreateFullyConnectedWorkloadTest(factory, graph); - - // Checks that outputs and inputs are as we expect them (see definition of CreateFullyConnectedWorkloadTest). - FullyConnectedQueueDescriptor queueDescriptor = workload->GetData(); - auto inputHandle = boost::polymorphic_downcast(queueDescriptor.m_Inputs[0]); - auto outputHandle = boost::polymorphic_downcast(queueDescriptor.m_Outputs[0]); - BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {3, 1, 4, 5})); - BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {3, 7})); -} - - -BOOST_AUTO_TEST_CASE(CreateFullyConnectedFloatWorkloadTest) -{ - ClCreateFullyConnectedWorkloadTest(); -} - -BOOST_AUTO_TEST_CASE(CreateFullyConnectedFloat16WorkloadTest) -{ - ClCreateFullyConnectedWorkloadTest(); -} - -template -static void ClNormalizationWorkloadTest(DataLayout dataLayout) -{ - Graph graph; - ClWorkloadFactory factory; - - auto workload = CreateNormalizationWorkloadTest - (factory, graph, dataLayout); - - // Checks that inputs/outputs are as we expect them (see definition of CreateNormalizationWorkloadTest). 
- NormalizationQueueDescriptor queueDescriptor = workload->GetData(); - auto inputHandle = boost::polymorphic_downcast(queueDescriptor.m_Inputs[0]); - auto outputHandle = boost::polymorphic_downcast(queueDescriptor.m_Outputs[0]); - - BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {3, 5, 5, 1})); - BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {3, 5, 5, 1})); -} - -BOOST_AUTO_TEST_CASE(CreateNormalizationFloat32NchwWorkload) -{ - ClNormalizationWorkloadTest(DataLayout::NCHW); -} - -BOOST_AUTO_TEST_CASE(CreateNormalizationFloat16NchwWorkload) -{ - ClNormalizationWorkloadTest(DataLayout::NCHW); -} - -BOOST_AUTO_TEST_CASE(CreateNormalizationFloat32NhwcWorkload) -{ - ClNormalizationWorkloadTest(DataLayout::NHWC); -} - -BOOST_AUTO_TEST_CASE(CreateNormalizationFloat16NhwcWorkload) -{ - ClNormalizationWorkloadTest(DataLayout::NHWC); -} - -template -static void ClPooling2dWorkloadTest(DataLayout dataLayout) -{ - Graph graph; - ClWorkloadFactory factory; - - auto workload = CreatePooling2dWorkloadTest(factory, graph, dataLayout); - - std::initializer_list inputShape = (dataLayout == DataLayout::NCHW) ? - std::initializer_list({3, 2, 5, 5}) : std::initializer_list({3, 5, 5, 2}); - std::initializer_list outputShape = (dataLayout == DataLayout::NCHW) ? - std::initializer_list({3, 2, 2, 4}) : std::initializer_list({3, 2, 4, 2}); - - // Check that inputs/outputs are as we expect them (see definition of CreatePooling2dWorkloadTest). 
- Pooling2dQueueDescriptor queueDescriptor = workload->GetData(); - auto inputHandle = boost::polymorphic_downcast(queueDescriptor.m_Inputs[0]); - auto outputHandle = boost::polymorphic_downcast(queueDescriptor.m_Outputs[0]); - - BOOST_TEST(CompareIClTensorHandleShape(inputHandle, inputShape)); - BOOST_TEST(CompareIClTensorHandleShape(outputHandle, outputShape)); -} - -BOOST_AUTO_TEST_CASE(CreatePooling2dFloatNchwWorkload) -{ - ClPooling2dWorkloadTest(DataLayout::NCHW); -} - -BOOST_AUTO_TEST_CASE(CreatePooling2dFloatNhwcWorkload) -{ - ClPooling2dWorkloadTest(DataLayout::NHWC); -} - -BOOST_AUTO_TEST_CASE(CreatePooling2dFloat16NchwWorkload) -{ - ClPooling2dWorkloadTest(DataLayout::NCHW); -} - -BOOST_AUTO_TEST_CASE(CreatePooling2dFloat16NhwcWorkload) -{ - ClPooling2dWorkloadTest(DataLayout::NHWC); -} - -template -static void ClCreateReshapeWorkloadTest() -{ - Graph graph; - ClWorkloadFactory factory; - - auto workload = CreateReshapeWorkloadTest(factory, graph); - - // Checks that outputs and inputs are as we expect them (see definition of CreateReshapeWorkloadTest). - ReshapeQueueDescriptor queueDescriptor = workload->GetData(); - auto inputHandle = boost::polymorphic_downcast(queueDescriptor.m_Inputs[0]); - auto outputHandle = boost::polymorphic_downcast(queueDescriptor.m_Outputs[0]); - - BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {4, 1})); - BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {4})); // Leading size 1 dimensions are collapsed by ACL. 
-} - -BOOST_AUTO_TEST_CASE(CreateReshapeFloatWorkload) -{ - ClCreateReshapeWorkloadTest(); -} - -BOOST_AUTO_TEST_CASE(CreateReshapeFloat16Workload) -{ - ClCreateReshapeWorkloadTest(); -} - -BOOST_AUTO_TEST_CASE(CreateReshapeUint8Workload) -{ - ClCreateReshapeWorkloadTest(); -} - -template -static void ClSoftmaxWorkloadTest() -{ - Graph graph; - ClWorkloadFactory factory; - - auto workload = CreateSoftmaxWorkloadTest(factory, graph); - - // Checks that inputs/outputs are as we expect them (see definition of ClSoftmaxFloatWorkload). - SoftmaxQueueDescriptor queueDescriptor = workload->GetData(); - auto inputHandle = boost::polymorphic_downcast(queueDescriptor.m_Inputs[0]); - auto outputHandle = boost::polymorphic_downcast(queueDescriptor.m_Outputs[0]); - - BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {4, 1})); - BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {4, 1})); -} - - -BOOST_AUTO_TEST_CASE(CreateSoftmaxFloatWorkloadTest) -{ - ClSoftmaxWorkloadTest(); -} - -BOOST_AUTO_TEST_CASE(CreateSoftmaxFloat16WorkloadTest) -{ - ClSoftmaxWorkloadTest(); -} - -template -static void ClSplitterWorkloadTest() -{ - Graph graph; - ClWorkloadFactory factory; - - auto workload = CreateSplitterWorkloadTest(factory, graph); - - // Checks that outputs are as we expect them (see definition of CreateSplitterWorkloadTest). 
- SplitterQueueDescriptor queueDescriptor = workload->GetData(); - auto inputHandle = boost::polymorphic_downcast(queueDescriptor.m_Inputs[0]); - BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {5, 7, 7})); - - auto outputHandle1 = boost::polymorphic_downcast(queueDescriptor.m_Outputs[1]); - BOOST_TEST(CompareIClTensorHandleShape(outputHandle1, {2, 7, 7})); - - auto outputHandle2 = boost::polymorphic_downcast(queueDescriptor.m_Outputs[2]); - BOOST_TEST(CompareIClTensorHandleShape(outputHandle2, {2, 7, 7})); - - auto outputHandle0 = boost::polymorphic_downcast(queueDescriptor.m_Outputs[0]); - // NOTE: At the moment the CL collapses the tensor to a 2 dim when dimension zero = 1 - // we are raising this difference between the NEON and CL libs as an issue with the compute library team. - BOOST_TEST(CompareIClTensorHandleShape(outputHandle0, {7, 7})); -} - -BOOST_AUTO_TEST_CASE(CreateSplitterFloatWorkload) -{ - ClSplitterWorkloadTest(); -} - -BOOST_AUTO_TEST_CASE(CreateSplitterFloat16Workload) -{ - ClSplitterWorkloadTest(); -} - -template -static void ClSplitterMergerTest() -{ - // Tests that it is possible to decide which output of the splitter layer - // should be lined to which input of the merger layer. - // We test that is is possible to specify 0th output - // of the splitter to be the 1st input to the merger and the 1st output of the splitter to be 0th input - // of the merger. - - Graph graph; - ClWorkloadFactory factory; - - auto workloads = - CreateSplitterMergerWorkloadTest - (factory, graph); - - auto wlSplitter = std::move(workloads.first); - auto wlMerger = std::move(workloads.second); - - //Checks that the index of inputs/outputs matches what we declared on InputDescriptor construction. 
- armnn::ClSubTensorHandle* sOut0 = dynamic_cast(wlSplitter->GetData().m_Outputs[0]); - armnn::ClSubTensorHandle* sOut1 = dynamic_cast(wlSplitter->GetData().m_Outputs[1]); - armnn::ClSubTensorHandle* mIn0 = dynamic_cast(wlMerger->GetData().m_Inputs[0]); - armnn::ClSubTensorHandle* mIn1 = dynamic_cast(wlMerger->GetData().m_Inputs[1]); - - BOOST_TEST(sOut0); - BOOST_TEST(sOut1); - BOOST_TEST(mIn0); - BOOST_TEST(mIn1); - - //Fliped order of inputs/outputs. - bool validDataPointers = (sOut0 == mIn1) && (sOut1 == mIn0); - BOOST_TEST(validDataPointers); - - - //Also make sure that the inputs are subtensors of one tensor and outputs are sub tensors of another tensor. - bool validSubTensorParents = (mIn0->GetTensor().parent() == mIn1->GetTensor().parent()) - && (sOut0->GetTensor().parent() == sOut1->GetTensor().parent()); - - BOOST_TEST(validSubTensorParents); -} - -BOOST_AUTO_TEST_CASE(CreateSplitterMergerFloatWorkload) -{ - ClSplitterMergerTest(); -} - -BOOST_AUTO_TEST_CASE(CreateSplitterMergerFloat16Workload) -{ - ClSplitterMergerTest(); -} - - -BOOST_AUTO_TEST_CASE(CreateSingleOutputMultipleInputs) -{ - // Test that it is possible to assign multiple (two) different layers to each of the outputs of a splitter layer. - // We create a splitter with two outputs. That each of those outputs is used by two different activation layers. - - Graph graph; - ClWorkloadFactory factory; - std::unique_ptr wlSplitter; - std::unique_ptr wlActiv0_0; - std::unique_ptr wlActiv0_1; - std::unique_ptr wlActiv1_0; - std::unique_ptr wlActiv1_1; - - CreateSplitterMultipleInputsOneOutputWorkloadTest(factory, graph, wlSplitter, wlActiv0_0, wlActiv0_1, - wlActiv1_0, wlActiv1_1); - - //Checks that the index of inputs/outputs matches what we declared on InputDescriptor construction. 
- armnn::ClSubTensorHandle* sOut0 = dynamic_cast(wlSplitter->GetData().m_Outputs[0]); - armnn::ClSubTensorHandle* sOut1 = dynamic_cast(wlSplitter->GetData().m_Outputs[1]); - armnn::ClSubTensorHandle* activ0_0Im = dynamic_cast(wlActiv0_0->GetData().m_Inputs[0]); - armnn::ClSubTensorHandle* activ0_1Im = dynamic_cast(wlActiv0_1->GetData().m_Inputs[0]); - armnn::ClSubTensorHandle* activ1_0Im = dynamic_cast(wlActiv1_0->GetData().m_Inputs[0]); - armnn::ClSubTensorHandle* activ1_1Im = dynamic_cast(wlActiv1_1->GetData().m_Inputs[0]); - - - BOOST_TEST(sOut0); - BOOST_TEST(sOut1); - BOOST_TEST(activ0_0Im); - BOOST_TEST(activ0_1Im); - BOOST_TEST(activ1_0Im); - BOOST_TEST(activ1_1Im); - - bool validDataPointers = (sOut0 == activ0_0Im) && (sOut0 == activ0_1Im) && - (sOut1 == activ1_0Im) && (sOut1 == activ1_1Im); - - BOOST_TEST(validDataPointers); -} - -BOOST_AUTO_TEST_CASE(CreateMemCopyWorkloadsCl) -{ - ClWorkloadFactory factory; - CreateMemCopyWorkloads(factory); -} - -template -static void ClL2NormalizationWorkloadTest(DataLayout dataLayout) -{ - Graph graph; - ClWorkloadFactory factory; - - auto workload = CreateL2NormalizationWorkloadTest - (factory, graph, dataLayout); - - // Checks that inputs/outputs are as we expect them (see definition of CreateNormalizationWorkloadTest). 
- L2NormalizationQueueDescriptor queueDescriptor = workload->GetData(); - auto inputHandle = boost::polymorphic_downcast(queueDescriptor.m_Inputs[0]); - auto outputHandle = boost::polymorphic_downcast(queueDescriptor.m_Outputs[0]); - - BOOST_TEST(CompareIClTensorHandleShape(inputHandle, { 5, 20, 50, 67 })); - BOOST_TEST(CompareIClTensorHandleShape(outputHandle, { 5, 20, 50, 67 })); -} - -BOOST_AUTO_TEST_CASE(CreateL2NormalizationFloatNchwWorkload) -{ - ClL2NormalizationWorkloadTest(DataLayout::NCHW); -} - -BOOST_AUTO_TEST_CASE(CreateL2NormalizationFloatNhwcWorkload) -{ - ClL2NormalizationWorkloadTest(DataLayout::NHWC); -} - -BOOST_AUTO_TEST_CASE(CreateL2NormalizationFloat16NchwWorkload) -{ - ClL2NormalizationWorkloadTest(DataLayout::NCHW); -} - -BOOST_AUTO_TEST_CASE(CreateL2NormalizationFloat16NhwcWorkload) -{ - ClL2NormalizationWorkloadTest(DataLayout::NHWC); -} - -template -static void ClCreateLstmWorkloadTest() -{ - Graph graph; - ClWorkloadFactory factory; - auto workload = CreateLstmWorkloadTest(factory, graph); - - LstmQueueDescriptor queueDescriptor = workload->GetData(); - auto inputHandle = boost::polymorphic_downcast(queueDescriptor.m_Inputs[0]); - auto outputHandle = boost::polymorphic_downcast(queueDescriptor.m_Outputs[1]); - BOOST_TEST(CompareIClTensorHandleShape(inputHandle, { 2, 2 })); - BOOST_TEST(CompareIClTensorHandleShape(outputHandle, { 2, 4 })); -} - -BOOST_AUTO_TEST_CASE(CreateLSTMWorkloadFloatWorkload) -{ - ClCreateLstmWorkloadTest(); -} - -template -static void ClResizeBilinearWorkloadTest(DataLayout dataLayout) -{ - Graph graph; - ClWorkloadFactory factory; - - auto workload = CreateResizeBilinearWorkloadTest(factory, graph, dataLayout); - - // Checks that inputs/outputs are as we expect them (see definition of CreateResizeBilinearWorkloadTest). 
- ResizeBilinearQueueDescriptor queueDescriptor = workload->GetData(); - auto inputHandle = boost::polymorphic_downcast(queueDescriptor.m_Inputs[0]); - auto outputHandle = boost::polymorphic_downcast(queueDescriptor.m_Outputs[0]); - - switch (dataLayout) - { - case DataLayout::NHWC: - BOOST_TEST(CompareIClTensorHandleShape(inputHandle, { 2, 4, 4, 3 })); - BOOST_TEST(CompareIClTensorHandleShape(outputHandle, { 2, 2, 2, 3 })); - break; - default: // NCHW - BOOST_TEST(CompareIClTensorHandleShape(inputHandle, { 2, 3, 4, 4 })); - BOOST_TEST(CompareIClTensorHandleShape(outputHandle, { 2, 3, 2, 2 })); - } -} - -BOOST_AUTO_TEST_CASE(CreateResizeBilinearFloat32NchwWorkload) -{ - ClResizeBilinearWorkloadTest(DataLayout::NCHW); -} - -BOOST_AUTO_TEST_CASE(CreateResizeBilinearFloat16NchwWorkload) -{ - ClResizeBilinearWorkloadTest(DataLayout::NCHW); -} - -BOOST_AUTO_TEST_CASE(CreateResizeBilinearFloat32NhwcWorkload) -{ - ClResizeBilinearWorkloadTest(DataLayout::NHWC); -} - -BOOST_AUTO_TEST_CASE(CreateResizeBilinearFloat16NhwcWorkload) -{ - ClResizeBilinearWorkloadTest(DataLayout::NHWC); -} - -BOOST_AUTO_TEST_SUITE_END() diff --git a/src/backends/test/CreateWorkloadNeon.cpp b/src/backends/test/CreateWorkloadNeon.cpp deleted file mode 100644 index b2ec563a69..0000000000 --- a/src/backends/test/CreateWorkloadNeon.cpp +++ /dev/null @@ -1,530 +0,0 @@ -// -// Copyright © 2017 Arm Ltd. All rights reserved. 
-// SPDX-License-Identifier: MIT -// -#include -#include -#include -#include -#include - -#include "test/CreateWorkloadClNeon.hpp" - -BOOST_AUTO_TEST_SUITE(CreateWorkloadNeon) - -namespace -{ - -bool TestNeonTensorHandleInfo(armnn::INeonTensorHandle* handle, const armnn::TensorInfo& expectedInfo) -{ - using namespace armnn::armcomputetensorutils; - - const arm_compute::ITensorInfo* handleInfo = handle->GetTensor().info(); - const arm_compute::TensorInfo expectedAclInfo = BuildArmComputeTensorInfo(expectedInfo); - - if (handleInfo->data_type() != expectedAclInfo.data_type()) - { - return false; - } - - if (handleInfo->num_dimensions() != expectedAclInfo.num_dimensions()) - { - return false; - } - - if (handleInfo->quantization_info() != expectedAclInfo.quantization_info()) - { - return false; - } - - for (std::size_t d = 0; d < expectedAclInfo.num_dimensions(); ++d) - { - if (handleInfo->dimension(d) != expectedAclInfo.dimension(d)) - { - return false; - } - } - - return true; -} - -} // namespace - -template -static void NeonCreateActivationWorkloadTest() -{ - Graph graph; - NeonWorkloadFactory factory; - auto workload = CreateActivationWorkloadTest - (factory, graph); - - // Checks that inputs/outputs are as we expect them (see definition of CreateActivationWorkloadTest). 
- ActivationQueueDescriptor queueDescriptor = workload->GetData(); - auto inputHandle = boost::polymorphic_downcast(queueDescriptor.m_Inputs[0]); - auto outputHandle = boost::polymorphic_downcast(queueDescriptor.m_Outputs[0]); - BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo({1, 1}, DataType))); - BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo({1, 1}, DataType))); -} - -#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC -BOOST_AUTO_TEST_CASE(CreateActivationFloat16Workload) -{ - NeonCreateActivationWorkloadTest(); -} -#endif - -BOOST_AUTO_TEST_CASE(CreateActivationFloatWorkload) -{ - NeonCreateActivationWorkloadTest(); -} - -template -static void NeonCreateArithmethicWorkloadTest() -{ - Graph graph; - NeonWorkloadFactory factory; - auto workload = CreateArithmeticWorkloadTest(factory, graph); - - DescriptorType queueDescriptor = workload->GetData(); - auto inputHandle1 = boost::polymorphic_downcast(queueDescriptor.m_Inputs[0]); - auto inputHandle2 = boost::polymorphic_downcast(queueDescriptor.m_Inputs[1]); - auto outputHandle = boost::polymorphic_downcast(queueDescriptor.m_Outputs[0]); - BOOST_TEST(TestNeonTensorHandleInfo(inputHandle1, TensorInfo({2, 3}, DataType))); - BOOST_TEST(TestNeonTensorHandleInfo(inputHandle2, TensorInfo({2, 3}, DataType))); - BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo({2, 3}, DataType))); -} - -#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC -BOOST_AUTO_TEST_CASE(CreateAdditionFloat16Workload) -{ - NeonCreateArithmethicWorkloadTest(); -} -#endif - -BOOST_AUTO_TEST_CASE(CreateAdditionFloatWorkload) -{ - NeonCreateArithmethicWorkloadTest(); -} - -#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC -BOOST_AUTO_TEST_CASE(CreateSubtractionFloat16Workload) -{ - NeonCreateArithmethicWorkloadTest(); -} -#endif - -BOOST_AUTO_TEST_CASE(CreateSubtractionFloatWorkload) -{ - NeonCreateArithmethicWorkloadTest(); -} - -#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC -BOOST_AUTO_TEST_CASE(CreateMultiplicationFloat16Workload) -{ - 
NeonCreateArithmethicWorkloadTest(); -} -#endif - -BOOST_AUTO_TEST_CASE(CreateMultiplicationFloatWorkload) -{ - NeonCreateArithmethicWorkloadTest(); -} - -template -static void NeonCreateBatchNormalizationWorkloadTest() -{ - Graph graph; - NeonWorkloadFactory factory; - auto workload = CreateBatchNormalizationWorkloadTest(factory, graph); - - // Checks that outputs and inputs are as we expect them (see definition of CreateBatchNormalizationWorkloadTest). - BatchNormalizationQueueDescriptor queueDescriptor = workload->GetData(); - auto inputHandle = boost::polymorphic_downcast(queueDescriptor.m_Inputs[0]); - auto outputHandle = boost::polymorphic_downcast(queueDescriptor.m_Outputs[0]); - BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo({2, 3, 1, 1}, DataType))); - BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo({2, 3, 1, 1}, DataType))); -} - -#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC -BOOST_AUTO_TEST_CASE(CreateBatchNormalizationFloat16Workload) -{ - NeonCreateBatchNormalizationWorkloadTest(); -} -#endif - -BOOST_AUTO_TEST_CASE(CreateBatchNormalizationFloatWorkload) -{ - NeonCreateBatchNormalizationWorkloadTest(); -} - -template -static void NeonCreateConvolution2dWorkloadTest(DataLayout dataLayout = DataLayout::NCHW) -{ - Graph graph; - NeonWorkloadFactory factory; - auto workload = CreateConvolution2dWorkloadTest(factory, graph, dataLayout); - - TensorShape inputShape = (dataLayout == DataLayout::NCHW) ? TensorShape{2, 3, 8, 16} : TensorShape{2, 8, 16, 3}; - TensorShape outputShape = (dataLayout == DataLayout::NCHW) ? TensorShape{2, 2, 2, 10} : TensorShape{2, 2, 10, 2}; - - // Checks that outputs and inputs are as we expect them (see definition of CreateConvolution2dWorkloadTest). 
- Convolution2dQueueDescriptor queueDescriptor = workload->GetData(); - auto inputHandle = boost::polymorphic_downcast(queueDescriptor.m_Inputs[0]); - auto outputHandle = boost::polymorphic_downcast(queueDescriptor.m_Outputs[0]); - BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo(inputShape, DataType))); - BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo(outputShape, DataType))); -} - -#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC -BOOST_AUTO_TEST_CASE(CreateConvolution2dFloat16NchwWorkload) -{ - NeonCreateConvolution2dWorkloadTest(); -} - -BOOST_AUTO_TEST_CASE(CreateConvolution2dFloat16NhwcWorkload) -{ - NeonCreateConvolution2dWorkloadTest(DataLayout::NHWC); -} - -#endif -BOOST_AUTO_TEST_CASE(CreateConvolution2dFloatNchwWorkload) -{ - NeonCreateConvolution2dWorkloadTest(); -} - -BOOST_AUTO_TEST_CASE(CreateConvolution2dFloatNhwcWorkload) -{ - NeonCreateConvolution2dWorkloadTest(DataLayout::NHWC); -} - -template -static void NeonCreateFullyConnectedWorkloadTest() -{ - Graph graph; - NeonWorkloadFactory factory; - auto workload = CreateFullyConnectedWorkloadTest(factory, graph); - - // Checks that outputs and inputs are as we expect them (see definition of CreateFullyConnectedWorkloadTest). 
- FullyConnectedQueueDescriptor queueDescriptor = workload->GetData(); - auto inputHandle = boost::polymorphic_downcast(queueDescriptor.m_Inputs[0]); - auto outputHandle = boost::polymorphic_downcast(queueDescriptor.m_Outputs[0]); - BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo({3, 1, 4, 5}, DataType))); - BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo({3, 7}, DataType))); -} - -#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC -BOOST_AUTO_TEST_CASE(CreateFullyConnectedFloat16Workload) -{ - NeonCreateFullyConnectedWorkloadTest(); -} -#endif - -BOOST_AUTO_TEST_CASE(CreateFullyConnectedFloatWorkload) -{ - NeonCreateFullyConnectedWorkloadTest(); -} - -template -static void NeonCreateNormalizationWorkloadTest(DataLayout dataLayout) -{ - Graph graph; - NeonWorkloadFactory factory; - auto workload = CreateNormalizationWorkloadTest(factory, graph, dataLayout); - - // Checks that outputs and inputs are as we expect them (see definition of CreateNormalizationWorkloadTest). - NormalizationQueueDescriptor queueDescriptor = workload->GetData(); - auto inputHandle = boost::polymorphic_downcast(queueDescriptor.m_Inputs[0]); - auto outputHandle = boost::polymorphic_downcast(queueDescriptor.m_Outputs[0]); - BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo({3, 5, 5, 1}, DataType))); - BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo({3, 5, 5, 1}, DataType))); -} - -#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC -BOOST_AUTO_TEST_CASE(CreateNormalizationFloat16NchwWorkload) -{ - NeonCreateNormalizationWorkloadTest(DataLayout::NCHW); -} - -BOOST_AUTO_TEST_CASE(CreateNormalizationFloat16NhwcWorkload) -{ - NeonCreateNormalizationWorkloadTest(DataLayout::NHWC); -} -#endif - -BOOST_AUTO_TEST_CASE(CreateNormalizationFloatNchwWorkload) -{ - NeonCreateNormalizationWorkloadTest(DataLayout::NCHW); -} - -BOOST_AUTO_TEST_CASE(CreateNormalizationFloatNhwcWorkload) -{ - NeonCreateNormalizationWorkloadTest(DataLayout::NHWC); -} - - -template -static void 
NeonCreatePooling2dWorkloadTest(DataLayout dataLayout = DataLayout::NCHW) -{ - Graph graph; - NeonWorkloadFactory factory; - auto workload = CreatePooling2dWorkloadTest - (factory, graph, dataLayout); - - TensorShape inputShape = (dataLayout == DataLayout::NCHW) ? TensorShape{3, 2, 5, 5} : TensorShape{3, 5, 5, 2}; - TensorShape outputShape = (dataLayout == DataLayout::NCHW) ? TensorShape{3, 2, 2, 4} : TensorShape{3, 2, 4, 2}; - - // Checks that outputs and inputs are as we expect them (see definition of CreatePooling2dWorkloadTest). - Pooling2dQueueDescriptor queueDescriptor = workload->GetData(); - auto inputHandle = boost::polymorphic_downcast(queueDescriptor.m_Inputs[0]); - auto outputHandle = boost::polymorphic_downcast(queueDescriptor.m_Outputs[0]); - BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo(inputShape, DataType))); - BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo(outputShape, DataType))); -} - -#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC -BOOST_AUTO_TEST_CASE(CreatePooling2dFloat16Workload) -{ - NeonCreatePooling2dWorkloadTest(); -} -#endif - -BOOST_AUTO_TEST_CASE(CreatePooling2dFloatNchwWorkload) -{ - NeonCreatePooling2dWorkloadTest(DataLayout::NCHW); -} - -BOOST_AUTO_TEST_CASE(CreatePooling2dFloatNhwcWorkload) -{ - NeonCreatePooling2dWorkloadTest(DataLayout::NHWC); -} - -BOOST_AUTO_TEST_CASE(CreatePooling2dUint8NchwWorkload) -{ - NeonCreatePooling2dWorkloadTest(DataLayout::NCHW); -} - -BOOST_AUTO_TEST_CASE(CreatePooling2dUint8NhwcWorkload) -{ - NeonCreatePooling2dWorkloadTest(DataLayout::NHWC); -} - -template -static void NeonCreateReshapeWorkloadTest() -{ - Graph graph; - NeonWorkloadFactory factory; - auto workload = CreateReshapeWorkloadTest(factory, graph); - - // Checks that outputs and inputs are as we expect them (see definition of CreateReshapeWorkloadTest). 
- ReshapeQueueDescriptor queueDescriptor = workload->GetData(); - auto inputHandle = boost::polymorphic_downcast(queueDescriptor.m_Inputs[0]); - auto outputHandle = boost::polymorphic_downcast(queueDescriptor.m_Outputs[0]); - BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo({4, 1}, DataType))); - BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo({1, 4}, DataType))); -} - -#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC -BOOST_AUTO_TEST_CASE(CreateReshapeFloat16Workload) -{ - NeonCreateReshapeWorkloadTest(); -} -#endif - -BOOST_AUTO_TEST_CASE(CreateReshapeFloatWorkload) -{ - NeonCreateReshapeWorkloadTest(); -} - -BOOST_AUTO_TEST_CASE(CreateReshapeUint8Workload) -{ - NeonCreateReshapeWorkloadTest(); -} - -template -static void NeonCreateSoftmaxWorkloadTest() -{ - Graph graph; - NeonWorkloadFactory factory; - auto workload = CreateSoftmaxWorkloadTest(factory, graph); - - // Checks that outputs and inputs are as we expect them (see definition of CreateSoftmaxWorkloadTest). - SoftmaxQueueDescriptor queueDescriptor = workload->GetData(); - auto inputHandle = boost::polymorphic_downcast(queueDescriptor.m_Inputs[0]); - auto outputHandle = boost::polymorphic_downcast(queueDescriptor.m_Outputs[0]); - BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo({4, 1}, DataType))); - BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo({4, 1}, DataType))); -} - -#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC -BOOST_AUTO_TEST_CASE(CreateSoftmaxFloat16Workload) -{ - NeonCreateSoftmaxWorkloadTest(); -} -#endif - -BOOST_AUTO_TEST_CASE(CreateSoftmaxFloatWorkload) -{ - NeonCreateSoftmaxWorkloadTest(); -} - -BOOST_AUTO_TEST_CASE(CreateSplitterWorkload) -{ - Graph graph; - NeonWorkloadFactory factory; - auto workload = CreateSplitterWorkloadTest(factory, graph); - - // Checks that outputs are as we expect them (see definition of CreateSplitterWorkloadTest). 
- SplitterQueueDescriptor queueDescriptor = workload->GetData(); - auto inputHandle = boost::polymorphic_downcast(queueDescriptor.m_Inputs[0]); - BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo({5, 7, 7}, DataType::Float32))); - - auto outputHandle0 = boost::polymorphic_downcast(queueDescriptor.m_Outputs[0]); - BOOST_TEST(TestNeonTensorHandleInfo(outputHandle0, TensorInfo({1, 7, 7}, DataType::Float32))); - - auto outputHandle1 = boost::polymorphic_downcast(queueDescriptor.m_Outputs[1]); - BOOST_TEST(TestNeonTensorHandleInfo(outputHandle1, TensorInfo({2, 7, 7}, DataType::Float32))); - - auto outputHandle2 = boost::polymorphic_downcast(queueDescriptor.m_Outputs[2]); - BOOST_TEST(TestNeonTensorHandleInfo(outputHandle2, TensorInfo({2, 7, 7}, DataType::Float32))); -} - -BOOST_AUTO_TEST_CASE(CreateSplitterMerger) -{ - // Tests that it is possible to decide which output of the splitter layer - // should be lined to which input of the merger layer. - // We tested that is is possible to specify 0th output - // of the splitter to be the 1st input to the merger, and the 1st output of the splitter to be 0th input - // of the merger. - - Graph graph; - NeonWorkloadFactory factory; - - auto workloads = - CreateSplitterMergerWorkloadTest(factory, graph); - - auto wlSplitter = std::move(workloads.first); - auto wlMerger = std::move(workloads.second); - - //Checks that the index of inputs/outputs matches what we declared on InputDescriptor construction. 
- armnn::INeonTensorHandle* sOut0 = dynamic_cast(wlSplitter->GetData().m_Outputs[0]); - armnn::INeonTensorHandle* sOut1 = dynamic_cast(wlSplitter->GetData().m_Outputs[1]); - armnn::INeonTensorHandle* mIn0 = dynamic_cast(wlMerger->GetData().m_Inputs[0]); - armnn::INeonTensorHandle* mIn1 = dynamic_cast(wlMerger->GetData().m_Inputs[1]); - - BOOST_TEST(sOut0); - BOOST_TEST(sOut1); - BOOST_TEST(mIn0); - BOOST_TEST(mIn1); - - bool validDataPointers = (sOut0 == mIn1) && (sOut1 == mIn0); - - BOOST_TEST(validDataPointers); -} - -BOOST_AUTO_TEST_CASE(CreateSingleOutputMultipleInputs) -{ - // Tests that it is possible to assign multiple (two) different layers to each of the outputs of a splitter layer. - // We created a splitter with two outputs. That each of those outputs is used by two different activation layers - - Graph graph; - NeonWorkloadFactory factory; - std::unique_ptr wlSplitter; - std::unique_ptr wlActiv0_0; - std::unique_ptr wlActiv0_1; - std::unique_ptr wlActiv1_0; - std::unique_ptr wlActiv1_1; - - CreateSplitterMultipleInputsOneOutputWorkloadTest(factory, graph, wlSplitter, wlActiv0_0, wlActiv0_1, - wlActiv1_0, wlActiv1_1); - - armnn::INeonTensorHandle* sOut0 = dynamic_cast(wlSplitter->GetData().m_Outputs[0]); - armnn::INeonTensorHandle* sOut1 = dynamic_cast(wlSplitter->GetData().m_Outputs[1]); - armnn::INeonTensorHandle* activ0_0Im = dynamic_cast(wlActiv0_0->GetData().m_Inputs[0]); - armnn::INeonTensorHandle* activ0_1Im = dynamic_cast(wlActiv0_1->GetData().m_Inputs[0]); - armnn::INeonTensorHandle* activ1_0Im = dynamic_cast(wlActiv1_0->GetData().m_Inputs[0]); - armnn::INeonTensorHandle* activ1_1Im = dynamic_cast(wlActiv1_1->GetData().m_Inputs[0]); - - - BOOST_TEST(sOut0); - BOOST_TEST(sOut1); - BOOST_TEST(activ0_0Im); - BOOST_TEST(activ0_1Im); - BOOST_TEST(activ1_0Im); - BOOST_TEST(activ1_1Im); - - bool validDataPointers = (sOut0 == activ0_0Im) && (sOut0 == activ0_1Im) && - (sOut1 == activ1_0Im) && (sOut1 == activ1_1Im); - - BOOST_TEST(validDataPointers); -} - 
-BOOST_AUTO_TEST_CASE(CreateMemCopyWorkloadsNeon) -{ - NeonWorkloadFactory factory; - CreateMemCopyWorkloads(factory); -} - -template -static void NeonCreateL2NormalizationWorkloadTest(DataLayout dataLayout) -{ - Graph graph; - NeonWorkloadFactory factory; - auto workload = CreateL2NormalizationWorkloadTest(factory, graph, dataLayout); - - // Checks that inputs/outputs are as we expect them (see definition of CreateNormalizationWorkloadTest). - L2NormalizationQueueDescriptor queueDescriptor = workload->GetData(); - auto inputHandle = boost::polymorphic_downcast(queueDescriptor.m_Inputs[0]); - auto outputHandle = boost::polymorphic_downcast(queueDescriptor.m_Outputs[0]); - BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo({ 5, 20, 50, 67 }, DataType))); - BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo({ 5, 20, 50, 67 }, DataType))); -} - -#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC -BOOST_AUTO_TEST_CASE(CreateL2NormalizationFloat16NchwWorkload) -{ - NeonCreateL2NormalizationWorkloadTest(DataLayout::NCHW); -} - -BOOST_AUTO_TEST_CASE(CreateL2NormalizationFloat16NhwcWorkload) -{ - NeonCreateL2NormalizationWorkloadTest(DataLayout::NHWC); -} -#endif - -BOOST_AUTO_TEST_CASE(CreateL2NormalizationNchwWorkload) -{ - NeonCreateL2NormalizationWorkloadTest(DataLayout::NCHW); -} - -BOOST_AUTO_TEST_CASE(CreateL2NormalizationNhwcWorkload) -{ - NeonCreateL2NormalizationWorkloadTest(DataLayout::NHWC); -} - -BOOST_AUTO_TEST_SUITE_END() diff --git a/src/backends/test/CreateWorkloadRef.cpp b/src/backends/test/CreateWorkloadRef.cpp deleted file mode 100644 index c30093da92..0000000000 --- a/src/backends/test/CreateWorkloadRef.cpp +++ /dev/null @@ -1,483 +0,0 @@ -// -// Copyright © 2017 Arm Ltd. All rights reserved. 
-// SPDX-License-Identifier: MIT -// -#include -#include -#include - -#include "test/CreateWorkload.hpp" - -namespace -{ - -template -void CheckInputOutput(std::unique_ptr workload, const TensorInfo& inputInfo, const TensorInfo& outputInfo) -{ - auto queueDescriptor = workload->GetData(); - auto inputHandle = boost::polymorphic_downcast(queueDescriptor.m_Inputs[0]); - auto outputHandle = boost::polymorphic_downcast(queueDescriptor.m_Outputs[0]); - BOOST_TEST((inputHandle->GetTensorInfo() == inputInfo)); - BOOST_TEST((outputHandle->GetTensorInfo() == outputInfo)); -} - -template -void CheckInputsOutput(std::unique_ptr workload, - const TensorInfo& inputInfo0, - const TensorInfo& inputInfo1, - const TensorInfo& outputInfo) -{ - auto queueDescriptor = workload->GetData(); - auto inputHandle0 = boost::polymorphic_downcast(queueDescriptor.m_Inputs[0]); - auto inputHandle1 = boost::polymorphic_downcast(queueDescriptor.m_Inputs[1]); - auto outputHandle = boost::polymorphic_downcast(queueDescriptor.m_Outputs[0]); - BOOST_TEST((inputHandle0->GetTensorInfo() == inputInfo0)); - BOOST_TEST((inputHandle1->GetTensorInfo() == inputInfo1)); - BOOST_TEST((outputHandle->GetTensorInfo() == outputInfo)); -} -} - -BOOST_AUTO_TEST_SUITE(CreateWorkloadRef) - -template -static void RefCreateActivationWorkloadTest() -{ - Graph graph; - RefWorkloadFactory factory; - auto workload = CreateActivationWorkloadTest(factory, graph); - - // Checks that outputs are as we expect them (see definition of CreateActivationWorkloadTest). 
- CheckInputOutput(std::move(workload), - TensorInfo({ 1, 1 }, DataType), - TensorInfo({ 1, 1 }, DataType)); -} - -BOOST_AUTO_TEST_CASE(CreateActivationFloat32Workload) -{ - RefCreateActivationWorkloadTest(); -} - -BOOST_AUTO_TEST_CASE(CreateActivationUint8Workload) -{ - RefCreateActivationWorkloadTest(); -} - -template -static void RefCreateArithmethicWorkloadTest() -{ - Graph graph; - RefWorkloadFactory factory; - auto workload = CreateArithmeticWorkloadTest(factory, graph); - - CheckInputsOutput(std::move(workload), - TensorInfo({ 2, 3 }, DataType), - TensorInfo({ 2, 3 }, DataType), - TensorInfo({ 2, 3 }, DataType)); -} - -BOOST_AUTO_TEST_CASE(CreateAdditionFloatWorkload) -{ - RefCreateArithmethicWorkloadTest(); -} - -BOOST_AUTO_TEST_CASE(CreateAdditionUint8Workload) -{ - RefCreateArithmethicWorkloadTest(); -} - -BOOST_AUTO_TEST_CASE(CreateSubtractionFloatWorkload) -{ - RefCreateArithmethicWorkloadTest(); -} - -BOOST_AUTO_TEST_CASE(CreateSubtractionUint8Workload) -{ - RefCreateArithmethicWorkloadTest(); -} - -BOOST_AUTO_TEST_CASE(CreateMultiplicationFloatWorkload) -{ - RefCreateArithmethicWorkloadTest(); -} - -BOOST_AUTO_TEST_CASE(CreateMultiplicationUint8Workload) -{ - RefCreateArithmethicWorkloadTest(); -} - -BOOST_AUTO_TEST_CASE(CreateDivisionFloatWorkload) -{ - RefCreateArithmethicWorkloadTest(); -} - -BOOST_AUTO_TEST_CASE(CreateDivisionUint8Workload) -{ - RefCreateArithmethicWorkloadTest(); -} - -BOOST_AUTO_TEST_CASE(CreateBatchNormalizationWorkload) -{ - Graph graph; - RefWorkloadFactory factory; - auto workload = CreateBatchNormalizationWorkloadTest - (factory, graph); - - // Checks that outputs and inputs are as we expect them (see definition of CreateBatchNormalizationWorkloadTest). 
- CheckInputOutput( - std::move(workload), TensorInfo({2, 3, 1, 1}, DataType::Float32), TensorInfo({2, 3, 1, 1}, DataType::Float32)); -} - -BOOST_AUTO_TEST_CASE(CreateConvertFp16ToFp32Float32Workload) -{ - Graph graph; - RefWorkloadFactory factory; - auto workload = CreateConvertFp16ToFp32WorkloadTest(factory, graph); - - // Checks that outputs and inputs are as we expect them - CheckInputOutput( - std::move(workload), TensorInfo({1, 3, 2, 3}, DataType::Float16), TensorInfo({1, 3, 2, 3}, DataType::Float32)); -} - -BOOST_AUTO_TEST_CASE(CreateConvertFp32ToFp16Float16Workload) -{ - Graph graph; - RefWorkloadFactory factory; - auto workload = CreateConvertFp32ToFp16WorkloadTest(factory, graph); - - // Checks that outputs and inputs are as we expect them - CheckInputOutput( - std::move(workload), TensorInfo({1, 3, 2, 3}, DataType::Float32), TensorInfo({1, 3, 2, 3}, DataType::Float16)); -} - -BOOST_AUTO_TEST_CASE(CreateConvolution2dWorkload) -{ - Graph graph; - RefWorkloadFactory factory; - auto workload = CreateConvolution2dWorkloadTest(factory, graph); - - // Checks that outputs and inputs are as we expect them (see definition of CreateConvolution2dWorkloadTest). - CheckInputOutput(std::move(workload), - TensorInfo({2, 3, 8, 16}, DataType::Float32), - TensorInfo({2, 2, 2, 10}, DataType::Float32)); -} - -BOOST_AUTO_TEST_CASE(CreateDepthwiseConvolution2dWorkload) -{ - Graph graph; - RefWorkloadFactory factory; - auto workload = - CreateDepthwiseConvolution2dWorkloadTest(factory, graph); - - // Checks that outputs and inputs are as we expect them (see definition of CreateConvolution2dWorkloadTest). 
- CheckInputOutput(std::move(workload), - TensorInfo({2, 3, 8, 16}, DataType::Float32), - TensorInfo({2, 9, 2, 10}, DataType::Float32)); -} - -template -static void RefCreateFullyConnectedWorkloadTest() -{ - Graph graph; - RefWorkloadFactory factory; - auto workload = CreateFullyConnectedWorkloadTest(factory, graph); - - // Checks that outputs and inputs are as we expect them (see definition of CreateFullyConnectedWorkloadTest). - float inputsQScale = DataType == armnn::DataType::QuantisedAsymm8 ? 1.0f : 0.0; - float outputQScale = DataType == armnn::DataType::QuantisedAsymm8 ? 2.0f : 0.0; - CheckInputOutput(std::move(workload), - TensorInfo({ 3, 1, 4, 5 }, DataType, inputsQScale), - TensorInfo({ 3, 7 }, DataType, outputQScale)); -} - -BOOST_AUTO_TEST_CASE(CreateFullyConnectedFloat32Workload) -{ - RefCreateFullyConnectedWorkloadTest(); -} - -BOOST_AUTO_TEST_CASE(CreateFullyConnectedUint8Workload) -{ - RefCreateFullyConnectedWorkloadTest(); -} - -template -static void RefCreateNormalizationWorkloadTest() -{ - Graph graph; - RefWorkloadFactory factory; - auto workload = CreateNormalizationWorkloadTest(factory, graph); - - // Checks that outputs and inputs are as we expect them (see definition of CreateNormalizationWorkloadTest). - CheckInputOutput(std::move(workload), - TensorInfo({3, 5, 5, 1}, DataType), - TensorInfo({3, 5, 5, 1}, DataType)); -} - -BOOST_AUTO_TEST_CASE(CreateRefNormalizationNchwWorkload) -{ - RefCreateNormalizationWorkloadTest(); -} - -template -static void RefCreatePooling2dWorkloadTest() -{ - Graph graph; - RefWorkloadFactory factory; - auto workload = CreatePooling2dWorkloadTest(factory, graph); - - // Checks that outputs and inputs are as we expect them (see definition of CreatePooling2dWorkloadTest). 
- CheckInputOutput( - std::move(workload), - TensorInfo({3, 2, 5, 5}, DataType), - TensorInfo({3, 2, 2, 4}, DataType)); -} - -BOOST_AUTO_TEST_CASE(CreatePooling2dFloat32Workload) -{ - RefCreatePooling2dWorkloadTest(); -} - -BOOST_AUTO_TEST_CASE(CreatePooling2dUint8Workload) -{ - RefCreatePooling2dWorkloadTest(); -} - -template -static void RefCreateSoftmaxWorkloadTest() -{ - Graph graph; - RefWorkloadFactory factory; - auto workload = CreateSoftmaxWorkloadTest(factory, graph); - - // Checks that outputs and inputs are as we expect them (see definition of CreateSoftmaxWorkloadTest). - CheckInputOutput( - std::move(workload), - TensorInfo({4, 1}, DataType), - TensorInfo({4, 1}, DataType)); -} - -BOOST_AUTO_TEST_CASE(CreateSoftmaxFloat32Workload) -{ - RefCreateSoftmaxWorkloadTest(); -} - -BOOST_AUTO_TEST_CASE(CreateSoftmaxUint8Workload) -{ - RefCreateSoftmaxWorkloadTest(); -} - -template -static void RefCreateSplitterWorkloadTest() -{ - Graph graph; - RefWorkloadFactory factory; - auto workload = CreateSplitterWorkloadTest(factory, graph); - - // Checks that outputs are as we expect them (see definition of CreateSplitterWorkloadTest). 
- SplitterQueueDescriptor queueDescriptor = workload->GetData(); - auto inputHandle = boost::polymorphic_downcast(queueDescriptor.m_Inputs[0]); - BOOST_TEST((inputHandle->GetTensorInfo() == TensorInfo({ 5, 7, 7 }, DataType))); - - auto outputHandle0 = boost::polymorphic_downcast(queueDescriptor.m_Outputs[0]); - BOOST_TEST((outputHandle0->GetTensorInfo() == TensorInfo({ 1, 7, 7 }, DataType))); - - auto outputHandle1 = boost::polymorphic_downcast(queueDescriptor.m_Outputs[1]); - BOOST_TEST((outputHandle1->GetTensorInfo() == TensorInfo({ 2, 7, 7 }, DataType))); - - auto outputHandle2 = boost::polymorphic_downcast(queueDescriptor.m_Outputs[2]); - BOOST_TEST((outputHandle2->GetTensorInfo() == TensorInfo({ 2, 7, 7 }, DataType))); -} - -BOOST_AUTO_TEST_CASE(CreateSplitterFloat32Workload) -{ - RefCreateSplitterWorkloadTest(); -} - -BOOST_AUTO_TEST_CASE(CreateSplitterUint8Workload) -{ - RefCreateSplitterWorkloadTest(); -} - -template -static void RefCreateSplitterMergerWorkloadTest() -{ - // Tests that it is possible to decide which output of the splitter layer - // should be lined to which input of the merger layer. - // We tested that is is possible to specify 0th output - // of the splitter to be the 1st input to the merger and the 1st output of the splitter to be 0th input - // of the merger. - - Graph graph; - RefWorkloadFactory factory; - auto workloads = CreateSplitterMergerWorkloadTest - (factory, graph); - - auto wlSplitter = std::move(workloads.first); - auto wlMerger = std::move(workloads.second); - - //Checks that the index of inputs/outputs matches what we declared on InputDescriptor construction. 
- armnn::CpuTensorHandle* sOut0 = dynamic_cast(wlSplitter->GetData().m_Outputs[0]); - armnn::CpuTensorHandle* sOut1 = dynamic_cast(wlSplitter->GetData().m_Outputs[1]); - armnn::CpuTensorHandle* mIn0 = dynamic_cast(wlMerger->GetData().m_Inputs[0]); - armnn::CpuTensorHandle* mIn1 = dynamic_cast(wlMerger->GetData().m_Inputs[1]); - - BOOST_TEST(sOut0); - BOOST_TEST(sOut1); - BOOST_TEST(mIn0); - BOOST_TEST(mIn1); - - bool validDataPointers = (sOut0 == mIn1) && (sOut1 == mIn0); - - BOOST_TEST(validDataPointers); -} - -BOOST_AUTO_TEST_CASE(CreateSplitterMergerFloat32) -{ - RefCreateSplitterMergerWorkloadTest(); -} - -BOOST_AUTO_TEST_CASE(CreateSplitterMergerUint8) -{ - RefCreateSplitterMergerWorkloadTest(); -} - -template -static void RefCreateSingleOutputMultipleInputsTest() -{ - // Tests that it is possible to assign multiple (two) different layers to each of the outputs of a splitter layer. - // We created a splitter with two outputs. That each of those outputs is used by two different activation layers. 
- - Graph graph; - RefWorkloadFactory factory; - std::unique_ptr wlSplitter; - std::unique_ptr wlActiv0_0; - std::unique_ptr wlActiv0_1; - std::unique_ptr wlActiv1_0; - std::unique_ptr wlActiv1_1; - - CreateSplitterMultipleInputsOneOutputWorkloadTest(factory, graph, wlSplitter, wlActiv0_0, wlActiv0_1, wlActiv1_0, wlActiv1_1); - - armnn::CpuTensorHandle* sOut0 = dynamic_cast(wlSplitter->GetData().m_Outputs[0]); - armnn::CpuTensorHandle* sOut1 = dynamic_cast(wlSplitter->GetData().m_Outputs[1]); - armnn::CpuTensorHandle* activ0_0Im = dynamic_cast(wlActiv0_0->GetData().m_Inputs[0]); - armnn::CpuTensorHandle* activ0_1Im = dynamic_cast(wlActiv0_1->GetData().m_Inputs[0]); - armnn::CpuTensorHandle* activ1_0Im = dynamic_cast(wlActiv1_0->GetData().m_Inputs[0]); - armnn::CpuTensorHandle* activ1_1Im = dynamic_cast(wlActiv1_1->GetData().m_Inputs[0]); - - - BOOST_TEST(sOut0); - BOOST_TEST(sOut1); - BOOST_TEST(activ0_0Im); - BOOST_TEST(activ0_1Im); - BOOST_TEST(activ1_0Im); - BOOST_TEST(activ1_1Im); - - bool validDataPointers = (sOut0 == activ0_0Im) && (sOut0 == activ0_1Im) && - (sOut1 == activ1_0Im) && (sOut1 == activ1_1Im); - - BOOST_TEST(validDataPointers); -} - -BOOST_AUTO_TEST_CASE(CreateSingleOutputMultipleInputsFloat32) -{ - RefCreateSingleOutputMultipleInputsTest(); -} - -BOOST_AUTO_TEST_CASE(CreateSingleOutputMultipleInputsUint8) -{ - RefCreateSingleOutputMultipleInputsTest(); -} - -template -static void RefCreateResizeBilinearTest() -{ - Graph graph; - RefWorkloadFactory factory; - auto workload = CreateResizeBilinearWorkloadTest(factory, graph); - - // Checks that outputs and inputs are as we expect them (see definition of CreateResizeBilinearWorkloadTest). 
- CheckInputOutput( - std::move(workload), - TensorInfo({ 2, 3, 4, 4 }, DataType), - TensorInfo({ 2, 3, 2, 2 }, DataType)); -} - -BOOST_AUTO_TEST_CASE(CreateResizeBilinearFloat32) -{ - RefCreateResizeBilinearTest(); -} - -BOOST_AUTO_TEST_CASE(CreateResizeBilinearUint8) -{ - RefCreateResizeBilinearTest(); -} - -BOOST_AUTO_TEST_CASE(CreateL2NormalizationFloat32) -{ - Graph graph; - RefWorkloadFactory factory; - auto workload = CreateL2NormalizationWorkloadTest - (factory, graph); - - // Checks that outputs and inputs are as we expect them (see definition of CreateL2NormalizationWorkloadTest). - CheckInputOutput( - std::move(workload), - TensorInfo({ 5, 20, 50, 67 }, armnn::DataType::Float32), - TensorInfo({ 5, 20, 50, 67 }, armnn::DataType::Float32)); -} - -template -static void RefCreateReshapeWorkloadTest() -{ - Graph graph; - RefWorkloadFactory factory; - auto workload = CreateReshapeWorkloadTest(factory, graph); - - // Checks that outputs and inputs are as we expect them (see definition of CreateReshapeWorkloadTest). - CheckInputOutput( - std::move(workload), - TensorInfo({ 4, 1 }, DataType), - TensorInfo({ 1, 4 }, DataType)); -} - -BOOST_AUTO_TEST_CASE(CreateReshapeFloat32Workload) -{ - RefCreateReshapeWorkloadTest(); -} - -BOOST_AUTO_TEST_CASE(CreateReshapeUint8Workload) -{ - RefCreateReshapeWorkloadTest(); -} - -BOOST_AUTO_TEST_SUITE_END() diff --git a/src/backends/test/IsLayerSupportedTest.cpp b/src/backends/test/IsLayerSupportedTest.cpp deleted file mode 100644 index 089822dade..0000000000 --- a/src/backends/test/IsLayerSupportedTest.cpp +++ /dev/null @@ -1,239 +0,0 @@ -// -// Copyright © 2017 Arm Ltd. All rights reserved. 
-// SPDX-License-Identifier: MIT -// -#include - -#include -#include "LayerTests.hpp" - -#include -#include -#include - -#include -#include -#include - -#include "IsLayerSupportedTestImpl.hpp" -#include - -#include -#include - -BOOST_AUTO_TEST_SUITE(IsLayerSupported) - -BOOST_AUTO_TEST_CASE(IsLayerSupportedLayerTypeMatches) -{ - LayerTypeMatchesTest(); -} - -BOOST_AUTO_TEST_CASE(IsLayerSupportedFloat16Reference) -{ - armnn::RefWorkloadFactory factory; - IsLayerSupportedTests(&factory); -} - -BOOST_AUTO_TEST_CASE(IsLayerSupportedFloat32Reference) -{ - armnn::RefWorkloadFactory factory; - IsLayerSupportedTests(&factory); -} - -BOOST_AUTO_TEST_CASE(IsLayerSupportedUint8Reference) -{ - armnn::RefWorkloadFactory factory; - IsLayerSupportedTests(&factory); -} - -BOOST_AUTO_TEST_CASE(IsConvertFp16ToFp32SupportedReference) -{ - std::string reasonIfUnsupported; - - bool result = IsConvertLayerSupportedTests(reasonIfUnsupported); - - BOOST_CHECK(result); -} - -BOOST_AUTO_TEST_CASE(IsConvertFp16ToFp32SupportedFp32InputReference) -{ - std::string reasonIfUnsupported; - - bool result = IsConvertLayerSupportedTests(reasonIfUnsupported); - - BOOST_CHECK(!result); - BOOST_CHECK_EQUAL(reasonIfUnsupported, "Layer is not supported with float32 data type input"); -} - -BOOST_AUTO_TEST_CASE(IsConvertFp16ToFp32SupportedFp16OutputReference) -{ - std::string reasonIfUnsupported; - - bool result = IsConvertLayerSupportedTests(reasonIfUnsupported); - - BOOST_CHECK(!result); - BOOST_CHECK_EQUAL(reasonIfUnsupported, "Layer is not supported with float16 data type output"); -} - -BOOST_AUTO_TEST_CASE(IsConvertFp32ToFp16SupportedReference) -{ - std::string reasonIfUnsupported; - - bool result = IsConvertLayerSupportedTests(reasonIfUnsupported); - - BOOST_CHECK(result); -} - -BOOST_AUTO_TEST_CASE(IsConvertFp32ToFp16SupportedFp16InputReference) -{ - std::string reasonIfUnsupported; - - bool result = IsConvertLayerSupportedTests(reasonIfUnsupported); - - BOOST_CHECK(!result); - 
BOOST_CHECK_EQUAL(reasonIfUnsupported, "Layer is not supported with float16 data type input"); -} - -BOOST_AUTO_TEST_CASE(IsConvertFp32ToFp16SupportedFp32OutputReference) -{ - std::string reasonIfUnsupported; - - bool result = IsConvertLayerSupportedTests(reasonIfUnsupported); - - BOOST_CHECK(!result); - BOOST_CHECK_EQUAL(reasonIfUnsupported, "Layer is not supported with float32 data type output"); -} - -#ifdef ARMCOMPUTENEON_ENABLED -BOOST_AUTO_TEST_CASE(IsLayerSupportedFloat16Neon) -{ - armnn::NeonWorkloadFactory factory; - IsLayerSupportedTests(&factory); -} - -BOOST_AUTO_TEST_CASE(IsLayerSupportedFloat32Neon) -{ - armnn::NeonWorkloadFactory factory; - IsLayerSupportedTests(&factory); -} - -BOOST_AUTO_TEST_CASE(IsLayerSupportedUint8Neon) -{ - armnn::NeonWorkloadFactory factory; - IsLayerSupportedTests(&factory); -} - -BOOST_AUTO_TEST_CASE(IsConvertFp16ToFp32SupportedNeon) -{ - std::string reasonIfUnsupported; - - bool result = IsConvertLayerSupportedTests(reasonIfUnsupported); - - BOOST_CHECK(result); -} - -BOOST_AUTO_TEST_CASE(IsConvertFp32ToFp16SupportedNeon) -{ - std::string reasonIfUnsupported; - - bool result = IsConvertLayerSupportedTests(reasonIfUnsupported); - - BOOST_CHECK(result); -} -#endif //#ifdef ARMCOMPUTENEON_ENABLED. 
- - -#ifdef ARMCOMPUTECL_ENABLED - -BOOST_FIXTURE_TEST_CASE(IsLayerSupportedFloat16Cl, ClContextControlFixture) -{ - armnn::ClWorkloadFactory factory; - IsLayerSupportedTests(&factory); -} - -BOOST_FIXTURE_TEST_CASE(IsLayerSupportedFloat32Cl, ClContextControlFixture) -{ - armnn::ClWorkloadFactory factory; - IsLayerSupportedTests(&factory); -} - -BOOST_FIXTURE_TEST_CASE(IsLayerSupportedUint8Cl, ClContextControlFixture) -{ - armnn::ClWorkloadFactory factory; - IsLayerSupportedTests(&factory); -} - -BOOST_FIXTURE_TEST_CASE(IsConvertFp16ToFp32SupportedCl, ClContextControlFixture) -{ - std::string reasonIfUnsupported; - - bool result = IsConvertLayerSupportedTests(reasonIfUnsupported); - - BOOST_CHECK(result); -} - -BOOST_FIXTURE_TEST_CASE(IsConvertFp16ToFp32SupportedFp32InputCl, ClContextControlFixture) -{ - std::string reasonIfUnsupported; - - bool result = IsConvertLayerSupportedTests(reasonIfUnsupported); - - BOOST_CHECK(!result); - BOOST_CHECK_EQUAL(reasonIfUnsupported, "Input should be Float16"); -} - -BOOST_FIXTURE_TEST_CASE(IsConvertFp16ToFp32SupportedFp16OutputCl, ClContextControlFixture) -{ - std::string reasonIfUnsupported; - - bool result = IsConvertLayerSupportedTests(reasonIfUnsupported); - - BOOST_CHECK(!result); - BOOST_CHECK_EQUAL(reasonIfUnsupported, "Output should be Float32"); -} - -BOOST_FIXTURE_TEST_CASE(IsConvertFp32ToFp16SupportedCl, ClContextControlFixture) -{ - std::string reasonIfUnsupported; - - bool result = IsConvertLayerSupportedTests(reasonIfUnsupported); - - BOOST_CHECK(result); -} - -BOOST_FIXTURE_TEST_CASE(IsConvertFp32ToFp16SupportedFp16InputCl, ClContextControlFixture) -{ - std::string reasonIfUnsupported; - - bool result = IsConvertLayerSupportedTests(reasonIfUnsupported); - - BOOST_CHECK(!result); - BOOST_CHECK_EQUAL(reasonIfUnsupported, "Input should be Float32"); -} - -BOOST_FIXTURE_TEST_CASE(IsConvertFp32ToFp16SupportedFp32OutputCl, ClContextControlFixture) -{ - std::string reasonIfUnsupported; - - bool result = 
IsConvertLayerSupportedTests(reasonIfUnsupported); - - BOOST_CHECK(!result); - BOOST_CHECK_EQUAL(reasonIfUnsupported, "Output should be Float16"); -} -#endif //#ifdef ARMCOMPUTECL_ENABLED. - -BOOST_AUTO_TEST_SUITE_END() diff --git a/src/backends/test/IsLayerSupportedTestImpl.hpp b/src/backends/test/IsLayerSupportedTestImpl.hpp index 0f31c8ed08..867f7f2732 100644 --- a/src/backends/test/IsLayerSupportedTestImpl.hpp +++ b/src/backends/test/IsLayerSupportedTestImpl.hpp @@ -537,11 +537,6 @@ bool LayerTypeMatchesTestImpl(Tag) LayerTypeMatchesTestImpl(Tag()); }; -bool LayerTypeMatchesTest() -{ - return LayerTypeMatchesTestImpl(Tag()); -}; - template bool IsConvertLayerSupportedTests(std::string& reasonIfUnsupported) { diff --git a/src/backends/test/LayerTests.cpp b/src/backends/test/LayerTests.cpp index 17f3ae12e1..d88db06411 100644 --- a/src/backends/test/LayerTests.cpp +++ b/src/backends/test/LayerTests.cpp @@ -39,7 +39,7 @@ #include "ConvertFp16ToFp32TestImpl.hpp" #include "ConvertFp32ToFp16TestImpl.hpp" -#include "ClContextControlFixture.hpp" +#include // 3-channel 16x8 image used as common input data for a number of Conv2d tests. static std::vector ConvInput3x8x16({ diff --git a/src/backends/test/Reference.cpp b/src/backends/test/Reference.cpp deleted file mode 100644 index 05ebf2e8b0..0000000000 --- a/src/backends/test/Reference.cpp +++ /dev/null @@ -1,273 +0,0 @@ -// -// Copyright © 2017 Arm Ltd. All rights reserved. 
-// SPDX-License-Identifier: MIT -// -#include - -#include "LayerTests.hpp" -#include "test/TensorHelpers.hpp" - -#include - -#include "test/UnitTests.hpp" - -BOOST_AUTO_TEST_SUITE(Compute_Reference) -using FactoryType = armnn::RefWorkloadFactory; - -// ============================================================================ -// UNIT tests - -// Convolution -ARMNN_AUTO_TEST_CASE(SimpleConvolution2d3x5, SimpleConvolution2d3x5Test, true) -ARMNN_AUTO_TEST_CASE(SimpleConvolution2d3x5Uint8, SimpleConvolution2d3x5Uint8Test, true) - -ARMNN_AUTO_TEST_CASE(UnbiasedConvolution2d, SimpleConvolution2d3x5Test, false) -ARMNN_AUTO_TEST_CASE(UnbiasedConvolutionUint8, SimpleConvolution2d3x5Uint8Test, false) - -ARMNN_AUTO_TEST_CASE(SimpleConvolution1d, Convolution1dTest, true) -ARMNN_AUTO_TEST_CASE(SimpleConvolution1dUint8, Convolution1dUint8Test, true) - -ARMNN_AUTO_TEST_CASE(SimpleConvolution2d3x3, SimpleConvolution2d3x3Test, true) -ARMNN_AUTO_TEST_CASE(SimpleConvolution2d3x3Uint8, SimpleConvolution2d3x3Uint8Test, true) - -ARMNN_AUTO_TEST_CASE(UnbiasedConvolution2dSquare, SimpleConvolution2d3x3Test, false) - -ARMNN_AUTO_TEST_CASE(SimpleConvolution2dAsymmetricPaddingLargerThanHalfKernelSize, - Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTest) -ARMNN_AUTO_TEST_CASE(SimpleConvolution2dAsymmetricPadding, Convolution2dAsymmetricPaddingTest) - -// Depthwise Convolution -ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d, DepthwiseConvolution2dTest, true) -ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2dUint8, DepthwiseConvolution2dUint8Test, true) - -ARMNN_AUTO_TEST_CASE(UnbiasedDepthwiseConvolution2d, DepthwiseConvolution2dTest, false) -ARMNN_AUTO_TEST_CASE(UnbiasedDepthwiseConvolution2dUint8, DepthwiseConvolution2dUint8Test, false) - -ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2dDepthMul1, DepthwiseConvolution2dDepthMul1Test, true) -ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2dDepthMul1Uint8, DepthwiseConvolution2dDepthMul1Uint8Test, true) - 
-ARMNN_AUTO_TEST_CASE(UnbiasedDepthwiseConvolution2dDepthMul1, DepthwiseConvolution2dDepthMul1Test, false) -ARMNN_AUTO_TEST_CASE(UnbiasedDepthwiseConvolution2dDepthMul1Uint8, DepthwiseConvolution2dDepthMul1Uint8Test, false) - -ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2dAsymmetric, DepthwiseConvolution2dAsymmetricTest, true) -ARMNN_AUTO_TEST_CASE(UnbiasedDepthwiseConvolution2dAsymmetric, DepthwiseConvolution2dAsymmetricTest, false) - -// Pooling -ARMNN_AUTO_TEST_CASE(SimpleMaxPooling2dSize2x2Stride2x2, SimpleMaxPooling2dSize2x2Stride2x2Test, false) -ARMNN_AUTO_TEST_CASE(SimpleMaxPooling2dSize2x2Stride2x2Uint8, SimpleMaxPooling2dSize2x2Stride2x2Uint8Test, false) - -ARMNN_AUTO_TEST_CASE(SimpleMaxPooling2dSize3x3Stride2x4, SimpleMaxPooling2dSize3x3Stride2x4Test, false) -ARMNN_AUTO_TEST_CASE(SimpleMaxPooling2dSize3x3Stride2x4Uint8, SimpleMaxPooling2dSize3x3Stride2x4Uint8Test, false) - -ARMNN_AUTO_TEST_CASE(IgnorePaddingSimpleMaxPooling2d, IgnorePaddingSimpleMaxPooling2dTest) -ARMNN_AUTO_TEST_CASE(IgnorePaddingSimpleMaxPooling2dUint8, IgnorePaddingSimpleMaxPooling2dUint8Test) -ARMNN_AUTO_TEST_CASE(IgnorePaddingMaxPooling2dSize3, IgnorePaddingMaxPooling2dSize3Test) -ARMNN_AUTO_TEST_CASE(IgnorePaddingMaxPooling2dSize3Uint8, IgnorePaddingMaxPooling2dSize3Uint8Test) - -ARMNN_AUTO_TEST_CASE(IgnorePaddingSimpleAveragePooling2d, IgnorePaddingSimpleAveragePooling2dTest) -ARMNN_AUTO_TEST_CASE(IgnorePaddingSimpleAveragePooling2dUint8, IgnorePaddingSimpleAveragePooling2dUint8Test) -ARMNN_AUTO_TEST_CASE(IgnorePaddingSimpleAveragePooling2dNoPadding, IgnorePaddingSimpleAveragePooling2dNoPaddingTest) -ARMNN_AUTO_TEST_CASE(IgnorePaddingSimpleAveragePooling2dNoPaddingUint8, - IgnorePaddingSimpleAveragePooling2dNoPaddingUint8Test) -ARMNN_AUTO_TEST_CASE(IgnorePaddingAveragePooling2dSize3, IgnorePaddingAveragePooling2dSize3Test) -ARMNN_AUTO_TEST_CASE(IgnorePaddingAveragePooling2dSize3Uint8, IgnorePaddingAveragePooling2dSize3Uint8Test) - -ARMNN_AUTO_TEST_CASE(IgnorePaddingSimpleL2Pooling2d, 
IgnorePaddingSimpleL2Pooling2dTest) -ARMNN_AUTO_TEST_CASE(IgnorePaddingSimpleL2Pooling2dUint8, IgnorePaddingSimpleL2Pooling2dUint8Test) -ARMNN_AUTO_TEST_CASE(IgnorePaddingL2Pooling2dSize3, IgnorePaddingL2Pooling2dSize3Test) -ARMNN_AUTO_TEST_CASE(IgnorePaddingL2Pooling2dSize3Uint8, IgnorePaddingL2Pooling2dSize3Uint8Test) - -ARMNN_AUTO_TEST_CASE(SimpleAveragePooling2d, SimpleAveragePooling2dTest) -ARMNN_AUTO_TEST_CASE(SimpleAveragePooling2dUint8, SimpleAveragePooling2dUint8Test) -ARMNN_AUTO_TEST_CASE(IgnorePaddingAveragePooling2dSize3x2Stride2x2, - IgnorePaddingAveragePooling2dSize3x2Stride2x2Test, false) -ARMNN_AUTO_TEST_CASE(IgnorePaddingAveragePooling2dSize3x2Stride2x2NoPadding, - IgnorePaddingAveragePooling2dSize3x2Stride2x2Test, true) - -ARMNN_AUTO_TEST_CASE(LargeTensorsAveragePooling2d, LargeTensorsAveragePooling2dTest) -ARMNN_AUTO_TEST_CASE(LargeTensorsAveragePooling2dUint8, LargeTensorsAveragePooling2dUint8Test) - -ARMNN_AUTO_TEST_CASE(SimpleL2Pooling2d, SimpleL2Pooling2dTest) -ARMNN_AUTO_TEST_CASE(SimpleL2Pooling2dUint8, SimpleL2Pooling2dUint8Test) - -ARMNN_AUTO_TEST_CASE(L2Pooling2dSize7, L2Pooling2dSize7Test) -ARMNN_AUTO_TEST_CASE(L2Pooling2dSize7Uint8, L2Pooling2dSize7Uint8Test) - -ARMNN_AUTO_TEST_CASE(AsymmNonSquarePooling2d, AsymmetricNonSquarePooling2dTest) -ARMNN_AUTO_TEST_CASE(AsymmNonSquarePooling2dUint8, AsymmetricNonSquarePooling2dUint8Test) - -// Activation -ARMNN_AUTO_TEST_CASE(ConstantLinearActivation, ConstantLinearActivationTest) -ARMNN_AUTO_TEST_CASE(ConstantLinearActivationUint8, ConstantLinearActivationUint8Test) - -ARMNN_AUTO_TEST_CASE(SimpleNormalizationAcross, SimpleNormalizationAcrossTest) -ARMNN_AUTO_TEST_CASE(SimpleNormalizationWithin, SimpleNormalizationWithinTest) - -ARMNN_AUTO_TEST_CASE(SimpleSoftmaxBeta1, SimpleSoftmaxTest, 1.0f) -ARMNN_AUTO_TEST_CASE(SimpleSoftmaxBeta2, SimpleSoftmaxTest, 2.0f) -ARMNN_AUTO_TEST_CASE(SimpleSoftmaxBeta1Uint8, SimpleSoftmaxUint8Test, 1.0f) -ARMNN_AUTO_TEST_CASE(SimpleSoftmaxBeta2Uint8, 
SimpleSoftmaxUint8Test, 2.0f) - -ARMNN_AUTO_TEST_CASE(SimpleSigmoid, SimpleSigmoidTest) -ARMNN_AUTO_TEST_CASE(SimpleSigmoidUint8, SimpleSigmoidUint8Test) - -ARMNN_AUTO_TEST_CASE(ReLu1, BoundedReLuUpperAndLowerBoundTest) -ARMNN_AUTO_TEST_CASE(ReLu6, BoundedReLuUpperBoundOnlyTest) -ARMNN_AUTO_TEST_CASE(ReLu1Uint8, BoundedReLuUint8UpperAndLowerBoundTest) -ARMNN_AUTO_TEST_CASE(ReLu6Uint8, BoundedReLuUint8UpperBoundOnlyTest) - -// Fully Conected -ARMNN_AUTO_TEST_CASE(SimpleFullyConnected, FullyConnectedFloat32Test, false, false) -ARMNN_AUTO_TEST_CASE(FullyConnectedUint8, FullyConnectedUint8Test, false) -ARMNN_AUTO_TEST_CASE(SimpleFullyConnectedWithBias, FullyConnectedFloat32Test, true, false) -ARMNN_AUTO_TEST_CASE(FullyConnectedBiasedUint8, FullyConnectedUint8Test, true) -ARMNN_AUTO_TEST_CASE(SimpleFullyConnectedWithTranspose, FullyConnectedFloat32Test, false, true) - -ARMNN_AUTO_TEST_CASE(FullyConnectedLarge, FullyConnectedLargeTest, false) -ARMNN_AUTO_TEST_CASE(FullyConnectedLargeTransposed, FullyConnectedLargeTest, true) - -// Splitter -ARMNN_AUTO_TEST_CASE(SimpleSplitter, SplitterTest) -ARMNN_AUTO_TEST_CASE(SimpleSplitterUint8, SplitterUint8Test) - -ARMNN_AUTO_TEST_CASE(CopyViaSplitter, CopyViaSplitterTest) -ARMNN_AUTO_TEST_CASE(CopyViaSplitterUint8, CopyViaSplitterUint8Test) - -// Merger -ARMNN_AUTO_TEST_CASE(SimpleMerger, MergerTest) -ARMNN_AUTO_TEST_CASE(MergerUint8, MergerUint8Test) - -// Add -ARMNN_AUTO_TEST_CASE(SimpleAdd, AdditionTest) -ARMNN_AUTO_TEST_CASE(AddBroadcast1Element, AdditionBroadcast1ElementTest) -ARMNN_AUTO_TEST_CASE(AddBroadcast, AdditionBroadcastTest) - -ARMNN_AUTO_TEST_CASE(AdditionUint8, AdditionUint8Test) -ARMNN_AUTO_TEST_CASE(AddBroadcastUint8, AdditionBroadcastUint8Test) -ARMNN_AUTO_TEST_CASE(AddBroadcast1ElementUint8, AdditionBroadcast1ElementUint8Test) - -// Sub -ARMNN_AUTO_TEST_CASE(SimpleSub, SubtractionTest) -ARMNN_AUTO_TEST_CASE(SubBroadcast1Element, SubtractionBroadcast1ElementTest) -ARMNN_AUTO_TEST_CASE(SubBroadcast, 
SubtractionBroadcastTest) - -ARMNN_AUTO_TEST_CASE(SubtractionUint8, SubtractionUint8Test) -ARMNN_AUTO_TEST_CASE(SubBroadcastUint8, SubtractionBroadcastUint8Test) -ARMNN_AUTO_TEST_CASE(SubBroadcast1ElementUint8, SubtractionBroadcast1ElementUint8Test) - -// Div -ARMNN_AUTO_TEST_CASE(SimpleDivision, DivisionTest) -ARMNN_AUTO_TEST_CASE(DivisionByZero, DivisionByZeroTest) -ARMNN_AUTO_TEST_CASE(DivisionBroadcast1Element, DivisionBroadcast1ElementTest) -ARMNN_AUTO_TEST_CASE(DivisionBroadcast1DVector, DivisionBroadcast1DVectorTest) -// NOTE: division by zero for quantized div needs more attention -// see IVGCVSW-1849 -ARMNN_AUTO_TEST_CASE(DivisionUint8, DivisionUint8Test) -ARMNN_AUTO_TEST_CASE(DivisionUint8Broadcast1Element, DivisionBroadcast1ElementUint8Test) -ARMNN_AUTO_TEST_CASE(DivisionUint8Broadcast1DVector, DivisionBroadcast1DVectorUint8Test) - -// Mul -ARMNN_AUTO_TEST_CASE(SimpleMultiplication, MultiplicationTest) -ARMNN_AUTO_TEST_CASE(MultiplicationBroadcast1Element, MultiplicationBroadcast1ElementTest) -ARMNN_AUTO_TEST_CASE(MultiplicationBroadcast1DVector, MultiplicationBroadcast1DVectorTest) -ARMNN_AUTO_TEST_CASE(MultiplicationUint8, MultiplicationUint8Test) -ARMNN_AUTO_TEST_CASE(MultiplicationBroadcast1ElementUint8, MultiplicationBroadcast1ElementUint8Test) -ARMNN_AUTO_TEST_CASE(MultiplicationBroadcast1DVectorUint8, MultiplicationBroadcast1DVectorUint8Test) - -// Batch Norm -ARMNN_AUTO_TEST_CASE(BatchNorm, BatchNormTest) -ARMNN_AUTO_TEST_CASE(BatchNormUint8, BatchNormUint8Test) - -// Resize Bilinear -ARMNN_AUTO_TEST_CASE(SimpleResizeBilinear, SimpleResizeBilinearTest) -ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearUint8, SimpleResizeBilinearUint8Test) -ARMNN_AUTO_TEST_CASE(ResizeBilinearNop, ResizeBilinearNopTest) -ARMNN_AUTO_TEST_CASE(ResizeBilinearNopUint8, ResizeBilinearNopUint8Test) -ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMin, ResizeBilinearSqMinTest) -ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinUint8, ResizeBilinearSqMinUint8Test) 
-ARMNN_AUTO_TEST_CASE(ResizeBilinearMin, ResizeBilinearMinTest) -ARMNN_AUTO_TEST_CASE(ResizeBilinearMinUint8, ResizeBilinearMinUint8Test) -ARMNN_AUTO_TEST_CASE(ResizeBilinearMag, ResizeBilinearMagTest) -ARMNN_AUTO_TEST_CASE(ResizeBilinearMagUint8, ResizeBilinearMagUint8Test) - -// Fake Quantization -ARMNN_AUTO_TEST_CASE(FakeQuantization, FakeQuantizationTest) - -// L2 Normalization -ARMNN_AUTO_TEST_CASE(L2Normalization1d, L2Normalization1dTest) -ARMNN_AUTO_TEST_CASE(L2Normalization2d, L2Normalization2dTest) -ARMNN_AUTO_TEST_CASE(L2Normalization3d, L2Normalization3dTest) -ARMNN_AUTO_TEST_CASE(L2Normalization4d, L2Normalization4dTest) - -// NOTE: These tests are disabled until NHWC is supported by the reference L2Normalization implementation. -//ARMNN_AUTO_TEST_CASE(L2Normalization1dNhwc, L2Normalization1dNhwcTest); -//ARMNN_AUTO_TEST_CASE(L2Normalization2dNhwc, L2Normalization2dNhwcTest); -//ARMNN_AUTO_TEST_CASE(L2Normalization3dNhwc, L2Normalization3dNhwcTest); -//ARMNN_AUTO_TEST_CASE(L2Normalization4dNhwc, L2Normalization4dNhwcTest); - -// Constant -ARMNN_AUTO_TEST_CASE(Constant, ConstantTest) -ARMNN_AUTO_TEST_CASE(ConstantUint8, ConstantUint8Test) - -// Concat -ARMNN_AUTO_TEST_CASE(Concatenation1d, Concatenation1dTest) -ARMNN_AUTO_TEST_CASE(Concatenation1dUint8, Concatenation1dUint8Test) - -ARMNN_AUTO_TEST_CASE(Concatenation2dDim0, Concatenation2dDim0Test) -ARMNN_AUTO_TEST_CASE(Concatenation2dDim0Uint8, Concatenation2dDim0Uint8Test) -ARMNN_AUTO_TEST_CASE(Concatenation2dDim1, Concatenation2dDim1Test) -ARMNN_AUTO_TEST_CASE(Concatenation2dDim1Uint8, Concatenation2dDim1Uint8Test) - -ARMNN_AUTO_TEST_CASE(Concatenation2dDim0DiffInputDims, Concatenation2dDim0DiffInputDimsTest) -ARMNN_AUTO_TEST_CASE(Concatenation2dDim0DiffInputDimsUint8, Concatenation2dDim0DiffInputDimsUint8Test) -ARMNN_AUTO_TEST_CASE(Concatenation2dDim1DiffInputDims, Concatenation2dDim1DiffInputDimsTest) -ARMNN_AUTO_TEST_CASE(Concatenation2dDim1DiffInputDimsUint8, 
Concatenation2dDim1DiffInputDimsUint8Test) - -ARMNN_AUTO_TEST_CASE(Concatenation3dDim0, Concatenation3dDim0Test) -ARMNN_AUTO_TEST_CASE(Concatenation3dDim0Uint8, Concatenation3dDim0Uint8Test) -ARMNN_AUTO_TEST_CASE(Concatenation3dDim1, Concatenation3dDim1Test) -ARMNN_AUTO_TEST_CASE(Concatenation3dDim1Uint8, Concatenation3dDim1Uint8Test) -ARMNN_AUTO_TEST_CASE(Concatenation3dDim2, Concatenation3dDim2Test) -ARMNN_AUTO_TEST_CASE(Concatenation3dDim2Uint8, Concatenation3dDim2Uint8Test) - -ARMNN_AUTO_TEST_CASE(Concatenation3dDim0DiffInputDims, Concatenation3dDim0DiffInputDimsTest) -ARMNN_AUTO_TEST_CASE(Concatenation3dDim0DiffInputDimsUint8, Concatenation3dDim0DiffInputDimsUint8Test) -ARMNN_AUTO_TEST_CASE(Concatenation3dDim1DiffInputDims, Concatenation3dDim1DiffInputDimsTest) -ARMNN_AUTO_TEST_CASE(Concatenation3dDim1DiffInputDimsUint8, Concatenation3dDim1DiffInputDimsUint8Test) -ARMNN_AUTO_TEST_CASE(Concatenation3dDim2DiffInputDims, Concatenation3dDim2DiffInputDimsTest) -ARMNN_AUTO_TEST_CASE(Concatenation3dDim2DiffInputDimsUint8, Concatenation3dDim2DiffInputDimsUint8Test) - -// Floor -ARMNN_AUTO_TEST_CASE(SimpleFloor, SimpleFloorTest) - -// Reshape -ARMNN_AUTO_TEST_CASE(SimpleReshapeFloat32, SimpleReshapeFloat32Test) -ARMNN_AUTO_TEST_CASE(SimpleReshapeUint8, SimpleReshapeUint8Test) - -// Permute -ARMNN_AUTO_TEST_CASE(SimplePermuteFloat32, SimplePermuteFloat32Test) -ARMNN_AUTO_TEST_CASE(SimplePermuteUint8, SimplePermuteUint8Test) -ARMNN_AUTO_TEST_CASE(PermuteFloat32ValueSet1, PermuteFloat32ValueSet1Test) -ARMNN_AUTO_TEST_CASE(PermuteFloat32ValueSet2, PermuteFloat32ValueSet2Test) -ARMNN_AUTO_TEST_CASE(PermuteFloat32ValueSet3, PermuteFloat32ValueSet3Test) - -// Convert from Float16 to Float32 -ARMNN_AUTO_TEST_CASE(SimpleConvertFp16ToFp32, SimpleConvertFp16ToFp32Test) -// Convert from Float32 to Float16 -ARMNN_AUTO_TEST_CASE(SimpleConvertFp32ToFp16, SimpleConvertFp32ToFp16Test) - -// Mean -ARMNN_AUTO_TEST_CASE(MeanUint8Simple, MeanUint8SimpleTest) 
-ARMNN_AUTO_TEST_CASE(MeanUint8SimpleAxis, MeanUint8SimpleAxisTest) -ARMNN_AUTO_TEST_CASE(MeanUint8KeepDims, MeanUint8KeepDimsTest) -ARMNN_AUTO_TEST_CASE(MeanUint8MultipleDims, MeanUint8MultipleDimsTest) -ARMNN_AUTO_TEST_CASE(MeanVtsUint8, MeanVtsUint8Test) - -ARMNN_AUTO_TEST_CASE(MeanFloatSimple, MeanFloatSimpleTest) -ARMNN_AUTO_TEST_CASE(MeanFloatSimpleAxis, MeanFloatSimpleAxisTest) -ARMNN_AUTO_TEST_CASE(MeanFloatKeepDims, MeanFloatKeepDimsTest) -ARMNN_AUTO_TEST_CASE(MeanFloatMultipleDims, MeanFloatMultipleDimsTest) -ARMNN_AUTO_TEST_CASE(MeanVtsFloat1, MeanVtsFloat1Test) -ARMNN_AUTO_TEST_CASE(MeanVtsFloat2, MeanVtsFloat2Test) - -BOOST_AUTO_TEST_SUITE_END() -- cgit v1.2.1