about summary refs log tree commit diff
path: root/src/armnn/backends/test
diff options
context:
space:
mode:
author    arovir01 <Aron.Virginas-Tar@arm.com>    2018-08-31 15:26:35 +0100
committer Matthew Bentham <matthew.bentham@arm.com>    2018-09-17 17:21:25 +0100
commit    9e53a35b66b1ec7ceee7c712380a13596175b83b (patch)
tree      d40bf9f27c799184324b6ab91cbb1a546fc4012e /src/armnn/backends/test
parent    5540d2f379b15503269d1b9b5fbe8fbafd160d2e (diff)
download  armnn-9e53a35b66b1ec7ceee7c712380a13596175b83b.tar.gz
IVGCVSW-1784: Rename float32 workloads for ACL
Change-Id: I98bdfe9cb12c663d1d5cfa456e2cc967d70ab22b
Diffstat (limited to 'src/armnn/backends/test')
-rw-r--r--  src/armnn/backends/test/CreateWorkloadCl.cpp    102
-rw-r--r--  src/armnn/backends/test/CreateWorkloadNeon.cpp   80
2 files changed, 91 insertions, 91 deletions
diff --git a/src/armnn/backends/test/CreateWorkloadCl.cpp b/src/armnn/backends/test/CreateWorkloadCl.cpp
index 5d4265911f..538d5af667 100644
--- a/src/armnn/backends/test/CreateWorkloadCl.cpp
+++ b/src/armnn/backends/test/CreateWorkloadCl.cpp
@@ -37,14 +37,14 @@ static void ClCreateActivationWorkloadTest()
BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {1}));
}
-BOOST_AUTO_TEST_CASE(CreateActivationFloat32Workload)
+BOOST_AUTO_TEST_CASE(CreateActivationFloatWorkload)
{
- ClCreateActivationWorkloadTest<ClActivationFloat32Workload, armnn::DataType::Float32>();
+ ClCreateActivationWorkloadTest<ClActivationFloatWorkload, armnn::DataType::Float32>();
}
BOOST_AUTO_TEST_CASE(CreateActivationFloat16Workload)
{
- ClCreateActivationWorkloadTest<ClActivationFloat32Workload, armnn::DataType::Float16>();
+ ClCreateActivationWorkloadTest<ClActivationFloatWorkload, armnn::DataType::Float16>();
}
template <typename AdditionWorkloadType, armnn::DataType DataType>
@@ -64,14 +64,14 @@ static void ClCreateAdditionWorkloadTest()
BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {2, 3}));
}
-BOOST_AUTO_TEST_CASE(CreateAdditionFloat32Workload)
+BOOST_AUTO_TEST_CASE(CreateAdditionFloatWorkload)
{
- ClCreateAdditionWorkloadTest<ClAdditionFloat32Workload, armnn::DataType::Float32>();
+ ClCreateAdditionWorkloadTest<ClAdditionFloatWorkload, armnn::DataType::Float32>();
}
BOOST_AUTO_TEST_CASE(CreateAdditionFloat16Workload)
{
- ClCreateAdditionWorkloadTest<ClAdditionFloat32Workload, armnn::DataType::Float16>();
+ ClCreateAdditionWorkloadTest<ClAdditionFloatWorkload, armnn::DataType::Float16>();
}
template <typename BatchNormalizationWorkloadType, armnn::DataType DataType>
@@ -92,14 +92,14 @@ static void ClCreateBatchNormalizationWorkloadTest()
BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {2, 3, 1, 1}));
}
-BOOST_AUTO_TEST_CASE(CreateBatchNormalizationFloat32Workload)
+BOOST_AUTO_TEST_CASE(CreateBatchNormalizationFloatWorkload)
{
- ClCreateBatchNormalizationWorkloadTest<ClBatchNormalizationFloat32Workload, armnn::DataType::Float32>();
+ ClCreateBatchNormalizationWorkloadTest<ClBatchNormalizationFloatWorkload, armnn::DataType::Float32>();
}
BOOST_AUTO_TEST_CASE(CreateBatchNormalizationFloat16Workload)
{
- ClCreateBatchNormalizationWorkloadTest<ClBatchNormalizationFloat32Workload, armnn::DataType::Float16>();
+ ClCreateBatchNormalizationWorkloadTest<ClBatchNormalizationFloatWorkload, armnn::DataType::Float16>();
}
BOOST_AUTO_TEST_CASE(CreateConvertFp16ToFp32Workload)
@@ -150,14 +150,14 @@ static void ClConvolution2dWorkloadTest()
BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {2, 2, 2, 10}));
}
-BOOST_AUTO_TEST_CASE(CreateConvolution2dFloat32Workload)
+BOOST_AUTO_TEST_CASE(CreateConvolution2dFloatWorkload)
{
- ClConvolution2dWorkloadTest<ClConvolution2dFloat32Workload, armnn::DataType::Float32>();
+ ClConvolution2dWorkloadTest<ClConvolution2dFloatWorkload, armnn::DataType::Float32>();
}
BOOST_AUTO_TEST_CASE(CreateConvolution2dFloat16Workload)
{
- ClConvolution2dWorkloadTest<ClConvolution2dFloat32Workload, armnn::DataType::Float16>();
+ ClConvolution2dWorkloadTest<ClConvolution2dFloatWorkload, armnn::DataType::Float16>();
}
@@ -177,14 +177,14 @@ static void ClDirectConvolution2dWorkloadTest()
BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {2, 2, 6, 6}));
}
-BOOST_AUTO_TEST_CASE(CreateDirectConvolution2dFloat32Workload)
+BOOST_AUTO_TEST_CASE(CreateDirectConvolution2dFloatWorkload)
{
- ClDirectConvolution2dWorkloadTest<ClConvolution2dFloat32Workload, armnn::DataType::Float32>();
+ ClDirectConvolution2dWorkloadTest<ClConvolution2dFloatWorkload, armnn::DataType::Float32>();
}
BOOST_AUTO_TEST_CASE(CreateDirectConvolution2dFloat16Workload)
{
- ClDirectConvolution2dWorkloadTest<ClConvolution2dFloat32Workload, armnn::DataType::Float16>();
+ ClDirectConvolution2dWorkloadTest<ClConvolution2dFloatWorkload, armnn::DataType::Float16>();
}
BOOST_AUTO_TEST_CASE(CreateDirectConvolution2dUint8Workload)
@@ -209,14 +209,14 @@ static void ClCreateFullyConnectedWorkloadTest()
}
-BOOST_AUTO_TEST_CASE(CreateFullyConnectedFloat32WorkloadTest)
+BOOST_AUTO_TEST_CASE(CreateFullyConnectedFloatWorkloadTest)
{
- ClCreateFullyConnectedWorkloadTest<ClFullyConnectedFloat32Workload, armnn::DataType::Float32>();
+ ClCreateFullyConnectedWorkloadTest<ClFullyConnectedFloatWorkload, armnn::DataType::Float32>();
}
BOOST_AUTO_TEST_CASE(CreateFullyConnectedFloat16WorkloadTest)
{
- ClCreateFullyConnectedWorkloadTest<ClFullyConnectedFloat32Workload, armnn::DataType::Float16>();
+ ClCreateFullyConnectedWorkloadTest<ClFullyConnectedFloatWorkload, armnn::DataType::Float16>();
}
@@ -239,14 +239,14 @@ static void ClCreateMultiplicationWorkloadTest()
BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {2, 3}));
}
-BOOST_AUTO_TEST_CASE(CreateMultiplicationFloat32WorkloadTest)
+BOOST_AUTO_TEST_CASE(CreateMultiplicationFloatWorkloadTest)
{
- ClCreateMultiplicationWorkloadTest<ClMultiplicationFloat32Workload, armnn::DataType::Float32>();
+ ClCreateMultiplicationWorkloadTest<ClMultiplicationFloatWorkload, armnn::DataType::Float32>();
}
BOOST_AUTO_TEST_CASE(CreateMultiplicationFloat16WorkloadTest)
{
- ClCreateMultiplicationWorkloadTest<ClMultiplicationFloat32Workload, armnn::DataType::Float16>();
+ ClCreateMultiplicationWorkloadTest<ClMultiplicationFloatWorkload, armnn::DataType::Float16>();
}
template <typename NormalizationWorkloadType, typename armnn::DataType DataType>
@@ -267,14 +267,14 @@ static void ClNormalizationWorkloadTest()
BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {3, 5, 5, 1}));
}
-BOOST_AUTO_TEST_CASE(CreateNormalizationFloat32Workload)
+BOOST_AUTO_TEST_CASE(CreateNormalizationFloatWorkload)
{
- ClNormalizationWorkloadTest<ClNormalizationFloat32Workload, armnn::DataType::Float32>();
+ ClNormalizationWorkloadTest<ClNormalizationFloatWorkload, armnn::DataType::Float32>();
}
BOOST_AUTO_TEST_CASE(CreateNormalizationFloat16Workload)
{
- ClNormalizationWorkloadTest<ClNormalizationFloat32Workload, armnn::DataType::Float16>();
+ ClNormalizationWorkloadTest<ClNormalizationFloatWorkload, armnn::DataType::Float16>();
}
template <typename Pooling2dWorkloadType, typename armnn::DataType DataType>
@@ -294,14 +294,14 @@ static void ClPooling2dWorkloadTest()
BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {3, 2, 2, 4}));
}
-BOOST_AUTO_TEST_CASE(CreatePooling2dFloat32Workload)
+BOOST_AUTO_TEST_CASE(CreatePooling2dFloatWorkload)
{
- ClPooling2dWorkloadTest<ClPooling2dFloat32Workload, armnn::DataType::Float32>();
+ ClPooling2dWorkloadTest<ClPooling2dFloatWorkload, armnn::DataType::Float32>();
}
BOOST_AUTO_TEST_CASE(CreatePooling2dFloat16Workload)
{
- ClPooling2dWorkloadTest<ClPooling2dFloat32Workload, armnn::DataType::Float16>();
+ ClPooling2dWorkloadTest<ClPooling2dFloatWorkload, armnn::DataType::Float16>();
}
template <typename ReshapeWorkloadType, typename armnn::DataType DataType>
@@ -321,14 +321,14 @@ static void ClCreateReshapeWorkloadTest()
BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {4})); // Leading size 1 dimensions are collapsed by ACL.
}
-BOOST_AUTO_TEST_CASE(CreateReshapeFloat32Workload)
+BOOST_AUTO_TEST_CASE(CreateReshapeFloatWorkload)
{
- ClCreateReshapeWorkloadTest<ClReshapeFloat32Workload, armnn::DataType::Float32>();
+ ClCreateReshapeWorkloadTest<ClReshapeFloatWorkload, armnn::DataType::Float32>();
}
BOOST_AUTO_TEST_CASE(CreateReshapeFloat16Workload)
{
- ClCreateReshapeWorkloadTest<ClReshapeFloat32Workload, armnn::DataType::Float16>();
+ ClCreateReshapeWorkloadTest<ClReshapeFloatWorkload, armnn::DataType::Float16>();
}
BOOST_AUTO_TEST_CASE(CreateReshapeUint8Workload)
@@ -344,7 +344,7 @@ static void ClSoftmaxWorkloadTest()
auto workload = CreateSoftmaxWorkloadTest<SoftmaxWorkloadType, DataType>(factory, graph);
- // Checks that inputs/outputs are as we expect them (see definition of ClSoftmaxFloat32Workload).
+ // Checks that inputs/outputs are as we expect them (see definition of ClSoftmaxFloatWorkload).
SoftmaxQueueDescriptor queueDescriptor = workload->GetData();
auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
@@ -354,14 +354,14 @@ static void ClSoftmaxWorkloadTest()
}
-BOOST_AUTO_TEST_CASE(CreateSoftmaxFloat32WorkloadTest)
+BOOST_AUTO_TEST_CASE(CreateSoftmaxFloatWorkloadTest)
{
- ClSoftmaxWorkloadTest<ClSoftmaxFloat32Workload, armnn::DataType::Float32>();
+ ClSoftmaxWorkloadTest<ClSoftmaxFloatWorkload, armnn::DataType::Float32>();
}
BOOST_AUTO_TEST_CASE(CreateSoftmaxFloat16WorkloadTest)
{
- ClSoftmaxWorkloadTest<ClSoftmaxFloat32Workload, armnn::DataType::Float16>();
+ ClSoftmaxWorkloadTest<ClSoftmaxFloatWorkload, armnn::DataType::Float16>();
}
template <typename SplitterWorkloadType, typename armnn::DataType DataType>
@@ -389,14 +389,14 @@ static void ClSplitterWorkloadTest()
BOOST_TEST(CompareIClTensorHandleShape(outputHandle0, {7, 7}));
}
-BOOST_AUTO_TEST_CASE(CreateSplitterFloat32Workload)
+BOOST_AUTO_TEST_CASE(CreateSplitterFloatWorkload)
{
- ClSplitterWorkloadTest<ClSplitterFloat32Workload, armnn::DataType::Float32>();
+ ClSplitterWorkloadTest<ClSplitterFloatWorkload, armnn::DataType::Float32>();
}
BOOST_AUTO_TEST_CASE(CreateSplitterFloat16Workload)
{
- ClSplitterWorkloadTest<ClSplitterFloat32Workload, armnn::DataType::Float16>();
+ ClSplitterWorkloadTest<ClSplitterFloatWorkload, armnn::DataType::Float16>();
}
template <typename SplitterWorkloadType, typename MergerWorkloadType, typename armnn::DataType DataType>
@@ -441,14 +441,14 @@ static void ClSplitterMergerTest()
BOOST_TEST(validSubTensorParents);
}
-BOOST_AUTO_TEST_CASE(CreateSplitterMergerFloat32Workload)
+BOOST_AUTO_TEST_CASE(CreateSplitterMergerFloatWorkload)
{
- ClSplitterMergerTest<ClSplitterFloat32Workload, ClMergerFloat32Workload, armnn::DataType::Float32>();
+ ClSplitterMergerTest<ClSplitterFloatWorkload, ClMergerFloatWorkload, armnn::DataType::Float32>();
}
BOOST_AUTO_TEST_CASE(CreateSplitterMergerFloat16Workload)
{
- ClSplitterMergerTest<ClSplitterFloat32Workload, ClMergerFloat32Workload, armnn::DataType::Float16>();
+ ClSplitterMergerTest<ClSplitterFloatWorkload, ClMergerFloatWorkload, armnn::DataType::Float16>();
}
@@ -459,14 +459,14 @@ BOOST_AUTO_TEST_CASE(CreateSingleOutputMultipleInputs)
Graph graph;
ClWorkloadFactory factory;
- std::unique_ptr<ClSplitterFloat32Workload> wlSplitter;
- std::unique_ptr<ClActivationFloat32Workload> wlActiv0_0;
- std::unique_ptr<ClActivationFloat32Workload> wlActiv0_1;
- std::unique_ptr<ClActivationFloat32Workload> wlActiv1_0;
- std::unique_ptr<ClActivationFloat32Workload> wlActiv1_1;
-
- CreateSplitterMultipleInputsOneOutputWorkloadTest<ClSplitterFloat32Workload,
- ClActivationFloat32Workload, armnn::DataType::Float32>(factory, graph, wlSplitter, wlActiv0_0, wlActiv0_1,
+ std::unique_ptr<ClSplitterFloatWorkload> wlSplitter;
+ std::unique_ptr<ClActivationFloatWorkload> wlActiv0_0;
+ std::unique_ptr<ClActivationFloatWorkload> wlActiv0_1;
+ std::unique_ptr<ClActivationFloatWorkload> wlActiv1_0;
+ std::unique_ptr<ClActivationFloatWorkload> wlActiv1_1;
+
+ CreateSplitterMultipleInputsOneOutputWorkloadTest<ClSplitterFloatWorkload,
+ ClActivationFloatWorkload, armnn::DataType::Float32>(factory, graph, wlSplitter, wlActiv0_0, wlActiv0_1,
wlActiv1_0, wlActiv1_1);
//Checks that the index of inputs/outputs matches what we declared on InputDescriptor construction.
@@ -502,7 +502,7 @@ BOOST_AUTO_TEST_CASE(CreateL2NormalizationWorkload)
Graph graph;
ClWorkloadFactory factory;
- auto workload = CreateL2NormalizationWorkloadTest<ClL2NormalizationFloat32Workload, armnn::DataType::Float32>
+ auto workload = CreateL2NormalizationWorkloadTest<ClL2NormalizationFloatWorkload, armnn::DataType::Float32>
(factory, graph);
// Checks that inputs/outputs are as we expect them (see definition of CreateNormalizationWorkloadTest).
@@ -528,9 +528,9 @@ static void ClCreateLstmWorkloadTest()
BOOST_TEST(CompareIClTensorHandleShape(outputHandle, { 2, 4 }));
}
-BOOST_AUTO_TEST_CASE(CreateLSTMWorkloadFloat32Workload)
+BOOST_AUTO_TEST_CASE(CreateLSTMWorkloadFloatWorkload)
{
- ClCreateLstmWorkloadTest<ClLstmFloat32Workload>();
+ ClCreateLstmWorkloadTest<ClLstmFloatWorkload>();
}
diff --git a/src/armnn/backends/test/CreateWorkloadNeon.cpp b/src/armnn/backends/test/CreateWorkloadNeon.cpp
index b2a444af74..d84b39b339 100644
--- a/src/armnn/backends/test/CreateWorkloadNeon.cpp
+++ b/src/armnn/backends/test/CreateWorkloadNeon.cpp
@@ -69,13 +69,13 @@ static void NeonCreateActivationWorkloadTest()
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
BOOST_AUTO_TEST_CASE(CreateActivationFloat16Workload)
{
- NeonCreateActivationWorkloadTest<NeonActivationFloat32Workload, DataType::Float16>();
+ NeonCreateActivationWorkloadTest<NeonActivationFloatWorkload, DataType::Float16>();
}
#endif
-BOOST_AUTO_TEST_CASE(CreateActivationFloat32Workload)
+BOOST_AUTO_TEST_CASE(CreateActivationFloatWorkload)
{
- NeonCreateActivationWorkloadTest<NeonActivationFloat32Workload, DataType::Float32>();
+ NeonCreateActivationWorkloadTest<NeonActivationFloatWorkload, DataType::Float32>();
}
template <typename AdditionWorkloadType, typename armnn::DataType DataType>
@@ -98,13 +98,13 @@ static void NeonCreateAdditionWorkloadTest()
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
BOOST_AUTO_TEST_CASE(CreateAdditionFloat16Workload)
{
- NeonCreateAdditionWorkloadTest<NeonAdditionFloat32Workload, DataType::Float16>();
+ NeonCreateAdditionWorkloadTest<NeonAdditionFloatWorkload, DataType::Float16>();
}
#endif
-BOOST_AUTO_TEST_CASE(CreateAdditionFloat32Workload)
+BOOST_AUTO_TEST_CASE(CreateAdditionFloatWorkload)
{
- NeonCreateAdditionWorkloadTest<NeonAdditionFloat32Workload, DataType::Float32>();
+ NeonCreateAdditionWorkloadTest<NeonAdditionFloatWorkload, DataType::Float32>();
}
template <typename BatchNormalizationWorkloadType, typename armnn::DataType DataType>
@@ -125,13 +125,13 @@ static void NeonCreateBatchNormalizationWorkloadTest()
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
BOOST_AUTO_TEST_CASE(CreateBatchNormalizationFloat16Workload)
{
- NeonCreateBatchNormalizationWorkloadTest<NeonBatchNormalizationFloat32Workload, DataType::Float16>();
+ NeonCreateBatchNormalizationWorkloadTest<NeonBatchNormalizationFloatWorkload, DataType::Float16>();
}
#endif
-BOOST_AUTO_TEST_CASE(CreateBatchNormalizationFloat32Workload)
+BOOST_AUTO_TEST_CASE(CreateBatchNormalizationFloatWorkload)
{
- NeonCreateBatchNormalizationWorkloadTest<NeonBatchNormalizationFloat32Workload, DataType::Float32>();
+ NeonCreateBatchNormalizationWorkloadTest<NeonBatchNormalizationFloatWorkload, DataType::Float32>();
}
template <typename Convolution2dWorkloadType, typename armnn::DataType DataType>
@@ -153,13 +153,13 @@ static void NeonCreateConvolution2dWorkloadTest()
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
BOOST_AUTO_TEST_CASE(CreateConvolution2dFloat16Workload)
{
- NeonCreateConvolution2dWorkloadTest<NeonConvolution2dFloat32Workload, DataType::Float16>();
+ NeonCreateConvolution2dWorkloadTest<NeonConvolution2dFloatWorkload, DataType::Float16>();
}
#endif
-BOOST_AUTO_TEST_CASE(CreateConvolution2dFloat32Workload)
+BOOST_AUTO_TEST_CASE(CreateConvolution2dFloatWorkload)
{
- NeonCreateConvolution2dWorkloadTest<NeonConvolution2dFloat32Workload, DataType::Float32>();
+ NeonCreateConvolution2dWorkloadTest<NeonConvolution2dFloatWorkload, DataType::Float32>();
}
template <typename FullyConnectedWorkloadType, typename armnn::DataType DataType>
@@ -181,13 +181,13 @@ static void NeonCreateFullyConnectedWorkloadTest()
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
BOOST_AUTO_TEST_CASE(CreateFullyConnectedFloat16Workload)
{
- NeonCreateFullyConnectedWorkloadTest<NeonFullyConnectedFloat32Workload, DataType::Float16>();
+ NeonCreateFullyConnectedWorkloadTest<NeonFullyConnectedFloatWorkload, DataType::Float16>();
}
#endif
-BOOST_AUTO_TEST_CASE(CreateFullyConnectedFloat32Workload)
+BOOST_AUTO_TEST_CASE(CreateFullyConnectedFloatWorkload)
{
- NeonCreateFullyConnectedWorkloadTest<NeonFullyConnectedFloat32Workload, DataType::Float32>();
+ NeonCreateFullyConnectedWorkloadTest<NeonFullyConnectedFloatWorkload, DataType::Float32>();
}
template <typename MultiplicationWorkloadType, typename armnn::DataType DataType>
@@ -211,13 +211,13 @@ static void NeonCreateMultiplicationWorkloadTest()
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
BOOST_AUTO_TEST_CASE(CreateMultiplicationFloat16Workload)
{
- NeonCreateMultiplicationWorkloadTest<NeonMultiplicationFloat32Workload, DataType::Float16>();
+ NeonCreateMultiplicationWorkloadTest<NeonMultiplicationFloatWorkload, DataType::Float16>();
}
#endif
-BOOST_AUTO_TEST_CASE(CreateMultiplicationFloat32Workload)
+BOOST_AUTO_TEST_CASE(CreateMultiplicationFloatWorkload)
{
- NeonCreateMultiplicationWorkloadTest<NeonMultiplicationFloat32Workload, DataType::Float32>();
+ NeonCreateMultiplicationWorkloadTest<NeonMultiplicationFloatWorkload, DataType::Float32>();
}
template <typename NormalizationWorkloadType, typename armnn::DataType DataType>
@@ -238,13 +238,13 @@ static void NeonCreateNormalizationWorkloadTest()
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
BOOST_AUTO_TEST_CASE(CreateNormalizationFloat16Workload)
{
- NeonCreateNormalizationWorkloadTest<NeonNormalizationFloat32Workload, DataType::Float16>();
+ NeonCreateNormalizationWorkloadTest<NeonNormalizationFloatWorkload, DataType::Float16>();
}
#endif
-BOOST_AUTO_TEST_CASE(CreateNormalizationFloat32Workload)
+BOOST_AUTO_TEST_CASE(CreateNormalizationFloatWorkload)
{
- NeonCreateNormalizationWorkloadTest<NeonNormalizationFloat32Workload, DataType::Float32>();
+ NeonCreateNormalizationWorkloadTest<NeonNormalizationFloatWorkload, DataType::Float32>();
}
template <typename Pooling2dWorkloadType, typename armnn::DataType DataType>
@@ -266,13 +266,13 @@ static void NeonCreatePooling2dWorkloadTest()
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
BOOST_AUTO_TEST_CASE(CreatePooling2dFloat16Workload)
{
- NeonCreatePooling2dWorkloadTest<NeonPooling2dFloat32Workload, DataType::Float16>();
+ NeonCreatePooling2dWorkloadTest<NeonPooling2dFloatWorkload, DataType::Float16>();
}
#endif
-BOOST_AUTO_TEST_CASE(CreatePooling2dFloat32Workload)
+BOOST_AUTO_TEST_CASE(CreatePooling2dFloatWorkload)
{
- NeonCreatePooling2dWorkloadTest<NeonPooling2dFloat32Workload, DataType::Float32>();
+ NeonCreatePooling2dWorkloadTest<NeonPooling2dFloatWorkload, DataType::Float32>();
}
BOOST_AUTO_TEST_CASE(CreatePooling2dUint8Workload)
@@ -298,13 +298,13 @@ static void NeonCreateReshapeWorkloadTest()
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
BOOST_AUTO_TEST_CASE(CreateReshapeFloat16Workload)
{
- NeonCreateReshapeWorkloadTest<NeonReshapeFloat32Workload, DataType::Float16>();
+ NeonCreateReshapeWorkloadTest<NeonReshapeFloatWorkload, DataType::Float16>();
}
#endif
-BOOST_AUTO_TEST_CASE(CreateReshapeFloat32Workload)
+BOOST_AUTO_TEST_CASE(CreateReshapeFloatWorkload)
{
- NeonCreateReshapeWorkloadTest<NeonReshapeFloat32Workload, DataType::Float32>();
+ NeonCreateReshapeWorkloadTest<NeonReshapeFloatWorkload, DataType::Float32>();
}
BOOST_AUTO_TEST_CASE(CreateReshapeUint8Workload)
@@ -330,20 +330,20 @@ static void NeonCreateSoftmaxWorkloadTest()
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
BOOST_AUTO_TEST_CASE(CreateSoftmaxFloat16Workload)
{
- NeonCreateSoftmaxWorkloadTest<NeonSoftmaxFloat32Workload, DataType::Float16>();
+ NeonCreateSoftmaxWorkloadTest<NeonSoftmaxFloatWorkload, DataType::Float16>();
}
#endif
-BOOST_AUTO_TEST_CASE(CreateSoftmaxFloat32Workload)
+BOOST_AUTO_TEST_CASE(CreateSoftmaxFloatWorkload)
{
- NeonCreateSoftmaxWorkloadTest<NeonSoftmaxFloat32Workload, DataType::Float32>();
+ NeonCreateSoftmaxWorkloadTest<NeonSoftmaxFloatWorkload, DataType::Float32>();
}
BOOST_AUTO_TEST_CASE(CreateSplitterWorkload)
{
Graph graph;
NeonWorkloadFactory factory;
- auto workload = CreateSplitterWorkloadTest<NeonSplitterFloat32Workload, DataType::Float32>(factory, graph);
+ auto workload = CreateSplitterWorkloadTest<NeonSplitterFloatWorkload, DataType::Float32>(factory, graph);
// Checks that outputs are as we expect them (see definition of CreateSplitterWorkloadTest).
SplitterQueueDescriptor queueDescriptor = workload->GetData();
@@ -372,7 +372,7 @@ BOOST_AUTO_TEST_CASE(CreateSplitterMerger)
NeonWorkloadFactory factory;
auto workloads =
- CreateSplitterMergerWorkloadTest<NeonSplitterFloat32Workload, NeonMergerFloat32Workload,
+ CreateSplitterMergerWorkloadTest<NeonSplitterFloatWorkload, NeonMergerFloatWorkload,
DataType::Float32>(factory, graph);
auto wlSplitter = std::move(workloads.first);
@@ -401,14 +401,14 @@ BOOST_AUTO_TEST_CASE(CreateSingleOutputMultipleInputs)
Graph graph;
NeonWorkloadFactory factory;
- std::unique_ptr<NeonSplitterFloat32Workload> wlSplitter;
- std::unique_ptr<NeonActivationFloat32Workload> wlActiv0_0;
- std::unique_ptr<NeonActivationFloat32Workload> wlActiv0_1;
- std::unique_ptr<NeonActivationFloat32Workload> wlActiv1_0;
- std::unique_ptr<NeonActivationFloat32Workload> wlActiv1_1;
-
- CreateSplitterMultipleInputsOneOutputWorkloadTest<NeonSplitterFloat32Workload,
- NeonActivationFloat32Workload, DataType::Float32>(factory, graph, wlSplitter, wlActiv0_0, wlActiv0_1,
+ std::unique_ptr<NeonSplitterFloatWorkload> wlSplitter;
+ std::unique_ptr<NeonActivationFloatWorkload> wlActiv0_0;
+ std::unique_ptr<NeonActivationFloatWorkload> wlActiv0_1;
+ std::unique_ptr<NeonActivationFloatWorkload> wlActiv1_0;
+ std::unique_ptr<NeonActivationFloatWorkload> wlActiv1_1;
+
+ CreateSplitterMultipleInputsOneOutputWorkloadTest<NeonSplitterFloatWorkload,
+ NeonActivationFloatWorkload, DataType::Float32>(factory, graph, wlSplitter, wlActiv0_0, wlActiv0_1,
wlActiv1_0, wlActiv1_1);
armnn::INeonTensorHandle* sOut0 = dynamic_cast<armnn::INeonTensorHandle*>(wlSplitter->GetData().m_Outputs[0]);