Diffstat (limited to 'src/backends/cl/test/ClCreateWorkloadTests.cpp')
-rw-r--r--  src/backends/cl/test/ClCreateWorkloadTests.cpp  360
1 file changed, 181 insertions(+), 179 deletions(-)
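In substance this is a mechanical migration of the test framework from Boost.Test to doctest. The macro mapping applied throughout the file, summarised here for illustration (assuming doctest 2.x), is:

    // Boost.Test                                ->  doctest
    // BOOST_FIXTURE_TEST_SUITE(Suite, Fixture)  ->  TEST_SUITE("Suite") {
    // BOOST_AUTO_TEST_CASE(Name)                ->  TEST_CASE_FIXTURE(Fixture, "Name")
    // BOOST_TEST(expr)                          ->  CHECK(expr)
    // BOOST_TEST(expr, message)                 ->  CHECK_MESSAGE(expr, message)
    // BOOST_AUTO_TEST_SUITE_END()               ->  }   // closes the TEST_SUITE block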
diff --git a/src/backends/cl/test/ClCreateWorkloadTests.cpp b/src/backends/cl/test/ClCreateWorkloadTests.cpp
index 7602cbbc0b..4e403283e7 100644
--- a/src/backends/cl/test/ClCreateWorkloadTests.cpp
+++ b/src/backends/cl/test/ClCreateWorkloadTests.cpp
@@ -21,14 +21,16 @@
#include <cl/workloads/ClWorkloads.hpp>
#include <cl/workloads/ClWorkloadUtils.hpp>
+#include <doctest/doctest.h>
+
armnn::PredicateResult CompareIClTensorHandleShape(IClTensorHandle* tensorHandle,
std::initializer_list<unsigned int> expectedDimensions)
{
return CompareTensorHandleShape<IClTensorHandle>(tensorHandle, expectedDimensions);
}
-BOOST_FIXTURE_TEST_SUITE(CreateWorkloadCl, ClContextControlFixture)
-
+TEST_SUITE("CreateWorkloadCl")
+{
template <armnn::DataType DataType>
static void ClCreateActivationWorkloadTest()
{
@@ -44,18 +46,18 @@ static void ClCreateActivationWorkloadTest()
auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
auto predResult = CompareIClTensorHandleShape(inputHandle, {1, 1});
- BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+ CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
predResult = CompareIClTensorHandleShape(outputHandle, {1, 1});
- BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+ CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
}
-BOOST_AUTO_TEST_CASE(CreateActivationFloatWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateActivationFloatWorkload")
{
ClCreateActivationWorkloadTest<armnn::DataType::Float32>();
}
-BOOST_AUTO_TEST_CASE(CreateActivationFloat16Workload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateActivationFloat16Workload")
{
ClCreateActivationWorkloadTest<armnn::DataType::Float16>();
}
@@ -78,14 +80,14 @@ static void ClCreateElementwiseWorkloadTest()
auto inputHandle2 = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Inputs[1]);
auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
auto predResult = CompareIClTensorHandleShape(inputHandle1, {2, 3});
- BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+ CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
predResult = CompareIClTensorHandleShape(inputHandle2, {2, 3});
- BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+ CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
predResult = CompareIClTensorHandleShape(outputHandle, {2, 3});
- BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+ CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
}
-BOOST_AUTO_TEST_CASE(CreateAdditionFloatWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateAdditionFloatWorkload")
{
ClCreateElementwiseWorkloadTest<ClAdditionWorkload,
AdditionQueueDescriptor,
@@ -93,7 +95,7 @@ BOOST_AUTO_TEST_CASE(CreateAdditionFloatWorkload)
armnn::DataType::Float32>();
}
-BOOST_AUTO_TEST_CASE(CreateAdditionFloat16Workload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateAdditionFloat16Workload")
{
ClCreateElementwiseWorkloadTest<ClAdditionWorkload,
AdditionQueueDescriptor,
@@ -101,7 +103,7 @@ BOOST_AUTO_TEST_CASE(CreateAdditionFloat16Workload)
armnn::DataType::Float16>();
}
-BOOST_AUTO_TEST_CASE(CreateSubtractionFloatWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateSubtractionFloatWorkload")
{
ClCreateElementwiseWorkloadTest<ClSubtractionWorkload,
SubtractionQueueDescriptor,
@@ -109,7 +111,7 @@ BOOST_AUTO_TEST_CASE(CreateSubtractionFloatWorkload)
armnn::DataType::Float32>();
}
-BOOST_AUTO_TEST_CASE(CreateSubtractionFloat16Workload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateSubtractionFloat16Workload")
{
ClCreateElementwiseWorkloadTest<ClSubtractionWorkload,
SubtractionQueueDescriptor,
@@ -117,7 +119,7 @@ BOOST_AUTO_TEST_CASE(CreateSubtractionFloat16Workload)
armnn::DataType::Float16>();
}
-BOOST_AUTO_TEST_CASE(CreateMultiplicationFloatWorkloadTest)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateMultiplicationFloatWorkloadTest")
{
ClCreateElementwiseWorkloadTest<ClMultiplicationWorkload,
MultiplicationQueueDescriptor,
@@ -125,7 +127,7 @@ BOOST_AUTO_TEST_CASE(CreateMultiplicationFloatWorkloadTest)
armnn::DataType::Float32>();
}
-BOOST_AUTO_TEST_CASE(CreateMultiplicationFloat16WorkloadTest)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateMultiplicationFloat16WorkloadTest")
{
ClCreateElementwiseWorkloadTest<ClMultiplicationWorkload,
MultiplicationQueueDescriptor,
@@ -133,7 +135,7 @@ BOOST_AUTO_TEST_CASE(CreateMultiplicationFloat16WorkloadTest)
armnn::DataType::Float16>();
}
-BOOST_AUTO_TEST_CASE(CreateMultiplicationUint8WorkloadTest)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateMultiplicationUint8WorkloadTest")
{
ClCreateElementwiseWorkloadTest<ClMultiplicationWorkload,
MultiplicationQueueDescriptor,
@@ -141,7 +143,7 @@ BOOST_AUTO_TEST_CASE(CreateMultiplicationUint8WorkloadTest)
armnn::DataType::QAsymmU8>();
}
-BOOST_AUTO_TEST_CASE(CreateDivisionFloatWorkloadTest)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateDivisionFloatWorkloadTest")
{
ClCreateElementwiseWorkloadTest<ClDivisionWorkload,
DivisionQueueDescriptor,
@@ -149,7 +151,7 @@ BOOST_AUTO_TEST_CASE(CreateDivisionFloatWorkloadTest)
armnn::DataType::Float32>();
}
-BOOST_AUTO_TEST_CASE(CreateDivisionFloat16WorkloadTest)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateDivisionFloat16WorkloadTest")
{
ClCreateElementwiseWorkloadTest<ClDivisionWorkload,
DivisionQueueDescriptor,
@@ -174,13 +176,13 @@ static void ClCreateElementwiseUnaryWorkloadTest(armnn::UnaryOperation op)
auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
auto predResult = CompareIClTensorHandleShape(inputHandle, {2, 3});
- BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+ CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
predResult = CompareIClTensorHandleShape(outputHandle, {2, 3});
- BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+ CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
}
-BOOST_AUTO_TEST_CASE(CreateRsqrtFloat32WorkloadTest)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateRsqrtFloat32WorkloadTest")
{
ClCreateElementwiseUnaryWorkloadTest<ClRsqrtWorkload, RsqrtQueueDescriptor, armnn::DataType::Float32>(
UnaryOperation::Rsqrt);
@@ -206,43 +208,43 @@ static void ClCreateBatchNormalizationWorkloadTest(DataLayout dataLayout)
{
case DataLayout::NHWC:
predResult = CompareIClTensorHandleShape(inputHandle, { 2, 4, 4, 3 });
- BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+ CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
predResult = CompareIClTensorHandleShape(outputHandle, { 2, 4, 4, 3 });
- BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+ CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
break;
default: // NCHW
predResult = CompareIClTensorHandleShape(inputHandle, { 2, 3, 4, 4 });
- BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+ CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
predResult = CompareIClTensorHandleShape(outputHandle, { 2, 3, 4, 4 });
- BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+ CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
}
}
-BOOST_AUTO_TEST_CASE(CreateBatchNormalizationFloatNchwWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateBatchNormalizationFloatNchwWorkload")
{
ClCreateBatchNormalizationWorkloadTest<ClBatchNormalizationFloatWorkload,
armnn::DataType::Float32>(DataLayout::NCHW);
}
-BOOST_AUTO_TEST_CASE(CreateBatchNormalizationFloat16NchwWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateBatchNormalizationFloat16NchwWorkload")
{
ClCreateBatchNormalizationWorkloadTest<ClBatchNormalizationFloatWorkload,
armnn::DataType::Float16>(DataLayout::NCHW);
}
-BOOST_AUTO_TEST_CASE(CreateBatchNormalizationFloatNhwcWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateBatchNormalizationFloatNhwcWorkload")
{
ClCreateBatchNormalizationWorkloadTest<ClBatchNormalizationFloatWorkload,
armnn::DataType::Float32>(DataLayout::NHWC);
}
-BOOST_AUTO_TEST_CASE(CreateBatchNormalizationNhwcFloat16NhwcWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateBatchNormalizationNhwcFloat16NhwcWorkload")
{
ClCreateBatchNormalizationWorkloadTest<ClBatchNormalizationFloatWorkload,
armnn::DataType::Float16>(DataLayout::NHWC);
}
-BOOST_AUTO_TEST_CASE(CreateConvertFp16ToFp32Workload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateConvertFp16ToFp32Workload")
{
Graph graph;
ClWorkloadFactory factory =
@@ -254,14 +256,14 @@ BOOST_AUTO_TEST_CASE(CreateConvertFp16ToFp32Workload)
auto inputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
auto predResult = CompareIClTensorHandleShape(inputHandle, {1, 3, 2, 3});
- BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+ CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
predResult = CompareIClTensorHandleShape(outputHandle, {1, 3, 2, 3});
- BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
- BOOST_TEST((inputHandle->GetTensor().info()->data_type() == arm_compute::DataType::F16));
- BOOST_TEST((outputHandle->GetTensor().info()->data_type() == arm_compute::DataType::F32));
+ CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
+ CHECK((inputHandle->GetTensor().info()->data_type() == arm_compute::DataType::F16));
+ CHECK((outputHandle->GetTensor().info()->data_type() == arm_compute::DataType::F32));
}
-BOOST_AUTO_TEST_CASE(CreateConvertFp32ToFp16Workload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateConvertFp32ToFp16Workload")
{
Graph graph;
ClWorkloadFactory factory =
@@ -274,11 +276,11 @@ BOOST_AUTO_TEST_CASE(CreateConvertFp32ToFp16Workload)
auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
auto predResult = CompareIClTensorHandleShape(inputHandle, {1, 3, 2, 3});
- BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+ CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
predResult = CompareIClTensorHandleShape(outputHandle, {1, 3, 2, 3});
- BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
- BOOST_TEST((inputHandle->GetTensor().info()->data_type() == arm_compute::DataType::F32));
- BOOST_TEST((outputHandle->GetTensor().info()->data_type() == arm_compute::DataType::F16));
+ CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
+ CHECK((inputHandle->GetTensor().info()->data_type() == arm_compute::DataType::F32));
+ CHECK((outputHandle->GetTensor().info()->data_type() == arm_compute::DataType::F16));
}
template <typename Convolution2dWorkloadType, typename armnn::DataType DataType>
@@ -301,31 +303,31 @@ static void ClConvolution2dWorkloadTest(DataLayout dataLayout)
Convolution2dQueueDescriptor queueDescriptor = workload->GetData();
auto inputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
- BOOST_TEST((inputHandle->GetShape() == inputShape));
- BOOST_TEST((outputHandle->GetShape() == outputShape));
+ CHECK((inputHandle->GetShape() == inputShape));
+ CHECK((outputHandle->GetShape() == outputShape));
}
-BOOST_AUTO_TEST_CASE(CreateConvolution2dFloatNchwWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateConvolution2dFloatNchwWorkload")
{
ClConvolution2dWorkloadTest<ClConvolution2dWorkload, armnn::DataType::Float32>(DataLayout::NCHW);
}
-BOOST_AUTO_TEST_CASE(CreateConvolution2dFloatNhwcWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateConvolution2dFloatNhwcWorkload")
{
ClConvolution2dWorkloadTest<ClConvolution2dWorkload, armnn::DataType::Float32>(DataLayout::NHWC);
}
-BOOST_AUTO_TEST_CASE(CreateConvolution2dFloat16NchwWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateConvolution2dFloat16NchwWorkload")
{
ClConvolution2dWorkloadTest<ClConvolution2dWorkload, armnn::DataType::Float16>(DataLayout::NCHW);
}
-BOOST_AUTO_TEST_CASE(CreateConvolution2dFloat16NhwcWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateConvolution2dFloat16NhwcWorkload")
{
ClConvolution2dWorkloadTest<ClConvolution2dWorkload, armnn::DataType::Float16>(DataLayout::NHWC);
}
-BOOST_AUTO_TEST_CASE(CreateConvolution2dFastMathEnabledWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateConvolution2dFastMathEnabledWorkload")
{
Graph graph;
@@ -353,7 +355,7 @@ BOOST_AUTO_TEST_CASE(CreateConvolution2dFastMathEnabledWorkload)
ARMNN_ASSERT(conv2dWorkload->GetConvolutionMethod() == arm_compute::ConvolutionMethod::WINOGRAD);
}
-BOOST_AUTO_TEST_CASE(CreateConvolution2dClCompiledContextWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateConvolution2dClCompiledContextWorkload")
{
using namespace armnn;
@@ -434,7 +436,7 @@ BOOST_AUTO_TEST_CASE(CreateConvolution2dClCompiledContextWorkload)
// Check built programs are empty in context
- BOOST_TEST(clCompileContext.get_built_programs().empty());
+ CHECK(clCompileContext.get_built_programs().empty());
auto workload = std::make_unique<ClConvolution2dWorkload>(queueDescriptor,
workloadInfo,
@@ -442,7 +444,7 @@ BOOST_AUTO_TEST_CASE(CreateConvolution2dClCompiledContextWorkload)
clCompileContext);
ARMNN_ASSERT(workload != nullptr);
// Check built programs are not empty in context
- BOOST_TEST(!clCompileContext.get_built_programs().empty());
+ CHECK(!clCompileContext.get_built_programs().empty());
}
template <typename DepthwiseConvolutionWorkloadType, typename armnn::DataType DataType>
@@ -465,11 +467,11 @@ static void ClDepthwiseConvolutionWorkloadTest(DataLayout dataLayout)
TensorShape outputShape = (dataLayout == DataLayout::NCHW) ? std::initializer_list<unsigned int>({ 2, 2, 5, 5 })
: std::initializer_list<unsigned int>({ 2, 5, 5, 2 });
- BOOST_TEST((inputHandle->GetShape() == inputShape));
- BOOST_TEST((outputHandle->GetShape() == outputShape));
+ CHECK((inputHandle->GetShape() == inputShape));
+ CHECK((outputHandle->GetShape() == outputShape));
}
-BOOST_AUTO_TEST_CASE(CreateDepthwiseConvolutionFloat32NhwcWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateDepthwiseConvolutionFloat32NhwcWorkload")
{
ClDepthwiseConvolutionWorkloadTest<ClDepthwiseConvolutionWorkload, DataType::Float32>(DataLayout::NHWC);
}
@@ -488,22 +490,22 @@ static void ClDirectConvolution2dWorkloadTest()
auto inputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
auto predResult = CompareIClTensorHandleShape(inputHandle, {2, 3, 6, 6});
- BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+ CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
predResult = CompareIClTensorHandleShape(outputHandle, {2, 2, 6, 6});
- BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+ CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
}
-BOOST_AUTO_TEST_CASE(CreateDirectConvolution2dFloatWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateDirectConvolution2dFloatWorkload")
{
ClDirectConvolution2dWorkloadTest<ClConvolution2dWorkload, armnn::DataType::Float32>();
}
-BOOST_AUTO_TEST_CASE(CreateDirectConvolution2dFloat16Workload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateDirectConvolution2dFloat16Workload")
{
ClDirectConvolution2dWorkloadTest<ClConvolution2dWorkload, armnn::DataType::Float16>();
}
-BOOST_AUTO_TEST_CASE(CreateDirectConvolution2dUint8Workload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateDirectConvolution2dUint8Workload")
{
ClDirectConvolution2dWorkloadTest<ClConvolution2dWorkload, armnn::DataType::QAsymmU8>();
}
@@ -523,18 +525,18 @@ static void ClCreateFullyConnectedWorkloadTest()
auto inputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
auto predResult = CompareIClTensorHandleShape(inputHandle, {3, 1, 4, 5});
- BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+ CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
predResult = CompareIClTensorHandleShape(outputHandle, {3, 7});
- BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+ CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
}
-BOOST_AUTO_TEST_CASE(CreateFullyConnectedFloatWorkloadTest)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateFullyConnectedFloatWorkloadTest")
{
ClCreateFullyConnectedWorkloadTest<ClFullyConnectedWorkload, armnn::DataType::Float32>();
}
-BOOST_AUTO_TEST_CASE(CreateFullyConnectedFloat16WorkloadTest)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateFullyConnectedFloat16WorkloadTest")
{
ClCreateFullyConnectedWorkloadTest<ClFullyConnectedWorkload, armnn::DataType::Float16>();
}
@@ -558,26 +560,26 @@ static void ClNormalizationWorkloadTest(DataLayout dataLayout)
TensorShape outputShape = (dataLayout == DataLayout::NCHW) ? std::initializer_list<unsigned int>({3, 5, 5, 1})
: std::initializer_list<unsigned int>({3, 1, 5, 5});
- BOOST_TEST((inputHandle->GetShape() == inputShape));
- BOOST_TEST((outputHandle->GetShape() == outputShape));
+ CHECK((inputHandle->GetShape() == inputShape));
+ CHECK((outputHandle->GetShape() == outputShape));
}
-BOOST_AUTO_TEST_CASE(CreateNormalizationFloat32NchwWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateNormalizationFloat32NchwWorkload")
{
ClNormalizationWorkloadTest<ClNormalizationFloatWorkload, armnn::DataType::Float32>(DataLayout::NCHW);
}
-BOOST_AUTO_TEST_CASE(CreateNormalizationFloat16NchwWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateNormalizationFloat16NchwWorkload")
{
ClNormalizationWorkloadTest<ClNormalizationFloatWorkload, armnn::DataType::Float16>(DataLayout::NCHW);
}
-BOOST_AUTO_TEST_CASE(CreateNormalizationFloat32NhwcWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateNormalizationFloat32NhwcWorkload")
{
ClNormalizationWorkloadTest<ClNormalizationFloatWorkload, armnn::DataType::Float32>(DataLayout::NHWC);
}
-BOOST_AUTO_TEST_CASE(CreateNormalizationFloat16NhwcWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateNormalizationFloat16NhwcWorkload")
{
ClNormalizationWorkloadTest<ClNormalizationFloatWorkload, armnn::DataType::Float16>(DataLayout::NHWC);
}
@@ -601,26 +603,26 @@ static void ClPooling2dWorkloadTest(DataLayout dataLayout)
auto inputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
- BOOST_TEST((inputHandle->GetShape() == inputShape));
- BOOST_TEST((outputHandle->GetShape() == outputShape));
+ CHECK((inputHandle->GetShape() == inputShape));
+ CHECK((outputHandle->GetShape() == outputShape));
}
-BOOST_AUTO_TEST_CASE(CreatePooling2dFloatNchwWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreatePooling2dFloatNchwWorkload")
{
ClPooling2dWorkloadTest<armnn::DataType::Float32>(DataLayout::NCHW);
}
-BOOST_AUTO_TEST_CASE(CreatePooling2dFloatNhwcWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreatePooling2dFloatNhwcWorkload")
{
ClPooling2dWorkloadTest<armnn::DataType::Float32>(DataLayout::NHWC);
}
-BOOST_AUTO_TEST_CASE(CreatePooling2dFloat16NchwWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreatePooling2dFloat16NchwWorkload")
{
ClPooling2dWorkloadTest<armnn::DataType::Float16>(DataLayout::NCHW);
}
-BOOST_AUTO_TEST_CASE(CreatePooling2dFloat16NhwcWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreatePooling2dFloat16NhwcWorkload")
{
ClPooling2dWorkloadTest<armnn::DataType::Float16>(DataLayout::NHWC);
}
@@ -647,22 +649,22 @@ static void ClCreatePreluWorkloadTest(const armnn::TensorShape& inputShape,
auto alphaHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Inputs[1]);
auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
- BOOST_TEST((inputHandle->GetShape() == inputShape));
- BOOST_TEST((alphaHandle->GetShape() == alphaShape));
- BOOST_TEST((outputHandle->GetShape() == outputShape));
+ CHECK((inputHandle->GetShape() == inputShape));
+ CHECK((alphaHandle->GetShape() == alphaShape));
+ CHECK((outputHandle->GetShape() == outputShape));
}
-BOOST_AUTO_TEST_CASE(CreatePreluFloat16Workload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreatePreluFloat16Workload")
{
ClCreatePreluWorkloadTest({ 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 }, DataType::Float16);
}
-BOOST_AUTO_TEST_CASE(CreatePreluFloatWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreatePreluFloatWorkload")
{
ClCreatePreluWorkloadTest({ 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 }, DataType::Float32);
}
-BOOST_AUTO_TEST_CASE(CreatePreluUint8Workload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreatePreluUint8Workload")
{
ClCreatePreluWorkloadTest({ 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 }, DataType::QAsymmU8);
}
@@ -682,22 +684,22 @@ static void ClCreateReshapeWorkloadTest()
auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
auto predResult = CompareIClTensorHandleShape(inputHandle, {4, 1});
- BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+ CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
predResult = CompareIClTensorHandleShape(outputHandle, {1, 4});
- BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+ CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
}
-BOOST_AUTO_TEST_CASE(CreateReshapeFloatWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateReshapeFloatWorkload")
{
ClCreateReshapeWorkloadTest<armnn::DataType::Float32>();
}
-BOOST_AUTO_TEST_CASE(CreateReshapeFloat16Workload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateReshapeFloat16Workload")
{
ClCreateReshapeWorkloadTest<armnn::DataType::Float16>();
}
-BOOST_AUTO_TEST_CASE(CreateReshapeUint8Workload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateReshapeUint8Workload")
{
ClCreateReshapeWorkloadTest<armnn::DataType::QAsymmU8>();
}
@@ -729,28 +731,28 @@ static void ClSoftmaxWorkloadTest()
}
auto predResult = CompareIClTensorHandleShape(inputHandle, {4, 1});
- BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+ CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
predResult = CompareIClTensorHandleShape(outputHandle, {4, 1});
- BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+ CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
}
-BOOST_AUTO_TEST_CASE(CreateSoftmaxFloat32WorkloadTest)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateSoftmaxFloat32WorkloadTest")
{
ClSoftmaxWorkloadTest<ClSoftmaxWorkload, armnn::DataType::Float32>();
}
-BOOST_AUTO_TEST_CASE(CreateSoftmaxFloat16WorkloadTest)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateSoftmaxFloat16WorkloadTest")
{
ClSoftmaxWorkloadTest<ClSoftmaxWorkload, armnn::DataType::Float16>();
}
-BOOST_AUTO_TEST_CASE(CreateSoftmaxQAsymmU8Workload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateSoftmaxQAsymmU8Workload")
{
ClSoftmaxWorkloadTest<ClSoftmaxWorkload, armnn::DataType::QAsymmU8>();
}
-BOOST_AUTO_TEST_CASE(CreateSoftmaxQAsymmS8Workload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateSoftmaxQAsymmS8Workload")
{
ClSoftmaxWorkloadTest<ClSoftmaxWorkload, armnn::DataType::QAsymmS8>();
}
@@ -768,27 +770,27 @@ static void ClSplitterWorkloadTest()
SplitterQueueDescriptor queueDescriptor = workload->GetData();
auto inputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
auto predResult = CompareIClTensorHandleShape(inputHandle, {5, 7, 7});
- BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+ CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
auto outputHandle1 = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[1]);
predResult = CompareIClTensorHandleShape(outputHandle1, {2, 7, 7});
- BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+ CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
auto outputHandle2 = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[2]);
predResult = CompareIClTensorHandleShape(outputHandle2, {2, 7, 7});
- BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+ CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
auto outputHandle0 = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
predResult = CompareIClTensorHandleShape(outputHandle0, {1, 7, 7});
- BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+ CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
}
-BOOST_AUTO_TEST_CASE(CreateSplitterFloatWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateSplitterFloatWorkload")
{
ClSplitterWorkloadTest<armnn::DataType::Float32>();
}
-BOOST_AUTO_TEST_CASE(CreateSplitterFloat16Workload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateSplitterFloat16Workload")
{
ClSplitterWorkloadTest<armnn::DataType::Float16>();
}
@@ -819,35 +821,35 @@ static void ClSplitterConcatTest()
armnn::ClSubTensorHandle* mIn0 = dynamic_cast<armnn::ClSubTensorHandle*>(wlConcat->GetData().m_Inputs[0]);
armnn::ClSubTensorHandle* mIn1 = dynamic_cast<armnn::ClSubTensorHandle*>(wlConcat->GetData().m_Inputs[1]);
- BOOST_TEST(sOut0);
- BOOST_TEST(sOut1);
- BOOST_TEST(mIn0);
- BOOST_TEST(mIn1);
+ CHECK(sOut0);
+ CHECK(sOut1);
+ CHECK(mIn0);
+ CHECK(mIn1);
//Flipped order of inputs/outputs.
bool validDataPointers = (sOut0 == mIn1) && (sOut1 == mIn0);
- BOOST_TEST(validDataPointers);
+ CHECK(validDataPointers);
//Also make sure that the inputs are sub-tensors of one tensor and the outputs are sub-tensors of another tensor.
bool validSubTensorParents = (mIn0->GetTensor().parent() == mIn1->GetTensor().parent())
&& (sOut0->GetTensor().parent() == sOut1->GetTensor().parent());
- BOOST_TEST(validSubTensorParents);
+ CHECK(validSubTensorParents);
}
-BOOST_AUTO_TEST_CASE(CreateSplitterConcatFloatWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateSplitterConcatFloatWorkload")
{
ClSplitterConcatTest<armnn::DataType::Float32>();
}
-BOOST_AUTO_TEST_CASE(CreateSplitterConcatFloat16Workload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateSplitterConcatFloat16Workload")
{
ClSplitterConcatTest<armnn::DataType::Float16>();
}
-BOOST_AUTO_TEST_CASE(CreateSingleOutputMultipleInputs)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateSingleOutputMultipleInputs")
{
// Test that it is possible to assign multiple (two) different layers to each of the outputs of a splitter layer.
// We create a splitter with two outputs, and each of those outputs is used by two different activation layers.
@@ -875,24 +877,24 @@ BOOST_AUTO_TEST_CASE(CreateSingleOutputMultipleInputs)
armnn::ClSubTensorHandle* activ1_1Im = dynamic_cast<armnn::ClSubTensorHandle*>(wlActiv1_1->GetData().m_Inputs[0]);
- BOOST_TEST(sOut0);
- BOOST_TEST(sOut1);
- BOOST_TEST(activ0_0Im);
- BOOST_TEST(activ0_1Im);
- BOOST_TEST(activ1_0Im);
- BOOST_TEST(activ1_1Im);
+ CHECK(sOut0);
+ CHECK(sOut1);
+ CHECK(activ0_0Im);
+ CHECK(activ0_1Im);
+ CHECK(activ1_0Im);
+ CHECK(activ1_1Im);
bool validDataPointers = (sOut0 == activ0_0Im) && (sOut0 == activ0_1Im) &&
(sOut1 == activ1_0Im) && (sOut1 == activ1_1Im);
- BOOST_TEST(validDataPointers);
+ CHECK(validDataPointers);
}
#if defined(ARMNNREF_ENABLED)
// This test unit needs the reference backend; it's not available if the reference backend is not built
-BOOST_AUTO_TEST_CASE(CreateMemCopyWorkloadsCl)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateMemCopyWorkloadsCl")
{
ClWorkloadFactory factory =
ClWorkloadFactoryHelper::GetFactory(ClWorkloadFactoryHelper::GetMemoryManager());
@@ -922,26 +924,26 @@ static void ClL2NormalizationWorkloadTest(DataLayout dataLayout)
TensorShape outputShape = (dataLayout == DataLayout::NCHW) ? std::initializer_list<unsigned int>({ 5, 20, 50, 67 })
: std::initializer_list<unsigned int>({ 5, 50, 67, 20 });
- BOOST_TEST((inputHandle->GetShape() == inputShape));
- BOOST_TEST((outputHandle->GetShape() == outputShape));
+ CHECK((inputHandle->GetShape() == inputShape));
+ CHECK((outputHandle->GetShape() == outputShape));
}
-BOOST_AUTO_TEST_CASE(CreateL2NormalizationFloatNchwWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateL2NormalizationFloatNchwWorkload")
{
ClL2NormalizationWorkloadTest<ClL2NormalizationFloatWorkload, armnn::DataType::Float32>(DataLayout::NCHW);
}
-BOOST_AUTO_TEST_CASE(CreateL2NormalizationFloatNhwcWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateL2NormalizationFloatNhwcWorkload")
{
ClL2NormalizationWorkloadTest<ClL2NormalizationFloatWorkload, armnn::DataType::Float32>(DataLayout::NHWC);
}
-BOOST_AUTO_TEST_CASE(CreateL2NormalizationFloat16NchwWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateL2NormalizationFloat16NchwWorkload")
{
ClL2NormalizationWorkloadTest<ClL2NormalizationFloatWorkload, armnn::DataType::Float16>(DataLayout::NCHW);
}
-BOOST_AUTO_TEST_CASE(CreateL2NormalizationFloat16NhwcWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateL2NormalizationFloat16NhwcWorkload")
{
ClL2NormalizationWorkloadTest<ClL2NormalizationFloatWorkload, armnn::DataType::Float16>(DataLayout::NHWC);
}
@@ -961,12 +963,12 @@ static void ClCreateLogSoftmaxWorkloadTest()
auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
auto predResult = CompareIClTensorHandleShape(inputHandle, {4, 1});
- BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+ CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
predResult = CompareIClTensorHandleShape(outputHandle, {4, 1});
- BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+ CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
}
-BOOST_AUTO_TEST_CASE(CreateLogSoftmaxFloat32WorkloadTest)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateLogSoftmaxFloat32WorkloadTest")
{
ClCreateLogSoftmaxWorkloadTest<ClLogSoftmaxWorkload, armnn::DataType::Float32>();
}
@@ -984,12 +986,12 @@ static void ClCreateLstmWorkloadTest()
auto inputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[1]);
auto predResult = CompareIClTensorHandleShape(inputHandle, {2, 2});
- BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+ CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
predResult = CompareIClTensorHandleShape(outputHandle, {2, 4});
- BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+ CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
}
-BOOST_AUTO_TEST_CASE(CreateLSTMWorkloadFloatWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateLSTMWorkloadFloatWorkload")
{
ClCreateLstmWorkloadTest<ClLstmFloatWorkload>();
}
@@ -1013,44 +1015,44 @@ static void ClResizeWorkloadTest(DataLayout dataLayout)
{
case DataLayout::NHWC:
predResult = CompareIClTensorHandleShape(inputHandle, { 2, 4, 4, 3 });
- BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+ CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
predResult = CompareIClTensorHandleShape(outputHandle, { 2, 2, 2, 3 });
- BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+ CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
break;
default: // DataLayout::NCHW
predResult = CompareIClTensorHandleShape(inputHandle, { 2, 3, 4, 4 });
- BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+ CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
predResult = CompareIClTensorHandleShape(outputHandle, { 2, 3, 2, 2 });
- BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+ CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
}
}
-BOOST_AUTO_TEST_CASE(CreateResizeFloat32NchwWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateResizeFloat32NchwWorkload")
{
ClResizeWorkloadTest<ClResizeWorkload, armnn::DataType::Float32>(DataLayout::NCHW);
}
-BOOST_AUTO_TEST_CASE(CreateResizeFloat16NchwWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateResizeFloat16NchwWorkload")
{
ClResizeWorkloadTest<ClResizeWorkload, armnn::DataType::Float16>(DataLayout::NCHW);
}
-BOOST_AUTO_TEST_CASE(CreateResizeUint8NchwWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateResizeUint8NchwWorkload")
{
ClResizeWorkloadTest<ClResizeWorkload, armnn::DataType::QAsymmU8>(DataLayout::NCHW);
}
-BOOST_AUTO_TEST_CASE(CreateResizeFloat32NhwcWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateResizeFloat32NhwcWorkload")
{
ClResizeWorkloadTest<ClResizeWorkload, armnn::DataType::Float32>(DataLayout::NHWC);
}
-BOOST_AUTO_TEST_CASE(CreateResizeFloat16NhwcWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateResizeFloat16NhwcWorkload")
{
ClResizeWorkloadTest<ClResizeWorkload, armnn::DataType::Float16>(DataLayout::NHWC);
}
-BOOST_AUTO_TEST_CASE(CreateResizeUint8NhwcWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateResizeUint8NhwcWorkload")
{
ClResizeWorkloadTest<ClResizeWorkload, armnn::DataType::QAsymmU8>(DataLayout::NHWC);
}
@@ -1071,22 +1073,22 @@ static void ClMeanWorkloadTest()
// The first dimension (batch size) in both input and output is singular, thus it has been reduced by ACL.
auto predResult = CompareIClTensorHandleShape(inputHandle, { 1, 3, 7, 4 });
- BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+ CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
predResult = CompareIClTensorHandleShape(outputHandle, { 1, 4 });
- BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+ CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
}
-BOOST_AUTO_TEST_CASE(CreateMeanFloat32Workload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateMeanFloat32Workload")
{
ClMeanWorkloadTest<ClMeanWorkload, armnn::DataType::Float32>();
}
-BOOST_AUTO_TEST_CASE(CreateMeanFloat16Workload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateMeanFloat16Workload")
{
ClMeanWorkloadTest<ClMeanWorkload, armnn::DataType::Float16>();
}
-BOOST_AUTO_TEST_CASE(CreateMeanUint8Workload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateMeanUint8Workload")
{
ClMeanWorkloadTest<ClMeanWorkload, armnn::DataType::QAsymmU8>();
}
@@ -1107,39 +1109,39 @@ static void ClCreateConcatWorkloadTest(std::initializer_list<unsigned int> outpu
auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
auto predResult = CompareIClTensorHandleShape(inputHandle0, { 2, 3, 2, 5 });
- BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+ CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
predResult = CompareIClTensorHandleShape(inputHandle1, { 2, 3, 2, 5 });
- BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+ CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
predResult = CompareIClTensorHandleShape(outputHandle, outputShape);
- BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+ CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
}
-BOOST_AUTO_TEST_CASE(CreateConcatDim0Float32Workload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateConcatDim0Float32Workload")
{
ClCreateConcatWorkloadTest<ClConcatWorkload, armnn::DataType::Float32>({ 4, 3, 2, 5 }, 0);
}
-BOOST_AUTO_TEST_CASE(CreateConcatDim1Float32Workload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateConcatDim1Float32Workload")
{
ClCreateConcatWorkloadTest<ClConcatWorkload, armnn::DataType::Float32>({ 2, 6, 2, 5 }, 1);
}
-BOOST_AUTO_TEST_CASE(CreateConcatDim3Float32Workload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateConcatDim3Float32Workload")
{
ClCreateConcatWorkloadTest<ClConcatWorkload, armnn::DataType::Float32>({ 2, 3, 2, 10 }, 3);
}
-BOOST_AUTO_TEST_CASE(CreateConcatDim0Uint8Workload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateConcatDim0Uint8Workload")
{
ClCreateConcatWorkloadTest<ClConcatWorkload, armnn::DataType::QAsymmU8>({ 4, 3, 2, 5 }, 0);
}
-BOOST_AUTO_TEST_CASE(CreateConcatDim1Uint8Workload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateConcatDim1Uint8Workload")
{
ClCreateConcatWorkloadTest<ClConcatWorkload, armnn::DataType::QAsymmU8>({ 2, 6, 2, 5 }, 1);
}
-BOOST_AUTO_TEST_CASE(CreateConcatDim3Uint8Workload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateConcatDim3Uint8Workload")
{
ClCreateConcatWorkloadTest<ClConcatWorkload, armnn::DataType::QAsymmU8>({ 2, 3, 2, 10 }, 3);
}
@@ -1158,27 +1160,27 @@ static void ClSpaceToDepthWorkloadTest()
auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
auto predResult = CompareIClTensorHandleShape(inputHandle, { 1, 2, 2, 1 });
- BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+ CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
predResult = CompareIClTensorHandleShape(outputHandle, { 1, 1, 1, 4 });
- BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+ CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
}
-BOOST_AUTO_TEST_CASE(CreateSpaceToDepthFloat32Workload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateSpaceToDepthFloat32Workload")
{
ClSpaceToDepthWorkloadTest<ClSpaceToDepthWorkload, armnn::DataType::Float32>();
}
-BOOST_AUTO_TEST_CASE(CreateSpaceToDepthFloat16Workload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateSpaceToDepthFloat16Workload")
{
ClSpaceToDepthWorkloadTest<ClSpaceToDepthWorkload, armnn::DataType::Float16>();
}
-BOOST_AUTO_TEST_CASE(CreateSpaceToDepthQAsymm8Workload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateSpaceToDepthQAsymm8Workload")
{
ClSpaceToDepthWorkloadTest<ClSpaceToDepthWorkload, armnn::DataType::QAsymmU8>();
}
-BOOST_AUTO_TEST_CASE(CreateSpaceToDepthQSymm16Workload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateSpaceToDepthQSymm16Workload")
{
ClSpaceToDepthWorkloadTest<ClSpaceToDepthWorkload, armnn::DataType::QSymmS16>();
}
@@ -1206,24 +1208,24 @@ static void ClCreateStackWorkloadTest(const std::initializer_list<unsigned int>&
{
auto inputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Inputs[i]);
auto predResult1 = CompareIClTensorHandleShape(inputHandle, inputShape);
- BOOST_TEST(predResult1.m_Result, predResult1.m_Message.str());
+ CHECK_MESSAGE(predResult1.m_Result, predResult1.m_Message.str());
}
auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
auto predResult2 = CompareIClTensorHandleShape(outputHandle, outputShape);
- BOOST_TEST(predResult2.m_Result, predResult2.m_Message.str());
+ CHECK_MESSAGE(predResult2.m_Result, predResult2.m_Message.str());
}
-BOOST_AUTO_TEST_CASE(CreateStackFloat32Workload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateStackFloat32Workload")
{
ClCreateStackWorkloadTest<armnn::DataType::Float32>({ 3, 4, 5 }, { 3, 4, 2, 5 }, 2, 2);
}
-BOOST_AUTO_TEST_CASE(CreateStackFloat16Workload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateStackFloat16Workload")
{
ClCreateStackWorkloadTest<armnn::DataType::Float16>({ 3, 4, 5 }, { 3, 4, 2, 5 }, 2, 2);
}
-BOOST_AUTO_TEST_CASE(CreateStackUint8Workload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateStackUint8Workload")
{
ClCreateStackWorkloadTest<armnn::DataType::QAsymmU8>({ 3, 4, 5 }, { 3, 4, 2, 5 }, 2, 2);
}
@@ -1239,19 +1241,19 @@ static void ClCreateQLstmWorkloadTest()
QLstmQueueDescriptor queueDescriptor = workload->GetData();
IAclTensorHandle* inputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
- BOOST_TEST((inputHandle->GetShape() == TensorShape({2, 4})));
- BOOST_TEST((inputHandle->GetDataType() == arm_compute::DataType::QASYMM8_SIGNED));
+ CHECK((inputHandle->GetShape() == TensorShape({2, 4})));
+ CHECK((inputHandle->GetDataType() == arm_compute::DataType::QASYMM8_SIGNED));
IAclTensorHandle* cellStateOutHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[1]);
- BOOST_TEST((cellStateOutHandle->GetShape() == TensorShape({2, 4})));
- BOOST_TEST((cellStateOutHandle->GetDataType() == arm_compute::DataType::QSYMM16));
+ CHECK((cellStateOutHandle->GetShape() == TensorShape({2, 4})));
+ CHECK((cellStateOutHandle->GetDataType() == arm_compute::DataType::QSYMM16));
IAclTensorHandle* outputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[2]);
- BOOST_TEST((outputHandle->GetShape() == TensorShape({2, 4})));
- BOOST_TEST((outputHandle->GetDataType() == arm_compute::DataType::QASYMM8_SIGNED));
+ CHECK((outputHandle->GetShape() == TensorShape({2, 4})));
+ CHECK((outputHandle->GetDataType() == arm_compute::DataType::QASYMM8_SIGNED));
}
-BOOST_AUTO_TEST_CASE(CreateQLstmWorkloadTest)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateQLstmWorkloadTest")
{
ClCreateQLstmWorkloadTest<ClQLstmWorkload>();
}
@@ -1270,29 +1272,29 @@ static void ClCreateQuantizedLstmWorkloadTest()
QuantizedLstmQueueDescriptor queueDescriptor = workload->GetData();
IAclTensorHandle* inputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
- BOOST_TEST((inputHandle->GetShape() == TensorShape({2, 2})));
- BOOST_TEST((inputHandle->GetDataType() == arm_compute::DataType::QASYMM8));
+ CHECK((inputHandle->GetShape() == TensorShape({2, 2})));
+ CHECK((inputHandle->GetDataType() == arm_compute::DataType::QASYMM8));
IAclTensorHandle* cellStateInHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[1]);
- BOOST_TEST((cellStateInHandle->GetShape() == TensorShape({2, 4})));
- BOOST_TEST((cellStateInHandle->GetDataType() == arm_compute::DataType::QSYMM16));
+ CHECK((cellStateInHandle->GetShape() == TensorShape({2, 4})));
+ CHECK((cellStateInHandle->GetDataType() == arm_compute::DataType::QSYMM16));
IAclTensorHandle* outputStateInHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[2]);
- BOOST_TEST((outputStateInHandle->GetShape() == TensorShape({2, 4})));
- BOOST_TEST((outputStateInHandle->GetDataType() == arm_compute::DataType::QASYMM8));
+ CHECK((outputStateInHandle->GetShape() == TensorShape({2, 4})));
+ CHECK((outputStateInHandle->GetDataType() == arm_compute::DataType::QASYMM8));
IAclTensorHandle* cellStateOutHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
- BOOST_TEST((cellStateOutHandle->GetShape() == TensorShape({2, 4})));
- BOOST_TEST((cellStateOutHandle->GetDataType() == arm_compute::DataType::QSYMM16));
+ CHECK((cellStateOutHandle->GetShape() == TensorShape({2, 4})));
+ CHECK((cellStateOutHandle->GetDataType() == arm_compute::DataType::QSYMM16));
IAclTensorHandle* outputStateOutHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[1]);
- BOOST_TEST((outputStateOutHandle->GetShape() == TensorShape({2, 4})));
- BOOST_TEST((outputStateOutHandle->GetDataType() == arm_compute::DataType::QASYMM8));
+ CHECK((outputStateOutHandle->GetShape() == TensorShape({2, 4})));
+ CHECK((outputStateOutHandle->GetDataType() == arm_compute::DataType::QASYMM8));
}
-BOOST_AUTO_TEST_CASE(CreateQuantizedLstmWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateQuantizedLstmWorkload")
{
ClCreateQuantizedLstmWorkloadTest<ClQuantizedLstmWorkload>();
}
-BOOST_AUTO_TEST_SUITE_END()
+}
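
For reference, a minimal self-contained sketch of the doctest pattern this file now follows, assuming doctest 2.x. DummyContextFixture and the suite/case names below are hypothetical stand-ins for ClContextControlFixture and the real tests, not Arm NN code:

    // Build with a C++11 (or later) compiler and doctest on the include path.
    #define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN   // let doctest supply main() for this standalone sketch
    #include <doctest/doctest.h>

    #include <sstream>

    // Hypothetical stand-in for ClContextControlFixture: doctest constructs the
    // fixture before each TEST_CASE_FIXTURE body runs and destroys it afterwards.
    struct DummyContextFixture
    {
        DummyContextFixture() : m_ContextId(42) {}   // acquire a (fake) context
        ~DummyContextFixture() = default;            // release it
        int m_ContextId;
    };

    TEST_SUITE("CreateWorkloadSketch")               // replaces BOOST_FIXTURE_TEST_SUITE(...)
    {

    TEST_CASE_FIXTURE(DummyContextFixture, "FixtureMembersAreVisibleInTheCase")
    {
        // Plain boolean assertion: the doctest counterpart of BOOST_TEST(expr).
        CHECK(m_ContextId == 42);

        // Assertion with an explanatory message, as used above for the
        // CompareIClTensorHandleShape predicate results.
        std::ostringstream message;
        message << "unexpected context id: " << m_ContextId;
        CHECK_MESSAGE(m_ContextId == 42, message.str());
    }

    }   // closes TEST_SUITE, the counterpart of BOOST_AUTO_TEST_SUITE_END()

Boost's BOOST_FIXTURE_TEST_SUITE already applied the fixture to every case in the suite, so attaching the fixture to each TEST_CASE_FIXTURE individually, as this diff does, preserves the per-test setup and teardown behaviour.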