about summary refs log tree commit diff
path: root/src/backends/reference/test/RefCreateWorkloadTests.cpp
diff options
context:
space:
mode:
Diffstat (limited to 'src/backends/reference/test/RefCreateWorkloadTests.cpp')
-rw-r--r-- src/backends/reference/test/RefCreateWorkloadTests.cpp | 300
1 file changed, 151 insertions(+), 149 deletions(-)
diff --git a/src/backends/reference/test/RefCreateWorkloadTests.cpp b/src/backends/reference/test/RefCreateWorkloadTests.cpp
index 0f86e7eeff..4293ef54f3 100644
--- a/src/backends/reference/test/RefCreateWorkloadTests.cpp
+++ b/src/backends/reference/test/RefCreateWorkloadTests.cpp
@@ -10,6 +10,8 @@
#include <reference/RefWorkloadFactory.hpp>
#include <reference/workloads/RefWorkloads.hpp>
+#include <doctest/doctest.h>
+
namespace
{
@@ -19,8 +21,8 @@ void CheckInputOutput(std::unique_ptr<Workload> workload, const TensorInfo& inpu
auto queueDescriptor = workload->GetData();
auto inputHandle = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Inputs[0]);
auto outputHandle = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Outputs[0]);
- BOOST_TEST((inputHandle->GetTensorInfo() == inputInfo));
- BOOST_TEST((outputHandle->GetTensorInfo() == outputInfo));
+ CHECK((inputHandle->GetTensorInfo() == inputInfo));
+ CHECK((outputHandle->GetTensorInfo() == outputInfo));
}
template <typename Workload>
@@ -33,9 +35,9 @@ void CheckInputsOutput(std::unique_ptr<Workload> workload,
auto inputHandle0 = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Inputs[0]);
auto inputHandle1 = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Inputs[1]);
auto outputHandle = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Outputs[0]);
- BOOST_TEST((inputHandle0->GetTensorInfo() == inputInfo0));
- BOOST_TEST((inputHandle1->GetTensorInfo() == inputInfo1));
- BOOST_TEST((outputHandle->GetTensorInfo() == outputInfo));
+ CHECK((inputHandle0->GetTensorInfo() == inputInfo0));
+ CHECK((inputHandle1->GetTensorInfo() == inputInfo1));
+ CHECK((outputHandle->GetTensorInfo() == outputInfo));
}
armnn::RefWorkloadFactory GetFactory()
@@ -47,8 +49,8 @@ armnn::RefWorkloadFactory GetFactory()
}
-BOOST_AUTO_TEST_SUITE(CreateWorkloadRef)
-
+TEST_SUITE("CreateWorkloadRef")
+{
template <typename ActivationWorkloadType, armnn::DataType DataType>
static void RefCreateActivationWorkloadTest()
{
@@ -62,12 +64,12 @@ static void RefCreateActivationWorkloadTest()
TensorInfo({ 1, 1 }, DataType));
}
-BOOST_AUTO_TEST_CASE(CreateActivationFloat32Workload)
+TEST_CASE("CreateActivationFloat32Workload")
{
RefCreateActivationWorkloadTest<RefActivationWorkload, armnn::DataType::Float32>();
}
-BOOST_AUTO_TEST_CASE(CreateActivationUint8Workload)
+TEST_CASE("CreateActivationUint8Workload")
{
RefCreateActivationWorkloadTest<RefActivationWorkload, armnn::DataType::QAsymmU8>();
}
@@ -89,7 +91,7 @@ static void RefCreateElementwiseWorkloadTest()
TensorInfo({ 2, 3 }, DataType));
}
-BOOST_AUTO_TEST_CASE(CreateSubtractionWorkloadWithBlobTest)
+TEST_CASE("CreateSubtractionWorkloadWithBlobTest")
{
Graph graph;
RefWorkloadFactory factory = GetFactory();
@@ -106,7 +108,7 @@ BOOST_AUTO_TEST_CASE(CreateSubtractionWorkloadWithBlobTest)
TensorInfo({ 2, 3 }, DataType));
}
-BOOST_AUTO_TEST_CASE(CreateAdditionWorkloadWithBlobTest)
+TEST_CASE("CreateAdditionWorkloadWithBlobTest")
{
Graph graph;
RefWorkloadFactory factory = GetFactory();
@@ -122,7 +124,7 @@ BOOST_AUTO_TEST_CASE(CreateAdditionWorkloadWithBlobTest)
TensorInfo({ 2, 3 }, DataType));
}
-BOOST_AUTO_TEST_CASE(CreateMultiplicationWorkloadWithBlobTest)
+TEST_CASE("CreateMultiplicationWorkloadWithBlobTest")
{
Graph graph;
RefWorkloadFactory factory = GetFactory();
@@ -138,7 +140,7 @@ BOOST_AUTO_TEST_CASE(CreateMultiplicationWorkloadWithBlobTest)
TensorInfo({2, 3}, DataType));
}
-BOOST_AUTO_TEST_CASE(CreateAdditionFloatWorkload)
+TEST_CASE("CreateAdditionFloatWorkload")
{
RefCreateElementwiseWorkloadTest<RefAdditionWorkload<>,
AdditionQueueDescriptor,
@@ -146,7 +148,7 @@ BOOST_AUTO_TEST_CASE(CreateAdditionFloatWorkload)
armnn::DataType::Float32>();
}
-BOOST_AUTO_TEST_CASE(CreateAdditionUint8Workload)
+TEST_CASE("CreateAdditionUint8Workload")
{
RefCreateElementwiseWorkloadTest<RefAdditionWorkload<>,
AdditionQueueDescriptor,
@@ -154,7 +156,7 @@ BOOST_AUTO_TEST_CASE(CreateAdditionUint8Workload)
armnn::DataType::QAsymmU8>();
}
-BOOST_AUTO_TEST_CASE(CreateAdditionInt16Workload)
+TEST_CASE("CreateAdditionInt16Workload")
{
RefCreateElementwiseWorkloadTest<RefAdditionWorkload<>,
AdditionQueueDescriptor,
@@ -162,7 +164,7 @@ BOOST_AUTO_TEST_CASE(CreateAdditionInt16Workload)
armnn::DataType::QSymmS16>();
}
-BOOST_AUTO_TEST_CASE(CreateAdditionInt32Workload)
+TEST_CASE("CreateAdditionInt32Workload")
{
RefCreateElementwiseWorkloadTest<RefAdditionWorkload<int32_t>,
AdditionQueueDescriptor,
@@ -170,7 +172,7 @@ BOOST_AUTO_TEST_CASE(CreateAdditionInt32Workload)
armnn::DataType::Signed32>();
}
-BOOST_AUTO_TEST_CASE(CreateSubtractionFloat32Workload)
+TEST_CASE("CreateSubtractionFloat32Workload")
{
RefCreateElementwiseWorkloadTest<RefSubtractionWorkload<>,
SubtractionQueueDescriptor,
@@ -178,7 +180,7 @@ BOOST_AUTO_TEST_CASE(CreateSubtractionFloat32Workload)
armnn::DataType::Float32>();
}
-BOOST_AUTO_TEST_CASE(CreateSubtractionFloat16Workload)
+TEST_CASE("CreateSubtractionFloat16Workload")
{
RefCreateElementwiseWorkloadTest<RefSubtractionWorkload<>,
SubtractionQueueDescriptor,
@@ -186,7 +188,7 @@ BOOST_AUTO_TEST_CASE(CreateSubtractionFloat16Workload)
armnn::DataType::Float16>();
}
-BOOST_AUTO_TEST_CASE(CreateSubtractionUint8Workload)
+TEST_CASE("CreateSubtractionUint8Workload")
{
RefCreateElementwiseWorkloadTest<RefSubtractionWorkload<>,
SubtractionQueueDescriptor,
@@ -194,7 +196,7 @@ BOOST_AUTO_TEST_CASE(CreateSubtractionUint8Workload)
armnn::DataType::QAsymmU8>();
}
-BOOST_AUTO_TEST_CASE(CreateSubtractionInt16Workload)
+TEST_CASE("CreateSubtractionInt16Workload")
{
RefCreateElementwiseWorkloadTest<RefSubtractionWorkload<>,
SubtractionQueueDescriptor,
@@ -202,7 +204,7 @@ BOOST_AUTO_TEST_CASE(CreateSubtractionInt16Workload)
armnn::DataType::QSymmS16>();
}
-BOOST_AUTO_TEST_CASE(CreateSubtractionInt32Workload)
+TEST_CASE("CreateSubtractionInt32Workload")
{
RefCreateElementwiseWorkloadTest<RefSubtractionWorkload<int32_t>,
SubtractionQueueDescriptor,
@@ -210,7 +212,7 @@ BOOST_AUTO_TEST_CASE(CreateSubtractionInt32Workload)
armnn::DataType::Signed32>();
}
-BOOST_AUTO_TEST_CASE(CreateMultiplicationFloatWorkload)
+TEST_CASE("CreateMultiplicationFloatWorkload")
{
RefCreateElementwiseWorkloadTest<RefMultiplicationWorkload<>,
MultiplicationQueueDescriptor,
@@ -218,7 +220,7 @@ BOOST_AUTO_TEST_CASE(CreateMultiplicationFloatWorkload)
armnn::DataType::Float32>();
}
-BOOST_AUTO_TEST_CASE(CreateMultiplicationUint8Workload)
+TEST_CASE("CreateMultiplicationUint8Workload")
{
RefCreateElementwiseWorkloadTest<RefMultiplicationWorkload<>,
MultiplicationQueueDescriptor,
@@ -226,7 +228,7 @@ BOOST_AUTO_TEST_CASE(CreateMultiplicationUint8Workload)
armnn::DataType::QAsymmU8>();
}
-BOOST_AUTO_TEST_CASE(CreateMultiplicationInt16Workload)
+TEST_CASE("CreateMultiplicationInt16Workload")
{
RefCreateElementwiseWorkloadTest<RefMultiplicationWorkload<>,
MultiplicationQueueDescriptor,
@@ -234,7 +236,7 @@ BOOST_AUTO_TEST_CASE(CreateMultiplicationInt16Workload)
armnn::DataType::QSymmS16>();
}
-BOOST_AUTO_TEST_CASE(CreateMultiplicationInt32Workload)
+TEST_CASE("CreateMultiplicationInt32Workload")
{
RefCreateElementwiseWorkloadTest<RefMultiplicationWorkload<int32_t>,
MultiplicationQueueDescriptor,
@@ -242,7 +244,7 @@ BOOST_AUTO_TEST_CASE(CreateMultiplicationInt32Workload)
armnn::DataType::Signed32>();
}
-BOOST_AUTO_TEST_CASE(CreateDivisionFloat32Workload)
+TEST_CASE("CreateDivisionFloat32Workload")
{
RefCreateElementwiseWorkloadTest<RefDivisionWorkload<>,
DivisionQueueDescriptor,
@@ -250,7 +252,7 @@ BOOST_AUTO_TEST_CASE(CreateDivisionFloat32Workload)
armnn::DataType::Float32>();
}
-BOOST_AUTO_TEST_CASE(CreateDivisionFloat16Workload)
+TEST_CASE("CreateDivisionFloat16Workload")
{
RefCreateElementwiseWorkloadTest<RefDivisionWorkload<>,
DivisionQueueDescriptor,
@@ -258,7 +260,7 @@ BOOST_AUTO_TEST_CASE(CreateDivisionFloat16Workload)
armnn::DataType::Float16>();
}
-BOOST_AUTO_TEST_CASE(CreateDivisionUint8Workload)
+TEST_CASE("CreateDivisionUint8Workload")
{
RefCreateElementwiseWorkloadTest<RefDivisionWorkload<>,
DivisionQueueDescriptor,
@@ -266,7 +268,7 @@ BOOST_AUTO_TEST_CASE(CreateDivisionUint8Workload)
armnn::DataType::QAsymmU8>();
}
-BOOST_AUTO_TEST_CASE(CreateDivisionInt16Workload)
+TEST_CASE("CreateDivisionInt16Workload")
{
RefCreateElementwiseWorkloadTest<RefDivisionWorkload<>,
DivisionQueueDescriptor,
@@ -274,7 +276,7 @@ BOOST_AUTO_TEST_CASE(CreateDivisionInt16Workload)
armnn::DataType::QSymmS16>();
}
-BOOST_AUTO_TEST_CASE(CreateDivisionInt32Workload)
+TEST_CASE("CreateDivisionInt32Workload")
{
RefCreateElementwiseWorkloadTest<RefDivisionWorkload<int32_t>,
DivisionQueueDescriptor,
@@ -311,7 +313,7 @@ static void RefCreateBatchNormalizationWorkloadTest(DataLayout dataLayout)
CheckInputOutput(std::move(workload), TensorInfo(inputShape, DataType), TensorInfo(outputShape, DataType));
}
-BOOST_AUTO_TEST_CASE(CreateBatchNormalizationWithBlobFloat32Workload)
+TEST_CASE("CreateBatchNormalizationWithBlobFloat32Workload")
{
Graph graph;
RefWorkloadFactory factory = GetFactory();
@@ -329,55 +331,55 @@ BOOST_AUTO_TEST_CASE(CreateBatchNormalizationWithBlobFloat32Workload)
CheckInputOutput(std::move(workload), TensorInfo(inputShape, dataType), TensorInfo(outputShape, dataType));
}
-BOOST_AUTO_TEST_CASE(CreateBatchNormalizationFloat32Workload)
+TEST_CASE("CreateBatchNormalizationFloat32Workload")
{
RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationWorkload,armnn::DataType::Float32>
(DataLayout::NCHW);
}
-BOOST_AUTO_TEST_CASE(CreateBatchNormalizationFloat32WorkloadNhwc)
+TEST_CASE("CreateBatchNormalizationFloat32WorkloadNhwc")
{
RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationWorkload, armnn::DataType::Float32>
(DataLayout::NHWC);
}
-BOOST_AUTO_TEST_CASE(CreateBatchNormalizationFloat16Workload)
+TEST_CASE("CreateBatchNormalizationFloat16Workload")
{
RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationWorkload,armnn::DataType::Float16>
(DataLayout::NCHW);
}
-BOOST_AUTO_TEST_CASE(CreateBatchNormalizationFloat16WorkloadNhwc)
+TEST_CASE("CreateBatchNormalizationFloat16WorkloadNhwc")
{
RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationWorkload, armnn::DataType::Float16>
(DataLayout::NHWC);
}
-BOOST_AUTO_TEST_CASE(CreateBatchNormalizationUint8Workload)
+TEST_CASE("CreateBatchNormalizationUint8Workload")
{
RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationWorkload, armnn::DataType::QAsymmU8>
(DataLayout::NCHW);
}
-BOOST_AUTO_TEST_CASE(CreateBatchNormalizationUint8WorkloadNhwc)
+TEST_CASE("CreateBatchNormalizationUint8WorkloadNhwc")
{
RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationWorkload, armnn::DataType::QAsymmU8>
(DataLayout::NHWC);
}
-BOOST_AUTO_TEST_CASE(CreateBatchNormalizationInt16Workload)
+TEST_CASE("CreateBatchNormalizationInt16Workload")
{
RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationWorkload, armnn::DataType::QSymmS16>
(DataLayout::NCHW);
}
-BOOST_AUTO_TEST_CASE(CreateBatchNormalizationInt16WorkloadNhwc)
+TEST_CASE("CreateBatchNormalizationInt16WorkloadNhwc")
{
RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationWorkload, armnn::DataType::QSymmS16>
(DataLayout::NHWC);
}
-BOOST_AUTO_TEST_CASE(CreateConvertFp16ToFp32Float32Workload)
+TEST_CASE("CreateConvertFp16ToFp32Float32Workload")
{
Graph graph;
RefWorkloadFactory factory = GetFactory();
@@ -388,7 +390,7 @@ BOOST_AUTO_TEST_CASE(CreateConvertFp16ToFp32Float32Workload)
std::move(workload), TensorInfo({1, 3, 2, 3}, DataType::Float16), TensorInfo({1, 3, 2, 3}, DataType::Float32));
}
-BOOST_AUTO_TEST_CASE(CreateConvertFp32ToFp16Float16Workload)
+TEST_CASE("CreateConvertFp32ToFp16Float16Workload")
{
Graph graph;
RefWorkloadFactory factory = GetFactory();
@@ -417,17 +419,17 @@ static void RefCreateConvolution2dWorkloadTest(DataLayout dataLayout = DataLayou
TensorInfo(outputShape, DataType::Float32));
}
-BOOST_AUTO_TEST_CASE(CreateConvolution2dFloatNchwWorkload)
+TEST_CASE("CreateConvolution2dFloatNchwWorkload")
{
RefCreateConvolution2dWorkloadTest(DataLayout::NCHW);
}
-BOOST_AUTO_TEST_CASE(CreateConvolution2dFloatNhwcWorkload)
+TEST_CASE("CreateConvolution2dFloatNhwcWorkload")
{
RefCreateConvolution2dWorkloadTest(DataLayout::NHWC);
}
-BOOST_AUTO_TEST_CASE(CreateConvolution2dWithBlobWorkload)
+TEST_CASE("CreateConvolution2dWithBlobWorkload")
{
DataLayout dataLayout = DataLayout::NHWC;
Graph graph;
@@ -464,12 +466,12 @@ static void RefCreateDepthwiseConvolutionWorkloadTest(DataLayout dataLayout)
TensorInfo(outputShape, DataType::Float32));
}
-BOOST_AUTO_TEST_CASE(CreateDepthwiseConvolutionFloat32NhwcWorkload)
+TEST_CASE("CreateDepthwiseConvolutionFloat32NhwcWorkload")
{
RefCreateDepthwiseConvolutionWorkloadTest(DataLayout::NHWC);
}
-BOOST_AUTO_TEST_CASE(RefCreateFullyConnectedWithBlobWorkloadTest)
+TEST_CASE("RefCreateFullyConnectedWithBlobWorkloadTest")
{
Graph graph;
RefWorkloadFactory factory = GetFactory();
@@ -499,17 +501,17 @@ static void RefCreateFullyConnectedWorkloadTest()
TensorInfo({ 3, 7 }, DataType, outputQScale));
}
-BOOST_AUTO_TEST_CASE(CreateFullyConnectedWorkloadFloat32)
+TEST_CASE("CreateFullyConnectedWorkloadFloat32")
{
RefCreateFullyConnectedWorkloadTest<RefFullyConnectedWorkload, armnn::DataType::Float32>();
}
-BOOST_AUTO_TEST_CASE(CreateFullyConnectedWorkloadQuantisedAsymm8)
+TEST_CASE("CreateFullyConnectedWorkloadQuantisedAsymm8")
{
RefCreateFullyConnectedWorkloadTest<RefFullyConnectedWorkload, armnn::DataType::QAsymmU8>();
}
-BOOST_AUTO_TEST_CASE(CreateFullyConnectedWorkloadQuantisedSymm16)
+TEST_CASE("CreateFullyConnectedWorkloadQuantisedSymm16")
{
RefCreateFullyConnectedWorkloadTest<RefFullyConnectedWorkload, armnn::DataType::QSymmS16>();
}
@@ -541,32 +543,32 @@ static void RefCreateNormalizationWorkloadTest(DataLayout dataLayout)
CheckInputOutput(std::move(workload), TensorInfo(inputShape, DataType), TensorInfo(outputShape, DataType));
}
-BOOST_AUTO_TEST_CASE(CreateRefNormalizationFloat32NchwWorkload)
+TEST_CASE("CreateRefNormalizationFloat32NchwWorkload")
{
RefCreateNormalizationWorkloadTest<RefNormalizationWorkload, armnn::DataType::Float32>(DataLayout::NCHW);
}
-BOOST_AUTO_TEST_CASE(CreateRefNormalizationFloat32NhwcWorkload)
+TEST_CASE("CreateRefNormalizationFloat32NhwcWorkload")
{
RefCreateNormalizationWorkloadTest<RefNormalizationWorkload, armnn::DataType::Float32>(DataLayout::NHWC);
}
-BOOST_AUTO_TEST_CASE(CreateRefNormalizationUint8NchwWorkload)
+TEST_CASE("CreateRefNormalizationUint8NchwWorkload")
{
RefCreateNormalizationWorkloadTest<RefNormalizationWorkload, armnn::DataType::QAsymmU8>(DataLayout::NCHW);
}
-BOOST_AUTO_TEST_CASE(CreateRefNormalizationUint8NhwcWorkload)
+TEST_CASE("CreateRefNormalizationUint8NhwcWorkload")
{
RefCreateNormalizationWorkloadTest<RefNormalizationWorkload, armnn::DataType::QAsymmU8>(DataLayout::NHWC);
}
-BOOST_AUTO_TEST_CASE(CreateRefNormalizationInt16NchwWorkload)
+TEST_CASE("CreateRefNormalizationInt16NchwWorkload")
{
RefCreateNormalizationWorkloadTest<RefNormalizationWorkload, armnn::DataType::QSymmS16>(DataLayout::NCHW);
}
-BOOST_AUTO_TEST_CASE(CreateRefNormalizationInt16NhwcWorkload)
+TEST_CASE("CreateRefNormalizationInt16NhwcWorkload")
{
RefCreateNormalizationWorkloadTest<RefNormalizationWorkload, armnn::DataType::QSymmS16>(DataLayout::NHWC);
}
@@ -599,32 +601,32 @@ static void RefCreatePooling2dWorkloadTest(DataLayout dataLayout)
TensorInfo(outputShape, DataType));
}
-BOOST_AUTO_TEST_CASE(CreatePooling2dFloat32Workload)
+TEST_CASE("CreatePooling2dFloat32Workload")
{
RefCreatePooling2dWorkloadTest<RefPooling2dWorkload, armnn::DataType::Float32>(DataLayout::NCHW);
}
-BOOST_AUTO_TEST_CASE(CreatePooling2dFloat32NhwcWorkload)
+TEST_CASE("CreatePooling2dFloat32NhwcWorkload")
{
RefCreatePooling2dWorkloadTest<RefPooling2dWorkload, armnn::DataType::Float32>(DataLayout::NHWC);
}
-BOOST_AUTO_TEST_CASE(CreatePooling2dUint8Workload)
+TEST_CASE("CreatePooling2dUint8Workload")
{
RefCreatePooling2dWorkloadTest<RefPooling2dWorkload, armnn::DataType::QAsymmU8>(DataLayout::NCHW);
}
-BOOST_AUTO_TEST_CASE(CreatePooling2dUint8NhwcWorkload)
+TEST_CASE("CreatePooling2dUint8NhwcWorkload")
{
RefCreatePooling2dWorkloadTest<RefPooling2dWorkload, armnn::DataType::QAsymmU8>(DataLayout::NHWC);
}
-BOOST_AUTO_TEST_CASE(CreatePooling2dInt16Workload)
+TEST_CASE("CreatePooling2dInt16Workload")
{
RefCreatePooling2dWorkloadTest<RefPooling2dWorkload, armnn::DataType::QSymmS16>(DataLayout::NCHW);
}
-BOOST_AUTO_TEST_CASE(CreatePooling2dInt16NhwcWorkload)
+TEST_CASE("CreatePooling2dInt16NhwcWorkload")
{
RefCreatePooling2dWorkloadTest<RefPooling2dWorkload, armnn::DataType::QSymmS16>(DataLayout::NHWC);
}
@@ -655,22 +657,22 @@ static void RefCreateSoftmaxWorkloadTest()
tensorInfo);
}
-BOOST_AUTO_TEST_CASE(CreateSoftmaxFloat32Workload)
+TEST_CASE("CreateSoftmaxFloat32Workload")
{
RefCreateSoftmaxWorkloadTest<RefSoftmaxWorkload, armnn::DataType::Float32>();
}
-BOOST_AUTO_TEST_CASE(CreateSoftmaxFloat16Workload)
+TEST_CASE("CreateSoftmaxFloat16Workload")
{
RefCreateSoftmaxWorkloadTest<RefSoftmaxWorkload, armnn::DataType::Float16>();
}
-BOOST_AUTO_TEST_CASE(CreateSoftmaxQuantisedAsymm8Workload)
+TEST_CASE("CreateSoftmaxQuantisedAsymm8Workload")
{
RefCreateSoftmaxWorkloadTest<RefSoftmaxWorkload, armnn::DataType::QAsymmU8>();
}
-BOOST_AUTO_TEST_CASE(CreateSoftmaxQuantisedSymm16Workload)
+TEST_CASE("CreateSoftmaxQuantisedSymm16Workload")
{
RefCreateSoftmaxWorkloadTest<RefSoftmaxWorkload, armnn::DataType::QSymmS16>();
}
@@ -685,29 +687,29 @@ static void RefCreateSplitterWorkloadTest()
// Checks that outputs are as we expect them (see definition of CreateSplitterWorkloadTest).
SplitterQueueDescriptor queueDescriptor = workload->GetData();
auto inputHandle = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Inputs[0]);
- BOOST_TEST((inputHandle->GetTensorInfo() == TensorInfo({ 5, 7, 7 }, DataType)));
+ CHECK((inputHandle->GetTensorInfo() == TensorInfo({ 5, 7, 7 }, DataType)));
auto outputHandle0 = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Outputs[0]);
- BOOST_TEST((outputHandle0->GetTensorInfo() == TensorInfo({ 1, 7, 7 }, DataType)));
+ CHECK((outputHandle0->GetTensorInfo() == TensorInfo({ 1, 7, 7 }, DataType)));
auto outputHandle1 = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Outputs[1]);
- BOOST_TEST((outputHandle1->GetTensorInfo() == TensorInfo({ 2, 7, 7 }, DataType)));
+ CHECK((outputHandle1->GetTensorInfo() == TensorInfo({ 2, 7, 7 }, DataType)));
auto outputHandle2 = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Outputs[2]);
- BOOST_TEST((outputHandle2->GetTensorInfo() == TensorInfo({ 2, 7, 7 }, DataType)));
+ CHECK((outputHandle2->GetTensorInfo() == TensorInfo({ 2, 7, 7 }, DataType)));
}
-BOOST_AUTO_TEST_CASE(CreateSplitterFloat32Workload)
+TEST_CASE("CreateSplitterFloat32Workload")
{
RefCreateSplitterWorkloadTest<RefSplitterWorkload, armnn::DataType::Float32>();
}
-BOOST_AUTO_TEST_CASE(CreateSplitterFloat16Workload)
+TEST_CASE("CreateSplitterFloat16Workload")
{
RefCreateSplitterWorkloadTest<RefSplitterWorkload, armnn::DataType::Float16>();
}
-BOOST_AUTO_TEST_CASE(CreateSplitterUint8Workload)
+TEST_CASE("CreateSplitterUint8Workload")
{
RefCreateSplitterWorkloadTest<RefSplitterWorkload, armnn::DataType::QAsymmU8>();
}
@@ -735,27 +737,27 @@ static void RefCreateSplitterConcatWorkloadTest()
armnn::RefTensorHandle* mIn0 = dynamic_cast<armnn::RefTensorHandle*>(wlConcat->GetData().m_Inputs[0]);
armnn::RefTensorHandle* mIn1 = dynamic_cast<armnn::RefTensorHandle*>(wlConcat->GetData().m_Inputs[1]);
- BOOST_TEST(sOut0);
- BOOST_TEST(sOut1);
- BOOST_TEST(mIn0);
- BOOST_TEST(mIn1);
+ CHECK(sOut0);
+ CHECK(sOut1);
+ CHECK(mIn0);
+ CHECK(mIn1);
bool validDataPointers = (sOut0 == mIn1) && (sOut1 == mIn0);
- BOOST_TEST(validDataPointers);
+ CHECK(validDataPointers);
}
-BOOST_AUTO_TEST_CASE(CreateSplitterConcatFloat32)
+TEST_CASE("CreateSplitterConcatFloat32")
{
RefCreateSplitterConcatWorkloadTest<RefSplitterWorkload, RefConcatWorkload, DataType::Float32>();
}
-BOOST_AUTO_TEST_CASE(CreateSplitterConcatFloat16)
+TEST_CASE("CreateSplitterConcatFloat16")
{
RefCreateSplitterConcatWorkloadTest<RefSplitterWorkload, RefConcatWorkload, DataType::Float16>();
}
-BOOST_AUTO_TEST_CASE(CreateSplitterConcatUint8)
+TEST_CASE("CreateSplitterConcatUint8")
{
RefCreateSplitterConcatWorkloadTest<RefSplitterWorkload, RefConcatWorkload, DataType::QAsymmU8>();
}
@@ -785,26 +787,26 @@ static void RefCreateSingleOutputMultipleInputsTest()
armnn::RefTensorHandle* activ1_1Im = dynamic_cast<armnn::RefTensorHandle*>(wlActiv1_1->GetData().m_Inputs[0]);
- BOOST_TEST(sOut0);
- BOOST_TEST(sOut1);
- BOOST_TEST(activ0_0Im);
- BOOST_TEST(activ0_1Im);
- BOOST_TEST(activ1_0Im);
- BOOST_TEST(activ1_1Im);
+ CHECK(sOut0);
+ CHECK(sOut1);
+ CHECK(activ0_0Im);
+ CHECK(activ0_1Im);
+ CHECK(activ1_0Im);
+ CHECK(activ1_1Im);
bool validDataPointers = (sOut0 == activ0_0Im) && (sOut0 == activ0_1Im) &&
(sOut1 == activ1_0Im) && (sOut1 == activ1_1Im);
- BOOST_TEST(validDataPointers);
+ CHECK(validDataPointers);
}
-BOOST_AUTO_TEST_CASE(CreateSingleOutputMultipleInputsFloat32)
+TEST_CASE("CreateSingleOutputMultipleInputsFloat32")
{
RefCreateSingleOutputMultipleInputsTest<RefSplitterWorkload, RefActivationWorkload,
armnn::DataType::Float32>();
}
-BOOST_AUTO_TEST_CASE(CreateSingleOutputMultipleInputsUint8)
+TEST_CASE("CreateSingleOutputMultipleInputsUint8")
{
RefCreateSingleOutputMultipleInputsTest<RefSplitterWorkload, RefActivationWorkload,
armnn::DataType::QAsymmU8>();
@@ -838,27 +840,27 @@ static void RefCreateResizeBilinearTest(DataLayout dataLayout)
TensorInfo(outputShape, DataType));
}
-BOOST_AUTO_TEST_CASE(CreateResizeBilinearFloat32)
+TEST_CASE("CreateResizeBilinearFloat32")
{
RefCreateResizeBilinearTest<RefResizeWorkload, armnn::DataType::Float32>(DataLayout::NCHW);
}
-BOOST_AUTO_TEST_CASE(CreateResizeBilinearFloat16)
+TEST_CASE("CreateResizeBilinearFloat16")
{
RefCreateResizeBilinearTest<RefResizeWorkload, armnn::DataType::Float16>(DataLayout::NCHW);
}
-BOOST_AUTO_TEST_CASE(CreateResizeBilinearUint8)
+TEST_CASE("CreateResizeBilinearUint8")
{
RefCreateResizeBilinearTest<RefResizeWorkload, armnn::DataType::QAsymmU8>(DataLayout::NCHW);
}
-BOOST_AUTO_TEST_CASE(CreateResizeBilinearQuantisedAsymm16)
+TEST_CASE("CreateResizeBilinearQuantisedAsymm16")
{
RefCreateResizeBilinearTest<RefResizeWorkload, armnn::DataType::QSymmS16>(DataLayout::NCHW);
}
-BOOST_AUTO_TEST_CASE(CreateResizeBilinearFloat32Nhwc)
+TEST_CASE("CreateResizeBilinearFloat32Nhwc")
{
RefCreateResizeBilinearTest<RefResizeWorkload, armnn::DataType::Float32>(DataLayout::NHWC);
}
@@ -876,22 +878,22 @@ static void RefCreateBatchToSpaceNdTest()
TensorInfo({ 1, 1, 1, 1 }, DataType));
}
-BOOST_AUTO_TEST_CASE(CreateBatchToSpaceNdFloat32)
+TEST_CASE("CreateBatchToSpaceNdFloat32")
{
RefCreateBatchToSpaceNdTest<RefBatchToSpaceNdWorkload, armnn::DataType::Float32>();
}
-BOOST_AUTO_TEST_CASE(CreateBatchToSpaceNdFloat16)
+TEST_CASE("CreateBatchToSpaceNdFloat16")
{
RefCreateBatchToSpaceNdTest<RefBatchToSpaceNdWorkload, armnn::DataType::Float16>();
}
-BOOST_AUTO_TEST_CASE(CreateBatchToSpaceNdUint8)
+TEST_CASE("CreateBatchToSpaceNdUint8")
{
RefCreateBatchToSpaceNdTest<RefBatchToSpaceNdWorkload, armnn::DataType::QAsymmU8>();
}
-BOOST_AUTO_TEST_CASE(CreateBatchToSpaceNdQSymm16)
+TEST_CASE("CreateBatchToSpaceNdQSymm16")
{
RefCreateBatchToSpaceNdTest<RefBatchToSpaceNdWorkload, armnn::DataType::QSymmS16>();
}
@@ -924,32 +926,32 @@ static void RefCreateL2NormalizationTest(DataLayout dataLayout)
CheckInputOutput(std::move(workload), TensorInfo(inputShape, DataType), TensorInfo(outputShape, DataType));
}
-BOOST_AUTO_TEST_CASE(CreateL2NormalizationFloat32)
+TEST_CASE("CreateL2NormalizationFloat32")
{
RefCreateL2NormalizationTest<RefL2NormalizationWorkload, armnn::DataType::Float32>(DataLayout::NCHW);
}
-BOOST_AUTO_TEST_CASE(CreateL2NormalizationFloat32Nhwc)
+TEST_CASE("CreateL2NormalizationFloat32Nhwc")
{
RefCreateL2NormalizationTest<RefL2NormalizationWorkload, armnn::DataType::Float32>(DataLayout::NHWC);
}
-BOOST_AUTO_TEST_CASE(CreateL2NormalizationInt16)
+TEST_CASE("CreateL2NormalizationInt16")
{
RefCreateL2NormalizationTest<RefL2NormalizationWorkload, armnn::DataType::QSymmS16>(DataLayout::NCHW);
}
-BOOST_AUTO_TEST_CASE(CreateL2NormalizationInt16Nhwc)
+TEST_CASE("CreateL2NormalizationInt16Nhwc")
{
RefCreateL2NormalizationTest<RefL2NormalizationWorkload, armnn::DataType::QSymmS16>(DataLayout::NHWC);
}
-BOOST_AUTO_TEST_CASE(CreateL2NormalizationUint8)
+TEST_CASE("CreateL2NormalizationUint8")
{
RefCreateL2NormalizationTest<RefL2NormalizationWorkload, armnn::DataType::QAsymmU8>(DataLayout::NCHW);
}
-BOOST_AUTO_TEST_CASE(CreateL2NormalizationUint8Nhwc)
+TEST_CASE("CreateL2NormalizationUint8Nhwc")
{
RefCreateL2NormalizationTest<RefL2NormalizationWorkload, armnn::DataType::QAsymmU8>(DataLayout::NHWC);
}
@@ -968,17 +970,17 @@ static void RefCreateReshapeWorkloadTest()
TensorInfo({ 1, 4 }, DataType));
}
-BOOST_AUTO_TEST_CASE(CreateReshapeWorkloadFloat32)
+TEST_CASE("CreateReshapeWorkloadFloat32")
{
RefCreateReshapeWorkloadTest<RefReshapeWorkload, armnn::DataType::Float32>();
}
-BOOST_AUTO_TEST_CASE(CreateReshapeWorkloadQuantisedAsymm8)
+TEST_CASE("CreateReshapeWorkloadQuantisedAsymm8")
{
RefCreateReshapeWorkloadTest<RefReshapeWorkload, armnn::DataType::QAsymmU8>();
}
-BOOST_AUTO_TEST_CASE(CreateReshapeWorkloadQuantisedSymm16)
+TEST_CASE("CreateReshapeWorkloadQuantisedSymm16")
{
RefCreateReshapeWorkloadTest<RefReshapeWorkload, armnn::DataType::QSymmS16>();
}
@@ -997,52 +999,52 @@ static void RefCreateConcatWorkloadTest(const armnn::TensorShape& outputShape,
TensorInfo(outputShape, DataType));
}
-BOOST_AUTO_TEST_CASE(CreateConcatDim0Float32Workload)
+TEST_CASE("CreateConcatDim0Float32Workload")
{
RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::Float32>({ 4, 3, 2, 5 }, 0);
}
-BOOST_AUTO_TEST_CASE(CreateConcatDim0Float16Workload)
+TEST_CASE("CreateConcatDim0Float16Workload")
{
RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::Float16>({ 4, 3, 2, 5 }, 0);
}
-BOOST_AUTO_TEST_CASE(CreateConcatDim0Uint8Workload)
+TEST_CASE("CreateConcatDim0Uint8Workload")
{
RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::QAsymmU8>({ 4, 3, 2, 5 }, 0);
}
-BOOST_AUTO_TEST_CASE(CreateConcatDim0Uint16Workload)
+TEST_CASE("CreateConcatDim0Uint16Workload")
{
RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::QSymmS16>({ 4, 3, 2, 5 }, 0);
}
-BOOST_AUTO_TEST_CASE(CreateConcatDim1Float32Workload)
+TEST_CASE("CreateConcatDim1Float32Workload")
{
RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::Float32>({ 2, 6, 2, 5 }, 1);
}
-BOOST_AUTO_TEST_CASE(CreateConcatDim1Uint8Workload)
+TEST_CASE("CreateConcatDim1Uint8Workload")
{
RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::QAsymmU8>({ 2, 6, 2, 5 }, 1);
}
-BOOST_AUTO_TEST_CASE(CreateConcatDim2Float32Workload)
+TEST_CASE("CreateConcatDim2Float32Workload")
{
RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::Float32>({ 2, 3, 4, 5 }, 2);
}
-BOOST_AUTO_TEST_CASE(CreateConcatDim2Uint8Workload)
+TEST_CASE("CreateConcatDim2Uint8Workload")
{
RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::QAsymmU8>({ 2, 3, 4, 5 }, 2);
}
-BOOST_AUTO_TEST_CASE(CreateConcatDim3Float32Workload)
+TEST_CASE("CreateConcatDim3Float32Workload")
{
RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::Float32>({ 2, 3, 2, 10 }, 3);
}
-BOOST_AUTO_TEST_CASE(CreateConcatDim3Uint8Workload)
+TEST_CASE("CreateConcatDim3Uint8Workload")
{
RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::QAsymmU8>({ 2, 3, 2, 10 }, 3);
}
@@ -1057,25 +1059,25 @@ static void RefCreateConstantWorkloadTest(const armnn::TensorShape& outputShape)
// Check output is as expected
auto queueDescriptor = workload->GetData();
auto outputHandle = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Outputs[0]);
- BOOST_TEST((outputHandle->GetTensorInfo() == TensorInfo(outputShape, DataType)));
+ CHECK((outputHandle->GetTensorInfo() == TensorInfo(outputShape, DataType)));
}
-BOOST_AUTO_TEST_CASE(CreateConstantUint8Workload)
+TEST_CASE("CreateConstantUint8Workload")
{
RefCreateConstantWorkloadTest<RefConstantWorkload, armnn::DataType::QAsymmU8>({ 2, 3, 2, 10 });
}
-BOOST_AUTO_TEST_CASE(CreateConstantInt16Workload)
+TEST_CASE("CreateConstantInt16Workload")
{
RefCreateConstantWorkloadTest<RefConstantWorkload, armnn::DataType::QSymmS16>({ 2, 3, 2, 10 });
}
-BOOST_AUTO_TEST_CASE(CreateConstantFloat32Workload)
+TEST_CASE("CreateConstantFloat32Workload")
{
RefCreateConstantWorkloadTest<RefConstantWorkload, armnn::DataType::Float32>({ 2, 3, 2, 10 });
}
-BOOST_AUTO_TEST_CASE(CreateConstantSigned32Workload)
+TEST_CASE("CreateConstantSigned32Workload")
{
RefCreateConstantWorkloadTest<RefConstantWorkload, armnn::DataType::Signed32>({ 2, 3, 2, 10 });
}
@@ -1097,53 +1099,53 @@ static void RefCreatePreluWorkloadTest(const armnn::TensorShape& inputShape,
// Check output is as expected
auto queueDescriptor = workload->GetData();
auto outputHandle = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Outputs[0]);
- BOOST_TEST((outputHandle->GetTensorInfo() == TensorInfo(outputShape, dataType)));
+ CHECK((outputHandle->GetTensorInfo() == TensorInfo(outputShape, dataType)));
}
-BOOST_AUTO_TEST_CASE(CreatePreluFloat32Workload)
+TEST_CASE("CreatePreluFloat32Workload")
{
RefCreatePreluWorkloadTest({ 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 }, armnn::DataType::Float32);
}
-BOOST_AUTO_TEST_CASE(CreatePreluFloat16Workload)
+TEST_CASE("CreatePreluFloat16Workload")
{
RefCreatePreluWorkloadTest({ 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 }, armnn::DataType::Float16);
}
-BOOST_AUTO_TEST_CASE(CreatePreluUint8Workload)
+TEST_CASE("CreatePreluUint8Workload")
{
RefCreatePreluWorkloadTest({ 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 }, armnn::DataType::QAsymmU8);
}
-BOOST_AUTO_TEST_CASE(CreatePreluInt16Workload)
+TEST_CASE("CreatePreluInt16Workload")
{
RefCreatePreluWorkloadTest({ 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 }, armnn::DataType::QSymmS16);
}
-BOOST_AUTO_TEST_CASE(CreatePreluFloat32NoBroadcastWorkload)
+TEST_CASE("CreatePreluFloat32NoBroadcastWorkload")
{
- BOOST_CHECK_THROW(RefCreatePreluWorkloadTest({ 1, 4, 7, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 },
+ CHECK_THROWS_AS(RefCreatePreluWorkloadTest({ 1, 4, 7, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 },
armnn::DataType::Float32),
armnn::InvalidArgumentException);
}
-BOOST_AUTO_TEST_CASE(CreatePreluFloat16NoBroadcastWorkload)
+TEST_CASE("CreatePreluFloat16NoBroadcastWorkload")
{
- BOOST_CHECK_THROW(RefCreatePreluWorkloadTest({ 1, 4, 7, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 },
+ CHECK_THROWS_AS(RefCreatePreluWorkloadTest({ 1, 4, 7, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 },
armnn::DataType::Float16),
armnn::InvalidArgumentException);
}
-BOOST_AUTO_TEST_CASE(CreatePreluUint8NoBroadcastWorkload)
+TEST_CASE("CreatePreluUint8NoBroadcastWorkload")
{
- BOOST_CHECK_THROW(RefCreatePreluWorkloadTest({ 1, 4, 7, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 },
+ CHECK_THROWS_AS(RefCreatePreluWorkloadTest({ 1, 4, 7, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 },
armnn::DataType::QAsymmU8),
armnn::InvalidArgumentException);
}
-BOOST_AUTO_TEST_CASE(CreatePreluInt16NoBroadcastWorkload)
+TEST_CASE("CreatePreluInt16NoBroadcastWorkload")
{
- BOOST_CHECK_THROW(RefCreatePreluWorkloadTest({ 1, 4, 7, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 },
+ CHECK_THROWS_AS(RefCreatePreluWorkloadTest({ 1, 4, 7, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 },
armnn::DataType::QSymmS16),
armnn::InvalidArgumentException);
}
@@ -1161,22 +1163,22 @@ static void RefCreateSpaceToDepthWorkloadTest()
TensorInfo({ 1, 1, 1, 4 }, DataType));
}
-BOOST_AUTO_TEST_CASE(CreateSpaceToDepthWorkloadFloat32)
+TEST_CASE("CreateSpaceToDepthWorkloadFloat32")
{
RefCreateSpaceToDepthWorkloadTest<RefSpaceToDepthWorkload, armnn::DataType::Float32>();
}
-BOOST_AUTO_TEST_CASE(CreateSpaceToDepthWorkloadFloat16)
+TEST_CASE("CreateSpaceToDepthWorkloadFloat16")
{
RefCreateSpaceToDepthWorkloadTest<RefSpaceToDepthWorkload, armnn::DataType::Float16>();
}
-BOOST_AUTO_TEST_CASE(CreateSpaceToDepthWorkloadQASymm8)
+TEST_CASE("CreateSpaceToDepthWorkloadQASymm8")
{
RefCreateSpaceToDepthWorkloadTest<RefSpaceToDepthWorkload, armnn::DataType::QAsymmU8>();
}
-BOOST_AUTO_TEST_CASE(CreateSpaceToDepthWorkloadQSymm16)
+TEST_CASE("CreateSpaceToDepthWorkloadQSymm16")
{
RefCreateSpaceToDepthWorkloadTest<RefSpaceToDepthWorkload, armnn::DataType::QSymmS16>();
}
@@ -1201,23 +1203,23 @@ static void RefCreateStackWorkloadTest(const armnn::TensorShape& inputShape,
for (unsigned int i = 0; i < numInputs; ++i)
{
auto inputHandle = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Inputs[i]);
- BOOST_TEST((inputHandle->GetTensorInfo() == TensorInfo(inputShape, DataType)));
+ CHECK((inputHandle->GetTensorInfo() == TensorInfo(inputShape, DataType)));
}
auto outputHandle = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Outputs[0]);
- BOOST_TEST((outputHandle->GetTensorInfo() == TensorInfo(outputShape, DataType)));
+ CHECK((outputHandle->GetTensorInfo() == TensorInfo(outputShape, DataType)));
}
-BOOST_AUTO_TEST_CASE(CreateStackFloat32Workload)
+TEST_CASE("CreateStackFloat32Workload")
{
RefCreateStackWorkloadTest<armnn::DataType::Float32>({ 3, 4, 5 }, { 3, 4, 2, 5 }, 2, 2);
}
-BOOST_AUTO_TEST_CASE(CreateStackUint8Workload)
+TEST_CASE("CreateStackUint8Workload")
{
RefCreateStackWorkloadTest<armnn::DataType::QAsymmU8>({ 3, 4, 5 }, { 3, 4, 2, 5 }, 2, 2);
}
-BOOST_AUTO_TEST_CASE(CreateStackUint16Workload)
+TEST_CASE("CreateStackUint16Workload")
{
RefCreateStackWorkloadTest<armnn::DataType::QSymmS16>({ 3, 4, 5 }, { 3, 4, 2, 5 }, 2, 2);
}
@@ -1241,14 +1243,14 @@ static void RefCreateQLstmWorkloadTest()
auto cellStateOutHandle = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Outputs[1]);
auto outputHandle = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Outputs[2]);
- BOOST_TEST((inputHandle->GetTensorInfo() == inputInfo));
- BOOST_TEST((cellStateOutHandle->GetTensorInfo() == cellStateInfo));
- BOOST_TEST((outputHandle->GetTensorInfo() == outputInfo));
+ CHECK((inputHandle->GetTensorInfo() == inputInfo));
+ CHECK((cellStateOutHandle->GetTensorInfo() == cellStateInfo));
+ CHECK((outputHandle->GetTensorInfo() == outputInfo));
}
-BOOST_AUTO_TEST_CASE(CreateQLstmWorkload)
+TEST_CASE("CreateQLstmWorkload")
{
RefCreateQLstmWorkloadTest<RefQLstmWorkload>();
}
-BOOST_AUTO_TEST_SUITE_END()
+}