13 #include <doctest/doctest.h> 18 template<
typename Workload>
19 void CheckInputOutput(std::unique_ptr<Workload> workload,
const TensorInfo& inputInfo,
const TensorInfo& outputInfo)
21 auto queueDescriptor = workload->GetData();
22 auto inputHandle = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Inputs[0]);
23 auto outputHandle = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Outputs[0]);
24 CHECK((inputHandle->GetTensorInfo() == inputInfo));
25 CHECK((outputHandle->GetTensorInfo() == outputInfo));
28 template <
typename Workload>
29 void CheckInputsOutput(std::unique_ptr<Workload> workload,
34 auto queueDescriptor = workload->GetData();
35 auto inputHandle0 = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Inputs[0]);
36 auto inputHandle1 = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Inputs[1]);
37 auto outputHandle = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Outputs[0]);
38 CHECK((inputHandle0->GetTensorInfo() == inputInfo0));
39 CHECK((inputHandle1->GetTensorInfo() == inputInfo1));
40 CHECK((outputHandle->GetTensorInfo() == outputInfo));
// NOTE(review): the line below is a fragment of a factory helper whose
// enclosing definition was lost in extraction — restore from version control.
45 std::shared_ptr<RefMemoryManager> memoryManager = std::make_shared<RefMemoryManager>();
// Creates an activation workload via the reference factory and checks its
// input/output TensorInfos. NOTE(review): this region is mangled — the leading
// numeric tokens are stray source line numbers, and the function body (graph /
// factory setup, braces, and the CheckInputOutput argument list) is truncated.
54 template <
typename ActivationWorkloadType, armnn::DataType DataType>
55 static void RefCreateActivationWorkloadTest()
59 auto workload = CreateActivationWorkloadTest<ActivationWorkloadType, DataType>(factory, graph);
62 CheckInputOutput(std::move(workload),
67 TEST_CASE(
"CreateActivationFloat32Workload")
69 RefCreateActivationWorkloadTest<RefActivationWorkload, armnn::DataType::Float32>();
72 TEST_CASE(
"CreateActivationUint8Workload")
74 RefCreateActivationWorkloadTest<RefActivationWorkload, armnn::DataType::QAsymmU8>();
// Generic driver for elementwise (add/sub/mul/div) workload-creation tests,
// plus the "with blob" variants that exercise fused-activation descriptors.
// NOTE(review): mangled region — stray line-number tokens, truncated template
// parameter lists, missing braces and CheckInputsOutput argument lists;
// restore from version control before compiling.
77 template <
typename WorkloadType,
78 typename DescriptorType,
81 static void RefCreateElementwiseWorkloadTest()
85 auto workload = CreateElementwiseWorkloadTest<WorkloadType, DescriptorType, LayerType, DataType>(
88 CheckInputsOutput(std::move(workload),
94 TEST_CASE(
"CreateSubtractionWorkloadWithBlobTest")
100 auto workload = CreateSubtractionWithBlobWorkloadTest<RefSubtractionWorkload<>,
105 CheckInputsOutput(std::move(workload),
111 TEST_CASE(
"CreateAdditionWorkloadWithBlobTest")
117 auto workload = CreateAdditionWithBlobWorkloadTest<RefAdditionWorkload<>,
121 CheckInputsOutput(std::move(workload),
127 TEST_CASE(
"CreateMultiplicationWorkloadWithBlobTest")
133 auto workload = CreateMultiplicationWithBlobWorkloadTest<RefMultiplicationWorkload<>,
137 CheckInputsOutput(std::move(workload),
// Elementwise TEST_CASEs: Addition / Subtraction / Multiplication / Division
// across Float32, Float16, QAsymmU8, QSymmS16 and Signed32 data types.
// NOTE(review): each call's template argument list is truncated mid-line
// (the DescriptorType, LayerType and DataType arguments are missing) and all
// braces are gone — this whole run needs restoring from version control.
143 TEST_CASE(
"CreateAdditionFloatWorkload")
145 RefCreateElementwiseWorkloadTest<RefAdditionWorkload<>,
151 TEST_CASE(
"CreateAdditionUint8Workload")
153 RefCreateElementwiseWorkloadTest<RefAdditionWorkload<>,
159 TEST_CASE(
"CreateAdditionInt16Workload")
161 RefCreateElementwiseWorkloadTest<RefAdditionWorkload<>,
167 TEST_CASE(
"CreateAdditionInt32Workload")
169 RefCreateElementwiseWorkloadTest<RefAdditionWorkload<int32_t>,
175 TEST_CASE(
"CreateSubtractionFloat32Workload")
177 RefCreateElementwiseWorkloadTest<RefSubtractionWorkload<>,
183 TEST_CASE(
"CreateSubtractionFloat16Workload")
185 RefCreateElementwiseWorkloadTest<RefSubtractionWorkload<>,
191 TEST_CASE(
"CreateSubtractionUint8Workload")
193 RefCreateElementwiseWorkloadTest<RefSubtractionWorkload<>,
199 TEST_CASE(
"CreateSubtractionInt16Workload")
201 RefCreateElementwiseWorkloadTest<RefSubtractionWorkload<>,
207 TEST_CASE(
"CreateSubtractionInt32Workload")
209 RefCreateElementwiseWorkloadTest<RefSubtractionWorkload<int32_t>,
215 TEST_CASE(
"CreateMultiplicationFloatWorkload")
217 RefCreateElementwiseWorkloadTest<RefMultiplicationWorkload<>,
223 TEST_CASE(
"CreateMultiplicationUint8Workload")
225 RefCreateElementwiseWorkloadTest<RefMultiplicationWorkload<>,
231 TEST_CASE(
"CreateMultiplicationInt16Workload")
233 RefCreateElementwiseWorkloadTest<RefMultiplicationWorkload<>,
239 TEST_CASE(
"CreateMultiplicationInt32Workload")
241 RefCreateElementwiseWorkloadTest<RefMultiplicationWorkload<int32_t>,
247 TEST_CASE(
"CreateDivisionFloat32Workload")
249 RefCreateElementwiseWorkloadTest<RefDivisionWorkload<>,
255 TEST_CASE(
"CreateDivisionFloat16Workload")
257 RefCreateElementwiseWorkloadTest<RefDivisionWorkload<>,
263 TEST_CASE(
"CreateDivisionUint8Workload")
265 RefCreateElementwiseWorkloadTest<RefDivisionWorkload<>,
271 TEST_CASE(
"CreateDivisionInt16Workload")
273 RefCreateElementwiseWorkloadTest<RefDivisionWorkload<>,
279 TEST_CASE(
"CreateDivisionInt32Workload")
281 RefCreateElementwiseWorkloadTest<RefDivisionWorkload<int32_t>,
// BatchNormalization workload-creation tests. The driver selects the expected
// input/output shapes by DataLayout: NHWC uses {2,4,4,3}, NCHW uses {2,3,4,4}.
// NOTE(review): mangled region — the switch statement, braces, `break`s and
// the final CheckInputOutput call of the driver are truncated, as are the
// TEST_CASE bodies and the blob-test setup; restore from version control.
287 template <
typename BatchNormalizationWorkloadType, armnn::DataType DataType>
288 static void RefCreateBatchNormalizationWorkloadTest(
DataLayout dataLayout)
292 auto workload = CreateBatchNormalizationWorkloadTest<BatchNormalizationWorkloadType, DataType>(factory,
301 case DataLayout::NHWC:
302 inputShape = { 2, 4, 4, 3 };
303 outputShape = { 2, 4, 4, 3 };
305 case DataLayout::NCHW:
307 inputShape = { 2, 3, 4, 4 };
308 outputShape = { 2, 3, 4, 4 };
// Blob variant: builds the shapes inline (NHWC layout per the literals below).
316 TEST_CASE(
"CreateBatchNormalizationWithBlobFloat32Workload")
327 inputShape = { 2, 4, 4, 3 };
328 outputShape = { 2, 4, 4, 3 };
331 CheckInputOutput(std::move(workload),
TensorInfo(inputShape, dataType),
TensorInfo(outputShape, dataType));
// Per-datatype / per-layout TEST_CASEs; each call's trailing
// `(DataLayout::...);` argument line is missing.
334 TEST_CASE(
"CreateBatchNormalizationFloat32Workload")
336 RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationWorkload,armnn::DataType::Float32>
340 TEST_CASE(
"CreateBatchNormalizationFloat32WorkloadNhwc")
342 RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationWorkload, armnn::DataType::Float32>
346 TEST_CASE(
"CreateBatchNormalizationFloat16Workload")
348 RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationWorkload,armnn::DataType::Float16>
352 TEST_CASE(
"CreateBatchNormalizationFloat16WorkloadNhwc")
354 RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationWorkload, armnn::DataType::Float16>
358 TEST_CASE(
"CreateBatchNormalizationUint8Workload")
360 RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationWorkload, armnn::DataType::QAsymmU8>
364 TEST_CASE(
"CreateBatchNormalizationUint8WorkloadNhwc")
366 RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationWorkload, armnn::DataType::QAsymmU8>
370 TEST_CASE(
"CreateBatchNormalizationInt16Workload")
372 RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationWorkload, armnn::DataType::QSymmS16>
376 TEST_CASE(
"CreateBatchNormalizationInt16WorkloadNhwc")
378 RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationWorkload, armnn::DataType::QSymmS16>
// Fp16<->Fp32 conversion workload tests: both directions use a {1,3,2,3}
// tensor and only the element type changes between input and output.
// NOTE(review): mangled — the factory/graph setup, the `CheckInputOutput(`
// call line and all braces are missing from both TEST_CASEs.
382 TEST_CASE(
"CreateConvertFp16ToFp32Float32Workload")
386 auto workload = CreateConvertFp16ToFp32WorkloadTest<RefConvertFp16ToFp32Workload>(factory, graph);
390 std::move(workload),
TensorInfo({1, 3, 2, 3}, DataType::Float16),
TensorInfo({1, 3, 2, 3}, DataType::Float32));
393 TEST_CASE(
"CreateConvertFp32ToFp16Float16Workload")
397 auto workload = CreateConvertFp32ToFp16WorkloadTest<RefConvertFp32ToFp16Workload>(factory, graph);
401 std::move(workload),
TensorInfo({1, 3, 2, 3}, DataType::Float32),
TensorInfo({1, 3, 2, 3}, DataType::Float16));
// Convolution2d / DepthwiseConvolution2d creation tests. Expected shapes are
// layout-dependent, chosen with a conditional over DataLayout (NCHW vs NHWC).
// NOTE(review): mangled — the template header of the first driver, the
// CheckInputOutput argument lists and all braces are truncated.
404 static void RefCreateConvolution2dWorkloadTest(
DataLayout dataLayout = DataLayout::NCHW)
408 auto workload = CreateConvolution2dWorkloadTest<RefConvolution2dWorkload, DataType::Float32>
409 (factory, graph, dataLayout);
411 TensorShape inputShape = (dataLayout == DataLayout::NCHW) ? std::initializer_list<unsigned int>({2, 3, 8, 16})
412 : std::initializer_list<unsigned int>({2, 8, 16, 3});
413 TensorShape outputShape = (dataLayout == DataLayout::NCHW) ? std::initializer_list<unsigned int>({2, 2, 2, 10})
414 : std::initializer_list<unsigned int>({2, 2, 10, 2});
417 CheckInputOutput(std::move(workload),
422 TEST_CASE(
"CreateConvolution2dFloatNchwWorkload")
424 RefCreateConvolution2dWorkloadTest(DataLayout::NCHW);
427 TEST_CASE(
"CreateConvolution2dFloatNhwcWorkload")
429 RefCreateConvolution2dWorkloadTest(DataLayout::NHWC);
// Fused-activation ("with blob") convolution variant; same shape logic.
432 TEST_CASE(
"CreateConvolution2dWithBlobWorkload")
437 auto workload = CreateConvolution2dFusedActivationWithBlobWorkloadTest<RefConvolution2dWorkload, DataType::Float32>
438 (factory, graph, dataLayout);
440 TensorShape inputShape = (dataLayout == DataLayout::NCHW) ? std::initializer_list<unsigned int>({2, 3, 8, 16})
441 : std::initializer_list<unsigned int>({2, 8, 16, 3});
442 TensorShape outputShape = (dataLayout == DataLayout::NCHW) ? std::initializer_list<unsigned int>({2, 2, 2, 10})
443 : std::initializer_list<unsigned int>({2, 2, 10, 2});
446 CheckInputOutput(std::move(workload),
// Depthwise convolution: input and output share the same shape per layout.
451 static void RefCreateDepthwiseConvolutionWorkloadTest(
DataLayout dataLayout)
455 auto workload = CreateDepthwiseConvolution2dWorkloadTest<RefDepthwiseConvolution2dWorkload, DataType::Float32>
456 (factory, graph, dataLayout);
458 TensorShape inputShape = (dataLayout == DataLayout::NCHW) ? std::initializer_list<unsigned int>({ 2, 2, 5, 5 })
459 : std::initializer_list<unsigned int>({ 2, 5, 5, 2 });
460 TensorShape outputShape = (dataLayout == DataLayout::NCHW) ? std::initializer_list<unsigned int>({ 2, 2, 5, 5 })
461 : std::initializer_list<unsigned int>({ 2, 5, 5, 2 });
464 CheckInputOutput(std::move(workload),
469 TEST_CASE(
"CreateDepthwiseConvolutionFloat32NhwcWorkload")
471 RefCreateDepthwiseConvolutionWorkloadTest(DataLayout::NHWC);
// FullyConnected creation tests: blob variant, weights/biases-as-inputs
// variant, and a generic per-datatype driver. The zero quantization scales
// below are deliberate test values, not defaults.
// NOTE(review): mangled — workload setup lines, Check* argument lists and
// braces are truncated throughout this region.
474 TEST_CASE(
"RefCreateFullyConnectedWithBlobWorkloadTest")
482 float inputsQScale = 0.0f;
483 float outputQScale = 0.0f;
484 CheckInputOutput(std::move(workload),
489 TEST_CASE(
"CreateFullyConnectedWorkloadWeightsBiasesAsInputsFloat32")
499 float inputsQScale = 0.0f;
500 float outputQScale = 0.0f;
501 CheckInputsOutput(std::move(workload),
507 template <
typename FullyConnectedWorkloadType, armnn::DataType DataType>
508 static void RefCreateFullyConnectedWorkloadTest()
512 auto workload = CreateFullyConnectedWorkloadTest<FullyConnectedWorkloadType, DataType>(factory, graph);
517 CheckInputOutput(std::move(workload),
522 TEST_CASE(
"CreateFullyConnectedWorkloadFloat32")
524 RefCreateFullyConnectedWorkloadTest<RefFullyConnectedWorkload, armnn::DataType::Float32>();
527 TEST_CASE(
"CreateFullyConnectedWorkloadQuantisedAsymm8")
529 RefCreateFullyConnectedWorkloadTest<RefFullyConnectedWorkload, armnn::DataType::QAsymmU8>();
532 TEST_CASE(
"CreateFullyConnectedWorkloadQuantisedSymm16")
534 RefCreateFullyConnectedWorkloadTest<RefFullyConnectedWorkload, armnn::DataType::QSymmS16>();
// Normalization creation tests. NOTE(review): the shape literals under the
// NHWC/NCHW case labels look swapped relative to the usual channel placement
// ({3,1,5,5} under NHWC, {3,5,5,1} under NCHW) — but the switch is truncated
// here, so confirm against version control rather than "fixing" blindly.
537 template <
typename NormalizationWorkloadType, armnn::DataType DataType>
538 static void RefCreateNormalizationWorkloadTest(
DataLayout dataLayout)
542 auto workload = CreateNormalizationWorkloadTest<NormalizationWorkloadType, DataType>(factory, graph, dataLayout);
549 case DataLayout::NHWC:
550 inputShape = { 3, 1, 5, 5 };
551 outputShape = { 3, 1, 5, 5 };
553 case DataLayout::NCHW:
555 inputShape = { 3, 5, 5, 1 };
556 outputShape = { 3, 5, 5, 1 };
564 TEST_CASE(
"CreateRefNormalizationFloat32NchwWorkload")
566 RefCreateNormalizationWorkloadTest<RefNormalizationWorkload, armnn::DataType::Float32>(DataLayout::NCHW);
569 TEST_CASE(
"CreateRefNormalizationFloat32NhwcWorkload")
571 RefCreateNormalizationWorkloadTest<RefNormalizationWorkload, armnn::DataType::Float32>(DataLayout::NHWC);
574 TEST_CASE(
"CreateRefNormalizationUint8NchwWorkload")
576 RefCreateNormalizationWorkloadTest<RefNormalizationWorkload, armnn::DataType::QAsymmU8>(DataLayout::NCHW);
579 TEST_CASE(
"CreateRefNormalizationUint8NhwcWorkload")
581 RefCreateNormalizationWorkloadTest<RefNormalizationWorkload, armnn::DataType::QAsymmU8>(DataLayout::NHWC);
584 TEST_CASE(
"CreateRefNormalizationInt16NchwWorkload")
586 RefCreateNormalizationWorkloadTest<RefNormalizationWorkload, armnn::DataType::QSymmS16>(DataLayout::NCHW);
589 TEST_CASE(
"CreateRefNormalizationInt16NhwcWorkload")
591 RefCreateNormalizationWorkloadTest<RefNormalizationWorkload, armnn::DataType::QSymmS16>(DataLayout::NHWC);
// Pooling2d creation tests: expected shapes differ per layout (NHWC
// {3,5,5,2}->{3,2,4,2}; NCHW {3,2,5,5}->{3,2,2,4}).
// NOTE(review): switch braces/breaks and Check* argument lists truncated.
594 template <
typename Pooling2dWorkloadType, armnn::DataType DataType>
595 static void RefCreatePooling2dWorkloadTest(
DataLayout dataLayout)
599 auto workload = CreatePooling2dWorkloadTest<Pooling2dWorkloadType, DataType>(factory, graph, dataLayout);
606 case DataLayout::NHWC:
607 inputShape = { 3, 5, 5, 2 };
608 outputShape = { 3, 2, 4, 2 };
610 case DataLayout::NCHW:
612 inputShape = { 3, 2, 5, 5 };
613 outputShape = { 3, 2, 2, 4 };
617 CheckInputOutput(std::move(workload),
622 TEST_CASE(
"CreatePooling2dFloat32Workload")
624 RefCreatePooling2dWorkloadTest<RefPooling2dWorkload, armnn::DataType::Float32>(DataLayout::NCHW);
627 TEST_CASE(
"CreatePooling2dFloat32NhwcWorkload")
629 RefCreatePooling2dWorkloadTest<RefPooling2dWorkload, armnn::DataType::Float32>(DataLayout::NHWC);
632 TEST_CASE(
"CreatePooling2dUint8Workload")
634 RefCreatePooling2dWorkloadTest<RefPooling2dWorkload, armnn::DataType::QAsymmU8>(DataLayout::NCHW);
637 TEST_CASE(
"CreatePooling2dUint8NhwcWorkload")
639 RefCreatePooling2dWorkloadTest<RefPooling2dWorkload, armnn::DataType::QAsymmU8>(DataLayout::NHWC);
642 TEST_CASE(
"CreatePooling2dInt16Workload")
644 RefCreatePooling2dWorkloadTest<RefPooling2dWorkload, armnn::DataType::QSymmS16>(DataLayout::NCHW);
647 TEST_CASE(
"CreatePooling2dInt16NhwcWorkload")
649 RefCreatePooling2dWorkloadTest<RefPooling2dWorkload, armnn::DataType::QSymmS16>(DataLayout::NHWC);
// Softmax creation tests. The driver adjusts quantization parameters on the
// expected TensorInfo for quantized types (scale 1/256, offset -128 in one
// branch) — the surrounding conditional is truncated here.
652 template <
typename SoftmaxWorkloadType, armnn::DataType DataType>
653 static void RefCreateSoftmaxWorkloadTest()
657 auto workload = CreateSoftmaxWorkloadTest<SoftmaxWorkloadType, DataType>(factory, graph);
665 tensorInfo.SetQuantizationScale(1.f / 256);
669 tensorInfo.SetQuantizationOffset(-128);
670 tensorInfo.SetQuantizationScale(1.f / 256);
678 TEST_CASE(
"CreateSoftmaxFloat32Workload")
680 RefCreateSoftmaxWorkloadTest<RefSoftmaxWorkload, armnn::DataType::Float32>();
683 TEST_CASE(
"CreateSoftmaxFloat16Workload")
685 RefCreateSoftmaxWorkloadTest<RefSoftmaxWorkload, armnn::DataType::Float16>();
688 TEST_CASE(
"CreateSoftmaxQuantisedAsymm8Workload")
690 RefCreateSoftmaxWorkloadTest<RefSoftmaxWorkload, armnn::DataType::QAsymmU8>();
693 TEST_CASE(
"CreateSoftmaxQuantisedSymm16Workload")
695 RefCreateSoftmaxWorkloadTest<RefSoftmaxWorkload, armnn::DataType::QSymmS16>();
// Splitter creation tests: one input, three outputs. The per-handle
// TensorInfo CHECKs that followed each downcast are truncated here.
698 template <
typename SplitterWorkloadType, armnn::DataType DataType>
699 static void RefCreateSplitterWorkloadTest()
703 auto workload = CreateSplitterWorkloadTest<SplitterWorkloadType, DataType>(factory, graph);
707 auto inputHandle = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Inputs[0]);
710 auto outputHandle0 = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Outputs[0]);
713 auto outputHandle1 = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Outputs[1]);
716 auto outputHandle2 = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Outputs[2]);
720 TEST_CASE(
"CreateSplitterFloat32Workload")
722 RefCreateSplitterWorkloadTest<RefSplitterWorkload, armnn::DataType::Float32>();
725 TEST_CASE(
"CreateSplitterFloat16Workload")
727 RefCreateSplitterWorkloadTest<RefSplitterWorkload, armnn::DataType::Float16>();
730 TEST_CASE(
"CreateSplitterUint8Workload")
732 RefCreateSplitterWorkloadTest<RefSplitterWorkload, armnn::DataType::QAsymmU8>();
// Splitter->Concat pipeline: checks that the splitter's output handles are
// wired to the concat's inputs (note the deliberate cross-over: sOut0==mIn1,
// sOut1==mIn0). The handle-extraction lines are truncated here.
735 template <
typename SplitterWorkloadType,
typename ConcatWorkloadType, armnn::DataType DataType>
736 static void RefCreateSplitterConcatWorkloadTest()
746 auto workloads = CreateSplitterConcatWorkloadTest<SplitterWorkloadType, ConcatWorkloadType, DataType>
749 auto wlSplitter = std::move(workloads.first);
750 auto wlConcat = std::move(workloads.second);
763 bool validDataPointers = (sOut0 == mIn1) && (sOut1 == mIn0);
765 CHECK(validDataPointers);
768 TEST_CASE(
"CreateSplitterConcatFloat32")
770 RefCreateSplitterConcatWorkloadTest<RefSplitterWorkload, RefConcatWorkload, DataType::Float32>();
773 TEST_CASE(
"CreateSplitterConcatFloat16")
775 RefCreateSplitterConcatWorkloadTest<RefSplitterWorkload, RefConcatWorkload, DataType::Float16>();
778 TEST_CASE(
"CreateSplitterConcatUint8")
780 RefCreateSplitterConcatWorkloadTest<RefSplitterWorkload, RefConcatWorkload, DataType::QAsymmU8>();
// One splitter output feeding multiple activation inputs: each splitter
// output must alias the corresponding activation input handles.
783 template <
typename SplitterWorkloadType,
typename ActivationWorkloadType, armnn::DataType DataType>
784 static void RefCreateSingleOutputMultipleInputsTest()
791 std::unique_ptr<SplitterWorkloadType> wlSplitter;
792 std::unique_ptr<ActivationWorkloadType> wlActiv0_0;
793 std::unique_ptr<ActivationWorkloadType> wlActiv0_1;
794 std::unique_ptr<ActivationWorkloadType> wlActiv1_0;
795 std::unique_ptr<ActivationWorkloadType> wlActiv1_1;
797 CreateSplitterMultipleInputsOneOutputWorkloadTest<SplitterWorkloadType,
798 ActivationWorkloadType,
DataType>(factory, graph, wlSplitter, wlActiv0_0, wlActiv0_1, wlActiv1_0, wlActiv1_1);
815 bool validDataPointers = (sOut0 == activ0_0Im) && (sOut0 == activ0_1Im) &&
816 (sOut1 == activ1_0Im) && (sOut1 == activ1_1Im);
818 CHECK(validDataPointers);
821 TEST_CASE(
"CreateSingleOutputMultipleInputsFloat32")
827 TEST_CASE(
"CreateSingleOutputMultipleInputsUint8")
// ResizeBilinear creation tests: NHWC expects {2,4,4,3}->{2,2,2,3}; NCHW
// expects {2,3,4,4}->{2,3,2,2}. Switch braces/breaks are truncated.
833 template <
typename ResizeBilinearWorkloadType, armnn::DataType DataType>
834 static void RefCreateResizeBilinearTest(
DataLayout dataLayout)
838 auto workload = CreateResizeBilinearWorkloadTest<ResizeBilinearWorkloadType, DataType>(factory, graph, dataLayout);
845 case DataLayout::NHWC:
846 inputShape = { 2, 4, 4, 3 };
847 outputShape = { 2, 2, 2, 3 };
849 case DataLayout::NCHW:
851 inputShape = { 2, 3, 4, 4 };
852 outputShape = { 2, 3, 2, 2 };
856 CheckInputOutput(std::move(workload),
861 TEST_CASE(
"CreateResizeBilinearFloat32")
863 RefCreateResizeBilinearTest<RefResizeWorkload, armnn::DataType::Float32>(DataLayout::NCHW);
866 TEST_CASE(
"CreateResizeBilinearFloat16")
868 RefCreateResizeBilinearTest<RefResizeWorkload, armnn::DataType::Float16>(DataLayout::NCHW);
871 TEST_CASE(
"CreateResizeBilinearUint8")
873 RefCreateResizeBilinearTest<RefResizeWorkload, armnn::DataType::QAsymmU8>(DataLayout::NCHW);
876 TEST_CASE(
"CreateResizeBilinearQuantisedAsymm16")
878 RefCreateResizeBilinearTest<RefResizeWorkload, armnn::DataType::QSymmS16>(DataLayout::NCHW);
881 TEST_CASE(
"CreateResizeBilinearFloat32Nhwc")
883 RefCreateResizeBilinearTest<RefResizeWorkload, armnn::DataType::Float32>(DataLayout::NHWC);
// BatchToSpaceNd creation tests; CheckInputOutput argument list truncated.
886 template <
typename BatchToSpaceNdWorkloadType, armnn::DataType DataType>
887 static void RefCreateBatchToSpaceNdTest()
892 auto workload = CreateBatchToSpaceNdWorkloadTest<BatchToSpaceNdWorkloadType, DataType>(factory, graph);
894 CheckInputOutput(std::move(workload),
899 TEST_CASE(
"CreateBatchToSpaceNdFloat32")
901 RefCreateBatchToSpaceNdTest<RefBatchToSpaceNdWorkload, armnn::DataType::Float32>();
904 TEST_CASE(
"CreateBatchToSpaceNdFloat16")
906 RefCreateBatchToSpaceNdTest<RefBatchToSpaceNdWorkload, armnn::DataType::Float16>();
909 TEST_CASE(
"CreateBatchToSpaceNdUint8")
911 RefCreateBatchToSpaceNdTest<RefBatchToSpaceNdWorkload, armnn::DataType::QAsymmU8>();
914 TEST_CASE(
"CreateBatchToSpaceNdQSymm16")
916 RefCreateBatchToSpaceNdTest<RefBatchToSpaceNdWorkload, armnn::DataType::QSymmS16>();
// L2Normalization creation tests: NHWC expects {5,50,67,20}; NCHW expects
// {5,20,50,67}; input and output shapes are identical per layout.
// NOTE(review): switch braces/breaks and the final Check call are truncated.
919 template <
typename L2NormalizationWorkloadType, armnn::DataType DataType>
920 static void RefCreateL2NormalizationTest(
DataLayout dataLayout)
925 CreateL2NormalizationWorkloadTest<L2NormalizationWorkloadType, DataType>(factory, graph, dataLayout);
932 case DataLayout::NHWC:
933 inputShape = { 5, 50, 67, 20 };
934 outputShape = { 5, 50, 67, 20 };
936 case DataLayout::NCHW:
938 inputShape = { 5, 20, 50, 67 };
939 outputShape = { 5, 20, 50, 67 };
947 TEST_CASE(
"CreateL2NormalizationFloat32")
949 RefCreateL2NormalizationTest<RefL2NormalizationWorkload, armnn::DataType::Float32>(DataLayout::NCHW);
952 TEST_CASE(
"CreateL2NormalizationFloat32Nhwc")
954 RefCreateL2NormalizationTest<RefL2NormalizationWorkload, armnn::DataType::Float32>(DataLayout::NHWC);
957 TEST_CASE(
"CreateL2NormalizationInt16")
959 RefCreateL2NormalizationTest<RefL2NormalizationWorkload, armnn::DataType::QSymmS16>(DataLayout::NCHW);
962 TEST_CASE(
"CreateL2NormalizationInt16Nhwc")
964 RefCreateL2NormalizationTest<RefL2NormalizationWorkload, armnn::DataType::QSymmS16>(DataLayout::NHWC);
967 TEST_CASE(
"CreateL2NormalizationUint8")
969 RefCreateL2NormalizationTest<RefL2NormalizationWorkload, armnn::DataType::QAsymmU8>(DataLayout::NCHW);
972 TEST_CASE(
"CreateL2NormalizationUint8Nhwc")
974 RefCreateL2NormalizationTest<RefL2NormalizationWorkload, armnn::DataType::QAsymmU8>(DataLayout::NHWC);
// Reshape creation tests; the driver's expected-shape checks are truncated.
977 template <
typename ReshapeWorkloadType, armnn::DataType DataType>
978 static void RefCreateReshapeWorkloadTest()
982 auto workload = CreateReshapeWorkloadTest<ReshapeWorkloadType, DataType>(factory, graph);
991 TEST_CASE(
"CreateReshapeWorkloadFloat32")
993 RefCreateReshapeWorkloadTest<RefReshapeWorkload, armnn::DataType::Float32>();
996 TEST_CASE(
"CreateReshapeWorkloadQuantisedAsymm8")
998 RefCreateReshapeWorkloadTest<RefReshapeWorkload, armnn::DataType::QAsymmU8>();
1001 TEST_CASE(
"CreateReshapeWorkloadQuantisedSymm16")
1003 RefCreateReshapeWorkloadTest<RefReshapeWorkload, armnn::DataType::QSymmS16>();
// Concat creation tests, parameterised by output shape and concat axis
// (axes 0-3 exercised below). NOTE(review): the driver's function header line
// (name and outputShape parameter) was lost in extraction.
1006 template <
typename ConcatWorkloadType, armnn::DataType DataType>
1008 unsigned int concatAxis)
1012 auto workload = CreateConcatWorkloadTest<ConcatWorkloadType, DataType>(factory, graph, outputShape, concatAxis);
1014 CheckInputsOutput(std::move(workload),
1020 TEST_CASE(
"CreateConcatDim0Float32Workload")
1022 RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::Float32>({ 4, 3, 2, 5 }, 0);
1025 TEST_CASE(
"CreateConcatDim0Float16Workload")
1027 RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::Float16>({ 4, 3, 2, 5 }, 0);
1030 TEST_CASE(
"CreateConcatDim0Uint8Workload")
1032 RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::QAsymmU8>({ 4, 3, 2, 5 }, 0);
1035 TEST_CASE(
"CreateConcatDim0Uint16Workload")
1037 RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::QSymmS16>({ 4, 3, 2, 5 }, 0);
1040 TEST_CASE(
"CreateConcatDim1Float32Workload")
1042 RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::Float32>({ 2, 6, 2, 5 }, 1);
1045 TEST_CASE(
"CreateConcatDim1Uint8Workload")
1047 RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::QAsymmU8>({ 2, 6, 2, 5 }, 1);
1050 TEST_CASE(
"CreateConcatDim2Float32Workload")
1052 RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::Float32>({ 2, 3, 4, 5 }, 2);
1055 TEST_CASE(
"CreateConcatDim2Uint8Workload")
1057 RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::QAsymmU8>({ 2, 3, 4, 5 }, 2);
1060 TEST_CASE(
"CreateConcatDim3Float32Workload")
1062 RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::Float32>({ 2, 3, 2, 10 }, 3);
1065 TEST_CASE(
"CreateConcatDim3Uint8Workload")
1067 RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::QAsymmU8>({ 2, 3, 2, 10 }, 3);
// Constant creation tests: output-only workload (no inputs), so only the
// output handle is downcast and checked. The driver's header and the final
// CHECK on the output TensorInfo are truncated.
1070 template <
typename ConstantWorkloadType, armnn::DataType DataType>
1075 auto workload = CreateConstantWorkloadTest<ConstantWorkloadType, DataType>(factory, graph, outputShape);
1078 auto queueDescriptor = workload->GetData();
1079 auto outputHandle = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Outputs[0]);
1083 TEST_CASE(
"CreateConstantUint8Workload")
1085 RefCreateConstantWorkloadTest<RefConstantWorkload, armnn::DataType::QAsymmU8>({ 2, 3, 2, 10 });
1088 TEST_CASE(
"CreateConstantInt16Workload")
1090 RefCreateConstantWorkloadTest<RefConstantWorkload, armnn::DataType::QSymmS16>({ 2, 3, 2, 10 });
1093 TEST_CASE(
"CreateConstantFloat32Workload")
1095 RefCreateConstantWorkloadTest<RefConstantWorkload, armnn::DataType::Float32>({ 2, 3, 2, 10 });
1098 TEST_CASE(
"CreateConstantSigned32Workload")
1100 RefCreateConstantWorkloadTest<RefConstantWorkload, armnn::DataType::Signed32>({ 2, 3, 2, 10 });
// Prelu creation tests: driver takes input/alpha/output shapes plus a data
// type; only the output TensorInfo is checked. The "NoBroadcast" cases pass
// incompatible shapes and expect a throw (exception type truncated from the
// CHECK_THROWS_AS lines — confirm against version control).
// NOTE(review): the driver's own header/signature was lost in extraction.
1110 auto workload = CreatePreluWorkloadTest<RefPreluWorkload>(factory,
1118 auto queueDescriptor = workload->GetData();
1119 auto outputHandle = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Outputs[0]);
1120 CHECK((outputHandle->GetTensorInfo() ==
TensorInfo(outputShape, dataType)));
1123 TEST_CASE(
"CreatePreluFloat32Workload")
1125 RefCreatePreluWorkloadTest({ 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 },
armnn::DataType::Float32);
1128 TEST_CASE(
"CreatePreluFloat16Workload")
1130 RefCreatePreluWorkloadTest({ 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 },
armnn::DataType::Float16);
1133 TEST_CASE(
"CreatePreluUint8Workload")
1135 RefCreatePreluWorkloadTest({ 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 },
armnn::DataType::QAsymmU8);
1138 TEST_CASE(
"CreatePreluInt16Workload")
1140 RefCreatePreluWorkloadTest({ 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 },
armnn::DataType::QSymmS16);
1143 TEST_CASE(
"CreatePreluFloat32NoBroadcastWorkload")
1145 CHECK_THROWS_AS(RefCreatePreluWorkloadTest({ 1, 4, 7, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 },
1150 TEST_CASE(
"CreatePreluFloat16NoBroadcastWorkload")
1152 CHECK_THROWS_AS(RefCreatePreluWorkloadTest({ 1, 4, 7, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 },
1157 TEST_CASE(
"CreatePreluUint8NoBroadcastWorkload")
1159 CHECK_THROWS_AS(RefCreatePreluWorkloadTest({ 1, 4, 7, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 },
1164 TEST_CASE(
"CreatePreluInt16NoBroadcastWorkload")
1166 CHECK_THROWS_AS(RefCreatePreluWorkloadTest({ 1, 4, 7, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 },
// SpaceToDepth creation tests; CheckInputOutput argument list truncated.
1171 template <
typename SpaceToDepthWorkloadType, armnn::DataType DataType>
1172 static void RefCreateSpaceToDepthWorkloadTest()
1177 auto workload = CreateSpaceToDepthWorkloadTest<SpaceToDepthWorkloadType, DataType>(factory, graph);
1179 CheckInputOutput(std::move(workload),
1184 TEST_CASE(
"CreateSpaceToDepthWorkloadFloat32")
1186 RefCreateSpaceToDepthWorkloadTest<RefSpaceToDepthWorkload, armnn::DataType::Float32>();
1189 TEST_CASE(
"CreateSpaceToDepthWorkloadFloat16")
1191 RefCreateSpaceToDepthWorkloadTest<RefSpaceToDepthWorkload, armnn::DataType::Float16>();
1194 TEST_CASE(
"CreateSpaceToDepthWorkloadQASymm8")
1196 RefCreateSpaceToDepthWorkloadTest<RefSpaceToDepthWorkload, armnn::DataType::QAsymmU8>();
1199 TEST_CASE(
"CreateSpaceToDepthWorkloadQSymm16")
1201 RefCreateSpaceToDepthWorkloadTest<RefSpaceToDepthWorkload, armnn::DataType::QSymmS16>();
// Stack creation tests: the driver loops over numInputs input handles and
// checks one output; each call stacks two {3,4,5} tensors along axis 2 into
// {3,4,2,5}. The driver's signature (shapes/axis params) is truncated.
1204 template <armnn::DataType DataType>
1208 unsigned int numInputs)
1212 auto workload = CreateStackWorkloadTest<RefStackWorkload, DataType>(factory,
1221 for (
unsigned int i = 0; i < numInputs; ++i)
1223 auto inputHandle = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Inputs[i]);
1226 auto outputHandle = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Outputs[0]);
1230 TEST_CASE(
"CreateStackFloat32Workload")
1232 RefCreateStackWorkloadTest<armnn::DataType::Float32>({ 3, 4, 5 }, { 3, 4, 2, 5 }, 2, 2);
1235 TEST_CASE(
"CreateStackUint8Workload")
1237 RefCreateStackWorkloadTest<armnn::DataType::QAsymmU8>({ 3, 4, 5 }, { 3, 4, 2, 5 }, 2, 2);
1240 TEST_CASE(
"CreateStackUint16Workload")
1242 RefCreateStackWorkloadTest<armnn::DataType::QSymmS16>({ 3, 4, 5 }, { 3, 4, 2, 5 }, 2, 2);
// QLstm creation test: checks input (m_Inputs[0]), cell-state-out
// (m_Outputs[1]) and output (m_Outputs[2]) TensorInfos; the expected
// inputInfo/cellStateInfo/outputInfo definitions are truncated.
1245 template <
typename QLstmWorkloadType>
1246 static void RefCreateQLstmWorkloadTest()
1251 auto workload = CreateQLstmWorkloadTest<QLstmWorkloadType>(factory, graph);
1260 auto inputHandle = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Inputs[0]);
1261 auto cellStateOutHandle = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Outputs[1]);
1262 auto outputHandle = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Outputs[2]);
1264 CHECK((inputHandle->GetTensorInfo() == inputInfo));
1265 CHECK((cellStateOutHandle->GetTensorInfo() == cellStateInfo));
1266 CHECK((outputHandle->GetTensorInfo() == outputInfo));
1269 TEST_CASE(
"CreateQLstmWorkload")
1271 RefCreateQLstmWorkloadTest<RefQLstmWorkload>();
// NOTE(review): TEST_SUITE opens the doctest suite and, in the original file,
// precedes all of the TEST_CASEs above — it was displaced here by extraction.
TEST_SUITE("CreateWorkloadRef")
// --- Extraction residue (not part of this test file) -----------------------
// The following lines are documentation fragments from unrelated Arm NN layer
// headers that were appended during extraction; kept here, commented out, for
// traceability:
// This layer represents an addition operation.
// This layer represents a subtraction operation.
// This layer represents a division operation.
// void SetQuantizationOffset(int32_t offset)
// This layer represents a multiplication operation.
// LayerType
// When adding a new layer, adapt also the LastLayer enum value in the enum class LayerType below...