From 51847d5dd9cad6bc81673642a01fd531def44311 Mon Sep 17 00:00:00 2001
From: Giorgio Arena
Date: Tue, 19 Oct 2021 15:45:57 +0100
Subject: Implement CLDirectConv3DKernel - uint8/int8

Resolve COMPMID-4663

Signed-off-by: Giorgio Arena
Change-Id: I5c3c1cffed5385c06b789543318f7f4d6096987e
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/6468
Tested-by: Arm Jenkins
Comments-Addressed: Arm Jenkins
Reviewed-by: Sheri Zhang
---
 tests/validation/CL/Convolution3D.cpp | 122 ++++++++++++++++++++++++++++++++--
 1 file changed, 118 insertions(+), 4 deletions(-)

(limited to 'tests/validation/CL')

diff --git a/tests/validation/CL/Convolution3D.cpp b/tests/validation/CL/Convolution3D.cpp
index 75e2e99b03..381aacc465 100644
--- a/tests/validation/CL/Convolution3D.cpp
+++ b/tests/validation/CL/Convolution3D.cpp
@@ -38,10 +38,11 @@ namespace validation
 {
 namespace
 {
-RelativeTolerance<half>  tolerance_fp16(half(0.2));  /**< Tolerance for floating point tests */
-RelativeTolerance<float> tolerance_fp32(0.05f);      /**< Tolerance for floating point tests */
-constexpr float          abs_tolerance_f32(0.0001f); /**< Absolute tolerance for FP32 tests*/
-constexpr float          tolerance_num = 0.07f;      /**< Tolerance number */
+RelativeTolerance<half>              tolerance_fp16(half(0.2));  /**< Tolerance for floating point tests */
+RelativeTolerance<float>             tolerance_fp32(0.05f);      /**< Tolerance for floating point tests */
+constexpr AbsoluteTolerance<uint8_t> tolerance_qasymm8(1);       /**< Tolerance for quantized tests */
+constexpr float                      abs_tolerance_f32(0.0001f); /**< Absolute tolerance for FP32 tests*/
+constexpr float                      tolerance_num = 0.07f;      /**< Tolerance number */
 } // namespace
 
 TEST_SUITE(CL)
@@ -165,6 +166,8 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip(zi
 
 template <typename T>
 using CLDirectConvolution3DFixture = DirectConvolution3DValidationFixture<CLTensor, CLAccessor, CLConv3D, T>;
+template <typename T>
+using CLDirectConvolution3DQuantizedFixture = DirectConvolution3DValidationQuantizedFixture<CLTensor, CLAccessor, CLConv3D, T>;
 
 TEST_SUITE(NDHWC)
 TEST_SUITE(FP16)
@@ -266,6 +269,117 @@ FIXTURE_DATA_TEST_CASE(RunLarge, CLDirectConvolution3DFixture<float>, framework:
 // clang-format on
 // *INDENT-ON*
 TEST_SUITE_END() // FP32
+
+TEST_SUITE(QASYMM8)
+FIXTURE_DATA_TEST_CASE(RunSmall, CLDirectConvolution3DQuantizedFixture<uint8_t>, framework::DatasetMode::PRECOMMIT,
+                       combine(combine(combine(combine(combine(combine(zip(zip(zip(zip(zip(zip(zip(zip(zip(zip(zip(
+                       framework::dataset::make("InputShape", { TensorShape(7U, 5U, 3U, 13U, 3U),
+                                                                TensorShape(15U, 7U, 11U, 7U),
+                                                                TensorShape(19U, 5U, 16U, 4U),
+                                                                TensorShape(13U, 5U, 17U, 2U)
+                                                              }),
+                       framework::dataset::make("StrideX", { 1, 3, 2, 1 })),
+                       framework::dataset::make("StrideY", { 2, 1, 3, 1 })),
+                       framework::dataset::make("StrideZ", { 3, 2, 1, 1 })),
+                       framework::dataset::make("PadX", { 0, 2, 1, 0 })),
+                       framework::dataset::make("PadY", { 1, 0, 2, 0 })),
+                       framework::dataset::make("PadZ", { 2, 1, 0, 0 })),
+                       framework::dataset::make("KernelWidth", { 3, 7, 5, 1 })),
+                       framework::dataset::make("KernelHeight", { 5, 3, 7, 1 })),
+                       framework::dataset::make("KernelDepth", { 7, 5, 3, 1 })),
+                       framework::dataset::make("NumKernels", { 5, 3, 1, 11 })),
+                       framework::dataset::make("HasBias", { true, true, true, false })),
+                       framework::dataset::make("Activation", ActivationLayerInfo())),
+                       framework::dataset::make("DataType", DataType::QASYMM8)),
+                       framework::dataset::make("DataLayout", DataLayout::NDHWC)),
+                       framework::dataset::make("SrcQuantizationInfo", QuantizationInfo(0.1f, 10))),
+                       framework::dataset::make("WeightsQuantizationInfo", QuantizationInfo(0.3f, 20))),
framework::dataset::make("DstQuantizationInfo", QuantizationInfo(0.2f, 5)))) +{ + validate(CLAccessor(_target), _reference, tolerance_qasymm8); +} + +FIXTURE_DATA_TEST_CASE(RunLarge, CLDirectConvolution3DQuantizedFixture, framework::DatasetMode::NIGHTLY, + combine(combine(combine(combine(combine(combine(zip(zip(zip(zip(zip(zip(zip(zip(zip(zip(zip( + framework::dataset::make("InputShape", { TensorShape(400U, 400U, 200U, 11U) }), + framework::dataset::make("StrideX", { 1 })), + framework::dataset::make("StrideY", { 1 })), + framework::dataset::make("StrideZ", { 1 })), + framework::dataset::make("PadX", { 1 })), + framework::dataset::make("PadY", { 1 })), + framework::dataset::make("PadZ", { 1 })), + framework::dataset::make("KernelWidth", { 9 })), + framework::dataset::make("KernelHeight", { 9 })), + framework::dataset::make("KernelDepth", { 9 })), + framework::dataset::make("NumKernels", { 300 })), + framework::dataset::make("HasBias", { true })), + framework::dataset::make("Activation", ActivationLayerInfo())), + framework::dataset::make("DataType", DataType::QASYMM8)), + framework::dataset::make("DataLayout", DataLayout::NDHWC)), + framework::dataset::make("SrcQuantizationInfo", QuantizationInfo(0.1f, 10))), + framework::dataset::make("WeightsQuantizationInfo", QuantizationInfo(0.3f, 20))), + framework::dataset::make("DstQuantizationInfo", QuantizationInfo(0.2f, 5)))) +{ + validate(CLAccessor(_target), _reference, tolerance_qasymm8); +} + +TEST_SUITE_END() // QASYMM8 + +TEST_SUITE(QASYMM8_SIGNED) +FIXTURE_DATA_TEST_CASE(RunSmall, CLDirectConvolution3DQuantizedFixture, framework::DatasetMode::PRECOMMIT, + combine(combine(combine(combine(combine(combine(zip(zip(zip(zip(zip(zip(zip(zip(zip(zip(zip( + framework::dataset::make("InputShape", { TensorShape(7U, 5U, 3U, 13U, 3U), + TensorShape(15U, 7U, 11U, 7U), + TensorShape(19U, 5U, 16U, 4U), + TensorShape(13U, 5U, 17U, 2U) + }), + framework::dataset::make("StrideX", { 1, 3, 2, 1 })), + framework::dataset::make("StrideY", { 2, 1, 3, 1 })), + framework::dataset::make("StrideZ", { 3, 2, 1, 1 })), + framework::dataset::make("PadX", { 0, 2, 1, 0 })), + framework::dataset::make("PadY", { 1, 0, 2, 0 })), + framework::dataset::make("PadZ", { 2, 1, 0, 0 })), + framework::dataset::make("KernelWidth", { 3, 7, 5, 1 })), + framework::dataset::make("KernelHeight", { 5, 3, 7, 1 })), + framework::dataset::make("KernelDepth", { 7, 5, 3, 1 })), + framework::dataset::make("NumKernels", { 5, 3, 1, 11 })), + framework::dataset::make("HasBias", { true, true, true, false })), + framework::dataset::make("Activation", ActivationLayerInfo())), + framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)), + framework::dataset::make("DataLayout", DataLayout::NDHWC)), + framework::dataset::make("SrcQuantizationInfo", QuantizationInfo(0.1f, 10))), + framework::dataset::make("WeightsQuantizationInfo", QuantizationInfo(0.3f, 20))), + framework::dataset::make("DstQuantizationInfo", QuantizationInfo(0.2f, 5)))) +{ + validate(CLAccessor(_target), _reference, tolerance_qasymm8); +} + +FIXTURE_DATA_TEST_CASE(RunLarge, CLDirectConvolution3DQuantizedFixture, framework::DatasetMode::NIGHTLY, + combine(combine(combine(combine(combine(combine(zip(zip(zip(zip(zip(zip(zip(zip(zip(zip(zip( + framework::dataset::make("InputShape", { TensorShape(400U, 400U, 200U, 11U) }), + framework::dataset::make("StrideX", { 1 })), + framework::dataset::make("StrideY", { 1 })), + framework::dataset::make("StrideZ", { 1 })), + framework::dataset::make("PadX", { 1 })), + 
framework::dataset::make("PadY", { 1 })), + framework::dataset::make("PadZ", { 1 })), + framework::dataset::make("KernelWidth", { 9 })), + framework::dataset::make("KernelHeight", { 9 })), + framework::dataset::make("KernelDepth", { 9 })), + framework::dataset::make("NumKernels", { 300 })), + framework::dataset::make("HasBias", { true })), + framework::dataset::make("Activation", ActivationLayerInfo())), + framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)), + framework::dataset::make("DataLayout", DataLayout::NDHWC)), + framework::dataset::make("SrcQuantizationInfo", QuantizationInfo(0.1f, 10))), + framework::dataset::make("WeightsQuantizationInfo", QuantizationInfo(0.3f, 20))), + framework::dataset::make("DstQuantizationInfo", QuantizationInfo(0.2f, 5)))) +{ + validate(CLAccessor(_target), _reference, tolerance_qasymm8); +} + +TEST_SUITE_END() // QASYMM8_SIGNED + TEST_SUITE_END() // NDHWC TEST_SUITE_END() // DirectConvolution3D TEST_SUITE_END() // CL -- cgit v1.2.1