Diffstat (limited to 'tests/validation')
-rw-r--r-- | tests/validation/CL/MatMulKernel.cpp | 286
-rw-r--r-- | tests/validation/CL/MatMulLowpNativeKernel.cpp | 337
-rw-r--r-- | tests/validation/Helpers.cpp | 102
-rw-r--r-- | tests/validation/Helpers.h | 13
-rw-r--r-- | tests/validation/fixtures/MatMulKernelFixture.h | 130
5 files changed, 697 insertions, 171 deletions
diff --git a/tests/validation/CL/MatMulKernel.cpp b/tests/validation/CL/MatMulKernel.cpp index 9c19e42d04..ff872aaa0a 100644 --- a/tests/validation/CL/MatMulKernel.cpp +++ b/tests/validation/CL/MatMulKernel.cpp @@ -73,7 +73,7 @@ const auto k0_values_nightly_rhs_t = framework::dataset::make("K0", { 1, const auto k0_values_nightly_lhs_t_rhs_nt = framework::dataset::make("K0", { 1, 2, 3, 4, 5, 6, 7, 8 }); template <typename T> -using CLMatMulKernelFixture = MatMulKernelValidationFixture<T>; +using CLMatMulKernelFixture = MatMulKernelValidationFixture<T, ClMatMulNativeKernel>; TEST_SUITE(CL) TEST_SUITE(MatMulKernel) @@ -95,8 +95,8 @@ TEST_CASE(SupportedBlockSizes, framework::DatasetMode::ALL) { MatMulKernelInfo(false, false, 9, 1, 2), true }, { MatMulKernelInfo(false, false, 3, 16, 3), true }, { MatMulKernelInfo(false, false, 7, 3, 4), true }, - { MatMulKernelInfo(false, false, 7, 3, 4, true), false }, // N0 not in {4, 8, 16} - { MatMulKernelInfo(false, false, 7, 1, 4, true), false }, // N0 not in {4, 8, 16} + { MatMulKernelInfo(false, false, 7, 3, 4, true), false }, // N0 not in {4, 8, 16} + { MatMulKernelInfo(false, false, 7, 1, 4, true), false }, // N0 not in {4, 8, 16} { MatMulKernelInfo(false, false, 7, 12, 4, true), false }, // N0 not in {4, 8, 16} { MatMulKernelInfo(false, false, 7, 4, 4, true), true }, { MatMulKernelInfo(false, false, 7, 8, 4, true), true }, @@ -166,7 +166,7 @@ TEST_CASE(SupportedBlockSizes, framework::DatasetMode::ALL) if(!pair.first.export_rhs_to_cl_image || export_to_cl_image_supported) { - ARM_COMPUTE_EXPECT(bool(status) == pair.second, framework::LogLevel::ERRORS); + ARM_COMPUTE_EXPECT(bool(status) == pair.second, framework::LogLevel::ERRORS); } } } @@ -176,9 +176,9 @@ TEST_CASE(ExportToCLImage, framework::DatasetMode::ALL) // We skip this test if the hardware does not support exporting to CL Image if(image2d_from_buffer_supported(CLKernelLibrary::get().get_device())) { - constexpr size_t pixel_size = 4; - const size_t max_image_w = pixel_size * CLKernelLibrary::get().get_device().getInfo<CL_DEVICE_IMAGE2D_MAX_WIDTH>(); - const size_t max_image_h = CLKernelLibrary::get().get_device().getInfo<CL_DEVICE_IMAGE2D_MAX_HEIGHT>(); + constexpr size_t pixel_size = 4; + const size_t max_image_w = pixel_size * CLKernelLibrary::get().get_device().getInfo<CL_DEVICE_IMAGE2D_MAX_WIDTH>(); + const size_t max_image_h = CLKernelLibrary::get().get_device().getInfo<CL_DEVICE_IMAGE2D_MAX_HEIGHT>(); using ShapeConfigurationTuple = std::tuple<TensorShape, TensorShape, bool, bool, bool>; const std::vector<ShapeConfigurationTuple> shape_configurations = @@ -186,18 +186,18 @@ TEST_CASE(ExportToCLImage, framework::DatasetMode::ALL) // lhs_shape, rhs_shape, adj_lhs, adj_rhs, expected // Lhs t/Nt, Rhs Nt // Transposition of Lhs doesn't add any value to the tests, therefore always assumed false below - { TensorShape(5U, 1U), TensorShape(3U, 5U), false, false, false }, // N should be multiple of 4 + { TensorShape(5U, 1U), TensorShape(3U, 5U), false, false, false }, // N should be multiple of 4 { TensorShape(5U, 1U), TensorShape(14U, 5U), false, false, false }, // N should be multiple of 4 { TensorShape(5U, 1U), TensorShape(12U, 5U), false, false, true }, { TensorShape(5U, 1U), TensorShape(8U, 5U), false, false, true }, { TensorShape(5U, 1U), TensorShape(4U, 5U), false, false, true }, { TensorShape(max_image_h + 1, 1U), TensorShape(4U, max_image_h + 1), false, false, false }, // Cannot fit into CL Image memory's height - { TensorShape(5U, 1U), TensorShape(max_image_w + 1, 5U), false, false, 
false }, // Cannot fit into CL Image memory's width - { TensorShape(max_image_h, 1U), TensorShape(4U, max_image_h), false, false, true }, // Barely fits into CL Image memory's height - { TensorShape(5U, 1U), TensorShape(max_image_w, 5U), false, false, true }, // Barely fits into CL Image memory's width + { TensorShape(5U, 1U), TensorShape(max_image_w + 1, 5U), false, false, false }, // Cannot fit into CL Image memory's width + { TensorShape(max_image_h, 1U), TensorShape(4U, max_image_h), false, false, true }, // Barely fits into CL Image memory's height + { TensorShape(5U, 1U), TensorShape(max_image_w, 5U), false, false, true }, // Barely fits into CL Image memory's width // Lhs Nt/T , Rhs T - { TensorShape(5U, 1U), TensorShape(5U, 3U), false, true, false }, // K should be multiple of 4 + { TensorShape(5U, 1U), TensorShape(5U, 3U), false, true, false }, // K should be multiple of 4 { TensorShape(5U, 1U), TensorShape(5U, 14U), false, true, false }, // K should be multiple of 4 { TensorShape(4U, 1U), TensorShape(4U, 10U), false, true, true }, { TensorShape(8U, 1U), TensorShape(8U, 9U), false, true, true }, @@ -216,7 +216,10 @@ TEST_CASE(ExportToCLImage, framework::DatasetMode::ALL) const bool adj_rhs = std::get<3>(tuple); // We choose M0, N0, K0 equal to 4 so that they're always valid for CLImage in any combination - const MatMulKernelInfo matmul_kernel_info {adj_lhs, adj_rhs, 4, 4, 4, true /* export_rhs_to_cl_image */}; + const MatMulKernelInfo matmul_kernel_info + { + adj_lhs, adj_rhs, 4, 4, 4, true /* export_rhs_to_cl_image */ + }; TensorInfo output_info; Status status = ClMatMulNativeKernel::validate(&lhs_info, &rhs_info, &output_info, matmul_kernel_info); @@ -330,60 +333,60 @@ TEST_SUITE(Float) TEST_SUITE(FP32) TEST_SUITE(Buffer) FIXTURE_DATA_TEST_CASE(RunTiny, CLMatMulKernelFixture<float>, framework::DatasetMode::ALL, combine(combine(combine(combine(combine(combine(combine(datasets::TinyMatMulDataset(), - framework::dataset::make("pretransose_A", { false, true })), - framework::dataset::make("pretransose_B", { false, true })), - m0_values_precommit), - n0_values_precommit), - k0_values_precommit), - framework::dataset::make("export_rhs_to_cl_image", { false })), - framework::dataset::make("DataType", DataType::F32))) + framework::dataset::make("TransposeA", { false, true })), + framework::dataset::make("TransposeB", { false, true })), + m0_values_precommit), + n0_values_precommit), + k0_values_precommit), + framework::dataset::make("ExportRhsToCLImage", { false })), + framework::dataset::make("DataType", DataType::F32))) { // Validate output validate(CLAccessor(_target), _reference, tolerance_f32, 0.f, abs_tolerance_f32); } FIXTURE_DATA_TEST_CASE(RunSmall, CLMatMulKernelFixture<float>, framework::DatasetMode::ALL, combine(combine(combine(combine(combine(combine(combine(datasets::SmallMatMulDataset(), - framework::dataset::make("pretransose_A", { false, true })), - framework::dataset::make("pretransose_B", { false, true })), - m0_values_precommit), - n0_values_precommit), - k0_values_precommit), - framework::dataset::make("export_rhs_to_cl_image", { false })), - framework::dataset::make("DataType", DataType::F32))) + framework::dataset::make("TransposeA", { false, true })), + framework::dataset::make("TransposeB", { false, true })), + m0_values_precommit), + n0_values_precommit), + k0_values_precommit), + framework::dataset::make("ExportRhsToCLImage", { false })), + framework::dataset::make("DataType", DataType::F32))) { // Validate output validate(CLAccessor(_target), _reference, 
tolerance_f32, 0.f, abs_tolerance_f32); } FIXTURE_DATA_TEST_CASE(RunLargeNoTranspose, CLMatMulKernelFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(combine(combine(combine(datasets::LargeMatMulDataset(), - framework::dataset::make("pretransose_A", { false })), - framework::dataset::make("pretransose_B", { false })), + framework::dataset::make("TransposeA", { false })), + framework::dataset::make("TransposeB", { false })), m0_values_nightly_lhs_nt), n0_values_nightly_rhs_nt), k0_values_nightly_lhs_nt_rhs_nt), - framework::dataset::make("export_rhs_to_cl_image", { false })), + framework::dataset::make("ExportRhsToCLImage", { false })), framework::dataset::make("DataType", DataType::F32))) { // Validate output validate(CLAccessor(_target), _reference, tolerance_f32, 0.f, abs_tolerance_f32); } FIXTURE_DATA_TEST_CASE(RunLargeRhsTransposed, CLMatMulKernelFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(combine(combine(combine(datasets::LargeMatMulDataset(), - framework::dataset::make("pretransose_A", { false })), - framework::dataset::make("pretransose_B", { true })), + framework::dataset::make("TransposeA", { false })), + framework::dataset::make("TransposeB", { true })), m0_values_nightly_lhs_nt), n0_values_nightly_rhs_t), k0_values_nightly_rhs_t), - framework::dataset::make("export_rhs_to_cl_image", { false })), + framework::dataset::make("ExportRhsToCLImage", { false })), framework::dataset::make("DataType", DataType::F32))) { // Validate output validate(CLAccessor(_target), _reference, tolerance_f32, 0.f, abs_tolerance_f32); } FIXTURE_DATA_TEST_CASE(RunLargeLhsTransposed, CLMatMulKernelFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(combine(combine(combine(datasets::LargeMatMulDataset(), - framework::dataset::make("pretransose_A", { true })), - framework::dataset::make("pretransose_B", { false })), + framework::dataset::make("TransposeA", { true })), + framework::dataset::make("TransposeB", { false })), m0_values_nightly_lhs_t), n0_values_nightly_rhs_nt), k0_values_nightly_lhs_t_rhs_nt), - framework::dataset::make("export_rhs_to_cl_image", { false })), + framework::dataset::make("ExportRhsToCLImage", { false })), framework::dataset::make("DataType", DataType::F32))) { // Validate output @@ -391,12 +394,12 @@ FIXTURE_DATA_TEST_CASE(RunLargeLhsTransposed, CLMatMulKernelFixture<float>, fram } FIXTURE_DATA_TEST_CASE(RunLargeLhsTransposedRhsTransposed, CLMatMulKernelFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(combine(combine(combine(datasets::LargeMatMulDataset(), - framework::dataset::make("pretransose_A", { true })), - framework::dataset::make("pretransose_B", { true })), - m0_values_nightly_lhs_t), - n0_values_nightly_rhs_t), - k0_values_nightly_rhs_t), - framework::dataset::make("export_rhs_to_cl_image", { false })), + framework::dataset::make("TransposeA", { true })), + framework::dataset::make("TransposeB", { true })), + m0_values_nightly_lhs_t), + n0_values_nightly_rhs_t), + k0_values_nightly_rhs_t), + framework::dataset::make("ExportRhsToCLImage", { false })), framework::dataset::make("DataType", DataType::F32))) { // Validate output @@ -405,13 +408,13 @@ FIXTURE_DATA_TEST_CASE(RunLargeLhsTransposedRhsTransposed, CLMatMulKernelFixture // Running High Dimensional test is enough for FP32, because we're stressing the number of dimensions, not data type or M0/N0/K0 // It's a good idea to test for each Lhs/Rhs T/NT combinations because they're different CL 
kernels FIXTURE_DATA_TEST_CASE(RunHighDimensional, CLMatMulKernelFixture<float>, framework::DatasetMode::ALL, combine(combine(combine(combine(combine(combine(combine(datasets::HighDimensionalMatMulDataset(), - framework::dataset::make("pretransose_A", { false, true })), - framework::dataset::make("pretransose_B", { false, true })), - framework::dataset::make("M0", { 2 })), - framework::dataset::make("N0", { 2 })), - framework::dataset::make("K0", { 2 })), - framework::dataset::make("export_rhs_to_cl_image", { false })), - framework::dataset::make("DataType", DataType::F32))) + framework::dataset::make("TransposeA", { false, true })), + framework::dataset::make("TransposeB", { false, true })), + framework::dataset::make("M0", { 2 })), + framework::dataset::make("N0", { 2 })), + framework::dataset::make("K0", { 2 })), + framework::dataset::make("ExportRhsToCLImage", { false })), + framework::dataset::make("DataType", DataType::F32))) { // Validate output validate(CLAccessor(_target), _reference, tolerance_f32, 0.f, abs_tolerance_f32); @@ -419,14 +422,15 @@ FIXTURE_DATA_TEST_CASE(RunHighDimensional, CLMatMulKernelFixture<float>, framewo TEST_SUITE_END() // Buffer TEST_SUITE(ExportRhsToCLImage) -FIXTURE_DATA_TEST_CASE(RunSmallRhsNotTransposed, CLMatMulKernelFixture<float>, framework::DatasetMode::ALL, combine(combine(combine(combine(combine(combine(combine(datasets::SmallMatMulDatasetRhsExportToCLImageRhsNT(), - framework::dataset::make("pretransose_A", { true, false })), - framework::dataset::make("pretransose_B", { false })), - framework::dataset::make("M0", { 2 })), - framework::dataset::make("N0", { 4, 8, 16 })), - framework::dataset::make("K0", { 2, 4 })), - framework::dataset::make("export_rhs_to_cl_image", { true })), - framework::dataset::make("DataType", DataType::F32))) +FIXTURE_DATA_TEST_CASE(RunSmallRhsNotTransposed, CLMatMulKernelFixture<float>, framework::DatasetMode::ALL, + combine(combine(combine(combine(combine(combine(combine(datasets::SmallMatMulDatasetRhsExportToCLImageRhsNT(), + framework::dataset::make("TransposeA", { true, false })), + framework::dataset::make("TransposeB", { false })), + framework::dataset::make("M0", { 2 })), + framework::dataset::make("N0", { 4, 8, 16 })), + framework::dataset::make("K0", { 2, 4 })), + framework::dataset::make("ExportRhsToCLImage", { true })), + framework::dataset::make("DataType", DataType::F32))) { // Validate output if(_device_supports_export_to_cl_image) @@ -434,14 +438,15 @@ FIXTURE_DATA_TEST_CASE(RunSmallRhsNotTransposed, CLMatMulKernelFixture<float>, f validate(CLAccessor(_target), _reference, tolerance_f32, 0.f, abs_tolerance_f32); } } -FIXTURE_DATA_TEST_CASE(RunLargeRhsNotTransposed, CLMatMulKernelFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(combine(combine(combine(datasets::LargeMatMulDatasetRhsExportToCLImageRhsNT(), - framework::dataset::make("pretransose_A", { true, false })), - framework::dataset::make("pretransose_B", { false })), - framework::dataset::make("M0", { 2 })), // Choices of M0 does not matter much because it's related to Lhs tensor - framework::dataset::make("N0", { 4, 8, 16 })), - framework::dataset::make("K0", { 1, 2, 3, 4 })), - framework::dataset::make("export_rhs_to_cl_image", { true })), - framework::dataset::make("DataType", DataType::F32))) +FIXTURE_DATA_TEST_CASE(RunLargeRhsNotTransposed, CLMatMulKernelFixture<float>, framework::DatasetMode::NIGHTLY, + combine(combine(combine(combine(combine(combine(combine(datasets::LargeMatMulDatasetRhsExportToCLImageRhsNT(), + 
framework::dataset::make("TransposeA", { true, false })), + framework::dataset::make("TransposeB", { false })), + framework::dataset::make("M0", { 2 })), // Choices of M0 does not matter much because it's related to Lhs tensor + framework::dataset::make("N0", { 4, 8, 16 })), + framework::dataset::make("K0", { 1, 2, 3, 4 })), + framework::dataset::make("ExportRhsToCLImage", { true })), + framework::dataset::make("DataType", DataType::F32))) { // Validate output if(_device_supports_export_to_cl_image) @@ -449,14 +454,15 @@ FIXTURE_DATA_TEST_CASE(RunLargeRhsNotTransposed, CLMatMulKernelFixture<float>, f validate(CLAccessor(_target), _reference, tolerance_f32, 0.f, abs_tolerance_f32); } } -FIXTURE_DATA_TEST_CASE(RunSmallRhsTransposed, CLMatMulKernelFixture<float>, framework::DatasetMode::ALL, combine(combine(combine(combine(combine(combine(combine(datasets::SmallMatMulDatasetRhsExportToCLImageRhsT(), - framework::dataset::make("pretransose_A", { true, false })), - framework::dataset::make("pretransose_B", { true })), - framework::dataset::make("M0", { 2 })), - framework::dataset::make("N0", { 2, 4 })), - framework::dataset::make("K0", { 4, 8, 16 })), - framework::dataset::make("export_rhs_to_cl_image", { true })), - framework::dataset::make("DataType", DataType::F32))) +FIXTURE_DATA_TEST_CASE(RunSmallRhsTransposed, CLMatMulKernelFixture<float>, framework::DatasetMode::ALL, + combine(combine(combine(combine(combine(combine(combine(datasets::SmallMatMulDatasetRhsExportToCLImageRhsT(), + framework::dataset::make("TransposeA", { true, false })), + framework::dataset::make("TransposeB", { true })), + framework::dataset::make("M0", { 2 })), + framework::dataset::make("N0", { 2, 4 })), + framework::dataset::make("K0", { 4, 8, 16 })), + framework::dataset::make("ExportRhsToCLImage", { true })), + framework::dataset::make("DataType", DataType::F32))) { // Validate output if(_device_supports_export_to_cl_image) @@ -464,14 +470,15 @@ FIXTURE_DATA_TEST_CASE(RunSmallRhsTransposed, CLMatMulKernelFixture<float>, fram validate(CLAccessor(_target), _reference, tolerance_f32, 0.f, abs_tolerance_f32); } } -FIXTURE_DATA_TEST_CASE(RunLargeRhsTransposed, CLMatMulKernelFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(combine(combine(combine(datasets::LargeMatMulDatasetRhsExportToCLImageRhsT(), - framework::dataset::make("pretransose_A", { true, false })), - framework::dataset::make("pretransose_B", { true })), - framework::dataset::make("M0", { 2 })), // Choices of M0 does not matter much because it's related to Lhs tensor - framework::dataset::make("N0", { 1, 2, 3, 4 })), - framework::dataset::make("K0", { 4, 8, 16 })), - framework::dataset::make("export_rhs_to_cl_image", { true })), - framework::dataset::make("DataType", DataType::F32))) +FIXTURE_DATA_TEST_CASE(RunLargeRhsTransposed, CLMatMulKernelFixture<float>, framework::DatasetMode::NIGHTLY, + combine(combine(combine(combine(combine(combine(combine(datasets::LargeMatMulDatasetRhsExportToCLImageRhsT(), + framework::dataset::make("TransposeA", { true, false })), + framework::dataset::make("TransposeB", { true })), + framework::dataset::make("M0", { 2 })), // Choices of M0 does not matter much because it's related to Lhs tensor + framework::dataset::make("N0", { 1, 2, 3, 4 })), + framework::dataset::make("K0", { 4, 8, 16 })), + framework::dataset::make("ExportRhsToCLImage", { true })), + framework::dataset::make("DataType", DataType::F32))) { // Validate output if(_device_supports_export_to_cl_image) @@ -485,61 +492,62 @@ 
TEST_SUITE_END() // FP32 TEST_SUITE(FP16) TEST_SUITE(Buffer) FIXTURE_DATA_TEST_CASE(RunSmall, CLMatMulKernelFixture<half>, framework::DatasetMode::ALL, combine(combine(combine(combine(combine(combine(combine(datasets::SmallMatMulDataset(), - framework::dataset::make("pretransose_A", { false, true })), - framework::dataset::make("pretransose_B", { false, true })), - m0_values_precommit), - n0_values_precommit), - k0_values_precommit), - framework::dataset::make("export_rhs_to_cl_image", { false })), - framework::dataset::make("DataType", DataType::F16))) + framework::dataset::make("TransposeA", { false, true })), + framework::dataset::make("TransposeB", { false, true })), + m0_values_precommit), + n0_values_precommit), + k0_values_precommit), + framework::dataset::make("ExportRhsToCLImage", { false })), + framework::dataset::make("DataType", DataType::F16))) { // Validate output validate(CLAccessor(_target), _reference, tolerance_f16, 0.f, abs_tolerance_f16); } FIXTURE_DATA_TEST_CASE(RunLargeNoTranspose, CLMatMulKernelFixture<half>, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(combine(combine(combine(datasets::LargeMatMulDataset(), - framework::dataset::make("pretransose_A", { false })), - framework::dataset::make("pretransose_B", { false })), + framework::dataset::make("TransposeA", { false })), + framework::dataset::make("TransposeB", { false })), m0_values_nightly_lhs_nt), n0_values_nightly_rhs_nt), k0_values_nightly_lhs_nt_rhs_nt), - framework::dataset::make("export_rhs_to_cl_image", { false })), + framework::dataset::make("ExportRhsToCLImage", { false })), framework::dataset::make("DataType", DataType::F16))) { // Validate output validate(CLAccessor(_target), _reference, tolerance_f16, 0.f, abs_tolerance_f16); } FIXTURE_DATA_TEST_CASE(RunLargeRhsTransposed, CLMatMulKernelFixture<half>, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(combine(combine(combine(datasets::LargeMatMulDataset(), - framework::dataset::make("pretransose_A", { false })), - framework::dataset::make("pretransose_B", { true })), + framework::dataset::make("TransposeA", { false })), + framework::dataset::make("TransposeB", { true })), m0_values_nightly_lhs_nt), n0_values_nightly_rhs_t), k0_values_nightly_rhs_t), - framework::dataset::make("export_rhs_to_cl_image", { false })), + framework::dataset::make("ExportRhsToCLImage", { false })), framework::dataset::make("DataType", DataType::F16))) { // Validate output validate(CLAccessor(_target), _reference, tolerance_f16, 0.f, abs_tolerance_f16); } FIXTURE_DATA_TEST_CASE(RunLargeLhsTransposed, CLMatMulKernelFixture<half>, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(combine(combine(combine(datasets::LargeMatMulDataset(), - framework::dataset::make("pretransose_A", { true })), - framework::dataset::make("pretransose_B", { false })), + framework::dataset::make("TransposeA", { true })), + framework::dataset::make("TransposeB", { false })), m0_values_nightly_lhs_t), n0_values_nightly_rhs_nt), k0_values_nightly_lhs_t_rhs_nt), - framework::dataset::make("export_rhs_to_cl_image", { false })), + framework::dataset::make("ExportRhsToCLImage", { false })), framework::dataset::make("DataType", DataType::F16))) { // Validate output validate(CLAccessor(_target), _reference, tolerance_f16, 0.f, abs_tolerance_f16); } -FIXTURE_DATA_TEST_CASE(RunLargeLhsTransposedRhsTransposed, CLMatMulKernelFixture<half>, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(combine(combine(combine(datasets::LargeMatMulDataset(), - 
framework::dataset::make("pretransose_A", { true })), - framework::dataset::make("pretransose_B", { true })), - m0_values_nightly_lhs_t), - n0_values_nightly_rhs_t), - k0_values_nightly_rhs_t), - framework::dataset::make("export_rhs_to_cl_image", { false })), - framework::dataset::make("DataType", DataType::F16))) +FIXTURE_DATA_TEST_CASE(RunLargeLhsTransposedRhsTransposed, CLMatMulKernelFixture<half>, framework::DatasetMode::NIGHTLY, + combine(combine(combine(combine(combine(combine(combine(datasets::LargeMatMulDataset(), + framework::dataset::make("TransposeA", { true })), + framework::dataset::make("TransposeB", { true })), + m0_values_nightly_lhs_t), + n0_values_nightly_rhs_t), + k0_values_nightly_rhs_t), + framework::dataset::make("ExportRhsToCLImage", { false })), + framework::dataset::make("DataType", DataType::F16))) { // Validate output validate(CLAccessor(_target), _reference, tolerance_f16, 0.f, abs_tolerance_f16); @@ -547,14 +555,15 @@ FIXTURE_DATA_TEST_CASE(RunLargeLhsTransposedRhsTransposed, CLMatMulKernelFixture TEST_SUITE_END() // Buffer TEST_SUITE(ExportRhsToCLImage) -FIXTURE_DATA_TEST_CASE(RunSmallRhsNotTransposed, CLMatMulKernelFixture<half>, framework::DatasetMode::ALL, combine(combine(combine(combine(combine(combine(combine(datasets::SmallMatMulDatasetRhsExportToCLImageRhsNT(), - framework::dataset::make("pretransose_A", { true, false })), - framework::dataset::make("pretransose_B", { false })), - framework::dataset::make("M0", { 2 })), - framework::dataset::make("N0", { 4, 8, 16 })), - framework::dataset::make("K0", { 2, 4 })), - framework::dataset::make("export_rhs_to_cl_image", { true })), - framework::dataset::make("DataType", DataType::F16))) +FIXTURE_DATA_TEST_CASE(RunSmallRhsNotTransposed, CLMatMulKernelFixture<half>, framework::DatasetMode::ALL, + combine(combine(combine(combine(combine(combine(combine(datasets::SmallMatMulDatasetRhsExportToCLImageRhsNT(), + framework::dataset::make("TransposeA", { true, false })), + framework::dataset::make("TransposeB", { false })), + framework::dataset::make("M0", { 2 })), + framework::dataset::make("N0", { 4, 8, 16 })), + framework::dataset::make("K0", { 2, 4 })), + framework::dataset::make("ExportRhsToCLImage", { true })), + framework::dataset::make("DataType", DataType::F16))) { // Validate output if(_device_supports_export_to_cl_image) @@ -562,14 +571,15 @@ FIXTURE_DATA_TEST_CASE(RunSmallRhsNotTransposed, CLMatMulKernelFixture<half>, fr validate(CLAccessor(_target), _reference, tolerance_f16, 0.f, abs_tolerance_f16); } } -FIXTURE_DATA_TEST_CASE(RunLargeRhsNotTransposed, CLMatMulKernelFixture<half>, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(combine(combine(combine(datasets::LargeMatMulDatasetRhsExportToCLImageRhsNT(), - framework::dataset::make("pretransose_A", { true, false })), - framework::dataset::make("pretransose_B", { false })), - framework::dataset::make("M0", { 2 })), // Choices of M0 does not matter much because it's related to Lhs tensor - framework::dataset::make("N0", { 4, 8, 16 })), - framework::dataset::make("K0", { 1, 2, 3, 4 })), - framework::dataset::make("export_rhs_to_cl_image", { true })), - framework::dataset::make("DataType", DataType::F16))) +FIXTURE_DATA_TEST_CASE(RunLargeRhsNotTransposed, CLMatMulKernelFixture<half>, framework::DatasetMode::NIGHTLY, + combine(combine(combine(combine(combine(combine(combine(datasets::LargeMatMulDatasetRhsExportToCLImageRhsNT(), + framework::dataset::make("TransposeA", { true, false })), + framework::dataset::make("TransposeB", { false })), + 
framework::dataset::make("M0", { 2 })), // Choices of M0 does not matter much because it's related to Lhs tensor + framework::dataset::make("N0", { 4, 8, 16 })), + framework::dataset::make("K0", { 1, 2, 3, 4 })), + framework::dataset::make("ExportRhsToCLImage", { true })), + framework::dataset::make("DataType", DataType::F16))) { // Validate output if(_device_supports_export_to_cl_image) @@ -577,14 +587,15 @@ FIXTURE_DATA_TEST_CASE(RunLargeRhsNotTransposed, CLMatMulKernelFixture<half>, fr validate(CLAccessor(_target), _reference, tolerance_f16, 0.f, abs_tolerance_f16); } } -FIXTURE_DATA_TEST_CASE(RunSmallRhsTransposed, CLMatMulKernelFixture<half>, framework::DatasetMode::ALL, combine(combine(combine(combine(combine(combine(combine(datasets::SmallMatMulDatasetRhsExportToCLImageRhsT(), - framework::dataset::make("pretransose_A", { true, false })), - framework::dataset::make("pretransose_B", { true })), - framework::dataset::make("M0", { 2 })), - framework::dataset::make("N0", { 2, 4 })), - framework::dataset::make("K0", { 4, 8, 16 })), - framework::dataset::make("export_rhs_to_cl_image", { true })), - framework::dataset::make("DataType", DataType::F16))) +FIXTURE_DATA_TEST_CASE(RunSmallRhsTransposed, CLMatMulKernelFixture<half>, framework::DatasetMode::ALL, + combine(combine(combine(combine(combine(combine(combine(datasets::SmallMatMulDatasetRhsExportToCLImageRhsT(), + framework::dataset::make("TransposeA", { true, false })), + framework::dataset::make("TransposeB", { true })), + framework::dataset::make("M0", { 2 })), + framework::dataset::make("N0", { 2, 4 })), + framework::dataset::make("K0", { 4, 8, 16 })), + framework::dataset::make("ExportRhsToCLImage", { true })), + framework::dataset::make("DataType", DataType::F16))) { // Validate output if(_device_supports_export_to_cl_image) @@ -592,14 +603,15 @@ FIXTURE_DATA_TEST_CASE(RunSmallRhsTransposed, CLMatMulKernelFixture<half>, frame validate(CLAccessor(_target), _reference, tolerance_f16, 0.f, abs_tolerance_f16); } } -FIXTURE_DATA_TEST_CASE(RunLargeRhsTransposed, CLMatMulKernelFixture<half>, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(combine(combine(combine(datasets::LargeMatMulDatasetRhsExportToCLImageRhsT(), - framework::dataset::make("pretransose_A", { true, false })), - framework::dataset::make("pretransose_B", { true })), - framework::dataset::make("M0", { 2 })), // Choices of M0 does not matter much because it's related to Lhs tensor - framework::dataset::make("N0", { 1, 2, 3, 4 })), - framework::dataset::make("K0", { 4, 8, 16 })), - framework::dataset::make("export_rhs_to_cl_image", { true })), - framework::dataset::make("DataType", DataType::F16))) +FIXTURE_DATA_TEST_CASE(RunLargeRhsTransposed, CLMatMulKernelFixture<half>, framework::DatasetMode::NIGHTLY, + combine(combine(combine(combine(combine(combine(combine(datasets::LargeMatMulDatasetRhsExportToCLImageRhsT(), + framework::dataset::make("TransposeA", { true, false })), + framework::dataset::make("TransposeB", { true })), + framework::dataset::make("M0", { 2 })), // Choices of M0 does not matter much because it's related to Lhs tensor + framework::dataset::make("N0", { 1, 2, 3, 4 })), + framework::dataset::make("K0", { 4, 8, 16 })), + framework::dataset::make("ExportRhsToCLImage", { true })), + framework::dataset::make("DataType", DataType::F16))) { // Validate output if(_device_supports_export_to_cl_image) diff --git a/tests/validation/CL/MatMulLowpNativeKernel.cpp b/tests/validation/CL/MatMulLowpNativeKernel.cpp new file mode 100644 index 
0000000000..5932fa7c21 --- /dev/null +++ b/tests/validation/CL/MatMulLowpNativeKernel.cpp @@ -0,0 +1,337 @@ +/* + * Copyright (c) 2023 Arm Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include "arm_compute/runtime/CL/CLTensor.h" + +#include "src/gpu/cl/kernels/ClMatMulLowpNativeKernel.h" + +#include "tests/datasets/LargeMatMulDataset.h" +#include "tests/datasets/SmallMatMulDataset.h" +#include "tests/framework/Macros.h" +#include "tests/framework/datasets/Datasets.h" +#include "tests/validation/Validation.h" +#include "tests/validation/fixtures/MatMulKernelFixture.h" +#include "tests/validation/reference/Permute.h" + +#include <tuple> + +namespace arm_compute +{ +namespace test +{ +namespace validation +{ +namespace +{ +constexpr AbsoluteTolerance<float> tolerance_quant(1); /**< Tolerance value for comparing reference's output against implementation's output for quantized data types */ +} +template <typename T> +using CLMatMulLowpNativeKernelFixture = MatMulKernelValidationFixture<T, ClMatMulLowpNativeKernel>; + +/** M0 values to test --precommit*/ +const auto m0_values_precommit = framework::dataset::make("M0", { 1, 3 }); + +/** N0 values to test --precommit*/ +const auto n0_values_precommit = framework::dataset::make("N0", { 2, 4 }); + +/** K0 values to test --precommit*/ +const auto k0_values_precommit = framework::dataset::make("K0", { 2, 3 }); + +/** M0 values to test --nightly*/ +const auto m0_values_nightly_lhs_nt = framework::dataset::make("M0", { 1, 2, 3, 4, 5, 6, 7, 8 }); +const auto m0_values_nightly_lhs_t = framework::dataset::make("M0", { 1, 2, 3, 4, 8 }); + +/** N0 values to test --nightly*/ +const auto n0_values_nightly_rhs_nt = framework::dataset::make("N0", { 1, 2, 3, 4, 8, 16 }); +// const auto n0_values_nightly_rhs_t = framework::dataset::make("N0", { 1, 2, 3, 4, 8 }); + +/** K0 values to test --nightly*/ +const auto k0_values_nightly_lhs_nt_rhs_nt = framework::dataset::make("K0", { 1, 2, 3, 4, 8, 16 }); +// const auto k0_values_nightly_rhs_t = framework::dataset::make("K0", { 1, 2, 3, 4, 8 }); +const auto k0_values_nightly_lhs_t_rhs_nt = framework::dataset::make("K0", { 1, 2, 3, 4, 5, 6, 7, 8 }); + +TEST_SUITE(CL) +TEST_SUITE(MatMulLowpNativeKernel) +TEST_SUITE(Validate) + +TEST_CASE(SupportedKernelConfigurations, framework::DatasetMode::ALL) +{ + using MatMulConfigurationPair = std::pair<MatMulKernelInfo, bool>; + + const std::vector<MatMulConfigurationPair> supported_block_sizes = + { + 
// MatMulKernelInfo(adj_lhs, adj_rhs, M0, N0, K0, export_rhs_to_cl_image = false) + // Lhs not-transposed, Rhs-not-transposed + { MatMulKernelInfo(false, false, 0, 1, 1), false }, // M0 should be > 0 + { MatMulKernelInfo(false, false, 3, 5, 1), false }, // N0 not in {1, 2, 3, 4, 8, 16} + { MatMulKernelInfo(false, false, 3, 6, 1), false }, // N0 not in {1, 2, 3, 4, 8, 16} + { MatMulKernelInfo(false, false, 3, 3, 17), false }, // K0 not in {1, 2, 3, 4, 8, 16} + { MatMulKernelInfo(false, false, 3, 3, 7), false }, // K0 not in {1, 2, 3, 4, 8, 16} + { MatMulKernelInfo(false, false, 9, 1, 2), true }, + { MatMulKernelInfo(false, false, 3, 16, 3), true }, + { MatMulKernelInfo(false, false, 7, 3, 4), true }, + { MatMulKernelInfo(false, false, 7, 3, 4, true), true }, // export to CLImage is unsupported for quantized types + }; + + // Set big enough shapes so that block sizes are not truncated. Also, set all dimensions equal + // so that it doesn't fail for different NT/T configurations. We aim to test the block sizes here, + // not the shapes themselves. + const TensorInfo lhs_info = TensorInfo(TensorShape(100U, 100U), 1, DataType::QASYMM8_SIGNED); + const TensorInfo rhs_info = TensorInfo(TensorShape(100U, 100U), 1, DataType::QASYMM8_SIGNED); + + for(auto &pair : supported_block_sizes) + { + TensorInfo output_info; + Status status = ClMatMulLowpNativeKernel::validate(&lhs_info, &rhs_info, &output_info, pair.first); + + ARM_COMPUTE_EXPECT(bool(status) == pair.second, framework::LogLevel::ERRORS); + } +} + +TEST_CASE(ValidateInputShapes, framework::DatasetMode::ALL) +{ + // Configurations are assumed to be Nt/Nt, but will be transposed inside the test to test other configurations + using ShapeConfigurationTuple = std::tuple<TensorShape, TensorShape, bool>; + const std::vector<ShapeConfigurationTuple> shape_configurations = + { + { TensorShape(5U, 1U), TensorShape(3U, 5U), true }, + { TensorShape(10U, 12U), TensorShape(3U, 10U), true }, + { TensorShape(8U, 4U), TensorShape(2U, 8U), true }, + { TensorShape(8U, 4U), TensorShape(2U, 5U), false }, // Mismatch in the K dimension + { TensorShape(5U, 0U), TensorShape(2U, 5U), false }, // Invalid dimension + { TensorShape(5U, 4U, 3U, 4U, 5U, 6U), TensorShape(2U, 5U, 3U, 4U, 5U, 6U), true }, + { TensorShape(5U, 4U, 3U, 4U, 5U, 1U), TensorShape(2U, 5U, 3U, 4U, 5U, 6U), false }, // no batch broadcasting + { TensorShape(5U, 4U, 3U, 4U, 9U, 6U), TensorShape(2U, 5U, 3U, 4U, 5U, 6U), false }, // mismatch in batch dimension + }; + + for(auto &tuple : shape_configurations) + { + const bool expected = std::get<2>(tuple); + + for(bool adj_lhs : + { + false, true + }) + { + for(bool adj_rhs : + { + false, true + }) + { + TensorShape lhs_shape = std::get<0>(tuple); + TensorShape rhs_shape = std::get<1>(tuple); + + if(adj_lhs) + { + permute(lhs_shape, PermutationVector(1U, 0U)); + } + + if(adj_rhs) + { + permute(rhs_shape, PermutationVector(1U, 0U)); + } + + const TensorInfo lhs_info = TensorInfo(lhs_shape, 1, DataType::QASYMM8_SIGNED); + const TensorInfo rhs_info = TensorInfo(rhs_shape, 1, DataType::QASYMM8_SIGNED); + TensorInfo output_info; + + MatMulKernelInfo matmul_kernel_info{ adj_lhs, adj_rhs, 1, 1, 1, false /* export_rhs_to_cl_image */ }; + + Status status = ClMatMulLowpNativeKernel::validate(&lhs_info, &rhs_info, &output_info, matmul_kernel_info); + ARM_COMPUTE_EXPECT(bool(status) == expected, framework::LogLevel::ERRORS); + } + } + } +} + +TEST_CASE(ValidateDataTypes, framework::DatasetMode::ALL) +{ + using DataTypeConfigurationTuple = std::tuple<DataType, 
DataType, DataType, bool>; + const std::vector<DataTypeConfigurationTuple> data_type_configurations = + { + { DataType::F32, DataType::F32, DataType::F32, false }, // no floating point types + { DataType::F16, DataType::F16, DataType::F16, false }, // no floating point types + { DataType::F64, DataType::F64, DataType::F64, false }, // no double precision + { DataType::QASYMM8, DataType::QASYMM8, DataType::QASYMM8, true }, + { DataType::QASYMM8_SIGNED, DataType::QASYMM8_SIGNED, DataType::QASYMM8_SIGNED, true }, + { DataType::QSYMM8_PER_CHANNEL, DataType::QSYMM8_PER_CHANNEL, DataType::QSYMM8_PER_CHANNEL, false }, // only qasymm8/qasymm8_signed is supported + { DataType::QASYMM16, DataType::QASYMM16, DataType::QASYMM16, false }, // only qasymm8/qasymm8_signed is supported + { DataType::QSYMM16, DataType::QSYMM16, DataType::QSYMM16, false }, // only qasymm8/qasymm8_signed is supported + { DataType::QSYMM8, DataType::QSYMM8, DataType::QSYMM8, false }, // only qasymm8/qasymm8_signed is supported + { DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::QASYMM8, false }, // no mixed data types + { DataType::S64, DataType::S64, DataType::S64, false }, // no integral types + { DataType::S32, DataType::S32, DataType::S32, false }, // no integral types + { DataType::S16, DataType::S16, DataType::S16, false }, // no integral types + { DataType::S8, DataType::S8, DataType::S8, false }, // no integral types + { DataType::U64, DataType::U64, DataType::U64, false }, // no integral types + { DataType::U32, DataType::U32, DataType::U32, false }, // no integral types + { DataType::U16, DataType::U16, DataType::U16, false }, // no integral types + { DataType::U8, DataType::U8, DataType::U8, false }, // no integral types + }; + + // It's enough to test a single shape and block size configuration while checking data types + const TensorShape shape = TensorShape(10U, 10U); + const MatMulKernelInfo matmul_kernel_info{ false, false, 1, 1, 1, false }; + for(auto &tuple : data_type_configurations) + { + const bool expected = std::get<3>(tuple); + + const TensorInfo lhs_info(shape, 1, std::get<0>(tuple)); + const TensorInfo rhs_info(shape, 1, std::get<1>(tuple)); + TensorInfo output_info(shape, 1, std::get<2>(tuple)); + + Status status = ClMatMulLowpNativeKernel::validate(&lhs_info, &rhs_info, &output_info, matmul_kernel_info); + ARM_COMPUTE_EXPECT(bool(status) == expected, framework::LogLevel::ERRORS); + } +} + +TEST_SUITE_END() // Validate + +TEST_SUITE(Quantized) +TEST_SUITE(QASYMM8_SIGNED) +FIXTURE_DATA_TEST_CASE(RunTiny, CLMatMulLowpNativeKernelFixture<int8_t>, framework::DatasetMode::ALL, combine(combine(combine(combine(combine(combine(combine(datasets::TinyMatMulDataset(), + framework::dataset::make("TransposeA", { true, false })), + framework::dataset::make("TransposeB", { false })), + m0_values_precommit), + n0_values_precommit), + k0_values_precommit), + framework::dataset::make("ExportRhsToCLImage", { false })), + framework::dataset::make("DataType", DataType::QASYMM8_SIGNED))) +{ + // Validate output + validate(CLAccessor(_target), _reference, tolerance_quant); +} +FIXTURE_DATA_TEST_CASE(RunSmall, CLMatMulLowpNativeKernelFixture<int8_t>, framework::DatasetMode::ALL, combine(combine(combine(combine(combine(combine(combine(datasets::SmallMatMulDataset(), + framework::dataset::make("TransposeA", { true, false })), + framework::dataset::make("TransposeB", { false })), + m0_values_precommit), + n0_values_precommit), + k0_values_precommit), + framework::dataset::make("ExportRhsToCLImage", { false })), + 
framework::dataset::make("DataType", DataType::QASYMM8_SIGNED))) +{ + // Validate output + validate(CLAccessor(_target), _reference, tolerance_quant); +} +FIXTURE_DATA_TEST_CASE(RunLargeNoTranspose, CLMatMulLowpNativeKernelFixture<int8_t>, framework::DatasetMode::NIGHTLY, + combine(combine(combine(combine(combine(combine(combine(datasets::LargeMatMulDataset(), + framework::dataset::make("TransposeA", { false })), + framework::dataset::make("TransposeB", { false })), + m0_values_nightly_lhs_nt), + n0_values_nightly_rhs_nt), + k0_values_nightly_lhs_nt_rhs_nt), + framework::dataset::make("ExportRhsToCLImage", { false })), + framework::dataset::make("DataType", DataType::QASYMM8_SIGNED))) +{ + // Validate output + validate(CLAccessor(_target), _reference, tolerance_quant); +} +FIXTURE_DATA_TEST_CASE(RunLargeLhsTransposed, CLMatMulLowpNativeKernelFixture<int8_t>, framework::DatasetMode::NIGHTLY, + combine(combine(combine(combine(combine(combine(combine(datasets::LargeMatMulDataset(), + framework::dataset::make("TransposeA", { true })), + framework::dataset::make("TransposeB", { false })), + m0_values_nightly_lhs_t), + n0_values_nightly_rhs_nt), + k0_values_nightly_lhs_t_rhs_nt), + framework::dataset::make("ExportRhsToCLImage", { false })), + framework::dataset::make("DataType", DataType::QASYMM8_SIGNED))) +{ + // Validate output + validate(CLAccessor(_target), _reference, tolerance_quant); +} +// Running High Dimensional test is enough for qasymm8_signed, because we're stressing the number of dimensions, not data type or M0/N0/K0 +// It's a good idea to test for each Lhs/Rhs T/NT combinations because they're different CL kernels +FIXTURE_DATA_TEST_CASE(RunHighDimensional, CLMatMulLowpNativeKernelFixture<int8_t>, framework::DatasetMode::ALL, + combine(combine(combine(combine(combine(combine(combine(datasets::HighDimensionalMatMulDataset(), + framework::dataset::make("TransposeA", { true, false })), + framework::dataset::make("TransposeB", { false })), + framework::dataset::make("M0", { 2 })), + framework::dataset::make("N0", { 2 })), + framework::dataset::make("K0", { 2 })), + framework::dataset::make("ExportRhsToCLImage", { false })), + framework::dataset::make("DataType", DataType::QASYMM8_SIGNED))) +{ + // Validate output + validate(CLAccessor(_target), _reference, tolerance_quant); +} +TEST_SUITE_END() // QASYMM8_SIGNED + +TEST_SUITE(QASYMM8) +FIXTURE_DATA_TEST_CASE(RunTiny, CLMatMulLowpNativeKernelFixture<uint8_t>, framework::DatasetMode::ALL, combine(combine(combine(combine(combine(combine(combine(datasets::TinyMatMulDataset(), + framework::dataset::make("TransposeA", { true, false })), + framework::dataset::make("TransposeB", { false })), + m0_values_precommit), + n0_values_precommit), + k0_values_precommit), + framework::dataset::make("ExportRhsToCLImage", { false })), + framework::dataset::make("DataType", DataType::QASYMM8))) +{ + // Validate output + validate(CLAccessor(_target), _reference, tolerance_quant); +} +FIXTURE_DATA_TEST_CASE(RunSmall, CLMatMulLowpNativeKernelFixture<uint8_t>, framework::DatasetMode::ALL, combine(combine(combine(combine(combine(combine(combine(datasets::SmallMatMulDataset(), + framework::dataset::make("TransposeA", { true, false })), + framework::dataset::make("TransposeB", { false })), + m0_values_precommit), + n0_values_precommit), + k0_values_precommit), + framework::dataset::make("ExportRhsToCLImage", { false })), + framework::dataset::make("DataType", DataType::QASYMM8))) +{ + // Validate output + validate(CLAccessor(_target), _reference, 
tolerance_quant); +} +FIXTURE_DATA_TEST_CASE(RunLargeNoTranspose, CLMatMulLowpNativeKernelFixture<uint8_t>, framework::DatasetMode::NIGHTLY, + combine(combine(combine(combine(combine(combine(combine(datasets::LargeMatMulDataset(), + framework::dataset::make("TransposeA", { false })), + framework::dataset::make("TransposeB", { false })), + m0_values_nightly_lhs_nt), + n0_values_nightly_rhs_nt), + k0_values_nightly_lhs_nt_rhs_nt), + framework::dataset::make("ExportRhsToCLImage", { false })), + framework::dataset::make("DataType", DataType::QASYMM8))) +{ + // Validate output + validate(CLAccessor(_target), _reference, tolerance_quant); +} +FIXTURE_DATA_TEST_CASE(RunLargeLhsTransposed, CLMatMulLowpNativeKernelFixture<uint8_t>, framework::DatasetMode::NIGHTLY, + combine(combine(combine(combine(combine(combine(combine(datasets::LargeMatMulDataset(), + framework::dataset::make("TransposeA", { true })), + framework::dataset::make("TransposeB", { false })), + m0_values_nightly_lhs_t), + n0_values_nightly_rhs_nt), + k0_values_nightly_lhs_t_rhs_nt), + framework::dataset::make("ExportRhsToCLImage", { false })), + framework::dataset::make("DataType", DataType::QASYMM8))) +{ + // Validate output + validate(CLAccessor(_target), _reference, tolerance_quant); +} +TEST_SUITE_END() // QASYMM8 +TEST_SUITE_END() // Quantized +TEST_SUITE_END() // MatMulLowpNativeKernel +TEST_SUITE_END() // CL +} // namespace validation +} // namespace test +} // namespace arm_compute diff --git a/tests/validation/Helpers.cpp b/tests/validation/Helpers.cpp index be194dd266..110325c5a0 100644 --- a/tests/validation/Helpers.cpp +++ b/tests/validation/Helpers.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2022 Arm Limited. + * Copyright (c) 2017-2023 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -22,6 +22,7 @@ * SOFTWARE. */ #include "tests/validation/Helpers.h" +#include "tests/framework/Asserts.h" #include <algorithm> #include <cmath> @@ -373,6 +374,105 @@ void add_padding_y(std::initializer_list<ITensor *> tensors, const DataLayout &d } } +QuantizationInfo calculate_mat_mul_dst_q_info(const QuantizationInfo &a_q_info, const QuantizationInfo &b_q_info, int m, int n, int k, DataType data_type) +{ + ARM_COMPUTE_UNUSED(m, n); + QuantizationInfo c_q_info; + + ARM_COMPUTE_ASSERT(data_type == DataType::QASYMM8 || data_type == DataType::QASYMM8_SIGNED); + + const int32_t t_max = static_cast<int32_t>(data_type == DataType::QASYMM8 ? std::numeric_limits<uint8_t>::max() : std::numeric_limits<int8_t>::max()); + const int32_t t_min = static_cast<int32_t>(data_type == DataType::QASYMM8 ? std::numeric_limits<uint8_t>::min() : std::numeric_limits<int8_t>::min()); + + /** Quantization Setup of matrix multiplication + * + * We have a matrix multiplication of the form C = A * B + * where A is (M X K), B is (K x N) and C is therefore (M x N). + * + * If we have some distributions statistics of A and B, i.e. mean and variance, + * we can estimate the mean and variance of a single value in C matrix and + * pick good scale and offset values for the output and have non-saturated tests. + * + * Each element in the output matrix can be calculated as follows: + * C_ij = sum_k(A_ik * B_kj) + * + * All values are float above. + * + * Note: All possible A_ik, B_kj random variables are assumed mutually independent. 
+ * + * Terminology: + * E[X]: Mean of the random variable X (sometimes referred to as mu_x) + * var(X): Variance of the random variable X (sometimes referred to as sigma^2_x) + * std(X): sqrt(var(X)), standard deviation of X + * + * 1) Calculate the mean: + * E[C_ij] = sum_k( E[A_ik] * E[B_kj] ) = K * mean_a * mean_b + * + * Since elements of A and B are uniformly distributed random variables, we have + * mean_a = (max_a + min_a) / 2, mean_b = (max_b + min_b) / 2 + * max_a and min_a can be calculated from scale_a/b and offset_a/b + * by substituting the data type minimum and maximum into the equations + * + * 2) Calculate the variance: + * var(C_ij) = sum_k( var(A_ik * B_kj) ) + * = sum_k( E[A_ik^2] * E[B_kj^2] - E[A_ik]^2 * E[B_kj]^2 ) + * = ... + * = K * (var_a * var_b + var_a * mean^2_b + var_b * mean^2_a) + * + * Similarly, due to uniform random variable properties, we have + * var_a = (max_a - min_a)^2 / 12 + * var_b = (max_b - min_b)^2 / 12 + * + * 3) Now we have an idea of what an average C_ij looks like and how much deviation + * is present around it. The exact distribution of C is not easy to derive and depends on K. + * But, as K increases, due to the Central Limit Theorem, it'll look more like a bell-shaped curve, + * approaching a normal distribution. + * + * This is useful because, in a normal distribution, values within +- 2 standard deviations of + * the mean constitute ~95% of the values. Therefore, a plausible output range is: + * C_range = [C_min, C_max] = [mean_c - 2 * std_c, mean_c + 2 * std_c] + * + * 4) + * If we map this [C_min, C_max] to [0, 255] or [-128, 127] depending on the signedness of the + * data type, we can find a suitable scale and offset for the output. On average, it's expected + * that 5% of the output values will saturate and 95% will remain in the range. 
+ * + * The equations to be solved for offset_c and scale_c are: + * C_min = scale_c * (type_min - offset_c) + * C_max = scale_c * (type_max - offset_c) + */ + + const int32_t a_offset = a_q_info.uniform().offset; + const float a_scale = a_q_info.uniform().scale; + const int32_t b_offset = b_q_info.uniform().offset; + const float b_scale = b_q_info.uniform().scale; + + // Lhs/A stats + const float max_a = (t_max - a_offset) * a_scale; + const float min_a = (t_min - a_offset) * a_scale; + const float mean_a = (max_a + min_a) / 2; + const float var_a = (max_a - min_a) * (max_a - min_a) / 12; + + // Rhs/B stats + const float max_b = (t_max - b_offset) * b_scale; + const float min_b = (t_min - b_offset) * b_scale; + const float mean_b = (max_b + min_b) / 2; + const float var_b = (max_b - min_b) * (max_b - min_b) / 12; + + // Output stats + const float mean_out = k * mean_a * mean_b; + const float var_out = k * (var_a * var_b + var_a * mean_b * mean_b + var_b * mean_a * mean_a); + const float std_out = sqrt(var_out); + + // Output quantization setup + const float scale_out = 4 * std_out / 255; + const int32_t offset_out = static_cast<int32_t>(t_min - (mean_out - 2.f * std_out) / scale_out); + + c_q_info = QuantizationInfo(scale_out, offset_out); + return c_q_info; +} + template void get_tile(const SimpleTensor<float> &in, SimpleTensor<float> &roi, const Coordinates &coord); template void get_tile(const SimpleTensor<half> &in, SimpleTensor<half> &roi, const Coordinates &coord); template void get_tile(const SimpleTensor<int> &in, SimpleTensor<int> &roi, const Coordinates &coord); diff --git a/tests/validation/Helpers.h b/tests/validation/Helpers.h index 2e48a6b8c6..3449239e45 100644 --- a/tests/validation/Helpers.h +++ b/tests/validation/Helpers.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2022 Arm Limited. + * Copyright (c) 2017-2023 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -21,8 +21,8 @@ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ -#ifndef ARM_COMPUTE_TEST_VALIDATION_HELPERS_H -#define ARM_COMPUTE_TEST_VALIDATION_HELPERS_H +#ifndef ACL_TESTS_VALIDATION_HELPERS +#define ACL_TESTS_VALIDATION_HELPERS #include "arm_compute/core/Types.h" #include "arm_compute/core/Utils.h" @@ -250,7 +250,12 @@ void add_padding_x(std::initializer_list<ITensor *> tensors, const DataLayout &d * @note This function adds padding to the input tensors only if data_layout == DataLayout::NHWC */ void add_padding_y(std::initializer_list<ITensor *> tensors, const DataLayout &data_layout = DataLayout::NHWC); + +/** For MatMulLowp, given the Lhs/Rhs matrix quantization information and the matrix multiplication dimensions, + * calculate a suitable output quantization for obtaining non-saturated outputs with high probability. 
+ */ +QuantizationInfo calculate_mat_mul_dst_q_info(const QuantizationInfo &lhs_q_info, const QuantizationInfo &rhs_q_info, int m, int n, int k, DataType data_type); } // namespace validation } // namespace test } // namespace arm_compute -#endif /* ARM_COMPUTE_TEST_VALIDATION_HELPERS_H */ +#endif /* ACL_TESTS_VALIDATION_HELPERS */ diff --git a/tests/validation/fixtures/MatMulKernelFixture.h b/tests/validation/fixtures/MatMulKernelFixture.h index 10e2a0659a..7d0b1a40a9 100644 --- a/tests/validation/fixtures/MatMulKernelFixture.h +++ b/tests/validation/fixtures/MatMulKernelFixture.h @@ -25,11 +25,15 @@ #define ACL_TESTS_VALIDATION_FIXTURES_MATMULKERNELFIXTURE #include "arm_compute/core/KernelDescriptors.h" -#include "src/gpu/cl/kernels/ClMatMulNativeKernel.h" +#include "arm_compute/core/Utils.h" +#include "arm_compute/core/utils/quantization/AsymmHelpers.h" + #include "tests/CL/CLAccessor.h" #include "tests/CL/Helper.h" #include "tests/framework/Fixture.h" +#include "tests/validation/Helpers.h" #include "tests/validation/reference/GEMM.h" +#include "tests/validation/reference/GEMMLowp.h" #include "tests/validation/reference/Permute.h" #include "tests/validation/reference/ReshapeLayer.h" @@ -43,14 +47,43 @@ namespace validation { using namespace arm_compute::opencl::kernels; -template <typename T> +template <typename T, typename KernelType> class MatMulKernelValidationFixture : public framework::Fixture { public: template <typename...> - void setup(TensorShape shape_a, TensorShape shape_b, TensorShape output_shape, bool pretranspose_a, bool pretranspose_b, const int M0, const int N0, const int K0, bool export_rhs_to_cl_image, DataType data_type) + void setup(TensorShape shape_a, TensorShape shape_b, TensorShape output_shape, bool pretranspose_a, bool pretranspose_b, int M0, int N0, int K0, bool export_rhs_to_cl_image, DataType data_type) { // For brevity, the input shapes are assumed to be not-transposed for both Lhs and Rhs matrices. 
+ QuantizationInfo lhs_q_info; + QuantizationInfo rhs_q_info; + QuantizationInfo dst_q_info; + + if(is_data_type_quantized(data_type)) + { + const int32_t t_max = static_cast<int32_t>(std::numeric_limits<T>::max()); + const int32_t t_min = static_cast<int32_t>(std::numeric_limits<T>::min()); + + std::mt19937 generator(library->seed()); + std::uniform_real_distribution<float> distribution_float(-5.0f, 3.0f); + std::uniform_int_distribution<int32_t> distribution_t(t_min, t_max); + + const float scale_lhs = pow(2, distribution_float(generator)); // [2^-5, 2^3] + const float scale_rhs = pow(2, distribution_float(generator)); // [2^-5, 2^3] + + const int32_t offset_lhs = distribution_t(generator); + const int32_t offset_rhs = distribution_t(generator); + + lhs_q_info = QuantizationInfo(scale_lhs, offset_lhs); + rhs_q_info = QuantizationInfo(scale_rhs, offset_rhs); + + const int m = shape_a.y(); + const int n = shape_b.x(); + const int k = shape_a.x(); + + dst_q_info = calculate_mat_mul_dst_q_info(lhs_q_info, rhs_q_info, m, n, k, data_type); + } + if(pretranspose_a) { permute(shape_a, PermutationVector(1U, 0U)); @@ -65,8 +98,8 @@ public: if(!export_rhs_to_cl_image || _device_supports_export_to_cl_image) { - _target = compute_target(shape_a, shape_b, output_shape, pretranspose_a, pretranspose_b, M0, N0, K0, export_rhs_to_cl_image, data_type); - _reference = compute_reference(shape_a, shape_b, output_shape, pretranspose_a, pretranspose_b, data_type); + _target = compute_target(shape_a, shape_b, output_shape, pretranspose_a, pretranspose_b, M0, N0, K0, export_rhs_to_cl_image, data_type, lhs_q_info, rhs_q_info, dst_q_info); + _reference = compute_reference(shape_a, shape_b, output_shape, pretranspose_a, pretranspose_b, data_type, lhs_q_info, rhs_q_info, dst_q_info); } } @@ -93,23 +126,29 @@ protected: } } + template <typename U, typename D> + void fill_constant(U &&tensor, D value) + { + library->fill_tensor_value(tensor, value); + } + CLTensor compute_target(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &output_shape, bool pretranspose_a, bool pretranspose_b, const int M0, const int N0, const int K0, - bool export_rhs_to_cl_image, DataType data_type) + bool export_rhs_to_cl_image, DataType data_type, const QuantizationInfo &lhs_q_info, const QuantizationInfo &rhs_q_info, const QuantizationInfo &dst_q_info) { - // Create tensors - CLTensor a = create_tensor<CLTensor>(shape_a, data_type, 1); - CLTensor b = create_tensor<CLTensor>(shape_b, data_type, 1); - CLTensor dst = create_tensor<CLTensor>(output_shape, data_type, 1); - - CLSynthetizeOperator<ClMatMulNativeKernel> matMul{}; - MatMulKernelInfo matmul_info; - matmul_info.adj_lhs = pretranspose_a; - matmul_info.adj_rhs = pretranspose_b; - matmul_info.m0 = M0; - matmul_info.n0 = N0; - matmul_info.k0 = K0; + CLSynthetizeOperator<KernelType> matMul{}; + MatMulKernelInfo matmul_info; + matmul_info.adj_lhs = pretranspose_a; + matmul_info.adj_rhs = pretranspose_b; + matmul_info.m0 = M0; + matmul_info.n0 = N0; + matmul_info.k0 = K0; matmul_info.export_rhs_to_cl_image = export_rhs_to_cl_image; + // Create tensors + CLTensor a = create_tensor<CLTensor>(shape_a, data_type, 1, lhs_q_info); + CLTensor b = create_tensor<CLTensor>(shape_b, data_type, 1, rhs_q_info); + CLTensor dst = create_tensor<CLTensor>(output_shape, data_type, 1, dst_q_info); + matMul.configure(a.info(), b.info(), dst.info(), matmul_info); ARM_COMPUTE_ASSERT(a.info()->is_resizable()); ARM_COMPUTE_ASSERT(b.info()->is_resizable()); @@ -138,18 +177,19 @@ protected: 
return dst; } - SimpleTensor<T> compute_reference(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &output_shape, bool pretranspose_a, bool pretranspose_b, DataType data_type) + SimpleTensor<T> compute_reference(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &output_shape, bool pretranspose_a, bool pretranspose_b, DataType data_type, + const QuantizationInfo &lhs_q_info, const QuantizationInfo &rhs_q_info, const QuantizationInfo &dst_q_info) { // We collapse dimensions > 3 onto dimension 3, i.e. 5D+ tensors will look like 4D // This is necessary unless we choose to extend gemm reference for 5D+ tensors - TensorShape output_shape_collapsed = output_shape.collapsed_from(Window::DimW); - TensorShape shape_a_collapsed = shape_a.collapsed_from(Window::DimW); - TensorShape shape_b_collapsed = shape_b.collapsed_from(Window::DimW); + TensorShape output_shape_collapsed = output_shape.collapsed_from(Window::DimZ); + TensorShape shape_a_collapsed = shape_a.collapsed_from(Window::DimZ); + TensorShape shape_b_collapsed = shape_b.collapsed_from(Window::DimZ); // Create reference - SimpleTensor<T> a{ shape_a_collapsed, data_type, 1 }; - SimpleTensor<T> b{ shape_b_collapsed, data_type, 1 }; - SimpleTensor<T> c{ output_shape_collapsed, data_type, 1 }; + SimpleTensor<T> a{ shape_a_collapsed, data_type, 1, lhs_q_info }; + SimpleTensor<T> b{ shape_b_collapsed, data_type, 1, rhs_q_info }; + SimpleTensor<T> c{ output_shape_collapsed, data_type, 1, dst_q_info }; // Fill reference fill(a, 0); @@ -185,10 +225,8 @@ protected: b_transposed = reference::permute<T>(b, PermutationVector(1U, 0U)); } - // Setting beta to 0 will effectively disable C for the - // computation of the reference: alpha * A * B + 0 * C // Use transposed tensors if boolean enabled else use original tensors - SimpleTensor<T> result = reference::gemm<T>((pretranspose_a) ? a_transposed : a, (pretranspose_b) ? b_transposed : b, c, 1.0f, 0.f); + SimpleTensor<T> result = gemm_reference<T>((pretranspose_a) ? a_transposed : a, (pretranspose_b) ? 
b_transposed : b, c); // We reshape the gemm output back if the tensor is high dimensional if(output_shape_collapsed != output_shape) @@ -199,9 +237,43 @@ return result; } + template <typename U = T> + typename std::enable_if < std::is_same<U, float>::value || std::is_same<U, half>::value, SimpleTensor<U >>::type gemm_reference(SimpleTensor<U> &a, SimpleTensor<U> &b, SimpleTensor<U> &c) + { + // Setting beta to 0 will effectively disable C for the + // computation of the reference: alpha * A * B + 0 * C + return reference::gemm<U>(a, b, c, 1.0f, 0.f); + } + + template <typename U = T> + typename std::enable_if < std::is_same<U, int8_t>::value || std::is_same<U, uint8_t>::value, SimpleTensor<U >>::type gemm_reference(SimpleTensor<U> &a, SimpleTensor<U> &b, SimpleTensor<U> &c) + { + const UniformQuantizationInfo aq = a.quantization_info().uniform(); + const UniformQuantizationInfo bq = b.quantization_info().uniform(); + const UniformQuantizationInfo cq = c.quantization_info().uniform(); + + const SimpleTensor<int32_t> result = reference::gemmlowp_matrix_multiply_core<int32_t, U, U>(a, b, c.shape(), -aq.offset, -bq.offset); + + std::vector<int32_t> gemmlowp_multipliers{ 1 }; + std::vector<int32_t> gemmlowp_shifts{ 1 }; + const int gemmlowp_offset = cq.offset; + const float scale = aq.scale * bq.scale / cq.scale; + + quantization::calculate_quantized_multiplier(scale, &gemmlowp_multipliers[0], &gemmlowp_shifts[0]); + constexpr int32_t gemmlowp_min_bound = std::numeric_limits<int32_t>::min(); + constexpr int32_t gemmlowp_max_bound = std::numeric_limits<int32_t>::max(); + + SimpleTensor<int> bias{ c.shape(), DataType::S32 }; + fill_constant(bias, static_cast<int32_t>(0)); + + const SimpleTensor<U> final_result = reference::gemmlowp_quantize_down_scale_by_fixedpoint<int32_t, U>(result, bias, + gemmlowp_multipliers, gemmlowp_shifts, gemmlowp_offset, gemmlowp_min_bound, gemmlowp_max_bound); + return final_result; + } + CLTensor _target{}; SimpleTensor<T> _reference{}; - bool _device_supports_export_to_cl_image { true }; + bool _device_supports_export_to_cl_image{ true }; };
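
Editor's note: the statistics-based output-quantization math that calculate_mat_mul_dst_q_info() adds to Helpers.cpp can be checked in isolation. Below is a minimal standalone C++ sketch of the same derivation; it is not part of the patch and does not depend on the Compute Library. The scale/offset/K inputs are made-up example values chosen only for illustration.

#include <cmath>
#include <cstdint>
#include <cstdio>
#include <limits>

int main()
{
    // QASYMM8_SIGNED representable range
    const int32_t t_min = std::numeric_limits<int8_t>::min(); // -128
    const int32_t t_max = std::numeric_limits<int8_t>::max(); //  127

    // Illustrative Lhs/Rhs uniform quantization parameters and accumulation
    // depth K (made-up values, not taken from the tests)
    const float   a_scale  = 0.5f;
    const int32_t a_offset = 10;
    const float   b_scale  = 0.25f;
    const int32_t b_offset = -5;
    const int     k        = 64;

    // Real-valued range and uniform-distribution statistics of each input,
    // mirroring the formulas in calculate_mat_mul_dst_q_info()
    const float max_a  = (t_max - a_offset) * a_scale;
    const float min_a  = (t_min - a_offset) * a_scale;
    const float mean_a = (max_a + min_a) / 2;
    const float var_a  = (max_a - min_a) * (max_a - min_a) / 12;

    const float max_b  = (t_max - b_offset) * b_scale;
    const float min_b  = (t_min - b_offset) * b_scale;
    const float mean_b = (max_b + min_b) / 2;
    const float var_b  = (max_b - min_b) * (max_b - min_b) / 12;

    // Output statistics: E[C_ij] = K * mean_a * mean_b and
    // var(C_ij) = K * (var_a * var_b + var_a * mean_b^2 + var_b * mean_a^2)
    const float mean_out = k * mean_a * mean_b;
    const float var_out  = k * (var_a * var_b + var_a * mean_b * mean_b + var_b * mean_a * mean_a);
    const float std_out  = std::sqrt(var_out);

    // Map [mean_out - 2 * std_out, mean_out + 2 * std_out] onto the 256
    // quantized levels: the scale covers a 4 * std_out spread, and the offset
    // solves C_min = scale_out * (t_min - offset_out)
    const float   scale_out  = 4 * std_out / 255;
    const int32_t offset_out = static_cast<int32_t>(t_min - (mean_out - 2.f * std_out) / scale_out);

    std::printf("dst scale = %f, dst offset = %d\n", scale_out, static_cast<int>(offset_out));
    return 0;
}

Compiled with any C++11 compiler, this prints the destination scale/offset the helper would pick for these inputs, so a roughly ~95% non-saturating output range can be verified against a simulated int8 matmul.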