Diffstat (limited to 'tests/validation/CL/MatMulLowpNativeKernel.cpp')
-rw-r--r--  tests/validation/CL/MatMulLowpNativeKernel.cpp  92
1 file changed, 57 insertions(+), 35 deletions(-)
diff --git a/tests/validation/CL/MatMulLowpNativeKernel.cpp b/tests/validation/CL/MatMulLowpNativeKernel.cpp
index fd7a4cb156..90eee4fb82 100644
--- a/tests/validation/CL/MatMulLowpNativeKernel.cpp
+++ b/tests/validation/CL/MatMulLowpNativeKernel.cpp
@@ -49,6 +49,9 @@ constexpr AbsoluteTolerance<float> tolerance_quant(1); /**< Tolerance value for
template <typename T>
using CLMatMulLowpNativeKernelFixture = MatMulKernelValidationFixture<T, ClMatMulLowpNativeKernel>;
+template <typename T>
+using CLMatMulLowpKernelWithBiasFixture = MatMulKernelWithBiasValidation<T, ClMatMulLowpNativeKernel>;
+
/** M0 values to test --precommit*/
const auto m0_values_precommit = framework::dataset::make("M0", { 1, 3 });
@@ -103,7 +106,7 @@ TEST_CASE(SupportedKernelConfigurations, framework::DatasetMode::ALL)
for(auto &pair : supported_block_sizes)
{
TensorInfo output_info;
- Status status = ClMatMulLowpNativeKernel::validate(&lhs_info, &rhs_info, &output_info, pair.first);
+ Status status = ClMatMulLowpNativeKernel::validate(&lhs_info, &rhs_info, nullptr, &output_info, pair.first);
ARM_COMPUTE_EXPECT(bool(status) == pair.second, framework::LogLevel::ERRORS);
}
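
(A minimal sketch, for reference, of how the widened validate() signature is called; everything here is taken from calls visible elsewhere in this diff, with illustrative shapes. The new third argument is the optional bias and may be nullptr, exactly as in the block-size loop above.)

    const TensorInfo lhs_info(TensorShape(8U, 4U), 1, DataType::QASYMM8_SIGNED);
    const TensorInfo rhs_info(TensorShape(2U, 8U), 1, DataType::QASYMM8_SIGNED);
    const TensorInfo bia_info(TensorShape(2U), 1, DataType::S32);
    TensorInfo       output_info;
    const MatMulKernelInfo matmul_kernel_info{ false, false, 1, 1, 1, false /* export_rhs_to_cl_image */ };

    // Bias-less call keeps the previous behaviour:
    Status status_no_bias = ClMatMulLowpNativeKernel::validate(&lhs_info, &rhs_info, nullptr, &output_info, matmul_kernel_info);
    // With a 1D S32 bias whose length matches the output's N dimension:
    Status status_bias = ClMatMulLowpNativeKernel::validate(&lhs_info, &rhs_info, &bia_info, &output_info, matmul_kernel_info);
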
@@ -112,22 +115,24 @@ TEST_CASE(SupportedKernelConfigurations, framework::DatasetMode::ALL)
TEST_CASE(ValidateInputShapes, framework::DatasetMode::ALL)
{
// Configurations are assumed to be Nt/Nt, but will be transposed inside the test to test other configurations
- using ShapeConfigurationTuple = std::tuple<TensorShape, TensorShape, bool>;
+ using ShapeConfigurationTuple = std::tuple<TensorShape, TensorShape, TensorShape, bool>;
const std::vector<ShapeConfigurationTuple> shape_configurations =
{
- { TensorShape(5U, 1U), TensorShape(3U, 5U), true },
- { TensorShape(10U, 12U), TensorShape(3U, 10U), true },
- { TensorShape(8U, 4U), TensorShape(2U, 8U), true },
- { TensorShape(8U, 4U), TensorShape(2U, 5U), false }, // Mismatch in the K dimension
- { TensorShape(5U, 0U), TensorShape(2U, 5U), false }, // Invalid dimension
- { TensorShape(5U, 4U, 3U, 4U, 5U, 6U), TensorShape(2U, 5U, 3U, 4U, 5U, 6U), true },
- { TensorShape(5U, 4U, 3U, 4U, 5U, 1U), TensorShape(2U, 5U, 3U, 4U, 5U, 6U), false }, // no batch broadcasting
- { TensorShape(5U, 4U, 3U, 4U, 9U, 6U), TensorShape(2U, 5U, 3U, 4U, 5U, 6U), false }, // mismatch in batch dimension
+ { TensorShape(5U, 1U), TensorShape(3U, 5U), TensorShape(3U), true },
+ { TensorShape(10U, 12U), TensorShape(3U, 10U), TensorShape(3U), true },
+ { TensorShape(8U, 4U), TensorShape(2U, 8U), TensorShape(2U), true },
+ { TensorShape(8U, 4U), TensorShape(2U, 5U), TensorShape(2U), false }, // Mismatch in the K dimension
+ { TensorShape(5U, 0U), TensorShape(2U, 5U), TensorShape(2U), false }, // Invalid dimension
+ { TensorShape(5U, 4U, 3U, 4U, 5U, 6U), TensorShape(2U, 5U, 3U, 4U, 5U, 6U), TensorShape(2U), true },
+ { TensorShape(5U, 4U, 3U, 4U, 5U, 1U), TensorShape(2U, 5U, 3U, 4U, 5U, 6U), TensorShape(2U), false }, // no batch broadcasting
+ { TensorShape(5U, 4U, 3U, 4U, 9U, 6U), TensorShape(2U, 5U, 3U, 4U, 5U, 6U), TensorShape(2U), false }, // mismatch in batch dimension
+ { TensorShape(5U, 1U), TensorShape(3U, 5U), TensorShape(1U), false }, // invalid broadcast of bias
+ { TensorShape(5U, 1U), TensorShape(3U, 5U), TensorShape(3U, 3U), false }, // 2d bias is invalid
};
for(auto &tuple : shape_configurations)
{
- const bool expected = std::get<2>(tuple);
+ const bool expected = std::get<3>(tuple);
for(bool adj_lhs :
{
@@ -141,6 +146,7 @@ TEST_CASE(ValidateInputShapes, framework::DatasetMode::ALL)
{
TensorShape lhs_shape = std::get<0>(tuple);
TensorShape rhs_shape = std::get<1>(tuple);
+ TensorShape bia_shape = std::get<2>(tuple);
if(adj_lhs)
{
@@ -154,11 +160,12 @@ TEST_CASE(ValidateInputShapes, framework::DatasetMode::ALL)
const TensorInfo lhs_info = TensorInfo(lhs_shape, 1, DataType::QASYMM8_SIGNED);
const TensorInfo rhs_info = TensorInfo(rhs_shape, 1, DataType::QASYMM8_SIGNED);
+ const TensorInfo bia_info = TensorInfo(bia_shape, 1, DataType::S32);
TensorInfo output_info;
MatMulKernelInfo matmul_kernel_info{ adj_lhs, adj_rhs, 1, 1, 1, false /* export_rhs_to_cl_image */ };
- Status status = ClMatMulLowpNativeKernel::validate(&lhs_info, &rhs_info, &output_info, matmul_kernel_info);
+ Status status = ClMatMulLowpNativeKernel::validate(&lhs_info, &rhs_info, &bia_info, &output_info, matmul_kernel_info);
ARM_COMPUTE_EXPECT(bool(status) == expected, framework::LogLevel::ERRORS);
}
}
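
(The new bias column in the tuples implies a shape contract: the bias must be one-dimensional and its length must equal the output's N dimension, with no broadcasting. A hypothetical helper expressing that rule, inferred from the expected results above rather than taken from the kernel's implementation:)

    // Hypothetical: mirrors the expectations encoded in shape_configurations.
    bool is_valid_matmul_bias_shape(const TensorShape &bia_shape, unsigned int n)
    {
        // 1D only ("2d bias is invalid") and exact length match ("invalid broadcast of bias")
        return bia_shape.num_dimensions() == 1 && bia_shape.x() == n;
    }
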
@@ -167,41 +174,44 @@ TEST_CASE(ValidateInputShapes, framework::DatasetMode::ALL)
TEST_CASE(ValidateDataTypes, framework::DatasetMode::ALL)
{
- using DataTypeConfigurationTuple = std::tuple<DataType, DataType, DataType, bool>;
+ using DataTypeConfigurationTuple = std::tuple<DataType, DataType, DataType, DataType, bool>;
const std::vector<DataTypeConfigurationTuple> data_type_configurations =
{
- { DataType::F32, DataType::F32, DataType::F32, false }, // no floating point types
- { DataType::F16, DataType::F16, DataType::F16, false }, // no floating point types
- { DataType::F64, DataType::F64, DataType::F64, false }, // no double precision
- { DataType::QASYMM8, DataType::QASYMM8, DataType::QASYMM8, true },
- { DataType::QASYMM8_SIGNED, DataType::QASYMM8_SIGNED, DataType::QASYMM8_SIGNED, true },
- { DataType::QSYMM8_PER_CHANNEL, DataType::QSYMM8_PER_CHANNEL, DataType::QSYMM8_PER_CHANNEL, false }, // only qasymm8/qasymm8_signed is supported
- { DataType::QASYMM16, DataType::QASYMM16, DataType::QASYMM16, false }, // only qasymm8/qasymm8_signed is supported
- { DataType::QSYMM16, DataType::QSYMM16, DataType::QSYMM16, false }, // only qasymm8/qasymm8_signed is supported
- { DataType::QSYMM8, DataType::QSYMM8, DataType::QSYMM8, false }, // only qasymm8/qasymm8_signed is supported
- { DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::QASYMM8, false }, // no mixed data types
- { DataType::S64, DataType::S64, DataType::S64, false }, // no integral types
- { DataType::S32, DataType::S32, DataType::S32, false }, // no integral types
- { DataType::S16, DataType::S16, DataType::S16, false }, // no integral types
- { DataType::S8, DataType::S8, DataType::S8, false }, // no integral types
- { DataType::U64, DataType::U64, DataType::U64, false }, // no integral types
- { DataType::U32, DataType::U32, DataType::U32, false }, // no integral types
- { DataType::U16, DataType::U16, DataType::U16, false }, // no integral types
- { DataType::U8, DataType::U8, DataType::U8, false }, // no integral types
+ { DataType::F32, DataType::F32, DataType::F32, DataType::F32, false }, // no floating point types
+ { DataType::F16, DataType::F16, DataType::F16, DataType::F16, false }, // no floating point types
+ { DataType::F64, DataType::F64, DataType::F64, DataType::F64, false }, // no double precision
+ { DataType::QASYMM8, DataType::QASYMM8, DataType::S32, DataType::QASYMM8, true },
+ { DataType::QASYMM8_SIGNED, DataType::QASYMM8_SIGNED, DataType::S32, DataType::QASYMM8_SIGNED, true },
+ { DataType::QSYMM8_PER_CHANNEL, DataType::QSYMM8_PER_CHANNEL, DataType::S32, DataType::QSYMM8_PER_CHANNEL, false }, // only qasymm8/qasymm8_signed is supported
+ { DataType::QASYMM16, DataType::QASYMM16, DataType::S32, DataType::QASYMM16, false }, // only qasymm8/qasymm8_signed is supported
+ { DataType::QSYMM16, DataType::QSYMM16, DataType::S32, DataType::QSYMM16, false }, // only qasymm8/qasymm8_signed is supported
+ { DataType::QSYMM8, DataType::QSYMM8, DataType::S32, DataType::QSYMM8, false }, // only qasymm8/qasymm8_signed is supported
+ { DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::S32, DataType::QASYMM8, false }, // no mixed data types
+ { DataType::S64, DataType::S64, DataType::S64, DataType::S64, false }, // no integral types
+ { DataType::S32, DataType::S32, DataType::S32, DataType::S32, false }, // no integral types
+ { DataType::S16, DataType::S16, DataType::S16, DataType::S16, false }, // no integral types
+ { DataType::S8, DataType::S8, DataType::S8, DataType::S8, false }, // no integral types
+ { DataType::U64, DataType::U64, DataType::U64, DataType::U64, false }, // no integral types
+ { DataType::U32, DataType::U32, DataType::U32, DataType::U32, false }, // no integral types
+ { DataType::U16, DataType::U16, DataType::U16, DataType::U16, false }, // no integral types
+ { DataType::U8, DataType::U8, DataType::U8, DataType::U8, false }, // no integral types
+ { DataType::QASYMM8, DataType::QASYMM8, DataType::F32, DataType::QASYMM8, false } // Only S32 bias is supported
};
// It's enough to test a single shape and block size configuration while checking data types
- const TensorShape shape = TensorShape(10U, 10U);
+ const TensorShape shape = TensorShape(10U, 10U);
+ const TensorShape bia_shape = TensorShape(10U);
const MatMulKernelInfo matmul_kernel_info{ false, false, 1, 1, 1, false };
for(auto &tuple : data_type_configurations)
{
- const bool expected = std::get<3>(tuple);
+ const bool expected = std::get<4>(tuple);
const TensorInfo lhs_info(shape, 1, std::get<0>(tuple));
const TensorInfo rhs_info(shape, 1, std::get<1>(tuple));
- TensorInfo output_info(shape, 1, std::get<2>(tuple));
+ const TensorInfo bia_info(bia_shape, 1, std::get<2>(tuple));
+ TensorInfo output_info(shape, 1, std::get<3>(tuple));
- Status status = ClMatMulLowpNativeKernel::validate(&lhs_info, &rhs_info, &output_info, matmul_kernel_info);
+ Status status = ClMatMulLowpNativeKernel::validate(&lhs_info, &rhs_info, &bia_info, &output_info, matmul_kernel_info);
ARM_COMPUTE_EXPECT(bool(status) == expected, framework::LogLevel::ERRORS);
}
}
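
(The expanded table reduces to one rule for the bias data type: LHS, RHS and output must share a single QASYMM8 or QASYMM8_SIGNED type, and the bias must be S32. A hypothetical predicate condensing the expectations above:)

    // Hypothetical: summarises data_type_configurations as a single check.
    bool is_valid_lowp_matmul_types(DataType lhs, DataType rhs, DataType bia, DataType dst)
    {
        const bool lowp = (lhs == DataType::QASYMM8) || (lhs == DataType::QASYMM8_SIGNED);
        return lowp && rhs == lhs && dst == lhs && bia == DataType::S32;
    }
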
@@ -234,6 +244,18 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLMatMulLowpNativeKernelFixture<int8_t>, framew
// Validate output
validate(CLAccessor(_target), _reference, tolerance_quant);
}
+FIXTURE_DATA_TEST_CASE(RunWithBias, CLMatMulLowpKernelWithBiasFixture<int8_t>, framework::DatasetMode::ALL, combine(combine(combine(combine(combine(combine(combine(datasets::SmallMatMulDataset(),
+ framework::dataset::make("TransposeA", { true, false })),
+ framework::dataset::make("TransposeB", { true, false })),
+ m0_values_precommit),
+ n0_values_precommit),
+ k0_values_precommit),
+ framework::dataset::make("ExportRhsToCLImage", { false })),
+ framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference, tolerance_quant);
+}
FIXTURE_DATA_TEST_CASE(RunLargeNoTranspose, CLMatMulLowpNativeKernelFixture<int8_t>, framework::DatasetMode::NIGHTLY,
combine(combine(combine(combine(combine(combine(combine(datasets::LargeMatMulDataset(),
framework::dataset::make("TransposeA", { false })),