author     Gunes Bayir <gunes.bayir@arm.com>  2024-07-02 15:45:01 +0100
committer  Gunes Bayir <gunes.bayir@arm.com>  2024-07-02 16:00:11 +0000
commit     a3f238a44d9f306c77be0177f13d22ae3f3bcc57 (patch)
tree       44bf40fb59fb8c4452d65d25e3a967c035bc6863 /tests/validation/NEON
parent     f92b0fffa0d32dc08340c1abfa1a7f09c6e53795 (diff)
download   ComputeLibrary-a3f238a44d9f306c77be0177f13d22ae3f3bcc57.tar.gz
Revert "Update CPU kernels and add mixed sign GEMM support"
This reverts commits fc94f4d23abd4bc427b701f54ad85282e9ec7872 and
5d6fff041ade7eb44af0945867212f3979be3d3e (because the latter fixes a
build failure caused by the former).

Change-Id: I7d07fea8307e9a7033b30874bbb14ba9202b23d8
Signed-off-by: Gunes Bayir <gunes.bayir@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/11815
Benchmark: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Adnan AlSinan <adnan.alsinan@arm.com>
Diffstat (limited to 'tests/validation/NEON')
-rw-r--r--  tests/validation/NEON/ConvolutionLayer.cpp | 101
-rw-r--r--  tests/validation/NEON/GEMMLowp.cpp         |  36
2 files changed, 9 insertions, 128 deletions
diff --git a/tests/validation/NEON/ConvolutionLayer.cpp b/tests/validation/NEON/ConvolutionLayer.cpp
index 7eada81ce5..d739d4e1a4 100644
--- a/tests/validation/NEON/ConvolutionLayer.cpp
+++ b/tests/validation/NEON/ConvolutionLayer.cpp
@@ -147,45 +147,6 @@ const auto QuantizationData = make("QuantizationInfo",
TEST_SUITE(NEON)
TEST_SUITE(ConvolutionLayer)
-DATA_TEST_CASE(SupportedTypes, framework::DatasetMode::ALL, zip(
- make("DataType", {
- DataType::F32,
- DataType::QASYMM8,
- DataType::QASYMM8,
- DataType::QASYMM8_SIGNED
- }),
- make("WeightsDataType", {
- DataType::F32,
- DataType::QASYMM8,
- DataType::QASYMM8_SIGNED,
- DataType::QASYMM8
- }),
- make("Expected",
- {
- true,
- true,
- true,
- false
- })),
-data_type_const, weights_data_type_const, expected_const)
-{
- TensorInfo input_info = TensorInfo(TensorShape(3U, 3U, 1U), 1, data_type_const);
- TensorInfo weights_info = TensorInfo(TensorShape(2U, 2U, 1U, 1U), 1, weights_data_type_const);
- TensorInfo output_info = TensorInfo(TensorShape(2U, 2U, 1U), 1, data_type_const);
-
- input_info.set_quantization_info(arm_compute::QuantizationInfo(1, 0));
- weights_info.set_quantization_info(arm_compute::QuantizationInfo(1, 0));
- output_info.set_quantization_info(arm_compute::QuantizationInfo(1, 0));
-
- Status status = NEConvolutionLayer::validate(
- &input_info,
- &weights_info,
- nullptr,
- &output_info,
- PadStrideInfo());
-
- ARM_COMPUTE_EXPECT(bool(status) == expected_const, framework::LogLevel::ERRORS);
-}
// *INDENT-OFF*
// clang-format off
@@ -296,7 +257,7 @@ TEST_CASE(MemoryInjection, framework::DatasetMode::ALL)
for(size_t i = 0; i < result_0.info()->tensor_shape().total_size(); ++i)
{
- ARM_COMPUTE_EXPECT(reinterpret_cast<float *>(result_0.buffer())[i] == reinterpret_cast<float *>(result_1.buffer())[i], framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(((float *)result_0.buffer())[i] == ((float *)result_1.buffer())[i], framework::LogLevel::ERRORS);
}
}
@@ -342,7 +303,7 @@ TEST_CASE(MultipleExecutionWithConfigure, framework::DatasetMode::ALL)
for(size_t i = 0; i < result_0.info()->tensor_shape().total_size(); ++i)
{
- ARM_COMPUTE_EXPECT(reinterpret_cast<float *>(result_0.buffer())[i] == reinterpret_cast<float *>(result_1.buffer())[i], framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(((float *)result_0.buffer())[i] == ((float *)result_1.buffer())[i], framework::LogLevel::ERRORS);
}
}
@@ -619,7 +580,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NEWinogradConvolutionLayerFixture<float>, frame
/// It's enough to run the activations for a single weight/input combination and data type because
/// the activation function is called on top of the winograd output as a separate operator
-/// TODO(COMPMID-6573): Enable after COMPMID-6573 is resolved
+/// TODO: Enable after COMPMID-6573 is resolved
FIXTURE_DATA_TEST_CASE(RunActivations, NEWinogradConvolutionLayerFixture<float>, framework::DatasetMode::DISABLED,
combine(
make("Input", TensorShape(3U, 3U, 32U)),
@@ -1158,7 +1119,7 @@ TEST_CASE(MemoryInjection, framework::DatasetMode::ALL)
auto result_1 = run_conv();
for(size_t i = 0; i < result_0.info()->tensor_shape().total_size(); ++i)
{
- ARM_COMPUTE_EXPECT(reinterpret_cast<float *>(result_0.buffer())[i] == reinterpret_cast<float *>(result_1.buffer())[i], framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(((float *)result_0.buffer())[i] == ((float *)result_1.buffer())[i], framework::LogLevel::ERRORS);
}
}
@@ -1199,7 +1160,7 @@ TEST_CASE(MultipleExecutionWithConfigure, framework::DatasetMode::ALL)
auto result_1 = run_conv();
for(size_t i = 0; i < result_0.info()->tensor_shape().total_size(); ++i)
{
- ARM_COMPUTE_EXPECT(reinterpret_cast<float *>(result_0.buffer())[i] == reinterpret_cast<float *>(result_1.buffer())[i], framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(((float *)result_0.buffer())[i] == ((float *)result_1.buffer())[i], framework::LogLevel::ERRORS);
}
}
@@ -1290,14 +1251,12 @@ FIXTURE_DATA_TEST_CASE(RunVeryLarge, NEGEMMConvolutionLayerFixture<float>, frame
TEST_SUITE_END() // FP32
TEST_SUITE_END() // Float
-// TODO(COMPMID-6573): Extend quantized tests with at least one suite where the weight is padded (the legacy case, see floating point's RunPaddedWeights)
+// TODO: COMPMID-6596 Extend quantized tests with at least one suite where the weight is padded (the legacy case, see floating point's RunPaddedWeights)
template <typename T>
using NEGEMMConvolutionLayerQuantizedFixture = ConvolutionValidationQuantizedFixture<Tensor, Accessor, NEConvolutionLayer, T>;
template <typename T>
using NEGEMMConvolutionLayerQuantizedMixedDataLayoutFixture = ConvolutionValidationQuantizedFixture<Tensor, Accessor, NEConvolutionLayer, T, true>;
-using NEGEMMConvolutionLayerQuantizedMixedSignFixture = ConvolutionValidationQuantizedMixedTypeFixture<Tensor, Accessor, NEConvolutionLayer, uint8_t, int8_t>;
-
template <typename T>
using NEGEMMConvolutionLayerQuantizedPerChannelFixture = ConvolutionValidationQuantizedPerChannelFixture<Tensor, Accessor, NEConvolutionLayer, T, int8_t>;
@@ -1373,50 +1332,6 @@ FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, NEGEMMConvolutionLayerQuantizedFixtur
}
TEST_SUITE_END() // QASYMM8_SIGNED
-TEST_SUITE(QASYMM8_MIXED)
-FIXTURE_DATA_TEST_CASE(
- RunSmall,
- NEGEMMConvolutionLayerQuantizedMixedSignFixture,
- framework::DatasetMode::ALL,
- combine(combine(combine(combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
- framework::dataset::make("ReshapeWeights", {true})),
- framework::dataset::make("DataType", DataType::QASYMM8)),
- framework::dataset::make("WeightsDataType", DataType::QASYMM8_SIGNED)),
- framework::dataset::make("DataLayout", {DataLayout::NCHW, DataLayout::NHWC})),
- framework::dataset::make("QuantizationInfoIfActivationEnabled",
-{QuantizationInfo(2.f / 255.f, 10)})),
-framework::dataset::make("WeightQuantizationInfoIfActivationEnabled",
-{QuantizationInfo(2.f / 255.f, 10)})),
-QuantizedActivationFunctionsDataset))
-{
- // Validate output
- validate(Accessor(_target), _reference, tolerance_qasymm8);
-}
-FIXTURE_DATA_TEST_CASE(
- RunMixedDataLayout,
- NEGEMMConvolutionLayerQuantizedMixedSignFixture,
- framework::DatasetMode::ALL,
- combine(
- framework::dataset::make("Input", TensorShape(23U, 27U, 5U)),
- framework::dataset::make("Weights", TensorShape(3U, 3U, 5U, 2U)),
- framework::dataset::make("Bias", TensorShape(2U)),
- framework::dataset::make("Output", TensorShape(11U, 25U, 2U)),
- framework::dataset::make("PadStrideInfo", PadStrideInfo(2, 1, 0, 0)),
- framework::dataset::make("Dilation", Size2D(1, 1)),
- framework::dataset::make("ReshapeWeights", {true}),
- framework::dataset::make("DataType", DataType::QASYMM8),
- framework::dataset::make("WeightsDataType", DataType::QASYMM8_SIGNED),
- framework::dataset::make("DataLayout", {DataLayout::NCHW, DataLayout::NHWC}),
- framework::dataset::make("QuantizationInfoIfActivationEnabled", {QuantizationInfo(2.f / 255.f, 10)}),
- framework::dataset::make("WeightQuantizationInfoIfActivationEnabled", {QuantizationInfo(2.f / 255.f, 10)}),
- QuantizedActivationFunctionsDataset)
- )
-{
- // Validate output
- validate(Accessor(_target), _reference, tolerance_qasymm8);
-}
-TEST_SUITE_END() // QASYMM8_MIXED
-
TEST_SUITE(QSYMM8_PER_CHANNEL)
FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMConvolutionLayerQuantizedPerChannelFixture<uint8_t>, framework::DatasetMode::ALL,
combine(combine(combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
@@ -1521,7 +1436,7 @@ TEST_CASE(MemoryInjection, framework::DatasetMode::ALL)
auto result_1 = run_conv();
for(size_t i = 0; i < result_0.info()->tensor_shape().total_size(); ++i)
{
- ARM_COMPUTE_EXPECT(reinterpret_cast<float *>(result_0.buffer())[i] == reinterpret_cast<float *>(result_1.buffer())[i], framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(((float *)result_0.buffer())[i] == ((float *)result_1.buffer())[i], framework::LogLevel::ERRORS);
}
}
@@ -1561,7 +1476,7 @@ TEST_CASE(MultipleExecutionWithConfigure, framework::DatasetMode::ALL)
auto result_1 = run_conv();
for(size_t i = 0; i < result_0.info()->tensor_shape().total_size(); ++i)
{
- ARM_COMPUTE_EXPECT(reinterpret_cast<float *>(result_0.buffer())[i] == reinterpret_cast<float *>(result_1.buffer())[i], framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(((float *)result_0.buffer())[i] == ((float *)result_1.buffer())[i], framework::LogLevel::ERRORS);
}
}
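
For context, the SupportedTypes case removed above follows Compute Library's usual two-phase pattern: describe the tensors with TensorInfo, then ask the function's static validate() whether the configuration is supported before allocating anything. Below is a minimal standalone sketch of that pattern, lifted from the removed test; the helper name conv_config_is_supported and the header list are assumptions for illustration, not part of this commit.

#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/NEON/functions/NEConvolutionLayer.h"

using namespace arm_compute;

// Hypothetical helper: returns true if NEConvolutionLayer accepts this
// input/weights data-type pairing (e.g. QASYMM8 input with QASYMM8_SIGNED
// weights), mirroring the removed SupportedTypes data test case.
bool conv_config_is_supported(DataType input_dt, DataType weights_dt)
{
    // Describe the tensors; validate() allocates no memory.
    TensorInfo input(TensorShape(3U, 3U, 1U), 1, input_dt);
    TensorInfo weights(TensorShape(2U, 2U, 1U, 1U), 1, weights_dt);
    TensorInfo output(TensorShape(2U, 2U, 1U), 1, input_dt);

    input.set_quantization_info(QuantizationInfo(1, 0));
    weights.set_quantization_info(QuantizationInfo(1, 0));
    output.set_quantization_info(QuantizationInfo(1, 0));

    // validate() returns an error Status when the configuration is unsupported.
    Status status = NEConvolutionLayer::validate(&input, &weights, nullptr, &output, PadStrideInfo());
    return bool(status);
}
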
diff --git a/tests/validation/NEON/GEMMLowp.cpp b/tests/validation/NEON/GEMMLowp.cpp
index 61202ee2b7..d25f43a330 100644
--- a/tests/validation/NEON/GEMMLowp.cpp
+++ b/tests/validation/NEON/GEMMLowp.cpp
@@ -141,23 +141,20 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(
TensorInfo(TensorShape(20U, 13U), 1, DataType::QASYMM8, QuantizationInfo(1.f/255, 10)), // Invalid dimensions
TensorInfo(TensorShape(21U, 13U), 1, DataType::QASYMM8, QuantizationInfo(1.f/255, 10)), // Invalid dimensions
TensorInfo(TensorShape(16U, 32U), 1, DataType::QASYMM8, QuantizationInfo(1.f/255, 10)),
- TensorInfo(TensorShape(16U, 32U), 1, DataType::QASYMM8_SIGNED, QuantizationInfo(1.f/255, 10)), // Invalid types
}),
make("InputBInfo",{ TensorInfo(TensorShape(33U, 21U), 1, DataType::QASYMM8, QuantizationInfo(1.f/256, 10)),
TensorInfo(TensorShape(33U, 21U), 1, DataType::QASYMM8, QuantizationInfo(1.f/256, 10)),
TensorInfo(TensorShape(33U, 21U), 1, DataType::QASYMM8, QuantizationInfo(1.f/256, 10)),
TensorInfo(TensorShape(33U, 21U), 1, DataType::QASYMM8, QuantizationInfo(1.f/256, 10)),
TensorInfo(TensorShape(64U, 16U), 1, DataType::QASYMM8, QuantizationInfo(1.f/256, 10)),
- TensorInfo(TensorShape(64U, 16U), 1, DataType::QASYMM8, QuantizationInfo(1.f/256, 10)),
}),
make("OutputInfo",{ TensorInfo(TensorShape(33U, 13U), 1, DataType::S32),
TensorInfo(TensorShape(33U, 13U), 1, DataType::S32),
TensorInfo(TensorShape(33U, 13U), 1, DataType::S32),
TensorInfo(TensorShape(8U, 11U), 1, DataType::S32),
TensorInfo(TensorShape(64U, 32U), 1, DataType::S32),
- TensorInfo(TensorShape(64U, 32U), 1, DataType::S32),
}),
- make("Expected", { true, false, false, false, true, false })),
+ make("Expected", { true, false, false, false, true })),
a_info, b_info, output_info, expected)
{
// Lock tensors
@@ -362,39 +359,10 @@ TEST_SUITE_END() // DynamicQuantization
#ifdef __aarch64__
// Dequant tests involve returning F32 from the MatrixMultiplyCore kernels and are only implemented in aarch64
TEST_SUITE(Dequant)
-DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(
- make("InputAInfo", {
- TensorInfo(TensorShape(16U, 32U), 1, DataType::QASYMM8, QuantizationInfo(1.f/255, 10)),
- TensorInfo(TensorShape(16U, 32U), 1, DataType::QASYMM8_SIGNED, QuantizationInfo(1.f/255, 10)),
- TensorInfo(TensorShape(16U, 32U), 1, DataType::QASYMM8_SIGNED, QuantizationInfo(1.f/255, 10)), // Invalid types
- }),
- make("InputBInfo",{
- TensorInfo(TensorShape(64U, 16U), 1, DataType::QASYMM8_SIGNED, QuantizationInfo(1.f/256, 10)),
- TensorInfo(TensorShape(64U, 16U), 1, DataType::QASYMM8_SIGNED, QuantizationInfo(1.f/256, 10)),
- TensorInfo(TensorShape(64U, 16U), 1, DataType::QASYMM8, QuantizationInfo(1.f/256, 10)),
- }),
- make("OutputInfo",{
- TensorInfo(TensorShape(64U, 32U), 1, DataType::F32),
- TensorInfo(TensorShape(64U, 32U), 1, DataType::F32),
- TensorInfo(TensorShape(64U, 32U), 1, DataType::F32),
- }),
- make("Expected", { true, true, false })),
- a_info, b_info, output_info, expected)
-{
- // Lock tensors
- Status status = NEGEMMLowpMatrixMultiplyCore::validate(&a_info.clone()->set_is_resizable(false),
- &b_info.clone()->set_is_resizable(false),
- nullptr,
- &output_info.clone()->set_is_resizable(false));
- ARM_COMPUTE_EXPECT(bool(status) == expected, framework::LogLevel::ERRORS);
-}
-
constexpr AbsoluteTolerance<float> tolerance_dequantized(0.01f);
FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMLowpDequantizedMatrixMultiplyValidationFixture, framework::DatasetMode::ALL,
combine(
datasets::SmallGEMMLowpDataset(),
- make("DataTypeA", {DataType::QASYMM8_SIGNED, DataType::QASYMM8}),
- make("DataTypeB", DataType::QASYMM8_SIGNED),
make("accumulate", {true, false})
))
{
@@ -405,8 +373,6 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMLowpDequantizedMatrixMultiplyValidationFi
FIXTURE_DATA_TEST_CASE(RunLarge, NEGEMMLowpDequantizedMatrixMultiplyValidationFixture, framework::DatasetMode::NIGHTLY,
combine(
datasets::LargeGEMMLowpDataset(),
- make("DataTypeA", {DataType::QASYMM8_SIGNED, DataType::QASYMM8}),
- make("DataTypeB", DataType::QASYMM8_SIGNED),
make("accumulate", {false})
))
{
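
The Dequant Validate case removed above exercised the same validate() pattern for the mixed-sign, F32-output path. A condensed sketch of the configuration that the removed test expected to pass, with shapes and quantization copied from it (the helper name dequant_gemm_is_supported is an assumption for illustration):

#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h"

using namespace arm_compute;

// Hypothetical helper mirroring the removed Dequant Validate case:
// QASYMM8 A with QASYMM8_SIGNED B and an F32 output was expected to validate.
bool dequant_gemm_is_supported()
{
    TensorInfo a(TensorShape(16U, 32U), 1, DataType::QASYMM8, QuantizationInfo(1.f / 255, 10));
    TensorInfo b(TensorShape(64U, 16U), 1, DataType::QASYMM8_SIGNED, QuantizationInfo(1.f / 256, 10));
    TensorInfo dst(TensorShape(64U, 32U), 1, DataType::F32);

    // clone() so set_is_resizable() does not mutate the originals, as in the removed test.
    Status status = NEGEMMLowpMatrixMultiplyCore::validate(&a.clone()->set_is_resizable(false),
                                                           &b.clone()->set_is_resizable(false),
                                                           nullptr,
                                                           &dst.clone()->set_is_resizable(false));
    return bool(status);
}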