diff options
author | Gunes Bayir <gunes.bayir@arm.com> | 2024-04-25 12:32:36 +0100 |
---|---|---|
committer | Michael Kozlov <michael.kozlov@arm.com> | 2024-04-25 18:54:44 +0100 |
commit | 02f7616dc4d58b68848a85b66e494bd259cf1c38 (patch) | |
tree | d37bd9cdc7369a785217dd9aa0c3ec1ca7f23c5e /tests/validation/NEON | |
parent | f652703243db6bda49e2464b186e48e3d8c2a740 (diff) | |
download | ComputeLibrary-02f7616dc4d58b68848a85b66e494bd259cf1c38.tar.gz |
Add memory stress tests for per channel quantized convolution (tag: v24.04, branch: branches/arm_compute_24_04)
Partially Resolves: MLCE-1255
Change-Id: Ibadcfedd43530232c65f05e571bc8b4568a63e67
Signed-off-by: Gunes Bayir <gunes.bayir@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/11499
Benchmark: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Pablo Marquez Tello <pablo.tello@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'tests/validation/NEON')
-rw-r--r-- | tests/validation/NEON/ConvolutionLayer.cpp | 21 |
1 file changed, 21 insertions(+), 0 deletions(-)
diff --git a/tests/validation/NEON/ConvolutionLayer.cpp b/tests/validation/NEON/ConvolutionLayer.cpp
index 1f76925d96..d739d4e1a4 100644
--- a/tests/validation/NEON/ConvolutionLayer.cpp
+++ b/tests/validation/NEON/ConvolutionLayer.cpp
@@ -1357,6 +1357,27 @@ FIXTURE_DATA_TEST_CASE(RunSmallSigned, NEGEMMConvolutionLayerQuantizedPerChannel
     // Validate output
     validate(Accessor(_target), _reference, tolerance_qasymm8);
 }
+
+FIXTURE_DATA_TEST_CASE(MemoryStressLargeChannels, NEGEMMConvolutionLayerQuantizedPerChannelFixture<int8_t>,
+    framework::DatasetMode::ALL,
+    combine(
+        make("In", TensorShape(1U)),
+        make("Weights", TensorShape(1U, 1U, 1U, 17000U)),
+        make("Biases", TensorShape(17000U)),
+        make("Out", TensorShape(1U, 1U, 17000U)),
+        make("Info", PadStrideInfo(1, 1, 0, 0)),
+        make("Dilation", Size2D(1, 1)),
+        make("ReshapeWeights", { true }),
+        make("DataType", { DataType::QASYMM8_SIGNED }),
+        make("DataLayout", { DataLayout::NHWC }),
+        make("QuantizationInfo", QuantizationInfo(0.5f, 10)),
+        make("ActivationInfo", ActivationLayerInfo()),
+        make("WeightsDataType", { DataType::QSYMM8_PER_CHANNEL })))
+{
+    // Validate output
+    validate(Accessor(_target), _reference, tolerance_qasymm8);
+}
+
 TEST_SUITE_END() // QSYMM8_PER_CHANNEL
 TEST_SUITE_END() // Quantized