author     Gunes Bayir <gunes.bayir@arm.com>    2023-10-11 09:56:05 +0100
committer  SiCong Li <sicong.li@arm.com>        2023-10-30 12:01:04 +0000
commit     dfcd41af4eea67f7c348c8688e05cb12e3eee5f5 (patch)
tree       dac23e9d51fd03375e8ed756fdab96e8359228a9
parent     72b7471955f32d2fccb170ab1ab40c7eecfa7f44 (diff)
download   ComputeLibrary-dfcd41af4eea67f7c348c8688e05cb12e3eee5f5.tar.gz
Use dynamic quantization in OpenCL™ Direct Convolution tests
This patch calculates the output quantization info based on the inputs'
quantization information. The previous approach used the same quantization
information for the input, weights and output tensors. This implementation
does not cover the cases where there is a fused activation function.

Note that no Neon™ tests are changed, since there were no quantized Neon
Direct Convolution tests.

Resolves: COMPMID-6485
Change-Id: Id32241320acae0b58552546d6d6f665cd5c63044
Signed-off-by: Gunes Bayir <gunes.bayir@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/10470
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: SiCong Li <sicong.li@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Benchmark: Arm Jenkins <bsgcomp@arm.com>
-rw-r--r--  tests/validation/CL/DirectConvolutionLayer.cpp            | 199
-rw-r--r--  tests/validation/Helpers.cpp                              |  37
-rw-r--r--  tests/validation/Helpers.h                                |  26
-rw-r--r--  tests/validation/fixtures/DirectConvolutionLayerFixture.h | 142
4 files changed, 326 insertions(+), 78 deletions(-)
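
In short, instead of reusing one QuantizationInfo for the input, weights and output, the tests now derive the destination quantization from the inputs'. A minimal sketch of the new helper this patch introduces; the scale/offset values below are hypothetical, chosen purely for illustration:

    #include "arm_compute/core/QuantizationInfo.h"
    #include "arm_compute/core/Types.h"
    #include "tests/validation/Helpers.h"

    using namespace arm_compute;
    using namespace arm_compute::test::validation;

    void example()
    {
        // Hypothetical per-tensor quantization for a QASYMM8 direct convolution
        const QuantizationInfo in_q(0.25f, 10); // input scale/offset (illustrative)
        const QuantizationInfo w_q(0.50f, 3);   // weights scale/offset (illustrative)

        // A 3x3 kernel over 64 input channels -> 3 * 3 * 64 = 576 accumulations
        // per output element
        const QuantizationHint hint = suggest_conv_dst_q_info_and_bias(
            in_q, w_q, 3 /* height */, 3 /* width */, 64 /* channels */,
            DataType::QASYMM8, 0.5f /* bias_fraction */);

        // hint.q_info is the suggested dst QuantizationInfo; hint.bias_min and
        // hint.bias_max bound the random bias fill so outputs rarely saturate.
        (void)hint;
    }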
diff --git a/tests/validation/CL/DirectConvolutionLayer.cpp b/tests/validation/CL/DirectConvolutionLayer.cpp
index 342a093ca3..ff22ae5ef0 100644
--- a/tests/validation/CL/DirectConvolutionLayer.cpp
+++ b/tests/validation/CL/DirectConvolutionLayer.cpp
@@ -605,103 +605,197 @@ FIXTURE_DATA_TEST_CASE(Run, CLDirectConvolutionValidationWithTensorShapesFixture
TEST_SUITE_END() // FP32_CustomDataset
TEST_SUITE_END() // Float
+/// @note: Every quantized test has two versions, with and without activation, because the given quantization info is
+/// ignored when there is no activation: instead of using the same quantization information for all the tensors, the
+/// fixture generates separate quantization info for each input and for the output tensor.
+/// Once dynamic quantization is also supported in the presence of an activation function, these two versions should be
+/// merged again, with the explicitly specified quantization info removed.
const auto QuantizedActivationFunctionsDataset = framework::dataset::make("ActivationInfo",
{
- ActivationLayerInfo(),
ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.f)
});
+const auto NoActivation = framework::dataset::make("ActivationInfo",
+{
+ ActivationLayerInfo()
+});
+const auto IgnoredQuantizationInfo = framework::dataset::make("IgnoredQuantizationInfo",
+{
+ QuantizationInfo()
+});
TEST_SUITE(Quantized)
TEST_SUITE(QASYMM8)
-FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, CLDirectConvolutionLayerQuantizedMixedDataLayoutFixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(data_precommit,
- framework::dataset::make("DataType", DataType::QASYMM8)),
- framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f / 255, 10) })),
- QuantizedActivationFunctionsDataset),
+FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, CLDirectConvolutionLayerQuantizedMixedDataLayoutFixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(data_precommit,
+ framework::dataset::make("DataType", DataType::QASYMM8),
+ IgnoredQuantizationInfo,
+ NoActivation,
+ framework::dataset::make("DataLayout", { DataLayout::NCHW })))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference, tolerance_qasymm8);
+}
+FIXTURE_DATA_TEST_CASE(RunMixedDataLayoutWithActivation, CLDirectConvolutionLayerQuantizedMixedDataLayoutFixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(data_precommit,
+ framework::dataset::make("DataType", DataType::QASYMM8),
+ framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f / 255, 10) }),
+ QuantizedActivationFunctionsDataset,
+ framework::dataset::make("DataLayout", { DataLayout::NCHW })))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference, tolerance_qasymm8);
+}
+FIXTURE_DATA_TEST_CASE(RunSmall, CLDirectConvolutionLayerQuantizedFixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(data_precommit,
+ framework::dataset::make("DataType", DataType::QASYMM8),
+ IgnoredQuantizationInfo,
+ NoActivation,
+ framework::dataset::make("DataLayout", { DataLayout::NCHW })))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference, tolerance_qasymm8);
+}
+FIXTURE_DATA_TEST_CASE(RunSmallWithActivation, CLDirectConvolutionLayerQuantizedFixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(data_precommit,
+ framework::dataset::make("DataType", DataType::QASYMM8),
+ framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f / 255, 10), QuantizationInfo(1.1f, 10) }),
+ QuantizedActivationFunctionsDataset,
framework::dataset::make("DataLayout", { DataLayout::NCHW })))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_qasymm8);
}
-FIXTURE_DATA_TEST_CASE(RunSmall, CLDirectConvolutionLayerQuantizedFixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(data_precommit,
- framework::dataset::make("DataType", DataType::QASYMM8)),
- framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f / 255, 10), QuantizationInfo(1.1f, 10) })),
- QuantizedActivationFunctionsDataset),
+FIXTURE_DATA_TEST_CASE(RunSmall9x9, CLDirectConvolutionLayerQuantizedFixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(data_precommit_9x9,
+ framework::dataset::make("DataType",
+ DataType::QASYMM8),
+ IgnoredQuantizationInfo,
+ NoActivation,
framework::dataset::make("DataLayout", { DataLayout::NCHW })))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_qasymm8);
}
-FIXTURE_DATA_TEST_CASE(RunSmall9x9, CLDirectConvolutionLayerQuantizedFixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(data_precommit_9x9,
+FIXTURE_DATA_TEST_CASE(RunSmall9x9WithActivation, CLDirectConvolutionLayerQuantizedFixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(data_precommit_9x9,
framework::dataset::make("DataType",
- DataType::QASYMM8)),
- framework::dataset::make("QuantizationInfo", { QuantizationInfo(3.f / 255, 10), QuantizationInfo(1.1f, 10) })),
- QuantizedActivationFunctionsDataset),
+ DataType::QASYMM8),
+ framework::dataset::make("QuantizationInfo", { QuantizationInfo(3.f / 255, 10), QuantizationInfo(1.1f, 10) }),
+ QuantizedActivationFunctionsDataset,
+ framework::dataset::make("DataLayout", { DataLayout::NCHW })))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference, tolerance_qasymm8);
+}
+FIXTURE_DATA_TEST_CASE(RunLarge, CLDirectConvolutionLayerQuantizedFixture<uint8_t>, framework::DatasetMode::NIGHTLY, combine(data_nightly, framework::dataset::make("DataType",
+ DataType::QASYMM8),
+ IgnoredQuantizationInfo,
+ NoActivation,
framework::dataset::make("DataLayout", { DataLayout::NCHW })))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_qasymm8);
}
-FIXTURE_DATA_TEST_CASE(RunLarge, CLDirectConvolutionLayerQuantizedFixture<uint8_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(data_nightly, framework::dataset::make("DataType",
- DataType::QASYMM8)),
- framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f / 255, 10), QuantizationInfo(1.1f, 10) })),
- QuantizedActivationFunctionsDataset),
+FIXTURE_DATA_TEST_CASE(RunLargeWithActivation, CLDirectConvolutionLayerQuantizedFixture<uint8_t>, framework::DatasetMode::NIGHTLY, combine(data_nightly, framework::dataset::make("DataType",
+ DataType::QASYMM8),
+ framework::dataset::make("QuantizationInfoIf", { QuantizationInfo(2.f / 255, 10), QuantizationInfo(1.1f, 10) }),
+ QuantizedActivationFunctionsDataset,
framework::dataset::make("DataLayout", { DataLayout::NCHW })))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_qasymm8);
}
-FIXTURE_DATA_TEST_CASE(RunLarge9x9, CLDirectConvolutionLayerQuantizedFixture<uint8_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(data_nightly_9x9,
+FIXTURE_DATA_TEST_CASE(RunLarge9x9, CLDirectConvolutionLayerQuantizedFixture<uint8_t>, framework::DatasetMode::NIGHTLY, combine(data_nightly_9x9,
framework::dataset::make("DataType",
- DataType::QASYMM8)),
- framework::dataset::make("QuantizationInfo", { QuantizationInfo(3.f / 255, 10), QuantizationInfo(1.1f, 10) })),
- QuantizedActivationFunctionsDataset),
+ DataType::QASYMM8),
+ IgnoredQuantizationInfo,
+ NoActivation,
framework::dataset::make("DataLayout", { DataLayout::NCHW })))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_qasymm8);
}
-
-TEST_SUITE_END() // QASYMM8
-
-TEST_SUITE(QASYMM8_CustomDataset)
-FIXTURE_DATA_TEST_CASE(Run, CLDirectConvolutionValidationWithTensorShapesQuantizedFixture<uint8_t>, framework::DatasetMode::NIGHTLY,
- combine(combine(combine(combine(datasets::DirectConvolutionLayerDataset(),
- framework::dataset::make("DataType", DataType::QASYMM8)),
- framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f / 255, 127), QuantizationInfo(1.1f, 10) })),
- QuantizedActivationFunctionsDataset),
+FIXTURE_DATA_TEST_CASE(RunLarge9x9WithActivation, CLDirectConvolutionLayerQuantizedFixture<uint8_t>, framework::DatasetMode::NIGHTLY, combine(data_nightly_9x9,
+ framework::dataset::make("DataType",
+ DataType::QASYMM8),
+ framework::dataset::make("QuantizationInfo", { QuantizationInfo(3.f / 255, 10), QuantizationInfo(1.1f, 10) }),
+ QuantizedActivationFunctionsDataset,
+ framework::dataset::make("DataLayout", { DataLayout::NCHW })))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference, tolerance_qasymm8);
+}
+FIXTURE_DATA_TEST_CASE(CustomDataset, CLDirectConvolutionValidationWithTensorShapesQuantizedFixture<uint8_t>, framework::DatasetMode::NIGHTLY,
+ combine(datasets::DirectConvolutionLayerDataset(),
+ framework::dataset::make("DataType", DataType::QASYMM8),
+ IgnoredQuantizationInfo,
+ NoActivation,
+ framework::dataset::make("DataLayout", { DataLayout::NCHW })))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference, tolerance_qasymm8);
+}
+FIXTURE_DATA_TEST_CASE(CustomDatasetWithActivation, CLDirectConvolutionValidationWithTensorShapesQuantizedFixture<uint8_t>, framework::DatasetMode::NIGHTLY,
+ combine(datasets::DirectConvolutionLayerDataset(),
+ framework::dataset::make("DataType", DataType::QASYMM8),
+ framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f / 255, 127), QuantizationInfo(1.1f, 10) }),
+ QuantizedActivationFunctionsDataset,
framework::dataset::make("DataLayout", { DataLayout::NCHW })))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_qasymm8);
}
-TEST_SUITE_END() // QASYMM8_CustomDataset
+TEST_SUITE_END() // QASYMM8
TEST_SUITE(QASYMM8_SIGNED)
-FIXTURE_DATA_TEST_CASE(RunSmall, CLDirectConvolutionLayerQuantizedFixture<int8_t>, framework::DatasetMode::ALL, combine(combine(combine(combine(data_precommit, framework::dataset::make("DataType",
- DataType::QASYMM8_SIGNED)),
- framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f / 255, 10), QuantizationInfo(1.1f, -10) })),
- QuantizedActivationFunctionsDataset),
+FIXTURE_DATA_TEST_CASE(RunSmall, CLDirectConvolutionLayerQuantizedFixture<int8_t>, framework::DatasetMode::ALL, combine(data_precommit, framework::dataset::make("DataType",
+ DataType::QASYMM8_SIGNED),
+ IgnoredQuantizationInfo,
+ NoActivation,
framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_qasymm8);
}
-FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, CLDirectConvolutionLayerQuantizedMixedDataLayoutFixture<int8_t>, framework::DatasetMode::ALL, combine(combine(combine(combine(data_precommit,
+FIXTURE_DATA_TEST_CASE(RunSmallWithActivation, CLDirectConvolutionLayerQuantizedFixture<int8_t>, framework::DatasetMode::ALL, combine(data_precommit, framework::dataset::make("DataType",
+ DataType::QASYMM8_SIGNED),
+ framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f / 255, 10), QuantizationInfo(1.1f, -10) }),
+ QuantizedActivationFunctionsDataset,
+ framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference, tolerance_qasymm8);
+}
+FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, CLDirectConvolutionLayerQuantizedMixedDataLayoutFixture<int8_t>, framework::DatasetMode::ALL, combine(data_precommit,
framework::dataset::make("DataType",
- DataType::QASYMM8_SIGNED)),
- framework::dataset::make("QuantizationInfo", { QuantizationInfo(1.1f, -10) })),
- QuantizedActivationFunctionsDataset),
+ DataType::QASYMM8_SIGNED),
+ IgnoredQuantizationInfo,
+ NoActivation,
framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_qasymm8);
}
-FIXTURE_DATA_TEST_CASE(RunSmall9x9, CLDirectConvolutionLayerQuantizedFixture<int8_t>, framework::DatasetMode::ALL, combine(combine(combine(combine(data_precommit_9x9,
+FIXTURE_DATA_TEST_CASE(RunMixedDataLayoutWithActivation, CLDirectConvolutionLayerQuantizedMixedDataLayoutFixture<int8_t>, framework::DatasetMode::ALL, combine(data_precommit,
framework::dataset::make("DataType",
- DataType::QASYMM8_SIGNED)),
- framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f / 255, 10), QuantizationInfo(1.1f, 10) })),
- QuantizedActivationFunctionsDataset),
+ DataType::QASYMM8_SIGNED),
+ framework::dataset::make("QuantizationInfo", { QuantizationInfo(1.1f, -10) }),
+ QuantizedActivationFunctionsDataset,
+ framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference, tolerance_qasymm8);
+}
+FIXTURE_DATA_TEST_CASE(RunSmall9x9, CLDirectConvolutionLayerQuantizedFixture<int8_t>, framework::DatasetMode::ALL, combine(data_precommit_9x9,
+ framework::dataset::make("DataType",
+ DataType::QASYMM8_SIGNED),
+ IgnoredQuantizationInfo,
+ NoActivation,
+ framework::dataset::make("DataLayout", { DataLayout::NCHW })))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference, tolerance_qasymm8);
+}
+FIXTURE_DATA_TEST_CASE(RunSmall9x9WithActivation, CLDirectConvolutionLayerQuantizedFixture<int8_t>, framework::DatasetMode::ALL, combine(data_precommit_9x9,
+ framework::dataset::make("DataType",
+ DataType::QASYMM8_SIGNED),
+ framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f / 255, 10), QuantizationInfo(1.1f, 10) }),
+ QuantizedActivationFunctionsDataset,
framework::dataset::make("DataLayout", { DataLayout::NCHW })))
{
// Validate output
@@ -709,10 +803,21 @@ FIXTURE_DATA_TEST_CASE(RunSmall9x9, CLDirectConvolutionLayerQuantizedFixture<int
}
FIXTURE_DATA_TEST_CASE(RunCustomDataset, CLDirectConvolutionValidationWithTensorShapesQuantizedFixture<int8_t>, framework::DatasetMode::NIGHTLY,
- combine(combine(combine(combine(datasets::DirectConvolutionLayerDataset(),
- framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)),
- framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f / 255, 127), QuantizationInfo(1.1f, 10) })),
- QuantizedActivationFunctionsDataset),
+ combine(datasets::DirectConvolutionLayerDataset(),
+ framework::dataset::make("DataType", DataType::QASYMM8_SIGNED),
+ IgnoredQuantizationInfo,
+ NoActivation,
+ framework::dataset::make("DataLayout", { DataLayout::NCHW })))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference, tolerance_qasymm8);
+}
+
+FIXTURE_DATA_TEST_CASE(RunCustomDatasetWithActivation, CLDirectConvolutionValidationWithTensorShapesQuantizedFixture<int8_t>, framework::DatasetMode::NIGHTLY,
+ combine(datasets::DirectConvolutionLayerDataset(),
+ framework::dataset::make("DataType", DataType::QASYMM8_SIGNED),
+ framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f / 255, 127), QuantizationInfo(1.1f, 10) }),
+ QuantizedActivationFunctionsDataset,
framework::dataset::make("DataLayout", { DataLayout::NCHW })))
{
// Validate output
diff --git a/tests/validation/Helpers.cpp b/tests/validation/Helpers.cpp
index 2f273e7042..cb4d87601c 100644
--- a/tests/validation/Helpers.cpp
+++ b/tests/validation/Helpers.cpp
@@ -352,8 +352,43 @@ void add_padding_x(std::initializer_list<ITensor *> tensors, const DataLayout &d
}
}
+QuantizationHint suggest_conv_dst_q_info_and_bias(const QuantizationInfo &in_q_info,
+ const QuantizationInfo &weight_q_info,
+ int32_t height,
+ int32_t width,
+ int32_t channels,
+ DataType data_type,
+ float bias_fraction)
+{
+ /** Quantization Setup of convolution
+ *
+ * Just like any other multiply-accumulate operation, 2D convolution
+ * multiplies and accumulates the input and weight tensors. This accumulation
+ * takes place over three dimensions: height, width and channels, all of
+ * which belong to the weight tensor.
+ *
+ * The formula for simple convolution can be written as:
+ * C = sum_h sum_w sum_c(I[h_offset + h, w_offset + w, c] * W[h, w, c])
+ *
+ * Here, h_offset and w_offset are the starting positions in the image. Effects
+ * of paddings are ignored. This accumulation reduces to something like
+ *
+ * C = sum_i(I_i * W_i), for i = 1 ... m
+ * where m = height x width x channels.
+ *
+ * Non-unit strides and/or dilations do not change the probabilistic nature of
+ * this sum, because we always iterate over the full size of the weight tensor.
+ *
+ * Padding may affect this summation, but it is a boundary condition and is
+ * neglected for brevity.
+ */
+
+ return suggest_mac_dst_q_info_and_bias(in_q_info, weight_q_info, height * width * channels, data_type, bias_fraction);
+}
+
QuantizationHint suggest_matmul_dst_q_info_and_bias(const QuantizationInfo &lhs_q_info,
- const QuantizationInfo &rhs_q_info, int32_t m, int32_t n, int32_t k, DataType data_type,
+ const QuantizationInfo &rhs_q_info,
+ int32_t m, int32_t n, int32_t k, DataType data_type,
float bias_fraction)
{
ARM_COMPUTE_UNUSED(m, n);
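
Written out, the reduction sketched in the comment above collapses the three nested sums into a single multiply-accumulate chain, which is why the helper can delegate to suggest_mac_dst_q_info_and_bias with k = height * width * channels:

    \[
    C = \sum_{h=0}^{H-1}\sum_{w=0}^{W-1}\sum_{c=0}^{C_{in}-1}
        I[h_o + h,\, w_o + w,\, c]\; W[h, w, c]
      = \sum_{i=1}^{m} I_i W_i, \qquad m = H \cdot W \cdot C_{in}
    \]

For example, a 3x3 kernel over 64 input channels yields m = 576 accumulations per output element, which is exactly the k passed to the MAC-level heuristic.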
diff --git a/tests/validation/Helpers.h b/tests/validation/Helpers.h
index 7d53c1de37..5a1e69afbd 100644
--- a/tests/validation/Helpers.h
+++ b/tests/validation/Helpers.h
@@ -240,6 +240,27 @@ std::pair<int, int> get_symm_quantized_per_channel_bounds(const QuantizationInfo
*/
void add_padding_x(std::initializer_list<ITensor *> tensors, const DataLayout &data_layout = DataLayout::NHWC, bool only_right_pad = false);
+/** For a 2D convolution, given the input/weights quantization information and the convolution dimensions,
+ * calculate a suitable output quantization and a suggested bias range for obtaining non-saturated outputs with high probability.
+ *
+ * @param[in] in_q_info     Input tensor quantization info
+ * @param[in] weight_q_info Weights tensor quantization info
+ * @param[in] height        Height of the weights tensor
+ * @param[in] width         Width of the weights tensor
+ * @param[in] channels      Number of input channels
+ * @param[in] data_type     Data type; only QASYMM8 and QASYMM8_SIGNED are supported
+ * @param[in] bias_fraction see @ref suggest_mac_dst_q_info_and_bias() for explanation
+ *
+ * @return QuantizationHint object containing the suggested output quantization info and min/max bias range
+ */
+QuantizationHint suggest_conv_dst_q_info_and_bias(const QuantizationInfo &in_q_info,
+ const QuantizationInfo &weight_q_info,
+ int32_t height,
+ int32_t width,
+ int32_t channels,
+ DataType data_type,
+ float bias_fraction);
+
/** For a matrix multiplication, given the Lhs/Rhs matrix quantization informations and the matrix multiplication dimensions,
* calculate a suitable output quantization and suggested bias range for obtaining non-saturated outputs with high probability.
*
@@ -249,7 +270,7 @@ void add_padding_x(std::initializer_list<ITensor *> tensors, const DataLayout &d
* @param[in] n Number of columns of Rhs Matrix
* @param[in] k Number of rows/columns of Rhs/Lhs Matrix
* @param[in] data_type data type, only QASYMM8, QASYMM8_SIGNED are supported
- * @param[in] bias_fraction the fraction of bias amplitude compared to integer accummulation. 0 if there is no bias.
+ * @param[in] bias_fraction see @ref suggest_mac_dst_q_info_and_bias() for explanation
*
* @return QuantizationHint object containing the suggested output quantization info and min/max bias range
*/
@@ -269,7 +290,8 @@ QuantizationHint suggest_matmul_dst_q_info_and_bias(const QuantizationInfo &lhs_
* @return QuantizationHint object containing the suggested output quantization info and min/max bias range
*/
QuantizationHint suggest_mac_dst_q_info_and_bias(const QuantizationInfo &lhs_q_info,
- const QuantizationInfo &rhs_q_info, int32_t k, DataType data_type, float bias_fraction);
+ const QuantizationInfo &rhs_q_info, int32_t k, DataType data_type,
+ float bias_fraction);
} // namespace validation
} // namespace test
} // namespace arm_compute
diff --git a/tests/validation/fixtures/DirectConvolutionLayerFixture.h b/tests/validation/fixtures/DirectConvolutionLayerFixture.h
index a666ff96a5..6f204642ca 100644
--- a/tests/validation/fixtures/DirectConvolutionLayerFixture.h
+++ b/tests/validation/fixtures/DirectConvolutionLayerFixture.h
@@ -21,6 +21,10 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
+
+#ifndef ACL_TESTS_VALIDATION_FIXTURES_DIRECTCONVOLUTIONLAYERFIXTURE_H
+#define ACL_TESTS_VALIDATION_FIXTURES_DIRECTCONVOLUTIONLAYERFIXTURE_H
+
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
@@ -51,10 +55,52 @@ class DirectConvolutionValidationGenericFixture : public framework::Fixture
public:
using TBias = typename std::conditional < std::is_same<T, uint8_t>::value || std::is_same<T, int8_t>::value, int32_t, T >::type;
+ void setup_quantization(const TensorShape &input_shape, const TensorShape &weights_shape, QuantizationInfo &input_q_info,
+ QuantizationInfo &weights_q_info, DataType data_type)
+ {
+ const int32_t t_max = static_cast<int32_t>(std::numeric_limits<T>::max());
+ const int32_t t_min = static_cast<int32_t>(std::numeric_limits<T>::min());
+
+ std::mt19937 generator(library->seed() + _hash);
+ std::uniform_real_distribution<float> distribution_float(-5.0f, 3.0f);
+ std::uniform_int_distribution<int32_t> distribution_t(t_min, t_max);
+
+ const float scale_lhs = pow(2, distribution_float(generator)); // [2^-5, 2^3]
+ const float scale_rhs = pow(2, distribution_float(generator)); // [2^-5, 2^3]
+
+ const int32_t offset_lhs = distribution_t(generator);
+ const int32_t offset_rhs = distribution_t(generator);
+
+ input_q_info = QuantizationInfo(scale_lhs, offset_lhs);
+ weights_q_info = QuantizationInfo(scale_rhs, offset_rhs);
+
+ QuantizationHint q_hint = suggest_conv_dst_q_info_and_bias(input_q_info, weights_q_info,
+ weights_shape.y() /* height */, weights_shape.x() /* width */, input_shape.z() /* channels */,
+ data_type, 0.5f /* bias_fraction */);
+
+ _dst_q_info = q_hint.q_info;
+ _min_bias = q_hint.bias_min;
+ _max_bias = q_hint.bias_max;
+
+ // Do not change these values: they are the natural limits of the associated
+ // data types and are embedded in the computation of the dst quantization info.
+ _min_u8 = 0;
+ _max_u8 = 255;
+ _min_s8 = -128;
+ _max_s8 = 127;
+ }
+
void setup(TensorShape input_shape, int stride_x, int stride_y, int pad_x, int pad_y, unsigned int kernel_size, unsigned int num_kernels,
DataType data_type, QuantizationInfo quantization_info, ActivationLayerInfo act_info, DataLayout data_layout, bool mixed_layout = false)
{
- _quantization_info = quantization_info;
+ // This hash is used by the random generators. There may be hash collisions,
+ // but this is intentional: it is an easy way to make the random generation
+ // process differ for the many test configurations that previously used the
+ // same set of values.
+ _hash = input_shape[0] + input_shape[1] + input_shape[2] + input_shape[3] +
+ stride_x + stride_y + pad_x + pad_y + kernel_size + num_kernels + mixed_layout
+ + (data_layout == DataLayout::NHWC);
+
_data_type = data_type;
_mixed_layout = mixed_layout;
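
For reference, a self-contained sketch of the sampling scheme used by setup_quantization() above, assuming a fixed seed in place of library->seed() + _hash and uint8_t limits in place of std::numeric_limits<T>:

    #include <cmath>
    #include <cstdint>
    #include <random>

    int main()
    {
        std::mt19937 generator(42); // fixed seed stands in for library->seed() + _hash

        // Scales are log-uniform in [2^-5, 2^3]: drawing the exponent uniformly
        // makes small and large scales equally likely.
        std::uniform_real_distribution<float> exponent(-5.0f, 3.0f);
        const float scale_lhs = std::pow(2.0f, exponent(generator));
        const float scale_rhs = std::pow(2.0f, exponent(generator));

        // Offsets are drawn from the full representable range of the element
        // type; uint8_t limits shown here.
        std::uniform_int_distribution<int32_t> offset(0, 255);
        const int32_t offset_lhs = offset(generator);
        const int32_t offset_rhs = offset(generator);

        (void)scale_lhs; (void)scale_rhs; (void)offset_lhs; (void)offset_rhs;
        return 0;
    }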
@@ -68,8 +114,17 @@ public:
const TensorShape output_shape = compute_deep_convolution_shape(input_info, weights_info, info);
- _target = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, quantization_info, act_info, data_layout);
- _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, quantization_info, act_info);
+ QuantizationInfo input_q_info = quantization_info;
+ QuantizationInfo weights_q_info = quantization_info;
+ _dst_q_info = quantization_info;
+
+ if(is_data_type_quantized(data_type) && (!act_info.enabled() || act_info.activation() == ActivationFunction::IDENTITY))
+ {
+ setup_quantization(input_shape, weights_shape, input_q_info, weights_q_info, data_type);
+ }
+
+ _target = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, input_q_info, weights_q_info, act_info, data_layout);
+ _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, input_q_info, weights_q_info, act_info);
}
void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation,
@@ -78,13 +133,29 @@ public:
ARM_COMPUTE_ERROR_ON(data_layout == DataLayout::UNKNOWN);
ARM_COMPUTE_UNUSED(dilation);
- _quantization_info = quantization_info;
+ // This hash is used by the random generators. There may be hash collisions,
+ // but this is intentional: it is an easy way to make the random generation
+ // process differ for the many test configurations that previously used the
+ // same set of values.
+ _hash = input_shape[0] + input_shape[1] + input_shape[2] + input_shape[3] +
+ weights_shape[0] + weights_shape[1] + weights_shape[2] + weights_shape[3] + dilation.x() +
+ dilation.y() + info.pad_bottom() + info.pad_left() + info.pad_right() + info.pad_top();
+
_data_type = data_type;
const DataType bias_data_type = is_data_type_quantized_asymmetric(data_type) ? DataType::S32 : data_type;
- _target = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, quantization_info, act_info, data_layout);
- _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, quantization_info, act_info);
+ QuantizationInfo input_q_info = quantization_info;
+ QuantizationInfo weights_q_info = quantization_info;
+ _dst_q_info = quantization_info;
+
+ if(is_data_type_quantized(data_type) && (!act_info.enabled() || act_info.activation() == ActivationFunction::IDENTITY))
+ {
+ setup_quantization(input_shape, weights_shape, input_q_info, weights_q_info, data_type);
+ }
+
+ _target = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, input_q_info, weights_q_info, act_info, data_layout);
+ _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, input_q_info, weights_q_info, act_info);
}
protected:
@@ -110,14 +181,14 @@ protected:
{
case DataType::QASYMM8:
{
- std::uniform_int_distribution<uint32_t> distribution(0, 50);
+ std::uniform_int_distribution<uint32_t> distribution(_min_u8, _max_u8);
library->fill(tensor, distribution, i);
break;
}
case DataType::QASYMM8_SIGNED:
{
// Use small input range to avoid all the test results being saturated at the end.
- std::uniform_int_distribution<int32_t> distribution(-25, 25);
+ std::uniform_int_distribution<int32_t> distribution(_min_s8, _max_s8);
library->fill(tensor, distribution, i);
break;
}
@@ -135,7 +206,7 @@ protected:
}
case DataType::S32:
{
- std::uniform_int_distribution<int32_t> distribution(-5, 5);
+ std::uniform_int_distribution<int32_t> distribution(_min_bias, _max_bias);
library->fill(tensor, distribution, i);
break;
}
@@ -145,7 +216,7 @@ protected:
}
TensorType compute_target(TensorShape input_shape, TensorShape weights_shape, const TensorShape &bias_shape, TensorShape output_shape, const PadStrideInfo &info,
- DataType data_type, DataType bias_data_type, QuantizationInfo quantization_info, ActivationLayerInfo act_info, const DataLayout &data_layout)
+ DataType data_type, DataType bias_data_type, QuantizationInfo input_q_info, QuantizationInfo weights_q_info, ActivationLayerInfo act_info, const DataLayout &data_layout)
{
if(data_layout == DataLayout::NHWC)
{
@@ -155,10 +226,10 @@ protected:
}
// Create tensors
- TensorType src = create_tensor<TensorType>(input_shape, data_type, 1, quantization_info, data_layout);
- TensorType weights = create_tensor<TensorType>(weights_shape, data_type, 1, quantization_info, data_layout);
- TensorType bias = create_tensor<TensorType>(bias_shape, bias_data_type, 1, quantization_info);
- TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1, quantization_info, data_layout);
+ TensorType src = create_tensor<TensorType>(input_shape, data_type, 1, input_q_info, data_layout);
+ TensorType weights = create_tensor<TensorType>(weights_shape, data_type, 1, weights_q_info, data_layout);
+ TensorType bias = create_tensor<TensorType>(bias_shape, bias_data_type, 1, QuantizationInfo());
+ TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1, _dst_q_info, data_layout);
add_padding_x({ &src, &bias, &dst }, data_layout);
add_padding_x({ &weights }, data_layout, input_shape[0] % 4 == 0); // Don't add left padding if cl image will be used
@@ -184,9 +255,9 @@ protected:
ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
// Fill tensors
- fill(AccessorType(src), 0);
- fill(AccessorType(weights), 1);
- fill(AccessorType(bias), 2);
+ fill(AccessorType(src), 0 + _hash);
+ fill(AccessorType(weights), 1 + _hash);
+ fill(AccessorType(bias), 2 + _hash);
if(_mixed_layout)
{
@@ -202,26 +273,39 @@ protected:
}
SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape, const PadStrideInfo &info,
- DataType data_type, DataType bias_data_type, QuantizationInfo quantization_info, ActivationLayerInfo act_info)
+ DataType data_type, DataType bias_data_type, QuantizationInfo input_q_info, QuantizationInfo weights_q_info, ActivationLayerInfo act_info)
{
// Create reference
- SimpleTensor<T> src{ input_shape, data_type, 1, quantization_info };
- SimpleTensor<T> weights{ weights_shape, data_type, 1, quantization_info };
- SimpleTensor<TBias> bias{ bias_shape, bias_data_type, 1, quantization_info };
+ SimpleTensor<T> src{ input_shape, data_type, 1, input_q_info };
+ SimpleTensor<T> weights{ weights_shape, data_type, 1, weights_q_info };
+ SimpleTensor<TBias> bias{ bias_shape, bias_data_type, 1, QuantizationInfo() };
// Fill reference
- fill(src, 0);
- fill(weights, 1);
- fill(bias, 2);
-
- SimpleTensor<T> dst = reference::convolution_layer<T>(src, weights, bias, output_shape, info);
- return (act_info.enabled()) ? reference::activation_layer<T>(dst, act_info) : dst;
+ fill(src, 0 + _hash);
+ fill(weights, 1 + _hash);
+ fill(bias, 2 + _hash);
+
+ SimpleTensor<T> dst = reference::convolution_layer<T>(src, weights, bias, output_shape, info,
+ Size2D(1U, 1U) /* dilation */, 1 /* num_groups */, _dst_q_info);
+ SimpleTensor<T> dst2 = (act_info.enabled()) ? reference::activation_layer<T>(dst, act_info) : dst;
+ return dst2;
}
TensorType _target{};
SimpleTensor<T> _reference{};
- QuantizationInfo _quantization_info{};
+ QuantizationInfo _dst_q_info{};
DataType _data_type{};
bool _mixed_layout{ false };
+ int32_t _hash{0};
+
+ // Random initialization limits
+ // The default values are the previously handcrafted limits that should be
+ // used when dynamic quantization is not in use.
+ int32_t _min_bias{-5};
+ int32_t _max_bias{5};
+ int32_t _min_u8{0};
+ int32_t _max_u8{50};
+ int32_t _min_s8{-25};
+ int32_t _max_s8{25};
};
template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false>
@@ -275,3 +359,5 @@ public:
} // namespace validation
} // namespace test
} // namespace arm_compute
+
+#endif // ACL_TESTS_VALIDATION_FIXTURES_DIRECTCONVOLUTIONLAYERFIXTURE_H