author    Gunes Bayir <gunes.bayir@arm.com>  2023-10-13 16:58:41 +0100
committer SiCong Li <sicong.li@arm.com>      2023-10-31 11:08:15 +0000
commit    93a77cd48609277a3849269c4cea5bf4b1ceb76a (patch)
tree      e0a454b1a3dd5c3998187ce397ba18848ec7ec12
parent    fde45d836cf753a94915ac42d8a13da7edc52221 (diff)
download  ComputeLibrary-93a77cd48609277a3849269c4cea5bf4b1ceb76a.tar.gz
Use dynamic quantization in Convolution and Dilated Convolution tests
This patch calculates the output quantization info based on the inputs' quantization information. The previous approach used the same quantization information for input, weights and output. This implementation does not cover cases with a fused activation function.

Resolves: COMPMID-6482
Change-Id: I4a9d87cfef8ad18ef241d457d23f44c8519a1389
Signed-off-by: SiCong Li <sicong.li@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/10541
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Gunes Bayir <gunes.bayir@arm.com>
Benchmark: Arm Jenkins <bsgcomp@arm.com>
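At the core of the change is the new setup_quantization() step in ConvolutionLayerFixture.h: the fixture randomizes the input and weights quantization per test configuration and derives the output quantization from them. A condensed sketch of that flow follows; the helper name, arguments and value ranges are taken from the fixture diff below, while the surrounding scaffolding is illustrative only.

    // Randomize input/weights quantization per test configuration.
    std::mt19937                           gen(library->seed() + _hash);
    std::uniform_real_distribution<float>  exp_dist(-5.0f, 3.0f);
    std::uniform_int_distribution<int32_t> offset_dist(t_min, t_max); // numeric limits of T

    const QuantizationInfo input_q(std::pow(2.f, exp_dist(gen)), offset_dist(gen));   // scale in [2^-5, 2^3]
    const QuantizationInfo weights_q(std::pow(2.f, exp_dist(gen)), offset_dist(gen));

    // Ask the framework for an output quantization (and bias bounds) that keeps the
    // int32 accumulator of a convolution with these shapes representable.
    const QuantizationHint hint = suggest_conv_dst_q_info_and_bias(input_q, weights_q,
        weights_shape.y() /* height */, weights_shape.x() /* width */,
        input_shape.z() /* channels */, data_type, 0.5f /* bias_fraction */);
    // hint.q_info            -> QuantizationInfo of the dst tensor
    // hint.bias_min/bias_max -> range used to fill the S32 bias tensor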
-rw-r--r--  tests/validation/CL/ConvolutionLayer.cpp             283
-rw-r--r--  tests/validation/CL/DilatedConvolutionLayer.cpp       11
-rw-r--r--  tests/validation/NEON/ConvolutionLayer.cpp            13
-rw-r--r--  tests/validation/NEON/DilatedConvolutionLayer.cpp     11
-rw-r--r--  tests/validation/fixtures/ConvolutionLayerFixture.h  115
5 files changed, 297 insertions, 136 deletions
diff --git a/tests/validation/CL/ConvolutionLayer.cpp b/tests/validation/CL/ConvolutionLayer.cpp
index 986d76708d..8820a6a31e 100644
--- a/tests/validation/CL/ConvolutionLayer.cpp
+++ b/tests/validation/CL/ConvolutionLayer.cpp
@@ -47,6 +47,7 @@ namespace test
{
namespace validation
{
+using framework::dataset::make;
namespace
{
class SmallConvolutionLayerDatasetCases final : public datasets::ConvolutionLayerDataset
@@ -65,7 +66,7 @@ constexpr AbsoluteTolerance<float> tolerance_qasymm8(1); /**< T
constexpr float tolerance_num = 0.07f; /**< Tolerance number */
/** CNN data types */
-const auto CNNDataTypes = framework::dataset::make("DataType",
+const auto CNNDataTypes = make("DataType",
{
DataType::F16,
DataType::F32,
@@ -74,20 +75,20 @@ const auto CNNDataTypes = framework::dataset::make("DataType",
});
/** Grouped CNN data types */
-const auto GroupedCNNDataTypes = framework::dataset::make("DataType",
+const auto GroupedCNNDataTypes = make("DataType",
{
DataType::F16,
DataType::F32
});
-const auto ActivationFunctionsDataset = framework::dataset::make("ActivationInfo",
+const auto ActivationFunctionsDataset = make("ActivationInfo",
{
ActivationLayerInfo(),
ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 0.5f),
ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 0.5f)
});
-const auto ActivationFunctionsSmallDataset = framework::dataset::make("ActivationInfo",
+const auto ActivationFunctionsSmallDataset = make("ActivationInfo",
{
ActivationLayerInfo(),
ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 0.5f)
@@ -100,7 +101,7 @@ TEST_SUITE(ConvolutionLayer)
// *INDENT-OFF*
// clang-format off
DATA_TEST_CASE(ValidateConvolutionMethod, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip(zip(
- framework::dataset::make("InputInfo", { TensorInfo(TensorShape(17U, 31U, 2U), 1, DataType::F32), // Select GEMM
+ make("InputInfo", { TensorInfo(TensorShape(17U, 31U, 2U), 1, DataType::F32), // Select GEMM
TensorInfo(TensorShape(17U, 31U, 2U), 1, DataType::F32), // Select GEMM
TensorInfo(TensorShape(23U, 27U, 5U, 4U), 1, DataType::F32), // Select GEMM
TensorInfo(TensorShape(23U, 27U, 31U, 4U), 1, DataType::F32), // Select WINOGRAD
@@ -110,7 +111,7 @@ DATA_TEST_CASE(ValidateConvolutionMethod, framework::DatasetMode::ALL, zip(zip(z
TensorInfo(TensorShape(17U, 31U, 2U), 1, DataType::F32), // Select GEMM
TensorInfo(TensorShape(17U, 31U, 2U), 1, DataType::QASYMM8_SIGNED), // Select GEMM
}),
- framework::dataset::make("WeightsInfo", { TensorInfo(TensorShape(5U, 5U, 2U, 19U), 1, DataType::F32),
+ make("WeightsInfo", { TensorInfo(TensorShape(5U, 5U, 2U, 19U), 1, DataType::F32),
TensorInfo(TensorShape(5U, 5U, 2U, 19U), 1, DataType::F32),
TensorInfo(TensorShape(3U, 3U, 5U, 21U), 1, DataType::F32),
TensorInfo(TensorShape(3U, 3U, 31U, 21U), 1, DataType::F32),
@@ -120,7 +121,7 @@ DATA_TEST_CASE(ValidateConvolutionMethod, framework::DatasetMode::ALL, zip(zip(z
TensorInfo(TensorShape(5U, 5U, 2U, 19U), 1, DataType::F32),
TensorInfo(TensorShape(5U, 5U, 2U, 19U), 1, DataType::QASYMM8_SIGNED),
})),
- framework::dataset::make("OutputInfo", { TensorInfo(TensorShape(15U, 15U, 19U), 1, DataType::F32),
+ make("OutputInfo", { TensorInfo(TensorShape(15U, 15U, 19U), 1, DataType::F32),
TensorInfo(TensorShape(15U, 15U, 19U), 1, DataType::F32),
TensorInfo(TensorShape(21U, 25U, 21U, 4U), 1, DataType::F32),
TensorInfo(TensorShape(21U, 25U, 21U, 4U), 1, DataType::F32),
@@ -130,7 +131,7 @@ DATA_TEST_CASE(ValidateConvolutionMethod, framework::DatasetMode::ALL, zip(zip(z
TensorInfo(TensorShape(17U, 31U, 19U), 1, DataType::F32),
TensorInfo(TensorShape(17U, 31U, 19U), 1, DataType::QASYMM8_SIGNED),
})),
- framework::dataset::make("ConvInfo", { PadStrideInfo(1, 2, 1, 1),
+ make("ConvInfo", { PadStrideInfo(1, 2, 1, 1),
PadStrideInfo(1, 2, 1, 1),
PadStrideInfo(1, 1, 0, 0),
PadStrideInfo(1, 1, 0, 0),
@@ -140,7 +141,7 @@ DATA_TEST_CASE(ValidateConvolutionMethod, framework::DatasetMode::ALL, zip(zip(z
PadStrideInfo(1, 1, 2, 2),
PadStrideInfo(1, 1, 2, 2),
})),
- framework::dataset::make("GpuTarget", { GPUTarget::BIFROST,
+ make("GpuTarget", { GPUTarget::BIFROST,
GPUTarget::MIDGARD,
GPUTarget::G71,
GPUTarget::G71,
@@ -150,7 +151,7 @@ DATA_TEST_CASE(ValidateConvolutionMethod, framework::DatasetMode::ALL, zip(zip(z
GPUTarget::BIFROST,
GPUTarget::BIFROST,
})),
- framework::dataset::make("Dilation", { Size2D(1U, 1U),
+ make("Dilation", { Size2D(1U, 1U),
Size2D(1U, 1U),
Size2D(1U, 1U),
Size2D(1U, 1U),
@@ -160,8 +161,8 @@ DATA_TEST_CASE(ValidateConvolutionMethod, framework::DatasetMode::ALL, zip(zip(z
Size2D(2U, 1U),
Size2D(2U, 1U),
})),
- framework::dataset::make("EnableFastMath", { false, false, false, false, false, false, true, true, true })),
- framework::dataset::make("Expected",{ ConvolutionMethod::GEMM,
+ make("EnableFastMath", { false, false, false, false, false, false, true, true, true })),
+ make("Expected",{ ConvolutionMethod::GEMM,
ConvolutionMethod::GEMM,
ConvolutionMethod::GEMM,
ConvolutionMethod::WINOGRAD,
@@ -199,7 +200,7 @@ TEST_SUITE(Float)
TEST_SUITE(FP16)
FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMConvolutionLayerFixture<half>, framework::DatasetMode::ALL, combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
- framework::dataset::make("ReshapeWeights", { true })), framework::dataset::make("DataType", DataType::F16)), framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
+ make("ReshapeWeights", { true })), make("DataType", DataType::F16)), make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
ActivationFunctionsSmallDataset))
{
// Validate output
@@ -210,7 +211,7 @@ TEST_SUITE_END() // FP16
TEST_SUITE(FP32)
FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMConvolutionLayerFixture<float>, framework::DatasetMode::ALL, combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
- framework::dataset::make("ReshapeWeights", { true })), framework::dataset::make("DataType", DataType::F32)), framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
+ make("ReshapeWeights", { true })), make("DataType", DataType::F32)), make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
ActivationFunctionsSmallDataset))
{
// Validate output
@@ -218,15 +219,15 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMConvolutionLayerFixture<float>, framework
}
FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, CLGEMMConvolutionLayerMixedDataLayoutFixture<float>, framework::DatasetMode::ALL,
combine(combine(combine(combine(combine(combine(combine(combine(combine(
- framework::dataset::make("Input", TensorShape(23U, 27U, 5U)),
- framework::dataset::make("Weights", TensorShape(3U, 3U, 5U, 2U))),
- framework::dataset::make("Bias", TensorShape(2U))),
- framework::dataset::make("Output", TensorShape(11U, 25U, 2U))),
- framework::dataset::make("PadStrideInfo", PadStrideInfo(2, 1, 0, 0))),
- framework::dataset::make("Dilation", Size2D(1, 1))),
- framework::dataset::make("ReshapeWeights", { true })),
- framework::dataset::make("DataType", DataType::F32)),
- framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
+ make("Input", TensorShape(23U, 27U, 5U)),
+ make("Weights", TensorShape(3U, 3U, 5U, 2U))),
+ make("Bias", TensorShape(2U))),
+ make("Output", TensorShape(11U, 25U, 2U))),
+ make("PadStrideInfo", PadStrideInfo(2, 1, 0, 0))),
+ make("Dilation", Size2D(1, 1))),
+ make("ReshapeWeights", { true })),
+ make("DataType", DataType::F32)),
+ make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
ActivationFunctionsSmallDataset))
{
// Validate output
@@ -234,11 +235,11 @@ FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, CLGEMMConvolutionLayerMixedDataLayout
}
FIXTURE_DATA_TEST_CASE(RunSmallWithPadding, CLConvolutionValidationWithPaddingFixture<float>, framework::DatasetMode::ALL,
combine(combine(combine(combine(combine(datasets::SmallConvolutionLayerPrePaddingDataset(),
- framework::dataset::make("ReshapeWeights", { true })),
- framework::dataset::make("DataType", DataType::F32)),
- framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
- framework::dataset::make("ActivationInfo", { ActivationLayerInfo() })),
-framework::dataset::make("PrePadLayer", { PaddingList({ { 1, 1 }, { 1, 1 } }) })))
+ make("ReshapeWeights", { true })),
+ make("DataType", DataType::F32)),
+ make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
+ make("ActivationInfo", { ActivationLayerInfo() })),
+make("PrePadLayer", { PaddingList({ { 1, 1 }, { 1, 1 } }) })))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_f32);
@@ -254,64 +255,108 @@ using CLGEMMConvolutionLayerQuantizedMixedDataLayoutFixture = ConvolutionValidat
template <typename T>
using CLGEMMConvolutionLayerQuantizedPerChannelFixture = ConvolutionValidationQuantizedPerChannelFixture<CLTensor, CLAccessor, CLGEMMConvolutionLayer, T, int8_t>;
-const auto QuantizedActivationFunctionsDataset = framework::dataset::make("ActivationInfo",
-{
- ActivationLayerInfo(),
- ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
- ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.f)
-});
-const auto QuantizedActivationFunctionsSmallDataset = framework::dataset::make("ActivationInfo",
+TEST_SUITE(Quantized)
+
+const auto QuantizationData = make("QuantizationInfo",
{
- ActivationLayerInfo(),
- ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.f)
+ QuantizationInfo(0.5f, 10),
+ QuantizationInfo(0.3f, 3),
+ QuantizationInfo(1.1f, 10),
});
-TEST_SUITE(Quantized)
+/// @note: Every asymmetric quantized test has two versions, one with and one without a fused activation, because the
+/// given quantization info is ignored when there is no activation. Instead of using the same quantization information
+/// for all the tensors, the fixture then generates separate quantization info for each input and for the output tensor.
+/// Once dynamic quantization is also supported in the presence of an activation, these two versions should be merged
+/// again, with the explicitly specified quantization info removed.
+const auto NoActivation = make("ActivationInfo", ActivationLayerInfo());
+
+const auto IgnoredQuantizationInfo = make("IgnoredQuantizationInfo", QuantizationInfo());
-const auto QuantizationData = framework::dataset::make("QuantizationInfo",
+const auto QuantizedActivationFunctionsSmallDataset = make("ActivationInfo",
{
- QuantizationInfo(0.5f, 10),
- QuantizationInfo(0.3f, 3),
- QuantizationInfo(1.1f, 10),
+ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.f)
});
+
TEST_SUITE(QASYMM8)
FIXTURE_DATA_TEST_CASE(RunSmallCases, CLGEMMConvolutionLayerQuantizedFixture<uint8_t>, framework::DatasetMode::ALL,
- combine(combine(combine(combine(combine(SmallConvolutionLayerDatasetCases(),
- framework::dataset::make("ReshapeWeights", { true })),
- framework::dataset::make("DataType", DataType::QASYMM8)),
- framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
- QuantizationData),
- QuantizedActivationFunctionsSmallDataset))
+ combine(SmallConvolutionLayerDatasetCases(),
+ make("ReshapeWeights", { true }),
+ make("DataType", DataType::QASYMM8),
+ make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC }),
+ IgnoredQuantizationInfo,
+ NoActivation))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference, tolerance_qasymm8);
+}
+
+FIXTURE_DATA_TEST_CASE(RunSmallCasesWithActivation, CLGEMMConvolutionLayerQuantizedFixture<uint8_t>, framework::DatasetMode::ALL,
+ combine(SmallConvolutionLayerDatasetCases(),
+ make("ReshapeWeights", { true }),
+ make("DataType", DataType::QASYMM8),
+ make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC }),
+ QuantizationData,
+ QuantizedActivationFunctionsSmallDataset))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_qasymm8);
}
FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMConvolutionLayerQuantizedFixture<uint8_t>, framework::DatasetMode::ALL,
- combine(combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
- framework::dataset::make("ReshapeWeights", { true })),
- framework::dataset::make("DataType", DataType::QASYMM8)),
- framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
- QuantizationData),
- QuantizedActivationFunctionsSmallDataset))
+ combine(datasets::SmallConvolutionLayerDataset(),
+ make("ReshapeWeights", { true }),
+ make("DataType", DataType::QASYMM8),
+ make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC }),
+ IgnoredQuantizationInfo,
+ NoActivation))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference, tolerance_qasymm8);
+}
+
+FIXTURE_DATA_TEST_CASE(RunSmallWithActivation, CLGEMMConvolutionLayerQuantizedFixture<uint8_t>, framework::DatasetMode::ALL,
+ combine(datasets::SmallConvolutionLayerDataset(),
+ make("ReshapeWeights", { true }),
+ make("DataType", DataType::QASYMM8),
+ make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC }),
+ QuantizationData,
+ QuantizedActivationFunctionsSmallDataset))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_qasymm8);
}
FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, CLGEMMConvolutionLayerQuantizedMixedDataLayoutFixture<uint8_t>, framework::DatasetMode::ALL,
- combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
- framework::dataset::make("Input", TensorShape(23U, 27U, 5U)),
- framework::dataset::make("Weights", TensorShape(3U, 3U, 5U, 2U))),
- framework::dataset::make("Bias", TensorShape(2U))),
- framework::dataset::make("Output", TensorShape(11U, 25U, 2U))),
- framework::dataset::make("PadStrideInfo", PadStrideInfo(2, 1, 0, 0))),
- framework::dataset::make("Dilation", Size2D(1, 1))),
- framework::dataset::make("ReshapeWeights", { true })),
- framework::dataset::make("DataType", DataType::QASYMM8)),
- framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
- QuantizationData),
- QuantizedActivationFunctionsSmallDataset))
+ combine(
+ make("Input", TensorShape(23U, 27U, 5U)),
+ make("Weights", TensorShape(3U, 3U, 5U, 2U)),
+ make("Bias", TensorShape(2U)),
+ make("Output", TensorShape(11U, 25U, 2U)),
+ make("PadStrideInfo", PadStrideInfo(2, 1, 0, 0)),
+ make("Dilation", Size2D(1, 1)),
+ make("ReshapeWeights", { true }),
+ make("DataType", DataType::QASYMM8),
+ make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC }),
+ IgnoredQuantizationInfo,
+ NoActivation))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference, tolerance_qasymm8);
+}
+FIXTURE_DATA_TEST_CASE(RunMixedDataLayoutWithActivation, CLGEMMConvolutionLayerQuantizedMixedDataLayoutFixture<uint8_t>, framework::DatasetMode::ALL,
+ combine(
+ make("Input", TensorShape(23U, 27U, 5U)),
+ make("Weights", TensorShape(3U, 3U, 5U, 2U)),
+ make("Bias", TensorShape(2U)),
+ make("Output", TensorShape(11U, 25U, 2U)),
+ make("PadStrideInfo", PadStrideInfo(2, 1, 0, 0)),
+ make("Dilation", Size2D(1, 1)),
+ make("ReshapeWeights", { true }),
+ make("DataType", DataType::QASYMM8),
+ make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC }),
+ QuantizationData,
+ QuantizedActivationFunctionsSmallDataset))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_qasymm8);
@@ -319,44 +364,78 @@ FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, CLGEMMConvolutionLayerQuantizedMixedD
TEST_SUITE_END() // QASYMM8
TEST_SUITE(QASYMM8_SIGNED)
FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMConvolutionLayerQuantizedFixture<int8_t>, framework::DatasetMode::ALL,
- combine(combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
- framework::dataset::make("ReshapeWeights", { true })),
- framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)),
- framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
- QuantizationData),
- QuantizedActivationFunctionsSmallDataset))
+ combine(datasets::SmallConvolutionLayerDataset(),
+ make("ReshapeWeights", { true }),
+ make("DataType", DataType::QASYMM8_SIGNED),
+ make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC }),
+ IgnoredQuantizationInfo,
+ NoActivation))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference, tolerance_qasymm8);
+}
+FIXTURE_DATA_TEST_CASE(RunSmallWithActivation, CLGEMMConvolutionLayerQuantizedFixture<int8_t>, framework::DatasetMode::ALL,
+ combine(datasets::SmallConvolutionLayerDataset(),
+ make("ReshapeWeights", { true }),
+ make("DataType", DataType::QASYMM8_SIGNED),
+ make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC }),
+ QuantizationData,
+ QuantizedActivationFunctionsSmallDataset))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_qasymm8);
}
FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, CLGEMMConvolutionLayerQuantizedMixedDataLayoutFixture<int8_t>, framework::DatasetMode::ALL,
- combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
- framework::dataset::make("Input", TensorShape(23U, 27U, 5U)),
- framework::dataset::make("Weights", TensorShape(3U, 3U, 5U, 2U))),
- framework::dataset::make("Bias", TensorShape(2U))),
- framework::dataset::make("Output", TensorShape(11U, 25U, 2U))),
- framework::dataset::make("PadStrideInfo", PadStrideInfo(2, 1, 0, 0))),
- framework::dataset::make("Dilation", Size2D(1, 1))),
- framework::dataset::make("ReshapeWeights", { true })),
- framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)),
- framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
- QuantizationData),
- QuantizedActivationFunctionsSmallDataset))
+ combine(
+ make("Input", TensorShape(23U, 27U, 5U)),
+ make("Weights", TensorShape(3U, 3U, 5U, 2U)),
+ make("Bias", TensorShape(2U)),
+ make("Output", TensorShape(11U, 25U, 2U)),
+ make("PadStrideInfo", PadStrideInfo(2, 1, 0, 0)),
+ make("Dilation", Size2D(1, 1)),
+ make("ReshapeWeights", { true }),
+ make("DataType", DataType::QASYMM8_SIGNED),
+ make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC }),
+ IgnoredQuantizationInfo,
+ NoActivation))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference, tolerance_qasymm8);
+}
+FIXTURE_DATA_TEST_CASE(RunMixedDataLayoutWithActivation, CLGEMMConvolutionLayerQuantizedMixedDataLayoutFixture<int8_t>, framework::DatasetMode::ALL,
+ combine(
+ make("Input", TensorShape(23U, 27U, 5U)),
+ make("Weights", TensorShape(3U, 3U, 5U, 2U)),
+ make("Bias", TensorShape(2U)),
+ make("Output", TensorShape(11U, 25U, 2U)),
+ make("PadStrideInfo", PadStrideInfo(2, 1, 0, 0)),
+ make("Dilation", Size2D(1, 1)),
+ make("ReshapeWeights", { true }),
+ make("DataType", DataType::QASYMM8_SIGNED),
+ make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC }),
+ QuantizationData,
+ QuantizedActivationFunctionsSmallDataset))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_qasymm8);
}
TEST_SUITE_END() // QASYMM8_SIGNED
TEST_SUITE(QSYMM8_PER_CHANNEL)
+const auto QuantizedActivationFunctionsSmallPerChannelDataset = make("ActivationInfo",
+{
+ ActivationLayerInfo(),
+ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.f)
+});
+
FIXTURE_DATA_TEST_CASE(RunSmallSigned, CLGEMMConvolutionLayerQuantizedPerChannelFixture<int8_t>, framework::DatasetMode::ALL,
combine(combine(combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
- framework::dataset::make("ReshapeWeights", { true })),
- framework::dataset::make("DataType", { DataType::QASYMM8_SIGNED })),
- framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
+ make("ReshapeWeights", { true })),
+ make("DataType", { DataType::QASYMM8_SIGNED })),
+ make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
QuantizationData),
- QuantizedActivationFunctionsSmallDataset),
- framework::dataset::make("WeightsDataType", { DataType::QSYMM8_PER_CHANNEL })))
+ QuantizedActivationFunctionsSmallPerChannelDataset),
+ make("WeightsDataType", { DataType::QSYMM8_PER_CHANNEL })))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_qasymm8);
@@ -364,12 +443,12 @@ FIXTURE_DATA_TEST_CASE(RunSmallSigned, CLGEMMConvolutionLayerQuantizedPerChannel
FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMConvolutionLayerQuantizedPerChannelFixture<uint8_t>, framework::DatasetMode::ALL,
combine(combine(combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
- framework::dataset::make("ReshapeWeights", { true })),
- framework::dataset::make("DataType", { DataType::QASYMM8 })),
- framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
+ make("ReshapeWeights", { true })),
+ make("DataType", { DataType::QASYMM8 })),
+ make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
QuantizationData),
- QuantizedActivationFunctionsSmallDataset),
- framework::dataset::make("WeightsDataType", { DataType::QSYMM8_PER_CHANNEL })))
+ QuantizedActivationFunctionsSmallPerChannelDataset),
+ make("WeightsDataType", { DataType::QSYMM8_PER_CHANNEL })))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_qasymm8);
@@ -388,7 +467,7 @@ TEST_SUITE(Float)
TEST_SUITE(FP32)
FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMGroupedConvolutionLayerFixture<float>, framework::DatasetMode::ALL, combine(combine(combine(combine(datasets::SmallGroupedConvolutionLayerDataset(),
- framework::dataset::make("ReshapeWeights", { true })), framework::dataset::make("DataType", DataType::F32)), framework::dataset::make("DataLayout", { DataLayout::NCHW })),
+ make("ReshapeWeights", { true })), make("DataType", DataType::F32)), make("DataLayout", { DataLayout::NCHW })),
ActivationFunctionsSmallDataset))
{
// Validate output
@@ -397,9 +476,9 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMGroupedConvolutionLayerFixture<float>, fr
FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMGroupedConvolutionLayerFixture<float>, framework::DatasetMode::NIGHTLY,
combine(combine(combine(combine(datasets::LargeGroupedConvolutionLayerDataset(),
- framework::dataset::make("ReshapeWeights", { true })),
- framework::dataset::make("DataType", DataType::F32)),
- framework::dataset::make("DataLayout", { DataLayout::NCHW })),
+ make("ReshapeWeights", { true })),
+ make("DataType", DataType::F32)),
+ make("DataLayout", { DataLayout::NCHW })),
ActivationFunctionsDataset))
{
// Validate output
@@ -410,7 +489,7 @@ TEST_SUITE_END() // FP32
TEST_SUITE(FP16)
FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMGroupedConvolutionLayerFixture<half>, framework::DatasetMode::ALL, combine(combine(combine(combine(datasets::SmallGroupedConvolutionLayerDataset(),
- framework::dataset::make("ReshapeWeights", { true })), framework::dataset::make("DataType", DataType::F16)), framework::dataset::make("DataLayout", { DataLayout::NCHW })),
+ make("ReshapeWeights", { true })), make("DataType", DataType::F16)), make("DataLayout", { DataLayout::NCHW })),
ActivationFunctionsSmallDataset))
{
// Validate output
@@ -419,9 +498,9 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMGroupedConvolutionLayerFixture<half>, fra
FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMGroupedConvolutionLayerFixture<half>, framework::DatasetMode::NIGHTLY,
combine(combine(combine(combine(datasets::LargeGroupedConvolutionLayerDataset(),
- framework::dataset::make("ReshapeWeights", { true })),
- framework::dataset::make("DataType", DataType::F16)),
- framework::dataset::make("DataLayout", { DataLayout::NCHW })),
+ make("ReshapeWeights", { true })),
+ make("DataType", DataType::F16)),
+ make("DataLayout", { DataLayout::NCHW })),
ActivationFunctionsDataset))
{
// Validate output
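Besides the quantization rework, this file also migrates its dataset plumbing: a using-declaration for framework::dataset::make plus a variadic combine() replace the deeply nested binary combine() calls. Schematically, a before/after sketch; it assumes the variadic combine() overload that the new call sites rely on:

    // Before: nested binary combine() with fully qualified make()
    const auto ds_before = combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
                                                   framework::dataset::make("ReshapeWeights", { true })),
                                           framework::dataset::make("DataType", DataType::QASYMM8)),
                                   framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC }));

    // After: a single variadic combine() with unqualified make()
    using framework::dataset::make;
    const auto ds_after = combine(datasets::SmallConvolutionLayerDataset(),
                                  make("ReshapeWeights", { true }),
                                  make("DataType", DataType::QASYMM8),
                                  make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC }));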
diff --git a/tests/validation/CL/DilatedConvolutionLayer.cpp b/tests/validation/CL/DilatedConvolutionLayer.cpp
index 9a9df2c7e4..776bf34151 100644
--- a/tests/validation/CL/DilatedConvolutionLayer.cpp
+++ b/tests/validation/CL/DilatedConvolutionLayer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2020 Arm Limited.
+ * Copyright (c) 2018-2020, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -167,13 +167,18 @@ template <typename T>
using CLGEMMDilatedConvolutionLayerQuantizedFixture = ConvolutionValidationQuantizedFixture<CLTensor, CLAccessor, CLGEMMConvolutionLayer, T>;
TEST_SUITE(Quantized)
+/// @note: Every asymmetric quantized test where there is no fused activation will have its quantization info ignored.
+/// This is because, instead of using the same quantization information for all the tensors, the fixture generates
+/// separate quantization info for each input and for the output tensor.
+/// Once dynamic quantization is also supported in the presence of an activation, the explicit quantization info
+/// can be removed.
TEST_SUITE(QASYMM8)
FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMDilatedConvolutionLayerQuantizedFixture<uint8_t>, framework::DatasetMode::PRECOMMIT,
combine(combine(combine(combine(combine(datasets::SmallDilatedConvolutionLayerDataset(),
framework::dataset::make("ReshapeWeights", { true })),
framework::dataset::make("DataType", DataType::QASYMM8)),
framework::dataset::make("DataLayout", { DataLayout::NCHW })),
- framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f / 255.f, 10) })),
+ framework::dataset::make("IgnoredQuantizationInfo", { QuantizationInfo() })),
framework::dataset::make("ActivationLayerInfo", { ActivationLayerInfo() })))
{
// Validate output
@@ -185,7 +190,7 @@ FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMDilatedConvolutionLayerQuantizedFixture<u
framework::dataset::make("ReshapeWeights", { true })),
framework::dataset::make("DataType", DataType::QASYMM8)),
framework::dataset::make("DataLayout", { DataLayout::NCHW })),
- framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f / 255.f, 0) })),
+ framework::dataset::make("IgnoredQuantizationInfo", { QuantizationInfo() })),
framework::dataset::make("ActivationLayerInfo", { ActivationLayerInfo() })))
{
// Validate output
diff --git a/tests/validation/NEON/ConvolutionLayer.cpp b/tests/validation/NEON/ConvolutionLayer.cpp
index 2f66100fb6..7a274906a6 100644
--- a/tests/validation/NEON/ConvolutionLayer.cpp
+++ b/tests/validation/NEON/ConvolutionLayer.cpp
@@ -1202,12 +1202,17 @@ const auto QuantizedActivationFunctionsDataset = framework::dataset::make("Activ
ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.f)
});
TEST_SUITE(Quantized)
+/// @note: Every asymmetric quantized test where there is no fused activation will have its quantization info ignored.
+/// This is because, instead of using the same quantization information for all the tensors, the fixture generates
+/// separate quantization info for each input and for the output tensor.
+/// Once dynamic quantization is also supported in the presence of an activation, the explicitly specified
+/// quantization info can be removed.
TEST_SUITE(QASYMM8)
FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMConvolutionLayerQuantizedFixture<uint8_t>, framework::DatasetMode::ALL, combine(combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
framework::dataset::make("ReshapeWeights", { true })),
framework::dataset::make("DataType", DataType::QASYMM8)),
framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
- framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f / 255.f, 10) })),
+ framework::dataset::make("QuantizationInfoIfActivationEnabled", { QuantizationInfo(2.f / 255.f, 10) })),
QuantizedActivationFunctionsDataset))
{
// Validate output
@@ -1224,7 +1229,7 @@ FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, NEGEMMConvolutionLayerQuantizedFixtur
framework::dataset::make("ReshapeWeights", { true })),
framework::dataset::make("DataType", DataType::QASYMM8)),
framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
- framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f / 255.f, 10) })),
+ framework::dataset::make("QuantizationInfoIfActivationEnabled", { QuantizationInfo(2.f / 255.f, 10) })),
QuantizedActivationFunctionsDataset))
{
// Validate output
@@ -1237,7 +1242,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMConvolutionLayerQuantizedFixture<int8_t>,
framework::dataset::make("ReshapeWeights", { true })),
framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)),
framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
- framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.01f, -10) })),
+ framework::dataset::make("QuantizationInfoIfActivationEnabled", { QuantizationInfo(0.01f, -10) })),
QuantizedActivationFunctionsDataset))
{
// Validate output
@@ -1254,7 +1259,7 @@ FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, NEGEMMConvolutionLayerQuantizedFixtur
framework::dataset::make("ReshapeWeights", { true })),
framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)),
framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
- framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f / 255.f, 10) })),
+ framework::dataset::make("QuantizationInfoIfActivationEnabled", { QuantizationInfo(2.f / 255.f, 10) })),
QuantizedActivationFunctionsDataset))
{
// Validate output
diff --git a/tests/validation/NEON/DilatedConvolutionLayer.cpp b/tests/validation/NEON/DilatedConvolutionLayer.cpp
index 30bf690456..2ede4fac4f 100644
--- a/tests/validation/NEON/DilatedConvolutionLayer.cpp
+++ b/tests/validation/NEON/DilatedConvolutionLayer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2021 Arm Limited.
+ * Copyright (c) 2018-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -162,13 +162,18 @@ template <typename T>
using NEGEMMDilatedConvolutionLayerQuantizedFixture = ConvolutionValidationQuantizedFixture<Tensor, Accessor, NEGEMMConvolutionLayer, T>;
TEST_SUITE(Quantized)
+/// @note: Every asymmetric quantized test where there is no fused activation will have its quantization info ignored.
+/// This is because, instead of using the same quantization information for all the tensors, the fixture generates
+/// separate quantization info for each input and for the output tensor.
+/// Once dynamic quantization is also supported in the presence of an activation, the explicit quantization info
+/// can be removed.
TEST_SUITE(QASYMM8)
FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMDilatedConvolutionLayerQuantizedFixture<uint8_t>, framework::DatasetMode::PRECOMMIT,
combine(combine(combine(combine(combine(datasets::SmallDilatedConvolutionLayerDataset(),
framework::dataset::make("ReshapeWeights", { true })),
framework::dataset::make("DataType", DataType::QASYMM8)),
framework::dataset::make("DataLayout", { DataLayout::NCHW })),
- framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f / 255.f, 10) })),
+ framework::dataset::make("IgnoredQuantizationInfo", { QuantizationInfo() })),
framework::dataset::make("ActivationLayerInfo", ActivationLayerInfo())))
{
// Validate output
@@ -179,7 +184,7 @@ FIXTURE_DATA_TEST_CASE(RunLarge, NEGEMMDilatedConvolutionLayerQuantizedFixture<u
framework::dataset::make("ReshapeWeights", { true })),
framework::dataset::make("DataType", DataType::QASYMM8)),
framework::dataset::make("DataLayout", { DataLayout::NCHW })),
- framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f / 255.f, 10) })),
+ framework::dataset::make("IgnoredQuantizationInfo", { QuantizationInfo() })),
framework::dataset::make("ActivationLayerInfo", ActivationLayerInfo())))
{
// Validate output
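The @note comments above all describe the same gating logic, introduced in the fixture diff that follows: dynamic quantization only replaces the dataset-supplied info when the run is asymmetric-quantized and has no fused activation. Condensed from the fixture code below (the condition itself is verbatim; the comment is a summary):

    // In setup(): dynamic output quantization is enabled only for quantized runs with
    // non-symmetric weights and no fused activation (or an identity activation).
    if(is_quantized && !is_data_type_quantized_symmetric(weights_data_type)
       && (!act_info.enabled() || act_info.activation() == ActivationFunction::IDENTITY))
    {
        setup_quantization(input_shape, weights_shape, _quantization_info, _weight_quantization_info, data_type);
        _use_dynamic_output_quant = true;
    }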
diff --git a/tests/validation/fixtures/ConvolutionLayerFixture.h b/tests/validation/fixtures/ConvolutionLayerFixture.h
index 8562955b79..2051add225 100644
--- a/tests/validation/fixtures/ConvolutionLayerFixture.h
+++ b/tests/validation/fixtures/ConvolutionLayerFixture.h
@@ -21,8 +21,9 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_TEST_CONVOLUTION_LAYER_FIXTURE
-#define ARM_COMPUTE_TEST_CONVOLUTION_LAYER_FIXTURE
+
+#ifndef ACL_TESTS_VALIDATION_FIXTURES_CONVOLUTIONLAYERFIXTURE_H
+#define ACL_TESTS_VALIDATION_FIXTURES_CONVOLUTIONLAYERFIXTURE_H
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
@@ -91,21 +92,64 @@ public:
|| std::is_same<typename std::decay<T>::type, int8_t>::value,
int32_t, T >::type;
+ void setup_quantization(TensorShape input_shape, TensorShape weights_shape, QuantizationInfo &input_q_info,
+ QuantizationInfo &weights_q_info, DataType data_type)
+ {
+ const int32_t t_max = static_cast<int32_t>(std::numeric_limits<T>::max());
+ const int32_t t_min = static_cast<int32_t>(std::numeric_limits<T>::min());
+
+ std::mt19937 generator(library->seed() + _hash);
+ std::uniform_real_distribution<float> distribution_float(-5.0f, 3.0f);
+ std::uniform_int_distribution<int32_t> distribution_t(t_min, t_max);
+
+ const float scale_lhs = pow(2, distribution_float(generator)); // [2^-5, 2^3]
+ const float scale_rhs = pow(2, distribution_float(generator)); // [2^-5, 2^3]
+
+ const int32_t offset_lhs = distribution_t(generator);
+ const int32_t offset_rhs = distribution_t(generator);
+
+ _quantization_info = QuantizationInfo(scale_lhs, offset_lhs);
+ _weight_quantization_info = QuantizationInfo(scale_rhs, offset_rhs);
+
+ QuantizationHint q_hint = suggest_conv_dst_q_info_and_bias(input_q_info, weights_q_info,
+ weights_shape.y() /* height */, weights_shape.x() /* width */, input_shape.z() /* channels */,
+ data_type, 0.5f /* bias_fraction */);
+
+ _dst_q_info = q_hint.q_info;
+ _min_bias = q_hint.bias_min;
+ _max_bias = q_hint.bias_max;
+ }
+
public:
void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation, bool reshape_weights,
DataType data_type, DataType weights_data_type, DataLayout data_layout, QuantizationInfo quantization_info, QuantizationInfo weight_quantization_info, ActivationLayerInfo act_info,
bool mixed_layout = false, PaddingList pre_pad_layer = PaddingList({}))
{
+ // This hash is used by the random generators. There may be hash collisions, but
+ // this is intentional, as it is a very easy way to make the current random
+ // generation process produce different values for most test configurations,
+ // which previously all used the same set of values.
+ _hash = input_shape[0] + input_shape[1] + input_shape[2] + input_shape[3] +
+ weights_shape[0] + weights_shape[1] + weights_shape[2] + weights_shape[3] +
+ mixed_layout + (data_type == DataType::QASYMM8_SIGNED) + (data_layout == DataLayout::NHWC);
+
_mixed_layout = mixed_layout;
_data_type = data_type;
_weights_data_type = weights_data_type;
- _is_quantized = is_data_type_quantized_asymmetric(data_type);
+ const bool is_quantized = is_data_type_quantized(weights_data_type);
_is_bfloat16 = data_type == DataType::BFLOAT16;
- _bias_data_type = _is_quantized ? DataType::S32 : (_is_bfloat16 ? DataType::F32 : data_type);
+ _bias_data_type = is_quantized ? DataType::S32 : (_is_bfloat16 ? DataType::F32 : data_type);
_output_data_type = _is_bfloat16 ? DataType::F32 : data_type;
_quantization_info = quantization_info;
_weight_quantization_info = weight_quantization_info;
_data_layout = data_layout;
+ _dst_q_info = quantization_info;
+
+ if(is_quantized && !is_data_type_quantized_symmetric(weights_data_type) && (!act_info.enabled() || act_info.activation() == ActivationFunction::IDENTITY))
+ {
+ setup_quantization(input_shape, weights_shape, _quantization_info, _weight_quantization_info, data_type);
+ _use_dynamic_output_quant = true;
+ }
_target = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, reshape_weights, dilation, act_info, pre_pad_layer);
_reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, dilation, act_info, pre_pad_layer);
@@ -142,16 +186,34 @@ protected:
{
case DataType::QASYMM8:
{
- std::pair<int, int> bounds = get_quantized_bounds(tensor.quantization_info(), -1.0f, 1.0f);
- std::uniform_int_distribution<uint32_t> distribution(bounds.first, bounds.second);
- library->fill(tensor, distribution, i);
+ if(_use_dynamic_output_quant)
+ {
+ std::uniform_int_distribution<int32_t> distribution(0, 255);
+ library->fill(tensor, distribution, i);
+ }
+ else
+ {
+ // Legacy initialization in case the output quantization info can't be reliably estimated
+ std::pair<int, int> bounds = get_quantized_bounds(tensor.quantization_info(), -1.0f, 1.0f);
+ std::uniform_int_distribution<uint32_t> distribution(bounds.first, bounds.second);
+ library->fill(tensor, distribution, i);
+ }
break;
}
case DataType::QASYMM8_SIGNED:
{
- std::pair<int, int> bounds = get_quantized_qasymm8_signed_bounds(tensor.quantization_info(), -1.0f, 1.0f);
- std::uniform_int_distribution<int32_t> distribution(bounds.first, bounds.second);
- library->fill(tensor, distribution, i);
+ if(_use_dynamic_output_quant)
+ {
+ std::uniform_int_distribution<int32_t> distribution(-128, 127);
+ library->fill(tensor, distribution, i);
+ }
+ else
+ {
+ // Legacy initialization in case the output quantization info can't be reliably estimated
+ std::pair<int, int> bounds = get_quantized_qasymm8_signed_bounds(tensor.quantization_info(), -1.0f, 1.0f);
+ std::uniform_int_distribution<int32_t> distribution(bounds.first, bounds.second);
+ library->fill(tensor, distribution, i);
+ }
break;
}
case DataType::QSYMM8_PER_CHANNEL:
@@ -176,7 +238,7 @@ protected:
}
case DataType::S32:
{
- std::uniform_int_distribution<int32_t> distribution(-100, 100);
+ std::uniform_int_distribution<int32_t> distribution(_min_bias, _max_bias);
library->fill(tensor, distribution, i);
break;
}
@@ -239,8 +301,8 @@ protected:
// Create tensors
TensorType src = create_tensor<TensorType>(input_shape, _data_type, 1, _quantization_info, _data_layout);
TensorType weights = create_tensor<TensorType>(reshaped_weights_shape, _weights_data_type, 1, _weight_quantization_info, _data_layout);
- TensorType bias = create_tensor<TensorType>(bias_shape, _bias_data_type, 1, _quantization_info, _data_layout);
- TensorType dst = create_tensor<TensorType>(output_shape, _output_data_type, 1, _quantization_info, _data_layout);
+ TensorType bias = create_tensor<TensorType>(bias_shape, _bias_data_type, 1, QuantizationInfo() /*bias is not a quantized type*/, _data_layout);
+ TensorType dst = create_tensor<TensorType>(output_shape, _output_data_type, 1, _dst_q_info, _data_layout);
// Create and configure function
FunctionType conv;
@@ -288,9 +350,9 @@ protected:
ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
// Fill tensors
- fill(AccessorType(src), 0);
- fill(AccessorType(weights), 1);
- fill(AccessorType(bias), 2);
+ fill(AccessorType(src), 0 + _hash);
+ fill(AccessorType(weights), 1 + _hash);
+ fill(AccessorType(bias), 2 + _hash);
if(_mixed_layout)
{
@@ -322,9 +384,9 @@ protected:
SimpleTensor<TW> weights{ weights_shape, weights_dt, 1, _weight_quantization_info };
SimpleTensor<TBias> bias{ bias_shape, bias_dt, 1, _quantization_info };
- fill(src, 0);
- fill(weights, 1);
- fill(bias, 2);
+ fill(src, 0 + _hash);
+ fill(weights, 1 + _hash);
+ fill(bias, 2 + _hash);
// Fill with bfloat16 to perform the conversion and reduce the mismatches in the output
if(_is_bfloat16)
@@ -338,9 +400,9 @@ protected:
src = reference::pad_layer<T>(src, pre_pad_layer, PixelValue(0), PaddingMode::CONSTANT);
}
- return (act_info.enabled()) ? reference::activation_layer<T>(reference::convolution_layer<T>(src, weights, bias, output_shape, info, dilation, num_groups),
+ return (act_info.enabled()) ? reference::activation_layer<T>(reference::convolution_layer<T>(src, weights, bias, output_shape, info, dilation, num_groups, _dst_q_info),
act_info) :
- reference::convolution_layer<T>(src, weights, bias, output_shape, info, dilation, num_groups);
+ reference::convolution_layer<T>(src, weights, bias, output_shape, info, dilation, num_groups, _dst_q_info);
}
TensorType _target{};
@@ -352,9 +414,13 @@ protected:
DataLayout _data_layout{};
QuantizationInfo _quantization_info{};
QuantizationInfo _weight_quantization_info{};
- bool _is_quantized = false;
+ QuantizationInfo _dst_q_info{};
bool _is_bfloat16 = false;
bool _mixed_layout = false;
+ bool _use_dynamic_output_quant{false};
+ int32_t _hash{0};
+ int32_t _min_bias{-100};
+ int32_t _max_bias{100};
};
template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false>
@@ -707,7 +773,7 @@ public:
const WeightsInfo weights_info(false, 3U, 3U, 64U, false, query_weight_format);
_kernel_found = bool(ConvolutionClass::has_opt_impl(_computed_weight_format, &src_info, &weight_info,
&bias_info, &dst_info, conv_info, weights_info,
- /*dilation*/ Size2D(1U, 1U), /*act_info*/ ActivationLayerInfo(), enable_fast_math));
+ Size2D(1U, 1U) /*dilation*/, ActivationLayerInfo() /*act_info*/, enable_fast_math));
}
protected:
@@ -719,4 +785,5 @@ protected:
} // namespace validation
} // namespace test
} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_CONVOLUTION_LAYER_FIXTURE */
+
+#endif // ACL_TESTS_VALIDATION_FIXTURES_CONVOLUTIONLAYERFIXTURE_H
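A final detail worth noting: the fixture folds a per-configuration _hash into every random seed, so different test configurations no longer draw identical value sequences. A minimal sketch of the pattern, using names from the fixture above:

    // library->seed() is the framework-wide seed; _hash encodes the test configuration
    // (shapes, layout, signedness), giving each configuration its own reproducible stream.
    std::mt19937 generator(library->seed() + _hash);

    // Each tensor keeps a distinct offset within that configuration's stream:
    fill(AccessorType(src),     0 + _hash);
    fill(AccessorType(weights), 1 + _hash);
    fill(AccessorType(bias),    2 + _hash);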