path: root/tests/validation
Diffstat (limited to 'tests/validation')
-rw-r--r--  tests/validation/CL/ActivationLayer.cpp            | 42
-rw-r--r--  tests/validation/CL/ArithmeticAddition.cpp         | 15
-rw-r--r--  tests/validation/CL/BatchConcatenateLayer.cpp      | 51
-rw-r--r--  tests/validation/CL/BatchNormalizationLayer.cpp    | 30
-rw-r--r--  tests/validation/CL/GEMMLowp.cpp                   | 39
-rw-r--r--  tests/validation/CL/GEMMMatrixMultiplyNative.cpp   | 72
-rw-r--r--  tests/validation/CL/GEMMMatrixMultiplyReshaped.cpp | 89
-rw-r--r--  tests/validation/CL/GEMMReshapeLHSMatrix.cpp       | 57
-rw-r--r--  tests/validation/CL/Im2Col.cpp                     | 39
-rw-r--r--  tests/validation/CL/PoolingLayer.cpp               | 53
-rw-r--r--  tests/validation/CL/Winograd.cpp                   | 35
11 files changed, 0 insertions, 522 deletions
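
Note: the deleted tests all follow the same shape: create resizable CLTensor objects, call configure() on the function under test, then assert that configuration added no border padding by inspecting ITensorInfo::padding(). A minimal sketch of that shared pattern, modelled on the CLActivationLayer helper deleted below (the helper name and the RELU activation are illustrative only, not part of this patch):

bool check_no_padding_after_configure(const TensorShape &shape, DataType data_type)
{
    // Create resizable tensors; configure() must not request border padding on them
    CLTensor src = create_tensor<CLTensor>(shape, data_type);
    CLTensor dst = create_tensor<CLTensor>(shape, data_type);

    CLActivationLayer act;
    act.configure(&src, &dst, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));

    // padding() reports the border elements allocated around the tensor;
    // an empty PaddingSize means the kernel runs on the exact tensor shape.
    return src.info()->padding().empty() && dst.info()->padding().empty();
}
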
diff --git a/tests/validation/CL/ActivationLayer.cpp b/tests/validation/CL/ActivationLayer.cpp
index f776e334a0..9b725a44e7 100644
--- a/tests/validation/CL/ActivationLayer.cpp
+++ b/tests/validation/CL/ActivationLayer.cpp
@@ -90,28 +90,6 @@ const auto CNNDataTypes = framework::dataset::make("DataType",
/** Input data sets. */
const auto ActivationDataset = combine(combine(framework::dataset::make("InPlace", { false, true }), datasets::ActivationFunctions()), framework::dataset::make("AlphaBeta", { 0.5f, 1.f }));
-/** Zero padding test */
-bool validate_zero_padding(unsigned int width, unsigned int height, unsigned int channels, unsigned int batches, const ActivationLayerInfo &act_info, DataType data_type)
-{
- TensorShape shape(width, height, channels, batches);
-
- // Create tensors
- CLTensor src = create_tensor<CLTensor>(shape, data_type);
- CLTensor dst = create_tensor<CLTensor>(shape, data_type);
-
- src.info()->set_quantization_info(QuantizationInfo(1.f / 256.f, 0));
- dst.info()->set_quantization_info(QuantizationInfo(1.f / 256.f, 0));
-
- ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
-
- // Create and configure function
- CLActivationLayer act;
- act.configure(&src, &dst, act_info);
-
- // Padding can be added along rhs and bias's X dimension
- return src.info()->padding().empty() && dst.info()->padding().empty();
-}
} // namespace
TEST_SUITE(CL)
@@ -155,26 +133,6 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
ARM_COMPUTE_EXPECT(bool(CLActivationLayer::validate(&input_info.clone()->set_is_resizable(false), (output_info.total_size() == 0) ? nullptr : &output_info.clone()->set_is_resizable(false), act_info)) == expected, framework::LogLevel::ERRORS);
}
-/** Validate zero padding tests
- *
- * A series of validation tests to check that no padding is added as part of configuration for 4 different scenarios.
- *
- * Checks performed in order:
- * - First dimension multiple of 16
- * - First dimension non-multiple of 16
- * - First dimension less than 16 (vec_size for qasymm8) but multiple
- * - First dimension less than 16 (vec_size for qasymm8) non-multiple
- * - Tensor with only one element
- */
-DATA_TEST_CASE(ValidateZeroPadding, framework::DatasetMode::ALL, zip(
-framework::dataset::make("Width", { 32U, 37U, 12U, 13U, 1U }),
-framework::dataset::make("DataType", { DataType::F32, DataType::QASYMM8 })),
-width, data_type)
-{
- const bool one_elem = (width == 1U);
- bool status = validate_zero_padding(width, one_elem ? 1U : 17U, one_elem ? 1U : 7U, one_elem ? 1U : 2U, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 1U, 6U), data_type);
- ARM_COMPUTE_EXPECT(status, framework::LogLevel::ERRORS);
-}
// clang-format on
// *INDENT-ON*
diff --git a/tests/validation/CL/ArithmeticAddition.cpp b/tests/validation/CL/ArithmeticAddition.cpp
index 6635c25fe7..c74f6a3b23 100644
--- a/tests/validation/CL/ArithmeticAddition.cpp
+++ b/tests/validation/CL/ArithmeticAddition.cpp
@@ -124,21 +124,6 @@ TEST_CASE(FusedActivation, framework::DatasetMode::ALL)
ARM_COMPUTE_EXPECT(bool(result) == false, framework::LogLevel::ERRORS);
}
-TEST_CASE(ValidateZeroPadding, framework::DatasetMode::ALL)
-{
- CLTensor src1 = create_tensor<CLTensor>(TensorShape(3U, 3U), DataType::F32, 1, QuantizationInfo());
- CLTensor src2 = create_tensor<CLTensor>(TensorShape(3U, 3U), DataType::F32, 1, QuantizationInfo());
- CLTensor dst = create_tensor<CLTensor>(TensorShape(3U, 3U), DataType::F32, 1, QuantizationInfo());
-
- // Create and configure function
- CLArithmeticAddition add;
- add.configure(&src1, &src2, &dst, ConvertPolicy::WRAP);
-
- validate(src1.info()->padding(), PaddingSize(0, 0, 0, 0));
- validate(src2.info()->padding(), PaddingSize(0, 0, 0, 0));
- validate(dst.info()->padding(), PaddingSize(0, 0, 0, 0));
-}
-
template <typename T>
using CLArithmeticAdditionFixture = ArithmeticAdditionValidationFixture<CLTensor, CLAccessor, CLArithmeticAddition, T>;
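
Note: the deleted ArithmeticAddition case above checks padding with the framework's validate() helper against an explicit PaddingSize(0, 0, 0, 0), whereas the other deleted helpers use padding().empty(); the two idioms assert the same thing. A small illustration, assuming PaddingSize follows the usual BorderSize interface with empty() and operator==:

const PaddingSize pad = dst.info()->padding();
const bool no_pad_via_empty   = pad.empty();                       // idiom used by most deleted helpers
const bool no_pad_via_compare = (pad == PaddingSize(0, 0, 0, 0));  // idiom used by the deleted test above
// Both are true exactly when no top/right/bottom/left border was allocated.
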
diff --git a/tests/validation/CL/BatchConcatenateLayer.cpp b/tests/validation/CL/BatchConcatenateLayer.cpp
index e5de3a75c7..522a6ab8ee 100644
--- a/tests/validation/CL/BatchConcatenateLayer.cpp
+++ b/tests/validation/CL/BatchConcatenateLayer.cpp
@@ -39,37 +39,6 @@ namespace test
{
namespace validation
{
-namespace
-{
-/** Zero padding test */
-bool validate_zero_padding(unsigned int width, unsigned int height, unsigned int channels, unsigned int batches, DataType data_type)
-{
- TensorShape src_shape(width, height, channels, batches);
- TensorShape dst_shape(width, height, channels, batches * 2);
-
- // Create tensors
- CLTensor src0 = create_tensor<CLTensor>(src_shape, data_type);
- CLTensor src1 = create_tensor<CLTensor>(src_shape, data_type);
- CLTensor dst = create_tensor<CLTensor>(dst_shape, data_type);
-
- src0.info()->set_quantization_info(QuantizationInfo(1.f / 256.f, 0));
- src1.info()->set_quantization_info(QuantizationInfo(1.f / 256.f, 0));
- dst.info()->set_quantization_info(QuantizationInfo(1.f / 256.f, 0));
-
- ARM_COMPUTE_EXPECT(src0.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(src1.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
-
- std::vector<const ICLTensor *> srcs = { &src0, &src1 };
-
- // Create and configure function
- CLConcatenateLayer concat;
- concat.configure(srcs, &dst, 3U);
-
- // Padding can be added along rhs and bias's X dimension
- return src0.info()->padding().empty() && src1.info()->padding().empty() && dst.info()->padding().empty();
-}
-}
TEST_SUITE(CL)
TEST_SUITE(BatchConcatenateLayer)
@@ -112,26 +81,6 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
ARM_COMPUTE_EXPECT(is_valid == expected, framework::LogLevel::ERRORS);
}
-/** Validate zero padding tests
- *
- * A series of validation tests to check that no padding is added as part of configuration for 4 different scenarios.
- *
- * Checks performed in order:
- * - First dimension multiple of 16
- * - First dimension non-multiple of 16
- * - First dimension less than 16 (vec_size for qasymm8) but multiple
- * - First dimension less than 16 (vec_size for qasymm8) non-multiple
- * - Tensor with only one element
- */
-DATA_TEST_CASE(ValidateZeroPadding, framework::DatasetMode::ALL, zip(
-framework::dataset::make("Width", { 32U, 37U, 12U, 13U, 1U }),
-framework::dataset::make("DataType", { DataType::F32, DataType::QASYMM8 })),
-width, data_type)
-{
- const bool one_elem = (width == 1U);
- bool status = validate_zero_padding(width, one_elem ? 1U : 17U, one_elem ? 1U : 7U, one_elem ? 1U : 2U, data_type);
- ARM_COMPUTE_EXPECT(status, framework::LogLevel::ERRORS);
-}
// clang-format on
// *INDENT-ON*
diff --git a/tests/validation/CL/BatchNormalizationLayer.cpp b/tests/validation/CL/BatchNormalizationLayer.cpp
index 88f00b0eff..8b3bdbc3ea 100644
--- a/tests/validation/CL/BatchNormalizationLayer.cpp
+++ b/tests/validation/CL/BatchNormalizationLayer.cpp
@@ -64,28 +64,6 @@ framework::dataset::make("UseBeta", { false, true })),
framework::dataset::make("UseGamma", { false, true })),
framework::dataset::make("Epsilon", { 0.001f }));
-bool validate_zero_padding(TensorShape shape0, const TensorShape shape1, float epsilon, ActivationLayerInfo act_info, DataType dt, DataLayout data_layout)
-{
- if(data_layout == DataLayout::NHWC)
- {
- permute(shape0, PermutationVector(2U, 0U, 1U));
- }
-
- // Create tensors
- CLTensor src = create_tensor<CLTensor>(shape0, dt, 1, QuantizationInfo(), data_layout);
- CLTensor dst = create_tensor<CLTensor>(shape0, dt, 1, QuantizationInfo(), data_layout);
- CLTensor mean = create_tensor<CLTensor>(shape1, dt, 1);
- CLTensor var = create_tensor<CLTensor>(shape1, dt, 1);
- CLTensor beta = create_tensor<CLTensor>(shape1, dt, 1);
- CLTensor gamma = create_tensor<CLTensor>(shape1, dt, 1);
-
- // Create and configure function
- CLBatchNormalizationLayer norm;
- norm.configure(&src, &dst, &mean, &var, &beta, &gamma, epsilon, act_info);
-
- return src.info()->padding().empty() && dst.info()->padding().empty() && mean.info()->padding().empty() && var.info()->padding().empty() && beta.info()->padding().empty()
- && gamma.info()->padding().empty();
-}
} // namespace
TEST_SUITE(CL)
@@ -142,14 +120,6 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(
// clang-format on
// *INDENT-ON*
-DATA_TEST_CASE(ValidateZeroPadding, framework::DatasetMode::ALL, combine(combine(combine(datasets::SmallRandomBatchNormalizationLayerDataset(), act_infos), framework::dataset::make("DataType", { DataType::F32, DataType::F16 })),
- framework::dataset::make("DataLayout", { DataLayout::NHWC })),
- shape0, shape1, episilon, act_infos, data_type, data_layout)
-{
- bool status = validate_zero_padding(shape0, shape1, episilon, act_infos, data_type, data_layout);
- ARM_COMPUTE_EXPECT(status, framework::LogLevel::ERRORS);
-}
-
TEST_SUITE(Float)
TEST_SUITE(FP32)
FIXTURE_DATA_TEST_CASE(Random, CLBatchNormalizationLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(datasets::SmallRandomBatchNormalizationLayerDataset(),
diff --git a/tests/validation/CL/GEMMLowp.cpp b/tests/validation/CL/GEMMLowp.cpp
index 00f831b2e2..5a1971b54c 100644
--- a/tests/validation/CL/GEMMLowp.cpp
+++ b/tests/validation/CL/GEMMLowp.cpp
@@ -47,25 +47,6 @@ namespace validation
namespace
{
constexpr AbsoluteTolerance<float> tolerance_quant(1); /**< Tolerance value for comparing reference's output against implementation's output for quantized data types */
-
-bool validate_output_stage_zero_padding(const TensorShape shape, const DataType dt, const GEMMLowpOutputStageType type)
-{
- // Create tensors
- CLTensor src = create_tensor<CLTensor>(shape, DataType::S32, 1);
- CLTensor bias = create_tensor<CLTensor>(TensorShape(shape.x()), DataType::S32, 1);
- CLTensor dst = create_tensor<CLTensor>(shape, dt, 1);
-
- GEMMLowpOutputStageInfo info;
- info.type = type;
- info.output_data_type = dt;
- std::tie(info.gemmlowp_min_bound, info.gemmlowp_max_bound) = quantization::get_min_max_values_from_quantized_data_type(dt);
-
- // Create and configure function
- CLGEMMLowpOutputStage output_stage;
- output_stage.configure(&src, &bias, &dst, info);
-
- return src.info()->padding().empty() && bias.info()->padding().empty() && dst.info()->padding().empty();
-}
}
TEST_SUITE(CL)
TEST_SUITE(GEMMLowp)
@@ -147,13 +128,6 @@ TEST_SUITE(OutputStage)
TEST_SUITE(QuantizeDownInt32Scale)
-DATA_TEST_CASE(ValidateZeroPadding, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), framework::dataset::make("DataType", { DataType::QASYMM8, DataType::QASYMM8_SIGNED })),
- shape, data_type)
-{
- bool status = validate_output_stage_zero_padding(shape, data_type, GEMMLowpOutputStageType::QUANTIZE_DOWN);
- ARM_COMPUTE_EXPECT(status, framework::LogLevel::ERRORS);
-}
-
TEST_SUITE(QASYMM8)
const auto quantize_down_int32_to_uint8_scale_cases = framework::dataset::make("result_offset", -2, 1) * framework::dataset::make("result_mult_int", 1, 2) * framework::dataset::make("result_shift", 2,
@@ -212,12 +186,6 @@ TEST_SUITE_END() // QASYMM8_SIGNED
TEST_SUITE_END() // QuantizeDownInt32Scale
TEST_SUITE(QuantizeDownInt32ScaleByFixedPoint)
-DATA_TEST_CASE(ValidateZeroPadding, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), framework::dataset::make("DataType", { DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::QSYMM16 })),
- shape, data_type)
-{
- bool status = validate_output_stage_zero_padding(shape, data_type, GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT);
- ARM_COMPUTE_EXPECT(status, framework::LogLevel::ERRORS);
-}
TEST_SUITE(QASYMM8)
@@ -353,13 +321,6 @@ TEST_SUITE_END() // QuantizeDownInt32ScaleByFixedPoint
TEST_SUITE(QuantizeDownInt32ScaleByFloat)
-DATA_TEST_CASE(ValidateZeroPadding, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), framework::dataset::make("DataType", { DataType::QASYMM8, DataType::QASYMM8_SIGNED })),
- shape, data_type)
-{
- bool status = validate_output_stage_zero_padding(shape, data_type, GEMMLowpOutputStageType::QUANTIZE_DOWN_FLOAT);
- ARM_COMPUTE_EXPECT(status, framework::LogLevel::ERRORS);
-}
-
TEST_SUITE(QASYMM8)
using CLGEMMLowpQuantizeDownInt32ScaleByFloatFixture =
GEMMLowpQuantizeDownInt32ScaleByFloatValidationFixture<CLTensor, CLAccessor, CLGEMMLowpOutputStage, uint8_t>;
diff --git a/tests/validation/CL/GEMMMatrixMultiplyNative.cpp b/tests/validation/CL/GEMMMatrixMultiplyNative.cpp
index 1cf1209dee..ec6b87fbae 100644
--- a/tests/validation/CL/GEMMMatrixMultiplyNative.cpp
+++ b/tests/validation/CL/GEMMMatrixMultiplyNative.cpp
@@ -186,55 +186,6 @@ void validate_configuration(unsigned int m_value, unsigned int n_value, unsigned
CLGEMMMatrixMultiplyNative gemm;
gemm.configure(&lhs, &rhs, &bias, &dst, 1.0f, 1.0f, lhs_info, rhs_info, kernel_info);
}
-/** Zero padding test */
-bool validate_zero_padding(unsigned int m_value, unsigned int n_value, unsigned int k_value, unsigned int b_value, unsigned int m0_value, unsigned int n0_value, unsigned int k0_value, bool broadcast_bias, DataType data_type, const ActivationLayerInfo &act_info)
-{
- const unsigned int M = m_value;
- const unsigned int N = n_value;
- const unsigned int K = k_value;
-
- GEMMLHSMatrixInfo lhs_info;
- lhs_info.m0 = m0_value;
- lhs_info.k0 = k0_value;
-
- GEMMRHSMatrixInfo rhs_info;
- rhs_info.n0 = n0_value;
- rhs_info.k0 = k0_value;
-
- GEMMKernelInfo kernel_info;
- kernel_info.m = M;
- kernel_info.n = N;
- kernel_info.k = K;
- kernel_info.broadcast_bias = broadcast_bias;
- kernel_info.activation_info = act_info;
-
- const TensorShape lhs_shape(K, M, b_value);
- const TensorShape rhs_shape(N, K, b_value);
- const TensorShape bias_shape(N,
- broadcast_bias? 1 : M,
- broadcast_bias? 1 : b_value);
- const TensorShape dst_shape = compute_mm_shape(TensorInfo(lhs_shape, 1, data_type),
- TensorInfo(rhs_shape, 1, data_type),
- kernel_info);
-
- // Create tensors
- CLTensor lhs = create_tensor<CLTensor>(lhs_shape, data_type);
- CLTensor rhs = create_tensor<CLTensor>(rhs_shape, data_type);
- CLTensor bias = create_tensor<CLTensor>(bias_shape, data_type);
- CLTensor dst = create_tensor<CLTensor>(dst_shape, data_type);
-
- ARM_COMPUTE_EXPECT(lhs.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(rhs.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
-
- // Create and configure function
- CLGEMMMatrixMultiplyNative gemm;
- gemm.configure(&lhs, &rhs, &bias, &dst, 1.0f, 1.0f, lhs_info, rhs_info, kernel_info);
-
- // Padding can be added along rhs and bias's X dimension
- return dst.info()->padding().empty() && lhs.info()->padding().empty() && bias.info()->padding().bottom == 0 && bias.info()->padding().top == 0;
-}
} // namespace
TEST_SUITE(CL)
@@ -256,29 +207,6 @@ m_value, n_value, k_value, b_value, m0_value, n0_value, k0_value, broadcast_bias
validate_configuration(m_value, n_value, k_value, b_value, m0_value, n0_value, k0_value, broadcast_bias, DataType::F32, act_value);
}
-/** Validate zero padding tests
- *
- * A series of validation tests to check that no padding is added as part of configuration for 4 different scenarios.
- *
- * Checks performed in order:
- * - No partial blocks in both x and y dimensions
- * - Partial blocks in x dimension
- * - Partial blocks in y dimension
- * - Partial blocks in both x and y dimensions
- * - No blocks in both x and y dimensions, scalar store (N0==1)
- * - Special case: partial_n0 == 5 (vstore1 should be invoked instead of vstore_partial_1)
- */
-DATA_TEST_CASE(ValidateZeroPadding, framework::DatasetMode::ALL, zip(zip(zip(
-framework::dataset::make("M", { 24, 64, 101, 1, 50, 256, }),
-framework::dataset::make("N", { 48, 29, 16, 122, 20, 21, })),
-framework::dataset::make("M0", { 4, 8, 7, 2, 1, 8, })),
-framework::dataset::make("N0", { 4, 4, 16, 3, 1, 8, })),
-m_value, n_value, m0_value, n0_value)
-{
- bool status = validate_zero_padding(m_value, n_value, 23, 1, m0_value, n0_value, 4, false, DataType::F32, ActivationLayerInfo());
- ARM_COMPUTE_EXPECT(status, framework::LogLevel::ERRORS);
-}
-
FIXTURE_DATA_TEST_CASE(RunSmallBoundaryHandlingPartialInXPartialInY, CLGEMMMatrixMultiplyNativeFixture<float>, framework::DatasetMode::ALL,
combine(combine(
framework::dataset::make("M", 3),
diff --git a/tests/validation/CL/GEMMMatrixMultiplyReshaped.cpp b/tests/validation/CL/GEMMMatrixMultiplyReshaped.cpp
index 0a0a1fc397..95979b3131 100644
--- a/tests/validation/CL/GEMMMatrixMultiplyReshaped.cpp
+++ b/tests/validation/CL/GEMMMatrixMultiplyReshaped.cpp
@@ -171,100 +171,11 @@ const auto broadcast_bias_values = framework::dataset::make("broadcast_bias", {
/** LHS transposed values */
const auto lhs_transpose_values = framework::dataset::make("lhs_transpose", { false, true } );
-/** Zero padding test */
-bool validate_zero_padding(unsigned int m_value, unsigned int n_value, unsigned int k_value, unsigned int b_value,
- unsigned int m0_value, unsigned int n0_value, unsigned int k0_value, unsigned int h0_value,
- bool i_value_rhs, bool t_value_rhs, bool export_to_cl_image, bool broadcast_bias, unsigned int depth_output_gemm3d, const ActivationLayerInfo &act_info,
- DataType dt_input0, DataType dt_input1, DataType dt_input2, DataType dt_output, float alpha, float beta)
-{
- const unsigned int M = m_value;
- const unsigned int N = n_value;
- const unsigned int K = k_value;
-
- GEMMLHSMatrixInfo lhs_info;
- lhs_info.m0 = m0_value;
- lhs_info.k0 = k0_value;
-
- GEMMRHSMatrixInfo rhs_info;
- rhs_info.n0 = n0_value;
- rhs_info.k0 = k0_value;
- rhs_info.h0 = h0_value;
- rhs_info.interleave = i_value_rhs;
- rhs_info.transpose = t_value_rhs;
- rhs_info.export_to_cl_image = export_to_cl_image;
-
- GEMMKernelInfo kernel_info;
- kernel_info.m = M;
- kernel_info.n = N;
- kernel_info.k = K;
- kernel_info.depth_output_gemm3d = depth_output_gemm3d;
- kernel_info.reinterpret_input_as_3d = false;
- kernel_info.broadcast_bias = broadcast_bias;
- kernel_info.activation_info = act_info;
-
- const TensorShape lhs_shape(K, M, b_value);
- const TensorShape rhs_shape(N, K, b_value);
- const TensorShape lhs_shape_reshaped = compute_lhs_reshaped_shape(TensorInfo(lhs_shape, 1, dt_input0),
- lhs_info);
- const TensorShape rhs_shape_reshaped = compute_rhs_reshaped_shape(TensorInfo(rhs_shape, 1, dt_input1),
- rhs_info);
-
- const TensorShape dst_shape = compute_mm_shape(TensorInfo(lhs_shape_reshaped, 1, dt_input0),
- TensorInfo(rhs_shape_reshaped, 1, dt_input1),
- kernel_info);
-
- const TensorShape bias_shape(N,
- M, // Correct calculation should be: broadcast_bias? 1 : M, it's wrong here on purpose just for validation test
- broadcast_bias? 1 : b_value);
-
- // Create tensors
- CLTensor lhs_reshaped = create_tensor<CLTensor>(lhs_shape_reshaped, dt_input0);
- CLTensor rhs_reshaped = create_tensor<CLTensor>(rhs_shape_reshaped, dt_input1);
- CLTensor bias = create_tensor<CLTensor>(bias_shape, dt_input2);
- CLTensor dst = create_tensor<CLTensor>(dst_shape, dt_output);
-
- ARM_COMPUTE_EXPECT(lhs_reshaped.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(rhs_reshaped.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
-
- // Validate zero-padding
- CLGEMMMatrixMultiplyReshaped gemm;
-
- gemm.configure(&lhs_reshaped, &rhs_reshaped, &bias, &dst, alpha, beta, lhs_info, rhs_info, kernel_info);
-
- // Padding can be added along rhs and bias's X/Y dimension
- return dst.info()->padding().empty() && lhs_reshaped.info()->padding().empty();
-}
} // namespace
TEST_SUITE(CL)
TEST_SUITE(GEMMMatrixMultiplyReshaped)
-/** Validate zero padding tests
- *
- * A series of validation tests to check the zero padding requirement
- *
- * Checks performed in order:
- * - No partial blocks in both x and y dimensions
- * - Partial blocks in x dimension
- * - Partial blocks in y dimension
- * - Partial blocks in both x and y dimensions
- * - Special case: partial_n0 == 9 (vstore1 should be invoked instead of vstore_partial_1)
- */
-DATA_TEST_CASE(ValidateZeroPadding, framework::DatasetMode::ALL, zip(zip(zip(
-framework::dataset::make("M", { 24, 64, 101, 1, 103 }),
-framework::dataset::make("N", { 48, 29, 16, 121, 41 })),
-framework::dataset::make("M0", { 4, 8, 4, 2, 4 })),
-framework::dataset::make("N0", { 4, 4, 16, 2, 16 })),
-m_value, n_value, m0_value, n0_value)
-{
- constexpr DataType dt = DataType::F32;
-
- bool status = validate_zero_padding(m_value, n_value, 23, 1, m0_value, n0_value, 4, 1, false, false, false, 0, 0, ActivationLayerInfo(), dt, dt, dt, dt, 1.0f, 1.0f);
- ARM_COMPUTE_EXPECT(status, framework::LogLevel::ERRORS);
-}
-
// *INDENT-OFF*
// clang-format off
DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip(zip(
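
The same mapping decodes the deleted reshaped-GEMM dataset above; in particular the last tuple (M = 103, N = 41, M0 = 4, N0 = 16) gives 103 % 4 = 3 and 41 % 16 = 9, the partial_n0 == 9 special case called out in its comment:

M = 24,  N = 48,  M0 = 4, N0 = 4  ->  0, 0   (no partial blocks)
M = 64,  N = 29,  M0 = 8, N0 = 4  ->  0, 1   (partial blocks in x)
M = 101, N = 16,  M0 = 4, N0 = 16 ->  1, 0   (partial blocks in y)
M = 1,   N = 121, M0 = 2, N0 = 2  ->  1, 1   (partial blocks in both)
M = 103, N = 41,  M0 = 4, N0 = 16 ->  3, 9   (partial_n0 == 9, vstore1 case)
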
diff --git a/tests/validation/CL/GEMMReshapeLHSMatrix.cpp b/tests/validation/CL/GEMMReshapeLHSMatrix.cpp
index 4af495944e..34c37dffde 100644
--- a/tests/validation/CL/GEMMReshapeLHSMatrix.cpp
+++ b/tests/validation/CL/GEMMReshapeLHSMatrix.cpp
@@ -82,68 +82,11 @@ const auto i_values = framework::dataset::make("interleave", { true, false });
/** Transpose values to test */
const auto t_values = framework::dataset::make("transpose", { true, false });
-/** Zero padding test */
-bool validate_zero_padding(unsigned int m_value, unsigned int k_value, unsigned int b_value, unsigned int m0_value, unsigned int k0_value, unsigned int v0_value,
- bool i_value_lhs, bool t_value_lhs, bool input_as_3d, DataType dt)
-{
- const unsigned int M = m_value;
- const unsigned int K = k_value;
- const unsigned int B = b_value;
-
- GEMMLHSMatrixInfo lhs_info;
- lhs_info.m0 = m0_value;
- lhs_info.k0 = k0_value;
- lhs_info.v0 = v0_value;
- lhs_info.interleave = i_value_lhs;
- lhs_info.transpose = t_value_lhs;
-
- const TensorShape lhs_shape(K, M, B);
- const TensorShape lhs_shape_reshaped = compute_lhs_reshaped_shape(TensorInfo(lhs_shape, 1, dt), lhs_info, input_as_3d);
-
- // Create tensors
- CLTensor lhs = create_tensor<CLTensor>(lhs_shape, dt);
- CLTensor dst = create_tensor<CLTensor>(lhs_shape_reshaped, dt);
-
- ARM_COMPUTE_EXPECT(lhs.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
-
- // Validate zero-padding
- CLGEMMReshapeLHSMatrixKernel lhs_reshape;
-
- lhs_reshape.configure(&lhs, &dst, lhs_info, input_as_3d);
-
- return lhs.info()->padding().empty();
-}
} // namespace
TEST_SUITE(CL)
TEST_SUITE(GEMMReshapeLHSMatrix)
-/** Validate zero padding tests for the LHS input tensor
- *
- * A series of validation tests to test the zero padding requirement
- *
- * Checks performed in order:
- * - Case where M and K are smaller than M0 and K0
- * - Generic test case with batch size = 1
- * - Generic test case with batch size = 4
- * - Generic test case with input_as_3d_value = true
- */
-DATA_TEST_CASE(ValidateZeroPadding, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(
-framework::dataset::make("M", { 1, 23, 63, 101 }),
-framework::dataset::make("K", { 1, 47, 29, 27 })),
-framework::dataset::make("B", { 1, 1, 4, 7 })),
-framework::dataset::make("M0", { 4, 2, 4, 8 })),
-framework::dataset::make("K0", { 2, 2, 4, 8 })),
-framework::dataset::make("input_as_3d", { false, false, false, true })),
-m_value, k_value, b_value, m0_value, k0_value, input_as_3d_value)
-{
- constexpr DataType dt = DataType::F32;
-
- bool status = validate_zero_padding(m_value, k_value, b_value, m0_value, k0_value, 2, false, false, input_as_3d_value, dt);
- ARM_COMPUTE_EXPECT(status, framework::LogLevel::ERRORS);
-}
-
FIXTURE_DATA_TEST_CASE(S32, CLGEMMReshapeLHSMatrixFixture<int>, framework::DatasetMode::ALL,
combine(combine(combine(combine(combine(combine(combine(datasets::SmallGEMMReshape2DShapes(),
b_values),
diff --git a/tests/validation/CL/Im2Col.cpp b/tests/validation/CL/Im2Col.cpp
index e7e46b7bc5..a31aec4d0c 100644
--- a/tests/validation/CL/Im2Col.cpp
+++ b/tests/validation/CL/Im2Col.cpp
@@ -138,45 +138,6 @@ using CLIm2ColFixture = Im2ColValidationFixture<CLTensor, CLAccessor, CLIm2Col,
TEST_SUITE(NHWC)
-/** Test that there's no padding added to input or output as part of configure
- *
- * @note 2 elements processed per iteration
- *
- * Three tests will be run:
- * - Channels are multiple of elements processed
- * - Channels larger and non multiple of elements used
- * - Channels smaller and not multiple of elements used
- *
- */
-DATA_TEST_CASE(ValidateZeroPaddingNumElemsPerIterEqual2, framework::DatasetMode::ALL,
- combine(combine(combine(combine(combine(
- framework::dataset::make("InputChannel",
-{
- 2, 9, 1,
-}),
-framework::dataset::make("DataType", { DataType::F32 })),
-framework::dataset::make("Kernel", { Size2D(3, 4) })),
-framework::dataset::make("PadStride", { PadStrideInfo(2, 1, 1, 2) })),
-framework::dataset::make("QInfo", { QuantizationInfo() })),
-framework::dataset::make("DataLayout", { DataLayout::NHWC })),
-input_channel, data_type, conv_size, pad_stride_info, qinfo, data_layout)
-{
- TensorShape input_shape(input_channel, 10U, 30U, 3U);
- const bool has_bias = false;
-
- const auto input_info = TensorInfo(input_shape, 1, data_type, data_layout);
- const auto output_shape = compute_im2col_conv_shape(&input_info, conv_size, pad_stride_info, has_bias, Size2D(1U, 1U), true);
-
- CLTensor input = create_tensor<CLTensor>(input_shape, data_type, 1, qinfo, data_layout);
- CLTensor output = create_tensor<CLTensor>(output_shape, data_type, 1, qinfo, data_layout);
-
- CLIm2ColKernel im2col;
- im2col.configure(&input, &output, conv_size, pad_stride_info, has_bias);
-
- // Ensure there're no paddings added at all
- const bool no_padding = input.info()->padding().empty() && output.info()->padding().empty();
- ARM_COMPUTE_EXPECT(no_padding, framework::LogLevel::ERRORS);
-}
/** Test special kernel used for NHWC for 3x3 kernels
*
* @note 2 elements processed per iteration
diff --git a/tests/validation/CL/PoolingLayer.cpp b/tests/validation/CL/PoolingLayer.cpp
index 071b58323c..c79775e1e2 100644
--- a/tests/validation/CL/PoolingLayer.cpp
+++ b/tests/validation/CL/PoolingLayer.cpp
@@ -85,39 +85,6 @@ const auto pool_data_layout_dataset = framework::datas
const auto pool_fp_mixed_precision_dataset = framework::dataset::make("FpMixedPrecision", { true, false });
-/** Zero padding test */
-bool validate_zero_padding(unsigned int width, DataType data_type)
-{
- const PoolingLayerInfo pool_info(PoolingType::MAX, Size2D(2U, 2U), DataLayout::NHWC);
-
- TensorShape shape(width, 23, 11, 1);
-
- // Create tensors
- CLTensor src = create_tensor<CLTensor>(shape, data_type);
- CLTensor idx;
- CLTensor dst;
-
- src.info()->set_quantization_info(QuantizationInfo(1.f / 256.f, 0));
- dst.info()->set_quantization_info(QuantizationInfo(1.f / 256.f, 0));
-
- CLPoolingLayer pool;
-
- if(is_data_type_quantized(data_type))
- {
- pool.configure(&src, &dst, pool_info, nullptr);
-
- // Padding can be added along rhs and bias's X dimension
- return src.info()->padding().empty() && dst.info()->padding().empty();
- }
- else
- {
- pool.configure(&src, &dst, pool_info, &idx);
-
- // Padding can be added along rhs and bias's X dimension
- return src.info()->padding().empty() && dst.info()->padding().empty() && idx.info()->padding().empty();
- }
-}
-
} // namespace
TEST_SUITE(CL)
@@ -159,26 +126,6 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
ARM_COMPUTE_EXPECT(bool(CLPoolingLayer::validate(&input_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), pool_info)) == expected, framework::LogLevel::ERRORS);
}
-/** Validate zero padding tests
- *
- * A series of validation tests to check that no padding is added as part of configuration for 4 different scenarios.
- *
- * Checks performed in order:
- * - First dimension multiple of 16
- * - First dimension non-multiple of 16
- * - First dimension less than 16 (vec_size for qasymm8) but multiple
- * - First dimension less than 16 (vec_size for qasymm8) non-multiple
- * - Tensor with only one element
- */
-DATA_TEST_CASE(ValidateZeroPadding, framework::DatasetMode::ALL, zip(
-framework::dataset::make("Width", { 32U, 37U, 12U, 13U, 1U }),
-framework::dataset::make("DataType", { DataType::F32, DataType::QASYMM8 })),
-width, data_type)
-{
- bool status = validate_zero_padding(width, data_type);
- ARM_COMPUTE_EXPECT(status, framework::LogLevel::ERRORS);
-}
-
// clang-format on
// *INDENT-ON*
diff --git a/tests/validation/CL/Winograd.cpp b/tests/validation/CL/Winograd.cpp
index f206e92493..750799ace2 100644
--- a/tests/validation/CL/Winograd.cpp
+++ b/tests/validation/CL/Winograd.cpp
@@ -183,28 +183,6 @@ const auto ActivationFunctionsSmallDataset = framework::dataset::make("Activatio
ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::SOFT_RELU)
});
-/** Zero padding test */
-bool validate_zero_padding(unsigned int width, unsigned height)
-{
- TensorShape shape(width, height, 11, 1);
-
- WinogradInfo winograd_info = WinogradInfo(Size2D(4U, 4U), Size2D(5U, 5U), Size2D(width, height), PadStrideInfo(), DataLayout::NHWC);
-
- // Create tensors
- CLTensor src = create_tensor<CLTensor>(shape, DataType::F32, 1, QuantizationInfo(), DataLayout::NHWC);
- CLTensor dst;
-
- src.info()->set_quantization_info(QuantizationInfo(1.f / 256.f, 0));
- dst.info()->set_quantization_info(QuantizationInfo(1.f / 256.f, 0));
-
- CLWinogradInputTransform input_transform;
-
- input_transform.configure(&src, &dst, winograd_info);
-
- // Padding can be added along rhs and bias's X dimension
- return src.info()->padding().empty() && dst.info()->padding().empty();
-}
-
} // namespace
using namespace arm_compute::misc::shape_calculator;
@@ -214,19 +192,6 @@ TEST_SUITE(Winograd)
TEST_SUITE(InputTransform)
-/** Validate zero padding tests
- *
- * A series of validation tests to check that no padding is added
- */
-DATA_TEST_CASE(ValidateZeroPadding, framework::DatasetMode::ALL, zip(
-framework::dataset::make("Width", { 32U, 37U, 12U, 1U }),
-framework::dataset::make("Height", { 13U, 27U, 19U, 1U })),
-width, height)
-{
- bool status = validate_zero_padding(width, height);
- ARM_COMPUTE_EXPECT(status, framework::LogLevel::ERRORS);
-}
-
DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
framework::dataset::make("InputInfo",{
TensorInfo(TensorShape(53U, 21U, 5U, 3U), 1, DataType::F16), // F16 not supported