author    SiCong Li <sicong.li@arm.com>  2020-11-17 14:09:01 +0000
committer SiCong Li <sicong.li@arm.com>  2020-11-17 16:38:52 +0000
commit    04a0706dddc6ca24cb80e3e0789c6b0f54c48b28 (patch)
tree      eb136b5fd7bd4f3e9424cf5bcf4504e5a4f158d1
parent    1d1bca75f766625140ab0fdf000b6336f013ed83 (diff)
download  ComputeLibrary-04a0706dddc6ca24cb80e3e0789c6b0f54c48b28.tar.gz
COMPMID-3979 Sanitise Padding Removal epic
* Add missing padding immutability asserts in all relevant CL kernels
* Remove unnecessary zero padding validation tests.

Change-Id: If93f9ccbc988e0286f5e7b135f812141476d5da0
Signed-off-by: SiCong Li <sicong.li@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/4446
Reviewed-by: Gian Marco Iodice <gianmarco.iodice@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
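The pattern applied across these kernels can be read off the hunks below: snapshot the padding of every tensor touched by configure() before the kernel window is set up, then assert that none of it changed afterwards. A minimal C++ sketch of that shape, using the get_padding_info()/has_padding_changed() helpers seen in the patch (SomeCLKernel and the elided window-setup steps are placeholders, not a real kernel):

// Sketch only: the padding-immutability check this patch adds to each CL kernel.
void SomeCLKernel::configure(const CLCompileContext &compile_context, ICLTensor *input, ICLTensor *output)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);

    // Record the tensors' current padding before any configuration happens
    auto padding_info = get_padding_info({ input, output });

    // ... validate arguments, build the OpenCL kernel, compute and set the
    // window via ICLKernel::configure_internal(), compose _config_id ...

    // Trip a (debug-build) error if configuration silently re-padded a tensor
    ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
}

Because the assert fires at configure() time, a kernel that regresses into requiring padding fails immediately when the test suite configures it, which is what makes the per-operator zero-padding tests removed below redundant.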
-rw-r--r-- src/core/CL/cl_kernels/activation_layer.cl                                |  2
-rw-r--r-- src/core/CL/cl_kernels/activation_layer_quant.cl                          |  4
-rw-r--r-- src/core/CL/kernels/CLActivationLayerKernel.cpp                           |  4
-rw-r--r-- src/core/CL/kernels/CLBatchConcatenateLayerKernel.cpp                     |  4
-rw-r--r-- src/core/CL/kernels/CLBatchNormalizationLayerKernel.cpp                   | 17
-rw-r--r-- src/core/CL/kernels/CLDepthConcatenateLayerKernel.cpp                     |  4
-rw-r--r-- src/core/CL/kernels/CLElementwiseOperationKernel.cpp                      |  4
-rw-r--r-- src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleByFixedPointKernel.cpp |  4
-rw-r--r-- src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleByFloatKernel.cpp     |  4
-rw-r--r-- src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleKernel.cpp            |  4
-rw-r--r-- src/core/CL/kernels/CLGEMMMatrixMultiplyNativeKernel.cpp                  |  3
-rw-r--r-- src/core/CL/kernels/CLGEMMMatrixMultiplyReshapedKernel.cpp                |  3
-rw-r--r-- src/core/CL/kernels/CLGEMMReshapeLHSMatrixKernel.cpp                      |  4
-rw-r--r-- src/core/CL/kernels/CLIm2ColKernel.cpp                                    |  5
-rw-r--r-- src/core/CL/kernels/CLPoolingLayerKernel.cpp                              |  4
-rw-r--r-- src/core/CL/kernels/CLWinogradInputTransformKernel.cpp                    |  4
-rw-r--r-- tests/validation/CL/ActivationLayer.cpp                                   | 42
-rw-r--r-- tests/validation/CL/ArithmeticAddition.cpp                                | 15
-rw-r--r-- tests/validation/CL/BatchConcatenateLayer.cpp                             | 51
-rw-r--r-- tests/validation/CL/BatchNormalizationLayer.cpp                           | 30
-rw-r--r-- tests/validation/CL/GEMMLowp.cpp                                          | 39
-rw-r--r-- tests/validation/CL/GEMMMatrixMultiplyNative.cpp                          | 72
-rw-r--r-- tests/validation/CL/GEMMMatrixMultiplyReshaped.cpp                        | 89
-rw-r--r-- tests/validation/CL/GEMMReshapeLHSMatrix.cpp                              | 57
-rw-r--r-- tests/validation/CL/Im2Col.cpp                                            | 39
-rw-r--r-- tests/validation/CL/PoolingLayer.cpp                                      | 53
-rw-r--r-- tests/validation/CL/Winograd.cpp                                          | 35
27 files changed, 63 insertions(+), 533 deletions(-)
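Note that in the four kernels whose padding-free guarantee currently holds only for NHWC (batch normalization, im2col, pooling and the Winograd input transform), the assert added below is guarded on the data layout, e.g.:

ARM_COMPUTE_ERROR_ON(input->info()->data_layout() == DataLayout::NHWC && has_padding_changed(padding_info));

so NCHW configurations, which may still legitimately pad, are left unchecked.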
diff --git a/src/core/CL/cl_kernels/activation_layer.cl b/src/core/CL/cl_kernels/activation_layer.cl
index 174b75398d..bc2c99b6c8 100644
--- a/src/core/CL/cl_kernels/activation_layer.cl
+++ b/src/core/CL/cl_kernels/activation_layer.cl
@@ -33,7 +33,7 @@
*
* @note Datatype should be given as a preprocessor argument using -DDATA_TYPE=type. e.g. -DDATA_TYPE=short
* @note Vector size should be given as a preprocessor argument using -DVEC_SIZE=size. e.g. -DVEC_SIZE=16
- * @note Leftover vector size has to be passed at compile time using -DVEC_SIZE_LEFTOVER. e.g. -DVEC_SIZE=3. It is defined as the remainder between the input's first dimension and VEC_SIZE
+ * @note Leftover vector size has to be passed at compile time using -DVEC_SIZE_LEFTOVER. e.g. -DVEC_SIZE_LEFTOVER=3. It is defined as the remainder between the input's first dimension and VEC_SIZE
* @note Activation function should be given as a preprocessor argument using -DACT=name. e.g. -DACT=TANH
* @note A, B variables required by some activation functions are set using -DA_VAL= and -DB_VAL= respectively.
*
diff --git a/src/core/CL/cl_kernels/activation_layer_quant.cl b/src/core/CL/cl_kernels/activation_layer_quant.cl
index c031c86a5e..66261019ab 100644
--- a/src/core/CL/cl_kernels/activation_layer_quant.cl
+++ b/src/core/CL/cl_kernels/activation_layer_quant.cl
@@ -36,7 +36,7 @@
*
* @note Datatype should be given as a preprocessor argument using -DDATA_TYPE=type. e.g. -DDATA_TYPE=short
* @note Vector size should be given as a preprocessor argument using -DVEC_SIZE=size. e.g. -DVEC_SIZE=16
- * @note Leftover vector size has to be passed at compile time using -DVEC_SIZE_LEFTOVER. e.g. -DVEC_SIZE=3. It is defined as the remainder between the input's first dimension and VEC_SIZE
+ * @note Leftover vector size has to be passed at compile time using -DVEC_SIZE_LEFTOVER. e.g. -DVEC_SIZE_LEFTOVER=3. It is defined as the remainder between the input's first dimension and VEC_SIZE
* @note A, B variables required by some activation functions are set using -DA_VAL= and -DB_VAL= respectively.
* @note Quantization scales of the input/output tensors are passed in with -DS1_VAL= and -DS2_VAL= respectively.
* @note Quantization offsets of the input/output tensors are passed in only if asymmetric with -DO1_VAL= and -DO2_VAL= respectively.
@@ -108,7 +108,7 @@ __kernel void activation_layer_quant_f32(
*
* @note Datatype should be given as a preprocessor argument using -DDATA_TYPE=type. e.g. -DDATA_TYPE=short
* @note Vector size should be given as a preprocessor argument using -DVEC_SIZE=size. e.g. -DVEC_SIZE=16
- * @note Leftover vector size has to be passed at compile time using -DVEC_SIZE_LEFTOVER. e.g. -DVEC_SIZE=3. It is defined as the remainder between the input's first dimension and VEC_SIZE
+ * @note Leftover vector size has to be passed at compile time using -DVEC_SIZE_LEFTOVER. e.g. -DVEC_SIZE_LEFTOVER=3. It is defined as the remainder between the input's first dimension and VEC_SIZE
* @note Activation function should be given as a preprocessor argument using -DACT=name. e.g. -DACT=TANH
* @note A, B variables required by some activation functions are set using -DA_VAL= and -DB_VAL= respectively.
* @note Quantization scales of the input/output tensors are passed in with -DS1_VAL= and -DS2_VAL= respectively.
diff --git a/src/core/CL/kernels/CLActivationLayerKernel.cpp b/src/core/CL/kernels/CLActivationLayerKernel.cpp
index 8ddf8d8f9e..9f9538cb76 100644
--- a/src/core/CL/kernels/CLActivationLayerKernel.cpp
+++ b/src/core/CL/kernels/CLActivationLayerKernel.cpp
@@ -91,6 +91,8 @@ void CLActivationLayerKernel::configure(const CLCompileContext &compile_context,
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input);
+ auto padding_info = get_padding_info({ input, output });
+
_run_in_place = (output == nullptr) || (output == input);
if(output != nullptr)
@@ -207,6 +209,8 @@ void CLActivationLayerKernel::configure(const CLCompileContext &compile_context,
_config_id += support::cpp11::to_string(input->dimension(0));
_config_id += "_";
_config_id += support::cpp11::to_string(input->dimension(1));
+
+ ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
}
Status CLActivationLayerKernel::validate(const ITensorInfo *input, const ITensorInfo *output, const ActivationLayerInfo &act_info)
diff --git a/src/core/CL/kernels/CLBatchConcatenateLayerKernel.cpp b/src/core/CL/kernels/CLBatchConcatenateLayerKernel.cpp
index 7e9424f58b..ccd6a5a0fc 100644
--- a/src/core/CL/kernels/CLBatchConcatenateLayerKernel.cpp
+++ b/src/core/CL/kernels/CLBatchConcatenateLayerKernel.cpp
@@ -65,6 +65,8 @@ void CLBatchConcatenateLayerKernel::configure(const CLCompileContext &compile_co
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input, batch_offset, output));
+ auto padding_info = get_padding_info({ input, output });
+
_batch_offset = batch_offset;
const unsigned int num_elems_processed_per_iteration = adjust_vec_size(16 / input->element_size(), input->dimension(0));
@@ -109,6 +111,8 @@ void CLBatchConcatenateLayerKernel::configure(const CLCompileContext &compile_co
_config_id += support::cpp11::to_string(input->dimension(2));
_config_id += "_";
_config_id += support::cpp11::to_string(input->dimension(3));
+
+ ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
}
Status CLBatchConcatenateLayerKernel::validate(const arm_compute::ITensorInfo *input,
diff --git a/src/core/CL/kernels/CLBatchNormalizationLayerKernel.cpp b/src/core/CL/kernels/CLBatchNormalizationLayerKernel.cpp
index 9aeca3bcfe..44bdc6f587 100644
--- a/src/core/CL/kernels/CLBatchNormalizationLayerKernel.cpp
+++ b/src/core/CL/kernels/CLBatchNormalizationLayerKernel.cpp
@@ -123,13 +123,14 @@ void CLBatchNormalizationLayerKernel::configure(const CLCompileContext &compile_
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, mean, var);
- _input = input;
- _output = output;
- _mean = mean;
- _var = var;
- _beta = beta;
- _gamma = gamma;
- _epsilon = epsilon;
+ auto padding_info = get_padding_info({ input, output, mean, var, beta, gamma });
+ _input = input;
+ _output = output;
+ _mean = mean;
+ _var = var;
+ _beta = beta;
+ _gamma = gamma;
+ _epsilon = epsilon;
_run_in_place = (output == nullptr) || (output == input);
@@ -186,6 +187,8 @@ void CLBatchNormalizationLayerKernel::configure(const CLCompileContext &compile_
ICLKernel::configure_internal(win_config.second);
}
+ ARM_COMPUTE_ERROR_ON(input->info()->data_layout() == DataLayout::NHWC && has_padding_changed(padding_info));
+
_config_id = "batch_normalization_layer_";
_config_id += string_from_data_type(input->info()->data_type());
_config_id += "_";
diff --git a/src/core/CL/kernels/CLDepthConcatenateLayerKernel.cpp b/src/core/CL/kernels/CLDepthConcatenateLayerKernel.cpp
index 78adfd202f..eb5bfc2d86 100644
--- a/src/core/CL/kernels/CLDepthConcatenateLayerKernel.cpp
+++ b/src/core/CL/kernels/CLDepthConcatenateLayerKernel.cpp
@@ -64,6 +64,8 @@ void CLDepthConcatenateLayerKernel::configure(const CLCompileContext &compile_co
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input, depth_offset, output));
+ auto padding_info = get_padding_info({ input, output });
+
_depth_offset = depth_offset;
const unsigned int num_elems_processed_per_iteration = adjust_vec_size(16 / input->element_size(), input->dimension(0));
@@ -94,6 +96,8 @@ void CLDepthConcatenateLayerKernel::configure(const CLCompileContext &compile_co
// Set output valid region
output->set_valid_region(ValidRegion(Coordinates(), output->tensor_shape()));
+
+ ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
}
Status CLDepthConcatenateLayerKernel::validate(const arm_compute::ITensorInfo *input,
diff --git a/src/core/CL/kernels/CLElementwiseOperationKernel.cpp b/src/core/CL/kernels/CLElementwiseOperationKernel.cpp
index 896ee119c1..efb3fe79e3 100644
--- a/src/core/CL/kernels/CLElementwiseOperationKernel.cpp
+++ b/src/core/CL/kernels/CLElementwiseOperationKernel.cpp
@@ -333,11 +333,13 @@ void CLSaturatedArithmeticOperationKernel::configure(const CLCompileContext &com
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input1, input2, output);
ARM_COMPUTE_ERROR_THROW_ON(CLSaturatedArithmeticOperationKernel::validate(op, input1, input2, output, policy, act_info));
+ auto padding_info = get_padding_info({ input1, input2, output });
_policy = policy;
_op = op;
_act_info = act_info;
configure_common(compile_context, input1, input2, output);
+ ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
}
Status CLSaturatedArithmeticOperationKernel::validate(ArithmeticOperation op, const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ConvertPolicy &policy,
@@ -389,10 +391,12 @@ void CLArithmeticOperationKernel::configure(const CLCompileContext &compile_cont
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input1, input2, output);
ARM_COMPUTE_ERROR_THROW_ON(CLArithmeticOperationKernel::validate(op, input1, input2, output, act_info));
+ auto padding_info = get_padding_info({ input1, input2, output });
_op = op;
_act_info = act_info;
configure_common(compile_context, input1, input2, output);
+ ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
}
Status CLArithmeticOperationKernel::validate(ArithmeticOperation op, const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info)
diff --git a/src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleByFixedPointKernel.cpp b/src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleByFixedPointKernel.cpp
index d0f016879e..6a58d5e202 100644
--- a/src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleByFixedPointKernel.cpp
+++ b/src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleByFixedPointKernel.cpp
@@ -84,6 +84,8 @@ void CLGEMMLowpQuantizeDownInt32ScaleByFixedPointKernel::configure(const CLCompi
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), (bias != nullptr) ? bias->info() : nullptr, output->info(), info));
+ auto padding_info = get_padding_info({ input, bias, output });
+
// Output auto initialization if not yet initialized
auto_init_if_empty(*output->info(), input->info()->clone()->set_data_type(info->output_data_type));
@@ -116,6 +118,8 @@ void CLGEMMLowpQuantizeDownInt32ScaleByFixedPointKernel::configure(const CLCompi
// Configure kernel window
auto win = calculate_max_window(*output->info(), Steps(num_elems_processed_per_iteration));
ICLKernel::configure_internal(win);
+
+ ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
}
void CLGEMMLowpQuantizeDownInt32ScaleByFixedPointKernel::run(const Window &window, cl::CommandQueue &queue)
diff --git a/src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleByFloatKernel.cpp b/src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleByFloatKernel.cpp
index 1d29dfe4b3..a5888a5ded 100644
--- a/src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleByFloatKernel.cpp
+++ b/src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleByFloatKernel.cpp
@@ -94,6 +94,8 @@ void CLGEMMLowpQuantizeDownInt32ScaleByFloatKernel::configure(const CLCompileCon
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), (bias != nullptr) ? bias->info() : nullptr, output->info(), info));
+ auto padding_info = get_padding_info({ input, bias, output });
+
// Output auto initialization if not yet initialized
auto_init_if_empty(*output->info(), input->info()->clone()->set_data_type(info->output_data_type));
@@ -123,6 +125,8 @@ void CLGEMMLowpQuantizeDownInt32ScaleByFloatKernel::configure(const CLCompileCon
// Configure kernel window
Window win = calculate_max_window(*input->info(), Steps(num_elems_processed_per_iteration));
ICLKernel::configure_internal(win);
+
+ ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
}
void CLGEMMLowpQuantizeDownInt32ScaleByFloatKernel::run(const Window &window, cl::CommandQueue &queue)
diff --git a/src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleKernel.cpp b/src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleKernel.cpp
index d32d328fc2..7d4352479c 100644
--- a/src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleKernel.cpp
+++ b/src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleKernel.cpp
@@ -92,6 +92,8 @@ void CLGEMMLowpQuantizeDownInt32ScaleKernel::configure(const CLCompileContext &c
output->info(),
output_stage));
+ auto padding_info = get_padding_info({ input, bias, output });
+
// Output auto initialization if not yet initialized
auto_init_if_empty(*output->info(), input->info()->clone()->set_data_type(output_stage->output_data_type));
@@ -123,6 +125,8 @@ void CLGEMMLowpQuantizeDownInt32ScaleKernel::configure(const CLCompileContext &c
// Configure kernel window
Window win = calculate_max_window(*input->info(), Steps(num_elems_processed_per_iteration));
ICLKernel::configure_internal(win);
+
+ ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
}
void CLGEMMLowpQuantizeDownInt32ScaleKernel::run(const Window &window, cl::CommandQueue &queue)
diff --git a/src/core/CL/kernels/CLGEMMMatrixMultiplyNativeKernel.cpp b/src/core/CL/kernels/CLGEMMMatrixMultiplyNativeKernel.cpp
index f613937f54..387f1a4ebc 100644
--- a/src/core/CL/kernels/CLGEMMMatrixMultiplyNativeKernel.cpp
+++ b/src/core/CL/kernels/CLGEMMMatrixMultiplyNativeKernel.cpp
@@ -219,6 +219,7 @@ void CLGEMMMatrixMultiplyNativeKernel::configure(const CLCompileContext &compile
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input0->info(), input1->info(), (input2 != nullptr ? input2->info() : nullptr), output->info(), alpha, beta, lhs_info, rhs_info, gemm_info));
+ auto padding_info = get_padding_info({ input0, output });
_input0 = input0;
_input1 = input1;
_input2 = helpers::float_ops::is_zero(beta) ? nullptr : input2;
@@ -317,6 +318,8 @@ void CLGEMMMatrixMultiplyNativeKernel::configure(const CLCompileContext &compile
_config_id += support::cpp11::to_string(rhs_info.n0);
_config_id += "_";
_config_id += support::cpp11::to_string(rhs_info.k0);
+
+ ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
}
Status CLGEMMMatrixMultiplyNativeKernel::validate(const ITensorInfo *input0, const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, float alpha, float beta,
diff --git a/src/core/CL/kernels/CLGEMMMatrixMultiplyReshapedKernel.cpp b/src/core/CL/kernels/CLGEMMMatrixMultiplyReshapedKernel.cpp
index fb15b42fe2..23e18bac92 100644
--- a/src/core/CL/kernels/CLGEMMMatrixMultiplyReshapedKernel.cpp
+++ b/src/core/CL/kernels/CLGEMMMatrixMultiplyReshapedKernel.cpp
@@ -225,6 +225,7 @@ void CLGEMMMatrixMultiplyReshapedKernel::configure(const CLCompileContext &compi
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input0->info(), input1->info(), (input2 != nullptr ? input2->info() : nullptr), output->info(), alpha, beta, lhs_info, rhs_info, gemm_info));
+ auto padding_info = get_padding_info({ input0, output });
_input0 = input0;
_input1 = input1;
_input2 = helpers::float_ops::is_zero(beta) ? nullptr : input2;
@@ -328,6 +329,8 @@ void CLGEMMMatrixMultiplyReshapedKernel::configure(const CLCompileContext &compi
_config_id += support::cpp11::to_string(lhs_info.interleave);
_config_id += "_";
_config_id += support::cpp11::to_string(rhs_info.interleave);
+
+ ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
}
Status CLGEMMMatrixMultiplyReshapedKernel::validate(const ITensorInfo *input0, const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, float alpha, float beta,
diff --git a/src/core/CL/kernels/CLGEMMReshapeLHSMatrixKernel.cpp b/src/core/CL/kernels/CLGEMMReshapeLHSMatrixKernel.cpp
index 3e2fc79704..52510075b7 100644
--- a/src/core/CL/kernels/CLGEMMReshapeLHSMatrixKernel.cpp
+++ b/src/core/CL/kernels/CLGEMMReshapeLHSMatrixKernel.cpp
@@ -125,6 +125,8 @@ void CLGEMMReshapeLHSMatrixKernel::configure(const CLCompileContext &compile_con
// Perform validate step
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), lhs_info, reinterpret_input_as_3d));
+ auto padding_info = get_padding_info({ input });
+
_input = input;
_output = output;
_reinterpret_input_as_3d = reinterpret_input_as_3d;
@@ -180,6 +182,8 @@ void CLGEMMReshapeLHSMatrixKernel::configure(const CLCompileContext &compile_con
_config_id += support::cpp11::to_string(lhs_info.interleave);
_config_id += "_";
_config_id += support::cpp11::to_string(lhs_info.transpose);
+
+ ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
}
Status CLGEMMReshapeLHSMatrixKernel::validate(const ITensorInfo *input, const ITensorInfo *output, const GEMMLHSMatrixInfo &lhs_info, bool reinterpret_input_as_3d)
diff --git a/src/core/CL/kernels/CLIm2ColKernel.cpp b/src/core/CL/kernels/CLIm2ColKernel.cpp
index 0789cdc8a7..07309de83c 100644
--- a/src/core/CL/kernels/CLIm2ColKernel.cpp
+++ b/src/core/CL/kernels/CLIm2ColKernel.cpp
@@ -317,7 +317,8 @@ void CLIm2ColKernel::configure(const CLCompileContext &compile_context, const IC
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), kernel_dims, conv_info, has_bias, dilation, num_groups));
- _data_layout = input->info()->data_layout();
+ auto padding_info = get_padding_info({ input, output });
+ _data_layout = input->info()->data_layout();
const unsigned int width_idx = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::WIDTH);
const unsigned int height_idx = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::HEIGHT);
@@ -358,6 +359,8 @@ void CLIm2ColKernel::configure(const CLCompileContext &compile_context, const IC
_config_id += support::cpp11::to_string(output->info()->dimension(1));
_config_id += "_";
_config_id += lower_string(string_from_data_layout(_data_layout));
+
+ ARM_COMPUTE_ERROR_ON(input->info()->data_layout() == DataLayout::NHWC && has_padding_changed(padding_info));
}
Status CLIm2ColKernel::validate(const ITensorInfo *input, const ITensorInfo *output, const Size2D &kernel_dims, const PadStrideInfo &conv_info, bool has_bias, const Size2D &dilation,
diff --git a/src/core/CL/kernels/CLPoolingLayerKernel.cpp b/src/core/CL/kernels/CLPoolingLayerKernel.cpp
index 905610c31f..79843cd299 100644
--- a/src/core/CL/kernels/CLPoolingLayerKernel.cpp
+++ b/src/core/CL/kernels/CLPoolingLayerKernel.cpp
@@ -216,6 +216,8 @@ void CLPoolingLayerKernel::configure(const CLCompileContext &compile_context, co
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
+ auto padding_info = get_padding_info({ input, output, indices });
+
// Set instance variables
_input = input;
_output = output;
@@ -419,6 +421,8 @@ void CLPoolingLayerKernel::configure(const CLCompileContext &compile_context, co
_config_id += support::cpp11::to_string(output->info()->dimension(idx_channel));
_config_id += "_";
_config_id += lower_string(string_from_data_layout(input->info()->data_layout()));
+
+ ARM_COMPUTE_ERROR_ON(input->info()->data_layout() == DataLayout::NHWC && has_padding_changed(padding_info));
}
Status CLPoolingLayerKernel::validate(const ITensorInfo *input, const ITensorInfo *output, const PoolingLayerInfo &pool_info, const ITensorInfo *indices)
diff --git a/src/core/CL/kernels/CLWinogradInputTransformKernel.cpp b/src/core/CL/kernels/CLWinogradInputTransformKernel.cpp
index 6f695c93db..695e1cbbf1 100644
--- a/src/core/CL/kernels/CLWinogradInputTransformKernel.cpp
+++ b/src/core/CL/kernels/CLWinogradInputTransformKernel.cpp
@@ -115,6 +115,8 @@ void CLWinogradInputTransformKernel::configure(const CLCompileContext &compile_c
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), winograd_info));
+ auto padding_info = get_padding_info({ input, output });
+
const PadStrideInfo conv_info = winograd_info.convolution_info;
const Size2D output_tile_size = winograd_info.output_tile_size;
const Size2D kernel_size = winograd_info.kernel_size;
@@ -204,6 +206,8 @@ void CLWinogradInputTransformKernel::configure(const CLCompileContext &compile_c
ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
ICLKernel::configure_internal(win_config.second, cl::NDRange(1, 1, 8));
+ ARM_COMPUTE_ERROR_ON((input->info()->data_layout() == DataLayout::NHWC) && has_padding_changed(padding_info));
+
_config_id = kernel_name;
_config_id += support::cpp11::to_string(input->info()->dimension(0));
_config_id += "_";
diff --git a/tests/validation/CL/ActivationLayer.cpp b/tests/validation/CL/ActivationLayer.cpp
index f776e334a0..9b725a44e7 100644
--- a/tests/validation/CL/ActivationLayer.cpp
+++ b/tests/validation/CL/ActivationLayer.cpp
@@ -90,28 +90,6 @@ const auto CNNDataTypes = framework::dataset::make("DataType",
/** Input data sets. */
const auto ActivationDataset = combine(combine(framework::dataset::make("InPlace", { false, true }), datasets::ActivationFunctions()), framework::dataset::make("AlphaBeta", { 0.5f, 1.f }));
-/** Zero padding test */
-bool validate_zero_padding(unsigned int width, unsigned int height, unsigned int channels, unsigned int batches, const ActivationLayerInfo &act_info, DataType data_type)
-{
- TensorShape shape(width, height, channels, batches);
-
- // Create tensors
- CLTensor src = create_tensor<CLTensor>(shape, data_type);
- CLTensor dst = create_tensor<CLTensor>(shape, data_type);
-
- src.info()->set_quantization_info(QuantizationInfo(1.f / 256.f, 0));
- dst.info()->set_quantization_info(QuantizationInfo(1.f / 256.f, 0));
-
- ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
-
- // Create and configure function
- CLActivationLayer act;
- act.configure(&src, &dst, act_info);
-
- // Padding can be added along rhs and bias's X dimension
- return src.info()->padding().empty() && dst.info()->padding().empty();
-}
} // namespace
TEST_SUITE(CL)
@@ -155,26 +133,6 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
ARM_COMPUTE_EXPECT(bool(CLActivationLayer::validate(&input_info.clone()->set_is_resizable(false), (output_info.total_size() == 0) ? nullptr : &output_info.clone()->set_is_resizable(false), act_info)) == expected, framework::LogLevel::ERRORS);
}
-/** Validate zero padding tests
- *
- * A series of validation tests to check that no padding is added as part of configuration for 4 different scenarios.
- *
- * Checks performed in order:
- * - First dimension multiple of 16
- * - First dimension non-multiple of 16
- * - First dimension less than 16 (vec_size for qasymm8) but multiple
- * - First dimension less than 16 (vec_size for qasymm8) non-multiple
- * - Tensor with only one element
- */
-DATA_TEST_CASE(ValidateZeroPadding, framework::DatasetMode::ALL, zip(
-framework::dataset::make("Width", { 32U, 37U, 12U, 13U, 1U }),
-framework::dataset::make("DataType", { DataType::F32, DataType::QASYMM8 })),
-width, data_type)
-{
- const bool one_elem = (width == 1U);
- bool status = validate_zero_padding(width, one_elem ? 1U : 17U, one_elem ? 1U : 7U, one_elem ? 1U : 2U, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 1U, 6U), data_type);
- ARM_COMPUTE_EXPECT(status, framework::LogLevel::ERRORS);
-}
// clang-format on
// *INDENT-ON*
diff --git a/tests/validation/CL/ArithmeticAddition.cpp b/tests/validation/CL/ArithmeticAddition.cpp
index 6635c25fe7..c74f6a3b23 100644
--- a/tests/validation/CL/ArithmeticAddition.cpp
+++ b/tests/validation/CL/ArithmeticAddition.cpp
@@ -124,21 +124,6 @@ TEST_CASE(FusedActivation, framework::DatasetMode::ALL)
ARM_COMPUTE_EXPECT(bool(result) == false, framework::LogLevel::ERRORS);
}
-TEST_CASE(ValidateZeroPadding, framework::DatasetMode::ALL)
-{
- CLTensor src1 = create_tensor<CLTensor>(TensorShape(3U, 3U), DataType::F32, 1, QuantizationInfo());
- CLTensor src2 = create_tensor<CLTensor>(TensorShape(3U, 3U), DataType::F32, 1, QuantizationInfo());
- CLTensor dst = create_tensor<CLTensor>(TensorShape(3U, 3U), DataType::F32, 1, QuantizationInfo());
-
- // Create and configure function
- CLArithmeticAddition add;
- add.configure(&src1, &src2, &dst, ConvertPolicy::WRAP);
-
- validate(src1.info()->padding(), PaddingSize(0, 0, 0, 0));
- validate(src2.info()->padding(), PaddingSize(0, 0, 0, 0));
- validate(dst.info()->padding(), PaddingSize(0, 0, 0, 0));
-}
-
template <typename T>
using CLArithmeticAdditionFixture = ArithmeticAdditionValidationFixture<CLTensor, CLAccessor, CLArithmeticAddition, T>;
diff --git a/tests/validation/CL/BatchConcatenateLayer.cpp b/tests/validation/CL/BatchConcatenateLayer.cpp
index e5de3a75c7..522a6ab8ee 100644
--- a/tests/validation/CL/BatchConcatenateLayer.cpp
+++ b/tests/validation/CL/BatchConcatenateLayer.cpp
@@ -39,37 +39,6 @@ namespace test
{
namespace validation
{
-namespace
-{
-/** Zero padding test */
-bool validate_zero_padding(unsigned int width, unsigned int height, unsigned int channels, unsigned int batches, DataType data_type)
-{
- TensorShape src_shape(width, height, channels, batches);
- TensorShape dst_shape(width, height, channels, batches * 2);
-
- // Create tensors
- CLTensor src0 = create_tensor<CLTensor>(src_shape, data_type);
- CLTensor src1 = create_tensor<CLTensor>(src_shape, data_type);
- CLTensor dst = create_tensor<CLTensor>(dst_shape, data_type);
-
- src0.info()->set_quantization_info(QuantizationInfo(1.f / 256.f, 0));
- src1.info()->set_quantization_info(QuantizationInfo(1.f / 256.f, 0));
- dst.info()->set_quantization_info(QuantizationInfo(1.f / 256.f, 0));
-
- ARM_COMPUTE_EXPECT(src0.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(src1.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
-
- std::vector<const ICLTensor *> srcs = { &src0, &src1 };
-
- // Create and configure function
- CLConcatenateLayer concat;
- concat.configure(srcs, &dst, 3U);
-
- // Padding can be added along rhs and bias's X dimension
- return src0.info()->padding().empty() && src1.info()->padding().empty() && dst.info()->padding().empty();
-}
-}
TEST_SUITE(CL)
TEST_SUITE(BatchConcatenateLayer)
@@ -112,26 +81,6 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
ARM_COMPUTE_EXPECT(is_valid == expected, framework::LogLevel::ERRORS);
}
-/** Validate zero padding tests
- *
- * A series of validation tests to check that no padding is added as part of configuration for 4 different scenarios.
- *
- * Checks performed in order:
- * - First dimension multiple of 16
- * - First dimension non-multiple of 16
- * - First dimension less than 16 (vec_size for qasymm8) but multiple
- * - First dimension less than 16 (vec_size for qasymm8) non-multiple
- * - Tensor with only one element
- */
-DATA_TEST_CASE(ValidateZeroPadding, framework::DatasetMode::ALL, zip(
-framework::dataset::make("Width", { 32U, 37U, 12U, 13U, 1U }),
-framework::dataset::make("DataType", { DataType::F32, DataType::QASYMM8 })),
-width, data_type)
-{
- const bool one_elem = (width == 1U);
- bool status = validate_zero_padding(width, one_elem ? 1U : 17U, one_elem ? 1U : 7U, one_elem ? 1U : 2U, data_type);
- ARM_COMPUTE_EXPECT(status, framework::LogLevel::ERRORS);
-}
// clang-format on
// *INDENT-ON*
diff --git a/tests/validation/CL/BatchNormalizationLayer.cpp b/tests/validation/CL/BatchNormalizationLayer.cpp
index 88f00b0eff..8b3bdbc3ea 100644
--- a/tests/validation/CL/BatchNormalizationLayer.cpp
+++ b/tests/validation/CL/BatchNormalizationLayer.cpp
@@ -64,28 +64,6 @@ framework::dataset::make("UseBeta", { false, true })),
framework::dataset::make("UseGamma", { false, true })),
framework::dataset::make("Epsilon", { 0.001f }));
-bool validate_zero_padding(TensorShape shape0, const TensorShape shape1, float epsilon, ActivationLayerInfo act_info, DataType dt, DataLayout data_layout)
-{
- if(data_layout == DataLayout::NHWC)
- {
- permute(shape0, PermutationVector(2U, 0U, 1U));
- }
-
- // Create tensors
- CLTensor src = create_tensor<CLTensor>(shape0, dt, 1, QuantizationInfo(), data_layout);
- CLTensor dst = create_tensor<CLTensor>(shape0, dt, 1, QuantizationInfo(), data_layout);
- CLTensor mean = create_tensor<CLTensor>(shape1, dt, 1);
- CLTensor var = create_tensor<CLTensor>(shape1, dt, 1);
- CLTensor beta = create_tensor<CLTensor>(shape1, dt, 1);
- CLTensor gamma = create_tensor<CLTensor>(shape1, dt, 1);
-
- // Create and configure function
- CLBatchNormalizationLayer norm;
- norm.configure(&src, &dst, &mean, &var, &beta, &gamma, epsilon, act_info);
-
- return src.info()->padding().empty() && dst.info()->padding().empty() && mean.info()->padding().empty() && var.info()->padding().empty() && beta.info()->padding().empty()
- && gamma.info()->padding().empty();
-}
} // namespace
TEST_SUITE(CL)
@@ -142,14 +120,6 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(
// clang-format on
// *INDENT-ON*
-DATA_TEST_CASE(ValidateZeroPadding, framework::DatasetMode::ALL, combine(combine(combine(datasets::SmallRandomBatchNormalizationLayerDataset(), act_infos), framework::dataset::make("DataType", { DataType::F32, DataType::F16 })),
- framework::dataset::make("DataLayout", { DataLayout::NHWC })),
- shape0, shape1, episilon, act_infos, data_type, data_layout)
-{
- bool status = validate_zero_padding(shape0, shape1, episilon, act_infos, data_type, data_layout);
- ARM_COMPUTE_EXPECT(status, framework::LogLevel::ERRORS);
-}
-
TEST_SUITE(Float)
TEST_SUITE(FP32)
FIXTURE_DATA_TEST_CASE(Random, CLBatchNormalizationLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(datasets::SmallRandomBatchNormalizationLayerDataset(),
diff --git a/tests/validation/CL/GEMMLowp.cpp b/tests/validation/CL/GEMMLowp.cpp
index 00f831b2e2..5a1971b54c 100644
--- a/tests/validation/CL/GEMMLowp.cpp
+++ b/tests/validation/CL/GEMMLowp.cpp
@@ -47,25 +47,6 @@ namespace validation
namespace
{
constexpr AbsoluteTolerance<float> tolerance_quant(1); /**< Tolerance value for comparing reference's output against implementation's output for quantized data types */
-
-bool validate_output_stage_zero_padding(const TensorShape shape, const DataType dt, const GEMMLowpOutputStageType type)
-{
- // Create tensors
- CLTensor src = create_tensor<CLTensor>(shape, DataType::S32, 1);
- CLTensor bias = create_tensor<CLTensor>(TensorShape(shape.x()), DataType::S32, 1);
- CLTensor dst = create_tensor<CLTensor>(shape, dt, 1);
-
- GEMMLowpOutputStageInfo info;
- info.type = type;
- info.output_data_type = dt;
- std::tie(info.gemmlowp_min_bound, info.gemmlowp_max_bound) = quantization::get_min_max_values_from_quantized_data_type(dt);
-
- // Create and configure function
- CLGEMMLowpOutputStage output_stage;
- output_stage.configure(&src, &bias, &dst, info);
-
- return src.info()->padding().empty() && bias.info()->padding().empty() && dst.info()->padding().empty();
-}
}
TEST_SUITE(CL)
TEST_SUITE(GEMMLowp)
@@ -147,13 +128,6 @@ TEST_SUITE(OutputStage)
TEST_SUITE(QuantizeDownInt32Scale)
-DATA_TEST_CASE(ValidateZeroPadding, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), framework::dataset::make("DataType", { DataType::QASYMM8, DataType::QASYMM8_SIGNED })),
- shape, data_type)
-{
- bool status = validate_output_stage_zero_padding(shape, data_type, GEMMLowpOutputStageType::QUANTIZE_DOWN);
- ARM_COMPUTE_EXPECT(status, framework::LogLevel::ERRORS);
-}
-
TEST_SUITE(QASYMM8)
const auto quantize_down_int32_to_uint8_scale_cases = framework::dataset::make("result_offset", -2, 1) * framework::dataset::make("result_mult_int", 1, 2) * framework::dataset::make("result_shift", 2,
@@ -212,12 +186,6 @@ TEST_SUITE_END() // QASYMM8_SIGNED
TEST_SUITE_END() // QuantizeDownInt32Scale
TEST_SUITE(QuantizeDownInt32ScaleByFixedPoint)
-DATA_TEST_CASE(ValidateZeroPadding, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), framework::dataset::make("DataType", { DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::QSYMM16 })),
- shape, data_type)
-{
- bool status = validate_output_stage_zero_padding(shape, data_type, GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT);
- ARM_COMPUTE_EXPECT(status, framework::LogLevel::ERRORS);
-}
TEST_SUITE(QASYMM8)
@@ -353,13 +321,6 @@ TEST_SUITE_END() // QuantizeDownInt32ScaleByFixedPoint
TEST_SUITE(QuantizeDownInt32ScaleByFloat)
-DATA_TEST_CASE(ValidateZeroPadding, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), framework::dataset::make("DataType", { DataType::QASYMM8, DataType::QASYMM8_SIGNED })),
- shape, data_type)
-{
- bool status = validate_output_stage_zero_padding(shape, data_type, GEMMLowpOutputStageType::QUANTIZE_DOWN_FLOAT);
- ARM_COMPUTE_EXPECT(status, framework::LogLevel::ERRORS);
-}
-
TEST_SUITE(QASYMM8)
using CLGEMMLowpQuantizeDownInt32ScaleByFloatFixture =
GEMMLowpQuantizeDownInt32ScaleByFloatValidationFixture<CLTensor, CLAccessor, CLGEMMLowpOutputStage, uint8_t>;
diff --git a/tests/validation/CL/GEMMMatrixMultiplyNative.cpp b/tests/validation/CL/GEMMMatrixMultiplyNative.cpp
index 1cf1209dee..ec6b87fbae 100644
--- a/tests/validation/CL/GEMMMatrixMultiplyNative.cpp
+++ b/tests/validation/CL/GEMMMatrixMultiplyNative.cpp
@@ -186,55 +186,6 @@ void validate_configuration(unsigned int m_value, unsigned int n_value, unsigned
CLGEMMMatrixMultiplyNative gemm;
gemm.configure(&lhs, &rhs, &bias, &dst, 1.0f, 1.0f, lhs_info, rhs_info, kernel_info);
}
-/** Zero padding test */
-bool validate_zero_padding(unsigned int m_value, unsigned int n_value, unsigned int k_value, unsigned int b_value, unsigned int m0_value, unsigned int n0_value, unsigned int k0_value, bool broadcast_bias, DataType data_type, const ActivationLayerInfo &act_info)
-{
- const unsigned int M = m_value;
- const unsigned int N = n_value;
- const unsigned int K = k_value;
-
- GEMMLHSMatrixInfo lhs_info;
- lhs_info.m0 = m0_value;
- lhs_info.k0 = k0_value;
-
- GEMMRHSMatrixInfo rhs_info;
- rhs_info.n0 = n0_value;
- rhs_info.k0 = k0_value;
-
- GEMMKernelInfo kernel_info;
- kernel_info.m = M;
- kernel_info.n = N;
- kernel_info.k = K;
- kernel_info.broadcast_bias = broadcast_bias;
- kernel_info.activation_info = act_info;
-
- const TensorShape lhs_shape(K, M, b_value);
- const TensorShape rhs_shape(N, K, b_value);
- const TensorShape bias_shape(N,
- broadcast_bias? 1 : M,
- broadcast_bias? 1 : b_value);
- const TensorShape dst_shape = compute_mm_shape(TensorInfo(lhs_shape, 1, data_type),
- TensorInfo(rhs_shape, 1, data_type),
- kernel_info);
-
- // Create tensors
- CLTensor lhs = create_tensor<CLTensor>(lhs_shape, data_type);
- CLTensor rhs = create_tensor<CLTensor>(rhs_shape, data_type);
- CLTensor bias = create_tensor<CLTensor>(bias_shape, data_type);
- CLTensor dst = create_tensor<CLTensor>(dst_shape, data_type);
-
- ARM_COMPUTE_EXPECT(lhs.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(rhs.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
-
- // Create and configure function
- CLGEMMMatrixMultiplyNative gemm;
- gemm.configure(&lhs, &rhs, &bias, &dst, 1.0f, 1.0f, lhs_info, rhs_info, kernel_info);
-
- // Padding can be added along rhs and bias's X dimension
- return dst.info()->padding().empty() && lhs.info()->padding().empty() && bias.info()->padding().bottom == 0 && bias.info()->padding().top == 0;
-}
} // namespace
TEST_SUITE(CL)
@@ -256,29 +207,6 @@ m_value, n_value, k_value, b_value, m0_value, n0_value, k0_value, broadcast_bias
validate_configuration(m_value, n_value, k_value, b_value, m0_value, n0_value, k0_value, broadcast_bias, DataType::F32, act_value);
}
-/** Validate zero padding tests
- *
- * A series of validation tests to check that no padding is added as part of configuration for 4 different scenarios.
- *
- * Checks performed in order:
- * - No partial blocks in both x and y dimensions
- * - Partial blocks in x dimension
- * - Partial blocks in y dimension
- * - Partial blocks in both x and y dimensions
- * - No blocks in both x and y dimensions, scalar store (N0==1)
- * - Special case: partial_n0 == 5 (vstore1 should be invoked instead of vstore_partial_1)
- */
-DATA_TEST_CASE(ValidateZeroPadding, framework::DatasetMode::ALL, zip(zip(zip(
-framework::dataset::make("M", { 24, 64, 101, 1, 50, 256, }),
-framework::dataset::make("N", { 48, 29, 16, 122, 20, 21, })),
-framework::dataset::make("M0", { 4, 8, 7, 2, 1, 8, })),
-framework::dataset::make("N0", { 4, 4, 16, 3, 1, 8, })),
-m_value, n_value, m0_value, n0_value)
-{
- bool status = validate_zero_padding(m_value, n_value, 23, 1, m0_value, n0_value, 4, false, DataType::F32, ActivationLayerInfo());
- ARM_COMPUTE_EXPECT(status, framework::LogLevel::ERRORS);
-}
-
FIXTURE_DATA_TEST_CASE(RunSmallBoundaryHandlingPartialInXPartialInY, CLGEMMMatrixMultiplyNativeFixture<float>, framework::DatasetMode::ALL,
combine(combine(
framework::dataset::make("M", 3),
diff --git a/tests/validation/CL/GEMMMatrixMultiplyReshaped.cpp b/tests/validation/CL/GEMMMatrixMultiplyReshaped.cpp
index 0a0a1fc397..95979b3131 100644
--- a/tests/validation/CL/GEMMMatrixMultiplyReshaped.cpp
+++ b/tests/validation/CL/GEMMMatrixMultiplyReshaped.cpp
@@ -171,100 +171,11 @@ const auto broadcast_bias_values = framework::dataset::make("broadcast_bias", {
/** LHS transposed values */
const auto lhs_transpose_values = framework::dataset::make("lhs_transpose", { false, true } );
-/** Zero padding test */
-bool validate_zero_padding(unsigned int m_value, unsigned int n_value, unsigned int k_value, unsigned int b_value,
- unsigned int m0_value, unsigned int n0_value, unsigned int k0_value, unsigned int h0_value,
- bool i_value_rhs, bool t_value_rhs, bool export_to_cl_image, bool broadcast_bias, unsigned int depth_output_gemm3d, const ActivationLayerInfo &act_info,
- DataType dt_input0, DataType dt_input1, DataType dt_input2, DataType dt_output, float alpha, float beta)
-{
- const unsigned int M = m_value;
- const unsigned int N = n_value;
- const unsigned int K = k_value;
-
- GEMMLHSMatrixInfo lhs_info;
- lhs_info.m0 = m0_value;
- lhs_info.k0 = k0_value;
-
- GEMMRHSMatrixInfo rhs_info;
- rhs_info.n0 = n0_value;
- rhs_info.k0 = k0_value;
- rhs_info.h0 = h0_value;
- rhs_info.interleave = i_value_rhs;
- rhs_info.transpose = t_value_rhs;
- rhs_info.export_to_cl_image = export_to_cl_image;
-
- GEMMKernelInfo kernel_info;
- kernel_info.m = M;
- kernel_info.n = N;
- kernel_info.k = K;
- kernel_info.depth_output_gemm3d = depth_output_gemm3d;
- kernel_info.reinterpret_input_as_3d = false;
- kernel_info.broadcast_bias = broadcast_bias;
- kernel_info.activation_info = act_info;
-
- const TensorShape lhs_shape(K, M, b_value);
- const TensorShape rhs_shape(N, K, b_value);
- const TensorShape lhs_shape_reshaped = compute_lhs_reshaped_shape(TensorInfo(lhs_shape, 1, dt_input0),
- lhs_info);
- const TensorShape rhs_shape_reshaped = compute_rhs_reshaped_shape(TensorInfo(rhs_shape, 1, dt_input1),
- rhs_info);
-
- const TensorShape dst_shape = compute_mm_shape(TensorInfo(lhs_shape_reshaped, 1, dt_input0),
- TensorInfo(rhs_shape_reshaped, 1, dt_input1),
- kernel_info);
-
- const TensorShape bias_shape(N,
- M, // Correct calculation should be: broadcast_bias? 1 : M, it's wrong here on purpose just for validation test
- broadcast_bias? 1 : b_value);
-
- // Create tensors
- CLTensor lhs_reshaped = create_tensor<CLTensor>(lhs_shape_reshaped, dt_input0);
- CLTensor rhs_reshaped = create_tensor<CLTensor>(rhs_shape_reshaped, dt_input1);
- CLTensor bias = create_tensor<CLTensor>(bias_shape, dt_input2);
- CLTensor dst = create_tensor<CLTensor>(dst_shape, dt_output);
-
- ARM_COMPUTE_EXPECT(lhs_reshaped.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(rhs_reshaped.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
-
- // Validate zero-padding
- CLGEMMMatrixMultiplyReshaped gemm;
-
- gemm.configure(&lhs_reshaped, &rhs_reshaped, &bias, &dst, alpha, beta, lhs_info, rhs_info, kernel_info);
-
- // Padding can be added along rhs and bias's X/Y dimension
- return dst.info()->padding().empty() && lhs_reshaped.info()->padding().empty();
-}
} // namespace
TEST_SUITE(CL)
TEST_SUITE(GEMMMatrixMultiplyReshaped)
-/** Validate zero padding tests
- *
- * A series of validation tests to check the zero padding requirement
- *
- * Checks performed in order:
- * - No partial blocks in both x and y dimensions
- * - Partial blocks in x dimension
- * - Partial blocks in y dimension
- * - Partial blocks in both x and y dimensions
- * - Special case: partial_n0 == 9 (vstore1 should be invoked instead of vstore_partial_1)
- */
-DATA_TEST_CASE(ValidateZeroPadding, framework::DatasetMode::ALL, zip(zip(zip(
-framework::dataset::make("M", { 24, 64, 101, 1, 103 }),
-framework::dataset::make("N", { 48, 29, 16, 121, 41 })),
-framework::dataset::make("M0", { 4, 8, 4, 2, 4 })),
-framework::dataset::make("N0", { 4, 4, 16, 2, 16 })),
-m_value, n_value, m0_value, n0_value)
-{
- constexpr DataType dt = DataType::F32;
-
- bool status = validate_zero_padding(m_value, n_value, 23, 1, m0_value, n0_value, 4, 1, false, false, false, 0, 0, ActivationLayerInfo(), dt, dt, dt, dt, 1.0f, 1.0f);
- ARM_COMPUTE_EXPECT(status, framework::LogLevel::ERRORS);
-}
-
// *INDENT-OFF*
// clang-format off
DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip(zip(
diff --git a/tests/validation/CL/GEMMReshapeLHSMatrix.cpp b/tests/validation/CL/GEMMReshapeLHSMatrix.cpp
index 4af495944e..34c37dffde 100644
--- a/tests/validation/CL/GEMMReshapeLHSMatrix.cpp
+++ b/tests/validation/CL/GEMMReshapeLHSMatrix.cpp
@@ -82,68 +82,11 @@ const auto i_values = framework::dataset::make("interleave", { true, false });
/** Transpose values to test */
const auto t_values = framework::dataset::make("transpose", { true, false });
-/** Zero padding test */
-bool validate_zero_padding(unsigned int m_value, unsigned int k_value, unsigned int b_value, unsigned int m0_value, unsigned int k0_value, unsigned int v0_value,
- bool i_value_lhs, bool t_value_lhs, bool input_as_3d, DataType dt)
-{
- const unsigned int M = m_value;
- const unsigned int K = k_value;
- const unsigned int B = b_value;
-
- GEMMLHSMatrixInfo lhs_info;
- lhs_info.m0 = m0_value;
- lhs_info.k0 = k0_value;
- lhs_info.v0 = v0_value;
- lhs_info.interleave = i_value_lhs;
- lhs_info.transpose = t_value_lhs;
-
- const TensorShape lhs_shape(K, M, B);
- const TensorShape lhs_shape_reshaped = compute_lhs_reshaped_shape(TensorInfo(lhs_shape, 1, dt), lhs_info, input_as_3d);
-
- // Create tensors
- CLTensor lhs = create_tensor<CLTensor>(lhs_shape, dt);
- CLTensor dst = create_tensor<CLTensor>(lhs_shape_reshaped, dt);
-
- ARM_COMPUTE_EXPECT(lhs.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
-
- // Validate zero-padding
- CLGEMMReshapeLHSMatrixKernel lhs_reshape;
-
- lhs_reshape.configure(&lhs, &dst, lhs_info, input_as_3d);
-
- return lhs.info()->padding().empty();
-}
} // namespace
TEST_SUITE(CL)
TEST_SUITE(GEMMReshapeLHSMatrix)
-/** Validate zero padding tests for the LHS input tensor
- *
- * A series of validation tests to test the zero padding requirement
- *
- * Checks performed in order:
- * - Case where M and K are smaller than M0 and K0
- * - Generic test case with batch size = 1
- * - Generic test case with batch size = 4
- * - Generic test case with input_as_3d_value = true
- */
-DATA_TEST_CASE(ValidateZeroPadding, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(
-framework::dataset::make("M", { 1, 23, 63, 101 }),
-framework::dataset::make("K", { 1, 47, 29, 27 })),
-framework::dataset::make("B", { 1, 1, 4, 7 })),
-framework::dataset::make("M0", { 4, 2, 4, 8 })),
-framework::dataset::make("K0", { 2, 2, 4, 8 })),
-framework::dataset::make("input_as_3d", { false, false, false, true })),
-m_value, k_value, b_value, m0_value, k0_value, input_as_3d_value)
-{
- constexpr DataType dt = DataType::F32;
-
- bool status = validate_zero_padding(m_value, k_value, b_value, m0_value, k0_value, 2, false, false, input_as_3d_value, dt);
- ARM_COMPUTE_EXPECT(status, framework::LogLevel::ERRORS);
-}
-
FIXTURE_DATA_TEST_CASE(S32, CLGEMMReshapeLHSMatrixFixture<int>, framework::DatasetMode::ALL,
combine(combine(combine(combine(combine(combine(combine(datasets::SmallGEMMReshape2DShapes(),
b_values),
diff --git a/tests/validation/CL/Im2Col.cpp b/tests/validation/CL/Im2Col.cpp
index e7e46b7bc5..a31aec4d0c 100644
--- a/tests/validation/CL/Im2Col.cpp
+++ b/tests/validation/CL/Im2Col.cpp
@@ -138,45 +138,6 @@ using CLIm2ColFixture = Im2ColValidationFixture<CLTensor, CLAccessor, CLIm2Col,
TEST_SUITE(NHWC)
-/** Test that there's no padding added to input or output as part of configure
- *
- * @note 2 elements processed per iteration
- *
- * Three tests will be run:
- * - Channels are multiple of elements processed
- * - Channels larger and non multiple of elements used
- * - Channels smaller and not multiple of elements used
- *
- */
-DATA_TEST_CASE(ValidateZeroPaddingNumElemsPerIterEqual2, framework::DatasetMode::ALL,
- combine(combine(combine(combine(combine(
- framework::dataset::make("InputChannel",
-{
- 2, 9, 1,
-}),
-framework::dataset::make("DataType", { DataType::F32 })),
-framework::dataset::make("Kernel", { Size2D(3, 4) })),
-framework::dataset::make("PadStride", { PadStrideInfo(2, 1, 1, 2) })),
-framework::dataset::make("QInfo", { QuantizationInfo() })),
-framework::dataset::make("DataLayout", { DataLayout::NHWC })),
-input_channel, data_type, conv_size, pad_stride_info, qinfo, data_layout)
-{
- TensorShape input_shape(input_channel, 10U, 30U, 3U);
- const bool has_bias = false;
-
- const auto input_info = TensorInfo(input_shape, 1, data_type, data_layout);
- const auto output_shape = compute_im2col_conv_shape(&input_info, conv_size, pad_stride_info, has_bias, Size2D(1U, 1U), true);
-
- CLTensor input = create_tensor<CLTensor>(input_shape, data_type, 1, qinfo, data_layout);
- CLTensor output = create_tensor<CLTensor>(output_shape, data_type, 1, qinfo, data_layout);
-
- CLIm2ColKernel im2col;
- im2col.configure(&input, &output, conv_size, pad_stride_info, has_bias);
-
- // Ensure there're no paddings added at all
- const bool no_padding = input.info()->padding().empty() && output.info()->padding().empty();
- ARM_COMPUTE_EXPECT(no_padding, framework::LogLevel::ERRORS);
-}
/** Test special kernel used for NHWC for 3x3 kernels
*
* @note 2 elements processed per iteration
diff --git a/tests/validation/CL/PoolingLayer.cpp b/tests/validation/CL/PoolingLayer.cpp
index 071b58323c..c79775e1e2 100644
--- a/tests/validation/CL/PoolingLayer.cpp
+++ b/tests/validation/CL/PoolingLayer.cpp
@@ -85,39 +85,6 @@ const auto pool_data_layout_dataset = framework::datas
const auto pool_fp_mixed_precision_dataset = framework::dataset::make("FpMixedPrecision", { true, false });
-/** Zero padding test */
-bool validate_zero_padding(unsigned int width, DataType data_type)
-{
- const PoolingLayerInfo pool_info(PoolingType::MAX, Size2D(2U, 2U), DataLayout::NHWC);
-
- TensorShape shape(width, 23, 11, 1);
-
- // Create tensors
- CLTensor src = create_tensor<CLTensor>(shape, data_type);
- CLTensor idx;
- CLTensor dst;
-
- src.info()->set_quantization_info(QuantizationInfo(1.f / 256.f, 0));
- dst.info()->set_quantization_info(QuantizationInfo(1.f / 256.f, 0));
-
- CLPoolingLayer pool;
-
- if(is_data_type_quantized(data_type))
- {
- pool.configure(&src, &dst, pool_info, nullptr);
-
- // Padding can be added along rhs and bias's X dimension
- return src.info()->padding().empty() && dst.info()->padding().empty();
- }
- else
- {
- pool.configure(&src, &dst, pool_info, &idx);
-
- // Padding can be added along rhs and bias's X dimension
- return src.info()->padding().empty() && dst.info()->padding().empty() && idx.info()->padding().empty();
- }
-}
-
} // namespace
TEST_SUITE(CL)
@@ -159,26 +126,6 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
ARM_COMPUTE_EXPECT(bool(CLPoolingLayer::validate(&input_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), pool_info)) == expected, framework::LogLevel::ERRORS);
}
-/** Validate zero padding tests
- *
- * A series of validation tests to check that no padding is added as part of configuration for 4 different scenarios.
- *
- * Checks performed in order:
- * - First dimension multiple of 16
- * - First dimension non-multiple of 16
- * - First dimension less than 16 (vec_size for qasymm8) but multiple
- * - First dimension less than 16 (vec_size for qasymm8) non-multiple
- * - Tensor with only one element
- */
-DATA_TEST_CASE(ValidateZeroPadding, framework::DatasetMode::ALL, zip(
-framework::dataset::make("Width", { 32U, 37U, 12U, 13U, 1U }),
-framework::dataset::make("DataType", { DataType::F32, DataType::QASYMM8 })),
-width, data_type)
-{
- bool status = validate_zero_padding(width, data_type);
- ARM_COMPUTE_EXPECT(status, framework::LogLevel::ERRORS);
-}
-
// clang-format on
// *INDENT-ON*
diff --git a/tests/validation/CL/Winograd.cpp b/tests/validation/CL/Winograd.cpp
index f206e92493..750799ace2 100644
--- a/tests/validation/CL/Winograd.cpp
+++ b/tests/validation/CL/Winograd.cpp
@@ -183,28 +183,6 @@ const auto ActivationFunctionsSmallDataset = framework::dataset::make("Activatio
ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::SOFT_RELU)
});
-/** Zero padding test */
-bool validate_zero_padding(unsigned int width, unsigned height)
-{
- TensorShape shape(width, height, 11, 1);
-
- WinogradInfo winograd_info = WinogradInfo(Size2D(4U, 4U), Size2D(5U, 5U), Size2D(width, height), PadStrideInfo(), DataLayout::NHWC);
-
- // Create tensors
- CLTensor src = create_tensor<CLTensor>(shape, DataType::F32, 1, QuantizationInfo(), DataLayout::NHWC);
- CLTensor dst;
-
- src.info()->set_quantization_info(QuantizationInfo(1.f / 256.f, 0));
- dst.info()->set_quantization_info(QuantizationInfo(1.f / 256.f, 0));
-
- CLWinogradInputTransform input_transform;
-
- input_transform.configure(&src, &dst, winograd_info);
-
- // Padding can be added along rhs and bias's X dimension
- return src.info()->padding().empty() && dst.info()->padding().empty();
-}
-
} // namespace
using namespace arm_compute::misc::shape_calculator;
@@ -214,19 +192,6 @@ TEST_SUITE(Winograd)
TEST_SUITE(InputTransform)
-/** Validate zero padding tests
- *
- * A series of validation tests to check that no padding is added
- */
-DATA_TEST_CASE(ValidateZeroPadding, framework::DatasetMode::ALL, zip(
-framework::dataset::make("Width", { 32U, 37U, 12U, 1U }),
-framework::dataset::make("Height", { 13U, 27U, 19U, 1U })),
-width, height)
-{
- bool status = validate_zero_padding(width, height);
- ARM_COMPUTE_EXPECT(status, framework::LogLevel::ERRORS);
-}
-
DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
framework::dataset::make("InputInfo",{
TensorInfo(TensorShape(53U, 21U, 5U, 3U), 1, DataType::F16), // F16 not supported