Diffstat (limited to 'src/core/CL')
-rw-r--r--  src/core/CL/cl_kernels/activation_layer.cl                                  |  2
-rw-r--r--  src/core/CL/cl_kernels/activation_layer_quant.cl                            |  4
-rw-r--r--  src/core/CL/kernels/CLActivationLayerKernel.cpp                             |  4
-rw-r--r--  src/core/CL/kernels/CLBatchConcatenateLayerKernel.cpp                       |  4
-rw-r--r--  src/core/CL/kernels/CLBatchNormalizationLayerKernel.cpp                     | 17
-rw-r--r--  src/core/CL/kernels/CLDepthConcatenateLayerKernel.cpp                       |  4
-rw-r--r--  src/core/CL/kernels/CLElementwiseOperationKernel.cpp                        |  4
-rw-r--r--  src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleByFixedPointKernel.cpp  |  4
-rw-r--r--  src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleByFloatKernel.cpp       |  4
-rw-r--r--  src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleKernel.cpp              |  4
-rw-r--r--  src/core/CL/kernels/CLGEMMMatrixMultiplyNativeKernel.cpp                    |  3
-rw-r--r--  src/core/CL/kernels/CLGEMMMatrixMultiplyReshapedKernel.cpp                  |  3
-rw-r--r--  src/core/CL/kernels/CLGEMMReshapeLHSMatrixKernel.cpp                        |  4
-rw-r--r--  src/core/CL/kernels/CLIm2ColKernel.cpp                                      |  5
-rw-r--r--  src/core/CL/kernels/CLPoolingLayerKernel.cpp                                |  4
-rw-r--r--  src/core/CL/kernels/CLWinogradInputTransformKernel.cpp                      |  4
16 files changed, 63 insertions, 11 deletions
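Every hunk below applies the same guard: snapshot the padding of each tensor the kernel touches at the start of configure(), run the normal configuration, and assert at the end that configuration did not silently request extra padding. A minimal sketch of the pattern, assuming the get_padding_info()/has_padding_changed() helpers exactly as they are used in the hunks (the kernel name here is hypothetical):

    // Hypothetical kernel illustrating the padding-change guard added by this patch.
    void CLExampleKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output)
    {
        ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);

        // Record the current padding of every tensor this kernel may touch.
        auto padding_info = get_padding_info({ input, output });

        // ... build options, kernel creation, window configuration ...

        // Trips (in debug builds) if any recorded tensor's padding changed,
        // i.e. if the kernel is not genuinely padding-free.
        ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
    }

Kernels that are only padding-free in NHWC (batch normalization, im2col, pooling, Winograd input transform) guard the assertion with a data-layout check, as the corresponding hunks show.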
diff --git a/src/core/CL/cl_kernels/activation_layer.cl b/src/core/CL/cl_kernels/activation_layer.cl
index 174b75398d..bc2c99b6c8 100644
--- a/src/core/CL/cl_kernels/activation_layer.cl
+++ b/src/core/CL/cl_kernels/activation_layer.cl
@@ -33,7 +33,7 @@
*
* @note Datatype should be given as a preprocessor argument using -DDATA_TYPE=type. e.g. -DDATA_TYPE=short
* @note Vector size should be given as a preprocessor argument using -DVEC_SIZE=size. e.g. -DVEC_SIZE=16
- * @note Leftover vector size has to be passed at compile time using -DVEC_SIZE_LEFTOVER. e.g. -DVEC_SIZE=3. It is defined as the remainder between the input's first dimension and VEC_SIZE
+ * @note Leftover vector size has to be passed at compile time using -DVEC_SIZE_LEFTOVER. e.g. -DVEC_SIZE_LEFTOVER=3. It is defined as the remainder between the input's first dimension and VEC_SIZE
* @note Activation function should be given as a preprocessor argument using -DACT=name. e.g. -DACT=TANH
* @note A, B variables required by some activation functions are set using -DA_VAL= and -DB_VAL= respectively.
*
diff --git a/src/core/CL/cl_kernels/activation_layer_quant.cl b/src/core/CL/cl_kernels/activation_layer_quant.cl
index c031c86a5e..66261019ab 100644
--- a/src/core/CL/cl_kernels/activation_layer_quant.cl
+++ b/src/core/CL/cl_kernels/activation_layer_quant.cl
@@ -36,7 +36,7 @@
*
* @note Datatype should be given as a preprocessor argument using -DDATA_TYPE=type. e.g. -DDATA_TYPE=short
* @note Vector size should be given as a preprocessor argument using -DVEC_SIZE=size. e.g. -DVEC_SIZE=16
- * @note Leftover vector size has to be passed at compile time using -DVEC_SIZE_LEFTOVER. e.g. -DVEC_SIZE=3. It is defined as the remainder between the input's first dimension and VEC_SIZE
+ * @note Leftover vector size has to be passed at compile time using -DVEC_SIZE_LEFTOVER. e.g. -DVEC_SIZE_LEFTOVER=3. It is defined as the remainder between the input's first dimension and VEC_SIZE
* @note A, B variables required by some activation functions are set using -DA_VAL= and -DB_VAL= respectively.
* @note Quantization scales of the input/output tensors are passed in with -DS1_VAL= and -DS2_VAL= respectively.
* @note Quantization offsets of the input/output tensors are passed in only if asymmetric with -DO1_VAL= and -DO2_VAL= respectively.
@@ -108,7 +108,7 @@ __kernel void activation_layer_quant_f32(
*
* @note Datatype should be given as a preprocessor argument using -DDATA_TYPE=type. e.g. -DDATA_TYPE=short
* @note Vector size should be given as a preprocessor argument using -DVEC_SIZE=size. e.g. -DVEC_SIZE=16
- * @note Leftover vector size has to be passed at compile time using -DVEC_SIZE_LEFTOVER. e.g. -DVEC_SIZE=3. It is defined as the remainder between the input's first dimension and VEC_SIZE
+ * @note Leftover vector size has to be passed at compile time using -DVEC_SIZE_LEFTOVER. e.g. -DVEC_SIZE_LEFTOVER=3. It is defined as the remainder between the input's first dimension and VEC_SIZE
* @note Activation function should be given as a preprocessor argument using -DACT=name. e.g. -DACT=TANH
* @note A, B variables required by some activation functions are set using -DA_VAL= and -DB_VAL= respectively.
* @note Quantization scales of the input/output tensors are passed in with -DS1_VAL= and -DS2_VAL= respectively.
diff --git a/src/core/CL/kernels/CLActivationLayerKernel.cpp b/src/core/CL/kernels/CLActivationLayerKernel.cpp
index 8ddf8d8f9e..9f9538cb76 100644
--- a/src/core/CL/kernels/CLActivationLayerKernel.cpp
+++ b/src/core/CL/kernels/CLActivationLayerKernel.cpp
@@ -91,6 +91,8 @@ void CLActivationLayerKernel::configure(const CLCompileContext &compile_context,
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input);
+ auto padding_info = get_padding_info({ input, output });
+
_run_in_place = (output == nullptr) || (output == input);
if(output != nullptr)
@@ -207,6 +209,8 @@ void CLActivationLayerKernel::configure(const CLCompileContext &compile_context,
_config_id += support::cpp11::to_string(input->dimension(0));
_config_id += "_";
_config_id += support::cpp11::to_string(input->dimension(1));
+
+ ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
}
Status CLActivationLayerKernel::validate(const ITensorInfo *input, const ITensorInfo *output, const ActivationLayerInfo &act_info)
diff --git a/src/core/CL/kernels/CLBatchConcatenateLayerKernel.cpp b/src/core/CL/kernels/CLBatchConcatenateLayerKernel.cpp
index 7e9424f58b..ccd6a5a0fc 100644
--- a/src/core/CL/kernels/CLBatchConcatenateLayerKernel.cpp
+++ b/src/core/CL/kernels/CLBatchConcatenateLayerKernel.cpp
@@ -65,6 +65,8 @@ void CLBatchConcatenateLayerKernel::configure(const CLCompileContext &compile_co
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input, batch_offset, output));
+ auto padding_info = get_padding_info({ input, output });
+
_batch_offset = batch_offset;
const unsigned int num_elems_processed_per_iteration = adjust_vec_size(16 / input->element_size(), input->dimension(0));
@@ -109,6 +111,8 @@ void CLBatchConcatenateLayerKernel::configure(const CLCompileContext &compile_co
_config_id += support::cpp11::to_string(input->dimension(2));
_config_id += "_";
_config_id += support::cpp11::to_string(input->dimension(3));
+
+ ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
}
Status CLBatchConcatenateLayerKernel::validate(const arm_compute::ITensorInfo *input,
diff --git a/src/core/CL/kernels/CLBatchNormalizationLayerKernel.cpp b/src/core/CL/kernels/CLBatchNormalizationLayerKernel.cpp
index 9aeca3bcfe..44bdc6f587 100644
--- a/src/core/CL/kernels/CLBatchNormalizationLayerKernel.cpp
+++ b/src/core/CL/kernels/CLBatchNormalizationLayerKernel.cpp
@@ -123,13 +123,14 @@ void CLBatchNormalizationLayerKernel::configure(const CLCompileContext &compile_
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, mean, var);
- _input = input;
- _output = output;
- _mean = mean;
- _var = var;
- _beta = beta;
- _gamma = gamma;
- _epsilon = epsilon;
+ auto padding_info = get_padding_info({ input, output, mean, var, beta, gamma });
+ _input = input;
+ _output = output;
+ _mean = mean;
+ _var = var;
+ _beta = beta;
+ _gamma = gamma;
+ _epsilon = epsilon;
_run_in_place = (output == nullptr) || (output == input);
@@ -186,6 +187,8 @@ void CLBatchNormalizationLayerKernel::configure(const CLCompileContext &compile_
ICLKernel::configure_internal(win_config.second);
}
+ ARM_COMPUTE_ERROR_ON(input->info()->data_layout() == DataLayout::NHWC && has_padding_changed(padding_info));
+
_config_id = "batch_normalization_layer_";
_config_id += string_from_data_type(input->info()->data_type());
_config_id += "_";
diff --git a/src/core/CL/kernels/CLDepthConcatenateLayerKernel.cpp b/src/core/CL/kernels/CLDepthConcatenateLayerKernel.cpp
index 78adfd202f..eb5bfc2d86 100644
--- a/src/core/CL/kernels/CLDepthConcatenateLayerKernel.cpp
+++ b/src/core/CL/kernels/CLDepthConcatenateLayerKernel.cpp
@@ -64,6 +64,8 @@ void CLDepthConcatenateLayerKernel::configure(const CLCompileContext &compile_co
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input, depth_offset, output));
+ auto padding_info = get_padding_info({ input, output });
+
_depth_offset = depth_offset;
const unsigned int num_elems_processed_per_iteration = adjust_vec_size(16 / input->element_size(), input->dimension(0));
@@ -94,6 +96,8 @@ void CLDepthConcatenateLayerKernel::configure(const CLCompileContext &compile_co
// Set output valid region
output->set_valid_region(ValidRegion(Coordinates(), output->tensor_shape()));
+
+ ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
}
Status CLDepthConcatenateLayerKernel::validate(const arm_compute::ITensorInfo *input,
diff --git a/src/core/CL/kernels/CLElementwiseOperationKernel.cpp b/src/core/CL/kernels/CLElementwiseOperationKernel.cpp
index 896ee119c1..efb3fe79e3 100644
--- a/src/core/CL/kernels/CLElementwiseOperationKernel.cpp
+++ b/src/core/CL/kernels/CLElementwiseOperationKernel.cpp
@@ -333,11 +333,13 @@ void CLSaturatedArithmeticOperationKernel::configure(const CLCompileContext &com
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input1, input2, output);
ARM_COMPUTE_ERROR_THROW_ON(CLSaturatedArithmeticOperationKernel::validate(op, input1, input2, output, policy, act_info));
+ auto padding_info = get_padding_info({ input1, input2, output });
_policy = policy;
_op = op;
_act_info = act_info;
configure_common(compile_context, input1, input2, output);
+ ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
}
Status CLSaturatedArithmeticOperationKernel::validate(ArithmeticOperation op, const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ConvertPolicy &policy,
@@ -389,10 +391,12 @@ void CLArithmeticOperationKernel::configure(const CLCompileContext &compile_cont
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input1, input2, output);
ARM_COMPUTE_ERROR_THROW_ON(CLArithmeticOperationKernel::validate(op, input1, input2, output, act_info));
+ auto padding_info = get_padding_info({ input1, input2, output });
_op = op;
_act_info = act_info;
configure_common(compile_context, input1, input2, output);
+ ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
}
Status CLArithmeticOperationKernel::validate(ArithmeticOperation op, const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info)
diff --git a/src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleByFixedPointKernel.cpp b/src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleByFixedPointKernel.cpp
index d0f016879e..6a58d5e202 100644
--- a/src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleByFixedPointKernel.cpp
+++ b/src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleByFixedPointKernel.cpp
@@ -84,6 +84,8 @@ void CLGEMMLowpQuantizeDownInt32ScaleByFixedPointKernel::configure(const CLCompi
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), (bias != nullptr) ? bias->info() : nullptr, output->info(), info));
+ auto padding_info = get_padding_info({ input, bias, output });
+
// Output auto inizialitation if not yet initialized
auto_init_if_empty(*output->info(), input->info()->clone()->set_data_type(info->output_data_type));
@@ -116,6 +118,8 @@ void CLGEMMLowpQuantizeDownInt32ScaleByFixedPointKernel::configure(const CLCompi
// Configure kernel window
auto win = calculate_max_window(*output->info(), Steps(num_elems_processed_per_iteration));
ICLKernel::configure_internal(win);
+
+ ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
}
void CLGEMMLowpQuantizeDownInt32ScaleByFixedPointKernel::run(const Window &window, cl::CommandQueue &queue)
diff --git a/src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleByFloatKernel.cpp b/src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleByFloatKernel.cpp
index 1d29dfe4b3..a5888a5ded 100644
--- a/src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleByFloatKernel.cpp
+++ b/src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleByFloatKernel.cpp
@@ -94,6 +94,8 @@ void CLGEMMLowpQuantizeDownInt32ScaleByFloatKernel::configure(const CLCompileCon
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), (bias != nullptr) ? bias->info() : nullptr, output->info(), info));
+ auto padding_info = get_padding_info({ input, bias, output });
+
// Output auto inizialitation if not yet initialized
auto_init_if_empty(*output->info(), input->info()->clone()->set_data_type(info->output_data_type));
@@ -123,6 +125,8 @@ void CLGEMMLowpQuantizeDownInt32ScaleByFloatKernel::configure(const CLCompileCon
// Configure kernel window
Window win = calculate_max_window(*input->info(), Steps(num_elems_processed_per_iteration));
ICLKernel::configure_internal(win);
+
+ ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
}
void CLGEMMLowpQuantizeDownInt32ScaleByFloatKernel::run(const Window &window, cl::CommandQueue &queue)
diff --git a/src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleKernel.cpp b/src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleKernel.cpp
index d32d328fc2..7d4352479c 100644
--- a/src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleKernel.cpp
+++ b/src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleKernel.cpp
@@ -92,6 +92,8 @@ void CLGEMMLowpQuantizeDownInt32ScaleKernel::configure(const CLCompileContext &c
output->info(),
output_stage));
+ auto padding_info = get_padding_info({ input, bias, output });
+
// Output auto inizialitation if not yet initialized
auto_init_if_empty(*output->info(), input->info()->clone()->set_data_type(output_stage->output_data_type));
@@ -123,6 +125,8 @@ void CLGEMMLowpQuantizeDownInt32ScaleKernel::configure(const CLCompileContext &c
// Configure kernel window
Window win = calculate_max_window(*input->info(), Steps(num_elems_processed_per_iteration));
ICLKernel::configure_internal(win);
+
+ ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
}
void CLGEMMLowpQuantizeDownInt32ScaleKernel::run(const Window &window, cl::CommandQueue &queue)
diff --git a/src/core/CL/kernels/CLGEMMMatrixMultiplyNativeKernel.cpp b/src/core/CL/kernels/CLGEMMMatrixMultiplyNativeKernel.cpp
index f613937f54..387f1a4ebc 100644
--- a/src/core/CL/kernels/CLGEMMMatrixMultiplyNativeKernel.cpp
+++ b/src/core/CL/kernels/CLGEMMMatrixMultiplyNativeKernel.cpp
@@ -219,6 +219,7 @@ void CLGEMMMatrixMultiplyNativeKernel::configure(const CLCompileContext &compile
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input0->info(), input1->info(), (input2 != nullptr ? input2->info() : nullptr), output->info(), alpha, beta, lhs_info, rhs_info, gemm_info));
+ auto padding_info = get_padding_info({ input0, output });
_input0 = input0;
_input1 = input1;
_input2 = helpers::float_ops::is_zero(beta) ? nullptr : input2;
@@ -317,6 +318,8 @@ void CLGEMMMatrixMultiplyNativeKernel::configure(const CLCompileContext &compile
_config_id += support::cpp11::to_string(rhs_info.n0);
_config_id += "_";
_config_id += support::cpp11::to_string(rhs_info.k0);
+
+ ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
}
Status CLGEMMMatrixMultiplyNativeKernel::validate(const ITensorInfo *input0, const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, float alpha, float beta,
diff --git a/src/core/CL/kernels/CLGEMMMatrixMultiplyReshapedKernel.cpp b/src/core/CL/kernels/CLGEMMMatrixMultiplyReshapedKernel.cpp
index fb15b42fe2..23e18bac92 100644
--- a/src/core/CL/kernels/CLGEMMMatrixMultiplyReshapedKernel.cpp
+++ b/src/core/CL/kernels/CLGEMMMatrixMultiplyReshapedKernel.cpp
@@ -225,6 +225,7 @@ void CLGEMMMatrixMultiplyReshapedKernel::configure(const CLCompileContext &compi
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input0->info(), input1->info(), (input2 != nullptr ? input2->info() : nullptr), output->info(), alpha, beta, lhs_info, rhs_info, gemm_info));
+ auto padding_info = get_padding_info({ input0, output });
_input0 = input0;
_input1 = input1;
_input2 = helpers::float_ops::is_zero(beta) ? nullptr : input2;
@@ -328,6 +329,8 @@ void CLGEMMMatrixMultiplyReshapedKernel::configure(const CLCompileContext &compi
_config_id += support::cpp11::to_string(lhs_info.interleave);
_config_id += "_";
_config_id += support::cpp11::to_string(rhs_info.interleave);
+
+ ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
}
Status CLGEMMMatrixMultiplyReshapedKernel::validate(const ITensorInfo *input0, const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, float alpha, float beta,
diff --git a/src/core/CL/kernels/CLGEMMReshapeLHSMatrixKernel.cpp b/src/core/CL/kernels/CLGEMMReshapeLHSMatrixKernel.cpp
index 3e2fc79704..52510075b7 100644
--- a/src/core/CL/kernels/CLGEMMReshapeLHSMatrixKernel.cpp
+++ b/src/core/CL/kernels/CLGEMMReshapeLHSMatrixKernel.cpp
@@ -125,6 +125,8 @@ void CLGEMMReshapeLHSMatrixKernel::configure(const CLCompileContext &compile_con
// Perform validate step
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), lhs_info, reinterpret_input_as_3d));
+ auto padding_info = get_padding_info({ input });
+
_input = input;
_output = output;
_reinterpret_input_as_3d = reinterpret_input_as_3d;
@@ -180,6 +182,8 @@ void CLGEMMReshapeLHSMatrixKernel::configure(const CLCompileContext &compile_con
_config_id += support::cpp11::to_string(lhs_info.interleave);
_config_id += "_";
_config_id += support::cpp11::to_string(lhs_info.transpose);
+
+ ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
}
Status CLGEMMReshapeLHSMatrixKernel::validate(const ITensorInfo *input, const ITensorInfo *output, const GEMMLHSMatrixInfo &lhs_info, bool reinterpret_input_as_3d)
diff --git a/src/core/CL/kernels/CLIm2ColKernel.cpp b/src/core/CL/kernels/CLIm2ColKernel.cpp
index 0789cdc8a7..07309de83c 100644
--- a/src/core/CL/kernels/CLIm2ColKernel.cpp
+++ b/src/core/CL/kernels/CLIm2ColKernel.cpp
@@ -317,7 +317,8 @@ void CLIm2ColKernel::configure(const CLCompileContext &compile_context, const IC
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), kernel_dims, conv_info, has_bias, dilation, num_groups));
- _data_layout = input->info()->data_layout();
+ auto padding_info = get_padding_info({ input, output });
+ _data_layout = input->info()->data_layout();
const unsigned int width_idx = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::WIDTH);
const unsigned int height_idx = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::HEIGHT);
@@ -358,6 +359,8 @@ void CLIm2ColKernel::configure(const CLCompileContext &compile_context, const IC
_config_id += support::cpp11::to_string(output->info()->dimension(1));
_config_id += "_";
_config_id += lower_string(string_from_data_layout(_data_layout));
+
+ ARM_COMPUTE_ERROR_ON(input->info()->data_layout() == DataLayout::NHWC && has_padding_changed(padding_info));
}
Status CLIm2ColKernel::validate(const ITensorInfo *input, const ITensorInfo *output, const Size2D &kernel_dims, const PadStrideInfo &conv_info, bool has_bias, const Size2D &dilation,
diff --git a/src/core/CL/kernels/CLPoolingLayerKernel.cpp b/src/core/CL/kernels/CLPoolingLayerKernel.cpp
index 905610c31f..79843cd299 100644
--- a/src/core/CL/kernels/CLPoolingLayerKernel.cpp
+++ b/src/core/CL/kernels/CLPoolingLayerKernel.cpp
@@ -216,6 +216,8 @@ void CLPoolingLayerKernel::configure(const CLCompileContext &compile_context, co
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
+ auto padding_info = get_padding_info({ input, output, indices });
+
// Set instance variables
_input = input;
_output = output;
@@ -419,6 +421,8 @@ void CLPoolingLayerKernel::configure(const CLCompileContext &compile_context, co
_config_id += support::cpp11::to_string(output->info()->dimension(idx_channel));
_config_id += "_";
_config_id += lower_string(string_from_data_layout(input->info()->data_layout()));
+
+ ARM_COMPUTE_ERROR_ON(input->info()->data_layout() == DataLayout::NHWC && has_padding_changed(padding_info));
}
Status CLPoolingLayerKernel::validate(const ITensorInfo *input, const ITensorInfo *output, const PoolingLayerInfo &pool_info, const ITensorInfo *indices)
diff --git a/src/core/CL/kernels/CLWinogradInputTransformKernel.cpp b/src/core/CL/kernels/CLWinogradInputTransformKernel.cpp
index 6f695c93db..695e1cbbf1 100644
--- a/src/core/CL/kernels/CLWinogradInputTransformKernel.cpp
+++ b/src/core/CL/kernels/CLWinogradInputTransformKernel.cpp
@@ -115,6 +115,8 @@ void CLWinogradInputTransformKernel::configure(const CLCompileContext &compile_c
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), winograd_info));
+ auto padding_info = get_padding_info({ input, output });
+
const PadStrideInfo conv_info = winograd_info.convolution_info;
const Size2D output_tile_size = winograd_info.output_tile_size;
const Size2D kernel_size = winograd_info.kernel_size;
@@ -204,6 +206,8 @@ void CLWinogradInputTransformKernel::configure(const CLCompileContext &compile_c
ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
ICLKernel::configure_internal(win_config.second, cl::NDRange(1, 1, 8));
+ ARM_COMPUTE_ERROR_ON((input->info()->data_layout() == DataLayout::NHWC) && has_padding_changed(padding_info));
+
_config_id = kernel_name;
_config_id += support::cpp11::to_string(input->info()->dimension(0));
_config_id += "_";