author     Giorgio Arena <giorgio.arena@arm.com>    2020-10-07 16:03:43 +0100
committer  Giorgio Arena <giorgio.arena@arm.com>    2020-10-09 13:17:54 +0000
commit     530488472ae961b53520e7cfe4eb12f35163d93e
tree       15279ebbd77507f00dc1a36c6c8445cf338506ce
parent     c226853f80d53619a2f49e646635e04ee0885c3b
download   ComputeLibrary-530488472ae961b53520e7cfe4eb12f35163d93e.tar.gz
COMPMID-3704 Remove OpenCL padding: CLBatchConcatenateLayerKernel
COMPMID-3709 Remove OpenCL padding: CLDepthConcatenateLayerKernel

Signed-off-by: Giorgio Arena <giorgio.arena@arm.com>
Change-Id: Iaea4fafd5d0f081fd5b45b0f6945302dc3365bd9
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/4105
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Gian Marco Iodice <gianmarco.iodice@arm.com>
-rw-r--r--   src/core/CL/cl_kernels/activation_layer.cl              |  1
-rw-r--r--   src/core/CL/cl_kernels/activation_layer_quant.cl        |  2
-rw-r--r--   src/core/CL/cl_kernels/concatenate.cl                   | 17
-rw-r--r--   src/core/CL/kernels/CLBatchConcatenateLayerKernel.cpp   | 30
-rw-r--r--   src/core/CL/kernels/CLDepthConcatenateLayerKernel.cpp   | 29
-rw-r--r--   tests/datasets/ShapeDatasets.h                          |  1
-rw-r--r--   tests/validation/CL/BatchConcatenateLayer.cpp           | 52

7 files changed, 77 insertions(+), 55 deletions(-)
diff --git a/src/core/CL/cl_kernels/activation_layer.cl b/src/core/CL/cl_kernels/activation_layer.cl
index 499378c87f..fca4e85255 100644
--- a/src/core/CL/cl_kernels/activation_layer.cl
+++ b/src/core/CL/cl_kernels/activation_layer.cl
@@ -33,6 +33,7 @@
*
* @note Datatype should be given as a preprocessor argument using -DDATA_TYPE=type. e.g. -DDATA_TYPE=short
* @note Vector size should be given as a preprocessor argument using -DVEC_SIZE=size. e.g. -DVEC_SIZE=16
+ * @note Leftover vector size has to be passed at compile time using -DVEC_SIZE_LEFTOVER. e.g. -DVEC_SIZE_LEFTOVER=3. It is defined as the remainder of the input's first dimension divided by VEC_SIZE
* @note Activation function should be given as a preprocessor argument using -DACT=name. e.g. -DACT=TANH
* @note A, B variables required by some activation functions are set using -DA_VAL= and -DB_VAL= respectively.
*
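To make the new option concrete: with -DVEC_SIZE=16 and an input whose first dimension is 27, the host would also pass -DVEC_SIZE_LEFTOVER=11, since 27 % 16 = 11; this matches the input->dimension(0) % num_elems_processed_per_iteration expression added to the configure() functions further down.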
diff --git a/src/core/CL/cl_kernels/activation_layer_quant.cl b/src/core/CL/cl_kernels/activation_layer_quant.cl
index d8f56c093a..dbaefacc13 100644
--- a/src/core/CL/cl_kernels/activation_layer_quant.cl
+++ b/src/core/CL/cl_kernels/activation_layer_quant.cl
@@ -36,6 +36,7 @@
*
* @note Datatype should be given as a preprocessor argument using -DDATA_TYPE=type. e.g. -DDATA_TYPE=short
* @note Vector size should be given as a preprocessor argument using -DVEC_SIZE=size. e.g. -DVEC_SIZE=16
+ * @note Leftover vector size has to be passed at compile time using -DVEC_SIZE_LEFTOVER. e.g. -DVEC_SIZE_LEFTOVER=3. It is defined as the remainder of the input's first dimension divided by VEC_SIZE
* @note A, B variables required by some activation functions are set using -DA_VAL= and -DB_VAL= respectively.
* @note Quantization scales of the input/output tensors are passed in with -DS1_VAL= and -DS2_VAL= respectively.
* @note Quantization offsets of the input/output tensors are passed in only if asymmetric with -DO1_VAL= and -DO2_VAL= respectively.
@@ -107,6 +108,7 @@ __kernel void activation_layer_quant_f32(
*
* @note Datatype should be given as a preprocessor argument using -DDATA_TYPE=type. e.g. -DDATA_TYPE=short
* @note Vector size should be given as a preprocessor argument using -DVEC_SIZE=size. e.g. -DVEC_SIZE=16
+ * @note Leftover vector size has to be passed at compile time using -DVEC_SIZE_LEFTOVER. e.g. -DVEC_SIZE_LEFTOVER=3. It is defined as the remainder of the input's first dimension divided by VEC_SIZE
* @note Activation function should be given as a preprocessor argument using -DACT=name. e.g. -DACT=TANH
* @note A, B variables required by some activation functions are set using -DA_VAL= and -DB_VAL= respectively.
* @note Quantization scales of the input/output tensors are passed in with -DS1_VAL= and -DS2_VAL= respectively.
diff --git a/src/core/CL/cl_kernels/concatenate.cl b/src/core/CL/cl_kernels/concatenate.cl
index 4281e675d7..0b211a6d1f 100644
--- a/src/core/CL/cl_kernels/concatenate.cl
+++ b/src/core/CL/cl_kernels/concatenate.cl
@@ -383,10 +383,13 @@ __kernel void concatenate_height(
#endif /* defined(HEIGHT_OFFSET) && defined(DEPTH) */
+#if defined(VEC_SIZE_LEFTOVER)
+
/** This kernel concatenates the input tensor into the output tensor along the third dimension
*
* @note The data type has to be passed at compile time using -DDATA_TYPE. i.e. -DDATA_TYPE=float
* @note Vector size has to be passed at compile time using -DVEC_SIZE. i.e. -DVEC_SIZE=16
+ * @note Leftover vector size has to be passed at compile time using -DVEC_SIZE_LEFTOVER. e.g. -DVEC_SIZE_LEFTOVER=3. It is defined as the remainder of the input's first dimension divided by VEC_SIZE
*
* @param[in] src_ptr Pointer to the source tensor. Supported data types: All
* @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
@@ -411,17 +414,19 @@ __kernel void concatenate(
TENSOR3D_DECLARATION(dst),
int offset)
{
- Tensor3D src = CONVERT_TO_TENSOR3D_STRUCT(src);
- Tensor3D dst = CONVERT_TO_TENSOR3D_STRUCT(dst);
+ uint x_offs = max((int)(get_global_id(0) * VEC_SIZE * sizeof(DATA_TYPE) - (VEC_SIZE - VEC_SIZE_LEFTOVER) % VEC_SIZE * sizeof(DATA_TYPE)), 0);
+
+ __global uchar *src_addr = src_ptr + src_offset_first_element_in_bytes + x_offs + get_global_id(1) * src_stride_y + get_global_id(2) * src_stride_z;
+ __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + x_offs + get_global_id(1) * dst_stride_y + get_global_id(2) * dst_stride_z;
VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
- source_values = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)src.ptr);
+ source_values0 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)src_addr);
#if defined(OFFSET_IN1) && defined(OFFSET_OUT) && defined(SCALE_IN1) && defined(SCALE_OUT)
- source_values = requantize(source_values, OFFSET_IN1, OFFSET_OUT, SCALE_IN1, SCALE_OUT);
+ source_values0 = requantize(source_values0, OFFSET_IN1, OFFSET_OUT, SCALE_IN1, SCALE_OUT);
#endif /* defined(OFFSET_IN1) && defined(OFFSET_OUT) && defined(SCALE_IN1) && defined(SCALE_OUT) */
- VSTORE(VEC_SIZE)
- (source_values, 0, (__global DATA_TYPE *)(dst.ptr + offset));
+ STORE_VECTOR_SELECT(source_values, DATA_TYPE, dst_addr + offset, VEC_SIZE, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0)
}
+#endif /* defined(VEC_SIZE_LEFTOVER) */
#endif /* defined(DATA_TYPE) && defined(VEC_SIZE) */
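The rewritten kernel avoids padding by letting work-item 0 store only VEC_SIZE_LEFTOVER elements and shifting every later work-item back by (VEC_SIZE - VEC_SIZE_LEFTOVER), so all remaining stores are full vectors that end exactly at the row boundary. A minimal host-side sketch of that index math (plain C++; the width of 19 and the program itself are purely illustrative and not part of the patch):

    #include <algorithm>
    #include <cstdio>

    int main()
    {
        const int width             = 19;                // hypothetical first dimension of the input
        const int vec_size          = 16;                // VEC_SIZE chosen for the kernel
        const int vec_size_leftover = width % vec_size;  // 3, i.e. -DVEC_SIZE_LEFTOVER=3

        const int num_work_items = (width + vec_size - 1) / vec_size; // 2 work-items along X
        for(int gid = 0; gid < num_work_items; ++gid)
        {
            // Same expression as x_offs in concatenate.cl, minus the sizeof(DATA_TYPE) scaling
            const int x_offs = std::max(gid * vec_size - (vec_size - vec_size_leftover) % vec_size, 0);
            // STORE_VECTOR_SELECT lets work-item 0 store only the leftover lanes; the rest store full vectors
            const int lanes  = (vec_size_leftover != 0 && gid == 0) ? vec_size_leftover : vec_size;
            std::printf("gid %d writes elements [%d, %d)\n", gid, x_offs, x_offs + lanes);
        }
        // Prints: gid 0 writes elements [0, 3) and gid 1 writes elements [3, 19) - no out-of-bounds store
        return 0;
    }

This in-kernel leftover handling is what allows the two configure() functions below to drop validate_and_configure_window() and its "Insufficient Padding!" error path.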
diff --git a/src/core/CL/kernels/CLBatchConcatenateLayerKernel.cpp b/src/core/CL/kernels/CLBatchConcatenateLayerKernel.cpp
index feebe01cdb..dd9c234c56 100644
--- a/src/core/CL/kernels/CLBatchConcatenateLayerKernel.cpp
+++ b/src/core/CL/kernels/CLBatchConcatenateLayerKernel.cpp
@@ -38,25 +38,6 @@ namespace arm_compute
{
namespace
{
-std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, unsigned int batch_offset, ITensorInfo *output)
-{
- ARM_COMPUTE_UNUSED(batch_offset);
-
- const unsigned int num_elems_processed_per_iteration = 16 / input->element_size();
-
- // The window needs to be based on output, except for the batch size
- Window win = calculate_max_window(*output, Steps(num_elems_processed_per_iteration));
- // The total batch size is the concatenation of the batch size of the inputs
- win.set(3, Window::Dimension(0, input->tensor_shape()[3], 1));
-
- AccessWindowHorizontal input_access(input, 0, num_elems_processed_per_iteration);
- AccessWindowHorizontal output_access(output, 0, num_elems_processed_per_iteration);
- bool window_changed = update_window_and_padding(win, input_access, output_access);
- output_access.set_valid_region(win, ValidRegion(Coordinates(), output->tensor_shape()));
-
- Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
- return std::make_pair(err, win);
-}
Status validate_arguments(const ITensorInfo *input, unsigned int batch_offset, const ITensorInfo *output)
{
ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
@@ -86,12 +67,13 @@ void CLBatchConcatenateLayerKernel::configure(const CLCompileContext &compile_co
_batch_offset = batch_offset;
- const unsigned int num_elems_processed_per_iteration = 16 / input->element_size();
+ const unsigned int num_elems_processed_per_iteration = adjust_vec_size(16 / input->element_size(), input->dimension(0));
// Add build options
CLBuildOptions build_opts;
build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(input->data_type()));
build_opts.add_option("-DVEC_SIZE=" + support::cpp11::to_string(num_elems_processed_per_iteration));
+ build_opts.add_option("-DVEC_SIZE_LEFTOVER=" + support::cpp11::to_string(input->dimension(0) % num_elems_processed_per_iteration));
if(is_data_type_quantized_asymmetric(input->data_type()) && input->quantization_info() != output->quantization_info())
{
const UniformQuantizationInfo iq_info = input->quantization_info().uniform();
@@ -107,10 +89,9 @@ void CLBatchConcatenateLayerKernel::configure(const CLCompileContext &compile_co
_kernel = create_kernel(compile_context, "concatenate", build_opts.options());
// Configure kernel window
- auto win_config = validate_and_configure_window(input, batch_offset, output);
- ARM_COMPUTE_ERROR_THROW_ON(std::get<0>(win_config));
-
- ICLKernel::configure_internal(std::get<1>(win_config));
+ auto win = calculate_max_window(*output, Steps(num_elems_processed_per_iteration));
+ win.set(3, Window::Dimension(0, input->tensor_shape()[3], 1));
+ ICLKernel::configure_internal(win);
// Set output valid region
output->set_valid_region(ValidRegion(Coordinates(), output->tensor_shape()));
@@ -135,7 +116,6 @@ Status CLBatchConcatenateLayerKernel::validate(const arm_compute::ITensorInfo *i
const arm_compute::ITensorInfo *output)
{
ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, batch_offset, output));
- ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(), batch_offset, output->clone().get()).first);
return Status{};
}
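Both configure() methods now pick the vector size with adjust_vec_size() so that VEC_SIZE never exceeds the input's first dimension, and export the remainder as VEC_SIZE_LEFTOVER. A rough sketch of the intended relationship (adjust_vec_size_sketch below is my own approximation of the helper's behaviour, not the library's implementation):

    #include <cassert>

    // Hypothetical stand-in for arm_compute's adjust_vec_size(): shrink the preferred
    // vector size until it no longer exceeds the first dimension of the tensor.
    unsigned int adjust_vec_size_sketch(unsigned int preferred_vec_size, unsigned int dim0)
    {
        unsigned int vec_size = preferred_vec_size;
        while(vec_size > 1 && vec_size > dim0)
        {
            vec_size /= 2;
        }
        return vec_size;
    }

    int main()
    {
        // e.g. an FP32 input (element_size == 4) whose first dimension is 7
        const unsigned int preferred = 16 / 4;                               // 16 bytes per iteration
        const unsigned int vec_size  = adjust_vec_size_sketch(preferred, 7); // stays at 4
        const unsigned int leftover  = 7 % vec_size;                         // 3 -> -DVEC_SIZE_LEFTOVER=3
        assert(vec_size == 4 && leftover == 3);
        return 0;
    }

The CLDepthConcatenateLayerKernel change that follows applies exactly the same pattern, only fixing the window's Z dimension to the input's depth instead of its batch size.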
diff --git a/src/core/CL/kernels/CLDepthConcatenateLayerKernel.cpp b/src/core/CL/kernels/CLDepthConcatenateLayerKernel.cpp
index 5978a0223f..87067cf717 100644
--- a/src/core/CL/kernels/CLDepthConcatenateLayerKernel.cpp
+++ b/src/core/CL/kernels/CLDepthConcatenateLayerKernel.cpp
@@ -38,24 +38,6 @@ namespace arm_compute
{
namespace
{
-std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, unsigned int depth_offset, ITensorInfo *output)
-{
- ARM_COMPUTE_UNUSED(depth_offset);
-
- const unsigned int num_elems_processed_per_iteration = 16 / input->element_size();
-
- // The window needs to be based on input as we copy all the depths of input
- Window win = calculate_max_window(*output, Steps(num_elems_processed_per_iteration));
- win.set(Window::DimZ, Window::Dimension(0, input->tensor_shape().z(), 1));
-
- AccessWindowHorizontal input_access(input, 0, num_elems_processed_per_iteration);
- AccessWindowHorizontal output_access(output, 0, num_elems_processed_per_iteration);
- bool window_changed = update_window_and_padding(win, input_access, output_access);
- output_access.set_valid_region(win, ValidRegion(Coordinates(), output->tensor_shape()));
-
- Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
- return std::make_pair(err, win);
-}
Status validate_arguments(const ITensorInfo *input, unsigned int depth_offset, const ITensorInfo *output)
{
ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
@@ -84,12 +66,13 @@ void CLDepthConcatenateLayerKernel::configure(const CLCompileContext &compile_co
_depth_offset = depth_offset;
- const unsigned int num_elems_processed_per_iteration = 16 / input->element_size();
+ const unsigned int num_elems_processed_per_iteration = adjust_vec_size(16 / input->element_size(), input->dimension(0));
// Add build options
CLBuildOptions build_opts;
build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(input->data_type()));
build_opts.add_option("-DVEC_SIZE=" + support::cpp11::to_string(num_elems_processed_per_iteration));
+ build_opts.add_option("-DVEC_SIZE_LEFTOVER=" + support::cpp11::to_string(input->dimension(0) % num_elems_processed_per_iteration));
if(is_data_type_quantized_asymmetric(input->data_type()) && input->quantization_info() != output->quantization_info())
{
const UniformQuantizationInfo iq_info = input->quantization_info().uniform();
@@ -105,10 +88,9 @@ void CLDepthConcatenateLayerKernel::configure(const CLCompileContext &compile_co
_kernel = create_kernel(compile_context, "concatenate", build_opts.options());
// Configure kernel window
- auto win_config = validate_and_configure_window(input, depth_offset, output);
- ARM_COMPUTE_ERROR_THROW_ON(std::get<0>(win_config));
-
- ICLKernel::configure_internal(std::get<1>(win_config));
+ auto win = calculate_max_window(*output, Steps(num_elems_processed_per_iteration));
+ win.set(Window::DimZ, Window::Dimension(0, input->tensor_shape().z(), 1));
+ ICLKernel::configure_internal(win);
// Set output valid region
output->set_valid_region(ValidRegion(Coordinates(), output->tensor_shape()));
@@ -119,7 +101,6 @@ Status CLDepthConcatenateLayerKernel::validate(const arm_compute::ITensorInfo *i
const arm_compute::ITensorInfo *output)
{
ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, depth_offset, output));
- ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(), depth_offset, output->clone().get()).first);
return Status{};
}
diff --git a/tests/datasets/ShapeDatasets.h b/tests/datasets/ShapeDatasets.h
index 565982efb6..1f7bdb2232 100644
--- a/tests/datasets/ShapeDatasets.h
+++ b/tests/datasets/ShapeDatasets.h
@@ -121,6 +121,7 @@ public:
TensorShape{ 2U, 5U, 4U },
TensorShape{ 7U, 7U, 5U },
+ TensorShape{ 16U, 16U, 5U },
TensorShape{ 27U, 13U, 37U },
})
{
diff --git a/tests/validation/CL/BatchConcatenateLayer.cpp b/tests/validation/CL/BatchConcatenateLayer.cpp
index 81b6ca109a..e5de3a75c7 100644
--- a/tests/validation/CL/BatchConcatenateLayer.cpp
+++ b/tests/validation/CL/BatchConcatenateLayer.cpp
@@ -39,6 +39,37 @@ namespace test
{
namespace validation
{
+namespace
+{
+/** Zero padding test */
+bool validate_zero_padding(unsigned int width, unsigned int height, unsigned int channels, unsigned int batches, DataType data_type)
+{
+ TensorShape src_shape(width, height, channels, batches);
+ TensorShape dst_shape(width, height, channels, batches * 2);
+
+ // Create tensors
+ CLTensor src0 = create_tensor<CLTensor>(src_shape, data_type);
+ CLTensor src1 = create_tensor<CLTensor>(src_shape, data_type);
+ CLTensor dst = create_tensor<CLTensor>(dst_shape, data_type);
+
+ src0.info()->set_quantization_info(QuantizationInfo(1.f / 256.f, 0));
+ src1.info()->set_quantization_info(QuantizationInfo(1.f / 256.f, 0));
+ dst.info()->set_quantization_info(QuantizationInfo(1.f / 256.f, 0));
+
+ ARM_COMPUTE_EXPECT(src0.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(src1.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+
+ std::vector<const ICLTensor *> srcs = { &src0, &src1 };
+
+ // Create and configure function
+ CLConcatenateLayer concat;
+ concat.configure(srcs, &dst, 3U);
+
+ // Check that no padding was added to any of the tensors
+ return src0.info()->padding().empty() && src1.info()->padding().empty() && dst.info()->padding().empty();
+}
+} // namespace
TEST_SUITE(CL)
TEST_SUITE(BatchConcatenateLayer)
@@ -80,6 +111,27 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
bool is_valid = bool(CLConcatenateLayer::validate(inputs_vector_info_raw, &output_info.clone()->set_is_resizable(false), 3));
ARM_COMPUTE_EXPECT(is_valid == expected, framework::LogLevel::ERRORS);
}
+
+/** Validate zero padding tests
+ *
+ * A series of validation tests to check that no padding is added as part of configuration for 5 different scenarios.
+ *
+ * Checks performed in order:
+ * - First dimension multiple of 16
+ * - First dimension non-multiple of 16
+ * - First dimension less than 16 (vec_size for qasymm8) but a multiple of 4 (vec_size for fp32)
+ * - First dimension less than 16 (vec_size for qasymm8) and not a multiple of 4 (vec_size for fp32)
+ * - Tensor with only one element
+ */
+DATA_TEST_CASE(ValidateZeroPadding, framework::DatasetMode::ALL, zip(
+framework::dataset::make("Width", { 32U, 37U, 12U, 13U, 1U }),
+framework::dataset::make("DataType", { DataType::F32, DataType::QASYMM8 })),
+width, data_type)
+{
+ const bool one_elem = (width == 1U);
+ bool status = validate_zero_padding(width, one_elem ? 1U : 17U, one_elem ? 1U : 7U, one_elem ? 1U : 2U, data_type);
+ ARM_COMPUTE_EXPECT(status, framework::LogLevel::ERRORS);
+}
// clang-format on
// *INDENT-ON*