author    Giuseppe Rossini <giuseppe.rossini@arm.com>    2018-07-17 18:13:13 +0100
committer Anthony Barbier <anthony.barbier@arm.com>    2018-11-02 16:54:54 +0000
commit    d7647d4ebd0f0b5253b7f31ffcd48a851ba62947 (patch)
tree      9bdd2c54130937bd1a858323c84640392f3e3b05 /src
parent    c30b668bf7b3e7f841ea8ef9295f43fc69519e15 (diff)
download  ComputeLibrary-d7647d4ebd0f0b5253b7f31ffcd48a851ba62947.tar.gz
[COMPMID-1229] Implementing Pad on OpenCL -FP32/FP16
Change-Id: Ideead99410e5e0bda1035030af1bbcd0a65ea15e
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/144792
Tested-by: bsgcomp <bsgcomp@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Diffstat (limited to 'src')
-rw-r--r--  src/core/CL/CLKernelLibrary.cpp            6
-rw-r--r--  src/core/CL/cl_kernels/copy_tensor.cl     56
-rw-r--r--  src/core/CL/cl_kernels/memset.cl          64
-rw-r--r--  src/core/CL/kernels/CLCopyKernel.cpp     126
-rw-r--r--  src/core/CL/kernels/CLMemsetKernel.cpp    95
-rw-r--r--  src/core/Utils.cpp                        49
-rw-r--r--  src/runtime/CL/functions/CLPadLayer.cpp   63
7 files changed, 443 insertions, 16 deletions
diff --git a/src/core/CL/CLKernelLibrary.cpp b/src/core/CL/CLKernelLibrary.cpp
index 392fbfefb0..dfc41da09f 100644
--- a/src/core/CL/CLKernelLibrary.cpp
+++ b/src/core/CL/CLKernelLibrary.cpp
@@ -196,6 +196,7 @@ const std::map<std::string, std::string> CLKernelLibrary::_kernel_program_map =
{ "convolution_separable1x9_static", "convolution9x9.cl" },
{ "convolution_separable9x1_static", "convolution9x9.cl" },
{ "copy_tensor", "copy_tensor.cl" },
+ { "copy_pad_tensor", "copy_tensor.cl" },
{ "copy_plane", "channel_extract.cl" },
{ "copy_planes_3p", "channel_combine.cl" },
{ "copy_to_keypoint", "fast_corners.cl" },
@@ -298,6 +299,7 @@ const std::map<std::string, std::string> CLKernelLibrary::_kernel_program_map =
{ "lktracker_stage1", "optical_flow_pyramid_lk.cl" },
{ "magnitude_phase", "magnitude_phase.cl" },
{ "mean_stddev_accumulate", "mean_stddev.cl" },
+ { "memset", "memset.cl" },
{ "minmax", "minmaxloc.cl" },
{ "minmax_border", "minmaxloc.cl" },
{ "minmax_layer", "minmax_layer.cl" },
@@ -659,6 +661,10 @@ const std::map<std::string, std::string> CLKernelLibrary::_program_source_map =
#include "./cl_kernels/mean_stddev.clembed"
},
{
+ "memset.cl",
+#include "./cl_kernels/memset.clembed"
+ },
+ {
"minmaxloc.cl",
#include "./cl_kernels/minmaxloc.clembed"
},
diff --git a/src/core/CL/cl_kernels/copy_tensor.cl b/src/core/CL/cl_kernels/copy_tensor.cl
index 930a6762a8..4bbbf11bea 100644
--- a/src/core/CL/cl_kernels/copy_tensor.cl
+++ b/src/core/CL/cl_kernels/copy_tensor.cl
@@ -23,6 +23,60 @@
*/
#include "helpers.h"
+#if defined(PAD00) && defined(PAD10) && defined(PAD20) && defined(PAD21) && defined(PAD30) && defined(DATA_TYPE) && defined(VEC_SIZE) // Compile time constants
+
+/** Perform a padded copy of the input tensor to the output tensor. Padding values are defined at compile time
+ *
+ * @attention The following variables must be passed at compile time:
+ * -# -DPAD{d}{0,1} = padding before{0} and after{1} dimension d (d < 4)
+ * -# -DDEPTH = The third dimension (depth) of the tensor (it is needed only if d == 3)
+ * -# -DDATA_TYPE = Input and output datatypes.
+ *
+ * @param[in] in_ptr Pointer to the source tensor. Supported data types: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32
+ * @param[in] in_stride_x Stride of the source tensor in X dimension (in bytes)
+ * @param[in] in_step_x input_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] in_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] in_step_y input_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] in_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] in_step_z input_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] in_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[out] out_ptr Pointer to the destination tensor. Supported data types: same as @p in_ptr
+ * @param[in] out_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] out_step_x output_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] out_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] out_step_y output_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] out_stride_z Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] out_step_z output_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] out_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ */
+__kernel void copy_pad_tensor(
+ TENSOR3D_DECLARATION(in),
+ TENSOR3D_DECLARATION(out))
+
+{
+ Tensor3D in = CONVERT_TO_TENSOR3D_STRUCT(in);
+ Tensor3D out = CONVERT_TO_TENSOR3D_STRUCT(out);
+
+ const int offset_x = PAD00;
+ const int offset_y = PAD10;
+ const int offset_z = PAD20;
+
+#if PAD30 > 0
+ const size_t in_batch = get_global_id(2) / DEPTH;
+ const int total_depth = DEPTH + PAD20 + PAD21;
+ const int offset_w = PAD30 * total_depth + in_batch * (PAD20 + PAD21);
+#else // PAD30 == 0
+ const int offset_w = 0;
+#endif // PAD30
+
+ VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
+ data = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)in.ptr);
+
+ VSTORE(VEC_SIZE)
+ (data, 0, (__global DATA_TYPE *)tensor3D_offset(&out, offset_x, offset_y, offset_z + offset_w));
+}
+#endif // Compile time constants
+
/** Performs a copy of input tensor to the output tensor.
*
* @param[in] in_ptr Pointer to the source tensor. Supported data types: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32
@@ -56,4 +110,4 @@ __kernel void copy_tensor(
// Store result
VSTORE(VEC_SIZE)
(data, 0, (__global DATA_TYPE *)out.ptr);
-}
\ No newline at end of file
+}
diff --git a/src/core/CL/cl_kernels/memset.cl b/src/core/CL/cl_kernels/memset.cl
new file mode 100644
index 0000000000..80b34ebdf4
--- /dev/null
+++ b/src/core/CL/cl_kernels/memset.cl
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "helpers.h"
+
+#if defined(DATA_TYPE) && defined(CONSTANT_VALUE) // Check for compile time constants
+
+/** Fill the tensor's planes with a given value
+ * @attention The following variables must be passed at compile time:
+ * -# -DDATA_TYPE = Tensor data type. Supported data types: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32
+ * -# -DCONSTANT_VALUE = The value used to fill the tensor's planes
+ * -# -DVEC_SIZE = Vector size
+ * -# -DLAST_ACCESSED_X = The element that is on the X border (threads trying to set this might need to step back a bit)
+ *
+ * @param[in] tensor_ptr Pointer to the source image. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32.
+ * @param[in] tensor_stride_x Stride of the source image in X dimension (in bytes)
+ * @param[in] tensor_step_x tensor_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] tensor_stride_y Stride of the source image in Y dimension (in bytes)
+ * @param[in] tensor_step_y tensor_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] tensor_offset_first_element_in_bytes The offset of the first element in the source image
+ * @note The value used to fill the planes of the tensor is passed at compile time via -DCONSTANT_VALUE
+ */
+__kernel void memset(
+ IMAGE_DECLARATION(tensor))
+{
+ Image tensor = CONVERT_TO_IMAGE_STRUCT(tensor);
+
+#if defined(VEC_SIZE) && defined(LAST_ACCESSED_X)
+ // Check if access on width gets out of bounds
+ // If it does shift access vector to access elements within bounds
+ const int xi = (int)(get_global_id(0) * VEC_SIZE);
+ tensor.ptr -= max(xi - (int)LAST_ACCESSED_X, 0) * tensor_stride_x;
+
+ VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
+ data = (DATA_TYPE)(CONSTANT_VALUE);
+
+ VSTORE(VEC_SIZE)
+ (data, 0, (__global DATA_TYPE *)tensor.ptr);
+#else // !defined(VEC_SIZE) || !defined(LAST_ACCESSED_X)
+ *((__global DATA_TYPE *)(tensor.ptr)) = (DATA_TYPE)(CONSTANT_VALUE);
+#endif // defined(VEC_SIZE) && defined(LAST_ACCESSED_X)
+}
+
+#endif // Check for compile time constants
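[Editor's note] The VEC_SIZE/LAST_ACCESSED_X branch above avoids out-of-bounds vector stores: CLMemsetKernel (below) sets -DLAST_ACCESSED_X to max(width - VEC_SIZE, 0), and the kernel shifts any store that would run past the end of the row back so that it ends on the last element. A minimal host-side sketch of that index arithmetic, assuming an illustrative row width of 17 elements and VEC_SIZE = 4 (values not taken from the patch):

    // Sketch only: mirrors the LAST_ACCESSED_X clamp in memset.cl for width = 17, VEC_SIZE = 4.
    #include <algorithm>
    #include <cstdio>

    int main()
    {
        const int vec_size        = 4;
        const int width           = 17;                                           // illustrative width
        const int last_accessed_x = std::max(width - vec_size, 0);                // 13, as CLMemsetKernel computes it
        const int padded_width    = ((width + vec_size - 1) / vec_size) * vec_size; // 20, the rounded-up window

        for(int xi = 0; xi < padded_width; xi += vec_size)
        {
            const int start = xi - std::max(xi - last_accessed_x, 0); // shift the store back into bounds
            std::printf("store covers elements [%d, %d)\n", start, start + vec_size);
        }
        return 0;
    }

The last work-item writes [13, 17) instead of [16, 20): a few border elements are written twice with the same value, and no store goes past the end of the row.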
diff --git a/src/core/CL/kernels/CLCopyKernel.cpp b/src/core/CL/kernels/CLCopyKernel.cpp
index 2da67d2666..e14e5dafab 100644
--- a/src/core/CL/kernels/CLCopyKernel.cpp
+++ b/src/core/CL/kernels/CLCopyKernel.cpp
@@ -30,21 +30,22 @@
#include "arm_compute/core/Error.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/TensorInfo.h"
-#include "arm_compute/core/Validate.h"
#include "arm_compute/core/Window.h"
+#include "arm_compute/core/utils/misc/ShapeCalculator.h"
-using namespace arm_compute;
-
+namespace arm_compute
+{
namespace
{
-Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output)
+Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, const PaddingList &padding = PaddingList())
{
ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
+ ARM_COMPUTE_RETURN_ERROR_ON(padding.size() > 4);
// Validate output if initialized
if(output->total_size() != 0)
{
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(input->tensor_shape(), output->tensor_shape());
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(misc::shape_calculator::compute_padded_shape(input->tensor_shape(), padding), output->tensor_shape());
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
}
@@ -69,6 +70,64 @@ std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITen
Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
return std::make_pair(err, win);
}
+
+std::pair<Status, Window> validate_and_configure_window_with_padding(ITensorInfo *input, ITensorInfo *output, const PaddingList &padding)
+{
+ TensorShape input_shape = input->tensor_shape();
+ TensorShape padded_shape = misc::shape_calculator::compute_padded_shape(input_shape, padding);
+
+ auto_init_if_empty(*output, input->clone()->set_tensor_shape(padded_shape));
+
+ // Configure window
+ const unsigned int num_elems_processed_per_iteration = 16 / input->element_size();
+
+ Window win = calculate_max_window(*input, Steps(num_elems_processed_per_iteration));
+
+ // Pad on the x dimension accounting for the padding offset along the same dimension
+ AccessWindowHorizontal output_access(output, padding[0].first, num_elems_processed_per_iteration);
+ AccessWindowHorizontal input_access(input, 0, num_elems_processed_per_iteration);
+ bool window_changed = update_window_and_padding(win, input_access, output_access);
+
+ Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
+ return std::make_pair(err, win);
+}
+
+/** Generate the build option string "-DPAD<dim><index>=<padding>"
+ *
+ * @param[in] dim The dimension index
+ * @param[in] index 0 for the padding before the dimension, 1 for the padding after it
+ * @param[in] padding The amount of padding for that dimension/index pair
+ *
+ * @return The concatenated build option string
+ */
+std::string generate_pad_string(const size_t dim, const size_t index, const size_t padding)
+{
+ return "-DPAD" + support::cpp11::to_string(dim) + support::cpp11::to_string(index) + "=" + support::cpp11::to_string(padding);
+}
+
+/** Pass the padding as build options to the kernel.
+ *
+ * @param[in] padding The list of padding pairs (before, after), one per dimension;
+ *                    dimensions not covered by the list are given zero padding
+ * @param[out] build_opts The build options to which the padding options are added
+ */
+void add_padding_as_build_options(const PaddingList &padding, CLBuildOptions &build_opts)
+{
+ size_t dim = 0;
+ for(dim = 0; dim < padding.size(); dim++)
+ {
+ build_opts.add_option(generate_pad_string(dim, 0, padding[dim].first));
+ build_opts.add_option(generate_pad_string(dim, 1, padding[dim].second));
+ }
+
+ while(dim < TensorShape::num_max_dimensions)
+ {
+ build_opts.add_option(generate_pad_string(dim, 0, 0));
+ build_opts.add_option(generate_pad_string(dim, 1, 0));
+ dim++;
+ }
+}
+
} // namespace
CLCopyKernel::CLCopyKernel()
@@ -76,32 +135,68 @@ CLCopyKernel::CLCopyKernel()
{
}
-void CLCopyKernel::configure(const ICLTensor *input, ICLTensor *output)
+void CLCopyKernel::configure(const ICLTensor *input, ICLTensor *output, const PaddingList &padding)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info()));
+ ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), padding));
_input = input;
_output = output;
- const unsigned int num_elems_processed_per_iteration = 16 / input->info()->element_size();
-
// Create kernel
CLBuildOptions build_opts;
build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(input->info()->data_type()));
+
+ const unsigned int num_elems_processed_per_iteration = 16 / input->info()->element_size();
build_opts.add_option("-DVEC_SIZE=" + support::cpp11::to_string(num_elems_processed_per_iteration));
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("copy_tensor", build_opts.options()));
- // Configure kernel window
- auto win_config = validate_and_configure_window(input->info(), output->info());
+ std::pair<Status, Window> win_config;
+
+ if(padding.empty())
+ {
+ // Build kernel
+ _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("copy_tensor", build_opts.options()));
+
+ // Configure window
+ win_config = validate_and_configure_window(input->info(), output->info());
+ }
+ else
+ {
+ // Add compile time options
+ add_padding_as_build_options(padding, build_opts);
+
+ // If we are padding in the fourth dimension, the kernel needs to know the depth of
+ // each 3D volume in order to compute the batch offset
+ if(padding.size() == 4)
+ {
+ const size_t depth = input->info()->tensor_shape()[2];
+ build_opts.add_option("-DDEPTH=" + support::cpp11::to_string(depth));
+ }
+
+ // Build kernel
+ _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("copy_pad_tensor", build_opts.options()));
+
+ // Configure window
+ win_config = validate_and_configure_window_with_padding(input->info(), output->info(), padding);
+ }
+
+ // Validate and set the window
ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
ICLKernel::configure_internal(win_config.second);
}
-Status CLCopyKernel::validate(const arm_compute::ITensorInfo *input, const arm_compute::ITensorInfo *output)
+Status CLCopyKernel::validate(const arm_compute::ITensorInfo *input, const arm_compute::ITensorInfo *output, const PaddingList &padding)
{
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output));
- ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(), output->clone().get()).first);
+ ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, padding));
+
+ if(padding.empty())
+ {
+ ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(), output->clone().get()).first);
+ }
+ else
+ {
+ ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window_with_padding(input->clone().get(), output->clone().get(), padding).first);
+ }
return Status{};
}
@@ -123,3 +218,4 @@ void CLCopyKernel::run(const Window &window, cl::CommandQueue &queue)
}
while(collapsed.slide_window_slice_3D(slice));
}
+} // namespace arm_compute
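[Editor's note] For reference, a small standalone sketch of how a padding list translates into the padded output shape and into the -DPAD<dim><index> build options generated above (shape and padding values are illustrative, not taken from the patch; compute_padded_shape is assumed to simply add the before/after amounts per dimension, and the real add_padding_as_build_options also emits zero padding for the remaining dimensions up to TensorShape::num_max_dimensions):

    #include <cstdio>
    #include <string>
    #include <utility>
    #include <vector>

    using PaddingInfo = std::pair<size_t, size_t>; // (before, after) for one dimension
    using PaddingList = std::vector<PaddingInfo>;

    int main()
    {
        const size_t      in_shape[4] = { 27, 13, 5, 2 };       // W, H, C, N (illustrative)
        const PaddingList padding     = { { 1, 1 }, { 2, 0 } }; // pad W by 1/1 and H by 2/0

        std::string build_opts;
        for(size_t d = 0; d < 4; ++d)
        {
            const size_t before = d < padding.size() ? padding[d].first : 0;
            const size_t after  = d < padding.size() ? padding[d].second : 0;
            std::printf("dim %zu: %zu -> %zu\n", d, in_shape[d], before + in_shape[d] + after);
            build_opts += " -DPAD" + std::to_string(d) + "0=" + std::to_string(before);
            build_opts += " -DPAD" + std::to_string(d) + "1=" + std::to_string(after);
        }
        std::printf("build options:%s\n", build_opts.c_str());
        return 0;
    }

This prints a 29x15x5x2 output shape and "-DPAD00=1 -DPAD01=1 -DPAD10=2 -DPAD11=2 -DPAD20=0 -DPAD21=0 -DPAD30=0 -DPAD31=0", which is the kind of option set copy_pad_tensor is compiled with.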
diff --git a/src/core/CL/kernels/CLMemsetKernel.cpp b/src/core/CL/kernels/CLMemsetKernel.cpp
new file mode 100644
index 0000000000..ab53897543
--- /dev/null
+++ b/src/core/CL/kernels/CLMemsetKernel.cpp
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/core/CL/kernels/CLMemsetKernel.h"
+
+#include "arm_compute/core/AccessWindowStatic.h"
+#include "arm_compute/core/CL/CLHelpers.h"
+#include "arm_compute/core/CL/CLKernelLibrary.h"
+#include "arm_compute/core/CL/ICLTensor.h"
+#include "arm_compute/core/Helpers.h"
+#include "arm_compute/core/Utils.h"
+#include "arm_compute/core/Validate.h"
+#include "arm_compute/core/Window.h"
+
+namespace arm_compute
+{
+CLMemsetKernel::CLMemsetKernel()
+ : ICLKernel(), _tensor(nullptr)
+{
+}
+
+void CLMemsetKernel::configure(ICLTensor *tensor,
+ const PixelValue &constant_value)
+{
+ ARM_COMPUTE_ERROR_ON_NULLPTR(tensor);
+ _tensor = tensor;
+
+ const DataType data_type = tensor->info()->data_type();
+ const int vec_size_x = 16 / tensor->info()->element_size();
+ const int output_width_x = tensor->info()->tensor_shape().x();
+ const bool multi_access_x = (output_width_x / vec_size_x > 0);
+
+ // Create and update the window (if needed)
+ Window win = calculate_max_window(*tensor->info());
+ if(multi_access_x)
+ {
+ win.set(Window::DimX,
+ Window::Dimension(win.x().start(), ceil_to_multiple(win.x().end(), vec_size_x), vec_size_x));
+ }
+ ICLKernel::configure_internal(win);
+
+ // Create kernel
+ CLBuildOptions build_opts;
+ build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(data_type));
+ build_opts.add_option("-DCONSTANT_VALUE=" + string_from_pixel_value(constant_value, data_type));
+ build_opts.add_option_if(multi_access_x, "-DVEC_SIZE=" + support::cpp11::to_string(vec_size_x));
+ build_opts.add_option_if(multi_access_x, "-DLAST_ACCESSED_X=" + support::cpp11::to_string(std::max<int>(output_width_x - vec_size_x, 0)));
+ _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("memset", build_opts.options()));
+}
+
+Status CLMemsetKernel::validate(const ITensorInfo *tensor, const PixelValue &constant_value)
+{
+ ARM_COMPUTE_UNUSED(tensor);
+ ARM_COMPUTE_UNUSED(constant_value);
+ return Status{};
+}
+
+void CLMemsetKernel::run(const Window &window, cl::CommandQueue &queue)
+{
+ ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
+ ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);
+
+ // Collapse the window along the higher dimensions and iterate over 2D slices
+ Window collapsed = window.collapse_if_possible(ICLKernel::window(), Window::DimY);
+ Window slice = collapsed.first_slice_window_2D();
+
+ do
+ {
+ unsigned int idx = 0;
+ add_2D_tensor_argument(idx, _tensor, slice);
+ enqueue(queue, *this, slice);
+ }
+ while(collapsed.slide_window_slice_2D(slice));
+}
+} // namespace arm_compute
diff --git a/src/core/Utils.cpp b/src/core/Utils.cpp
index 11bdbdafe0..229579d8d9 100644
--- a/src/core/Utils.cpp
+++ b/src/core/Utils.cpp
@@ -252,6 +252,55 @@ const std::string &arm_compute::string_from_pooling_type(PoolingType type)
return pool_type_map[type];
}
+std::string arm_compute::string_from_pixel_value(const PixelValue &value, const DataType data_type)
+{
+ std::stringstream ss;
+ std::string converted_string;
+
+ switch(data_type)
+ {
+ case DataType::U8:
+ case DataType::QASYMM8:
+ // Needs conversion to 32 bit, otherwise interpreted as ASCII values
+ ss << uint32_t(value.get<uint8_t>());
+ converted_string = ss.str();
+ break;
+ case DataType::S8:
+ // Needs conversion to 32 bit, otherwise interpreted as ASCII values
+ ss << int32_t(value.get<int8_t>());
+ converted_string = ss.str();
+ break;
+ case DataType::U16:
+ ss << value.get<uint16_t>();
+ converted_string = ss.str();
+ break;
+ case DataType::S16:
+ ss << value.get<int16_t>();
+ converted_string = ss.str();
+ break;
+ case DataType::U32:
+ ss << value.get<uint32_t>();
+ converted_string = ss.str();
+ break;
+ case DataType::S32:
+ ss << value.get<int32_t>();
+ converted_string = ss.str();
+ break;
+ case DataType::F32:
+ converted_string = float_to_string_with_full_precision(value.get<float>());
+ break;
+ case DataType::F16:
+ static_assert(sizeof(half) == 2, "Half must be 16 bit");
+ ss << value.get<half>();
+ converted_string = ss.str();
+ break;
+ default:
+ ARM_COMPUTE_ERROR("Not handled");
+ }
+
+ return converted_string;
+}
+
std::string arm_compute::lower_string(const std::string &val)
{
std::string res = val;
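[Editor's note] The cast to 32 bit in the U8/QASYMM8/S8 cases of string_from_pixel_value above matters because std::stringstream treats (u)int8_t as a character type. A tiny standalone illustration (the value 65 is arbitrary):

    #include <cstdint>
    #include <iostream>
    #include <sstream>

    int main()
    {
        const uint8_t     v = 65;
        std::stringstream raw, promoted;
        raw << v;                // streams the character 'A'
        promoted << uint32_t(v); // streams the number "65", which is what the build option needs
        std::cout << raw.str() << " vs " << promoted.str() << std::endl;
        return 0;
    }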
diff --git a/src/runtime/CL/functions/CLPadLayer.cpp b/src/runtime/CL/functions/CLPadLayer.cpp
new file mode 100644
index 0000000000..de43c7dca2
--- /dev/null
+++ b/src/runtime/CL/functions/CLPadLayer.cpp
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/runtime/CL/functions/CLPadLayer.h"
+
+#include "arm_compute/core/CL/ICLTensor.h"
+#include "arm_compute/core/Types.h"
+#include "support/ToolchainSupport.h"
+
+namespace arm_compute
+{
+CLPadLayer::CLPadLayer()
+ : _copy_kernel(), _fillborder_kernel(), _memset_kernel()
+{
+}
+
+void CLPadLayer::configure(ICLTensor *input, ICLTensor *output, const PaddingList &padding)
+{
+ // Copy the input to the output
+ _copy_kernel.configure(input, output, padding);
+
+ // Set the pages of the output to zero
+ _memset_kernel.configure(output, PixelValue());
+
+ // Fill padding on the first two dimensions with zeros
+ _fillborder_kernel.configure(input, input->info()->padding(), BorderMode::CONSTANT);
+}
+
+Status CLPadLayer::validate(const ITensorInfo *input, const ITensorInfo *output, const PaddingList &padding)
+{
+ ARM_COMPUTE_RETURN_ON_ERROR(CLMemsetKernel::validate(input, PixelValue()));
+ ARM_COMPUTE_RETURN_ON_ERROR(CLCopyKernel::validate(input, output, padding));
+
+ return Status{};
+}
+
+void CLPadLayer::run()
+{
+ CLScheduler::get().enqueue(_memset_kernel, false);
+ CLScheduler::get().enqueue(_fillborder_kernel, false);
+ CLScheduler::get().enqueue(_copy_kernel, true);
+}
+} // namespace arm_compute
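[Editor's note] Taken together, the new function zero-fills the padded output with CLMemsetKernel and then copies the input into it at the padding offset with the extended CLCopyKernel. A minimal usage sketch (tensor shape, padding values and the surrounding CLTensor/CLScheduler setup are illustrative assumptions, not part of this patch):

    #include "arm_compute/runtime/CL/CLScheduler.h"
    #include "arm_compute/runtime/CL/CLTensor.h"
    #include "arm_compute/runtime/CL/functions/CLPadLayer.h"

    using namespace arm_compute;

    int main()
    {
        CLScheduler::get().default_init();

        CLTensor src, dst;
        src.allocator()->init(TensorInfo(TensorShape(27U, 13U, 5U), 1, DataType::F32));

        // Pad the first dimension by (1, 1) and the second by (2, 0); dst is auto-initialised
        // to the padded shape (29, 15, 5) during configure().
        CLPadLayer pad;
        pad.configure(&src, &dst, PaddingList{ { 1, 1 }, { 2, 0 } });

        src.allocator()->allocate();
        dst.allocator()->allocate();

        // ... fill src with data ...
        pad.run();
        return 0;
    }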