-rw-r--r--  arm_compute/core/CL/CLKernels.h                |   1
-rw-r--r--  arm_compute/core/CL/kernels/CLCopyKernel.h     |  14
-rw-r--r--  arm_compute/core/CL/kernels/CLMemsetKernel.h   |  74
-rw-r--r--  arm_compute/core/Types.h                       |   6
-rw-r--r--  arm_compute/core/Utils.h                       |   9
-rw-r--r--  arm_compute/core/utils/misc/ShapeCalculator.h  |  10
-rw-r--r--  arm_compute/runtime/CL/CLFunctions.h           |   1
-rw-r--r--  arm_compute/runtime/CL/functions/CLPadLayer.h  |  77
-rw-r--r--  src/core/CL/CLKernelLibrary.cpp                |   6
-rw-r--r--  src/core/CL/cl_kernels/copy_tensor.cl          |  56
-rw-r--r--  src/core/CL/cl_kernels/memset.cl               |  64
-rw-r--r--  src/core/CL/kernels/CLCopyKernel.cpp           | 126
-rw-r--r--  src/core/CL/kernels/CLMemsetKernel.cpp         |  95
-rw-r--r--  src/core/Utils.cpp                             |  49
-rw-r--r--  src/runtime/CL/functions/CLPadLayer.cpp        |  63
-rw-r--r--  tests/validation/CL/Padding.cpp                | 135
-rw-r--r--  tests/validation/fixtures/PadLayerFixture.h    | 112
-rw-r--r--  tests/validation/reference/PadLayer.cpp        | 109
-rw-r--r--  tests/validation/reference/PadLayer.h          |  53
-rw-r--r--  utils/TypePrinter.h                            |  31
20 files changed, 1069 insertions, 22 deletions
diff --git a/arm_compute/core/CL/CLKernels.h b/arm_compute/core/CL/CLKernels.h
index 2fd6ab2e1c..88658013fa 100644
--- a/arm_compute/core/CL/CLKernels.h
+++ b/arm_compute/core/CL/CLKernels.h
@@ -91,6 +91,7 @@
#include "arm_compute/core/CL/kernels/CLMagnitudePhaseKernel.h"
#include "arm_compute/core/CL/kernels/CLMeanStdDevKernel.h"
#include "arm_compute/core/CL/kernels/CLMedian3x3Kernel.h"
+#include "arm_compute/core/CL/kernels/CLMemsetKernel.h"
#include "arm_compute/core/CL/kernels/CLMinMaxLayerKernel.h"
#include "arm_compute/core/CL/kernels/CLMinMaxLocationKernel.h"
#include "arm_compute/core/CL/kernels/CLNonLinearFilterKernel.h"
diff --git a/arm_compute/core/CL/kernels/CLCopyKernel.h b/arm_compute/core/CL/kernels/CLCopyKernel.h
index 2aeb488e05..9fc3759f4e 100644
--- a/arm_compute/core/CL/kernels/CLCopyKernel.h
+++ b/arm_compute/core/CL/kernels/CLCopyKernel.h
@@ -47,18 +47,20 @@ public:
CLCopyKernel &operator=(CLCopyKernel &&) = default;
/** Initialize the kernel's input, output.
*
- * @param[in] input Source tensor. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32.
- * @param[out] output Destination tensor. Data types supported: same as @p input.
+ * @param[in] input Source tensor. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32.
+ * @param[out] output Destination tensor. Data types supported: same as @p input.
+ * @param[in] padding (Optional) Padding to be applied to the input tensor
*/
- void configure(const ICLTensor *input, ICLTensor *output);
+ void configure(const ICLTensor *input, ICLTensor *output, const PaddingList &padding = PaddingList());
/** Static function to check if given info will lead to a valid configuration of @ref CLCopyKernel
*
- * @param[in] input Source tensor. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32.
- * @param[in] output Destination tensor. Data types supported: same as @p input.
+ * @param[in] input Source tensor info. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32.
+ * @param[in] output Destination tensor info. Data types supported: same as @p input.
+ * @param[in] padding (Optional) Padding to be applied to the input tensor
*
* @return a status
*/
- static Status validate(const ITensorInfo *input, const ITensorInfo *output);
+ static Status validate(const ITensorInfo *input, const ITensorInfo *output, const PaddingList &padding = PaddingList());
// Inherited methods overridden:
void run(const Window &window, cl::CommandQueue &queue) override;
diff --git a/arm_compute/core/CL/kernels/CLMemsetKernel.h b/arm_compute/core/CL/kernels/CLMemsetKernel.h
new file mode 100644
index 0000000000..e35a9f1cf2
--- /dev/null
+++ b/arm_compute/core/CL/kernels/CLMemsetKernel.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_CLMEMSETKERNEL_H__
+#define __ARM_COMPUTE_CLMEMSETKERNEL_H__
+
+#include "arm_compute/core/CL/ICLKernel.h"
+#include "arm_compute/core/PixelValue.h"
+#include "arm_compute/core/Types.h"
+
+namespace arm_compute
+{
+class ICLTensor;
+
+/** Interface for filling the planes of a tensor */
+class CLMemsetKernel : public ICLKernel
+{
+public:
+ /** Default constructor */
+ CLMemsetKernel();
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ CLMemsetKernel(const CLMemsetKernel &) = delete;
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ CLMemsetKernel &operator=(const CLMemsetKernel &) = delete;
+ /** Allow instances of this class to be moved */
+ CLMemsetKernel(CLMemsetKernel &&) = default;
+ /** Allow instances of this class to be moved */
+ CLMemsetKernel &operator=(CLMemsetKernel &&) = default;
+ /** Default destructor */
+ ~CLMemsetKernel() = default;
+
+ /** Initialise the kernel's tensor and filling value
+ *
+ * @param[in,out] tensor Input tensor to fill. Supported data types: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32
+ * @param[in] constant_value The value used to fill the planes of the tensor
+ */
+ void configure(ICLTensor *tensor, const PixelValue &constant_value);
+ /** Static function to check if given info will lead to a valid configuration of @ref CLMemsetKernel
+ *
+ * @param[in] tensor Source tensor info. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32
+ * @param[in] constant_value The value used to fill the planes of the tensor
+ *
+ * @return a status
+ */
+ static Status validate(const ITensorInfo *tensor, const PixelValue &constant_value);
+
+ // Inherited methods overridden:
+ void run(const Window &window, cl::CommandQueue &queue) override;
+
+private:
+ ICLTensor *_tensor;
+};
+} // namespace arm_compute
+#endif /*__ARM_COMPUTE_CLMEMSETKERNEL_H__ */
diff --git a/arm_compute/core/Types.h b/arm_compute/core/Types.h
index 31f2c88278..4c81e1f700 100644
--- a/arm_compute/core/Types.h
+++ b/arm_compute/core/Types.h
@@ -470,6 +470,12 @@ struct Coordinates3D
uint32_t z; /**< Z coordinates */
};
+/** Padding information as a pair of unsigned int start/end */
+using PaddingInfo = std::pair<uint32_t, uint32_t>;
+
+/** List of padding information */
+using PaddingList = std::vector<PaddingInfo>;
+
/** Region of interest */
struct ROI
{
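As a usage note on the new aliases: PaddingList is simply a std::vector of (before, after) pairs, one entry per tensor dimension starting from the innermost (X) dimension; dimensions without an entry get no padding. A minimal sketch:

#include "arm_compute/core/Types.h"

using namespace arm_compute;

// Pad dimension 0 (X) by one element on each side and dimension 1 (Y) by two
// elements at the end only; any dimension without an entry is left unpadded.
const PaddingList padding{ { 1, 1 }, { 0, 2 } };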
diff --git a/arm_compute/core/Utils.h b/arm_compute/core/Utils.h
index 222f867e2c..c742ebc50e 100644
--- a/arm_compute/core/Utils.h
+++ b/arm_compute/core/Utils.h
@@ -25,6 +25,7 @@
#define __ARM_COMPUTE_UTILS_H__
#include "arm_compute/core/Error.h"
+#include "arm_compute/core/PixelValue.h"
#include "arm_compute/core/Rounding.h"
#include "arm_compute/core/Types.h"
@@ -938,6 +939,14 @@ const std::string &string_from_norm_type(NormType type);
* @return The string describing the pooling type.
*/
const std::string &string_from_pooling_type(PoolingType type);
+/** Convert a PixelValue to a string, represented through the specific data type
+ *
+ * @param[in] value The PixelValue to convert
+ * @param[in] data_type The type to be used to convert the @p value
+ *
+ * @return String representation of the PixelValue through the given data type.
+ */
+std::string string_from_pixel_value(const PixelValue &value, const DataType data_type);
/** Lower a given string.
*
* @param[in] val Given string to lower.
diff --git a/arm_compute/core/utils/misc/ShapeCalculator.h b/arm_compute/core/utils/misc/ShapeCalculator.h
index 4ae97f7c1f..2db7b28161 100644
--- a/arm_compute/core/utils/misc/ShapeCalculator.h
+++ b/arm_compute/core/utils/misc/ShapeCalculator.h
@@ -573,6 +573,16 @@ inline TensorShape compute_space_to_batch_shape(const ITensorInfo *input, const
return output_shape;
}
+inline TensorShape compute_padded_shape(const TensorShape &input_shape, const PaddingList &padding)
+{
+ TensorShape padded_shape = input_shape;
+ for(size_t dim = 0; dim < padding.size(); ++dim)
+ {
+ padded_shape.set(dim, padding[dim].first + input_shape[dim] + padding[dim].second);
+ }
+ return padded_shape;
+}
+
template <typename T>
inline TensorShape extract_shape(T *data)
{
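A hedged usage sketch of the helper above: each padded dimension grows by its before + after amounts, so a (27, 13, 2) shape padded with {{1, 1}, {2, 2}} becomes (29, 17, 2).

#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"

using namespace arm_compute;

const TensorShape in_shape(27U, 13U, 2U);
const PaddingList padding{ { 1, 1 }, { 2, 2 } };
const TensorShape out_shape = misc::shape_calculator::compute_padded_shape(in_shape, padding); // (29, 17, 2)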
diff --git a/arm_compute/runtime/CL/CLFunctions.h b/arm_compute/runtime/CL/CLFunctions.h
index cf8911b163..4619aa5602 100644
--- a/arm_compute/runtime/CL/CLFunctions.h
+++ b/arm_compute/runtime/CL/CLFunctions.h
@@ -95,6 +95,7 @@
#include "arm_compute/runtime/CL/functions/CLNormalizationLayer.h"
#include "arm_compute/runtime/CL/functions/CLNormalizePlanarYUVLayer.h"
#include "arm_compute/runtime/CL/functions/CLOpticalFlow.h"
+#include "arm_compute/runtime/CL/functions/CLPadLayer.h"
#include "arm_compute/runtime/CL/functions/CLPermute.h"
#include "arm_compute/runtime/CL/functions/CLPhase.h"
#include "arm_compute/runtime/CL/functions/CLPixelWiseMultiplication.h"
diff --git a/arm_compute/runtime/CL/functions/CLPadLayer.h b/arm_compute/runtime/CL/functions/CLPadLayer.h
new file mode 100644
index 0000000000..b9dca665d0
--- /dev/null
+++ b/arm_compute/runtime/CL/functions/CLPadLayer.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_CLPADLAYER_H__
+#define __ARM_COMPUTE_CLPADLAYER_H__
+
+#include "arm_compute/core/CL/kernels/CLCopyKernel.h"
+#include "arm_compute/core/CL/kernels/CLFillBorderKernel.h"
+#include "arm_compute/core/CL/kernels/CLMemsetKernel.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/runtime/CL/CLScheduler.h"
+#include "arm_compute/runtime/IFunction.h"
+
+namespace arm_compute
+{
+class ICLTensor;
+
+/** Basic function to pad a tensor. This function calls the following OpenCL kernels:
+ *
+ * -# @ref CLMemsetKernel
+ * -# @ref CLFillBorderKernel
+ * -# @ref CLCopyKernel
+ */
+class CLPadLayer : public IFunction
+{
+public:
+ /** Default constructor*/
+ CLPadLayer();
+
+ /** Initialize the function
+ *
+ * @param[in] input Source tensor. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32.
+ * @param[out] output Output tensor. Data type supported: same as @p input
+ * @param[in] padding The padding for each spatial dimension of the input tensor. The pair padding[i]
+ * specifies the front and the end padding in the i-th dimension.
+ */
+ void configure(ICLTensor *input, ICLTensor *output, const PaddingList &padding);
+
+ /** Static function to check if given info will lead to a valid configuration of @ref CLPadLayer.
+ *
+ * @param[in] input Source tensor info. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32.
+ * @param[in] output Output tensor info. Data type supported: same as @p input
+ * @param[in] padding The padding for each spatial dimension of the input tensor. The pair padding[i]
+ * specifies the front and the end padding in the i-th dimension.
+ */
+ static Status validate(const ITensorInfo *input, const ITensorInfo *output, const PaddingList &padding);
+
+ // Inherited methods overridden:
+ void run() override;
+
+private:
+ CLCopyKernel _copy_kernel;
+ CLFillBorderKernel _fillborder_kernel;
+ CLMemsetKernel _memset_kernel;
+};
+} // namespace arm_compute
+#endif /*__ARM_COMPUTE_CLPADLAYER_H__ */
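A minimal host-side sketch of how CLPadLayer is intended to be driven, assuming an initialized CL scheduler; the output tensor info is auto-initialized during configure, so dst can start empty:

#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/functions/CLPadLayer.h"

using namespace arm_compute;

void pad_example()
{
    CLScheduler::get().default_init();

    CLTensor src, dst;
    src.allocator()->init(TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32));

    CLPadLayer pad;
    pad.configure(&src, &dst, PaddingList{ { 1, 1 }, { 2, 2 } }); // dst is auto-initialized to (29, 17, 2)

    src.allocator()->allocate();
    dst.allocator()->allocate();

    // ... fill src (e.g. via map()/unmap()) ...
    pad.run();
    CLScheduler::get().sync();
}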
diff --git a/src/core/CL/CLKernelLibrary.cpp b/src/core/CL/CLKernelLibrary.cpp
index 392fbfefb0..dfc41da09f 100644
--- a/src/core/CL/CLKernelLibrary.cpp
+++ b/src/core/CL/CLKernelLibrary.cpp
@@ -196,6 +196,7 @@ const std::map<std::string, std::string> CLKernelLibrary::_kernel_program_map =
{ "convolution_separable1x9_static", "convolution9x9.cl" },
{ "convolution_separable9x1_static", "convolution9x9.cl" },
{ "copy_tensor", "copy_tensor.cl" },
+ { "copy_pad_tensor", "copy_tensor.cl" },
{ "copy_plane", "channel_extract.cl" },
{ "copy_planes_3p", "channel_combine.cl" },
{ "copy_to_keypoint", "fast_corners.cl" },
@@ -298,6 +299,7 @@ const std::map<std::string, std::string> CLKernelLibrary::_kernel_program_map =
{ "lktracker_stage1", "optical_flow_pyramid_lk.cl" },
{ "magnitude_phase", "magnitude_phase.cl" },
{ "mean_stddev_accumulate", "mean_stddev.cl" },
+ { "memset", "memset.cl" },
{ "minmax", "minmaxloc.cl" },
{ "minmax_border", "minmaxloc.cl" },
{ "minmax_layer", "minmax_layer.cl" },
@@ -659,6 +661,10 @@ const std::map<std::string, std::string> CLKernelLibrary::_program_source_map =
#include "./cl_kernels/mean_stddev.clembed"
},
{
+ "memset.cl",
+#include "./cl_kernels/memset.clembed"
+ },
+ {
"minmaxloc.cl",
#include "./cl_kernels/minmaxloc.clembed"
},
diff --git a/src/core/CL/cl_kernels/copy_tensor.cl b/src/core/CL/cl_kernels/copy_tensor.cl
index 930a6762a8..4bbbf11bea 100644
--- a/src/core/CL/cl_kernels/copy_tensor.cl
+++ b/src/core/CL/cl_kernels/copy_tensor.cl
@@ -23,6 +23,60 @@
*/
#include "helpers.h"
+#if defined(PAD00) && defined(PAD10) && defined(PAD20) && defined(PAD21) && defined(PAD30) && defined(DATA_TYPE) && defined(VEC_SIZE) // Compile time constants
+
+/** Perform a padded copy of the input tensor to the output tensor. Padding values are defined at compile time
+ *
+ * @attention The following variables must be passed at compile time:
+ * -# -DPAD{d}{0,1} = padding before{0} and after{1} dimension d (d < 4)
+ * -# -DDEPTH = The third dimension (depth) of the tensor (it is needed only if d == 3)
+ * -# -DDATA_TYPE = Input and output datatypes.
+ *
+ * @param[in] in_ptr Pointer to the source tensor. Supported data types: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32
+ * @param[in] in_stride_x Stride of the source tensor in X dimension (in bytes)
+ * @param[in] in_step_x input_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] in_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] in_step_y input_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] in_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] in_step_z input_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] in_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[out] out_ptr Pointer to the destination tensor. Supported data types: same as @p in_ptr
+ * @param[in] out_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] out_step_x output_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] out_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] out_step_y output_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] out_stride_z Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] out_step_z output_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] out_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ */
+__kernel void copy_pad_tensor(
+ TENSOR3D_DECLARATION(in),
+ TENSOR3D_DECLARATION(out))
+
+{
+ Tensor3D in = CONVERT_TO_TENSOR3D_STRUCT(in);
+ Tensor3D out = CONVERT_TO_TENSOR3D_STRUCT(out);
+
+ const int offset_x = PAD00;
+ const int offset_y = PAD10;
+ const int offset_z = PAD20;
+
+#if PAD30 > 0
+ const size_t in_batch = get_global_id(2) / DEPTH;
+ const int total_depth = DEPTH + PAD20 + PAD21;
+ const int offset_w = PAD30 * total_depth + in_batch * (PAD20 + PAD21);
+#else // PAD30 == 0
+ const int offset_w = 0;
+#endif // PAD30
+
+ VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
+ data = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)in.ptr);
+
+ VSTORE(VEC_SIZE)
+ (data, 0, (__global DATA_TYPE *)tensor3D_offset(&out, offset_x, offset_y, offset_z + offset_w));
+}
+#endif // Compile time constants
+
/** Performs a copy of input tensor to the output tensor.
*
* @param[in] in_ptr Pointer to the source tensor. Supported data types: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32
@@ -56,4 +110,4 @@ __kernel void copy_tensor(
// Store result
VSTORE(VEC_SIZE)
(data, 0, (__global DATA_TYPE *)out.ptr);
-} \ No newline at end of file
+}
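A plain C++ restatement of the destination-offset arithmetic used by copy_pad_tensor above (padded_z_offset is a hypothetical helper, shown only for illustration):

// Number of Z slices added to an input z coordinate so that the element lands
// in the right place of the padded output (offset_z + offset_w in the kernel).
int padded_z_offset(int in_batch, int depth, int pad20, int pad21, int pad30)
{
    const int total_depth = depth + pad20 + pad21; // depth of one padded batch
    const int offset_w    = (pad30 > 0) ? pad30 * total_depth + in_batch * (pad20 + pad21) : 0;
    return pad20 + offset_w;
}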
diff --git a/src/core/CL/cl_kernels/memset.cl b/src/core/CL/cl_kernels/memset.cl
new file mode 100644
index 0000000000..80b34ebdf4
--- /dev/null
+++ b/src/core/CL/cl_kernels/memset.cl
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "helpers.h"
+
+#if defined(DATA_TYPE) && defined(CONSTANT_VALUE) // Check for compile time constants
+
+/** Fill the tensor's planes with the given value
+ * @attention The following variables must be passed at compile time:
+ * -# -DDATA_TYPE = Tensor data type. Supported data types: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32
+ * -# -DCONSTANT_VALUE = The value used to fill the tensor's planes
+ * -# -DVEC_SIZE = Vector size
+ * -# -DLAST_ACCESSED_X = The last element accessible on the X border (work-items that would write past it step back so the vector store stays in bounds)
+ *
+ * @param[in] tensor_ptr Pointer to the source image. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32.
+ * @param[in] tensor_stride_x Stride of the source image in X dimension (in bytes)
+ * @param[in] tensor_step_x tensor_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] tensor_stride_y Stride of the source image in Y dimension (in bytes)
+ * @param[in] tensor_step_y tensor_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] tensor_offset_first_element_in_bytes The offset of the first element in the source image
+ * @note The fill value is passed as the compile-time constant -DCONSTANT_VALUE, not as a kernel argument
+ */
+__kernel void memset(
+ IMAGE_DECLARATION(tensor))
+{
+ Image tensor = CONVERT_TO_IMAGE_STRUCT(tensor);
+
+#if defined(VEC_SIZE) && defined(LAST_ACCESSED_X)
+ // Check if access on width gets out of bounds
+ // If it does shift access vector to access elements within bounds
+ const int xi = (int)(get_global_id(0) * VEC_SIZE);
+ tensor.ptr -= max(xi - (int)LAST_ACCESSED_X, 0) * tensor_stride_x;
+
+ VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
+ data = (DATA_TYPE)(CONSTANT_VALUE);
+
+ VSTORE(VEC_SIZE)
+ (data, 0, (__global DATA_TYPE *)tensor.ptr);
+#else // !defined(VEC_SIZE) || !defined(LAST_ACCESSED_X)
+ *((__global DATA_TYPE *)(tensor.ptr)) = (DATA_TYPE)(CONSTANT_VALUE);
+#endif // defined(VEC_SIZE) && defined(LAST_ACCESSED_X)
+}
+
+#endif // Check for compile time constants
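The LAST_ACCESSED_X handling above is the usual trick for vectorized stores when the row width is not a multiple of VEC_SIZE: the last work-items shift their start back so the whole vector still lands inside the row. A standalone C++ sketch with made-up sizes:

#include <algorithm>

void memset_row_sketch(float *row, int width, float value)
{
    const int vec_size = 4; // elements written per "work-item"

    if(width < vec_size) // the kernel falls back to the scalar path in this case
    {
        for(int i = 0; i < width; ++i)
        {
            row[i] = value;
        }
        return;
    }

    const int last_accessed_x = std::max(width - vec_size, 0); // -DLAST_ACCESSED_X
    const int num_items       = (width + vec_size - 1) / vec_size;

    for(int gid = 0; gid < num_items; ++gid) // one iteration per work-item
    {
        const int xi = gid * vec_size;
        const int x  = xi - std::max(xi - last_accessed_x, 0); // clamp so that x + vec_size <= width
        for(int i = 0; i < vec_size; ++i)
        {
            row[x + i] = value; // stands in for the kernel's vector store
        }
    }
}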
diff --git a/src/core/CL/kernels/CLCopyKernel.cpp b/src/core/CL/kernels/CLCopyKernel.cpp
index 2da67d2666..e14e5dafab 100644
--- a/src/core/CL/kernels/CLCopyKernel.cpp
+++ b/src/core/CL/kernels/CLCopyKernel.cpp
@@ -30,21 +30,22 @@
#include "arm_compute/core/Error.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/TensorInfo.h"
-#include "arm_compute/core/Validate.h"
#include "arm_compute/core/Window.h"
+#include "arm_compute/core/utils/misc/ShapeCalculator.h"
-using namespace arm_compute;
-
+namespace arm_compute
+{
namespace
{
-Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output)
+Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, const PaddingList &padding = PaddingList())
{
ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
+ ARM_COMPUTE_RETURN_ERROR_ON(padding.size() > 4);
// Validate output if initialized
if(output->total_size() != 0)
{
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(input->tensor_shape(), output->tensor_shape());
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(misc::shape_calculator::compute_padded_shape(input->tensor_shape(), padding), output->tensor_shape());
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
}
@@ -69,6 +70,64 @@ std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITen
Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
return std::make_pair(err, win);
}
+
+std::pair<Status, Window> validate_and_configure_window_with_padding(ITensorInfo *input, ITensorInfo *output, const PaddingList &padding)
+{
+ TensorShape input_shape = input->tensor_shape();
+ TensorShape padded_shape = misc::shape_calculator::compute_padded_shape(input_shape, padding);
+
+ auto_init_if_empty(*output, input->clone()->set_tensor_shape(padded_shape));
+
+ // Configure window
+ const unsigned int num_elems_processed_per_iteration = 16 / input->element_size();
+
+ Window win = calculate_max_window(*input, Steps(num_elems_processed_per_iteration));
+
+ // Pad on the x dimension accounting for the padding offset along the same dimension
+ AccessWindowHorizontal output_access(output, padding[0].first, num_elems_processed_per_iteration);
+ AccessWindowHorizontal input_access(input, 0, num_elems_processed_per_iteration);
+ bool window_changed = update_window_and_padding(win, input_access, output_access);
+
+ Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
+ return std::make_pair(err, win);
+}
+
+/** Generate the build option string "-DPAD<dim><index>=<padding>"
+ *
+ * @param[in] dim The dimension index
+ * @param[in] index 0 for the padding at the start of the dimension, 1 for the padding at the end
+ * @param[in] padding The value to pad for that index/dimension pair
+ *
+ * @return The correct concatenated string
+ */
+std::string generate_pad_string(const size_t dim, const size_t index, const size_t padding)
+{
+ return "-DPAD" + support::cpp11::to_string(dim) + support::cpp11::to_string(index) + "=" + support::cpp11::to_string(padding);
+}
+
+/** Pass the padding as build options to the kernel.
+ *
+ * @param[in]  padding    The list of paddings, one pair per dimension
+ * @param[out] build_opts The build options to which the padding options are added
+ */
+void add_padding_as_build_options(const PaddingList &padding, CLBuildOptions &build_opts)
+{
+ size_t dim = 0;
+ for(dim = 0; dim < padding.size(); dim++)
+ {
+ build_opts.add_option(generate_pad_string(dim, 0, padding[dim].first));
+ build_opts.add_option(generate_pad_string(dim, 1, padding[dim].second));
+ }
+
+ while(dim < TensorShape::num_max_dimensions)
+ {
+ build_opts.add_option(generate_pad_string(dim, 0, 0));
+ build_opts.add_option(generate_pad_string(dim, 1, 0));
+ dim++;
+ }
+}
+
} // namespace
CLCopyKernel::CLCopyKernel()
@@ -76,32 +135,68 @@ CLCopyKernel::CLCopyKernel()
{
}
-void CLCopyKernel::configure(const ICLTensor *input, ICLTensor *output)
+void CLCopyKernel::configure(const ICLTensor *input, ICLTensor *output, const PaddingList &padding)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info()));
+ ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), padding));
_input = input;
_output = output;
- const unsigned int num_elems_processed_per_iteration = 16 / input->info()->element_size();
-
// Create kernel
CLBuildOptions build_opts;
build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(input->info()->data_type()));
+
+ const unsigned int num_elems_processed_per_iteration = 16 / input->info()->element_size();
build_opts.add_option("-DVEC_SIZE=" + support::cpp11::to_string(num_elems_processed_per_iteration));
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("copy_tensor", build_opts.options()));
- // Configure kernel window
- auto win_config = validate_and_configure_window(input->info(), output->info());
+ std::pair<Status, Window> win_config;
+
+ if(padding.empty())
+ {
+ // Build kernel
+ _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("copy_tensor", build_opts.options()));
+
+ // Configure window
+ win_config = validate_and_configure_window(input->info(), output->info());
+ }
+ else
+ {
+ // Add compile time options
+ add_padding_as_build_options(padding, build_opts);
+
+ // If we are padding in the fourth dimension, the kernel needs to know the depth
+ // of the individual 3D volumes in the batch
+ if(padding.size() == 4)
+ {
+ const size_t depth = input->info()->tensor_shape()[2];
+ build_opts.add_option("-DDEPTH=" + support::cpp11::to_string(depth));
+ }
+
+ // Build kernel
+ _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("copy_pad_tensor", build_opts.options()));
+
+ // Configure window
+ win_config = validate_and_configure_window_with_padding(input->info(), output->info(), padding);
+ }
+
+ // Validate and set the window
ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
ICLKernel::configure_internal(win_config.second);
}
-Status CLCopyKernel::validate(const arm_compute::ITensorInfo *input, const arm_compute::ITensorInfo *output)
+Status CLCopyKernel::validate(const arm_compute::ITensorInfo *input, const arm_compute::ITensorInfo *output, const PaddingList &padding)
{
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output));
- ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(), output->clone().get()).first);
+ ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, padding));
+
+ if(padding.empty())
+ {
+ ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(), output->clone().get()).first);
+ }
+ else
+ {
+ ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window_with_padding(input->clone().get(), output->clone().get(), padding).first);
+ }
return Status{};
}
@@ -123,3 +218,4 @@ void CLCopyKernel::run(const Window &window, cl::CommandQueue &queue)
}
while(collapsed.slide_window_slice_3D(slice));
}
+} // namespace arm_compute
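To make the -DPAD options concrete: for a padding of {{1, 1}, {2, 2}} on a 3D tensor the kernel is compiled with -DPAD00=1 -DPAD01=1 -DPAD10=2 -DPAD11=2 and zeros for the remaining dimensions. A standalone sketch of that expansion (mirroring, not reusing, the anonymous-namespace helpers above):

#include <string>
#include <utility>
#include <vector>

std::vector<std::string> pad_build_options(const std::vector<std::pair<unsigned int, unsigned int>> &padding,
                                           size_t num_max_dimensions = 6 /* TensorShape::num_max_dimensions */)
{
    std::vector<std::string> opts;
    for(size_t dim = 0; dim < num_max_dimensions; ++dim)
    {
        const unsigned int before = (dim < padding.size()) ? padding[dim].first : 0;
        const unsigned int after  = (dim < padding.size()) ? padding[dim].second : 0;
        opts.push_back("-DPAD" + std::to_string(dim) + "0=" + std::to_string(before));
        opts.push_back("-DPAD" + std::to_string(dim) + "1=" + std::to_string(after));
    }
    return opts;
}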
diff --git a/src/core/CL/kernels/CLMemsetKernel.cpp b/src/core/CL/kernels/CLMemsetKernel.cpp
new file mode 100644
index 0000000000..ab53897543
--- /dev/null
+++ b/src/core/CL/kernels/CLMemsetKernel.cpp
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/core/CL/kernels/CLMemsetKernel.h"
+
+#include "arm_compute/core/AccessWindowStatic.h"
+#include "arm_compute/core/CL/CLHelpers.h"
+#include "arm_compute/core/CL/CLKernelLibrary.h"
+#include "arm_compute/core/CL/ICLTensor.h"
+#include "arm_compute/core/Helpers.h"
+#include "arm_compute/core/Utils.h"
+#include "arm_compute/core/Validate.h"
+#include "arm_compute/core/Window.h"
+
+namespace arm_compute
+{
+CLMemsetKernel::CLMemsetKernel()
+ : ICLKernel(), _tensor(nullptr)
+{
+}
+
+void CLMemsetKernel::configure(ICLTensor *tensor,
+ const PixelValue &constant_value)
+{
+ ARM_COMPUTE_ERROR_ON_NULLPTR(tensor);
+ _tensor = tensor;
+
+ const DataType data_type = tensor->info()->data_type();
+ const int vec_size_x = 16 / tensor->info()->element_size();
+ const int output_width_x = tensor->info()->tensor_shape().x();
+ const bool multi_access_x = (output_width_x / vec_size_x > 0);
+
+ // Create and update the window (if needed)
+ Window win = calculate_max_window(*tensor->info());
+ if(multi_access_x)
+ {
+ win.set(Window::DimX,
+ Window::Dimension(win.x().start(), ceil_to_multiple(win.x().end(), vec_size_x), vec_size_x));
+ }
+ ICLKernel::configure_internal(win);
+
+ // Create kernel
+ CLBuildOptions build_opts;
+ build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(data_type));
+ build_opts.add_option("-DCONSTANT_VALUE=" + string_from_pixel_value(constant_value, data_type));
+ build_opts.add_option_if(multi_access_x, "-DVEC_SIZE=" + support::cpp11::to_string(vec_size_x));
+ build_opts.add_option_if(multi_access_x, "-DLAST_ACCESSED_X=" + support::cpp11::to_string(std::max<int>(output_width_x - vec_size_x, 0)));
+ _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("memset", build_opts.options()));
+}
+
+Status CLMemsetKernel::validate(const ITensorInfo *tensor, const PixelValue &constant_value)
+{
+ ARM_COMPUTE_UNUSED(tensor);
+ ARM_COMPUTE_UNUSED(constant_value);
+ return Status{};
+}
+
+void CLMemsetKernel::run(const Window &window, cl::CommandQueue &queue)
+{
+ ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
+ ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);
+
+ // Collapse all the batches on the third dimension
+ Window collapsed = window.collapse_if_possible(ICLKernel::window(), Window::DimY);
+ Window slice = collapsed.first_slice_window_2D();
+
+ do
+ {
+ unsigned int idx = 0;
+ add_2D_tensor_argument(idx, _tensor, slice);
+ enqueue(queue, *this, slice);
+ }
+ while(collapsed.slide_window_slice_2D(slice));
+}
+} // namespace arm_compute
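A hedged usage sketch of the new kernel in isolation, assuming an initialized CL scheduler and an allocated tensor:

#include "arm_compute/core/CL/kernels/CLMemsetKernel.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/CLTensor.h"

using namespace arm_compute;

void memset_example(CLTensor &tensor)
{
    CLMemsetKernel memset_kernel;
    memset_kernel.configure(&tensor, PixelValue(0.f)); // fill every element with zero
    CLScheduler::get().enqueue(memset_kernel, true);   // enqueue and flush the queue
}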
diff --git a/src/core/Utils.cpp b/src/core/Utils.cpp
index 11bdbdafe0..229579d8d9 100644
--- a/src/core/Utils.cpp
+++ b/src/core/Utils.cpp
@@ -252,6 +252,55 @@ const std::string &arm_compute::string_from_pooling_type(PoolingType type)
return pool_type_map[type];
}
+std::string arm_compute::string_from_pixel_value(const PixelValue &value, const DataType data_type)
+{
+ std::stringstream ss;
+ std::string converted_string;
+
+ switch(data_type)
+ {
+ case DataType::U8:
+ case DataType::QASYMM8:
+ // Needs conversion to 32 bit, otherwise interpreted as ASCII values
+ ss << uint32_t(value.get<uint8_t>());
+ converted_string = ss.str();
+ break;
+ case DataType::S8:
+ // Needs conversion to 32 bit, otherwise interpreted as ASCII values
+ ss << int32_t(value.get<int8_t>());
+ converted_string = ss.str();
+ break;
+ case DataType::U16:
+ ss << value.get<uint16_t>();
+ converted_string = ss.str();
+ break;
+ case DataType::S16:
+ ss << value.get<int16_t>();
+ converted_string = ss.str();
+ break;
+ case DataType::U32:
+ ss << value.get<uint32_t>();
+ converted_string = ss.str();
+ break;
+ case DataType::S32:
+ ss << value.get<int32_t>();
+ converted_string = ss.str();
+ break;
+ case DataType::F32:
+ converted_string = float_to_string_with_full_precision(value.get<float>());
+ break;
+ case DataType::F16:
+ static_assert(sizeof(half) == 2, "Half must be 16 bit");
+ ss << value.get<half>();
+ converted_string = ss.str();
+ break;
+ default:
+ ARM_COMPUTE_ERROR("Not handled");
+ }
+
+ return converted_string;
+}
+
std::string arm_compute::lower_string(const std::string &val)
{
std::string res = val;
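For illustration, two hedged examples of the conversion above (assuming the per-type PixelValue constructors from PixelValue.h):

#include "arm_compute/core/PixelValue.h"
#include "arm_compute/core/Utils.h"

#include <cstdint>
#include <string>

using namespace arm_compute;

const std::string u8_str  = string_from_pixel_value(PixelValue(uint8_t(200)), DataType::U8); // "200" rather than the ASCII character
const std::string f32_str = string_from_pixel_value(PixelValue(1.5f), DataType::F32);        // full-precision float literal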
diff --git a/src/runtime/CL/functions/CLPadLayer.cpp b/src/runtime/CL/functions/CLPadLayer.cpp
new file mode 100644
index 0000000000..de43c7dca2
--- /dev/null
+++ b/src/runtime/CL/functions/CLPadLayer.cpp
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/runtime/CL/functions/CLPadLayer.h"
+
+#include "arm_compute/core/CL/ICLTensor.h"
+#include "arm_compute/core/Types.h"
+#include "support/ToolchainSupport.h"
+
+namespace arm_compute
+{
+CLPadLayer::CLPadLayer()
+ : _copy_kernel(), _fillborder_kernel(), _memset_kernel()
+{
+}
+
+void CLPadLayer::configure(ICLTensor *input, ICLTensor *output, const PaddingList &padding)
+{
+ // Copy the input to the output
+ _copy_kernel.configure(input, output, padding);
+
+ // Zero out the output so that the padding area is filled with zeros
+ _memset_kernel.configure(output, PixelValue());
+
+ // Fill padding on the first two dimensions with zeros
+ _fillborder_kernel.configure(input, input->info()->padding(), BorderMode::CONSTANT);
+}
+
+Status CLPadLayer::validate(const ITensorInfo *input, const ITensorInfo *output, const PaddingList &padding)
+{
+ ARM_COMPUTE_RETURN_ON_ERROR(CLMemsetKernel::validate(input, PixelValue()));
+ ARM_COMPUTE_RETURN_ON_ERROR(CLCopyKernel::validate(input, output, padding));
+
+ return Status{};
+}
+
+void CLPadLayer::run()
+{
+ CLScheduler::get().enqueue(_memset_kernel, false);
+ CLScheduler::get().enqueue(_fillborder_kernel, false);
+ CLScheduler::get().enqueue(_copy_kernel, true);
+}
+} // namespace arm_compute
diff --git a/tests/validation/CL/Padding.cpp b/tests/validation/CL/Padding.cpp
new file mode 100644
index 0000000000..2548226e65
--- /dev/null
+++ b/tests/validation/CL/Padding.cpp
@@ -0,0 +1,135 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/runtime/CL/CLScheduler.h"
+#include "arm_compute/runtime/CL/functions/CLPadLayer.h"
+#include "tests/CL/CLAccessor.h"
+#include "tests/Globals.h"
+#include "tests/datasets/ShapeDatasets.h"
+#include "tests/framework/Macros.h"
+#include "tests/framework/datasets/Datasets.h"
+#include "tests/validation/Validation.h"
+#include "tests/validation/fixtures/PadLayerFixture.h"
+#include "utils/TypePrinter.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+namespace
+{
+const auto PaddingSizesDataset = framework::dataset::make("PaddingSize", { PaddingList{ { 0, 0 } },
+ PaddingList{ { 1, 1 } },
+ PaddingList{ { 1, 1 }, { 2, 2 } },
+ PaddingList{ { 1, 1 }, { 1, 1 }, { 1, 1 }, { 1, 1 } },
+ PaddingList{ { 0, 0 }, { 1, 0 }, { 0, 1 }, { 1, 2 } },
+ PaddingList{ { 0, 0 }, { 0, 0 }, { 0, 0 }, { 1, 1 } }
+});
+} // namespace
+
+TEST_SUITE(CL)
+TEST_SUITE(Padding)
+
+// *INDENT-OFF*
+// clang-format off
+
+DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
+ framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Mismatching data type input/output
+ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Mismatching shapes
+ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32),
+ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32),
+ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32),
+ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32)
+ }),
+ framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F16),
+ TensorInfo(TensorShape(28U, 11U, 2U), 1, DataType::F32),
+ TensorInfo(TensorShape(29U, 17U, 2U), 1, DataType::F32),
+ TensorInfo(TensorShape(29U, 15U, 4U, 3U), 1, DataType::F32),
+ TensorInfo(TensorShape(27U, 14U, 3U, 4U), 1, DataType::F32),
+ TensorInfo(TensorShape(32U, 13U, 2U, 3U), 1, DataType::F32)
+ })),
+ framework::dataset::make("PaddingSize", { PaddingList{{0, 0}},
+ PaddingList{{1, 1}},
+ PaddingList{{1, 1}, {2, 2}},
+ PaddingList{{1,1}, {1,1}, {1,1}, {1,1}},
+ PaddingList{{0,0}, {1,0}, {0,1}, {1,2}},
+ PaddingList{{0,0}, {0,0}, {0,0}, {1,1}}
+ })),
+ framework::dataset::make("Expected", { false, false, true, true, true, true })),
+ input_info, output_info, padding, expected)
+{
+ ARM_COMPUTE_EXPECT(bool(CLPadLayer::validate(&input_info.clone()->set_is_resizable(true), &output_info.clone()->set_is_resizable(true), padding)) == expected, framework::LogLevel::ERRORS);
+}
+
+// clang-format on
+// *INDENT-ON*
+
+template <typename T>
+using CLPaddingFixture = PaddingFixture<CLTensor, CLAccessor, CLPadLayer, T>;
+
+TEST_SUITE(Float)
+
+TEST_SUITE(FP32)
+
+FIXTURE_DATA_TEST_CASE(RunPadding, CLPaddingFixture<float>, framework::DatasetMode::ALL,
+ combine(
+ combine(datasets::SmallShapes(), framework::dataset::make("DataType", { DataType::F32 })),
+ PaddingSizesDataset))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference);
+}
+
+TEST_SUITE_END() // FP32
+
+TEST_SUITE(FP16)
+FIXTURE_DATA_TEST_CASE(RunPadding, CLPaddingFixture<half>, framework::DatasetMode::ALL,
+ combine(
+ combine(datasets::SmallShapes(), framework::dataset::make("DataType", { DataType::F16 })),
+ PaddingSizesDataset))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference);
+}
+TEST_SUITE_END() // FP16
+
+TEST_SUITE_END() // Float
+
+TEST_SUITE(S8)
+FIXTURE_DATA_TEST_CASE(RunPadding, CLPaddingFixture<int8_t>, framework::DatasetMode::ALL,
+ combine(
+ combine(datasets::SmallShapes(), framework::dataset::make("DataType", { DataType::S8 })),
+ PaddingSizesDataset))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference);
+}
+TEST_SUITE_END() // S8
+
+TEST_SUITE_END() // Padding
+TEST_SUITE_END() // CL
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
diff --git a/tests/validation/fixtures/PadLayerFixture.h b/tests/validation/fixtures/PadLayerFixture.h
new file mode 100644
index 0000000000..72ce57dc9f
--- /dev/null
+++ b/tests/validation/fixtures/PadLayerFixture.h
@@ -0,0 +1,112 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_TEST_PADLAYER_FIXTURE
+#define ARM_COMPUTE_TEST_PADLAYER_FIXTURE
+
+#include "arm_compute/core/TensorShape.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/runtime/CL/functions/CLPadLayer.h"
+#include "tests/AssetsLibrary.h"
+#include "tests/Globals.h"
+#include "tests/IAccessor.h"
+#include "tests/framework/Asserts.h"
+#include "tests/framework/Fixture.h"
+#include "tests/validation/Helpers.h"
+#include "tests/validation/reference/PadLayer.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class PaddingFixture : public framework::Fixture
+{
+public:
+ template <typename...>
+ void setup(TensorShape shape, DataType data_type, const PaddingList &padding)
+ {
+ _target = compute_target(shape, data_type, padding);
+ _reference = compute_reference(shape, data_type, padding);
+ }
+
+protected:
+ template <typename U>
+ void fill(U &&tensor)
+ {
+ library->fill_tensor_uniform(tensor, 0);
+ }
+
+ TensorType compute_target(const TensorShape &shape,
+ DataType data_type,
+ const PaddingList &paddings)
+ {
+ // Create tensors
+ TensorType src = create_tensor<TensorType>(shape, data_type);
+ TensorType dst;
+
+ // Create and configure function
+ FunctionType padding;
+ padding.configure(&src, &dst, paddings);
+
+ ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+
+ // Allocate tensors
+ src.allocator()->allocate();
+ dst.allocator()->allocate();
+
+ ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+
+ // Fill tensors
+ fill(AccessorType(src));
+
+ // Compute function
+ padding.run();
+
+ return dst;
+ }
+
+ SimpleTensor<T> compute_reference(const TensorShape &shape, DataType data_type,
+ const PaddingList &paddings)
+ {
+ // Create reference tensor
+ SimpleTensor<T> src{ shape, data_type };
+
+ // Fill reference tensor
+ fill(src);
+
+ return reference::pad_layer(src, paddings);
+ }
+
+ TensorType _target{};
+ SimpleTensor<T> _reference{};
+};
+
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_TEST_PADLAYER_FIXTURE */
diff --git a/tests/validation/reference/PadLayer.cpp b/tests/validation/reference/PadLayer.cpp
new file mode 100644
index 0000000000..0a3b38d697
--- /dev/null
+++ b/tests/validation/reference/PadLayer.cpp
@@ -0,0 +1,109 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "PadLayer.h"
+
+#include "arm_compute/core/Types.h"
+#include "arm_compute/core/utils/misc/ShapeCalculator.h"
+#include "tests/validation/Helpers.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+namespace reference
+{
+template <typename T>
+SimpleTensor<T> pad_layer(const SimpleTensor<T> &src, const PaddingList &paddings)
+{
+ DataType dst_data_type = src.data_type();
+
+ TensorShape orig_shape = src.shape();
+
+ std::vector<PaddingInfo> paddings_extended = paddings;
+
+ for(size_t i = paddings.size(); i < TensorShape::num_max_dimensions; i++)
+ {
+ paddings_extended.emplace_back(PaddingInfo{ 0, 0 });
+ }
+
+ TensorShape padded_shape = misc::shape_calculator::compute_padded_shape(orig_shape, paddings);
+
+ SimpleTensor<T> dst(padded_shape, dst_data_type);
+
+ // Reference algorithm: loop over every element of the padded output.
+ for(int idx = 0; idx < dst.num_elements(); idx++)
+ {
+ Coordinates coord = index2coord(padded_shape, idx);
+
+ const size_t i = coord.x();
+ const size_t j = coord.y();
+ const size_t k = coord.z();
+ const size_t l = coord[3];
+ const size_t m = coord[4];
+ const size_t n = coord[5];
+
+ std::array<size_t, TensorShape::num_max_dimensions> dims = { 0, 1, 2, 3, 4, 5 };
+ std::array<size_t, TensorShape::num_max_dimensions> coords = { i, j, k, l, m, n };
+ auto is_padding_area = [&](size_t i)
+ {
+ return (coords[i] < paddings_extended[i].first || coords[i] > orig_shape[i] + paddings_extended[i].first - 1);
+ };
+
+ // If the tuple [i,j,k,l,m,n] is in the padding area, then simply set the value to zero
+ if(std::any_of(dims.begin(), dims.end(), is_padding_area))
+ {
+ dst[idx] = T(0);
+ }
+ else
+ {
+ // If the tuple [i,j,k,l,m,n] is not in the padding area, then copy the input into the output
+
+ Coordinates orig_coords{ i - paddings_extended[0].first,
+ j - paddings_extended[1].first,
+ k - paddings_extended[2].first,
+ l - paddings_extended[3].first,
+ m - paddings_extended[4].first,
+ n - paddings_extended[5].first };
+
+ const size_t idx_src = coord2index(orig_shape, orig_coords);
+ dst[idx] = src[idx_src];
+ }
+ }
+
+ return dst;
+}
+
+template SimpleTensor<float> pad_layer(const SimpleTensor<float> &src, const PaddingList &paddings);
+template SimpleTensor<half> pad_layer(const SimpleTensor<half> &src, const PaddingList &paddings);
+template SimpleTensor<uint32_t> pad_layer(const SimpleTensor<uint32_t> &src, const PaddingList &paddings);
+template SimpleTensor<uint8_t> pad_layer(const SimpleTensor<uint8_t> &src, const PaddingList &paddings);
+template SimpleTensor<int8_t> pad_layer(const SimpleTensor<int8_t> &src, const PaddingList &paddings);
+template SimpleTensor<uint16_t> pad_layer(const SimpleTensor<uint16_t> &src, const PaddingList &paddings);
+template SimpleTensor<int16_t> pad_layer(const SimpleTensor<int16_t> &src, const PaddingList &paddings);
+} // namespace reference
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
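As a sanity check on the reference behaviour, a tiny standalone 1-D analogue using plain std::vector rather than the SimpleTensor API: src = {1, 2, 3} with padding (1, 2) becomes {0, 1, 2, 3, 0, 0}.

#include <cstddef>
#include <vector>

std::vector<int> pad_1d(const std::vector<int> &src, size_t before, size_t after)
{
    std::vector<int> dst(before + src.size() + after, 0); // padding area is zero-filled
    for(size_t i = 0; i < src.size(); ++i)
    {
        dst[i + before] = src[i]; // interior elements are shifted by 'before'
    }
    return dst;
}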
diff --git a/tests/validation/reference/PadLayer.h b/tests/validation/reference/PadLayer.h
new file mode 100644
index 0000000000..9406b05c4d
--- /dev/null
+++ b/tests/validation/reference/PadLayer.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_TEST_PADLAYER_H__
+#define __ARM_COMPUTE_TEST_PADLAYER_H__
+
+#include "arm_compute/core/Types.h"
+#include "tests/SimpleTensor.h"
+#include "tests/validation/Helpers.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+namespace reference
+{
+/** Reference function to pad an ND tensor. This function is not supposed to be optimized, but to
+ * clearly and naively execute the padding of a tensor
+ *
+ * @param[in] src Tensor to pad
+ * @param[in] paddings Padding size in each dimension
+ *
+ * @return The padded Tensor
+ */
+template <typename T>
+SimpleTensor<T> pad_layer(const SimpleTensor<T> &src, const PaddingList &paddings);
+} // namespace reference
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_TEST_PADLAYER_H__ */
diff --git a/utils/TypePrinter.h b/utils/TypePrinter.h
index 1d881f8c06..d31c16c32b 100644
--- a/utils/TypePrinter.h
+++ b/utils/TypePrinter.h
@@ -769,6 +769,24 @@ inline ::std::ostream &operator<<(::std::ostream &os, const BorderSize &border)
return os;
}
+/** Formatted output of the PaddingList type.
+ *
+ * @param[out] os Output stream.
+ * @param[in] padding Type to output.
+ *
+ * @return Modified output stream.
+ */
+inline ::std::ostream &operator<<(::std::ostream &os, const PaddingList &padding)
+{
+ os << "{";
+ for(auto const &p : padding)
+ {
+ os << "{" << p.first << "," << p.second << "}";
+ }
+ os << "}";
+ return os;
+}
+
/** Formatted output of the InterpolationPolicy type.
*
* @param[out] os Output stream.
@@ -1108,6 +1126,19 @@ inline std::string to_string(const BorderSize &border)
return str.str();
}
+/** Formatted output of the PaddingList type.
+ *
+ * @param[in] padding Type to output.
+ *
+ * @return Formatted string.
+ */
+inline std::string to_string(const PaddingList &padding)
+{
+ std::stringstream str;
+ str << padding;
+ return str.str();
+}
+
/** Formatted output of the InterpolationPolicy type.
*
* @param[in] policy Type to output.