author    Michalis Spyrou <michalis.spyrou@arm.com>    2018-04-18 09:49:16 +0100
committer Anthony Barbier <anthony.barbier@arm.com>    2018-11-02 16:50:48 +0000
commit    e74b201ca1abca040ca9f30837fdf19aa610e7c4 (patch)
tree      28a9022c564e40a410c66716467d4133574fec7b
parent    2213d4b334567d0cb7f283090d42b5fb1b70f66b (diff)
download  ComputeLibrary-e74b201ca1abca040ca9f30837fdf19aa610e7c4.tar.gz
COMPMID-805 Add NHWC data format support for CL pooling
Change-Id: I3d91fde78b971aba3f6349f633cd9b1c50e5cacf
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/124712
Reviewed-by: Gian Marco Iodice <gianmarco.iodice@arm.com>
Tested-by: Jenkins <bsgcomp@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
-rw-r--r--  arm_compute/core/utils/misc/ShapeCalculator.h       25
-rw-r--r--  src/core/CL/CLKernelLibrary.cpp                       6
-rw-r--r--  src/core/CL/cl_kernels/pooling_layer.cl             100
-rw-r--r--  src/core/CL/cl_kernels/pooling_layer_quantized.cl    82
-rw-r--r--  src/core/CL/kernels/CLPoolingLayerKernel.cpp        341
-rw-r--r--  src/runtime/CL/functions/CLPoolingLayer.cpp          23
-rw-r--r--  tests/validation/CL/GlobalPoolingLayer.cpp            4
-rw-r--r--  tests/validation/CL/PoolingLayer.cpp                 13
8 files changed, 440 insertions, 154 deletions
diff --git a/arm_compute/core/utils/misc/ShapeCalculator.h b/arm_compute/core/utils/misc/ShapeCalculator.h
index 8d4c024f62..7d07d4619b 100644
--- a/arm_compute/core/utils/misc/ShapeCalculator.h
+++ b/arm_compute/core/utils/misc/ShapeCalculator.h
@@ -324,6 +324,30 @@ inline TensorShape compute_min_max_shape(const ITensorInfo *input)
return output_shape;
}
+inline TensorShape compute_pool_shape(const ITensorInfo &input, PoolingLayerInfo pool_info)
+{
+ unsigned int pooled_w = 0;
+ unsigned int pooled_h = 0;
+
+ const bool is_global_pooling = pool_info.is_global_pooling();
+ const int idx_width = get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::WIDTH);
+ const int idx_height = get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::HEIGHT);
+ const unsigned int pool_size_x = is_global_pooling ? input.tensor_shape()[idx_width] : pool_info.pool_size().width;
+ const unsigned int pool_size_y = is_global_pooling ? input.tensor_shape()[idx_height] : pool_info.pool_size().height;
+
+ std::tie(pooled_w, pooled_h) = scaled_dimensions(input.dimension(idx_width),
+ input.dimension(idx_height),
+ pool_size_x,
+ pool_size_y,
+ pool_info.pad_stride_info());
+
+ TensorShape output_shape{ input.tensor_shape() };
+ output_shape.set(get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::WIDTH), pooled_w);
+ output_shape.set(get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::HEIGHT), pooled_h);
+
+ return output_shape;
+}
+
inline TensorShape compute_rnn_shape(const ITensorInfo *input, const unsigned int batch_size)
{
TensorShape output_shape{ input->tensor_shape() };
@@ -331,7 +355,6 @@ inline TensorShape compute_rnn_shape(const ITensorInfo *input, const unsigned in
return output_shape;
}
-
} // namespace shape_calculator
} // namespace misc
} // namespace arm_compute
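
Note: compute_pool_shape() gives validate() and configure() a single, layout-aware source of truth for the pooled output shape. A minimal usage sketch follows (hypothetical tensor sizes, not from this patch; assumes TensorInfo::set_data_layout() as used elsewhere in the library):

#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"

using namespace arm_compute;

TensorShape pooled_shape_example()
{
    // NHWC shapes are ordered (C, W, H, N): 64 channels over a 32x32 plane, batch 1
    TensorInfo input(TensorShape(64U, 32U, 32U, 1U), 1, DataType::F32);
    input.set_data_layout(DataLayout::NHWC);

    // 2x2 average pooling, stride 2, no padding
    const PoolingLayerInfo pool_info(PoolingType::AVG, 2, PadStrideInfo(2, 2, 0, 0));

    // Only the W and H indices (1 and 2 for NHWC) are rescaled; the result is (64, 16, 16, 1)
    return misc::shape_calculator::compute_pool_shape(input, pool_info);
}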
diff --git a/src/core/CL/CLKernelLibrary.cpp b/src/core/CL/CLKernelLibrary.cpp
index 1c773bc42f..577ba762cb 100644
--- a/src/core/CL/CLKernelLibrary.cpp
+++ b/src/core/CL/CLKernelLibrary.cpp
@@ -313,8 +313,10 @@ const std::map<std::string, std::string> CLKernelLibrary::_kernel_program_map =
{ "pooling_layer_3", "pooling_layer.cl" },
{ "pooling_layer_optimized_3", "pooling_layer.cl" },
{ "pooling_layer_7", "pooling_layer.cl" },
- { "pooling_layer_MxN", "pooling_layer.cl" },
- { "pooling_layer_MxN_quantized", "pooling_layer_quantized.cl" },
+ { "pooling_layer_MxN_nchw", "pooling_layer.cl" },
+ { "pooling_layer_MxN_nhwc", "pooling_layer.cl" },
+ { "pooling_layer_MxN_quantized_nhwc", "pooling_layer_quantized.cl" },
+ { "pooling_layer_MxN_quantized_nchw", "pooling_layer_quantized.cl" },
{ "quantization_layer", "quantization_layer.cl" },
{ "reduction_operation", "reduction_operation.cl" },
{ "remap_nearest_neighbour", "remap.cl" },
diff --git a/src/core/CL/cl_kernels/pooling_layer.cl b/src/core/CL/cl_kernels/pooling_layer.cl
index dae0b99908..2c7ddfdf23 100644
--- a/src/core/CL/cl_kernels/pooling_layer.cl
+++ b/src/core/CL/cl_kernels/pooling_layer.cl
@@ -62,6 +62,8 @@
#endif /* FIXED_POINT_POSITION */
+#define DIV_OP_NHWC(x, y) (x * (VEC_DATA_TYPE(DATA_TYPE, 8))(1.f / y))
+
#if STRIDE_X == 1
#define POOLING3x3(res, input, output) POOLING3x3_STRIDE1(res, input, output)
#elif STRIDE_X == 2 /* STRIDE_X == 1 */
@@ -423,7 +425,7 @@ __kernel void pooling_layer_optimized_3(
#endif // POOL_AVG
-/** Performs a pooling function of pool size equal to N
+/** Performs a pooling function of pool size equal to N (NCHW)
*
* @note Datatype must be passed using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types are QS8/QS16/F16/F32;
* @note -DFP16 must be passed at compile time if half float data type is used
@@ -451,7 +453,7 @@ __kernel void pooling_layer_optimized_3(
* @param[in] output_step_z output_stride_z * number of elements along Z processed per workitem(in bytes)
* @param[in] output_offset_first_element_in_bytes The offset of the first element in the destination image
*/
-__kernel void pooling_layer_MxN(
+__kernel void pooling_layer_MxN_nchw(
TENSOR3D_DECLARATION(input),
TENSOR3D_DECLARATION(output))
{
@@ -512,3 +514,97 @@ __kernel void pooling_layer_MxN(
*(__global DATA_TYPE *)output.ptr = res;
}
#endif // defined(POOL_SIZE_X) && defined(POOL_SIZE_Y)
+
+DATA_TYPE calculate_avg_scale_nhwc(const int pool_size_x, const int pool_size_y, int upper_bound_w, int upper_bound_h,
+ const int pad_x, const int pad_y, const int stride_x, const int stride_y)
+{
+ int start_x = get_global_id(1) * stride_x - pad_x;
+ int start_y = get_global_id(2) * stride_y - pad_y;
+
+#if !defined(EXCLUDE_PADDING)
+ upper_bound_w += pad_x;
+ upper_bound_h += pad_y;
+#endif /* defined(EXCLUDE_PADDING) */
+ const int end_x = min(start_x + pool_size_x, upper_bound_w);
+ const int end_y = min(start_y + pool_size_y, upper_bound_h);
+#if defined(EXCLUDE_PADDING)
+ start_x = max(0, start_x);
+ start_y = max(0, start_y);
+#endif /* defined(EXCLUDE_PADDING) */
+ return ((end_y - start_y) * (end_x - start_x));
+}
+
+/** Performs a pooling function of pool size equal to N (NHWC)
+ *
+ * @note Datatype must be passed using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types are F16/F32
+ * @note -DFP16 must be passed at compile time if half float data type is used
+ * @note Pool sizes must be passed using -DPOOL_SIZE_X and -DPOOL_SIZE_Y e.g. -DPOOL_SIZE_X=13;
+ * @note Tensors width and height must be passed at compile time using -DMAX_WIDTH and -DMAX_HEIGHT
+ * @note Strides must be passed at compile time using -DSTRIDE_X and -DSTRIDE_Y which are the steps of the window along the x and y directions
+ * @note Pad values must be passed at compile time using -DPAD_X and -DPAD_Y which are the pooling paddings in x and y dimension
+ * @note In case of average pooling the following information must be passed at compile time:
+ * -DPOOL_AVG must be provided otherwise max pooling will be performed.
+ *
+ * @param[in] input_ptr Pointer to the source image. Supported data types: F16/F32
+ * @param[in] input_stride_x Stride of the source image in X dimension (in bytes)
+ * @param[in] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] input_stride_y Stride of the source image in Y dimension (in bytes)
+ * @param[in] input_step_y input_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] input_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] input_step_z input_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] input_offset_first_element_in_bytes The offset of the first element in the source image
+ * @param[out] output_ptr Pointer to the destination image. Supported data types: same as @p input_ptr
+ * @param[in] output_stride_x Stride of the destination image in X dimension (in bytes)
+ * @param[in] output_step_x output_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] output_stride_y Stride of the destination image in Y dimension (in bytes)
+ * @param[in] output_step_y output_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] output_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] output_step_z output_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] output_offset_first_element_in_bytes The offset of the first element in the destination image
+ */
+__kernel void pooling_layer_MxN_nhwc(
+ TENSOR3D_DECLARATION(input),
+ TENSOR3D_DECLARATION(output))
+{
+ // Get pixels pointer
+ Tensor3D input = CONVERT_TO_TENSOR3D_STRUCT(input);
+ Tensor3D output = CONVERT_TO_TENSOR3D_STRUCT(output);
+
+ VEC_DATA_TYPE(DATA_TYPE, 8)
+ vdata = INITIAL_VALUE;
+ DATA_TYPE sdata = INITIAL_VALUE;
+
+ const int idx_width = get_global_id(1) * STRIDE_X;
+ const int idx_height = get_global_id(2) * STRIDE_Y;
+
+ for(int y = 0; y < POOL_SIZE_Y; ++y)
+ {
+ int y1 = select(y, PAD_Y - idx_height, y + idx_height < PAD_Y || y + idx_height > MAX_HEIGHT);
+ for(int x = 0; x < POOL_SIZE_X; ++x)
+ {
+ int x1 = select(x, PAD_X - idx_width - 1, x + idx_width < PAD_X || x + idx_width > MAX_WIDTH);
+ x1 = select(x1, PAD_X - idx_width - 1, y != y1);
+
+ VEC_DATA_TYPE(DATA_TYPE, 8)
+ data0 = vload8(0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, x1 - PAD_X, y1 - PAD_Y));
+#if defined(POOL_L2)
+ // Raise to power of 2 for L2 Pooling
+ data0 *= data0;
+#endif /* defined(POOL_L2) */
+ vdata = POOL_OP(vdata, data0);
+ }
+ }
+
+#if defined(POOL_AVG) || defined(POOL_L2)
+ // Divide by pool region in case of average pooling
+ vdata = DIV_OP_NHWC(vdata, calculate_avg_scale_nhwc(POOL_SIZE_X, POOL_SIZE_Y, MAX_WIDTH, MAX_HEIGHT, PAD_X, PAD_Y, STRIDE_X, STRIDE_Y));
+#endif /* defined(POOL_AVG) || defined(POOL_L2) */
+
+#if defined(POOL_L2)
+ // Take square root of the result in L2 pooling
+ vdata = SQRT_OP(vdata);
+#endif /* defined(POOL_L2) */
+
+ // Store result
+ vstore8(vdata, 0, (__global DATA_TYPE *)output.ptr);
+}
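
Note: in NHWC the innermost dimension is the channel, so each pooling_layer_MxN_nhwc work item loads 8 consecutive channels with vload8 and walks the pooling window along W and H through tensor3D_offset; out-of-bounds reads are redirected by select() into the border region that the border handler fills with the pooling identity value. A scalar C++ reference of the same addressing (hypothetical helper, written only to illustrate the access pattern; it skips padded positions instead of reading a filled border, which gives the same result for MAX):

#include <algorithm>
#include <limits>
#include <vector>

// NHWC addressing: element (n = 0, y, x, c) lives at ((y * W) + x) * C + c.
// The OpenCL kernel performs the same walk but handles 8 channels per work item.
float max_pool_nhwc_reference(const std::vector<float> &src, int W, int H, int C,
                              int c, int x_out, int y_out,
                              int pool_x, int pool_y, int stride_x, int stride_y,
                              int pad_x, int pad_y)
{
    float res = -std::numeric_limits<float>::infinity();
    for(int py = 0; py < pool_y; ++py)
    {
        const int y = y_out * stride_y - pad_y + py;
        if(y < 0 || y >= H)
        {
            continue; // padded rows never win a MAX reduction
        }
        for(int px = 0; px < pool_x; ++px)
        {
            const int x = x_out * stride_x - pad_x + px;
            if(x < 0 || x >= W)
            {
                continue; // padded columns are skipped as well
            }
            res = std::max(res, src[(static_cast<size_t>(y) * W + x) * C + c]);
        }
    }
    return res;
}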
diff --git a/src/core/CL/cl_kernels/pooling_layer_quantized.cl b/src/core/CL/cl_kernels/pooling_layer_quantized.cl
index 98850c00a5..17d893a013 100644
--- a/src/core/CL/cl_kernels/pooling_layer_quantized.cl
+++ b/src/core/CL/cl_kernels/pooling_layer_quantized.cl
@@ -31,6 +31,8 @@
#define DIV_OP(x, y) (x * (1.f / y))
+#define DIV_OP_NHWC(x, y) (convert_float8(x) * (float8)(1.f / y))
+
#if defined(POOL_L2)
#error "L2 pooling is not supported"
#endif /* defined(POOL_L2) */
@@ -49,7 +51,7 @@ int calculate_avg_scale(const int pool_size_x, const int pool_size_y, const int
return ((end_y - start_y) * (end_x - start_x));
}
-/** Performs a pooling function of pool size equal to N
+/** Performs a pooling function of pool size equal to N (NCHW)
*
* @note Pool sizes must be passed using -DPOOL_SIZE_X and -DPOOL_SIZE_Y e.g. -DPOOL_SIZE_X=13;
* @note In case of average pooling the following information must be passed at compile time:
@@ -75,7 +77,7 @@ int calculate_avg_scale(const int pool_size_x, const int pool_size_y, const int
* @param[in] output_step_z output_stride_z * number of elements along Z processed per workitem(in bytes)
* @param[in] output_offset_first_element_in_bytes The offset of the first element in the destination image
*/
-__kernel void pooling_layer_MxN_quantized(
+__kernel void pooling_layer_MxN_quantized_nchw(
TENSOR3D_DECLARATION(input),
TENSOR3D_DECLARATION(output))
{
@@ -119,3 +121,79 @@ __kernel void pooling_layer_MxN_quantized(
// Store result
*(__global uchar *)output.ptr = convert_uchar(res);
}
+
+int calculate_avg_scale_nhwc(const int pool_size_x, const int pool_size_y, int upper_bound_w, int upper_bound_h,
+ const int pad_x, const int pad_y, const int stride_x, const int stride_y)
+{
+ int start_x = get_global_id(1) * stride_x - pad_x;
+ int start_y = get_global_id(2) * stride_y - pad_y;
+
+ const int end_x = min(start_x + pool_size_x, upper_bound_w);
+ const int end_y = min(start_y + pool_size_y, upper_bound_h);
+
+ start_x = max(0, start_x);
+ start_y = max(0, start_y);
+
+ return ((end_y - start_y) * (end_x - start_x));
+}
+
+/** Performs a pooling function of pool size equal to N (NHWC)
+ *
+ * @note Pool sizes must be passed using -DPOOL_SIZE_X and -DPOOL_SIZE_Y e.g. -DPOOL_SIZE_X=13;
+ * @note Tensors width and height must be passed at compile time using -DMAX_WIDTH and -DMAX_HEIGHT
+ * @note Strides must be passed at compile time using -DSTRIDE_X and -DSTRIDE_Y which are the steps of the window along the x and y directions
+ * @note Pad values must be passed at compile time using -DPAD_X and -DPAD_Y which are the pooling paddings in x and y dimension
+ * @note In case of average pooling the following information must be passed at compile time:
+ * -DPOOL_AVG must be provided otherwise max pooling will be performed.
+ *
+ * @param[in] input_ptr Pointer to the source image. Supported data types: QASYMM8
+ * @param[in] input_stride_x Stride of the source image in X dimension (in bytes)
+ * @param[in] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] input_stride_y Stride of the source image in Y dimension (in bytes)
+ * @param[in] input_step_y input_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] input_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] input_step_z input_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] input_offset_first_element_in_bytes The offset of the first element in the source image
+ * @param[out] output_ptr Pointer to the destination image. Supported data types: same as @p input_ptr
+ * @param[in] output_stride_x Stride of the destination image in X dimension (in bytes)
+ * @param[in] output_step_x output_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] output_stride_y Stride of the destination image in Y dimension (in bytes)
+ * @param[in] output_step_y output_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] output_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] output_step_z output_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] output_offset_first_element_in_bytes The offset of the first element in the destination image
+ */
+__kernel void pooling_layer_MxN_quantized_nhwc(
+ TENSOR3D_DECLARATION(input),
+ TENSOR3D_DECLARATION(output))
+{
+ // Get pixels pointer
+ Tensor3D input = CONVERT_TO_TENSOR3D_STRUCT(input);
+ Tensor3D output = CONVERT_TO_TENSOR3D_STRUCT(output);
+
+ int8 vdata = 0;
+
+ const int idx_width = get_global_id(1) * STRIDE_X;
+ const int idx_height = get_global_id(2) * STRIDE_Y;
+
+ for(int y = 0; y < POOL_SIZE_Y; ++y)
+ {
+ int y1 = select(y, PAD_Y - idx_height, y + idx_height < PAD_Y || y + idx_height > MAX_HEIGHT);
+ for(int x = 0; x < POOL_SIZE_X; ++x)
+ {
+ int x1 = select(x, PAD_X - idx_width - 1, x + idx_width < PAD_X || x + idx_width > MAX_WIDTH);
+ x1 = select(x1, PAD_X - idx_width - 1, y != y1);
+ uchar8 data = vload8(0, (__global uchar *)tensor3D_offset(&input, 0, x1 - PAD_X, y1 - PAD_Y));
+ int8 data0 = convert_int8(data);
+ vdata = POOL_OP(vdata, data0);
+ }
+ }
+
+#if defined(POOL_AVG)
+ // Divide by pool region in case of average pooling
+ vdata = convert_int8(round(DIV_OP_NHWC(vdata, calculate_avg_scale_nhwc(POOL_SIZE_X, POOL_SIZE_Y, MAX_WIDTH, MAX_HEIGHT, PAD_X, PAD_Y, STRIDE_X, STRIDE_Y))));
+#endif /* defined(POOL_AVG) */
+
+ // Store result
+ vstore8(convert_uchar8(vdata), 0, (__global uchar *)output.ptr);
+}
\ No newline at end of file
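
Note: the quantized NHWC kernel accumulates the uchar inputs in an int8 vector (eight 32-bit lanes) and, for average pooling, scales by 1/region in float via DIV_OP_NHWC before rounding back to uchar, so no requantization step is needed inside the kernel. A scalar sketch of that arithmetic (hypothetical helper, for illustration only):

#include <algorithm>
#include <cmath>
#include <cstdint>

// Average pooling on QASYMM8 values: sum in 32-bit, scale by 1/region in float, round, clamp.
// 'region' is the padding-excluded pool area, as computed by calculate_avg_scale_nhwc.
uint8_t qasymm8_avg_pool_reference(const uint8_t *window, int count, int region)
{
    int32_t acc = 0;
    for(int i = 0; i < count; ++i)
    {
        acc += window[i];
    }
    const float scaled  = static_cast<float>(acc) * (1.0f / static_cast<float>(region));
    const long  rounded = std::lround(scaled);
    return static_cast<uint8_t>(std::min(std::max(rounded, 0L), 255L));
}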
diff --git a/src/core/CL/kernels/CLPoolingLayerKernel.cpp b/src/core/CL/kernels/CLPoolingLayerKernel.cpp
index b4deec1386..7907d01daa 100644
--- a/src/core/CL/kernels/CLPoolingLayerKernel.cpp
+++ b/src/core/CL/kernels/CLPoolingLayerKernel.cpp
@@ -34,53 +34,52 @@
#include "arm_compute/core/Utils.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/Window.h"
+#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include <set>
#include <string>
#include <tuple>
using namespace arm_compute;
+using namespace arm_compute::misc::shape_calculator;
namespace
{
// Internal window config info
using CLPoolingConfig = std::pair<unsigned int, BorderSize>; //num_elems_processed_per_iteration, border_size
-void auto_init(const ITensorInfo *input, ITensorInfo *output, unsigned int pooled_w, unsigned int pooled_h)
+void auto_init(const ITensorInfo *input, ITensorInfo *output, PoolingLayerInfo pool_info)
{
- TensorShape output_shape{ input->tensor_shape() };
- output_shape.set(0, pooled_w);
- output_shape.set(1, pooled_h);
-
- auto_init_if_empty(*output, input->clone()->set_tensor_shape(output_shape));
+ TensorShape out_shape = compute_pool_shape(*input, pool_info);
+ auto_init_if_empty(*output, input->clone()->set_tensor_shape(out_shape));
}
Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, const PoolingLayerInfo &pool_info)
{
ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::QS8, DataType::QS16, DataType::F16, DataType::F32);
+ DataLayout data_layout = input->data_layout();
+ switch(data_layout)
+ {
+ case DataLayout::NCHW:
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::QS8, DataType::QS16, DataType::F16, DataType::F32);
+ break;
+ case DataLayout::NHWC:
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::F16, DataType::F32);
+ break;
+ default:
+ ARM_COMPUTE_ERROR("Data layout not supported");
+ }
ARM_COMPUTE_RETURN_ERROR_ON_MSG((is_data_type_quantized_asymmetric(input->data_type()) && pool_info.pool_type() == PoolingType::L2),
"Unsupported combination of parameters!");
- const bool is_global_pooling = pool_info.is_global_pooling();
- const unsigned int pool_size_x = is_global_pooling ? input->tensor_shape().x() : pool_info.pool_size().width;
- const unsigned int pool_size_y = is_global_pooling ? input->tensor_shape().y() : pool_info.pool_size().height;
-
// Checks performed when output is configured
if(output->total_size() != 0)
{
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(input, output);
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_FIXED_POINT(input, output);
-
- unsigned int pooled_w = 0;
- unsigned int pooled_h = 0;
- std::tie(pooled_w, pooled_h) = scaled_dimensions(input->dimension(0),
- input->dimension(1),
- pool_size_x,
- pool_size_y,
- pool_info.pad_stride_info());
- ARM_COMPUTE_RETURN_ERROR_ON_MSG((output->dimension(0) != pooled_w) || (output->dimension(1) != pooled_h),
- "Invalid output pooling dimensions!");
+ TensorInfo out_info(TensorInfo(compute_pool_shape(*input, pool_info), 1, output->data_type(), output->fixed_point_position()));
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(output, &out_info);
}
return Status{};
@@ -88,59 +87,83 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, c
std::tuple<Status, Window, CLPoolingConfig> validate_and_configure_window(ITensorInfo *input, ITensorInfo *output, const PoolingLayerInfo &pool_info)
{
+ ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
+
+ // Get data layout
+ const DataLayout data_layout = input->data_layout();
+ const int idx_width = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
+ const int idx_height = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
+
int pool_stride_x = 0;
int pool_stride_y = 0;
unsigned int pooled_w = 0;
unsigned int pooled_h = 0;
- int pool_size_x = pool_info.is_global_pooling() ? input->dimension(0) : pool_info.pool_size().width;
- int pool_size_y = pool_info.is_global_pooling() ? input->dimension(1) : pool_info.pool_size().height;
+ int pool_size_x = pool_info.is_global_pooling() ? input->dimension(idx_width) : pool_info.pool_size().width;
+ int pool_size_y = pool_info.is_global_pooling() ? input->dimension(idx_height) : pool_info.pool_size().height;
const PadStrideInfo pad_stride_info = pool_info.pad_stride_info();
std::tie(pool_stride_x, pool_stride_y) = pad_stride_info.stride();
- const int pool_pad_right = pad_stride_info.pad_right();
- const int pool_pad_top = pad_stride_info.pad_top();
- const int pool_pad_left = pad_stride_info.pad_left();
- const int pool_pad_bottom = pad_stride_info.pad_bottom();
+ const int pool_pad_right = pad_stride_info.pad_right();
+ const int pool_pad_top = pad_stride_info.pad_top();
+ const int pool_pad_left = pad_stride_info.pad_left();
+ const int pool_pad_bottom = pad_stride_info.pad_bottom();
+ BorderSize border_size = BorderSize(pool_pad_top, pool_pad_right, pool_pad_bottom, pool_pad_left);
- ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
-
- // Check output dimensions
- std::tie(pooled_w, pooled_h) = scaled_dimensions(input->dimension(0),
- input->dimension(1),
- pool_size_x,
- pool_size_y,
- pad_stride_info);
+ auto_init(input, output, pool_info);
+ pooled_w = output->tensor_shape()[idx_width];
+ pooled_h = output->tensor_shape()[idx_height];
- auto_init(input, output, pooled_w, pooled_h);
+ const DataType data_type = input->data_type();
- BorderSize border_size = BorderSize(pool_pad_top, pool_pad_right, pool_pad_bottom, pool_pad_left);
- const DataType data_type = input->data_type();
+ const int input_width = input->dimension(idx_width);
+ const int input_height = input->dimension(idx_height);
- const int input_width = input->dimension(0);
- const int input_height = input->dimension(1);
-
- // Change the number of elements processed per iteration
- // for pooling 3x3 with stride less equal than 3
- const bool can_optimize = (pool_size_x == 3) && (pool_size_y == 3) && (pool_stride_x <= 3) && !is_data_type_quantized(data_type);
- const unsigned int num_elems_processed_per_iteration = can_optimize ? 4 : 1;
- const int num_elems_read_per_iteration = (num_elems_processed_per_iteration - 1) * pool_stride_x + pool_size_x;
-
- // Number of iterations in X dimension
- const int num_iterations_x = (pooled_w + num_elems_processed_per_iteration - 1) / num_elems_processed_per_iteration;
-
- // Upper limit for the number of right/bottom border elements that are accessed
- const int upper_bound_w = ((num_iterations_x - 1) * num_elems_processed_per_iteration * pool_stride_x - pool_pad_left + num_elems_read_per_iteration) - input_width;
- const int upper_bound_h = ((pooled_h - 1) * pool_stride_y - pool_pad_top + pool_size_y) - input_height;
-
- border_size.right = std::max(upper_bound_w, pool_pad_right);
- border_size.bottom = std::max(upper_bound_h, pool_pad_bottom);
-
- Window win = calculate_max_window(*output, Steps(num_elems_processed_per_iteration));
-
- AccessWindowRectangle input_access(input, -pool_pad_left, -pool_pad_top, num_elems_read_per_iteration, pool_size_y,
- pool_stride_x * num_elems_processed_per_iteration, pool_stride_y);
- AccessWindowHorizontal output_access(output, 0, num_elems_processed_per_iteration);
- bool window_changed = update_window_and_padding(win, input_access, output_access);
- output_access.set_valid_region(win, ValidRegion(Coordinates(), output->tensor_shape()));
+ unsigned int num_elems_processed_per_iteration = 0;
+ bool window_changed = false;
+ Window win{};
+ switch(data_layout)
+ {
+ case DataLayout::NCHW:
+ {
+ // Change the number of elements processed per iteration
+ // for pooling 3x3 with stride less equal than 3
+ const bool can_optimize = (pool_size_x == 3) && (pool_size_y == 3) && (pool_stride_x <= 3) && !is_data_type_quantized(data_type);
+ num_elems_processed_per_iteration = can_optimize ? 4 : 1;
+ const unsigned int num_elems_read_per_iteration = (num_elems_processed_per_iteration - 1) * pool_stride_x + pool_size_x;
+
+ // Number of iterations in X dimension
+ const int num_iterations_x = (pooled_w + num_elems_processed_per_iteration - 1) / num_elems_processed_per_iteration;
+
+ // Upper limit for the number of right/bottom border elements that are accessed
+ const int upper_bound_w = ((num_iterations_x - 1) * num_elems_processed_per_iteration * pool_stride_x - pool_pad_left + num_elems_read_per_iteration) - input_width;
+ const int upper_bound_h = ((pooled_h - 1) * pool_stride_y - pool_pad_top + pool_size_y) - input_height;
+
+ border_size.right = std::max(upper_bound_w, pool_pad_right);
+ border_size.bottom = std::max(upper_bound_h, pool_pad_bottom);
+
+ win = calculate_max_window(*output, Steps(num_elems_processed_per_iteration));
+
+ AccessWindowRectangle input_access(input, -pool_pad_left, -pool_pad_top, num_elems_read_per_iteration, pool_size_y,
+ pool_stride_x * num_elems_processed_per_iteration, pool_stride_y);
+ AccessWindowHorizontal output_access(output, 0, num_elems_processed_per_iteration);
+ window_changed = update_window_and_padding(win, input_access, output_access);
+ output_access.set_valid_region(win, ValidRegion(Coordinates(), output->tensor_shape()));
+ break;
+ }
+ case DataLayout::NHWC:
+ {
+ num_elems_processed_per_iteration = 8;
+ win = calculate_max_window(*output, Steps(num_elems_processed_per_iteration));
+
+ AccessWindowRectangle input_access(input, -1, -1, num_elems_processed_per_iteration, pool_size_y,
+ pool_stride_x * num_elems_processed_per_iteration, pool_stride_y);
+ AccessWindowHorizontal output_access(output, 0, num_elems_processed_per_iteration);
+ window_changed = update_window_and_padding(win, input_access, output_access);
+ output_access.set_valid_region(win, ValidRegion(Coordinates(), output->tensor_shape()));
+ break;
+ }
+ default:
+ ARM_COMPUTE_ERROR("Not implemented");
+ }
Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
return std::make_tuple(err, win, CLPoolingConfig(num_elems_processed_per_iteration, border_size));
@@ -159,30 +182,25 @@ BorderSize CLPoolingLayerKernel::border_size() const
void CLPoolingLayerKernel::configure(const ICLTensor *input, ICLTensor *output, const PoolingLayerInfo &pool_info)
{
+ ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
+
int pool_stride_x = 0;
int pool_stride_y = 0;
- unsigned int pooled_w = 0;
- unsigned int pooled_h = 0;
const PoolingType pool_type = pool_info.pool_type();
- const int pool_size_x = pool_info.is_global_pooling() ? input->info()->dimension(0) : pool_info.pool_size().width;
- const int pool_size_y = pool_info.is_global_pooling() ? input->info()->dimension(1) : pool_info.pool_size().height;
+ DataLayout data_layout = input->info()->data_layout();
+ const int idx_width = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
+ const int idx_height = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
+ const int idx_channel = get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL);
+ const int pool_size_x = pool_info.is_global_pooling() ? input->info()->dimension(idx_width) : pool_info.pool_size().width;
+ const int pool_size_y = pool_info.is_global_pooling() ? input->info()->dimension(idx_height) : pool_info.pool_size().height;
const PadStrideInfo pad_stride_info = pool_info.pad_stride_info();
const bool exclude_padding = pool_info.exclude_padding();
std::tie(pool_stride_x, pool_stride_y) = pad_stride_info.stride();
const int pool_pad_top = pad_stride_info.pad_top();
const int pool_pad_left = pad_stride_info.pad_left();
- ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
-
// Check output dimensions
- std::tie(pooled_w, pooled_h) = scaled_dimensions(input->info()->dimension(0),
- input->info()->dimension(1),
- pool_size_x,
- pool_size_y,
- pad_stride_info);
-
- auto_init(input->info(), output->info(), pooled_w, pooled_h);
-
+ auto_init(input->info(), output->info(), pool_info);
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), pool_info));
// Set instance variables
@@ -200,65 +218,93 @@ void CLPoolingLayerKernel::configure(const ICLTensor *input, ICLTensor *output,
build_opts.add_option_if(is_data_type_fixed_point(data_type),
"-DFIXED_POINT_POSITION=" + support::cpp11::to_string(input->info()->fixed_point_position()));
build_opts.add_option("-DSTRIDE_X=" + support::cpp11::to_string(pool_stride_x));
- if(pool_type != PoolingType::MAX)
- {
- build_opts.add_option_if(exclude_padding, "-DEXCLUDE_PADDING");
- build_opts.add_option("-DMAX_WIDTH=" + support::cpp11::to_string(input->info()->dimension(0) + (exclude_padding ? 0 : pool_pad_left)));
- build_opts.add_option("-DMAX_HEIGHT=" + support::cpp11::to_string(input->info()->dimension(1) + (exclude_padding ? 0 : pool_pad_top)));
- build_opts.add_option("-DSTRIDE_Y=" + support::cpp11::to_string(pool_stride_y));
- build_opts.add_option("-DPAD_X=" + support::cpp11::to_string(pool_pad_left));
- build_opts.add_option("-DPAD_Y=" + support::cpp11::to_string(pool_pad_top));
- }
+ build_opts.add_option("-DSTRIDE_Y=" + support::cpp11::to_string(pool_stride_y));
+ build_opts.add_option("-DPAD_X=" + support::cpp11::to_string(pool_pad_left));
+ build_opts.add_option("-DPAD_Y=" + support::cpp11::to_string(pool_pad_top));
+ build_opts.add_option("-DPOOL_SIZE_X=" + support::cpp11::to_string(pool_size_x));
+ build_opts.add_option("-DPOOL_SIZE_Y=" + support::cpp11::to_string(pool_size_y));
+ build_opts.add_option_if(data_type == DataType::F16, "-DFP16");
// Create kernel
- if((pool_size_x == 3) && (pool_size_y == 3) && !is_data_type_quantized_asymmetric(data_type))
+ switch(data_layout)
{
- // Check if we have pool3x3 with stride_x less equal than 3. In these cases, run an optimized OpenCL kernel where
- // each thread computes 4 output elements
- const bool is_pool3x3_stride_le3 = (pool_size_x == 3) && (pool_size_y == 3) && (pool_stride_x <= 3) && !is_data_type_fixed_point(data_type);
-
- std::string kernel_name = ((is_pool3x3_stride_le3) ? "pooling_layer_optimized_" : "pooling_layer_")
- + support::cpp11::to_string(pool_size_x);
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts.options()));
- }
- else // Run general case
- {
- build_opts.add_option("-DPOOL_SIZE_X=" + support::cpp11::to_string(pool_size_x));
- build_opts.add_option("-DPOOL_SIZE_Y=" + support::cpp11::to_string(pool_size_y));
- build_opts.add_option_if(data_type == DataType::F16, "-DFP16");
-
- std::string kernel_name = is_data_type_quantized_asymmetric(data_type) ? "pooling_layer_MxN_quantized" : "pooling_layer_MxN";
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts.options()));
+ case DataLayout::NCHW:
+ {
+ build_opts.add_option("-DMAX_WIDTH=" + support::cpp11::to_string(input->info()->dimension(idx_width) + (exclude_padding ? 0 : pool_pad_left)));
+ build_opts.add_option("-DMAX_HEIGHT=" + support::cpp11::to_string(input->info()->dimension(idx_height) + (exclude_padding ? 0 : pool_pad_top)));
+ if(pool_type != PoolingType::MAX)
+ {
+ build_opts.add_option_if(exclude_padding, "-DEXCLUDE_PADDING");
+ }
+
+ if((pool_size_x == 3) && (pool_size_y == 3) && !is_data_type_quantized_asymmetric(data_type))
+ {
+ // Check if we have pool3x3 with stride_x less equal than 3. In these cases, run an optimized OpenCL kernel where
+ // each thread computes 4 output elements
+ const bool is_pool3x3_stride_le3 = (pool_size_x == 3) && (pool_size_y == 3) && (pool_stride_x <= 3) && !is_data_type_fixed_point(data_type);
+
+ std::string kernel_name = ((is_pool3x3_stride_le3) ? "pooling_layer_optimized_" : "pooling_layer_")
+ + support::cpp11::to_string(pool_size_x);
+ _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts.options()));
+ }
+ else // Run general case
+ {
+ std::string kernel_name = is_data_type_quantized_asymmetric(data_type) ? "pooling_layer_MxN_quantized_nchw" : "pooling_layer_MxN_nchw";
+ _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts.options()));
+ }
+ break;
+ }
+ case DataLayout::NHWC:
+ {
+ build_opts.add_option_if(exclude_padding, "-DEXCLUDE_PADDING");
+ build_opts.add_option("-DMAX_WIDTH=" + support::cpp11::to_string(input->info()->dimension(idx_width)));
+ build_opts.add_option("-DMAX_HEIGHT=" + support::cpp11::to_string(input->info()->dimension(idx_height)));
+ std::string kernel_name = is_data_type_quantized_asymmetric(data_type) ? "pooling_layer_MxN_quantized_nhwc" : "pooling_layer_MxN_nhwc";
+ _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts.options()));
+ break;
+ }
+ default:
+ ARM_COMPUTE_ERROR("Not implemented");
}
// Configure kernel window
auto win_config = validate_and_configure_window(input->info(), output->info(), pool_info);
ARM_COMPUTE_ERROR_THROW_ON(std::get<0>(win_config));
+ ICLKernel::configure(std::get<1>(win_config));
// Configure the local work size (hint) from the first two dimensions of the global work size.
// On Bifrost, this works for up to 35x35xC filters, for which the pooling_layer_3_optimized
// kernel is launched with gws=(9, 33, C). In any case, the hint will be ignored if it is
// invalid (e.g. exceeds the maximum workgroup size that the kernel can be launched with).
- if(gpu_target_is_in(gpu_target, GPUTarget::G71, GPUTarget::G72, GPUTarget::G51, GPUTarget::G51BIG, GPUTarget::G51LIT, GPUTarget::TNOX))
+ if(data_layout == DataLayout::NCHW)
{
- cl::NDRange gws = ICLKernel::gws_from_window(std::get<1>(win_config));
- _lws_hint = cl::NDRange(gws[0], gws[1], 1);
+ CLPoolingConfig pooling_config = std::get<2>(win_config);
+ _num_elems_processed_per_iteration = pooling_config.first;
+ _border_size = pooling_config.second;
+ if(gpu_target_is_in(gpu_target, GPUTarget::G71, GPUTarget::G72, GPUTarget::G51, GPUTarget::G51BIG, GPUTarget::G51LIT, GPUTarget::TNOX))
+ {
+ cl::NDRange gws = ICLKernel::gws_from_window(std::get<1>(win_config));
+ _lws_hint = cl::NDRange(gws[0], gws[1], 1);
+ }
+ }
+ else
+ {
+ _border_size = BorderSize(1, 0, 0, 0);
+ _num_elems_processed_per_iteration = 8;
}
-
- ICLKernel::configure(std::get<1>(win_config));
-
- CLPoolingConfig pooling_config = std::get<2>(win_config);
- _num_elems_processed_per_iteration = pooling_config.first;
- _border_size = pooling_config.second;
// Set config_id for enabling LWS tuning
_config_id = "pooling_layer_";
_config_id += lower_string(string_from_data_type(data_type));
_config_id += "_";
- _config_id += support::cpp11::to_string(output->info()->dimension(0));
+ _config_id += lower_string(string_from_data_layout(data_layout));
+ _config_id += "_";
+ _config_id += support::cpp11::to_string(output->info()->dimension(idx_width));
_config_id += "_";
- _config_id += support::cpp11::to_string(output->info()->dimension(1));
+ _config_id += support::cpp11::to_string(output->info()->dimension(idx_height));
+ _config_id += "_";
+ _config_id += support::cpp11::to_string(output->info()->dimension(idx_channel));
}
Status CLPoolingLayerKernel::validate(const ITensorInfo *input, const ITensorInfo *output, const PoolingLayerInfo &pool_info)
@@ -278,25 +324,52 @@ void CLPoolingLayerKernel::run(const Window &window, cl::CommandQueue &queue)
unsigned int pool_stride_y = 0;
std::tie(pool_stride_x, pool_stride_y) = _pool_info.pad_stride_info().stride();
- Window window_collapsed = window.collapse_if_possible(ICLKernel::window(), Window::DimZ);
- Window slice = window_collapsed.first_slice_window_3D();
-
- do
+ switch(_input->info()->data_layout())
{
- // Upsample input by pool size
- Window in_slice(slice);
- in_slice.set(Window::DimX, Window::Dimension(in_slice.x().start() - _pool_info.pad_stride_info().pad_left(),
- (in_slice.x().end() - _pool_info.pad_stride_info().pad_left()) * pool_stride_x,
- pool_stride_x * _num_elems_processed_per_iteration));
- in_slice.set(Window::DimY, Window::Dimension(in_slice.y().start() - _pool_info.pad_stride_info().pad_top(),
- (in_slice.y().end() - _pool_info.pad_stride_info().pad_top()) * pool_stride_y,
- pool_stride_y));
-
- // Set inputs
- unsigned int idx = 0;
- add_3D_tensor_argument(idx, _input, in_slice);
- add_3D_tensor_argument(idx, _output, slice);
- enqueue(queue, *this, slice, _lws_hint);
+ case DataLayout::NCHW:
+ {
+ Window window_collapsed = window.collapse_if_possible(ICLKernel::window(), Window::DimZ);
+ Window slice = window_collapsed.first_slice_window_3D();
+ do
+ {
+ // Upsample input by pool size
+ Window in_slice(slice);
+ in_slice.set(Window::DimX, Window::Dimension(in_slice.x().start() - _pool_info.pad_stride_info().pad_left(),
+ (in_slice.x().end() - _pool_info.pad_stride_info().pad_left()) * pool_stride_x,
+ pool_stride_x * _num_elems_processed_per_iteration));
+ in_slice.set(Window::DimY, Window::Dimension(in_slice.y().start() - _pool_info.pad_stride_info().pad_top(),
+ (in_slice.y().end() - _pool_info.pad_stride_info().pad_top()) * pool_stride_y,
+ pool_stride_y));
+
+ // Set inputs
+ unsigned int idx = 0;
+ add_3D_tensor_argument(idx, _input, in_slice);
+ add_3D_tensor_argument(idx, _output, slice);
+ enqueue(queue, *this, slice, _lws_hint);
+ }
+ while(window_collapsed.slide_window_slice_3D(slice));
+ break;
+ }
+ case DataLayout::NHWC:
+ {
+ Window slice = window.first_slice_window_3D();
+
+ Window in_slice = window.first_slice_window_3D();
+ in_slice.set(Window::DimX, Window::Dimension(0, _input->info()->dimension(0), _num_elems_processed_per_iteration));
+ in_slice.set(Window::DimY, Window::Dimension(0, _input->info()->dimension(1), pool_stride_x));
+ in_slice.set(Window::DimZ, Window::Dimension(0, _input->info()->dimension(2), pool_stride_y));
+ do
+ {
+ // Set inputs
+ unsigned int idx = 0;
+ add_3D_tensor_argument(idx, _input, in_slice);
+ add_3D_tensor_argument(idx, _output, slice);
+ enqueue(queue, *this, slice, _lws_hint);
+ }
+ while(window.slide_window_slice_3D(slice) && window.slide_window_slice_3D(in_slice));
+ break;
+ }
+ default:
+ ARM_COMPUTE_ERROR("Not implemented");
}
- while(window_collapsed.slide_window_slice_3D(slice));
}
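
Note: with the layout switch in validate_arguments() and validate_and_configure_window(), NHWC configurations can be checked up front without allocating anything. A minimal sketch (hypothetical shapes; assumes TensorInfo::set_data_layout()):

#include "arm_compute/core/CL/kernels/CLPoolingLayerKernel.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"

using namespace arm_compute;

Status validate_nhwc_pooling_example()
{
    // NHWC shapes are ordered (C, W, H): 32 channels over a 13x13 plane
    TensorInfo src(TensorShape(32U, 13U, 13U), 1, DataType::F32);
    TensorInfo dst(TensorShape(32U, 6U, 6U), 1, DataType::F32);
    src.set_data_layout(DataLayout::NHWC);
    dst.set_data_layout(DataLayout::NHWC);

    // 3x3 max pooling, stride 2, no padding: (13 - 3) / 2 + 1 = 6 in both W and H
    const PoolingLayerInfo pool_info(PoolingType::MAX, 3, PadStrideInfo(2, 2, 0, 0));
    return CLPoolingLayerKernel::validate(&src, &dst, pool_info);
}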
diff --git a/src/runtime/CL/functions/CLPoolingLayer.cpp b/src/runtime/CL/functions/CLPoolingLayer.cpp
index 201bf87b47..17875a38ad 100644
--- a/src/runtime/CL/functions/CLPoolingLayer.cpp
+++ b/src/runtime/CL/functions/CLPoolingLayer.cpp
@@ -41,13 +41,28 @@ void CLPoolingLayer::configure(ICLTensor *input, ICLTensor *output, const Poolin
_kernel = std::move(k);
// Configure border depending on operation required (quantize border in case of asymmetric data_type)
- BorderMode border_mode = (PoolingType::MAX == pool_info.pool_type()) ? BorderMode::REPLICATE : BorderMode::CONSTANT;
- PixelValue zero_value(0.f);
+ BorderMode border_mode{};
+ PixelValue pixel_value(0.f);
if(is_data_type_quantized_asymmetric(input->info()->data_type()) && !pool_info.exclude_padding())
{
- zero_value = PixelValue(static_cast<uint32_t>(input->info()->quantization_info().offset));
+ pixel_value = PixelValue(static_cast<uint32_t>(input->info()->quantization_info().offset));
}
- _border_handler.configure(input, _kernel->border_size(), border_mode, zero_value);
+ switch(input->info()->data_layout())
+ {
+ case DataLayout::NCHW:
+ border_mode = (PoolingType::MAX == pool_info.pool_type()) ? BorderMode::REPLICATE : BorderMode::CONSTANT;
+ break;
+ case DataLayout::NHWC:
+ border_mode = BorderMode::CONSTANT;
+ if(PoolingType::MAX == pool_info.pool_type() && !is_data_type_quantized_asymmetric(input->info()->data_type()))
+ {
+ pixel_value = PixelValue(std::numeric_limits<float>::lowest());
+ }
+ break;
+ default:
+ ARM_COMPUTE_ERROR("Data layout not supported");
+ }
+ _border_handler.configure(input, _kernel->border_size(), border_mode, pixel_value);
}
Status CLPoolingLayer::validate(const ITensorInfo *input, const ITensorInfo *output, const PoolingLayerInfo &pool_info)
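
Note: at the function level the border configuration now follows the layout: NHWC always uses CONSTANT borders, with the lowest representable float for non-quantized MAX pooling and the quantization offset for QASYMM8 when padding is not excluded. A hedged end-to-end sketch of running the layer on an NHWC CLTensor (hypothetical sizes; assumes a default-initialised CL scheduler):

#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/functions/CLPoolingLayer.h"

using namespace arm_compute;

void run_nhwc_pooling_example()
{
    CLScheduler::get().default_init();

    CLTensor src, dst;
    TensorInfo src_info(TensorShape(16U, 8U, 8U, 1U), 1, DataType::F32); // C, W, H, N
    src_info.set_data_layout(DataLayout::NHWC);
    src.allocator()->init(src_info);

    // configure() infers the output shape and layout through compute_pool_shape()
    CLPoolingLayer pool;
    pool.configure(&src, &dst, PoolingLayerInfo(PoolingType::MAX, 2, PadStrideInfo(2, 2, 0, 0)));

    src.allocator()->allocate();
    dst.allocator()->allocate();
    // ... fill src, then:
    pool.run();
    CLScheduler::get().sync();
}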
diff --git a/tests/validation/CL/GlobalPoolingLayer.cpp b/tests/validation/CL/GlobalPoolingLayer.cpp
index 46752c4913..586be5e041 100644
--- a/tests/validation/CL/GlobalPoolingLayer.cpp
+++ b/tests/validation/CL/GlobalPoolingLayer.cpp
@@ -61,7 +61,7 @@ TEST_SUITE(Float)
TEST_SUITE(FP32)
FIXTURE_DATA_TEST_CASE(RunGlobalPooling, CLGlobalPoolingLayerFixture<float>, framework::DatasetMode::ALL, combine(combine(GlobalPoolingLayerDataset, framework::dataset::make("DataType",
DataType::F32)),
- framework::dataset::make("DataLayout", DataLayout::NCHW)))
+ framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_f32);
@@ -71,7 +71,7 @@ TEST_SUITE_END()
TEST_SUITE(FP16)
FIXTURE_DATA_TEST_CASE(RunGlobalPooling, CLGlobalPoolingLayerFixture<half>, framework::DatasetMode::ALL, combine(combine(GlobalPoolingLayerDataset, framework::dataset::make("DataType",
DataType::F16)),
- framework::dataset::make("DataLayout", DataLayout::NCHW)))
+ framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_f16);
diff --git a/tests/validation/CL/PoolingLayer.cpp b/tests/validation/CL/PoolingLayer.cpp
index 79e526b9ec..7bd090cb77 100644
--- a/tests/validation/CL/PoolingLayer.cpp
+++ b/tests/validation/CL/PoolingLayer.cpp
@@ -128,7 +128,7 @@ FIXTURE_DATA_TEST_CASE(RunSpecial, CLSpecialPoolingLayerFixture<float>, framewor
}
FIXTURE_DATA_TEST_CASE(RunSmall, CLPoolingLayerFixture<float>, framework::DatasetMode::ALL, combine(combine(datasets::SmallShapes(), combine(PoolingLayerDatasetFP, framework::dataset::make("DataType",
DataType::F32))),
- framework::dataset::make("DataLayout", DataLayout::NCHW)))
+ framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_f32);
@@ -136,7 +136,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLPoolingLayerFixture<float>, framework::Datase
FIXTURE_DATA_TEST_CASE(RunLarge, CLPoolingLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeShapes(), combine(PoolingLayerDatasetFP,
framework::dataset::make("DataType",
DataType::F32))),
- framework::dataset::make("DataLayout", DataLayout::NCHW)))
+ framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_f32);
@@ -146,14 +146,14 @@ TEST_SUITE_END() // FP32
TEST_SUITE(FP16)
FIXTURE_DATA_TEST_CASE(RunSmall, CLPoolingLayerFixture<half>, framework::DatasetMode::ALL, combine(combine(datasets::SmallShapes(), combine(PoolingLayerDatasetFP,
framework::dataset::make("DataType", DataType::F16))),
- framework::dataset::make("DataLayout", DataLayout::NCHW)))
+ framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_f16);
}
FIXTURE_DATA_TEST_CASE(RunLarge, CLPoolingLayerFixture<half>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeShapes(), combine(PoolingLayerDatasetFP,
framework::dataset::make("DataType", DataType::F16))),
- framework::dataset::make("DataLayout", DataLayout::NCHW)))
+ framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_f16);
@@ -211,7 +211,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLPoolingLayerQuantizedFixture<uint8_t>, framew
framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f / 255, 127),
QuantizationInfo(7.f / 255, 123)
})),
- framework::dataset::make("DataLayout", DataLayout::NCHW)))
+ framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_qasymm8);
@@ -219,14 +219,13 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLPoolingLayerQuantizedFixture<uint8_t>, framew
FIXTURE_DATA_TEST_CASE(RunLarge, CLPoolingLayerQuantizedFixture<uint8_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeShapes(), combine(PoolingLayerDatasetQASYMM8,
framework::dataset::make("DataType", DataType::QASYMM8))),
framework::dataset::make("QuantizationInfo", { QuantizationInfo(1.f / 255, 0) })),
- framework::dataset::make("DataLayout", DataLayout::NCHW)))
+ framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_qasymm8);
}
TEST_SUITE_END() // QASYMM8
TEST_SUITE_END() // Quantized
-
TEST_SUITE_END() // PoolingLayer
TEST_SUITE_END() // CL
} // namespace validation