-rw-r--r--  arm_compute/core/CL/kernels/CLROIAlignLayerKernel.h  |  16
-rw-r--r--  arm_compute/runtime/CL/functions/CLROIAlignLayer.h   |  16
-rw-r--r--  src/core/CL/CLKernelLibrary.cpp                      |   7
-rw-r--r--  src/core/CL/cl_kernels/roi_align_layer_quantized.cl  | 225
-rw-r--r--  src/core/CL/kernels/CLROIAlignLayerKernel.cpp        |  39
-rw-r--r--  tests/validation/CL/ROIAlignLayer.cpp                |  70
-rw-r--r--  tests/validation/Helpers.cpp                         |  12
-rw-r--r--  tests/validation/Helpers.h                           |  10
-rw-r--r--  tests/validation/fixtures/ROIAlignLayerFixture.h     |  93
-rw-r--r--  tests/validation/reference/ROIAlignLayer.cpp         |  39
-rw-r--r--  tests/validation/reference/ROIAlignLayer.h           |   6
11 files changed, 460 insertions(+), 73 deletions(-)
diff --git a/arm_compute/core/CL/kernels/CLROIAlignLayerKernel.h b/arm_compute/core/CL/kernels/CLROIAlignLayerKernel.h
index b5e02324bc..e8dd0c50c8 100644
--- a/arm_compute/core/CL/kernels/CLROIAlignLayerKernel.h
+++ b/arm_compute/core/CL/kernels/CLROIAlignLayerKernel.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -51,9 +51,10 @@ public:
/** Set the input and output tensors.
*
- * @param[in] input Source tensor. Data types supported: F16/F32.
+ * @param[in] input Source tensor. Data types supported: QASYMM8/F16/F32.
* @param[in] rois ROIs tensor, it is a 2D tensor of size [5, N] (where N is the number of ROIs) containing top left and bottom right corner
- * as coordinate of an image and batch_id of ROI [ batch_id, x1, y1, x2, y2 ]. Data types supported: same as @p input
+ * as coordinates of an image and the batch_id of the ROI [ batch_id, x1, y1, x2, y2 ].
+ * Data types supported: QASYMM16 with scale of 0.125 and 0 offset if @p input is QASYMM8, otherwise same as @p input
* @param[out] output Destination tensor. Data types supported: Same as @p input.
* @param[in] pool_info Contains pooling operation information described in @ref ROIPoolingLayerInfo.
*
@@ -65,10 +66,11 @@ public:
void configure(const ICLTensor *input, const ICLTensor *rois, ICLTensor *output, const ROIPoolingLayerInfo &pool_info);
/** Static function to check if given info will lead to a valid configuration of @ref CLROIAlignLayerKernel
*
- * @param[in] input Source tensor info. Data types supported: F16/F32.
- * @param[in] rois ROIs tensor info. Data types supported: same as @p input
- * @param[out] output Destination tensor info. Data types supported: Same as @p input.
- * @param[in] pool_info Contains pooling operation information described in @ref ROIPoolingLayerInfo.
+ * @param[in] input Source tensor info. Data types supported: QASYMM8/F16/F32.
+ * @param[in] rois ROIs tensor info. Data types supported: QASYMM16 with scale of 0.125 and 0 offset if @p input is QASYMM8,
+ * otherwise same as @p input
+ * @param[in] output Destination tensor info. Data types supported: Same as @p input.
+ * @param[in] pool_info Contains pooling operation information described in @ref ROIPoolingLayerInfo.
*
* @note The x and y dimensions of @p output tensor must be the same as @p pool_info 's pooled
* width and pooled height.
diff --git a/arm_compute/runtime/CL/functions/CLROIAlignLayer.h b/arm_compute/runtime/CL/functions/CLROIAlignLayer.h
index fec0dac51a..e12978ac2b 100644
--- a/arm_compute/runtime/CL/functions/CLROIAlignLayer.h
+++ b/arm_compute/runtime/CL/functions/CLROIAlignLayer.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -43,9 +43,10 @@ class CLROIAlignLayer : public ICLSimpleFunction
public:
/** Set the input and output tensors.
*
- * @param[in] input Source tensor. Data types supported: F16/F32.
+ * @param[in] input Source tensor. Data types supported: QASYMM8/F16/F32.
* @param[in] rois ROIs tensor, it is a 2D tensor of size [5, N] (where N is the number of ROIs) containing top left and bottom right corner
- * as coordinate of an image and batch_id of ROI [ batch_id, x1, y1, x2, y2 ]. Data types supported: same as @p input
+ * as coordinates of an image and the batch_id of the ROI [ batch_id, x1, y1, x2, y2 ].
+ * Data types supported: QASYMM16 with scale of 0.125 and 0 offset if @p input is QASYMM8, otherwise same as @p input
* @param[out] output Destination tensor. Data types supported: Same as @p input.
* @param[in] pool_info Contains pooling operation information described in @ref ROIPoolingLayerInfo.
*
@@ -57,10 +58,11 @@ public:
void configure(const ICLTensor *input, const ICLTensor *rois, ICLTensor *output, const ROIPoolingLayerInfo &pool_info);
/** Static function to check if given info will lead to a valid configuration of @ref CLROIAlignLayer
*
- * @param[in] input Source tensor info. Data types supported: F16/F32.
- * @param[in] rois ROIs tensor info. Data types supported: same as @p input
- * @param[out] output Destination tensor info. Data types supported: Same as @p input.
- * @param[in] pool_info Contains pooling operation information described in @ref ROIPoolingLayerInfo.
+ * @param[in] input Source tensor info. Data types supported: QASYMM8/F16/F32.
+ * @param[in] rois ROIs tensor info. Data types supported: QASYMM16 with scale of 0.125 and 0 offset if @p input is QASYMM8,
+ * otherwise same as @p input
+ * @param[in] output Destination tensor info. Data types supported: Same as @p input.
+ * @param[in] pool_info Contains pooling operation information described in @ref ROIPoolingLayerInfo.
*
* @note The x and y dimensions of @p output tensor must be the same as @p pool_info 's pooled
* width and pooled height.
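
For context, here is a minimal usage sketch of the quantized path that the updated documentation above describes. The tensor shapes, quantization parameters and spatial scale are illustrative assumptions, not values taken from this patch; only the data-type/quantization contract (QASYMM8 input with QASYMM16 ROIs at scale 0.125, offset 0) comes from the documented API.

    #include "arm_compute/runtime/CL/CLScheduler.h"
    #include "arm_compute/runtime/CL/CLTensor.h"
    #include "arm_compute/runtime/CL/functions/CLROIAlignLayer.h"

    using namespace arm_compute;

    void run_quantized_roi_align()
    {
        CLScheduler::get().default_init();

        // QASYMM8 feature map: 64x64, 256 channels, batch of 2 (illustrative values).
        CLTensor input;
        input.allocator()->init(TensorInfo(TensorShape(64U, 64U, 256U, 2U), 1, DataType::QASYMM8,
                                           QuantizationInfo(1.f / 255.f, 128)));

        // ROIs: [5, N] laid out as { batch_id, x1, y1, x2, y2 }.
        // With a QASYMM8 input the ROIs must be QASYMM16 with scale 0.125 and offset 0.
        CLTensor rois;
        rois.allocator()->init(TensorInfo(TensorShape(5U, 10U), 1, DataType::QASYMM16,
                                          QuantizationInfo(0.125f, 0)));

        // Output: one pooled 7x7 region per ROI, same data type as the input.
        CLTensor output;
        output.allocator()->init(TensorInfo(TensorShape(7U, 7U, 256U, 10U), 1, DataType::QASYMM8,
                                            QuantizationInfo(1.f / 255.f, 128)));

        CLROIAlignLayer roi_align;
        roi_align.configure(&input, &rois, &output, ROIPoolingLayerInfo(7U, 7U, 0.03125f /* 1/32 */));

        input.allocator()->allocate();
        rois.allocator()->allocate();
        output.allocator()->allocate();

        // ... fill input and rois ...
        roi_align.run();
        CLScheduler::get().sync();
    }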
diff --git a/src/core/CL/CLKernelLibrary.cpp b/src/core/CL/CLKernelLibrary.cpp
index b938a1813a..4b3b37c3da 100644
--- a/src/core/CL/CLKernelLibrary.cpp
+++ b/src/core/CL/CLKernelLibrary.cpp
@@ -447,6 +447,7 @@ const std::map<std::string, std::string> CLKernelLibrary::_kernel_program_map =
{ "RGBA8888_to_RGB888_bt709", "color_convert.cl" },
{ "RGBA8888_to_YUV444_bt709", "color_convert.cl" },
{ "roi_align_layer", "roi_align_layer.cl" },
+ { "roi_align_layer_quantized", "roi_align_layer_quantized.cl" },
{ "roi_pooling_layer", "roi_pooling_layer.cl" },
{ "scale_nearest_neighbour_nchw", "scale.cl" },
{ "scale_nearest_neighbour_nhwc", "scale.cl" },
@@ -935,6 +936,10 @@ const std::map<std::string, std::string> CLKernelLibrary::_program_source_map =
#include "./cl_kernels/roi_align_layer.clembed"
},
{
+ "roi_align_layer_quantized.cl",
+#include "./cl_kernels/roi_align_layer_quantized.clembed"
+ },
+ {
"roi_pooling_layer.cl",
#include "./cl_kernels/roi_pooling_layer.clembed"
},
@@ -1251,4 +1256,4 @@ std::string CLKernelLibrary::get_device_version()
cl_uint CLKernelLibrary::get_num_compute_units()
{
return _device.getInfo<CL_DEVICE_MAX_COMPUTE_UNITS>();
-}
\ No newline at end of file
+}
diff --git a/src/core/CL/cl_kernels/roi_align_layer_quantized.cl b/src/core/CL/cl_kernels/roi_align_layer_quantized.cl
new file mode 100644
index 0000000000..f9360e98f1
--- /dev/null
+++ b/src/core/CL/cl_kernels/roi_align_layer_quantized.cl
@@ -0,0 +1,225 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "helpers.h"
+
+// This specifies the value to subtract from roi_dims / pooled_dims before taking the ceiling.
+// It is close to machine epsilon (in a floating-point system, x and x + EPS are the same number).
+#define EPS_GRID 0.00001f
+
+#if defined(DATA_TYPE) && defined(POOLED_DIM_X) && defined(POOLED_DIM_Y) && defined(MAX_DIM_X) && defined(MAX_DIM_Y) && defined(MAX_DIM_Z) && defined(SPATIAL_SCALE) && defined(OFFSET_IN) && defined(OFFSET_OUT) && defined(SCALE_IN) && defined(SCALE_OUT) && defined(OFFSET_ROIS) && defined(SCALE_ROIS) // Check for compile time constants
+
+#define CONVERT_RTE(x, type) (convert_##type##_rte((x)))
+#define CONVERT_DOWN(x, type) CONVERT_RTE(x, type)
+inline float dequantize_qasymm8(uchar input, float offset, float scale)
+{
+ return ((float)input - offset) * scale;
+}
+
+inline uchar quantize_qasymm8(float input, float offset, float scale)
+{
+ float out_f32 = input / scale + offset;
+ uchar res_u8 = CONVERT_SAT(CONVERT_DOWN(out_f32, int), uchar);
+ return res_u8;
+}
+
+inline float4 dequantize_qasymm16(ushort4 input, float offset, float scale)
+{
+ float4 in_f32 = (CONVERT(input, float4) - (float4)(offset)) * (float4)(scale);
+ return in_f32;
+}
+
+/** Performs a roi align on a single output pixel.
+ *
+ * @param[in] input Pointer to input Tensor3D struct.
+ * @param[in] region_start_x Start x index projected onto the input tensor.
+ * @param[in] region_end_x End x index projected onto the input tensor.
+ * @param[in] region_start_y Start y index projected onto the input tensor.
+ * @param[in] region_end_y End y index projected onto the input tensor.
+ * @param[in] pz z index of the input tensor.
+ *
+ * @return An average pooled value from the region specified in the input tensor.
+ */
+inline DATA_TYPE roi_align_1x1(const Tensor3D *input, float region_start_x,
+ float bin_size_x,
+ float grid_size_x,
+ float region_end_x,
+ float region_start_y,
+ float bin_size_y,
+ float grid_size_y,
+ float region_end_y,
+ int pz)
+{
+ // Iterate through the pooling region
+ float sum = 0;
+ for(int iy = 0; iy < grid_size_y; ++iy)
+ {
+ for(int ix = 0; ix < grid_size_x; ++ix)
+ {
+ // Align the window in the middle of every bin
+ const float y = region_start_y + (iy + 0.5f) * bin_size_y / (float)grid_size_y;
+ const float x = region_start_x + (ix + 0.5f) * bin_size_x / (float)grid_size_x;
+
+ // Interpolation in the unit square
+ const int y_low = (int)y;
+ const int x_low = (int)x;
+ const int y_high = y_low + 1;
+ const int x_high = x_low + 1;
+
+ const float ly = y - y_low;
+ const float lx = x - x_low;
+ const float hy = 1.f - ly;
+ const float hx = 1.f - lx;
+
+ const float w1 = hy * hx;
+ const float w2 = hy * lx;
+ const float w3 = ly * hx;
+ const float w4 = ly * lx;
+#if defined(NHWC)
+ const DATA_TYPE data1 = *(__global DATA_TYPE *)tensor3D_offset(input, pz, x_low, y_low);
+ const DATA_TYPE data2 = *(__global DATA_TYPE *)tensor3D_offset(input, pz, x_high, y_low);
+ const DATA_TYPE data3 = *(__global DATA_TYPE *)tensor3D_offset(input, pz, x_low, y_high);
+ const DATA_TYPE data4 = *(__global DATA_TYPE *)tensor3D_offset(input, pz, x_high, y_high);
+#else // !defined(NHWC)
+ const DATA_TYPE data1 = *(__global DATA_TYPE *)tensor3D_offset(input, x_low, y_low, pz);
+ const DATA_TYPE data2 = *(__global DATA_TYPE *)tensor3D_offset(input, x_high, y_low, pz);
+ const DATA_TYPE data3 = *(__global DATA_TYPE *)tensor3D_offset(input, x_low, y_high, pz);
+ const DATA_TYPE data4 = *(__global DATA_TYPE *)tensor3D_offset(input, x_high, y_high, pz);
+#endif // defined(NHWC)
+ const float data1_f32 = dequantize_qasymm8(data1, OFFSET_IN, SCALE_IN);
+ const float data2_f32 = dequantize_qasymm8(data2, OFFSET_IN, SCALE_IN);
+ const float data3_f32 = dequantize_qasymm8(data3, OFFSET_IN, SCALE_IN);
+ const float data4_f32 = dequantize_qasymm8(data4, OFFSET_IN, SCALE_IN);
+ sum += w1 * data1_f32 + w2 * data2_f32 + w3 * data3_f32 + w4 * data4_f32;
+ }
+ }
+
+ const float res_f32 = sum / (grid_size_x * grid_size_y);
+ return quantize_qasymm8(res_f32, OFFSET_OUT, SCALE_OUT);
+}
+
+/** Performs a roi align function.
+ *
+ * @note Datatype must be passed using -DDATA_TYPE e.g. -DDATA_TYPE=uchar
+ * @note Datasize must be passed using -DDATA_SIZE e.g. -DDATA_SIZE=32;
+ * @note Input dimensions must be passed using -DMAX_DIM_X, -DMAX_DIM_Y and -DMAX_DIM_Z;
+ * @note Pooled region dimensions must be passed using -DPOOLED_DIM_X and -DPOOLED_DIM_Y;
+ * @note Spatial scale must be passed using -DSPATIAL_SCALE;
+ * @note Sampling ratio (i.e., the number of samples in each bin) may be passed using -DSAMPLING_RATIO. If not defined each roi
+ * will have a default sampling ratio of roi_dims/pooling_dims
+ *
+ * @param[in] input_ptr Pointer to the source tensor. Supported data types: QASYMM8
+ * @param[in] input_stride_x Stride of the source tensor in X dimension (in bytes)
+ * @param[in] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] input_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] input_step_y input_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] input_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] input_step_z input_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] input_offset_first_element_in_bytes The offset of the first element in the pooled region of the source tensor as specified by ROI
+ * @param[in] rois_ptr Pointer to the ROIs tensor. Layout: { batch_index, x1, y1, x2, y2 }.
+ * Supported data types: QASYMM16 with 0.125f scale and 0 offset
+ * @param[in] rois_stride_x Stride of the ROIs tensor in X dimension (in bytes)
+ * @param[in] rois_step_x Step of the ROIs tensor in X dimension (in bytes)
+ * @param[in] rois_stride_y Stride of the ROIs tensor in Y dimension (in bytes)
+ * @param[in] rois_step_y Step of the ROIs tensor in Y dimension (in bytes)
+ * @param[in] rois_offset_first_element_in_bytes The offset of the first element in the ROIs tensor
+ * @param[out] output_ptr Pointer to the destination tensor. Supported data types: same as @p input_ptr
+ * @param[in] output_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] output_step_x output_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] output_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] output_step_y output_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] output_stride_z Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] output_step_z output_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] output_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ * @param[in] input_stride_w Stride of the source tensor in W dimension (in bytes)
+ * @param[in] output_stride_w Stride of the destination tensor in W dimension (in bytes)
+ */
+__kernel void roi_align_layer_quantized(
+ TENSOR3D_DECLARATION(input),
+ IMAGE_DECLARATION(rois),
+ TENSOR3D_DECLARATION(output),
+ unsigned int input_stride_w, unsigned int output_stride_w)
+{
+ // Get pixels pointer
+ Tensor3D input = CONVERT_TO_TENSOR3D_STRUCT_NO_STEP(input);
+ Image rois = CONVERT_TO_IMAGE_STRUCT_NO_STEP(rois);
+ Tensor3D output = CONVERT_TO_TENSOR3D_STRUCT_NO_STEP(output);
+
+#if defined(NHWC)
+ const int px = get_global_id(1);
+ const int py = get_global_id(2);
+ const int pw = get_global_id(0);
+#else // !defined(NHWC)
+ const int px = get_global_id(0);
+ const int py = get_global_id(1);
+ const int pw = get_global_id(2);
+#endif // defined(NHWC)
+
+ // Load roi parameters
+ // roi is laid out as follows { batch_index, x1, y1, x2, y2 }
+ const ushort roi_batch = *((__global ushort *)offset(&rois, 0, pw));
+ float4 roi = dequantize_qasymm16(vload4(0, (__global ushort *)offset(&rois, 1, pw)), OFFSET_ROIS, SCALE_ROIS);
+ float2 roi_anchor = roi.s01 * convert_float(SPATIAL_SCALE);
+ float2 roi_dims = fmax((roi.s23 - roi.s01) * convert_float(SPATIAL_SCALE), 1.f);
+
+ // Calculate pooled region start and end
+ float2 spatial_indx = (float2)(px, py);
+ float2 pooled_dims = (float2)(POOLED_DIM_X, POOLED_DIM_Y);
+ float2 max_spatial_dims = (float2)(MAX_DIM_X, MAX_DIM_Y);
+
+ float2 bin_size = (float2)((roi_dims.s0 / (float)POOLED_DIM_X), (roi_dims.s1 / (float)POOLED_DIM_Y));
+ float2 region_start = spatial_indx * bin_size + roi_anchor;
+ float2 region_end = (spatial_indx + 1) * bin_size + roi_anchor;
+
+ region_start = clamp(region_start, 0, max_spatial_dims);
+ region_end = clamp(region_end, 0, max_spatial_dims);
+
+#if defined(SAMPLING_RATIO)
+ float2 roi_bin_grid = SAMPLING_RATIO;
+#else // !defined(SAMPLING_RATIO)
+ // Note that we subtract EPS_GRID before ceiling. This is to avoid situations where 1.000001 gets ceiled to 2.
+ float2 roi_bin_grid = ceil(bin_size - EPS_GRID);
+#endif // defined(SAMPLING_RATIO)
+
+ // Move input and output pointer across the fourth dimension
+ input.ptr += roi_batch * input_stride_w;
+ output.ptr += pw * output_stride_w;
+ for(int pz = 0; pz < MAX_DIM_Z; ++pz)
+ {
+#if defined(NHWC)
+ __global DATA_TYPE *_output_ptr = (__global DATA_TYPE *)tensor3D_offset(&output, pz, px, py);
+#else // !defined(NHWC)
+ __global DATA_TYPE *_output_ptr = (__global DATA_TYPE *)tensor3D_offset(&output, px, py, pz);
+#endif // defined(NHWC)
+ *_output_ptr = (__global DATA_TYPE)roi_align_1x1(&input,
+ region_start.x,
+ bin_size.x,
+ roi_bin_grid.x,
+ region_end.x,
+ region_start.y,
+ bin_size.y,
+ roi_bin_grid.y,
+ region_end.y, pz);
+ }
+}
+#endif // Check for compile time constants
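
As a scalar illustration of what the kernel above computes per output element (dequantize the four QASYMM8 neighbours, bilinearly interpolate and average in float, then requantize with the output scale and offset), a plain C++ model might look like the sketch below. It models the arithmetic only; it is not the OpenCL code, and the rounding helper is an approximation of the kernel's round-to-nearest-even conversion.

    #include <algorithm>
    #include <cmath>
    #include <cstdint>

    // Scalar model of the arithmetic in roi_align_layer_quantized.cl (illustration only).
    static float dequantize_qasymm8(uint8_t q, float offset, float scale)
    {
        return (static_cast<float>(q) - offset) * scale;
    }

    static uint8_t quantize_qasymm8(float x, float offset, float scale)
    {
        // The kernel uses convert_int_rte (round to nearest even); lround is close enough here.
        const int q = static_cast<int>(std::lround(x / scale + offset));
        // Saturate to the uchar range, as CONVERT_SAT does in the kernel.
        return static_cast<uint8_t>(std::min(255, std::max(0, q)));
    }

    // Bilinear interpolation of one sample point from four dequantized neighbours,
    // with lx/ly the fractional offsets inside the unit square.
    static float bilinear(float d1, float d2, float d3, float d4, float lx, float ly)
    {
        const float hx = 1.f - lx;
        const float hy = 1.f - ly;
        return hy * hx * d1 + hy * lx * d2 + ly * hx * d3 + ly * lx * d4;
    }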
diff --git a/src/core/CL/kernels/CLROIAlignLayerKernel.cpp b/src/core/CL/kernels/CLROIAlignLayerKernel.cpp
index 50729f2421..134286bae1 100644
--- a/src/core/CL/kernels/CLROIAlignLayerKernel.cpp
+++ b/src/core/CL/kernels/CLROIAlignLayerKernel.cpp
@@ -45,11 +45,10 @@ namespace
Status validate_arguments(const ITensorInfo *input, const ITensorInfo *rois, ITensorInfo *output, const ROIPoolingLayerInfo &pool_info)
{
ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, rois, output);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, rois);
ARM_COMPUTE_RETURN_ERROR_ON(rois->dimension(0) != 5);
ARM_COMPUTE_RETURN_ERROR_ON(rois->num_dimensions() > 2);
ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F32, DataType::F16);
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::F32, DataType::F16);
ARM_COMPUTE_RETURN_ERROR_ON_DATA_LAYOUT_NOT_IN(input, DataLayout::NHWC, DataLayout::NCHW);
ARM_COMPUTE_RETURN_ERROR_ON((pool_info.pooled_width() == 0) || (pool_info.pooled_height() == 0));
@@ -59,6 +58,19 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *rois, ITe
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(input, output);
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(compute_roi_align_shape(*input, *rois, pool_info), output->tensor_shape());
}
+
+ if(input->data_type() == DataType::QASYMM8)
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(rois, 1, DataType::QASYMM16);
+
+ const UniformQuantizationInfo rois_qinfo = rois->quantization_info().uniform();
+ ARM_COMPUTE_RETURN_ERROR_ON(rois_qinfo.scale != 0.125f);
+ ARM_COMPUTE_RETURN_ERROR_ON(rois_qinfo.offset != 0);
+ }
+ else
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, rois);
+ }
return Status{};
}
@@ -104,9 +116,12 @@ void CLROIAlignLayerKernel::configure(const ICLTensor *input, const ICLTensor *r
_rois = rois;
_pool_info = pool_info;
+ const DataType data_type = input->info()->data_type();
+ const bool is_qasymm = is_data_type_quantized_asymmetric(data_type);
+
// Set build options
CLBuildOptions build_opts;
- build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(input->info()->data_type()));
+ build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(data_type));
build_opts.add_option("-DDATA_SIZE=" + get_data_size_from_data_type(input->info()->data_type()));
build_opts.add_option("-DMAX_DIM_X=" + support::cpp11::to_string(_input->info()->dimension(get_data_layout_dimension_index(input->info()->data_layout(), DataLayoutDimension::WIDTH))));
build_opts.add_option("-DMAX_DIM_Y=" + support::cpp11::to_string(_input->info()->dimension(get_data_layout_dimension_index(input->info()->data_layout(), DataLayoutDimension::HEIGHT))));
@@ -117,9 +132,23 @@ void CLROIAlignLayerKernel::configure(const ICLTensor *input, const ICLTensor *r
build_opts.add_option_if(input->info()->data_layout() == DataLayout::NHWC, "-DNHWC");
build_opts.add_option_if(pool_info.sampling_ratio() > 0, "-DSAMPLING_RATIO=" + support::cpp11::to_string(pool_info.sampling_ratio()));
+ if(is_qasymm)
+ {
+ const UniformQuantizationInfo iq_info = input->info()->quantization_info().uniform();
+ const UniformQuantizationInfo roisq_info = rois->info()->quantization_info().uniform();
+ const UniformQuantizationInfo oq_info = output->info()->quantization_info().uniform();
+
+ build_opts.add_option("-DOFFSET_IN=" + float_to_string_with_full_precision(iq_info.offset));
+ build_opts.add_option("-DSCALE_IN=" + float_to_string_with_full_precision(iq_info.scale));
+ build_opts.add_option("-DOFFSET_ROIS=" + float_to_string_with_full_precision(roisq_info.offset));
+ build_opts.add_option("-DSCALE_ROIS=" + float_to_string_with_full_precision(roisq_info.scale));
+ build_opts.add_option("-DOFFSET_OUT=" + float_to_string_with_full_precision(oq_info.offset));
+ build_opts.add_option("-DSCALE_OUT=" + float_to_string_with_full_precision(oq_info.scale));
+ }
+
// Create kernel
- std::string kernel_name = "roi_align_layer";
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts.options()));
+ const std::string kernel_name = (is_qasymm) ? "roi_align_layer_quantized" : "roi_align_layer";
+ _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts.options()));
ICLKernel::configure_internal(win_config.second);
}
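
A stand-alone restatement of the validation contract added above: a QASYMM8 input requires QASYMM16 ROIs quantized with scale 0.125 and offset 0, otherwise the ROIs must share the input's data type. The helper name below is hypothetical and not part of the library; the checks mirror validate_arguments().

    #include "arm_compute/core/ITensorInfo.h"

    // Hypothetical stand-alone check mirroring the rules in validate_arguments().
    bool rois_info_is_compatible(const arm_compute::ITensorInfo &input, const arm_compute::ITensorInfo &rois)
    {
        using namespace arm_compute;
        if(input.data_type() == DataType::QASYMM8)
        {
            const UniformQuantizationInfo qi = rois.quantization_info().uniform();
            return rois.data_type() == DataType::QASYMM16 && qi.scale == 0.125f && qi.offset == 0;
        }
        return rois.data_type() == input.data_type();
    }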
diff --git a/tests/validation/CL/ROIAlignLayer.cpp b/tests/validation/CL/ROIAlignLayer.cpp
index 566e1985b3..b213c6815f 100644
--- a/tests/validation/CL/ROIAlignLayer.cpp
+++ b/tests/validation/CL/ROIAlignLayer.cpp
@@ -41,11 +41,13 @@ namespace validation
{
namespace
{
-RelativeTolerance<float> relative_tolerance_f32(0.01f);
-AbsoluteTolerance<float> absolute_tolerance_f32(0.001f);
+constexpr RelativeTolerance<float> relative_tolerance_f32(0.01f);
+constexpr AbsoluteTolerance<float> absolute_tolerance_f32(0.001f);

-RelativeTolerance<float> relative_tolerance_f16(0.01f);
-AbsoluteTolerance<float> absolute_tolerance_f16(0.001f);
+constexpr RelativeTolerance<float> relative_tolerance_f16(0.01f);
+constexpr AbsoluteTolerance<float> absolute_tolerance_f16(0.001f);
+
+constexpr AbsoluteTolerance<uint8_t> tolerance_qasymm8(1);
} // namespace
TEST_SUITE(CL)
@@ -55,13 +57,14 @@ TEST_SUITE(RoiAlign)
// clang-format off
DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(
framework::dataset::make("InputInfo", { TensorInfo(TensorShape(250U, 128U, 3U), 1, DataType::F32),
- TensorInfo(TensorShape(250U, 128U, 3U), 1, DataType::F32), // Mismatching data type input/rois
- TensorInfo(TensorShape(250U, 128U, 3U), 1, DataType::F32), // Mismatching data type input/output
- TensorInfo(TensorShape(250U, 128U, 2U), 1, DataType::F32), // Mismatching depth size input/output
- TensorInfo(TensorShape(250U, 128U, 3U), 1, DataType::F32), // Mismatching number of rois and output batch size
- TensorInfo(TensorShape(250U, 128U, 3U), 1, DataType::F32), // Invalid number of values per ROIS
- TensorInfo(TensorShape(250U, 128U, 3U), 1, DataType::F32), // Mismatching height and width input/output
-
+ TensorInfo(TensorShape(250U, 128U, 3U), 1, DataType::F32), // Mismatching data type input/rois
+ TensorInfo(TensorShape(250U, 128U, 3U), 1, DataType::F32), // Mismatching data type input/output
+ TensorInfo(TensorShape(250U, 128U, 2U), 1, DataType::F32), // Mismatching depth size input/output
+ TensorInfo(TensorShape(250U, 128U, 3U), 1, DataType::F32), // Mismatching number of rois and output batch size
+ TensorInfo(TensorShape(250U, 128U, 3U), 1, DataType::F32), // Invalid number of values per ROIS
+ TensorInfo(TensorShape(250U, 128U, 3U), 1, DataType::F32), // Mismatching height and width input/output
+ TensorInfo(TensorShape(250U, 128U, 3U), 1, DataType::QASYMM8, QuantizationInfo(1.f / 255.f, 127)), // Invalid ROIS data type
+ TensorInfo(TensorShape(250U, 128U, 3U), 1, DataType::QASYMM8, QuantizationInfo(1.f / 255.f, 127)), // Invalid ROIS Quantization Info
}),
framework::dataset::make("RoisInfo", { TensorInfo(TensorShape(5, 4U), 1, DataType::F32),
TensorInfo(TensorShape(5, 4U), 1, DataType::F16),
@@ -70,6 +73,8 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(
TensorInfo(TensorShape(5, 10U), 1, DataType::F32),
TensorInfo(TensorShape(4, 4U), 1, DataType::F32),
TensorInfo(TensorShape(5, 4U), 1, DataType::F32),
+ TensorInfo(TensorShape(5, 4U), 1, DataType::F32),
+ TensorInfo(TensorShape(5, 4U), 1, DataType::QASYMM16, QuantizationInfo(0.2f, 0)),
})),
framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(7U, 7U, 3U, 4U), 1, DataType::F32),
TensorInfo(TensorShape(7U, 7U, 3U, 4U), 1, DataType::F32),
@@ -78,6 +83,8 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(
TensorInfo(TensorShape(7U, 7U, 3U, 4U), 1, DataType::F32),
TensorInfo(TensorShape(7U, 7U, 3U, 4U), 1, DataType::F32),
TensorInfo(TensorShape(5U, 5U, 3U, 4U), 1, DataType::F32),
+ TensorInfo(TensorShape(7U, 7U, 3U, 4U), 1, DataType::QASYMM8, QuantizationInfo(1.f / 255.f, 120)),
+ TensorInfo(TensorShape(7U, 7U, 3U, 4U), 1, DataType::QASYMM8, QuantizationInfo(1.f / 255.f, 120)),
})),
framework::dataset::make("PoolInfo", { ROIPoolingLayerInfo(7U, 7U, 1./8),
ROIPoolingLayerInfo(7U, 7U, 1./8),
@@ -86,8 +93,9 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(
ROIPoolingLayerInfo(7U, 7U, 1./8),
ROIPoolingLayerInfo(7U, 7U, 1./8),
ROIPoolingLayerInfo(7U, 7U, 1./8),
+ ROIPoolingLayerInfo(7U, 7U, 1./8),
})),
- framework::dataset::make("Expected", { true, false, false, false, false, false, false })),
+ framework::dataset::make("Expected", { true, false, false, false, false, false, false, false, false })),
input_info, rois_info, output_info, pool_info, expected)
{
ARM_COMPUTE_EXPECT(bool(CLROIAlignLayer::validate(&input_info.clone()->set_is_resizable(true), &rois_info.clone()->set_is_resizable(true), &output_info.clone()->set_is_resizable(true), pool_info)) == expected, framework::LogLevel::ERRORS);
@@ -99,24 +107,46 @@ template <typename T>
using CLROIAlignLayerFixture = ROIAlignLayerFixture<CLTensor, CLAccessor, CLROIAlignLayer, T>;
TEST_SUITE(Float)
-FIXTURE_DATA_TEST_CASE(SmallROIAlignLayerFloat, CLROIAlignLayerFixture<float>, framework::DatasetMode::ALL,
- framework::dataset::combine(framework::dataset::combine(datasets::SmallROIDataset(),
- framework::dataset::make("DataType", { DataType::F32 })),
- framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
+TEST_SUITE(FP32)
+FIXTURE_DATA_TEST_CASE(Small, CLROIAlignLayerFixture<float>, framework::DatasetMode::ALL,
+ combine(combine(datasets::SmallROIDataset(),
+ framework::dataset::make("DataType", { DataType::F32 })),
+ framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
{
// Validate output
validate(CLAccessor(_target), _reference, relative_tolerance_f32, .02f, absolute_tolerance_f32);
}
-FIXTURE_DATA_TEST_CASE(SmallROIAlignLayerHalf, CLROIAlignLayerFixture<half>, framework::DatasetMode::ALL,
- framework::dataset::combine(framework::dataset::combine(datasets::SmallROIDataset(),
- framework::dataset::make("DataType", { DataType::F16 })),
- framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
+TEST_SUITE_END() // FP32
+TEST_SUITE(FP16)
+FIXTURE_DATA_TEST_CASE(Small, CLROIAlignLayerFixture<half>, framework::DatasetMode::ALL,
+ combine(combine(datasets::SmallROIDataset(),
+ framework::dataset::make("DataType", { DataType::F16 })),
+ framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
{
// Validate output
validate(CLAccessor(_target), _reference, relative_tolerance_f16, .02f, absolute_tolerance_f16);
}
+TEST_SUITE_END() // FP16
TEST_SUITE_END() // Float
+template <typename T>
+using CLROIAlignLayerQuantizedFixture = ROIAlignLayerQuantizedFixture<CLTensor, CLAccessor, CLROIAlignLayer, T>;
+
+TEST_SUITE(Quantized)
+TEST_SUITE(QASYMM8)
+FIXTURE_DATA_TEST_CASE(Small, CLROIAlignLayerQuantizedFixture<uint8_t>, framework::DatasetMode::ALL,
+ combine(combine(combine(combine(datasets::SmallROIDataset(),
+ framework::dataset::make("DataType", { DataType::QASYMM8 })),
+ framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
+ framework::dataset::make("InputQuantizationInfo", { QuantizationInfo(1.f / 255.f, 127) })),
+ framework::dataset::make("OutputQuantizationInfo", { QuantizationInfo(2.f / 255.f, 120) })))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference, tolerance_qasymm8);
+}
+TEST_SUITE_END() // QASYMM8
+TEST_SUITE_END() // Quantized
+
TEST_SUITE_END() // RoiAlign
TEST_SUITE_END() // CL
} // namespace validation
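
For reference, a worked requantization using the output QuantizationInfo(2.f / 255.f, 120) from the quantized test above, which is the mapping against which tolerance_qasymm8 (one LSB) is applied. The pooled float value 0.1f is hypothetical.

    #include <cmath>
    #include <cstdio>

    int main()
    {
        const float scale  = 2.f / 255.f;
        const int   offset = 120;
        const float x      = 0.1f;                                              // hypothetical pooled value
        const int   q      = static_cast<int>(std::lround(x / scale)) + offset; // 13 + 120 = 133
        std::printf("%d\n", q);
        return 0;
    }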
diff --git a/tests/validation/Helpers.cpp b/tests/validation/Helpers.cpp
index 360859e487..a811cabf56 100644
--- a/tests/validation/Helpers.cpp
+++ b/tests/validation/Helpers.cpp
@@ -120,6 +120,18 @@ SimpleTensor<float> convert_from_asymmetric(const SimpleTensor<uint8_t> &src)
return dst;
}
+SimpleTensor<float> convert_from_asymmetric(const SimpleTensor<uint16_t> &src)
+{
+ const UniformQuantizationInfo &quantization_info = src.quantization_info().uniform();
+ SimpleTensor<float> dst{ src.shape(), DataType::F32, 1, QuantizationInfo(), src.data_layout() };
+
+ for(int i = 0; i < src.num_elements(); ++i)
+ {
+ dst[i] = dequantize_qasymm16(src[i], quantization_info);
+ }
+ return dst;
+}
+
SimpleTensor<uint8_t> convert_to_asymmetric(const SimpleTensor<float> &src, const QuantizationInfo &quantization_info)
{
SimpleTensor<uint8_t> dst{ src.shape(), DataType::QASYMM8, 1, quantization_info };
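
The new uint16_t overload relies on dequantize_qasymm16, which is defined elsewhere in the library and not shown in this patch; presumably it applies the standard asymmetric formula with the tensor's uniform scale and offset. A sketch under that assumption:

    #include <cstdint>

    // Presumed behaviour of dequantize_qasymm16 (assumption; not part of this patch).
    inline float dequantize_qasymm16_sketch(uint16_t q, float scale, int32_t offset)
    {
        return (static_cast<float>(q) - static_cast<float>(offset)) * scale;
    }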
diff --git a/tests/validation/Helpers.h b/tests/validation/Helpers.h
index 44dd7a9b81..0d6515b5c5 100644
--- a/tests/validation/Helpers.h
+++ b/tests/validation/Helpers.h
@@ -177,7 +177,7 @@ void fill_lookuptable(T &&table)
}
}
-/** Convert quantized simple tensor into float using tensor quantization information.
+/** Convert 8-bit asymmetric quantized simple tensor into float using tensor quantization information.
*
* @param[in] src Quantized tensor.
*
@@ -185,6 +185,14 @@ void fill_lookuptable(T &&table)
*/
SimpleTensor<float> convert_from_asymmetric(const SimpleTensor<uint8_t> &src);
+/** Convert 16-bit asymmetric quantized simple tensor into float using tensor quantization information.
+ *
+ * @param[in] src Quantized tensor.
+ *
+ * @return Float tensor.
+ */
+SimpleTensor<float> convert_from_asymmetric(const SimpleTensor<uint16_t> &src);
+
/** Convert float simple tensor into quantized using specified quantization information.
*
* @param[in] src Float tensor.
diff --git a/tests/validation/fixtures/ROIAlignLayerFixture.h b/tests/validation/fixtures/ROIAlignLayerFixture.h
index dfbb478a41..b9b85d3073 100644
--- a/tests/validation/fixtures/ROIAlignLayerFixture.h
+++ b/tests/validation/fixtures/ROIAlignLayerFixture.h
@@ -26,7 +26,7 @@
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
-#include "arm_compute/runtime/CL/functions/CLROIAlignLayer.h"
+#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "tests/AssetsLibrary.h"
#include "tests/Globals.h"
#include "tests/IAccessor.h"
@@ -42,14 +42,17 @@ namespace test
namespace validation
{
template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
-class ROIAlignLayerFixture : public framework::Fixture
+class ROIAlignLayerGenericFixture : public framework::Fixture
{
public:
+ using TRois = typename std::conditional<std::is_same<typename std::decay<T>::type, uint8_t>::value, uint16_t, T>::type;
+
template <typename...>
- void setup(TensorShape input_shape, const ROIPoolingLayerInfo pool_info, TensorShape rois_shape, DataType data_type, DataLayout data_layout)
+ void setup(TensorShape input_shape, const ROIPoolingLayerInfo pool_info, TensorShape rois_shape, DataType data_type, DataLayout data_layout, QuantizationInfo qinfo, QuantizationInfo output_qinfo)
{
- _target = compute_target(input_shape, data_type, data_layout, pool_info, rois_shape);
- _reference = compute_reference(input_shape, data_type, pool_info, rois_shape);
+ _rois_data_type = is_data_type_quantized_asymmetric(data_type) ? DataType::QASYMM16 : data_type;
+ _target = compute_target(input_shape, data_type, data_layout, pool_info, rois_shape, qinfo, output_qinfo);
+ _reference = compute_reference(input_shape, data_type, pool_info, rois_shape, qinfo, output_qinfo);
}
protected:
@@ -66,17 +69,17 @@ protected:
const size_t num_rois = rois_shape.y();
std::mt19937 gen(library->seed());
- T *rois_ptr = static_cast<T *>(rois.data());
+ TRois *rois_ptr = static_cast<TRois *>(rois.data());
const float pool_width = pool_info.pooled_width();
const float pool_height = pool_info.pooled_height();
const float roi_scale = pool_info.spatial_scale();
// Calculate distribution bounds
- const auto scaled_width = static_cast<T>((shape[get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH)] / roi_scale) / pool_width);
- const auto scaled_height = static_cast<T>((shape[get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT)] / roi_scale) / pool_height);
- const auto min_width = static_cast<T>(pool_width / roi_scale);
- const auto min_height = static_cast<T>(pool_height / roi_scale);
+ const auto scaled_width = static_cast<float>((shape[get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH)] / roi_scale) / pool_width);
+ const auto scaled_height = static_cast<float>((shape[get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT)] / roi_scale) / pool_height);
+ const auto min_width = static_cast<float>(pool_width / roi_scale);
+ const auto min_height = static_cast<float>(pool_height / roi_scale);
// Create distributions
std::uniform_int_distribution<int> dist_batch(0, shape[3] - 1);
@@ -93,11 +96,21 @@ protected:
const auto x2 = x1 + dist_w(gen);
const auto y2 = y1 + dist_h(gen);
- rois_ptr[values_per_roi * pw] = batch_idx;
- rois_ptr[values_per_roi * pw + 1] = x1;
- rois_ptr[values_per_roi * pw + 2] = y1;
- rois_ptr[values_per_roi * pw + 3] = x2;
- rois_ptr[values_per_roi * pw + 4] = y2;
+ rois_ptr[values_per_roi * pw] = batch_idx;
+ if(rois.data_type() == DataType::QASYMM16)
+ {
+ rois_ptr[values_per_roi * pw + 1] = quantize_qasymm16(static_cast<float>(x1), rois.quantization_info());
+ rois_ptr[values_per_roi * pw + 2] = quantize_qasymm16(static_cast<float>(y1), rois.quantization_info());
+ rois_ptr[values_per_roi * pw + 3] = quantize_qasymm16(static_cast<float>(x2), rois.quantization_info());
+ rois_ptr[values_per_roi * pw + 4] = quantize_qasymm16(static_cast<float>(y2), rois.quantization_info());
+ }
+ else
+ {
+ rois_ptr[values_per_roi * pw + 1] = static_cast<TRois>(x1);
+ rois_ptr[values_per_roi * pw + 2] = static_cast<TRois>(y1);
+ rois_ptr[values_per_roi * pw + 3] = static_cast<TRois>(x2);
+ rois_ptr[values_per_roi * pw + 4] = static_cast<TRois>(y2);
+ }
}
}
@@ -105,17 +118,23 @@ protected:
DataType data_type,
DataLayout data_layout,
const ROIPoolingLayerInfo &pool_info,
- const TensorShape rois_shape)
+ const TensorShape rois_shape,
+ const QuantizationInfo &qinfo,
+ const QuantizationInfo &output_qinfo)
{
if(data_layout == DataLayout::NHWC)
{
permute(input_shape, PermutationVector(2U, 0U, 1U));
}
+ const QuantizationInfo rois_qinfo = is_data_type_quantized(data_type) ? QuantizationInfo(0.125f, 0) : QuantizationInfo();
+
// Create tensors
- TensorType src = create_tensor<TensorType>(input_shape, data_type, 1, QuantizationInfo(), data_layout);
- TensorType rois_tensor = create_tensor<TensorType>(rois_shape, data_type);
- TensorType dst;
+ TensorType src = create_tensor<TensorType>(input_shape, data_type, 1, qinfo, data_layout);
+ TensorType rois_tensor = create_tensor<TensorType>(rois_shape, _rois_data_type, 1, rois_qinfo);
+
+ const TensorShape dst_shape = misc::shape_calculator::compute_roi_align_shape(*(src.info()), *(rois_tensor.info()), pool_info);
+ TensorType dst = create_tensor<TensorType>(dst_shape, data_type, 1, output_qinfo, data_layout);
// Create and configure function
FunctionType roi_align_layer;
@@ -147,23 +166,51 @@ protected:
SimpleTensor<T> compute_reference(const TensorShape &input_shape,
DataType data_type,
const ROIPoolingLayerInfo &pool_info,
- const TensorShape rois_shape)
+ const TensorShape rois_shape,
+ const QuantizationInfo &qinfo,
+ const QuantizationInfo &output_qinfo)
{
// Create reference tensor
- SimpleTensor<T> src{ input_shape, data_type };
- SimpleTensor<T> rois_tensor{ rois_shape, data_type };
+ SimpleTensor<T> src{ input_shape, data_type, 1, qinfo };
+ const QuantizationInfo rois_qinfo = is_data_type_quantized(data_type) ? QuantizationInfo(0.125f, 0) : QuantizationInfo();
+ SimpleTensor<TRois> rois_tensor{ rois_shape, _rois_data_type, 1, rois_qinfo };
// Fill reference tensor
fill(src);
generate_rois(rois_tensor, input_shape, pool_info, rois_shape);
- return reference::roi_align_layer(src, rois_tensor, pool_info);
+ return reference::roi_align_layer(src, rois_tensor, pool_info, output_qinfo);
}
TensorType _target{};
SimpleTensor<T> _reference{};
+ DataType _rois_data_type{};
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class ROIAlignLayerFixture : public ROIAlignLayerGenericFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ template <typename...>
+ void setup(TensorShape input_shape, const ROIPoolingLayerInfo pool_info, TensorShape rois_shape, DataType data_type, DataLayout data_layout)
+ {
+ ROIAlignLayerGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, pool_info, rois_shape, data_type, data_layout,
+ QuantizationInfo(), QuantizationInfo());
+ }
};
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class ROIAlignLayerQuantizedFixture : public ROIAlignLayerGenericFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ template <typename...>
+ void setup(TensorShape input_shape, const ROIPoolingLayerInfo pool_info, TensorShape rois_shape, DataType data_type,
+ DataLayout data_layout, QuantizationInfo qinfo, QuantizationInfo output_qinfo)
+ {
+ ROIAlignLayerGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, pool_info, rois_shape,
+ data_type, data_layout, qinfo, output_qinfo);
+ }
+};
} // namespace validation
} // namespace test
} // namespace arm_compute
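
A small worked check of why the fixture pins the ROI quantization to QuantizationInfo(0.125f, 0): with a scale of 1/8 and zero offset, the integer pixel coordinates generated above round-trip through QASYMM16 without loss for the coordinate ranges used in the tests. The coordinate value below is illustrative.

    #include <cassert>
    #include <cmath>
    #include <cstdint>

    int main()
    {
        const float scale = 0.125f;
        const float x1    = 50.f;                                            // generated ROI corner (illustrative)
        const auto  q     = static_cast<uint16_t>(std::lround(x1 / scale));  // 400
        const float back  = q * scale;                                       // exactly 50.f again
        assert(back == x1);
        return 0;
    }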
diff --git a/tests/validation/reference/ROIAlignLayer.cpp b/tests/validation/reference/ROIAlignLayer.cpp
index 8a76983d44..8ad78ff915 100644
--- a/tests/validation/reference/ROIAlignLayer.cpp
+++ b/tests/validation/reference/ROIAlignLayer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -112,15 +112,31 @@ T clamp(T value, T lower, T upper)
{
return std::max(lower, std::min(value, upper));
}
+
+SimpleTensor<float> convert_rois_from_asymmetric(SimpleTensor<uint16_t> rois)
+{
+ const UniformQuantizationInfo &quantization_info = rois.quantization_info().uniform();
+ SimpleTensor<float> dst{ rois.shape(), DataType::F32, 1, QuantizationInfo(), rois.data_layout() };
+
+ for(int i = 0; i < rois.num_elements(); i += 5)
+ {
+ dst[i] = static_cast<float>(rois[i]); // batch idx
+ dst[i + 1] = dequantize_qasymm16(rois[i + 1], quantization_info);
+ dst[i + 2] = dequantize_qasymm16(rois[i + 2], quantization_info);
+ dst[i + 3] = dequantize_qasymm16(rois[i + 3], quantization_info);
+ dst[i + 4] = dequantize_qasymm16(rois[i + 4], quantization_info);
+ }
+ return dst;
+}
} // namespace
-template <typename T>
-SimpleTensor<T> roi_align_layer(const SimpleTensor<T> &src, const SimpleTensor<T> &rois, const ROIPoolingLayerInfo &pool_info)
+template <typename T, typename TRois>
+SimpleTensor<T> roi_align_layer(const SimpleTensor<T> &src, const SimpleTensor<TRois> &rois, const ROIPoolingLayerInfo &pool_info, const QuantizationInfo &output_qinfo)
{
const size_t values_per_roi = rois.shape()[0];
const size_t num_rois = rois.shape()[1];
DataType dst_data_type = src.data_type();
- const auto *rois_ptr = static_cast<const T *>(rois.data());
+ const auto *rois_ptr = static_cast<const TRois *>(rois.data());
TensorShape input_shape = src.shape();
TensorShape output_shape(pool_info.pooled_width(), pool_info.pooled_height(), src.shape()[2], num_rois);
@@ -183,8 +199,19 @@ SimpleTensor<T> roi_align_layer(const SimpleTensor<T> &src, const SimpleTensor<T
}
return dst;
}
-template SimpleTensor<float> roi_align_layer(const SimpleTensor<float> &src, const SimpleTensor<float> &rois, const ROIPoolingLayerInfo &pool_info);
-template SimpleTensor<half> roi_align_layer(const SimpleTensor<half> &src, const SimpleTensor<half> &rois, const ROIPoolingLayerInfo &pool_info);
+
+template SimpleTensor<float> roi_align_layer(const SimpleTensor<float> &src, const SimpleTensor<float> &rois, const ROIPoolingLayerInfo &pool_info, const QuantizationInfo &output_qinfo);
+template SimpleTensor<half> roi_align_layer(const SimpleTensor<half> &src, const SimpleTensor<half> &rois, const ROIPoolingLayerInfo &pool_info, const QuantizationInfo &output_qinfo);
+
+template <>
+SimpleTensor<uint8_t> roi_align_layer(const SimpleTensor<uint8_t> &src, const SimpleTensor<uint16_t> &rois, const ROIPoolingLayerInfo &pool_info, const QuantizationInfo &output_qinfo)
+{
+ SimpleTensor<float> src_tmp = convert_from_asymmetric(src);
+ SimpleTensor<float> rois_tmp = convert_rois_from_asymmetric(rois);
+ SimpleTensor<float> dst_tmp = roi_align_layer<float, float>(src_tmp, rois_tmp, pool_info, output_qinfo);
+ SimpleTensor<uint8_t> dst = convert_to_asymmetric(dst_tmp, output_qinfo);
+ return dst;
+}
} // namespace reference
} // namespace validation
} // namespace test
diff --git a/tests/validation/reference/ROIAlignLayer.h b/tests/validation/reference/ROIAlignLayer.h
index b67ff42166..e1568133e7 100644
--- a/tests/validation/reference/ROIAlignLayer.h
+++ b/tests/validation/reference/ROIAlignLayer.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -36,8 +36,8 @@ namespace validation
{
namespace reference
{
-template <typename T>
-SimpleTensor<T> roi_align_layer(const SimpleTensor<T> &src, const SimpleTensor<T> &rois, const ROIPoolingLayerInfo &pool_info);
+template <typename T, typename TRois>
+SimpleTensor<T> roi_align_layer(const SimpleTensor<T> &src, const SimpleTensor<TRois> &rois, const ROIPoolingLayerInfo &pool_info, const QuantizationInfo &output_qinfo);
} // namespace reference
} // namespace validation
} // namespace test