From ab8408872f49c9429c84d83de665c55e31a500b2 Mon Sep 17 00:00:00 2001 From: Suhail Munshi Date: Tue, 9 Feb 2021 16:31:00 +0000 Subject: Added Qasymm8 datatype support to NEROIPoolingLayer with Tests Tests added to check ROIPooling Layer against reference with both Float32 and Qasymm8 input. Resolves : COMPMID-2319 Change-Id: I867bc4dde1e3e91f9f42f4a7ce8debfe83b8db50 Signed-off-by: Mohammed Suhail Munshi Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/c/VisualCompute/ComputeLibrary/+/296640 Tested-by: bsgcomp Reviewed-by: Pablo Tello Comments-Addressed: Pablo Tello Signed-off-by: Suhail Munshi Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/5060 Tested-by: Arm Jenkins Reviewed-by: Pablo Marquez Tello Comments-Addressed: Arm Jenkins --- src/core/NEON/kernels/NEROIPoolingLayerKernel.cpp | 130 ++++++++++++++++------ 1 file changed, 95 insertions(+), 35 deletions(-) (limited to 'src/core/NEON/kernels/NEROIPoolingLayerKernel.cpp') diff --git a/src/core/NEON/kernels/NEROIPoolingLayerKernel.cpp b/src/core/NEON/kernels/NEROIPoolingLayerKernel.cpp index 9a3a757f1c..400e8291d6 100644 --- a/src/core/NEON/kernels/NEROIPoolingLayerKernel.cpp +++ b/src/core/NEON/kernels/NEROIPoolingLayerKernel.cpp @@ -22,7 +22,6 @@ * SOFTWARE. 
*/ #include "src/core/NEON/kernels/NEROIPoolingLayerKernel.h" - #include "arm_compute/core/TensorInfo.h" #include "arm_compute/core/Validate.h" #include "arm_compute/core/Window.h" @@ -35,35 +34,101 @@ namespace arm_compute { +namespace +{ +Status validate_arguments(const ITensorInfo *input, const ITensorInfo *rois, const ITensorInfo *output, const ROIPoolingLayerInfo &pool_info) +{ + ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output, rois); + + //Validate arguments + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_NOT_IN(rois, DataType::U16); + ARM_COMPUTE_RETURN_ERROR_ON(rois->dimension(0) != 5); + ARM_COMPUTE_RETURN_ERROR_ON(rois->num_dimensions() > 2); + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_NOT_IN(input, DataType::F32, DataType::QASYMM8); + ARM_COMPUTE_RETURN_ERROR_ON((pool_info.pooled_width() == 0) || (pool_info.pooled_height() == 0)); + + if(output->total_size() != 0) + { + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output); + ARM_COMPUTE_RETURN_ERROR_ON((output->dimension(0) != pool_info.pooled_width()) || (output->dimension(1) != pool_info.pooled_height())); + ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(2) != output->dimension(2)); + ARM_COMPUTE_RETURN_ERROR_ON(rois->dimension(1) != output->dimension(3)); + } + + return Status{}; +} + +/** Evaluate number needing to be stored in output tensor as quantized format. + * + * @param[in] input Source tensor. Data types supported: QASYMM8 + * @param[out] output Destination tensor. 
Where output value will be stored, same datatype as input + * @param[in] region_start_x Beginning region of x coordinate of pooling region + * @param[in] region_start_y Beginning region of y coordinate of pooling region + * @param[in] region_end_x End of pooling region, x coordinate + * @param[in] region_end_y End of pooling region, y coordinate + * @param[in] fm Channel index of coordinate in output Tensor to store value + * @param[in] px Width index of coordinate in output Tensor to store value + * @param[in] py Height index of coordinate in output Tensor to store value + * @param[in] roi_batch Index of image to perform Pooling on in input Tensor + * @param[in] roi_indx Index of image of coordinate in output Tensor to store value + */ +template <typename T> +void template_eval(const ITensor *input, const ITensor *output, int region_start_x, int region_start_y, + int region_end_x, int region_end_y, int fm, int px, int py, int roi_batch, int roi_indx) +{ + if((region_end_x <= region_start_x) || (region_end_y <= region_start_y)) + { + *reinterpret_cast<T *>(output->ptr_to_element(Coordinates(px, py, fm, roi_indx))) = 0; + } + else + { + T curr_max = std::numeric_limits<T>::lowest(); // Min value of typename T + for(int j = region_start_y; j < region_end_y; ++j) + { + for(int i = region_start_x; i < region_end_x; ++i) + { + const auto val = *reinterpret_cast<const T *>(input->ptr_to_element(Coordinates(i, j, fm, roi_batch))); + curr_max = std::max(val, curr_max); + } + } + + // if quantized datatype, requantize then store in output tensor + if(is_data_type_quantized(input->info()->data_type())) + { + // convert qasymm to new output quantization scale and offset + UniformQuantizationInfo uqinfo = compute_requantization_scale_offset(input->info()->quantization_info().uniform(), output->info()->quantization_info().uniform()); + *reinterpret_cast<T *>(output->ptr_to_element(Coordinates(px, py, fm, roi_indx))) = quantize_qasymm8(curr_max, uqinfo); + } + else + { 
*reinterpret_cast<T *>(output->ptr_to_element(Coordinates(px, py, fm, roi_indx))) = curr_max; + } + } +} +} // namespace + NEROIPoolingLayerKernel::NEROIPoolingLayerKernel() : _input(nullptr), _rois(nullptr), _output(nullptr), _pool_info(0, 0, 0.f) { } -void NEROIPoolingLayerKernel::configure(const ITensor *input, const ITensor *rois, ITensor *output, const ROIPoolingLayerInfo &pool_info) +Status NEROIPoolingLayerKernel::validate(const ITensorInfo *input, const ITensorInfo *rois, const ITensorInfo *output, const ROIPoolingLayerInfo &pool_info) +{ + ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, rois, output, pool_info)); + return Status{}; +} + +void NEROIPoolingLayerKernel::configure(const ITensor *input, const ITensor *rois, const ITensor *output, const ROIPoolingLayerInfo &pool_info) { ARM_COMPUTE_ERROR_ON_NULLPTR(input, output, rois); //Validate arguments - ARM_COMPUTE_ERROR_ON_NULLPTR(input->info(), rois->info(), output->info()); - ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(rois, 1, DataType::U16); - ARM_COMPUTE_ERROR_ON(rois->info()->dimension(0) != 5); - ARM_COMPUTE_ERROR_ON(rois->info()->num_dimensions() > 2); - ARM_COMPUTE_ERROR_ON_CPU_F16_UNSUPPORTED(input); - ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F32); - ARM_COMPUTE_ERROR_ON((pool_info.pooled_width() == 0) || (pool_info.pooled_height() == 0)); - - if(output->info()->total_size() != 0) - { - ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, output); - ARM_COMPUTE_ERROR_ON((output->info()->dimension(0) != pool_info.pooled_width()) || (output->info()->dimension(1) != pool_info.pooled_height())); - ARM_COMPUTE_ERROR_ON(input->info()->dimension(2) != output->info()->dimension(2)); - ARM_COMPUTE_ERROR_ON(rois->info()->dimension(1) != output->info()->dimension(3)); - } + ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), rois->info(), output->info(), pool_info)); // Output auto initialization if not yet initialized TensorShape output_shape(pool_info.pooled_width(), 
pool_info.pooled_height(), input->info()->dimension(2), rois->info()->dimension(1)); - auto_init_if_empty(*output->info(), output_shape, 1, input->info()->data_type()); + + auto_init_if_empty(*output->info(), output_shape, 1, input->info()->data_type(), output->info()->quantization_info()); ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, output); ARM_COMPUTE_ERROR_ON((output->info()->dimension(0) != pool_info.pooled_width()) || (output->info()->dimension(1) != pool_info.pooled_height())); @@ -99,7 +164,8 @@ void NEROIPoolingLayerKernel::run(const Window &window, const ThreadInfo &info) const int pooled_h = _pool_info.pooled_height(); const float spatial_scale = _pool_info.spatial_scale(); - const auto *rois_ptr = reinterpret_cast<const uint16_t *>(_rois->buffer()); + const auto *rois_ptr = reinterpret_cast<const uint16_t *>(_rois->buffer()); + const auto data_type = _input->info()->data_type(); for(int roi_indx = roi_list_start; roi_indx < roi_list_end; ++roi_indx) { @@ -133,23 +199,17 @@ void NEROIPoolingLayerKernel::run(const Window &window, const ThreadInfo &info) region_start_y = std::min(std::max(region_start_y + roi_anchor_y, 0), height); region_end_y = std::min(std::max(region_end_y + roi_anchor_y, 0), height); - // Iterate through the pooling region - if((region_end_x <= region_start_x) || (region_end_y <= region_start_y)) - { - *reinterpret_cast<float *>(_output->ptr_to_element(Coordinates(px, py, fm, roi_indx))) = 0; - } - else + switch(data_type) { - float curr_max = -FLT_MAX; - for(int j = region_start_y; j < region_end_y; ++j) - { - for(int i = region_start_x; i < region_end_x; ++i) - { - const auto val = *reinterpret_cast<const float *>(_input->ptr_to_element(Coordinates(i, j, fm, roi_batch))); - curr_max = std::max(val, curr_max); - } - } - *reinterpret_cast<float *>(_output->ptr_to_element(Coordinates(px, py, fm, roi_indx))) = curr_max; + case DataType::F32: + template_eval<float>(_input, _output, region_start_x, region_start_y, region_end_x, region_end_y, fm, px, py, roi_batch, roi_indx); + break; + case 
DataType::QASYMM8: + template_eval<qasymm8_t>(_input, _output, region_start_x, region_start_y, region_end_x, region_end_y, fm, px, py, roi_batch, roi_indx); + break; + default: + ARM_COMPUTE_ERROR("DataType not Supported"); + break; } } } -- cgit v1.2.1