From 6b612f5fa1fee9528f2f87491fe7edb3887d9817 Mon Sep 17 00:00:00 2001 From: Michele Di Giorgio Date: Thu, 5 Sep 2019 12:30:22 +0100 Subject: COMPMID-2310: CLGenerateProposalsLayer: support for QASYMM8 Change-Id: I48b77e09857cd43f9498d28e8f4bf346e3d7110d Signed-off-by: Michele Di Giorgio Reviewed-on: https://review.mlplatform.org/c/1969 Reviewed-by: Pablo Marquez Comments-Addressed: Arm Jenkins Tested-by: Arm Jenkins --- .../CL/kernels/CLGenerateProposalsLayerKernel.h | 4 +- arm_compute/core/CL/kernels/CLStridedSliceKernel.h | 4 +- .../CL/functions/CLGenerateProposalsLayer.h | 46 ++++-- arm_compute/runtime/CL/functions/CLSlice.h | 6 +- arm_compute/runtime/CL/functions/CLStridedSlice.h | 4 +- .../CPPBoxWithNonMaximaSuppressionLimit.h | 2 - src/core/CL/CLKernelLibrary.cpp | 5 + .../CL/cl_kernels/generate_proposals_quantized.cl | 87 +++++++++++ src/core/CL/cl_kernels/helpers_asymm.h | 2 + src/core/CL/cl_kernels/slice_ops.cl | 4 +- .../CL/kernels/CLGenerateProposalsLayerKernel.cpp | 21 ++- src/core/CL/kernels/CLStridedSliceKernel.cpp | 2 +- .../CPPBoxWithNonMaximaSuppressionLimitKernel.cpp | 1 + .../CL/functions/CLGenerateProposalsLayer.cpp | 161 ++++++++++++++++----- .../CPPBoxWithNonMaximaSuppressionLimit.cpp | 69 +++++---- tests/validation/CL/GenerateProposalsLayer.cpp | 20 ++- .../validation/fixtures/ComputeAllAnchorsFixture.h | 39 ++++- tests/validation/reference/ComputeAllAnchors.cpp | 9 ++ 18 files changed, 376 insertions(+), 110 deletions(-) create mode 100644 src/core/CL/cl_kernels/generate_proposals_quantized.cl diff --git a/arm_compute/core/CL/kernels/CLGenerateProposalsLayerKernel.h b/arm_compute/core/CL/kernels/CLGenerateProposalsLayerKernel.h index 5900d79821..e2b20f667f 100644 --- a/arm_compute/core/CL/kernels/CLGenerateProposalsLayerKernel.h +++ b/arm_compute/core/CL/kernels/CLGenerateProposalsLayerKernel.h @@ -48,7 +48,7 @@ public: /** Set the input and output tensors. * - * @param[in] anchors Source tensor. Original set of anchors of size (4, A), where A is the number of anchors. Data types supported: F16/F32 + * @param[in] anchors Source tensor. Original set of anchors of size (4, A), where A is the number of anchors. Data types supported: QSYMM16/F16/F32 * @param[out] all_anchors Destination tensor. Destination anchors of size (4, H*W*A) where H and W are the height and width of the feature map and A is the number of anchors. Data types supported: Same as @p input * @param[in] info Contains Compute Anchors operation information described in @ref ComputeAnchorsInfo * @@ -57,7 +57,7 @@ public: /** Static function to check if given info will lead to a valid configuration of @ref CLComputeAllAnchorsKernel * - * @param[in] anchors Source tensor info. Original set of anchors of size (4, A), where A is the number of anchors. Data types supported: F16/F32 + * @param[in] anchors Source tensor info. Original set of anchors of size (4, A), where A is the number of anchors. Data types supported: QSYMM16/F16/F32 * @param[in] all_anchors Destination tensor info. Destination anchors of size (4, H*W*A) where H and W are the height and width of the feature map and A is the number of anchors. 
Data types supported: Same as @p input * @param[in] info Contains Compute Anchors operation information described in @ref ComputeAnchorsInfo * diff --git a/arm_compute/core/CL/kernels/CLStridedSliceKernel.h b/arm_compute/core/CL/kernels/CLStridedSliceKernel.h index 5b69b3fd16..d579d1ceb9 100644 --- a/arm_compute/core/CL/kernels/CLStridedSliceKernel.h +++ b/arm_compute/core/CL/kernels/CLStridedSliceKernel.h @@ -54,7 +54,7 @@ public: * * @note Supported tensor rank: up to 4 * - * @param[in] input Source tensor. Data type supported: U8/S8/QASYMM8/U16/S16/QSYMM16/U32/S32/F16/F32 + * @param[in] input Source tensor. Data type supported: U8/S8/QASYMM8/U16/S16/QASYMM16/QSYMM16/U32/S32/F16/F32 * @param[out] output Destination tensor. Data type supported: Same as @p input * @param[in] starts The starts of the dimensions of the input tensor to be sliced. The length must be of rank(input). * @param[in] ends The ends of the dimensions of the input tensor to be sliced. The length must be of rank(input). @@ -72,7 +72,7 @@ public: * * @note Supported tensor rank: up to 4 * - * @param[in] input Source tensor. Data type supported: U8/S8/QASYMM8/U16/S16/QSYMM16/U32/S32/F16/F32 + * @param[in] input Source tensor. Data type supported: U8/S8/QASYMM8/U16/S16/QASYMM16/QSYMM16/U32/S32/F16/F32 * @param[in] output Destination tensor. Data type supported: Same as @p input * @param[in] starts The starts of the dimensions of the input tensor to be sliced. The length must be of rank(input). * @param[in] ends The ends of the dimensions of the input tensor to be sliced. The length must be of rank(input). diff --git a/arm_compute/runtime/CL/functions/CLGenerateProposalsLayer.h b/arm_compute/runtime/CL/functions/CLGenerateProposalsLayer.h index 8546261fef..827f19d130 100644 --- a/arm_compute/runtime/CL/functions/CLGenerateProposalsLayer.h +++ b/arm_compute/runtime/CL/functions/CLGenerateProposalsLayer.h @@ -24,16 +24,17 @@ #ifndef __ARM_COMPUTE_CLGENERATEPROPOSALSLAYER_H__ #define __ARM_COMPUTE_CLGENERATEPROPOSALSLAYER_H__ #include "arm_compute/core/CL/kernels/CLBoundingBoxTransformKernel.h" +#include "arm_compute/core/CL/kernels/CLDequantizationLayerKernel.h" #include "arm_compute/core/CL/kernels/CLGenerateProposalsLayerKernel.h" #include "arm_compute/core/CL/kernels/CLPadLayerKernel.h" #include "arm_compute/core/CL/kernels/CLPermuteKernel.h" +#include "arm_compute/core/CL/kernels/CLQuantizationLayerKernel.h" #include "arm_compute/core/CL/kernels/CLReshapeLayerKernel.h" -#include "arm_compute/core/CL/kernels/CLStridedSliceKernel.h" -#include "arm_compute/core/CPP/kernels/CPPBoxWithNonMaximaSuppressionLimitKernel.h" #include "arm_compute/core/Types.h" #include "arm_compute/runtime/CL/CLScheduler.h" #include "arm_compute/runtime/CL/CLTensor.h" #include "arm_compute/runtime/CPP/CPPScheduler.h" +#include "arm_compute/runtime/CPP/functions/CPPBoxWithNonMaximaSuppressionLimit.h" #include "arm_compute/runtime/IFunction.h" #include "arm_compute/runtime/MemoryGroup.h" @@ -47,10 +48,11 @@ class ICLTensor; * -# @ref CLComputeAllAnchors * -# @ref CLPermute x 2 * -# @ref CLReshapeLayer x 2 - * -# @ref CLStridedSlice x 3 * -# @ref CLBoundingBoxTransform * -# @ref CLPadLayerKernel - * And the following CPP kernels: + * -# @ref CLDequantizationLayerKernel + * -# @ref CLQuantizationLayerKernel + * And the following CPP functions: * -# @ref CPPBoxWithNonMaximaSuppressionLimit */ class CLGenerateProposalsLayer : public IFunction @@ -72,11 +74,13 @@ public: /** Set the input and output tensors. 
* - * @param[in] scores Scores from convolution layer of size (W, H, A), where H and W are the height and width of the feature map, and A is the number of anchors. Data types supported: F16/F32 + * @param[in] scores Scores from convolution layer of size (W, H, A), where H and W are the height and width of the feature map, and A is the number of anchors. + * Data types supported: QASYMM8/F16/F32 * @param[in] deltas Bounding box deltas from convolution layer of size (W, H, 4*A). Data types supported: Same as @p scores - * @param[in] anchors Anchors tensor of size (4, A). Data types supported: Same as @p input - * @param[out] proposals Box proposals output tensor of size (5, W*H*A). Data types supported: Same as @p input - * @param[out] scores_out Box scores output tensor of size (W*H*A). Data types supported: Same as @p input + * @param[in] anchors Anchors tensor of size (4, A). Data types supported: QSYMM16 with scale of 0.125 if @p scores is QASYMM8, otherwise same as @p scores + * @param[out] proposals Box proposals output tensor of size (5, W*H*A). + * Data types supported: QASYMM16 with scale of 0.125 and 0 offset if @p scores is QASYMM8, otherwise same as @p scores + * @param[out] scores_out Box scores output tensor of size (W*H*A). Data types supported: Same as @p scores * @param[out] num_valid_proposals Scalar output tensor which says which of the first proposals are valid. Data types supported: U32 * @param[in] info Contains GenerateProposals operation information described in @ref GenerateProposalsInfo * @@ -88,12 +92,14 @@ public: /** Static function to check if given info will lead to a valid configuration of @ref CLGenerateProposalsLayer * - * @param[in] scores Scores info from convolution layer of size (W, H, A), where H and W are the height and width of the feature map, and A is the number of anchors. Data types supported: F16/F32 + * @param[in] scores Scores info from convolution layer of size (W, H, A), where H and W are the height and width of the feature map, and A is the number of anchors. + * Data types supported: QASYMM8/F16/F32 * @param[in] deltas Bounding box deltas info from convolution layer of size (W, H, 4*A). Data types supported: Same as @p scores - * @param[in] anchors Anchors tensor info of size (4, A). Data types supported: Same as @p input - * @param[in] proposals Box proposals info output tensor of size (5, W*H*A). Data types supported: Data types supported: U32 - * @param[in] scores_out Box scores output tensor info of size (W*H*A). Data types supported: Same as @p input - * @param[in] num_valid_proposals Scalar output tensor info which says which of the first proposals are valid. Data types supported: Same as @p input + * @param[in] anchors Anchors tensor info of size (4, A). Data types supported: QSYMM16 with scale of 0.125 if @p scores is QASYMM8, otherwise same as @p scores + * @param[in] proposals Box proposals output tensor info of size (5, W*H*A). + * Data types supported: QASYMM16 with scale of 0.125 and 0 offset if @p scores is QASYMM8, otherwise same as @p scores + * @param[in] scores_out Box scores output tensor info of size (W*H*A). Data types supported: Same as @p scores + * @param[in] num_valid_proposals Scalar output tensor info which says which of the first proposals are valid. 
Data types supported: U32 * @param[in] info Contains GenerateProposals operation information described in @ref GenerateProposalsInfo * * @return a Status @@ -117,23 +123,33 @@ private: CLComputeAllAnchorsKernel _compute_anchors_kernel; CLBoundingBoxTransformKernel _bounding_box_kernel; CLPadLayerKernel _pad_kernel; + CLDequantizationLayerKernel _dequantize_anchors; + CLDequantizationLayerKernel _dequantize_deltas; + CLQuantizationLayerKernel _quantize_all_proposals; - // CPP kernels - CPPBoxWithNonMaximaSuppressionLimitKernel _cpp_nms_kernel; + // CPP functions + CPPBoxWithNonMaximaSuppressionLimit _cpp_nms; bool _is_nhwc; + bool _is_qasymm8; // Temporary tensors CLTensor _deltas_permuted; CLTensor _deltas_flattened; + CLTensor _deltas_flattened_f32; CLTensor _scores_permuted; CLTensor _scores_flattened; CLTensor _all_anchors; + CLTensor _all_anchors_f32; CLTensor _all_proposals; + CLTensor _all_proposals_quantized; CLTensor _keeps_nms_unused; CLTensor _classes_nms_unused; CLTensor _proposals_4_roi_values; + // Temporary tensor pointers + CLTensor *_all_proposals_to_use; + // Output tensor pointers ICLTensor *_num_valid_proposals; ICLTensor *_scores_out; diff --git a/arm_compute/runtime/CL/functions/CLSlice.h b/arm_compute/runtime/CL/functions/CLSlice.h index acd4f0d3ad..5e8d0199c2 100644 --- a/arm_compute/runtime/CL/functions/CLSlice.h +++ b/arm_compute/runtime/CL/functions/CLSlice.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018 ARM Limited. + * Copyright (c) 2018-2019 ARM Limited. * * SPDX-License-Identifier: MIT * @@ -42,7 +42,7 @@ public: * @note End coordinates can be negative, which represents the number of elements before the end of that dimension. * @note End indices are not inclusive unless negative. * - * @param[in] input Source tensor. Data type supported: U8/S8/QASYMM8/U16/S16/U32/S32/F16/F32 + * @param[in] input Source tensor. Data type supported: U8/S8/QASYMM8/U16/S16/QASYMM16/QSYMM16/U32/S32/F16/F32 * @param[out] output Destination tensor. Data type supported: Same as @p input * @param[in] starts The starts of the dimensions of the input tensor to be sliced. The length must be of rank(input). * @param[in] ends The ends of the dimensions of the input tensor to be sliced. The length must be of rank(input). @@ -56,7 +56,7 @@ public: * @note End coordinates can be negative, which represents the number of elements before the end of that dimension. * @note End indices are not inclusive unless negative. * - * @param[in] input Source tensor info. Data type supported: U8/S8/QASYMM8/U16/S16/U32/S32/F16/F32 + * @param[in] input Source tensor info. Data type supported: U8/S8/QASYMM8/U16/S16/QASYMM16/QSYMM16/U32/S32/F16/F32 * @param[in] output Destination tensor info. Data type supported: Same as @p input * @param[in] starts The starts of the dimensions of the input tensor to be sliced. The length must be of rank(input). * @param[in] ends The ends of the dimensions of the input tensor to be sliced. The length must be of rank(input). diff --git a/arm_compute/runtime/CL/functions/CLStridedSlice.h b/arm_compute/runtime/CL/functions/CLStridedSlice.h index bb97b17fea..885751788c 100644 --- a/arm_compute/runtime/CL/functions/CLStridedSlice.h +++ b/arm_compute/runtime/CL/functions/CLStridedSlice.h @@ -39,7 +39,7 @@ public: * * @note Supported tensor rank: up to 4 * - * @param[in] input Source tensor. Data type supported: U8/S8/QASYMM8/U16/S16/QSYMM16/U32/S32/F16/F32 + * @param[in] input Source tensor. 
Data type supported: U8/S8/QASYMM8/U16/S16/QASYMM16/QSYMM16/U32/S32/F16/F32 * @param[out] output Destination tensor. Data type supported: Same as @p input * @param[in] starts The starts of the dimensions of the input tensor to be sliced. The length must be of rank(input). * @param[in] ends The ends of the dimensions of the input tensor to be sliced. The length must be of rank(input). @@ -57,7 +57,7 @@ public: * * @note Supported tensor rank: up to 4 * - * @param[in] input Source tensor. Data type supported: U8/S8/QASYMM8/U16/S16/QSYMM16/U32/S32/F16/F32 + * @param[in] input Source tensor. Data type supported: U8/S8/QASYMM8/U16/S16/QASYMM16/QSYMM16/U32/S32/F16/F32 * @param[in] output Destination tensor. Data type supported: Same as @p input * @param[in] starts The starts of the dimensions of the input tensor to be sliced. The length must be of rank(input). * @param[in] ends The ends of the dimensions of the input tensor to be sliced. The length must be of rank(input). diff --git a/arm_compute/runtime/CPP/functions/CPPBoxWithNonMaximaSuppressionLimit.h b/arm_compute/runtime/CPP/functions/CPPBoxWithNonMaximaSuppressionLimit.h index 4857f74f93..dc23d42126 100644 --- a/arm_compute/runtime/CPP/functions/CPPBoxWithNonMaximaSuppressionLimit.h +++ b/arm_compute/runtime/CPP/functions/CPPBoxWithNonMaximaSuppressionLimit.h @@ -100,7 +100,6 @@ private: ITensor *_classes; ITensor *_batch_splits_out; ITensor *_keeps; - ITensor *_keeps_size; Tensor _scores_in_f32; Tensor _boxes_in_f32; @@ -110,7 +109,6 @@ private: Tensor _classes_f32; Tensor _batch_splits_out_f32; Tensor _keeps_f32; - Tensor _keeps_size_f32; bool _is_qasymm8; }; diff --git a/src/core/CL/CLKernelLibrary.cpp b/src/core/CL/CLKernelLibrary.cpp index 2f748de53e..a5e75df8be 100644 --- a/src/core/CL/CLKernelLibrary.cpp +++ b/src/core/CL/CLKernelLibrary.cpp @@ -347,6 +347,7 @@ const std::map<std::string, std::string> CLKernelLibrary::_kernel_program_map = { "gemmlowp_output_stage_quantize_down_fixedpoint_qsymm16", "gemmlowp.cl" }, { "gemmlowp_output_stage_quantize_down_float", "gemmlowp.cl" }, { "generate_proposals_compute_all_anchors", "generate_proposals.cl" }, + { "generate_proposals_compute_all_anchors_quantized", "generate_proposals_quantized.cl" }, { "harris_score_3x3", "harris_corners.cl" }, { "harris_score_5x5", "harris_corners.cl" }, { "harris_score_7x7", "harris_corners.cl" }, @@ -791,6 +792,10 @@ const std::map<std::string, std::string> CLKernelLibrary::_program_source_map = { "generate_proposals.cl", #include "./cl_kernels/generate_proposals.clembed" + }, + { + "generate_proposals_quantized.cl", +#include "./cl_kernels/generate_proposals_quantized.clembed" }, { "harris_corners.cl", diff --git a/src/core/CL/cl_kernels/generate_proposals_quantized.cl b/src/core/CL/cl_kernels/generate_proposals_quantized.cl new file mode 100644 index 0000000000..690d1cfdf8 --- /dev/null +++ b/src/core/CL/cl_kernels/generate_proposals_quantized.cl @@ -0,0 +1,87 @@ +/* + * Copyright (c) 2019 ARM Limited. 
+ * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#include "helpers_asymm.h" + +/** Generate all the regions of interest based on the image size and the anchors passed in. For each element (x,y) of the + * grid, it will generate NUM_ANCHORS rois, given by shifting the grid position to match the anchor. + * + * @attention The following variables must be passed at compile time: + * -# -DDATA_TYPE= Tensor data type. Supported data types: QSYMM16 + * -# -DHEIGHT= Height of the feature map on which this kernel is applied + * -# -DWIDTH= Width of the feature map on which this kernel is applied + * -# -DNUM_ANCHORS= Number of anchors used to generate the rois per pixel + * -# -DSTRIDE= Stride applied at each different pixel position (i.e., x_range = (1:WIDTH)*STRIDE and y_range = (1:HEIGHT)*STRIDE) + * -# -DNUM_ROI_FIELDS= Number of fields used to represent a roi + * -# -DOFFSET= Quantization offset of the anchors tensor + * -# -DSCALE= Quantization scale of the anchors tensor + * + * @param[in] anchors_ptr Pointer to the anchors tensor. Supported data types: QSYMM16 + * @param[in] anchors_stride_x Stride of the anchors tensor in X dimension (in bytes) + * @param[in] anchors_step_x anchors_stride_x * number of elements along X processed per workitem(in bytes) + * @param[in] anchors_stride_y Stride of the anchors tensor in Y dimension (in bytes) + * @param[in] anchors_step_y anchors_stride_y * number of elements along Y processed per workitem(in bytes) + * @param[in] anchors_stride_z Stride of the anchors tensor in Z dimension (in bytes) + * @param[in] anchors_step_z anchors_stride_z * number of elements along Z processed per workitem(in bytes) + * @param[in] anchors_offset_first_element_in_bytes The offset of the first element in the anchors tensor + * @param[out] rois_ptr Pointer to the rois. 
Supported data types: same as @p anchors_ptr + * @param[out] rois_stride_x Stride of the rois in X dimension (in bytes) + * @param[out] rois_step_x rois_stride_x * number of elements along X processed per workitem(in bytes) + * @param[out] rois_stride_y Stride of the rois in Y dimension (in bytes) + * @param[out] rois_step_y rois_stride_y * number of elements along Y processed per workitem(in bytes) + * @param[out] rois_stride_z Stride of the rois in Z dimension (in bytes) + * @param[out] rois_step_z rois_stride_z * number of elements along Z processed per workitem(in bytes) + * @param[out] rois_offset_first_element_in_bytes The offset of the first element in the rois + */ +#if defined(DATA_TYPE) && defined(WIDTH) && defined(HEIGHT) && defined(NUM_ANCHORS) && defined(STRIDE) && defined(NUM_ROI_FIELDS) && defined(OFFSET) && defined(SCALE) +__kernel void generate_proposals_compute_all_anchors_quantized( + VECTOR_DECLARATION(anchors), + VECTOR_DECLARATION(rois)) +{ + Vector anchors = CONVERT_TO_VECTOR_STRUCT_NO_STEP(anchors); + Vector rois = CONVERT_TO_VECTOR_STRUCT(rois); + + const size_t idx = get_global_id(0); + // Find the index of the anchor + const size_t anchor_idx = idx % NUM_ANCHORS; + + // Find which shift this thread is using + const size_t shift_idx = idx / NUM_ANCHORS; + + // Compute the shift on the X and Y direction (the shift depends only on the thread index) + const float shift_x = (float)(shift_idx % WIDTH) * STRIDE; + const float shift_y = (float)(shift_idx / WIDTH) * STRIDE; + + VEC_DATA_TYPE(float, NUM_ROI_FIELDS) + shift = (VEC_DATA_TYPE(float, NUM_ROI_FIELDS))(shift_x, shift_y, shift_x, shift_y); + + // Read the given anchor + VEC_DATA_TYPE(float, NUM_ROI_FIELDS) + anchor = DEQUANTIZE(VLOAD(NUM_ROI_FIELDS)(0, (__global DATA_TYPE *)vector_offset(&anchors, anchor_idx * NUM_ROI_FIELDS)), OFFSET, SCALE, DATA_TYPE, NUM_ROI_FIELDS); + + // Apply the shift to the anchor + VEC_DATA_TYPE(float, NUM_ROI_FIELDS) + shifted_anchor = anchor + shift; + + VSTORE(NUM_ROI_FIELDS) + (QUANTIZE(shifted_anchor, OFFSET, SCALE, DATA_TYPE, NUM_ROI_FIELDS), 0, (__global DATA_TYPE *)rois.ptr); +} +#endif //defined(DATA_TYPE) && defined(WIDTH) && defined(HEIGHT) && defined(NUM_ANCHORS) && defined(STRIDE) && defined(NUM_ROI_FIELDS) && defined(OFFSET) && defined(SCALE) diff --git a/src/core/CL/cl_kernels/helpers_asymm.h b/src/core/CL/cl_kernels/helpers_asymm.h index ad06451f13..53e6719cd7 100644 --- a/src/core/CL/cl_kernels/helpers_asymm.h +++ b/src/core/CL/cl_kernels/helpers_asymm.h @@ -375,9 +375,11 @@ inline float dequantize_qasymm8(uchar input, float offset, float scale) QUANTIZE_IMPL(uchar, 4) QUANTIZE_IMPL(ushort, 4) +QUANTIZE_IMPL(short, 4) DEQUANTIZE_IMPL(uchar, 4) DEQUANTIZE_IMPL(ushort, 4) +DEQUANTIZE_IMPL(short, 4) ASYMM_ROUNDING_DIVIDE_BY_POW2_IMPL(2) ASYMM_ROUNDING_DIVIDE_BY_POW2_IMPL(4) diff --git a/src/core/CL/cl_kernels/slice_ops.cl b/src/core/CL/cl_kernels/slice_ops.cl index 97decee6fc..2163c699dd 100644 --- a/src/core/CL/cl_kernels/slice_ops.cl +++ b/src/core/CL/cl_kernels/slice_ops.cl @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018 ARM Limited. + * Copyright (c) 2018-2019 ARM Limited. * * SPDX-License-Identifier: MIT * @@ -32,7 +32,7 @@ * @attention Absolute start coordinates for each dimension should be given as preprocessor -DSTART_index=value e.g. -DSTART_0=2 * @attention Strides for each dimension should be given as preprocessor -DSTRIDE_index=value e.g. -DSTRIDE_1=1 * - * @param[in] input_ptr Pointer to the source tensor. 
Supported data types: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32 + * @param[in] input_ptr Pointer to the source tensor. Supported data types: U8/S8/QASYMM8/U16/S16/QASYMM16/QSYMM16/F16/U32/S32/F32 * @param[in] input_stride_x Stride of the source tensor in X dimension (in bytes) * @param[in] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes) * @param[in] input_stride_y Stride of the source tensor in Y dimension (in bytes) diff --git a/src/core/CL/kernels/CLGenerateProposalsLayerKernel.cpp b/src/core/CL/kernels/CLGenerateProposalsLayerKernel.cpp index 79e364caf7..16d0e86d7d 100644 --- a/src/core/CL/kernels/CLGenerateProposalsLayerKernel.cpp +++ b/src/core/CL/kernels/CLGenerateProposalsLayerKernel.cpp @@ -44,7 +44,7 @@ Status validate_arguments(const ITensorInfo *anchors, const ITensorInfo *all_anc ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(anchors, all_anchors); ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(anchors); ARM_COMPUTE_RETURN_ERROR_ON(anchors->dimension(0) != info.values_per_roi()); - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_NOT_IN(anchors, DataType::F16, DataType::F32); + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_NOT_IN(anchors, DataType::QSYMM16, DataType::F16, DataType::F32); ARM_COMPUTE_RETURN_ERROR_ON(anchors->num_dimensions() > 2); if(all_anchors->total_size() > 0) { @@ -55,6 +55,11 @@ Status validate_arguments(const ITensorInfo *anchors, const ITensorInfo *all_anc ARM_COMPUTE_RETURN_ERROR_ON(all_anchors->num_dimensions() > 2); ARM_COMPUTE_RETURN_ERROR_ON(all_anchors->dimension(0) != info.values_per_roi()); ARM_COMPUTE_RETURN_ERROR_ON(all_anchors->dimension(1) != feature_height * feature_width * num_anchors); + + if(is_data_type_quantized(anchors->data_type())) + { + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(anchors, all_anchors); + } } return Status{}; } @@ -78,12 +83,14 @@ void CLComputeAllAnchorsKernel::configure(const ICLTensor *anchors, ICLTensor *a // Initialize the output if empty const TensorShape output_shape(info.values_per_roi(), width * height * num_anchors); - auto_init_if_empty(*all_anchors->info(), output_shape, 1, data_type); + auto_init_if_empty(*all_anchors->info(), TensorInfo(output_shape, 1, data_type, anchors->info()->quantization_info())); // Set instance variables _anchors = anchors; _all_anchors = all_anchors; + const bool is_quantized = is_data_type_quantized(anchors->info()->data_type()); + // Set build options CLBuildOptions build_opts; build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(data_type)); @@ -93,8 +100,16 @@ void CLComputeAllAnchorsKernel::configure(const ICLTensor *anchors, ICLTensor *a build_opts.add_option("-DNUM_ANCHORS=" + support::cpp11::to_string(num_anchors)); build_opts.add_option("-DNUM_ROI_FIELDS=" + support::cpp11::to_string(info.values_per_roi())); + if(is_quantized) + { + const UniformQuantizationInfo qinfo = anchors->info()->quantization_info().uniform(); + build_opts.add_option("-DSCALE=" + float_to_string_with_full_precision(qinfo.scale)); + build_opts.add_option("-DOFFSET=" + float_to_string_with_full_precision(qinfo.offset)); + } + // Create kernel - _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("generate_proposals_compute_all_anchors", build_opts.options())); + const std::string kernel_name = (is_quantized) ? 
"generate_proposals_compute_all_anchors_quantized" : "generate_proposals_compute_all_anchors"; + _kernel = static_cast(CLKernelLibrary::get().create_kernel(kernel_name, build_opts.options())); // The tensor all_anchors can be interpreted as an array of structs (each structs has values_per_roi fields). // This means we don't need to pad on the X dimension, as we know in advance how many fields diff --git a/src/core/CL/kernels/CLStridedSliceKernel.cpp b/src/core/CL/kernels/CLStridedSliceKernel.cpp index 9dd488b678..248a55717d 100644 --- a/src/core/CL/kernels/CLStridedSliceKernel.cpp +++ b/src/core/CL/kernels/CLStridedSliceKernel.cpp @@ -48,7 +48,7 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input); ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8, DataType::S8, DataType::QASYMM8, - DataType::U16, DataType::S16, DataType::QSYMM16, + DataType::U16, DataType::S16, DataType::QASYMM16, DataType::QSYMM16, DataType::U32, DataType::S32, DataType::F16, DataType::F32); diff --git a/src/core/CPP/kernels/CPPBoxWithNonMaximaSuppressionLimitKernel.cpp b/src/core/CPP/kernels/CPPBoxWithNonMaximaSuppressionLimitKernel.cpp index 62568b4b45..3058a0c977 100644 --- a/src/core/CPP/kernels/CPPBoxWithNonMaximaSuppressionLimitKernel.cpp +++ b/src/core/CPP/kernels/CPPBoxWithNonMaximaSuppressionLimitKernel.cpp @@ -360,6 +360,7 @@ void CPPBoxWithNonMaximaSuppressionLimitKernel::configure(const ITensor *scores_ ARM_COMPUTE_ERROR_ON(scores_out->info()->dimension(0) != boxes_out->info()->dimension(1)); ARM_COMPUTE_ERROR_ON(boxes_out->info()->dimension(0) != 4); + ARM_COMPUTE_ERROR_ON(scores_out->info()->dimension(0) != classes->info()->dimension(0)); if(keeps != nullptr) { ARM_COMPUTE_ERROR_ON_MSG(keeps_size == nullptr, "keeps_size cannot be nullptr if keeps has to be provided as output"); diff --git a/src/runtime/CL/functions/CLGenerateProposalsLayer.cpp b/src/runtime/CL/functions/CLGenerateProposalsLayer.cpp index 94aa5e7198..c9eb8abc29 100644 --- a/src/runtime/CL/functions/CLGenerateProposalsLayer.cpp +++ b/src/runtime/CL/functions/CLGenerateProposalsLayer.cpp @@ -30,7 +30,7 @@ namespace arm_compute { CLGenerateProposalsLayer::CLGenerateProposalsLayer(std::shared_ptr memory_manager) - : _memory_group(std::move(memory_manager)), + : _memory_group(memory_manager), _permute_deltas_kernel(), _flatten_deltas_kernel(), _permute_scores_kernel(), @@ -38,17 +38,25 @@ CLGenerateProposalsLayer::CLGenerateProposalsLayer(std::shared_ptrinfo(), deltas->info(), anchors->info(), proposals->info(), scores_out->info(), num_valid_proposals->info(), info)); - _is_nhwc = scores->info()->data_layout() == DataLayout::NHWC; - const DataType data_type = deltas->info()->data_type(); - const int num_anchors = scores->info()->dimension(get_data_layout_dimension_index(scores->info()->data_layout(), DataLayoutDimension::CHANNEL)); - const int feat_width = scores->info()->dimension(get_data_layout_dimension_index(scores->info()->data_layout(), DataLayoutDimension::WIDTH)); - const int feat_height = scores->info()->dimension(get_data_layout_dimension_index(scores->info()->data_layout(), DataLayoutDimension::HEIGHT)); - const int total_num_anchors = num_anchors * feat_width * feat_height; - const int pre_nms_topN = info.pre_nms_topN(); - const int post_nms_topN = info.post_nms_topN(); - const size_t values_per_roi = info.values_per_roi(); + _is_nhwc = scores->info()->data_layout() == DataLayout::NHWC; + const DataType scores_data_type = 
scores->info()->data_type(); + _is_qasymm8 = scores_data_type == DataType::QASYMM8; + const int num_anchors = scores->info()->dimension(get_data_layout_dimension_index(scores->info()->data_layout(), DataLayoutDimension::CHANNEL)); + const int feat_width = scores->info()->dimension(get_data_layout_dimension_index(scores->info()->data_layout(), DataLayoutDimension::WIDTH)); + const int feat_height = scores->info()->dimension(get_data_layout_dimension_index(scores->info()->data_layout(), DataLayoutDimension::HEIGHT)); + const int total_num_anchors = num_anchors * feat_width * feat_height; + const int pre_nms_topN = info.pre_nms_topN(); + const int post_nms_topN = info.post_nms_topN(); + const size_t values_per_roi = info.values_per_roi(); + + const QuantizationInfo scores_qinfo = scores->info()->quantization_info(); + const DataType rois_data_type = (_is_qasymm8) ? DataType::QASYMM16 : scores_data_type; + const QuantizationInfo rois_qinfo = (_is_qasymm8) ? QuantizationInfo(0.125f, 0) : scores->info()->quantization_info(); // Compute all the anchors _memory_group.manage(&_all_anchors); _compute_anchors_kernel.configure(anchors, &_all_anchors, ComputeAnchorsInfo(feat_width, feat_height, info.spatial_scale())); const TensorShape flatten_shape_deltas(values_per_roi, total_num_anchors); - _deltas_flattened.allocator()->init(TensorInfo(flatten_shape_deltas, 1, data_type)); + _deltas_flattened.allocator()->init(TensorInfo(flatten_shape_deltas, 1, scores_data_type, deltas->info()->quantization_info())); // Permute and reshape deltas + _memory_group.manage(&_deltas_flattened); if(!_is_nhwc) { _memory_group.manage(&_deltas_permuted); - _memory_group.manage(&_deltas_flattened); _permute_deltas_kernel.configure(deltas, &_deltas_permuted, PermutationVector{ 2, 0, 1 }); _flatten_deltas_kernel.configure(&_deltas_permuted, &_deltas_flattened); _deltas_permuted.allocator()->allocate(); } else { - _memory_group.manage(&_deltas_flattened); _flatten_deltas_kernel.configure(deltas, &_deltas_flattened); } const TensorShape flatten_shape_scores(1, total_num_anchors); - _scores_flattened.allocator()->init(TensorInfo(flatten_shape_scores, 1, data_type)); + _scores_flattened.allocator()->init(TensorInfo(flatten_shape_scores, 1, scores_data_type, scores_qinfo)); // Permute and reshape scores + _memory_group.manage(&_scores_flattened); if(!_is_nhwc) { _memory_group.manage(&_scores_permuted); - _memory_group.manage(&_scores_flattened); _permute_scores_kernel.configure(scores, &_scores_permuted, PermutationVector{ 2, 0, 1 }); _flatten_scores_kernel.configure(&_scores_permuted, &_scores_flattened); _scores_permuted.allocator()->allocate(); } else { - _memory_group.manage(&_scores_flattened); _flatten_scores_kernel.configure(scores, &_scores_flattened); } + CLTensor *anchors_to_use = &_all_anchors; + CLTensor *deltas_to_use = &_deltas_flattened; + if(_is_qasymm8) + { + _all_anchors_f32.allocator()->init(TensorInfo(_all_anchors.info()->tensor_shape(), 1, DataType::F32)); + _deltas_flattened_f32.allocator()->init(TensorInfo(_deltas_flattened.info()->tensor_shape(), 1, DataType::F32)); + _memory_group.manage(&_all_anchors_f32); + _memory_group.manage(&_deltas_flattened_f32); + // Dequantize anchors to float + _dequantize_anchors.configure(&_all_anchors, &_all_anchors_f32); + _all_anchors.allocator()->allocate(); + anchors_to_use = &_all_anchors_f32; + // Dequantize deltas to float + _dequantize_deltas.configure(&_deltas_flattened, &_deltas_flattened_f32); + _deltas_flattened.allocator()->allocate(); + deltas_to_use = 
&_deltas_flattened_f32; + } // Bounding box transform _memory_group.manage(&_all_proposals); BoundingBoxTransformInfo bbox_info(info.im_width(), info.im_height(), 1.f); - _bounding_box_kernel.configure(&_all_anchors, &_all_proposals, &_deltas_flattened, bbox_info); - _deltas_flattened.allocator()->allocate(); - _all_anchors.allocator()->allocate(); + _bounding_box_kernel.configure(anchors_to_use, &_all_proposals, deltas_to_use, bbox_info); + deltas_to_use->allocator()->allocate(); + anchors_to_use->allocator()->allocate(); + _all_proposals_to_use = &_all_proposals; + if(_is_qasymm8) + { + _memory_group.manage(&_all_proposals_quantized); + // Requantize all_proposals to QASYMM16 with 0.125 scale and 0 offset + _all_proposals_quantized.allocator()->init(TensorInfo(_all_proposals.info()->tensor_shape(), 1, DataType::QASYMM16, QuantizationInfo(0.125f, 0))); + _quantize_all_proposals.configure(&_all_proposals, &_all_proposals_quantized); + _all_proposals.allocator()->allocate(); + _all_proposals_to_use = &_all_proposals_quantized; + } // The original layer implementation first selects the best pre_nms_topN anchors (thus having a lightweight sort) // that are then transformed by bbox_transform. The boxes generated are then fed into a non-sorting NMS operation. // Since we are reusing the NMS layer and we don't implement any CL/sort, we let NMS do the sorting (of all the input) @@ -127,12 +165,12 @@ void CLGenerateProposalsLayer::configure(const ICLTensor *scores, const ICLTenso _memory_group.manage(&_keeps_nms_unused); // Note that NMS needs outputs preinitialized. - auto_init_if_empty(*scores_out->info(), TensorShape(scores_nms_size), 1, data_type); - auto_init_if_empty(*_proposals_4_roi_values.info(), TensorShape(values_per_roi, scores_nms_size), 1, data_type); + auto_init_if_empty(*scores_out->info(), TensorShape(scores_nms_size), 1, scores_data_type, scores_qinfo); + auto_init_if_empty(*_proposals_4_roi_values.info(), TensorShape(values_per_roi, scores_nms_size), 1, rois_data_type, rois_qinfo); auto_init_if_empty(*num_valid_proposals->info(), TensorShape(1), 1, DataType::U32); // Initialize temporaries (unused) outputs - _classes_nms_unused.allocator()->init(TensorInfo(TensorShape(1, 1), 1, data_type)); + _classes_nms_unused.allocator()->init(TensorInfo(TensorShape(scores_nms_size), 1, scores_data_type, scores_qinfo)); _keeps_nms_unused.allocator()->init(*scores_out->info()); // Save the output (to map and unmap them at run) @@ -140,11 +178,11 @@ void CLGenerateProposalsLayer::configure(const ICLTensor *scores, const ICLTenso _num_valid_proposals = num_valid_proposals; _memory_group.manage(&_proposals_4_roi_values); - _cpp_nms_kernel.configure(&_scores_flattened, &_all_proposals, nullptr, scores_out, &_proposals_4_roi_values, &_classes_nms_unused, nullptr, &_keeps_nms_unused, num_valid_proposals, - BoxNMSLimitInfo(0.0f, info.nms_thres(), scores_nms_size, false, NMSType::LINEAR, 0.5f, 0.001f, true, min_size_scaled, info.im_width(), info.im_height())); + _cpp_nms.configure(&_scores_flattened, _all_proposals_to_use, nullptr, scores_out, &_proposals_4_roi_values, &_classes_nms_unused, nullptr, &_keeps_nms_unused, num_valid_proposals, + BoxNMSLimitInfo(0.0f, info.nms_thres(), scores_nms_size, false, NMSType::LINEAR, 0.5f, 0.001f, true, min_size_scaled, info.im_width(), info.im_height())); _keeps_nms_unused.allocator()->allocate(); _classes_nms_unused.allocator()->allocate(); - _all_proposals.allocator()->allocate(); + _all_proposals_to_use->allocator()->allocate(); 
_scores_flattened.allocator()->allocate(); // Add the first column that represents the batch id. This will be all zeros, as we don't support multiple images @@ -156,8 +194,10 @@ Status CLGenerateProposalsLayer::validate(const ITensorInfo *scores, const ITens const ITensorInfo *num_valid_proposals, const GenerateProposalsInfo &info) { ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(scores, deltas, anchors, proposals, scores_out, num_valid_proposals); + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(scores, 1, DataType::QASYMM8, DataType::F16, DataType::F32); ARM_COMPUTE_RETURN_ERROR_ON_DATA_LAYOUT_NOT_IN(scores, DataLayout::NCHW, DataLayout::NHWC); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(scores, deltas); + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(scores, deltas); const int num_anchors = scores->dimension(get_data_layout_dimension_index(scores->data_layout(), DataLayoutDimension::CHANNEL)); const int feat_width = scores->dimension(get_data_layout_dimension_index(scores->data_layout(), DataLayoutDimension::WIDTH)); @@ -166,8 +206,17 @@ Status CLGenerateProposalsLayer::validate(const ITensorInfo *scores, const ITens const int total_num_anchors = num_anchors * feat_width * feat_height; const int values_per_roi = info.values_per_roi(); + const bool is_qasymm8 = scores->data_type() == DataType::QASYMM8; + ARM_COMPUTE_RETURN_ERROR_ON(num_images > 1); + if(is_qasymm8) + { + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(anchors, 1, DataType::QSYMM16); + const UniformQuantizationInfo anchors_qinfo = anchors->quantization_info().uniform(); + ARM_COMPUTE_RETURN_ERROR_ON(anchors_qinfo.scale != 0.125f); + } + TensorInfo all_anchors_info(anchors->clone()->set_tensor_shape(TensorShape(values_per_roi, total_num_anchors)).set_is_resizable(true)); ARM_COMPUTE_RETURN_ON_ERROR(CLComputeAllAnchorsKernel::validate(anchors, &all_anchors_info, ComputeAnchorsInfo(feat_width, feat_height, info.spatial_scale()))); @@ -187,14 +236,36 @@ Status CLGenerateProposalsLayer::validate(const ITensorInfo *scores, const ITens TensorInfo deltas_flattened_info(deltas->clone()->set_tensor_shape(TensorShape(values_per_roi, total_num_anchors)).set_is_resizable(true)); ARM_COMPUTE_RETURN_ON_ERROR(CLReshapeLayerKernel::validate(&deltas_permuted_info, &deltas_flattened_info)); - TensorInfo scores_flattened_info(deltas->clone()->set_tensor_shape(TensorShape(1, total_num_anchors)).set_is_resizable(true)); + TensorInfo scores_flattened_info(scores->clone()->set_tensor_shape(TensorShape(1, total_num_anchors)).set_is_resizable(true)); TensorInfo proposals_4_roi_values(deltas->clone()->set_tensor_shape(TensorShape(values_per_roi, total_num_anchors)).set_is_resizable(true)); ARM_COMPUTE_RETURN_ON_ERROR(CLReshapeLayerKernel::validate(&scores_permuted_info, &scores_flattened_info)); - ARM_COMPUTE_RETURN_ON_ERROR(CLBoundingBoxTransformKernel::validate(&all_anchors_info, &proposals_4_roi_values, &deltas_flattened_info, BoundingBoxTransformInfo(info.im_width(), info.im_height(), - 1.f))); - ARM_COMPUTE_RETURN_ON_ERROR(CLPadLayerKernel::validate(&proposals_4_roi_values, proposals, PaddingList{ { 1, 0 } })); + TensorInfo *proposals_4_roi_values_to_use = &proposals_4_roi_values; + TensorInfo proposals_4_roi_values_quantized(deltas->clone()->set_tensor_shape(TensorShape(values_per_roi, total_num_anchors)).set_is_resizable(true)); + proposals_4_roi_values_quantized.set_data_type(DataType::QASYMM16).set_quantization_info(QuantizationInfo(0.125f, 0)); + if(is_qasymm8) + { + TensorInfo 
all_anchors_f32_info(anchors->clone()->set_tensor_shape(TensorShape(values_per_roi, total_num_anchors)).set_is_resizable(true).set_data_type(DataType::F32)); + ARM_COMPUTE_RETURN_ON_ERROR(CLDequantizationLayerKernel::validate(&all_anchors_info, &all_anchors_f32_info)); + + TensorInfo deltas_flattened_f32_info(deltas->clone()->set_tensor_shape(TensorShape(values_per_roi, total_num_anchors)).set_is_resizable(true).set_data_type(DataType::F32)); + ARM_COMPUTE_RETURN_ON_ERROR(CLDequantizationLayerKernel::validate(&deltas_flattened_info, &deltas_flattened_f32_info)); + + TensorInfo proposals_4_roi_values_f32(deltas->clone()->set_tensor_shape(TensorShape(values_per_roi, total_num_anchors)).set_is_resizable(true).set_data_type(DataType::F32)); + ARM_COMPUTE_RETURN_ON_ERROR(CLBoundingBoxTransformKernel::validate(&all_anchors_f32_info, &proposals_4_roi_values_f32, &deltas_flattened_f32_info, + BoundingBoxTransformInfo(info.im_width(), info.im_height(), 1.f))); + + ARM_COMPUTE_RETURN_ON_ERROR(CLQuantizationLayerKernel::validate(&proposals_4_roi_values_f32, &proposals_4_roi_values_quantized)); + proposals_4_roi_values_to_use = &proposals_4_roi_values_quantized; + } + else + { + ARM_COMPUTE_RETURN_ON_ERROR(CLBoundingBoxTransformKernel::validate(&all_anchors_info, &proposals_4_roi_values, &deltas_flattened_info, + BoundingBoxTransformInfo(info.im_width(), info.im_height(), 1.f))); + } + + ARM_COMPUTE_RETURN_ON_ERROR(CLPadLayerKernel::validate(proposals_4_roi_values_to_use, proposals, PaddingList{ { 1, 0 } })); if(num_valid_proposals->total_size() > 0) { @@ -208,7 +279,17 @@ Status CLGenerateProposalsLayer::validate(const ITensorInfo *scores, const ITens ARM_COMPUTE_RETURN_ERROR_ON(proposals->num_dimensions() > 2); ARM_COMPUTE_RETURN_ERROR_ON(proposals->dimension(0) != size_t(values_per_roi) + 1); ARM_COMPUTE_RETURN_ERROR_ON(proposals->dimension(1) != size_t(total_num_anchors)); - ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(proposals, deltas); + if(is_qasymm8) + { + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(proposals, 1, DataType::QASYMM16); + const UniformQuantizationInfo proposals_qinfo = proposals->quantization_info().uniform(); + ARM_COMPUTE_RETURN_ERROR_ON(proposals_qinfo.scale != 0.125f); + ARM_COMPUTE_RETURN_ERROR_ON(proposals_qinfo.offset != 0); + } + else + { + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(proposals, scores); + } } if(scores_out->total_size() > 0) @@ -225,7 +306,7 @@ void CLGenerateProposalsLayer::run_cpp_nms_kernel() { // Map inputs _scores_flattened.map(true); - _all_proposals.map(true); + _all_proposals_to_use->map(true); // Map outputs _scores_out->map(CLScheduler::get().queue(), true); @@ -235,7 +316,7 @@ void CLGenerateProposalsLayer::run_cpp_nms_kernel() _classes_nms_unused.map(true); // Run nms - CPPScheduler::get().schedule(&_cpp_nms_kernel, Window::DimX); + _cpp_nms.run(); // Unmap outputs _keeps_nms_unused.unmap(); @@ -246,7 +327,7 @@ void CLGenerateProposalsLayer::run_cpp_nms_kernel() // Unmap inputs _scores_flattened.unmap(); - _all_proposals.unmap(); + _all_proposals_to_use->unmap(); } void CLGenerateProposalsLayer::run() @@ -266,8 +347,20 @@ void CLGenerateProposalsLayer::run() CLScheduler::get().enqueue(_flatten_deltas_kernel, false); CLScheduler::get().enqueue(_flatten_scores_kernel, false); + if(_is_qasymm8) + { + CLScheduler::get().enqueue(_dequantize_anchors, false); + CLScheduler::get().enqueue(_dequantize_deltas, false); + } + // Build the boxes CLScheduler::get().enqueue(_bounding_box_kernel, false); + + if(_is_qasymm8) + { + 
CLScheduler::get().enqueue(_quantize_all_proposals, false); + } + // Non maxima suppression run_cpp_nms_kernel(); // Add dummy batch indexes diff --git a/src/runtime/CPP/functions/CPPBoxWithNonMaximaSuppressionLimit.cpp b/src/runtime/CPP/functions/CPPBoxWithNonMaximaSuppressionLimit.cpp index 158f45a320..782771bc50 100644 --- a/src/runtime/CPP/functions/CPPBoxWithNonMaximaSuppressionLimit.cpp +++ b/src/runtime/CPP/functions/CPPBoxWithNonMaximaSuppressionLimit.cpp @@ -30,9 +30,10 @@ namespace arm_compute { namespace { -void dequantize_tensor(const ITensor *input, ITensor *output, DataType data_type) +void dequantize_tensor(const ITensor *input, ITensor *output) { - const UniformQuantizationInfo qinfo = input->info()->quantization_info().uniform(); + const UniformQuantizationInfo qinfo = input->info()->quantization_info().uniform(); + const DataType data_type = input->info()->data_type(); Window window; window.use_tensor_dimensions(input->info()->tensor_shape()); @@ -60,9 +61,10 @@ void dequantize_tensor(const ITensor *input, ITensor *output, DataType data_type } } -void quantize_tensor(const ITensor *input, ITensor *output, DataType data_type) +void quantize_tensor(const ITensor *input, ITensor *output) { - const UniformQuantizationInfo qinfo = input->info()->quantization_info().uniform(); + const UniformQuantizationInfo qinfo = output->info()->quantization_info().uniform(); + const DataType data_type = output->info()->data_type(); Window window; window.use_tensor_dimensions(input->info()->tensor_shape()); @@ -102,7 +104,6 @@ CPPBoxWithNonMaximaSuppressionLimit::CPPBoxWithNonMaximaSuppressionLimit(std::sh _classes(), _batch_splits_out(), _keeps(), - _keeps_size(), _scores_in_f32(), _boxes_in_f32(), _batch_splits_in_f32(), @@ -111,7 +112,6 @@ CPPBoxWithNonMaximaSuppressionLimit::CPPBoxWithNonMaximaSuppressionLimit(std::sh _classes_f32(), _batch_splits_out_f32(), _keeps_f32(), - _keeps_size_f32(), _is_qasymm8(false) { } @@ -119,7 +119,7 @@ CPPBoxWithNonMaximaSuppressionLimit::CPPBoxWithNonMaximaSuppressionLimit(std::sh void CPPBoxWithNonMaximaSuppressionLimit::configure(const ITensor *scores_in, const ITensor *boxes_in, const ITensor *batch_splits_in, ITensor *scores_out, ITensor *boxes_out, ITensor *classes, ITensor *batch_splits_out, ITensor *keeps, ITensor *keeps_size, const BoxNMSLimitInfo info) { - ARM_COMPUTE_ERROR_ON_NULLPTR(scores_in, boxes_in, batch_splits_in, scores_out, boxes_out, classes); + ARM_COMPUTE_ERROR_ON_NULLPTR(scores_in, boxes_in, scores_out, boxes_out, classes); _is_qasymm8 = scores_in->info()->data_type() == DataType::QASYMM8; @@ -131,20 +131,22 @@ void CPPBoxWithNonMaximaSuppressionLimit::configure(const ITensor *scores_in, co _classes = classes; _batch_splits_out = batch_splits_out; _keeps = keeps; - _keeps_size = keeps_size; if(_is_qasymm8) { // Manage intermediate buffers _memory_group.manage(&_scores_in_f32); _memory_group.manage(&_boxes_in_f32); - _memory_group.manage(&_batch_splits_in_f32); _memory_group.manage(&_scores_out_f32); _memory_group.manage(&_boxes_out_f32); _memory_group.manage(&_classes_f32); _scores_in_f32.allocator()->init(scores_in->info()->clone()->set_data_type(DataType::F32)); _boxes_in_f32.allocator()->init(boxes_in->info()->clone()->set_data_type(DataType::F32)); - _batch_splits_in_f32.allocator()->init(batch_splits_in->info()->clone()->set_data_type(DataType::F32)); + if(batch_splits_in != nullptr) + { + _memory_group.manage(&_batch_splits_in_f32); + 
_batch_splits_in_f32.allocator()->init(batch_splits_in->info()->clone()->set_data_type(DataType::F32)); + } _scores_out_f32.allocator()->init(scores_out->info()->clone()->set_data_type(DataType::F32)); _boxes_out_f32.allocator()->init(boxes_out->info()->clone()->set_data_type(DataType::F32)); _classes_f32.allocator()->init(classes->info()->clone()->set_data_type(DataType::F32)); @@ -158,15 +160,11 @@ void CPPBoxWithNonMaximaSuppressionLimit::configure(const ITensor *scores_in, co _memory_group.manage(&_keeps_f32); _keeps_f32.allocator()->init(keeps->info()->clone()->set_data_type(DataType::F32)); } - if(keeps_size != nullptr) - { - _memory_group.manage(&_keeps_size_f32); - _keeps_size_f32.allocator()->init(keeps_size->info()->clone()->set_data_type(DataType::F32)); - } - _box_with_nms_limit_kernel.configure(&_scores_in_f32, &_boxes_in_f32, &_batch_splits_in_f32, &_scores_out_f32, &_boxes_out_f32, &_classes_f32, + _box_with_nms_limit_kernel.configure(&_scores_in_f32, &_boxes_in_f32, (batch_splits_in != nullptr) ? &_batch_splits_in_f32 : nullptr, + &_scores_out_f32, &_boxes_out_f32, &_classes_f32, (batch_splits_out != nullptr) ? &_batch_splits_out_f32 : nullptr, (keeps != nullptr) ? &_keeps_f32 : nullptr, - (keeps_size != nullptr) ? &_keeps_size_f32 : nullptr, info); + keeps_size, info); } else { @@ -177,7 +175,10 @@ void CPPBoxWithNonMaximaSuppressionLimit::configure(const ITensor *scores_in, co { _scores_in_f32.allocator()->allocate(); _boxes_in_f32.allocator()->allocate(); - _batch_splits_in_f32.allocator()->allocate(); + if(_batch_splits_in != nullptr) + { + _batch_splits_in_f32.allocator()->allocate(); + } _scores_out_f32.allocator()->allocate(); _boxes_out_f32.allocator()->allocate(); _classes_f32.allocator()->allocate(); @@ -189,17 +190,13 @@ void CPPBoxWithNonMaximaSuppressionLimit::configure(const ITensor *scores_in, co { _keeps_f32.allocator()->allocate(); } - if(keeps_size != nullptr) - { - _keeps_size_f32.allocator()->allocate(); - } } } Status validate(const ITensorInfo *scores_in, const ITensorInfo *boxes_in, const ITensorInfo *batch_splits_in, const ITensorInfo *scores_out, const ITensorInfo *boxes_out, const ITensorInfo *classes, const ITensorInfo *batch_splits_out, const ITensorInfo *keeps, const ITensorInfo *keeps_size, const BoxNMSLimitInfo info) { - ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(scores_in, boxes_in, batch_splits_in, scores_out, boxes_out, classes); + ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(scores_in, boxes_in, scores_out, boxes_out, classes); ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(scores_in, 1, DataType::QASYMM8, DataType::F16, DataType::F32); const bool is_qasymm8 = scores_in->data_type() == DataType::QASYMM8; @@ -218,31 +215,33 @@ Status validate(const ITensorInfo *scores_in, const ITensorInfo *boxes_in, const void CPPBoxWithNonMaximaSuppressionLimit::run() { + // Acquire all the temporaries + MemoryGroupResourceScope scope_mg(_memory_group); + if(_is_qasymm8) { - dequantize_tensor(_scores_in, &_scores_in_f32, _scores_in->info()->data_type()); - dequantize_tensor(_boxes_in, &_boxes_in_f32, _boxes_in->info()->data_type()); - dequantize_tensor(_batch_splits_in, &_batch_splits_in_f32, _batch_splits_in->info()->data_type()); + dequantize_tensor(_scores_in, &_scores_in_f32); + dequantize_tensor(_boxes_in, &_boxes_in_f32); + if(_batch_splits_in != nullptr) + { + dequantize_tensor(_batch_splits_in, &_batch_splits_in_f32); + } } Scheduler::get().schedule(&_box_with_nms_limit_kernel, Window::DimY); if(_is_qasymm8) { - quantize_tensor(&_scores_out_f32, 
_scores_out, _scores_out->info()->data_type()); - quantize_tensor(&_boxes_out_f32, _boxes_out, _boxes_out->info()->data_type()); - quantize_tensor(&_classes_f32, _classes, _classes->info()->data_type()); + quantize_tensor(&_scores_out_f32, _scores_out); + quantize_tensor(&_boxes_out_f32, _boxes_out); + quantize_tensor(&_classes_f32, _classes); if(_batch_splits_out != nullptr) { - quantize_tensor(&_batch_splits_out_f32, _batch_splits_out, _batch_splits_out->info()->data_type()); + quantize_tensor(&_batch_splits_out_f32, _batch_splits_out); } if(_keeps != nullptr) { - quantize_tensor(&_keeps_f32, _keeps, _keeps->info()->data_type()); - } - if(_keeps_size != nullptr) - { - quantize_tensor(&_keeps_size_f32, _keeps_size, _keeps_size->info()->data_type()); + quantize_tensor(&_keeps_f32, _keeps); } } } diff --git a/tests/validation/CL/GenerateProposalsLayer.cpp b/tests/validation/CL/GenerateProposalsLayer.cpp index 4ebffd7e79..bfad8e8381 100644 --- a/tests/validation/CL/GenerateProposalsLayer.cpp +++ b/tests/validation/CL/GenerateProposalsLayer.cpp @@ -82,6 +82,8 @@ const auto ComputeAllInfoDataset = framework::dataset::make("ComputeAllInfo", ComputeAnchorsInfo(100U, 100U, 1. / 4.f), }); + +constexpr AbsoluteTolerance<int16_t> tolerance_qsymm16(1); } // namespace TEST_SUITE(CL) @@ -364,7 +366,7 @@ DATA_TEST_CASE(IntegrationTestCaseGenerateProposals, framework::DatasetMode::ALL proposals_final.allocator()->allocate(); select_proposals.run(); - // Select the first N entries of the proposals + // Select the first N entries of the scores CLTensor scores_final; CLSlice select_scores; select_scores.configure(&scores_out, &scores_final, Coordinates(0), Coordinates(N)); @@ -395,6 +397,22 @@ FIXTURE_DATA_TEST_CASE(ComputeAllAnchors, CLComputeAllAnchorsFixture<half>, fram TEST_SUITE_END() // FP16 TEST_SUITE_END() // Float +template <typename T> +using CLComputeAllAnchorsQuantizedFixture = ComputeAllAnchorsQuantizedFixture<CLTensor, CLAccessor, CLComputeAllAnchors, T>; + +TEST_SUITE(Quantized) TEST_SUITE(QASYMM8) FIXTURE_DATA_TEST_CASE(ComputeAllAnchors, CLComputeAllAnchorsQuantizedFixture<int16_t>, framework::DatasetMode::ALL, + combine(combine(combine(framework::dataset::make("NumAnchors", { 2, 4, 8 }), ComputeAllInfoDataset), + framework::dataset::make("DataType", { DataType::QSYMM16 })), + framework::dataset::make("QuantInfo", { QuantizationInfo(0.125f, 0) }))) { // Validate output validate(CLAccessor(_target), _reference, tolerance_qsymm16); } TEST_SUITE_END() // QASYMM8 TEST_SUITE_END() // Quantized + TEST_SUITE_END() // GenerateProposals TEST_SUITE_END() // CL diff --git a/tests/validation/fixtures/ComputeAllAnchorsFixture.h b/tests/validation/fixtures/ComputeAllAnchorsFixture.h index 6f2db3e623..e837bd4838 100644 --- a/tests/validation/fixtures/ComputeAllAnchorsFixture.h +++ b/tests/validation/fixtures/ComputeAllAnchorsFixture.h @@ -41,14 +41,14 @@ namespace test namespace validation { template <typename TensorType, typename AccessorType, typename FunctionType, typename T> -class ComputeAllAnchorsFixture : public framework::Fixture +class ComputeAllAnchorsGenericFixture : public framework::Fixture { public: template <typename...> - void setup(size_t num_anchors, const ComputeAnchorsInfo &info, DataType data_type) + void setup(size_t num_anchors, const ComputeAnchorsInfo &info, DataType data_type, QuantizationInfo qinfo) { - _target = compute_target(num_anchors, data_type, info); - _reference = compute_reference(num_anchors, data_type, info); + _target = compute_target(num_anchors, data_type, info, qinfo); + _reference = compute_reference(num_anchors, data_type, info, qinfo); } protected: template <typename U> void fill(U &&tensor) { library->fill_tensor_uniform(tensor, 0, T(0), 
T(100)); } - TensorType compute_target(size_t num_anchors, DataType data_type, const ComputeAnchorsInfo &info) + TensorType compute_target(size_t num_anchors, DataType data_type, const ComputeAnchorsInfo &info, QuantizationInfo qinfo) { // Create tensors TensorShape anchors_shape(4, num_anchors); - TensorType anchors = create_tensor<TensorType>(anchors_shape, data_type); + TensorType anchors = create_tensor<TensorType>(anchors_shape, data_type, 1, qinfo); TensorType all_anchors; // Create and configure function @@ -88,10 +88,11 @@ SimpleTensor<T> compute_reference(size_t num_anchors, DataType data_type, - const ComputeAnchorsInfo &info) + const ComputeAnchorsInfo &info, + QuantizationInfo qinfo) { // Create reference tensor - SimpleTensor<T> anchors(TensorShape(4, num_anchors), data_type); + SimpleTensor<T> anchors(TensorShape(4, num_anchors), data_type, 1, qinfo); // Fill reference tensor fill(anchors); @@ -101,6 +102,28 @@ protected: TensorType _target{}; SimpleTensor<T> _reference{}; }; + +template <typename TensorType, typename AccessorType, typename FunctionType, typename T> +class ComputeAllAnchorsFixture : public ComputeAllAnchorsGenericFixture<TensorType, AccessorType, FunctionType, T> +{ +public: + template <typename...> + void setup(size_t num_anchors, const ComputeAnchorsInfo &info, DataType data_type) + { + ComputeAllAnchorsGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(num_anchors, info, data_type, QuantizationInfo()); + } +}; + +template <typename TensorType, typename AccessorType, typename FunctionType, typename T> +class ComputeAllAnchorsQuantizedFixture : public ComputeAllAnchorsGenericFixture<TensorType, AccessorType, FunctionType, T> +{ +public: + template <typename...> + void setup(size_t num_anchors, const ComputeAnchorsInfo &info, DataType data_type, QuantizationInfo qinfo) + { + ComputeAllAnchorsGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(num_anchors, info, data_type, qinfo); + } +}; } // namespace validation } // namespace test } // namespace arm_compute diff --git a/tests/validation/reference/ComputeAllAnchors.cpp b/tests/validation/reference/ComputeAllAnchors.cpp index 3f0498015a..60be7ef8a8 100644 --- a/tests/validation/reference/ComputeAllAnchors.cpp +++ b/tests/validation/reference/ComputeAllAnchors.cpp @@ -73,6 +73,15 @@ SimpleTensor<T> compute_all_anchors(const SimpleTensor<T> &anchors, const Comput } template SimpleTensor<float> compute_all_anchors(const SimpleTensor<float> &anchors, const ComputeAnchorsInfo &info); template SimpleTensor<half> compute_all_anchors(const SimpleTensor<half> &anchors, const ComputeAnchorsInfo &info); + +template <> +SimpleTensor<int16_t> compute_all_anchors(const SimpleTensor<int16_t> &anchors, const ComputeAnchorsInfo &info) +{ + SimpleTensor<float> anchors_tmp = convert_from_symmetric(anchors); + SimpleTensor<float> all_anchors_tmp = compute_all_anchors(anchors_tmp, info); + SimpleTensor<int16_t> all_anchors = convert_to_symmetric<int16_t>(all_anchors_tmp, anchors.quantization_info()); + return all_anchors; +} } // namespace reference } // namespace validation } // namespace test -- cgit v1.2.1
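
A minimal usage sketch of the quantized path this patch enables. Everything below is an illustrative assumption rather than code from the patch (the tensor shapes, the quantization parameters chosen for scores and deltas, and the helper name run_generate_proposals_qasymm8 are made up); only the data-type constraints are taken from the documentation and validate() checks above: QASYMM8 scores/deltas require QSYMM16 anchors with a scale of 0.125, and the layer then produces QASYMM16 proposals with scale 0.125 and offset 0.

#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/functions/CLGenerateProposalsLayer.h"

using namespace arm_compute;

// Hypothetical helper: configure and run CLGenerateProposalsLayer on QASYMM8 data.
void run_generate_proposals_qasymm8()
{
    CLScheduler::get().default_init();

    // Illustrative sizes: a 16x16 feature map with 4 anchors per position.
    const int W = 16, H = 16, A = 4;

    CLTensor scores, deltas, anchors, proposals, scores_out, num_valid_proposals;
    // Scores and deltas come out of a quantized convolution as QASYMM8 (illustrative qinfo).
    scores.allocator()->init(TensorInfo(TensorShape(W, H, A), 1, DataType::QASYMM8, QuantizationInfo(1.f / 256, 0)));
    deltas.allocator()->init(TensorInfo(TensorShape(W, H, 4 * A), 1, DataType::QASYMM8, QuantizationInfo(0.1f, 128)));
    // Anchors must be QSYMM16 with a scale of 0.125 when the scores are QASYMM8.
    anchors.allocator()->init(TensorInfo(TensorShape(4, A), 1, DataType::QSYMM16, QuantizationInfo(0.125f)));
    // Proposals are produced as QASYMM16 with scale 0.125 and offset 0; scores_out keeps the scores' type.
    proposals.allocator()->init(TensorInfo(TensorShape(5, W * H * A), 1, DataType::QASYMM16, QuantizationInfo(0.125f, 0)));
    scores_out.allocator()->init(TensorInfo(TensorShape(W * H * A), 1, DataType::QASYMM8, QuantizationInfo(1.f / 256, 0)));
    num_valid_proposals.allocator()->init(TensorInfo(TensorShape(1), 1, DataType::U32));

    // Illustrative GenerateProposalsInfo: 64x64 image, im_scale 1, spatial_scale 1/4 (feature stride of 4).
    CLGenerateProposalsLayer generate_proposals;
    generate_proposals.configure(&scores, &deltas, &anchors, &proposals, &scores_out, &num_valid_proposals,
                                 GenerateProposalsInfo(64.f, 64.f, 1.f, 0.25f));

    for(CLTensor *t : { &scores, &deltas, &anchors, &proposals, &scores_out, &num_valid_proposals })
    {
        t->allocator()->allocate();
    }
    // ... map the inputs and fill scores/deltas/anchors with quantized values ...
    generate_proposals.run();
    CLScheduler::get().sync();
}

After run(), num_valid_proposals says how many leading rows of proposals and scores_out are valid, and each proposals row holds (batch_idx, x1, y1, x2, y2), which is why the function pads the four ROI fields with an extra leading column.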
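For intuition about the DEQUANTIZE/QUANTIZE round trip the new kernel performs, here is a scalar C++ model of the affine arithmetic. This is a sketch only; the real helpers in helpers_asymm.h are vectorized OpenCL macros, and the function names below are invented. It also shows why a scale of 0.125 lets the QSYMM16 anchors and QASYMM16 proposals represent box coordinates exactly in 1/8-pixel steps.

#include <algorithm>
#include <cmath>
#include <cstdint>

// Scalar model of DEQUANTIZE: map a quantized value back to real units.
float dequantize(int q, int offset, float scale)
{
    return static_cast<float>(q - offset) * scale;
}

// Scalar model of QUANTIZE for a QSYMM16 value (the offset is 0 for
// symmetric types): round to the nearest quantization step and saturate.
int16_t quantize_qsymm16(float x, float scale)
{
    const long q = std::lround(x / scale);
    return static_cast<int16_t>(std::clamp<long>(q, INT16_MIN, INT16_MAX));
}

// Example round trip with the 0.125 scale used throughout this patch:
// quantize_qsymm16(12.5f, 0.125f) == 100, and dequantize(100, 0, 0.125f)
// == 12.5f exactly, since 12.5 is a multiple of 1/8.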