author     Michele Di Giorgio <michele.digiorgio@arm.com>   2019-09-05 12:30:22 +0100
committer  Pablo Marquez <pablo.tello@arm.com>              2019-09-27 16:20:14 +0000
commit     6b612f5fa1fee9528f2f87491fe7edb3887d9817 (patch)
tree       579ef443d61ed1319e5d8f44d8a7a8ce83c82aad /arm_compute
parent     240b79de1c211ebb8d439b4a1c8c79777aa36f13 (diff)
COMPMID-2310: CLGenerateProposalsLayer: support for QASYMM8
Change-Id: I48b77e09857cd43f9498d28e8f4bf346e3d7110d
Signed-off-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Reviewed-on: https://review.mlplatform.org/c/1969
Reviewed-by: Pablo Marquez <pablo.tello@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'arm_compute')
-rw-r--r--  arm_compute/core/CL/kernels/CLGenerateProposalsLayerKernel.h            |  4
-rw-r--r--  arm_compute/core/CL/kernels/CLStridedSliceKernel.h                       |  4
-rw-r--r--  arm_compute/runtime/CL/functions/CLGenerateProposalsLayer.h             | 46
-rw-r--r--  arm_compute/runtime/CL/functions/CLSlice.h                               |  6
-rw-r--r--  arm_compute/runtime/CL/functions/CLStridedSlice.h                        |  4
-rw-r--r--  arm_compute/runtime/CPP/functions/CPPBoxWithNonMaximaSuppressionLimit.h |  2
6 files changed, 40 insertions, 26 deletions
diff --git a/arm_compute/core/CL/kernels/CLGenerateProposalsLayerKernel.h b/arm_compute/core/CL/kernels/CLGenerateProposalsLayerKernel.h
index 5900d79821..e2b20f667f 100644
--- a/arm_compute/core/CL/kernels/CLGenerateProposalsLayerKernel.h
+++ b/arm_compute/core/CL/kernels/CLGenerateProposalsLayerKernel.h
@@ -48,7 +48,7 @@ public:
/** Set the input and output tensors.
*
- * @param[in] anchors Source tensor. Original set of anchors of size (4, A), where A is the number of anchors. Data types supported: F16/F32
+ * @param[in] anchors Source tensor. Original set of anchors of size (4, A), where A is the number of anchors. Data types supported: QSYMM16/F16/F32
* @param[out] all_anchors Destination tensor. Destination anchors of size (4, H*W*A) where H and W are the height and width of the feature map and A is the number of anchors. Data types supported: Same as @p input
* @param[in] info Contains Compute Anchors operation information described in @ref ComputeAnchorsInfo
*
@@ -57,7 +57,7 @@ public:
/** Static function to check if given info will lead to a valid configuration of @ref CLComputeAllAnchorsKernel
*
- * @param[in] anchors Source tensor info. Original set of anchors of size (4, A), where A is the number of anchors. Data types supported: F16/F32
+ * @param[in] anchors Source tensor info. Original set of anchors of size (4, A), where A is the number of anchors. Data types supported: QSYMM16/F16/F32
* @param[in] all_anchors Destination tensor info. Destination anchors of size (4, H*W*A) where H and W are the height and width of the feature map and A is the number of anchors. Data types supported: Same as @p input
* @param[in] info Contains Compute Anchors operation information described in @ref ComputeAnchorsInfo
*
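For illustration only (not part of the patch), a minimal sketch of how the QSYMM16 anchors path documented above could be validated. The anchor count, feature-map size, spatial scale and quantization scale are made-up values, and the ComputeAnchorsInfo argument order is assumed from arm_compute/core/Types.h.

#include "arm_compute/core/CL/kernels/CLGenerateProposalsLayerKernel.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"

using namespace arm_compute;

// Validate a QSYMM16 compute-anchors configuration (illustrative shapes and values).
Status validate_qsymm16_anchors()
{
    const unsigned int num_anchors = 9;
    const unsigned int feat_w      = 38;
    const unsigned int feat_h      = 38;

    // anchors is (4, A); all_anchors is (4, H*W*A). Scale 1/8 mirrors the documented QSYMM16 usage.
    const TensorInfo anchors(TensorShape(4U, num_anchors), 1, DataType::QSYMM16, QuantizationInfo(0.125f));
    const TensorInfo all_anchors(TensorShape(4U, feat_w * feat_h * num_anchors), 1, DataType::QSYMM16, QuantizationInfo(0.125f));

    // Spatial scale of 1/16 is a placeholder for the backbone stride.
    return CLComputeAllAnchorsKernel::validate(&anchors, &all_anchors, ComputeAnchorsInfo(feat_w, feat_h, 1.f / 16.f));
}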
diff --git a/arm_compute/core/CL/kernels/CLStridedSliceKernel.h b/arm_compute/core/CL/kernels/CLStridedSliceKernel.h
index 5b69b3fd16..d579d1ceb9 100644
--- a/arm_compute/core/CL/kernels/CLStridedSliceKernel.h
+++ b/arm_compute/core/CL/kernels/CLStridedSliceKernel.h
@@ -54,7 +54,7 @@ public:
*
* @note Supported tensor rank: up to 4
*
- * @param[in] input Source tensor. Data type supported: U8/S8/QASYMM8/U16/S16/QSYMM16/U32/S32/F16/F32
+ * @param[in] input Source tensor. Data type supported: U8/S8/QASYMM8/U16/S16/QASYMM16/QSYMM16/U32/S32/F16/F32
* @param[out] output Destination tensor. Data type supported: Same as @p input
* @param[in] starts The starts of the dimensions of the input tensor to be sliced. The length must be of rank(input).
* @param[in] ends The ends of the dimensions of the input tensor to be sliced. The length must be of rank(input).
@@ -72,7 +72,7 @@ public:
*
* @note Supported tensor rank: up to 4
*
- * @param[in] input Source tensor. Data type supported: U8/S8/QASYMM8/U16/S16/QSYMM16/U32/S32/F16/F32
+ * @param[in] input Source tensor. Data type supported: U8/S8/QASYMM8/U16/S16/QASYMM16/QSYMM16/U32/S32/F16/F32
* @param[in] output Destination tensor. Data type supported: Same as @p input
* @param[in] starts The starts of the dimensions of the input tensor to be sliced. The length must be of rank(input).
* @param[in] ends The ends of the dimensions of the input tensor to be sliced. The length must be of rank(input).
diff --git a/arm_compute/runtime/CL/functions/CLGenerateProposalsLayer.h b/arm_compute/runtime/CL/functions/CLGenerateProposalsLayer.h
index 8546261fef..827f19d130 100644
--- a/arm_compute/runtime/CL/functions/CLGenerateProposalsLayer.h
+++ b/arm_compute/runtime/CL/functions/CLGenerateProposalsLayer.h
@@ -24,16 +24,17 @@
#ifndef __ARM_COMPUTE_CLGENERATEPROPOSALSLAYER_H__
#define __ARM_COMPUTE_CLGENERATEPROPOSALSLAYER_H__
#include "arm_compute/core/CL/kernels/CLBoundingBoxTransformKernel.h"
+#include "arm_compute/core/CL/kernels/CLDequantizationLayerKernel.h"
#include "arm_compute/core/CL/kernels/CLGenerateProposalsLayerKernel.h"
#include "arm_compute/core/CL/kernels/CLPadLayerKernel.h"
#include "arm_compute/core/CL/kernels/CLPermuteKernel.h"
+#include "arm_compute/core/CL/kernels/CLQuantizationLayerKernel.h"
#include "arm_compute/core/CL/kernels/CLReshapeLayerKernel.h"
-#include "arm_compute/core/CL/kernels/CLStridedSliceKernel.h"
-#include "arm_compute/core/CPP/kernels/CPPBoxWithNonMaximaSuppressionLimitKernel.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CPP/CPPScheduler.h"
+#include "arm_compute/runtime/CPP/functions/CPPBoxWithNonMaximaSuppressionLimit.h"
#include "arm_compute/runtime/IFunction.h"
#include "arm_compute/runtime/MemoryGroup.h"
@@ -47,10 +48,11 @@ class ICLTensor;
* -# @ref CLComputeAllAnchors
* -# @ref CLPermute x 2
* -# @ref CLReshapeLayer x 2
- * -# @ref CLStridedSlice x 3
* -# @ref CLBoundingBoxTransform
* -# @ref CLPadLayerKernel
- * And the following CPP kernels:
+ * -# @ref CLDequantizationLayerKernel
+ * -# @ref CLQuantizationLayerKernel
+ * And the following CPP functions:
* -# @ref CPPBoxWithNonMaximaSuppressionLimit
*/
class CLGenerateProposalsLayer : public IFunction
@@ -72,11 +74,13 @@ public:
/** Set the input and output tensors.
*
- * @param[in] scores Scores from convolution layer of size (W, H, A), where H and W are the height and width of the feature map, and A is the number of anchors. Data types supported: F16/F32
+ * @param[in] scores Scores from convolution layer of size (W, H, A), where H and W are the height and width of the feature map, and A is the number of anchors.
+ * Data types supported: QASYMM8/F16/F32
* @param[in] deltas Bounding box deltas from convolution layer of size (W, H, 4*A). Data types supported: Same as @p scores
- * @param[in] anchors Anchors tensor of size (4, A). Data types supported: Same as @p input
- * @param[out] proposals Box proposals output tensor of size (5, W*H*A). Data types supported: Same as @p input
- * @param[out] scores_out Box scores output tensor of size (W*H*A). Data types supported: Same as @p input
+ * @param[in] anchors Anchors tensor of size (4, A). Data types supported: QSYMM16 with scale of 0.125 if @p scores is QASYMM8, otherwise same as @p scores
+ * @param[out] proposals Box proposals output tensor of size (5, W*H*A).
+ * Data types supported: QASYMM16 with scale of 0.125 and 0 offset if @p scores is QASYMM8, otherwise same as @p scores
+ * @param[out] scores_out Box scores output tensor of size (W*H*A). Data types supported: Same as @p scores
* @param[out] num_valid_proposals Scalar output tensor which says which of the first proposals are valid. Data types supported: U32
* @param[in] info Contains GenerateProposals operation information described in @ref GenerateProposalsInfo
*
@@ -88,12 +92,14 @@ public:
/** Static function to check if given info will lead to a valid configuration of @ref CLGenerateProposalsLayer
*
- * @param[in] scores Scores info from convolution layer of size (W, H, A), where H and W are the height and width of the feature map, and A is the number of anchors. Data types supported: F16/F32
+ * @param[in] scores Scores info from convolution layer of size (W, H, A), where H and W are the height and width of the feature map, and A is the number of anchors.
+ * Data types supported: QASYMM8/F16/F32
* @param[in] deltas Bounding box deltas info from convolution layer of size (W, H, 4*A). Data types supported: Same as @p scores
- * @param[in] anchors Anchors tensor info of size (4, A). Data types supported: Same as @p input
- * @param[in] proposals Box proposals info output tensor of size (5, W*H*A). Data types supported: Data types supported: U32
- * @param[in] scores_out Box scores output tensor info of size (W*H*A). Data types supported: Same as @p input
- * @param[in] num_valid_proposals Scalar output tensor info which says which of the first proposals are valid. Data types supported: Same as @p input
+ * @param[in] anchors Anchors tensor of size (4, A). Data types supported: QSYMM16 with scale of 0.125 if @p scores is QASYMM8, otherwise same as @p scores
+ * @param[in] proposals Box proposals info output tensor of size (5, W*H*A).
+ * Data types supported: QASYMM16 with scale of 0.125 and 0 offset if @p scores is QASYMM8, otherwise same as @p scores
+ * @param[in] scores_out Box scores output tensor info of size (W*H*A). Data types supported: Same as @p scores
+ * @param[in] num_valid_proposals Scalar output tensor info which says which of the first proposals are valid. Data types supported: U32
* @param[in] info Contains GenerateProposals operation information described in @ref GenerateProposalsInfo
*
* @return a Status
@@ -117,23 +123,33 @@ private:
CLComputeAllAnchorsKernel _compute_anchors_kernel;
CLBoundingBoxTransformKernel _bounding_box_kernel;
CLPadLayerKernel _pad_kernel;
+ CLDequantizationLayerKernel _dequantize_anchors;
+ CLDequantizationLayerKernel _dequantize_deltas;
+ CLQuantizationLayerKernel _quantize_all_proposals;
- // CPP kernels
- CPPBoxWithNonMaximaSuppressionLimitKernel _cpp_nms_kernel;
+ // CPP functions
+ CPPBoxWithNonMaximaSuppressionLimit _cpp_nms;
bool _is_nhwc;
+ bool _is_qasymm8;
// Temporary tensors
CLTensor _deltas_permuted;
CLTensor _deltas_flattened;
+ CLTensor _deltas_flattened_f32;
CLTensor _scores_permuted;
CLTensor _scores_flattened;
CLTensor _all_anchors;
+ CLTensor _all_anchors_f32;
CLTensor _all_proposals;
+ CLTensor _all_proposals_quantized;
CLTensor _keeps_nms_unused;
CLTensor _classes_nms_unused;
CLTensor _proposals_4_roi_values;
+ // Temporary tensor pointers
+ CLTensor *_all_proposals_to_use;
+
// Output tensor pointers
ICLTensor *_num_valid_proposals;
ICLTensor *_scores_out;
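To make the quantized contract documented above concrete, here is a hypothetical configuration sketch (not part of the patch). The tensor shapes, quantization scales/offsets and the GenerateProposalsInfo arguments are placeholder values chosen to match the documented requirements: QASYMM8 scores and deltas, QSYMM16 anchors with scale 0.125, QASYMM16 proposals with scale 0.125 and zero offset, and a U32 scalar for the valid-proposal count. The GenerateProposalsInfo constructor (image width, height, scale) and its defaults are assumed from Types.h.

#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/functions/CLGenerateProposalsLayer.h"

using namespace arm_compute;

void configure_qasymm8_generate_proposals()
{
    CLScheduler::get().default_init();

    // Illustrative feature-map geometry and anchor count.
    const unsigned int W = 38, H = 38, A = 9;

    CLTensor scores, deltas, anchors, proposals, scores_out, num_valid_proposals;

    // QASYMM8 scores and deltas (scales/offsets are placeholders).
    scores.allocator()->init(TensorInfo(TensorShape(W, H, A), 1, DataType::QASYMM8, QuantizationInfo(1.f / 255.f, 0)));
    deltas.allocator()->init(TensorInfo(TensorShape(W, H, 4 * A), 1, DataType::QASYMM8, QuantizationInfo(0.05f, 128)));

    // Anchors: QSYMM16 with scale 0.125; proposals: QASYMM16 with scale 0.125 and offset 0, as documented above.
    anchors.allocator()->init(TensorInfo(TensorShape(4U, A), 1, DataType::QSYMM16, QuantizationInfo(0.125f)));
    proposals.allocator()->init(TensorInfo(TensorShape(5U, W * H * A), 1, DataType::QASYMM16, QuantizationInfo(0.125f, 0)));

    // Output scores match the input scores type; num_valid_proposals is a U32 scalar.
    scores_out.allocator()->init(TensorInfo(TensorShape(W * H * A), 1, DataType::QASYMM8, QuantizationInfo(1.f / 255.f, 0)));
    num_valid_proposals.allocator()->init(TensorInfo(TensorShape(1U), 1, DataType::U32));

    CLGenerateProposalsLayer generate_proposals;
    generate_proposals.configure(&scores, &deltas, &anchors, &proposals, &scores_out, &num_valid_proposals,
                                 GenerateProposalsInfo(600.f /* im_width */, 800.f /* im_height */, 1.f /* im_scale */));

    // Allocate backing CL buffers; inputs would then be filled and generate_proposals.run() called.
    for(auto *t : { &scores, &deltas, &anchors, &proposals, &scores_out, &num_valid_proposals })
    {
        t->allocator()->allocate();
    }
}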
diff --git a/arm_compute/runtime/CL/functions/CLSlice.h b/arm_compute/runtime/CL/functions/CLSlice.h
index acd4f0d3ad..5e8d0199c2 100644
--- a/arm_compute/runtime/CL/functions/CLSlice.h
+++ b/arm_compute/runtime/CL/functions/CLSlice.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -42,7 +42,7 @@ public:
* @note End coordinates can be negative, which represents the number of elements before the end of that dimension.
* @note End indices are not inclusive unless negative.
*
- * @param[in] input Source tensor. Data type supported: U8/S8/QASYMM8/U16/S16/U32/S32/F16/F32
+ * @param[in] input Source tensor. Data type supported: U8/S8/QASYMM8/U16/S16/QASYMM16/QSYMM16/U32/S32/F16/F32
* @param[out] output Destination tensor. Data type supported: Same as @p input
* @param[in] starts The starts of the dimensions of the input tensor to be sliced. The length must be of rank(input).
* @param[in] ends The ends of the dimensions of the input tensor to be sliced. The length must be of rank(input).
@@ -56,7 +56,7 @@ public:
* @note End coordinates can be negative, which represents the number of elements before the end of that dimension.
* @note End indices are not inclusive unless negative.
*
- * @param[in] input Source tensor info. Data type supported: U8/S8/QASYMM8/U16/S16/U32/S32/F16/F32
+ * @param[in] input Source tensor info. Data type supported: U8/S8/QASYMM8/U16/S16/QASYMM16/QSYMM16/U32/S32/F16/F32
* @param[in] output Destination tensor info. Data type supported: Same as @p input
* @param[in] starts The starts of the dimensions of the input tensor to be sliced. The length must be of rank(input).
* @param[in] ends The ends of the dimensions of the input tensor to be sliced. The length must be of rank(input).
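As a usage note (not part of the patch), slicing one of the newly supported quantized types through CLSlice could look like the following sketch, which uses only the configure() parameters documented above. The tensor shape, quantization parameters and slice window are illustrative.

#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/functions/CLSlice.h"

using namespace arm_compute;

void slice_qasymm16_proposals()
{
    CLScheduler::get().default_init();

    // A (5, N) proposals-style tensor in QASYMM16; N and the quantization are illustrative.
    const unsigned int N = 100;
    CLTensor boxes, rois;
    boxes.allocator()->init(TensorInfo(TensorShape(5U, N), 1, DataType::QASYMM16, QuantizationInfo(0.125f, 0)));
    rois.allocator()->init(TensorInfo(TensorShape(4U, N), 1, DataType::QASYMM16, QuantizationInfo(0.125f, 0)));

    // Keep elements 1..4 of every column (ends are exclusive), dropping the leading value per box.
    CLSlice slice;
    slice.configure(&boxes, &rois, Coordinates(1, 0), Coordinates(5, N));

    boxes.allocator()->allocate();
    rois.allocator()->allocate();
    // Fill 'boxes' and call slice.run() to execute on the CL queue.
}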
diff --git a/arm_compute/runtime/CL/functions/CLStridedSlice.h b/arm_compute/runtime/CL/functions/CLStridedSlice.h
index bb97b17fea..885751788c 100644
--- a/arm_compute/runtime/CL/functions/CLStridedSlice.h
+++ b/arm_compute/runtime/CL/functions/CLStridedSlice.h
@@ -39,7 +39,7 @@ public:
*
* @note Supported tensor rank: up to 4
*
- * @param[in] input Source tensor. Data type supported: U8/S8/QASYMM8/U16/S16/QSYMM16/U32/S32/F16/F32
+ * @param[in] input Source tensor. Data type supported: U8/S8/QASYMM8/U16/S16/QASYMM16/QSYMM16/U32/S32/F16/F32
* @param[out] output Destination tensor. Data type supported: Same as @p input
* @param[in] starts The starts of the dimensions of the input tensor to be sliced. The length must be of rank(input).
* @param[in] ends The ends of the dimensions of the input tensor to be sliced. The length must be of rank(input).
@@ -57,7 +57,7 @@ public:
*
* @note Supported tensor rank: up to 4
*
- * @param[in] input Source tensor. Data type supported: U8/S8/QASYMM8/U16/S16/QSYMM16/U32/S32/F16/F32
+ * @param[in] input Source tensor. Data type supported: U8/S8/QASYMM8/U16/S16/QASYMM16/QSYMM16/U32/S32/F16/F32
* @param[in] output Destination tensor. Data type supported: Same as @p input
* @param[in] starts The starts of the dimensions of the input tensor to be sliced. The length must be of rank(input).
* @param[in] ends The ends of the dimensions of the input tensor to be sliced. The length must be of rank(input).
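Similarly, a hedged sketch of CLStridedSlice on a QSYMM16 tensor. The strides parameter and its position are assumed from the full configure() signature, which is truncated in the excerpt above, and the shape and quantization are again illustrative.

#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/functions/CLStridedSlice.h"

using namespace arm_compute;

void strided_slice_qsymm16_anchors()
{
    CLScheduler::get().default_init();

    // A (4, A) QSYMM16 anchors-style tensor; A and the scale are illustrative.
    const unsigned int A = 9;
    CLTensor anchors, x2_coords;
    anchors.allocator()->init(TensorInfo(TensorShape(4U, A), 1, DataType::QSYMM16, QuantizationInfo(0.125f)));
    x2_coords.allocator()->init(TensorInfo(TensorShape(1U, A), 1, DataType::QSYMM16, QuantizationInfo(0.125f)));

    // Extract the third element of every anchor: start=2, end=3 (exclusive), stride=1 on axis 0.
    CLStridedSlice strided_slice;
    strided_slice.configure(&anchors, &x2_coords, Coordinates(2, 0), Coordinates(3, A), BiStrides(1, 1));

    anchors.allocator()->allocate();
    x2_coords.allocator()->allocate();
    // Fill 'anchors' and call strided_slice.run().
}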
diff --git a/arm_compute/runtime/CPP/functions/CPPBoxWithNonMaximaSuppressionLimit.h b/arm_compute/runtime/CPP/functions/CPPBoxWithNonMaximaSuppressionLimit.h
index 4857f74f93..dc23d42126 100644
--- a/arm_compute/runtime/CPP/functions/CPPBoxWithNonMaximaSuppressionLimit.h
+++ b/arm_compute/runtime/CPP/functions/CPPBoxWithNonMaximaSuppressionLimit.h
@@ -100,7 +100,6 @@ private:
ITensor *_classes;
ITensor *_batch_splits_out;
ITensor *_keeps;
- ITensor *_keeps_size;
Tensor _scores_in_f32;
Tensor _boxes_in_f32;
@@ -110,7 +109,6 @@ private:
Tensor _classes_f32;
Tensor _batch_splits_out_f32;
Tensor _keeps_f32;
- Tensor _keeps_size_f32;
bool _is_qasymm8;
};