From 60954c671ffdc3422bbdb728fc022eb6896c1e17 Mon Sep 17 00:00:00 2001
From: Alex Gilday
Date: Mon, 5 Mar 2018 16:22:48 +0000
Subject: COMPMID-754: Add validation to (De)QuantizationLayers

Change-Id: If8fa1277e8dc5b8e28a8bcad4ff9fc672b00ce9a
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/123275
Reviewed-by: Anthony Barbier
Tested-by: Jenkins
Reviewed-by: Michalis Spyrou
---
 arm_compute/core/CL/kernels/CLDequantizationLayerKernel.h   | 12 +++++++++++-
 arm_compute/core/CL/kernels/CLMinMaxLayerKernel.h           | 13 +++++++++++--
 arm_compute/core/CL/kernels/CLQuantizationLayerKernel.h     | 10 ++++++++++
 arm_compute/core/NEON/kernels/NEDequantizationLayerKernel.h | 10 ++++++++++
 arm_compute/core/NEON/kernels/NEMinMaxLayerKernel.h         |  9 +++++++++
 arm_compute/core/NEON/kernels/NEQuantizationLayerKernel.h   | 10 ++++++++++
 arm_compute/core/utils/misc/ShapeCalculator.h               | 11 +++++++++++
 arm_compute/runtime/CL/functions/CLDequantizationLayer.h    | 12 +++++++++++-
 arm_compute/runtime/CL/functions/CLQuantizationLayer.h      | 10 +++++++++-
 arm_compute/runtime/NEON/functions/NEDequantizationLayer.h  | 12 +++++++++++-
 arm_compute/runtime/NEON/functions/NEQuantizationLayer.h    | 10 +++++++++-
 11 files changed, 112 insertions(+), 7 deletions(-)

(limited to 'arm_compute')

diff --git a/arm_compute/core/CL/kernels/CLDequantizationLayerKernel.h b/arm_compute/core/CL/kernels/CLDequantizationLayerKernel.h
index 03112d282a..38aa63e98f 100644
--- a/arm_compute/core/CL/kernels/CLDequantizationLayerKernel.h
+++ b/arm_compute/core/CL/kernels/CLDequantizationLayerKernel.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -58,6 +58,16 @@ public:
      *                    The dimensions over the second must match the batched dimensions of the input tensor. Data type supported: F32.
      */
     void configure(const ICLTensor *input, ICLTensor *output, const ICLTensor *min_max);
+    /** Static function to check if given info will lead to a valid configuration of @ref CLDequantizationLayerKernel
+     *
+     * @param[in] input   Input tensor info. Data types supported: U8.
+     * @param[in] output  Output tensor info. Data types supported: F32.
+     * @param[in] min_max Info for the tensor with shape [2, batches] which stores the minimum and maximum value for each 3D input tensor.
+     *                    The dimensions over the second must match the batched dimensions of the input tensor. Data type supported: F32.
+     *
+     * @return a status
+     */
+    static Status validate(const ITensorInfo *input, const ITensorInfo *output, const ITensorInfo *min_max);

     // Inherited methods overridden:
     void run(const Window &window, cl::CommandQueue &queue) override;
diff --git a/arm_compute/core/CL/kernels/CLMinMaxLayerKernel.h b/arm_compute/core/CL/kernels/CLMinMaxLayerKernel.h
index a0eba4879b..0faa5c0bde 100644
--- a/arm_compute/core/CL/kernels/CLMinMaxLayerKernel.h
+++ b/arm_compute/core/CL/kernels/CLMinMaxLayerKernel.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -49,9 +49,18 @@ public:
      *
      * @param[in]  input  Input tensor with at least 3 dimensions. The dimensions over the third will be interpreted as batches.Data types supported: F32.
      * @param[out] output Output tensor with shape [2, batches, ...] which stores the minimum and maximum values for each 3D input tensor.
-     *                    The dimensions over the second must match the batched dimensions of the input tensor. Data types supported: F32. 
+     *                    The dimensions over the second must match the batched dimensions of the input tensor. Data types supported: F32.
      */
     void configure(const ICLTensor *input, ICLTensor *output);
+    /** Static function to check if given info will lead to a valid configuration of @ref CLMinMaxLayerKernel
+     *
+     * @param[in] input  Input tensor info. Data types supported: F32.
+     * @param[in] output Output tensor info with shape [2, batches, ...] which stores the minimum and maximum values for each 3D input tensor.
+     *                   The dimensions over the second must match the batched dimensions of the input tensor. Data types supported: F32.
+     *
+     * @return a status
+     */
+    static Status validate(const ITensorInfo *input, const ITensorInfo *output);

     /** Resets global minimum and maximum
      *
diff --git a/arm_compute/core/CL/kernels/CLQuantizationLayerKernel.h b/arm_compute/core/CL/kernels/CLQuantizationLayerKernel.h
index a24ddb1a3a..49d76087b5 100644
--- a/arm_compute/core/CL/kernels/CLQuantizationLayerKernel.h
+++ b/arm_compute/core/CL/kernels/CLQuantizationLayerKernel.h
@@ -57,6 +57,16 @@ public:
      *                    The dimensions over the second must match the batched dimensions of the input tensor. Data type supported: F32.
      */
     void configure(const ICLTensor *input, ICLTensor *output, ICLTensor *min_max);
+    /** Static function to check if given info will lead to a valid configuration of @ref CLQuantizationLayerKernel
+     *
+     * @param[in] input   Input tensor info. Data types supported: F32.
+     * @param[in] output  Output tensor info. Output data type must be U8.
+     * @param[in] min_max Info for the tensor with shape [2, batches] which stores the minimum and maximum value for each 3D input tensor.
+     *                    The dimensions over the second must match the batched dimensions of the input tensor. Data type supported: F32.
+     *
+     * @return a status
+     */
+    static Status validate(const ITensorInfo *input, const ITensorInfo *output, const ITensorInfo *min_max);

     // Inherited methods overridden:
     void run(const Window &window, cl::CommandQueue &queue) override;
diff --git a/arm_compute/core/NEON/kernels/NEDequantizationLayerKernel.h b/arm_compute/core/NEON/kernels/NEDequantizationLayerKernel.h
index 25383aa7dc..7ee2078e9e 100644
--- a/arm_compute/core/NEON/kernels/NEDequantizationLayerKernel.h
+++ b/arm_compute/core/NEON/kernels/NEDequantizationLayerKernel.h
@@ -62,6 +62,16 @@ public:
      *                    The dimensions over the second must match the batched dimensions of the input tensor. Data type supported: F32
      */
     void configure(const ITensor *input, ITensor *output, const ITensor *min_max);
+    /** Static function to check if given info will lead to a valid configuration of @ref NEDequantizationLayerKernel
+     *
+     * @param[in] input   Input tensor info. Data types supported: U8.
+     * @param[in] output  Output tensor info. Data types supported: F32.
+     * @param[in] min_max Info for the tensor with shape [2, batches] which stores the minimum and maximum value for each 3D input tensor.
+     *                    The dimensions over the second must match the batched dimensions of the input tensor. Data type supported: F32.
+     *
+     * @return a status
+     */
+    static Status validate(const ITensorInfo *input, const ITensorInfo *output, const ITensorInfo *min_max);

     // Inherited methods overridden:
     void run(const Window &window, const ThreadInfo &info) override;
diff --git a/arm_compute/core/NEON/kernels/NEMinMaxLayerKernel.h b/arm_compute/core/NEON/kernels/NEMinMaxLayerKernel.h
index 592b5941b5..cfc3ee5290 100644
--- a/arm_compute/core/NEON/kernels/NEMinMaxLayerKernel.h
+++ b/arm_compute/core/NEON/kernels/NEMinMaxLayerKernel.h
@@ -65,6 +65,15 @@ public:
      *                    The dimensions over the second must match the batched dimensions of the input tensor. Data types supported: F32
      */
     void configure(const ITensor *input, ITensor *output);
+    /** Static function to check if given info will lead to a valid configuration of @ref NEMinMaxLayerKernel
+     *
+     * @param[in] input  Input tensor info. Data types supported: F32.
+     * @param[in] output Output tensor info with shape [2, batches, ...] which stores the minimum and maximum values for each 3D input tensor.
+     *                   The dimensions over the second must match the batched dimensions of the input tensor. Data types supported: F32.
+     *
+     * @return a status
+     */
+    static Status validate(const ITensorInfo *input, const ITensorInfo *output);

     /** Resets global minimum and maximum. */
     void reset();
diff --git a/arm_compute/core/NEON/kernels/NEQuantizationLayerKernel.h b/arm_compute/core/NEON/kernels/NEQuantizationLayerKernel.h
index 9642ac52a7..e7cf0a8ca4 100644
--- a/arm_compute/core/NEON/kernels/NEQuantizationLayerKernel.h
+++ b/arm_compute/core/NEON/kernels/NEQuantizationLayerKernel.h
@@ -62,6 +62,16 @@ public:
      *                    The dimensions over the second must match the batched dimensions of the input tensor. Data type supported: F32
      */
     void configure(const ITensor *input, ITensor *output, const ITensor *min_max);
+    /** Static function to check if given info will lead to a valid configuration of @ref NEQuantizationLayerKernel
+     *
+     * @param[in] input   Input tensor info. Data types supported: F32.
+     * @param[in] output  Output tensor info. Data types supported: U8.
+     * @param[in] min_max Info for the tensor with shape [2, batches] which stores the minimum and maximum value for each 3D input tensor.
+     *                    The dimensions over the second must match the batched dimensions of the input tensor. Data type supported: F32.
+     *
+     * @return a status
+     */
+    static Status validate(const ITensorInfo *input, const ITensorInfo *output, const ITensorInfo *min_max);

     // Inherited methods overridden:
     void run(const Window &window, const ThreadInfo &info) override;
diff --git a/arm_compute/core/utils/misc/ShapeCalculator.h b/arm_compute/core/utils/misc/ShapeCalculator.h
index 9cb8023463..1e90927a93 100644
--- a/arm_compute/core/utils/misc/ShapeCalculator.h
+++ b/arm_compute/core/utils/misc/ShapeCalculator.h
@@ -249,6 +249,17 @@ inline TensorShape compute_deep_convolution_shape(const ITensorInfo &input, cons

     return output_shape;
 }
+
+inline TensorShape compute_min_max_shape(const ITensorInfo *input)
+{
+    TensorShape output_shape{ input->tensor_shape() };
+    output_shape.set(Window::DimX, 2);
+    output_shape.remove_dimension(1);
+    output_shape.remove_dimension(1);
+
+    return output_shape;
+}
+
 } // namespace shape_calculator
 } // namespace misc
 } // namespace arm_compute
diff --git a/arm_compute/runtime/CL/functions/CLDequantizationLayer.h b/arm_compute/runtime/CL/functions/CLDequantizationLayer.h
index 0ba182b475..efd28fc819 100644
--- a/arm_compute/runtime/CL/functions/CLDequantizationLayer.h
+++ b/arm_compute/runtime/CL/functions/CLDequantizationLayer.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -57,6 +57,16 @@ public:
      *                    The dimensions over the second must match the batched dimensions of the input tensor. Data type supported: F32.
      */
     void configure(const ICLTensor *input, ICLTensor *output, const ICLTensor *min_max);
+    /** Static function to check if given info will lead to a valid configuration of @ref CLDequantizationLayer
+     *
+     * @param[in] input   Input tensor info. Data types supported: U8.
+     * @param[in] output  Output tensor info. Data type supported: F32.
+     * @param[in] min_max Info for the tensor with shape [2, batches] which stores the minimum and maximum value for each 3D input tensor.
+     *                    The dimensions over the second must match the batched dimensions of the input tensor. Data type supported: F32.
+     *
+     * @return a status
+     */
+    static Status validate(const ITensorInfo *input, const ITensorInfo *output, const ITensorInfo *min_max);

     // Inherited methods overridden:
     void run() override;
diff --git a/arm_compute/runtime/CL/functions/CLQuantizationLayer.h b/arm_compute/runtime/CL/functions/CLQuantizationLayer.h
index b3c4da05d4..738187dfe7 100644
--- a/arm_compute/runtime/CL/functions/CLQuantizationLayer.h
+++ b/arm_compute/runtime/CL/functions/CLQuantizationLayer.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -53,6 +53,14 @@ public:
      * @param[out] output Destination tensor with the same dimensions of input. Output data type must be U8.
      */
     void configure(const ICLTensor *input, ICLTensor *output);
+    /** Static function to check if given info will lead to a valid configuration of @ref CLQuantizationLayer
+     *
+     * @param[in] input  Input tensor info. The dimensions over the third will be interpreted as batches. Data types supported: F32.
+     * @param[in] output Output tensor info. Output data type must be U8.
+     *
+     * @return a status
+     */
+    static Status validate(const ITensorInfo *input, const ITensorInfo *output);

     // Inherited methods overridden:
     void run() override;
diff --git a/arm_compute/runtime/NEON/functions/NEDequantizationLayer.h b/arm_compute/runtime/NEON/functions/NEDequantizationLayer.h
index 898586190e..90c454ef3e 100644
--- a/arm_compute/runtime/NEON/functions/NEDequantizationLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEDequantizationLayer.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -54,6 +54,16 @@ public:
      *                    The dimensions over the second must match the batched dimensions of the input tensor. Data type supported: F32
      */
     void configure(const ITensor *input, ITensor *output, const ITensor *min_max);
+    /** Static function to check if given info will lead to a valid configuration of @ref NEDequantizationLayer
+     *
+     * @param[in] input   Input tensor info. Data types supported: U8.
+     * @param[in] output  Output tensor info. Data type supported: F32.
+     * @param[in] min_max Info for the tensor with shape [2, batches] which stores the minimum and maximum value for each 3D input tensor.
+     *                    The dimensions over the second must match the batched dimensions of the input tensor. Data type supported: F32.
+     *
+     * @return a status
+     */
+    static Status validate(const ITensorInfo *input, const ITensorInfo *output, const ITensorInfo *min_max);

     // Inherited methods overridden:
     void run() override;
diff --git a/arm_compute/runtime/NEON/functions/NEQuantizationLayer.h b/arm_compute/runtime/NEON/functions/NEQuantizationLayer.h
index d91b4ad1ad..9cc1666b4c 100644
--- a/arm_compute/runtime/NEON/functions/NEQuantizationLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEQuantizationLayer.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -55,6 +55,14 @@ public:
      * @param[out] output Destination tensor with the same dimensions of input. Data types supported: U8
      */
     void configure(const ITensor *input, ITensor *output);
+    /** Static function to check if given info will lead to a valid configuration of @ref NEQuantizationLayer
+     *
+     * @param[in] input  Input tensor info. The dimensions over the third will be interpreted as batches. Data types supported: F32.
+     * @param[in] output Output tensor info. Data types supported: U8
+     *
+     * @return a status
+     */
+    static Status validate(const ITensorInfo *input, const ITensorInfo *output);

     // Inherited methods overridden:
     void run() override;
--
cgit v1.2.1
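
Usage note (illustrative, not part of the change): the validate() entry points added above take ITensorInfo descriptors, so a layer configuration can be checked before any tensor memory is allocated or configure() is called. In the sketch below only the validate() signatures come from the patched headers; the tensor shapes, the single-batch [2, batches] min/max info and the main() wrapper are assumptions made for the example.

// Illustrative sketch: check (de)quantization configurations up front.
// Only the validate() signatures are taken from this patch; shapes and
// the main() wrapper are example assumptions.
#include "arm_compute/core/Error.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/NEON/functions/NEDequantizationLayer.h"
#include "arm_compute/runtime/NEON/functions/NEQuantizationLayer.h"

#include <iostream>

int main()
{
    using namespace arm_compute;

    // 3D tensors: the dimensions over the third are interpreted as batches (one batch here).
    const TensorInfo fp32_info(TensorShape(16U, 16U, 4U), 1, DataType::F32);
    const TensorInfo u8_info(TensorShape(16U, 16U, 4U), 1, DataType::U8);
    // Min/max info with shape [2, batches] as described in the dequantization doc above.
    const TensorInfo min_max_info(TensorShape(2U, 1U), 1, DataType::F32);

    // Quantization: F32 -> U8.
    const Status q_status = NEQuantizationLayer::validate(&fp32_info, &u8_info);
    // Dequantization: U8 -> F32, using the [2, batches] min/max info.
    const Status dq_status = NEDequantizationLayer::validate(&u8_info, &fp32_info, &min_max_info);

    if(q_status.error_code() != ErrorCode::OK || dq_status.error_code() != ErrorCode::OK)
    {
        std::cerr << q_status.error_description() << "\n"
                  << dq_status.error_description() << std::endl;
        return 1;
    }
    return 0; // Both configurations were accepted; configure() can now be called.
}

The CL functions follow the same pattern through CLQuantizationLayer::validate and CLDequantizationLayer::validate, mirroring the NEON signatures above.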