From 60954c671ffdc3422bbdb728fc022eb6896c1e17 Mon Sep 17 00:00:00 2001
From: Alex Gilday
Date: Mon, 5 Mar 2018 16:22:48 +0000
Subject: COMPMID-754: Add validation to (De)QuantizationLayers

Change-Id: If8fa1277e8dc5b8e28a8bcad4ff9fc672b00ce9a
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/123275
Reviewed-by: Anthony Barbier
Tested-by: Jenkins
Reviewed-by: Michalis Spyrou
---
 arm_compute/runtime/CL/functions/CLDequantizationLayer.h   | 12 +++++++++++-
 arm_compute/runtime/CL/functions/CLQuantizationLayer.h     | 10 +++++++++-
 arm_compute/runtime/NEON/functions/NEDequantizationLayer.h | 12 +++++++++++-
 arm_compute/runtime/NEON/functions/NEQuantizationLayer.h   | 10 +++++++++-
 4 files changed, 40 insertions(+), 4 deletions(-)

(limited to 'arm_compute/runtime')

diff --git a/arm_compute/runtime/CL/functions/CLDequantizationLayer.h b/arm_compute/runtime/CL/functions/CLDequantizationLayer.h
index 0ba182b475..efd28fc819 100644
--- a/arm_compute/runtime/CL/functions/CLDequantizationLayer.h
+++ b/arm_compute/runtime/CL/functions/CLDequantizationLayer.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -57,6 +57,16 @@ public:
      *                    The dimensions over the second must match the batched dimensions of the input tensor. Data type supported: F32.
      */
     void configure(const ICLTensor *input, ICLTensor *output, const ICLTensor *min_max);
+    /** Static function to check if given info will lead to a valid configuration of @ref CLDequantizationLayer
+     *
+     * @param[in] input   Input tensor info. Data types supported: U8.
+     * @param[in] output  Output tensor info. Data type supported: F32.
+     * @param[in] min_max Info for the tensor with shape [2, batches] which stores the minimum and maximum value for each 3D input tensor.
+     *                    The dimensions over the second must match the batched dimensions of the input tensor. Data type supported: F32.
+     *
+     * @return a status
+     */
+    static Status validate(const ITensorInfo *input, const ITensorInfo *output, const ITensorInfo *min_max);
 
     // Inherited methods overridden:
     void run() override;
diff --git a/arm_compute/runtime/CL/functions/CLQuantizationLayer.h b/arm_compute/runtime/CL/functions/CLQuantizationLayer.h
index b3c4da05d4..738187dfe7 100644
--- a/arm_compute/runtime/CL/functions/CLQuantizationLayer.h
+++ b/arm_compute/runtime/CL/functions/CLQuantizationLayer.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -53,6 +53,14 @@ public:
      * @param[out] output Destination tensor with the same dimensions of input. Output data type must be U8.
      */
     void configure(const ICLTensor *input, ICLTensor *output);
+    /** Static function to check if given info will lead to a valid configuration of @ref CLQuantizationLayer
+     *
+     * @param[in] input  Input tensor info. The dimensions over the third will be interpreted as batches. Data types supported: F32.
+     * @param[in] output Output tensor info. Output data type must be U8.
+     *
+     * @return a status
+     */
+    static Status validate(const ITensorInfo *input, const ITensorInfo *output);
 
     // Inherited methods overridden:
     void run() override;
diff --git a/arm_compute/runtime/NEON/functions/NEDequantizationLayer.h b/arm_compute/runtime/NEON/functions/NEDequantizationLayer.h
index 898586190e..90c454ef3e 100644
--- a/arm_compute/runtime/NEON/functions/NEDequantizationLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEDequantizationLayer.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -54,6 +54,16 @@ public:
      *                    The dimensions over the second must match the batched dimensions of the input tensor. Data type supported: F32
      */
     void configure(const ITensor *input, ITensor *output, const ITensor *min_max);
+    /** Static function to check if given info will lead to a valid configuration of @ref NEDequantizationLayer
+     *
+     * @param[in] input   Input tensor info. Data types supported: U8.
+     * @param[in] output  Output tensor info. Data type supported: F32.
+     * @param[in] min_max Info for the tensor with shape [2, batches] which stores the minimum and maximum value for each 3D input tensor.
+     *                    The dimensions over the second must match the batched dimensions of the input tensor. Data type supported: F32.
+     *
+     * @return a status
+     */
+    static Status validate(const ITensorInfo *input, const ITensorInfo *output, const ITensorInfo *min_max);
 
     // Inherited methods overridden:
     void run() override;
diff --git a/arm_compute/runtime/NEON/functions/NEQuantizationLayer.h b/arm_compute/runtime/NEON/functions/NEQuantizationLayer.h
index d91b4ad1ad..9cc1666b4c 100644
--- a/arm_compute/runtime/NEON/functions/NEQuantizationLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEQuantizationLayer.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -55,6 +55,14 @@ public:
      * @param[out] output Destination tensor with the same dimensions of input. Data types supported: U8
      */
     void configure(const ITensor *input, ITensor *output);
+    /** Static function to check if given info will lead to a valid configuration of @ref NEQuantizationLayer
+     *
+     * @param[in] input  Input tensor info. The dimensions over the third will be interpreted as batches. Data types supported: F32.
+     * @param[in] output Output tensor info. Data types supported: U8
+     *
+     * @return a status
+     */
+    static Status validate(const ITensorInfo *input, const ITensorInfo *output);
 
     // Inherited methods overridden:
     void run() override;
-- 
cgit v1.2.1
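
Usage note (not part of the patch): a minimal sketch of how the new validate() entry points might be called before configure(). It assumes the NEON variants and the arm_compute TensorInfo/TensorShape/Status APIs of this library version; the shapes and file layout below are arbitrary illustrative choices, and the CL functions follow the same pattern with the same ITensorInfo arguments.

#include "arm_compute/core/Error.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/NEON/functions/NEDequantizationLayer.h"
#include "arm_compute/runtime/NEON/functions/NEQuantizationLayer.h"

#include <iostream>

int main()
{
    using namespace arm_compute;

    // Quantization: F32 input, U8 output with the same dimensions (dimensions over the third treated as batches).
    const TensorInfo q_input(TensorShape(16U, 16U, 4U, 2U), 1, DataType::F32);
    const TensorInfo q_output(TensorShape(16U, 16U, 4U, 2U), 1, DataType::U8);

    // Dequantization: U8 input, F32 output, plus a [2, batches] F32 tensor holding the per-batch min/max values.
    const TensorInfo dq_input(TensorShape(16U, 16U, 4U, 2U), 1, DataType::U8);
    const TensorInfo dq_output(TensorShape(16U, 16U, 4U, 2U), 1, DataType::F32);
    const TensorInfo min_max(TensorShape(2U, 2U), 1, DataType::F32);

    // The static validate() functions only inspect the tensor infos; nothing is allocated or configured here.
    const Status q_status  = NEQuantizationLayer::validate(&q_input, &q_output);
    const Status dq_status = NEDequantizationLayer::validate(&dq_input, &dq_output, &min_max);

    if(q_status.error_code() != ErrorCode::OK)
    {
        std::cerr << "Quantization rejected: " << q_status.error_description() << std::endl;
    }
    if(dq_status.error_code() != ErrorCode::OK)
    {
        std::cerr << "Dequantization rejected: " << dq_status.error_description() << std::endl;
    }
    return 0;
}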