From 55a687d5e2cf3434e4c2564ac73959dcc7163304 Mon Sep 17 00:00:00 2001
From: Georgios Pinitas
Date: Thu, 30 Jan 2020 12:00:23 +0000
Subject: COMPMID-2927: Add support for mixed precision in CLInstanceNormalizationLayer

Signed-off-by: Georgios Pinitas
Change-Id: I91482e2e4b723606aef76afef09a8277813e5d1b
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/2668
Comments-Addressed: Arm Jenkins
Tested-by: Arm Jenkins
Reviewed-by: Giorgio Arena
---
 .../kernels/CLInstanceNormalizationLayerKernel.h | 30 ++++++++++------------
 1 file changed, 13 insertions(+), 17 deletions(-)

(limited to 'arm_compute/core/CL/kernels/CLInstanceNormalizationLayerKernel.h')

diff --git a/arm_compute/core/CL/kernels/CLInstanceNormalizationLayerKernel.h b/arm_compute/core/CL/kernels/CLInstanceNormalizationLayerKernel.h
index cf726d8cdd..9982cc2f1c 100644
--- a/arm_compute/core/CL/kernels/CLInstanceNormalizationLayerKernel.h
+++ b/arm_compute/core/CL/kernels/CLInstanceNormalizationLayerKernel.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019 ARM Limited.
+ * Copyright (c) 2019-2020 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -26,8 +26,11 @@
 
 #include "arm_compute/core/CL/ICLKernel.h"
 
+#include "arm_compute/core/KernelDescriptors.h"
+
 namespace arm_compute
 {
+// Forward declarations
 class ICLTensor;
 
 /** Interface for performing an instance normalization */
@@ -49,26 +52,22 @@ public:
 
     /** Set the input and output tensors.
      *
-     * @param[in, out] input   Source tensor. Data types supported: F16/F32. Data layout supported: NCHW, NHWC
-     *                         In case of @p output tensor = nullptr this tensor will store the result of the normalization.
-     * @param[out]     output  Destination tensor. Data types and data layouts supported: same as @p input.
-     * @param[in]      gamma   (Optional) The scale scalar value applied to the normalized tensor. Defaults to 1.0
-     * @param[in]      beta    (Optional) The offset scalar value applied to the normalized tensor. Defaults to 0.0
-     * @param[in]      epsilon (Optional) Lower bound value for the normalization. Defaults to 1e-12
+     * @param[in, out] input  Source tensor. Data types supported: F16/F32. Data layout supported: NCHW, NHWC
+     *                        In case of @p output tensor = nullptr this tensor will store the result of the normalization.
+     * @param[out]     output Destination tensor. Data types and data layouts supported: same as @p input.
+     * @param[in]      info   Kernel meta-data descriptor
      */
-    void configure(ICLTensor *input, ICLTensor *output, float gamma = 1.0f, float beta = 0.0f, float epsilon = 1e-12f);
+    void configure(ICLTensor *input, ICLTensor *output, const InstanceNormalizationLayerKernelInfo &info);
 
     /** Static function to check if given info will lead to a valid configuration of @ref CLInstanceNormalizationLayer.
      *
-     * @param[in] input   Source tensor info. Data types supported: F16/F32. Data layout supported: NHWC, NCHW
-     * @param[in] output  Destination tensor info. Data types and data layouts supported: same as @p input.
-     * @param[in] gamma   (Optional) The scale scalar value applied to the normalized tensor. Defaults to 1.0
-     * @param[in] beta    (Optional) The offset scalar value applied to the normalized tensor. Defaults to 0.0
-     * @param[in] epsilon (Optional) Lower bound value for the normalization. Defaults to 1e-12
+     * @param[in] input  Source tensor info. Data types supported: F16/F32. Data layout supported: NHWC, NCHW
+     * @param[in] output Destination tensor info. Data types and data layouts supported: same as @p input.
+     * @param[in] info   Kernel meta-data descriptor
      *
      * @return a status
      */
-    static Status validate(const ITensorInfo *input, const ITensorInfo *output, float gamma = 1.0f, float beta = 0.0f, float epsilon = 1e-12f);
+    static Status validate(const ITensorInfo *input, const ITensorInfo *output, const InstanceNormalizationLayerKernelInfo &info);
 
     // Inherited methods overridden:
     void run(const Window &window, cl::CommandQueue &queue) override;
@@ -76,9 +75,6 @@ public:
 private:
     ICLTensor *_input;
     ICLTensor *_output;
-    float      _gamma;
-    float      _beta;
-    float      _epsilon;
     bool       _run_in_place;
 };
 } // namespace arm_compute
-- 
cgit v1.2.1
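
Usage note (not part of the patch): the change replaces the separate gamma/beta/epsilon
arguments with a single InstanceNormalizationLayerKernelInfo descriptor from
arm_compute/core/KernelDescriptors.h, which is what allows a mixed-precision flag to be
passed without further signature churn. Below is a minimal caller sketch against the new
configure()/validate() signatures shown in the diff. The descriptor's constructor
arguments (gamma, beta, epsilon, use_mixed_precision) and the helper function name are
assumptions for illustration; check KernelDescriptors.h in the matching library version.

    // Hedged sketch of configuring the kernel via the new descriptor-based API.
    #include "arm_compute/core/CL/kernels/CLInstanceNormalizationLayerKernel.h"
    #include "arm_compute/core/Error.h"
    #include "arm_compute/core/KernelDescriptors.h"
    #include "arm_compute/runtime/CL/CLTensor.h"

    using namespace arm_compute;

    // Assumed helper name; input/output are expected to be already initialized F16 or F32 tensors.
    void configure_instance_norm(CLTensor &input, CLTensor &output)
    {
        // gamma = 1.0, beta = 0.0, epsilon = 1e-12; the fourth argument requesting F32
        // accumulation for F16 data is an assumption about the descriptor's constructor.
        const InstanceNormalizationLayerKernelInfo info(1.0f, 0.0f, 1e-12f, /* use_mixed_precision */ true);

        // Check the configuration first, mirroring the static validate() added by this patch.
        ARM_COMPUTE_ERROR_THROW_ON(
            CLInstanceNormalizationLayerKernel::validate(input.info(), output.info(), info));

        CLInstanceNormalizationLayerKernel kernel;
        kernel.configure(&input, &output, info);
    }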