diff options
Diffstat (limited to 'arm_compute/runtime/CL/functions/CLInstanceNormalizationLayer.h')
-rw-r--r-- | arm_compute/runtime/CL/functions/CLInstanceNormalizationLayer.h | 12 |
1 file changed, 12 insertions, 0 deletions
diff --git a/arm_compute/runtime/CL/functions/CLInstanceNormalizationLayer.h b/arm_compute/runtime/CL/functions/CLInstanceNormalizationLayer.h index ddd4b12eca..4614b90c70 100644 --- a/arm_compute/runtime/CL/functions/CLInstanceNormalizationLayer.h +++ b/arm_compute/runtime/CL/functions/CLInstanceNormalizationLayer.h @@ -51,6 +51,18 @@ public: * @param[in] use_mixed_precision (Optional) Use mixed precision in case of FP16 execution */ void configure(ICLTensor *input, ICLTensor *output, float gamma = 1.0f, float beta = 0.0f, float epsilon = 1e-12f, bool use_mixed_precision = true); + /** Set the input and output tensors. + * + * @param[in] compile_context The compile context to be used. + * @param[in, out] input Source tensor. In case of @p output tensor = nullptr this tensor will store the result of the normalization. + * Data types supported: F16/F32. Data layout supported: NHWC, NCHW + * @param[out] output Destination tensor. Data types and data layouts supported: same as @p input. + * @param[in] gamma (Optional) The scale scalar value applied to the normalized tensor. Defaults to 1.0 + * @param[in] beta (Optional) The offset scalar value applied to the normalized tensor. Defaults to 0.0 + * @param[in] epsilon (Optional) Lower bound value for the normalization. Defaults to 1e-12 + * @param[in] use_mixed_precision (Optional) Use mixed precision in case of FP16 execution + */ + void configure(const CLCompileContext &compile_context, ICLTensor *input, ICLTensor *output, float gamma = 1.0f, float beta = 0.0f, float epsilon = 1e-12f, bool use_mixed_precision = true); /** Static function to check if given info will lead to a valid configuration of @ref CLInstanceNormalizationLayer. * |