diff options
author | Vidhya Sudhan Loganathan <vidhyasudhan.loganathan@arm.com> | 2018-07-04 09:34:00 +0100 |
---|---|---|
committer | Anthony Barbier <anthony.barbier@arm.com> | 2018-11-02 16:54:10 +0000 |
commit | 7485d5a62685cb745ab50e970adb722cb71557ac (patch) | |
tree | ba01b99ca466c93edc9a3f8c1e34394ff84be060 /arm_compute/core/NEON/kernels/NEBatchNormalizationLayerKernel.h | |
parent | 014333d73883c3872e458cedda5ccef586a7ccd4 (diff) | |
download | ComputeLibrary-7485d5a62685cb745ab50e970adb722cb71557ac.tar.gz |
COMPMID-970 : Remove QS8 / QS16 support
Removed fixed point related code.
Change-Id: I487acf138dace3b0450e0d72ca7071eaec254566
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/137678
Tested-by: Jenkins <bsgcomp@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
Diffstat (limited to 'arm_compute/core/NEON/kernels/NEBatchNormalizationLayerKernel.h')
-rw-r--r-- | arm_compute/core/NEON/kernels/NEBatchNormalizationLayerKernel.h | 21 |
1 file changed, 3 insertions(+), 18 deletions(-)
diff --git a/arm_compute/core/NEON/kernels/NEBatchNormalizationLayerKernel.h b/arm_compute/core/NEON/kernels/NEBatchNormalizationLayerKernel.h index 2d33f87dfa..2a540c151b 100644 --- a/arm_compute/core/NEON/kernels/NEBatchNormalizationLayerKernel.h +++ b/arm_compute/core/NEON/kernels/NEBatchNormalizationLayerKernel.h @@ -57,7 +57,7 @@ public: * * @param[in, out] input Source tensor. In case of @p output tensor = nullptr, this tensor will store the result. * 3 lower dimensions represent a single input with dimensions [width, height, FM]. - * The rest are optional and used for representing batches. Data types supported: QS8/QS16/F16/F32. + * The rest are optional and used for representing batches. Data types supported: F16/F32. * @param[out] output Destination tensor. Output will have the same number of dimensions as input. Data type supported: same as @p input * @param[in] mean Mean values tensor. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input * @param[in] var Variance values tensor. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input @@ -72,7 +72,7 @@ public: * * @param[in] input Source tensor info. In case of @p output tensor = nullptr, this tensor will store the result. * 3 lower dimensions represent a single input with dimensions [width, height, FM]. - * The rest are optional and used for representing batches. Data types supported: QS8/QS16/F16/F32. + * The rest are optional and used for representing batches. Data types supported: F16/F32. * @param[in] output Destination tensor info. Output will have the same number of dimensions as input. Data type supported: same as @p input * @param[in] mean Mean values tensor info. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input * @param[in] var Variance values tensor info. 1 dimension with size equal to the feature maps [FM]. 
Data types supported: Same as @p input @@ -96,22 +96,7 @@ private: void configure_non_fused(); /** Configure execution function in case of fused activation **/ void configure_fused(); - /** Template function to run batch normalization on 8-bit fixed point - * - * @tparam fused_activation Boolean that flags if its a fused activation or not - * - * @param[in] window Region on which to execute the kernel. (Must be a valid region of the window returned by window()). - */ - template <bool fused_activation> - void batch_normalization_qs8(const Window &window); - /** Template function to run batch normalization on 16-bit fixed point - * - * @tparam fused_activation Boolean that flags if its a fused activation or not - * - * @param[in] window Region on which to execute the kernel. (Must be a valid region of the window returned by window()). - */ - template <bool fused_activation> - void batch_normalization_qs16(const Window &window); + /** Template function to run batch normalization on fp16 * * @tparam fused_activation Boolean that flags if its a fused activation or not |