about summary refs log tree commit diff
path: root/arm_compute/core/NEON/kernels
diff options
context:
space:
mode:
Diffstat (limited to 'arm_compute/core/NEON/kernels')
-rw-r--r-- arm_compute/core/NEON/kernels/NENormalizationLayerKernel.h | 4
-rw-r--r-- arm_compute/core/NEON/kernels/NEPixelWiseMultiplicationKernel.h | 6
2 files changed, 5 insertions, 5 deletions
diff --git a/arm_compute/core/NEON/kernels/NENormalizationLayerKernel.h b/arm_compute/core/NEON/kernels/NENormalizationLayerKernel.h
index d4e36d5ff1..b1bc594e4c 100644
--- a/arm_compute/core/NEON/kernels/NENormalizationLayerKernel.h
+++ b/arm_compute/core/NEON/kernels/NENormalizationLayerKernel.h
@@ -73,8 +73,8 @@ private:
*
* @param[in] window Region on which to execute the kernel.
*/
- template <unsigned int dim, bool do_2D_norm>
- void normalize(const Window &window);
+ template <DataType dt, unsigned int dim, bool do_2D_norm>
+ void normalize_float(const Window &window);
/** Function to perform normalization for fixed-point values depending on
* the given template dimension. The second template parameter specifies
diff --git a/arm_compute/core/NEON/kernels/NEPixelWiseMultiplicationKernel.h b/arm_compute/core/NEON/kernels/NEPixelWiseMultiplicationKernel.h
index 7e402cd220..433a20e48e 100644
--- a/arm_compute/core/NEON/kernels/NEPixelWiseMultiplicationKernel.h
+++ b/arm_compute/core/NEON/kernels/NEPixelWiseMultiplicationKernel.h
@@ -52,9 +52,9 @@ public:
* @note For @p scale equal to 1/255 only round to nearest even (implemented as round half up) is supported.
* For all other scale values only round to zero (implemented as round towards minus infinity) is supported.
*
- * @param[in] input1 An input tensor. Data types supported: U8/QS8/S16/F32.
- * @param[in] input2 An input tensor. Data types supported: U8/QS8/S16/F32.
- * @param[out] output The output tensor. Data types supported: U8 (Only if both inputs are U8) /S16/F32.
+ * @param[in] input1 An input tensor. Data types supported: U8/QS8/S16/F16/F32.
+ * @param[in] input2 An input tensor. Data types supported: U8/QS8/S16/F16/F32.
+ * @param[out] output The output tensor. Data types supported: U8 (Only if both inputs are U8) /S16/F16/F32.
* @param[in] scale Scale to apply after multiplication.
* Scale must be positive and its value must be either 1/255 or 1/2^n where n is between 0 and 15.
* @param[in] overflow_policy Overflow policy.