author    Michalis Spyrou <michalis.spyrou@arm.com>    2018-11-20 18:38:29 +0000
committer Georgios Pinitas <georgios.pinitas@arm.com>  2018-11-22 13:22:24 +0000
commit    2897e61e8fe04aaf95540f4525c3dd3f7f46ebfa (patch)
tree      a887648225c485ddf06363170d6111290b6111eb /arm_compute/core
parent    303f0dbebf631b3db00d9d64e71018abbbe9d4fe (diff)
COMPMID-1645 NEL2Normalization for FP32/FP16 & NHWC
Change-Id: I29e35024e29781a6b943b568abec9c73649215e6
Diffstat (limited to 'arm_compute/core')
-rw-r--r--  arm_compute/core/NEON/kernels/NEL2NormalizeLayerKernel.h |  8
-rw-r--r--  arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h    |  1
-rw-r--r--  arm_compute/core/NEON/wrapper/intrinsics/invsqrt.h       | 53
3 files changed, 58 insertions(+), 4 deletions(-)
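
For context, a hedged scalar sketch of what the L2-normalization kernel computes per element along the chosen axis (illustrative code, not taken from this patch; the function and parameter names are made up):

    // Per-element reference: x / sqrt(max(sum_of_squares, epsilon)), where
    // sum_of_squares is the reduction of x*x along the selected axis and
    // epsilon is the lower bound documented on the kernel below.
    #include <algorithm>
    #include <cmath>

    inline float l2_normalize_element(float x, float sum_of_squares, float epsilon)
    {
        return x / std::sqrt(std::max(sum_of_squares, epsilon));
    }

The NEON kernel vectorises this scaling, which is what the new wrapper::vinvsqrt intrinsic added in this patch is for.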
diff --git a/arm_compute/core/NEON/kernels/NEL2NormalizeLayerKernel.h b/arm_compute/core/NEON/kernels/NEL2NormalizeLayerKernel.h
index 0de07fdab7..f893c4ae6b 100644
--- a/arm_compute/core/NEON/kernels/NEL2NormalizeLayerKernel.h
+++ b/arm_compute/core/NEON/kernels/NEL2NormalizeLayerKernel.h
@@ -52,24 +52,24 @@ public:
~NEL2NormalizeLayerKernel() = default;
/** Set the input and output tensors.
*
- * @param[in] input Source tensor. Data types supported: F32. Data layouts supported: NCHW.
+ * @param[in] input Source tensor. Data types supported: F16/F32.
* @param[in] sum Sum values tensor. Data types supported: same as @p input.
* Sum will have the same number of dimensions as input.
* @param[out] output Destination tensor. Data types and data layouts supported: same as @p input.
* Output will have the same number of dimensions as input.
- * @param[in] axis Dimension along which to reduce. Supported reduction axis : 0
+ * @param[in] axis Dimension along which to reduce. Supported reduction axis : 0, 1, 2
* @param[in] epsilon Lower bound value for the normalization.
*/
void configure(const ITensor *input, const ITensor *sum, ITensor *output, unsigned int axis, float epsilon);
/** Static function to check if given info will lead to a valid configuration of @ref NEL2NormalizeLayerKernel.
*
- * @param[in] input Source tensor info. Data types supported: F32. Data layouts supported: NCHW.
+ * @param[in] input Source tensor info. Data types supported: F16/F32.
* @param[in] sum Sum values tensor info. Data types supported: same as @p input.
* Sum will have the same number of dimensions as input.
* @param[in] output Destination tensor info. Data types and data layouts supported: same as @p input.
* Output will have the same number of dimensions as input.
- * @param[in] axis Dimension along which to reduce. Supported reduction axis : 0
+ * @param[in] axis Dimension along which to reduce. Supported reduction axis : 0, 1, 2
* @param[in] epsilon Lower bound value for the normalization.
*
* @return a status
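
A hedged caller-side sketch of the updated interface (not part of the patch; the tensor variables are assumed to exist and the usual arm_compute validate/configure signatures are assumed). With this change the kernel accepts F16 as well as F32 data and a reduction axis of 0, 1 or 2:

    // `input`, `sum` and `output` are ITensor* owned by the caller.
    const unsigned int axis    = 1;       // previously only axis 0 was supported
    const float        epsilon = 1e-12f;  // lower bound for the normalization

    Status status = NEL2NormalizeLayerKernel::validate(input->info(), sum->info(),
                                                       output->info(), axis, epsilon);
    ARM_COMPUTE_ERROR_THROW_ON(status);

    NEL2NormalizeLayerKernel kernel;
    kernel.configure(input, sum, output, axis, epsilon);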
diff --git a/arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h b/arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h
index 2e6fd75005..7ea0aba565 100644
--- a/arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h
+++ b/arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h
@@ -32,6 +32,7 @@
#include "arm_compute/core/NEON/wrapper/intrinsics/getlane.h"
#include "arm_compute/core/NEON/wrapper/intrinsics/getlow.h"
#include "arm_compute/core/NEON/wrapper/intrinsics/inv.h"
+#include "arm_compute/core/NEON/wrapper/intrinsics/invsqrt.h"
#include "arm_compute/core/NEON/wrapper/intrinsics/load.h"
#include "arm_compute/core/NEON/wrapper/intrinsics/max.h"
#include "arm_compute/core/NEON/wrapper/intrinsics/min.h"
diff --git a/arm_compute/core/NEON/wrapper/intrinsics/invsqrt.h b/arm_compute/core/NEON/wrapper/intrinsics/invsqrt.h
new file mode 100644
index 0000000000..0bbf49b5c0
--- /dev/null
+++ b/arm_compute/core/NEON/wrapper/intrinsics/invsqrt.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_WRAPPER_INVSQRT_H__
+#define __ARM_COMPUTE_WRAPPER_INVSQRT_H__
+
+#include "arm_compute/core/NEON/NEMath.h"
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VINVSQRT_IMPL(stype, vtype, prefix, postfix) \
+ inline vtype vinvsqrt(const vtype &a) \
+ { \
+ return prefix##_##postfix(a); \
+ }
+
+VINVSQRT_IMPL(float, float32x2_t, vinvsqrt, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VINVSQRT_IMPL(float16_t, float16x4_t, vinvsqrt, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+VINVSQRT_IMPL(float, float32x4_t, vinvsqrtq, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VINVSQRT_IMPL(float16_t, float16x8_t, vinvsqrtq, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+#undef VINVSQRT_IMPL
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_WRAPPER_INVSQRT_H__ */
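
A hedged usage sketch of the new wrapper (not part of this patch): wrapper::vinvsqrt resolves to the vinvsqrt_f32 / vinvsqrtq_f32 helpers from NEMath.h (and to the F16 variants when __ARM_FEATURE_FP16_VECTOR_ARITHMETIC is defined), so a kernel can scale a vector by the inverse square root of its sum of squares without spelling out the per-type intrinsic. The helper name below is illustrative:

    #include "arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h"
    #include <arm_neon.h>

    // Illustrative helper: multiply four lanes by 1/sqrt(sum_of_squares).
    inline float32x4_t scale_by_inv_l2_norm(float32x4_t v, float32x4_t sum_of_squares)
    {
        const float32x4_t inv_norm = arm_compute::wrapper::vinvsqrt(sum_of_squares);
        return vmulq_f32(v, inv_norm);
    }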