From 0c71d0ba75a11720e39e2a7163e993d51350683d Mon Sep 17 00:00:00 2001
From: Michalis Spyrou
Date: Thu, 22 Nov 2018 11:22:18 +0000
Subject: COMPMID-1647 NENormalizationLayer IN_MAP_2D support for NHWC for FP32/FP16

Change-Id: Id74cc7ba8e5cabee6acd3798d4779f88b1f00a9b
---
 .../core/NEON/kernels/NENormalizationLayerKernel.h | 14 +++----
 .../core/NEON/wrapper/intrinsics/intrinsics.h      |  1 +
 arm_compute/core/NEON/wrapper/intrinsics/mla.h     | 13 ++++++
 arm_compute/core/NEON/wrapper/intrinsics/pow.h     | 48 ++++++++++++++++++++++
 .../runtime/NEON/functions/NENormalizationLayer.h  |  8 ++--
 5 files changed, 73 insertions(+), 11 deletions(-)
 create mode 100644 arm_compute/core/NEON/wrapper/intrinsics/pow.h

diff --git a/arm_compute/core/NEON/kernels/NENormalizationLayerKernel.h b/arm_compute/core/NEON/kernels/NENormalizationLayerKernel.h
index 92086437a6..533335f9af 100644
--- a/arm_compute/core/NEON/kernels/NENormalizationLayerKernel.h
+++ b/arm_compute/core/NEON/kernels/NENormalizationLayerKernel.h
@@ -54,20 +54,20 @@ public:
     /** Set the input and output tensors.
      *
      * @param[in]  input         Source tensor. 3 lower dims represent a single input with dimensions [width, height, IFM],
-     *                           and an optional 4th dimension for batch of inputs. Data types supported: FP16/F32.
+     *                           and an optional 4th dimension for batch of inputs. Data types supported: FP16/F32. Data layouts supported: NCHW/NHWC.
      * @param[in]  input_squared Source with each element has been squared. 3 lower dims represent a single input with dimensions [width, height, IFM],
-     *                           Data type supported: same as @p input
-     * @param[out] output        Destination tensor. Output will have the same number of dimensions as input. Data type supported: same as @p input
+     *                           Data type and layout supported: same as @p input.
+     * @param[out] output        Destination tensor. Output will have the same number of dimensions as input. Data type and layout supported: same as @p input.
      * @param[in]  norm_info     Normalization layer information like the normalization type, normalization size and other parameters.
      */
     void configure(const ITensor *input, const ITensor *input_squared, ITensor *output, NormalizationLayerInfo norm_info);
     /** Static function to check if given info will lead to a valid configuration of @ref NENormalizationLayerKernel
      *
      * @param[in] input         Source tensor. 3 lower dims represent a single input with dimensions [width, height, IFM],
-     *                          and an optional 4th dimension for batch of inputs. Data types supported: FP16/F32.
+     *                          and an optional 4th dimension for batch of inputs. Data types supported: FP16/F32. Data layouts supported: NCHW/NHWC.
      * @param[in] input_squared Source with each element has been squared. 3 lower dims represent a single input with dimensions [width, height, IFM],
-     *                          Data type supported: same as @p input
-     * @param[in] output        Destination tensor. Output will have the same number of dimensions as input. Data type supported: same as @p input
+     *                          Data type and layout supported: same as @p input.
+     * @param[in] output        Destination tensor. Output will have the same number of dimensions as input. Data type and layout supported: same as @p input.
      * @param[in] norm_info     Normalization layer information like the normalization type, normalization size and other parameters.
      *
      * @return a status
@@ -89,7 +89,7 @@ private:
      *
      * @param[in] window Region on which to execute the kernel.
      */
-    template
+    template
     void normalize_float(const Window &window);
 
     /** Common signature for all the specialised normalization functions
diff --git a/arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h b/arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h
index 7ea0aba565..77787afcf4 100644
--- a/arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h
+++ b/arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h
@@ -42,6 +42,7 @@
 #include "arm_compute/core/NEON/wrapper/intrinsics/mul.h"
 #include "arm_compute/core/NEON/wrapper/intrinsics/neg.h"
 #include "arm_compute/core/NEON/wrapper/intrinsics/padd.h"
+#include "arm_compute/core/NEON/wrapper/intrinsics/pow.h"
 #include "arm_compute/core/NEON/wrapper/intrinsics/store.h"
 
 #endif /* __ARM_COMPUTE_WRAPPER_INTRINSICS_H__ */
diff --git a/arm_compute/core/NEON/wrapper/intrinsics/mla.h b/arm_compute/core/NEON/wrapper/intrinsics/mla.h
index 32a650b57f..db6d7b957a 100644
--- a/arm_compute/core/NEON/wrapper/intrinsics/mla.h
+++ b/arm_compute/core/NEON/wrapper/intrinsics/mla.h
@@ -35,6 +35,13 @@ namespace wrapper
     { \
         return prefix##_##postfix(a, b, c); \
     }
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+#define VMLA_IMPL2(stype, vtype, prefix1, prefix2, postfix) \
+    inline vtype vmla(const vtype &a, const vtype &b, const vtype &c) \
+    { \
+        return prefix1##_##postfix(a, prefix2##_##postfix(b, c)); \
+    }
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
 
 VMLA_IMPL(uint8x8_t, uint8x8_t, vmla, u8)
 VMLA_IMPL(int8x8_t, int8x8_t, vmla, s8)
@@ -43,6 +50,9 @@ VMLA_IMPL(int16x4_t, int16x4_t, vmla, s16)
 VMLA_IMPL(uint32x2_t, uint32x2_t, vmla, u32)
 VMLA_IMPL(int32x2_t, int32x2_t, vmla, s32)
 VMLA_IMPL(float32x2_t, float32x2_t, vmla, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VMLA_IMPL2(float16x4_t, float16x4_t, vadd, vmul, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
 
 VMLA_IMPL(uint8x16_t, uint8x16_t, vmlaq, u8)
 VMLA_IMPL(int8x16_t, int8x16_t, vmlaq, s8)
@@ -51,6 +61,9 @@ VMLA_IMPL(int16x8_t, int16x8_t, vmlaq, s16)
 VMLA_IMPL(uint32x4_t, uint32x4_t, vmlaq, u32)
 VMLA_IMPL(int32x4_t, int32x4_t, vmlaq, s32)
 VMLA_IMPL(float32x4_t, float32x4_t, vmlaq, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VMLA_IMPL2(float16x8_t, float16x8_t, vaddq, vmulq, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
 
 #undef VMLA_IMPL
 } // namespace wrapper
diff --git a/arm_compute/core/NEON/wrapper/intrinsics/pow.h b/arm_compute/core/NEON/wrapper/intrinsics/pow.h
new file mode 100644
index 0000000000..865df416ee
--- /dev/null
+++ b/arm_compute/core/NEON/wrapper/intrinsics/pow.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_WRAPPER_POW_H__
+#define __ARM_COMPUTE_WRAPPER_POW_H__
+
+#include "arm_compute/core/NEON/NEMath.h"
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VPOW_IMPL(vtype, prefix, postfix) \
+    inline vtype vpow(const vtype &a, const vtype &b) \
+    { \
+        return prefix##_##postfix(a, b); \
+    }
+
+VPOW_IMPL(float32x4_t, vpowq, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VPOW_IMPL(float16x8_t, vpowq, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+#undef VPOW_IMPL
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_WRAPPER_POW_H__ */
diff --git a/arm_compute/runtime/NEON/functions/NENormalizationLayer.h b/arm_compute/runtime/NEON/functions/NENormalizationLayer.h
index 4f1f32fba5..d994093e1d 100644
--- a/arm_compute/runtime/NEON/functions/NENormalizationLayer.h
+++ b/arm_compute/runtime/NEON/functions/NENormalizationLayer.h
@@ -55,16 +55,16 @@ public:
     /** Set the input and output tensors.
      *
      * @param[in]  input     Source tensor. 3 lower dims represent a single input with dimensions [width, height, IFM],
-     *                       and an optional 4th dimension for batch of inputs. Data type supported: F16/F32
-     * @param[out] output    Destination with the same dimensions, data type and number of channels of @p input
+     *                       and an optional 4th dimension for batch of inputs. Data type supported: F16/F32. Data layouts supported: NCHW/NHWC.
+     * @param[out] output    Destination with the same dimensions, data type, data layout and number of channels of @p input
      * @param[in]  norm_info Normalization layer information like the normalization type, normalization size and other parameters.
      */
     void configure(const ITensor *input, ITensor *output, const NormalizationLayerInfo &norm_info);
     /** Static function to check if given info will lead to a valid configuration of @ref NENormalizationLayer
      *
      * @param[in] input     Source tensor. 3 lower dims represent a single input with dimensions [width, height, IFM],
-     *                      and an optional 4th dimension for batch of inputs. Data type supported: F16/F32
-     * @param[in] output    Destination with the same dimensions, data type and number of channels of @p input
+     *                      and an optional 4th dimension for batch of inputs. Data type supported: F16/F32. Data layouts supported: NCHW/NHWC.
+     * @param[in] output    Destination with the same dimensions, data type, data layout and number of channels of @p input
     * @param[in] norm_info Normalization layer information like the normalization type, normalization size and other parameters.
     *
    * @return a status
--
cgit v1.2.1
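
Note on the FP16 vmla path: NEON provides no plain vmlaq_f16 multiply-accumulate
intrinsic (only the fused vfma variants), which is presumably why the patch adds
the VMLA_IMPL2 macro that composes the operation from an add and a multiply. A
minimal sketch of what VMLA_IMPL2(float16x8_t, float16x8_t, vaddq, vmulq, f16)
expands to, assuming a toolchain that defines __ARM_FEATURE_FP16_VECTOR_ARITHMETIC:

    #include <arm_neon.h>

    #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
    // a + b * c for eight half-precision lanes, built from vaddq_f16 and
    // vmulq_f16 because there is no non-fused vmlaq_f16 intrinsic to map onto.
    inline float16x8_t vmla(const float16x8_t &a, const float16x8_t &b, const float16x8_t &c)
    {
        return vaddq_f16(a, vmulq_f16(b, c));
    }
    #endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC

Composing the two steps keeps the wrapper's vmla semantics (separate multiply,
then add) consistent with the non-fused vmlaq_f32 path used for FP32.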
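
Note on how the wrappers compose: the point of vpow and the FP16 vmla overloads
is that templated kernel code can stay type-agnostic across F32 and F16. A
hypothetical caller (accumulate_scaled_pow is an illustrative name, not part of
the library) might combine the two entry points like this; vpow forwards to the
vpowq_f32/vpowq_f16 helpers declared in NEMath.h, and vmla computes a + b * c:

    #include "arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h"

    using namespace arm_compute;

    // Illustrative only: acc + coeff * x^beta on four float lanes. The same
    // two wrapper calls compile unchanged for float16x8_t when FP16 vector
    // arithmetic is available, which the normalization kernel relies on.
    float32x4_t accumulate_scaled_pow(float32x4_t acc, float32x4_t x, float32x4_t coeff, float32x4_t beta)
    {
        const float32x4_t x_pow = wrapper::vpow(x, beta); // element-wise x^beta
        return wrapper::vmla(acc, coeff, x_pow);          // acc + coeff * x_pow
    }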