From 57c033bb5400ef19e5952f191da3e878e21bba91 Mon Sep 17 00:00:00 2001
From: Georgios Pinitas
Date: Thu, 15 Feb 2018 12:29:44 +0000
Subject: COMPMID-906: Use fused activation in NEON Batch normalization

Change-Id: I5a6413548b2c9b8972c91ddba57395509dffd87e
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/120656
Tested-by: Jenkins
Reviewed-by: Anthony Barbier
---
 .../NEON/kernels/NEBatchNormalizationLayerKernel.h | 107 +++++++++++++------
 .../kernels/detail/NEActivationFunctionDetail.h    | 113 +++++++++++++++++++++
 arm_compute/core/NEON/wrapper/intrinsics/and.h     |   8 +-
 arm_compute/core/NEON/wrapper/intrinsics/dup_n.h   |  60 +++++++++++
 .../core/NEON/wrapper/intrinsics/intrinsics.h      |  34 +++++++
 arm_compute/core/NEON/wrapper/intrinsics/load.h    |   8 +-
 arm_compute/core/NEON/wrapper/intrinsics/max.h     |  58 +++++++++++
 arm_compute/core/NEON/wrapper/intrinsics/min.h     |  58 +++++++++++
 arm_compute/core/NEON/wrapper/intrinsics/store.h   |   8 +-
 arm_compute/core/NEON/wrapper/traits.h             |  51 ++++++----
 arm_compute/core/NEON/wrapper/wrapper.h            |   6 +-
 11 files changed, 446 insertions(+), 65 deletions(-)
 create mode 100644 arm_compute/core/NEON/kernels/detail/NEActivationFunctionDetail.h
 create mode 100644 arm_compute/core/NEON/wrapper/intrinsics/dup_n.h
 create mode 100644 arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h
 create mode 100644 arm_compute/core/NEON/wrapper/intrinsics/max.h
 create mode 100644 arm_compute/core/NEON/wrapper/intrinsics/min.h
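The optimisation behind this commit: batch normalization and a simple activation are both memory-bound element-wise operations, so running them as two kernels costs a second pass over the tensor. Fusing the activation into the normalization loop removes that pass. A minimal scalar sketch of the fused computation (plain C++, illustrative only — the kernel below vectorises this with NEON):

#include <algorithm>
#include <cmath>

// One element of batch normalization with a fused bounded ReLU:
// y = clamp(gamma * (x - mean) / sqrt(var + epsilon) + beta, 0, a)
float batch_norm_bounded_relu(float x, float mean, float var, float beta,
                              float gamma, float epsilon, float a)
{
    const float normalized = (x - mean) / std::sqrt(var + epsilon);
    const float scaled     = gamma * normalized + beta;
    return std::min(a, std::max(0.f, scaled)); // activation fused into the same pass
}

For BOUNDED_RELU the bounds are 0 and act_info.a(); LU_BOUNDED_RELU clamps between act_info.b() and act_info.a(), matching the lubrelu functor introduced later in this patch.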
diff --git a/arm_compute/core/NEON/kernels/NEBatchNormalizationLayerKernel.h b/arm_compute/core/NEON/kernels/NEBatchNormalizationLayerKernel.h
index f748830b81..63eb739487 100644
--- a/arm_compute/core/NEON/kernels/NEBatchNormalizationLayerKernel.h
+++ b/arm_compute/core/NEON/kernels/NEBatchNormalizationLayerKernel.h
@@ -55,49 +55,98 @@ public:
      *
      * @note If the output tensor is a nullptr, the batch normalization function will be performed in-place
      *
-     * @param[in, out] input   Source tensor. In case of @p output tensor = nullptr, this tensor will store the result.
-     *                         3 lower dimensions represent a single input with dimensions [width, height, FM].
-     *                         The rest are optional and used for representing batches. Data types supported: QS8/QS16/F16/F32.
-     * @param[out]     output  Destination tensor. Output will have the same number of dimensions as input. Data type supported: same as @p input
-     * @param[in]      mean    Mean values tensor. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
-     * @param[in]      var     Variance values tensor. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
-     * @param[in]      beta    Beta values tensor. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
-     * @param[in]      gamma   Gamma values tensor. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
-     * @param[in]      epsilon Small value to avoid division with zero.
+     * @param[in, out] input    Source tensor. In case of @p output tensor = nullptr, this tensor will store the result.
+     *                          3 lower dimensions represent a single input with dimensions [width, height, FM].
+     *                          The rest are optional and used for representing batches. Data types supported: QS8/QS16/F16/F32.
+     * @param[out]     output   Destination tensor. Output will have the same number of dimensions as input. Data type supported: same as @p input
+     * @param[in]      mean     Mean values tensor. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
+     * @param[in]      var      Variance values tensor. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
+     * @param[in]      beta     Beta values tensor. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
+     * @param[in]      gamma    Gamma values tensor. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
+     * @param[in]      epsilon  Small value to avoid division with zero.
+     * @param[in]      act_info (Optional) Activation layer information in case of a fused activation. Only RELU, BOUNDED_RELU and LU_BOUNDED_RELU supported.
+     *                          Data types supported: F32
      */
-    void configure(ITensor *input, ITensor *output, const ITensor *mean, const ITensor *var, const ITensor *beta, const ITensor *gamma, float epsilon);
+    void configure(ITensor *input, ITensor *output, const ITensor *mean, const ITensor *var, const ITensor *beta, const ITensor *gamma, float epsilon,
+                   ActivationLayerInfo act_info = ActivationLayerInfo());
     /** Static function to check if given info will lead to a valid configuration of @ref NEBatchNormalizationLayerKernel
      *
-     * @param[in] input   Source tensor info. In case of @p output tensor = nullptr, this tensor will store the result.
-     *                    3 lower dimensions represent a single input with dimensions [width, height, FM].
-     *                    The rest are optional and used for representing batches. Data types supported: QS8/QS16/F16/F32.
-     * @param[in] output  Destination tensor info. Output will have the same number of dimensions as input. Data type supported: same as @p input
-     * @param[in] mean    Mean values tensor info. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
-     * @param[in] var     Variance values tensor info. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
-     * @param[in] beta    Beta values tensor info. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
-     * @param[in] gamma   Gamma values tensor info. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
-     * @param[in] epsilon Small value to avoid division with zero.
+     * @param[in] input    Source tensor info. In case of @p output tensor = nullptr, this tensor will store the result.
+     *                     3 lower dimensions represent a single input with dimensions [width, height, FM].
+     *                     The rest are optional and used for representing batches. Data types supported: QS8/QS16/F16/F32.
+     * @param[in] output   Destination tensor info. Output will have the same number of dimensions as input. Data type supported: same as @p input
+     * @param[in] mean     Mean values tensor info. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
+     * @param[in] var      Variance values tensor info. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
+     * @param[in] beta     Beta values tensor info. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
+     * @param[in] gamma    Gamma values tensor info. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
+     * @param[in] epsilon  Small value to avoid division with zero.
+     * @param[in] act_info (Optional) Activation layer information in case of a fused activation. Only RELU, BOUNDED_RELU and LU_BOUNDED_RELU supported.
+     *                     Data types supported: F32
      *
      * @return a status
      */
     static Status validate(const ITensorInfo *input, const ITensorInfo *output, const ITensorInfo *mean, const ITensorInfo *var, const ITensorInfo *beta, const ITensorInfo *gamma,
-                           float epsilon);
+                           float epsilon, ActivationLayerInfo act_info);
 
     // Inherited methods overridden:
     void run(const Window &window, const ThreadInfo &info) override;
 
 private:
-    using BatchNormFunction = void(ITensor *input, ITensor *output, const ITensor *mean, const ITensor *var, const ITensor *beta, const ITensor *gamma, float epsilon, const Window &window);
-    BatchNormFunction *_func;
-    ITensor           *_input;
-    ITensor           *_output;
-    const ITensor     *_mean;
-    const ITensor     *_var;
-    const ITensor     *_gamma;
-    const ITensor     *_beta;
-    float              _epsilon;
+    /** Configure execution function in case of non-fused activation **/
+    void configure_non_fused();
+    /** Configure execution function in case of fused activation **/
+    void configure_fused();
+    /** Template function to run batch normalization on 8-bit fixed point
+     *
+     * @tparam fused_activation Boolean that flags if it's a fused activation or not
+     *
+     * @param[in] window Region on which to execute the kernel. (Must be a valid region of the window returned by window()).
+     */
+    template <bool fused_activation>
+    void batch_normalization_qs8(const Window &window);
+    /** Template function to run batch normalization on 16-bit fixed point
+     *
+     * @tparam fused_activation Boolean that flags if it's a fused activation or not
+     *
+     * @param[in] window Region on which to execute the kernel. (Must be a valid region of the window returned by window()).
+     */
+    template <bool fused_activation>
+    void batch_normalization_qs16(const Window &window);
+    /** Template function to run batch normalization on fp16
+     *
+     * @tparam fused_activation Boolean that flags if it's a fused activation or not
+     *
+     * @param[in] window Region on which to execute the kernel. (Must be a valid region of the window returned by window()).
+     */
+    template <bool fused_activation>
+    void batch_normalization_fp16(const Window &window);
+    /** Template function to run batch normalization on fp32
+     *
+     * @tparam fused_activation Boolean that flags if it's a fused activation or not
+     * @tparam F                Activation function functor to run
+     *
+     * @param[in] window Region on which to execute the kernel. (Must be a valid region of the window returned by window()).
+     */
+    template <bool fused_activation, typename F>
+    void batch_normalization_fp32(const Window &window);
+    /** Common signature for all the batch normalization functions
+     *
+     * @param[in] window Region on which to execute the kernel.
+     */
+    using BatchNormFunctionPtr = void (NEBatchNormalizationLayerKernel::*)(const Window &window);
+
+private:
+    BatchNormFunctionPtr _func;
+    ITensor             *_input;
+    ITensor             *_output;
+    const ITensor       *_mean;
+    const ITensor       *_var;
+    const ITensor       *_gamma;
+    const ITensor       *_beta;
+    float                _epsilon;
+    ActivationLayerInfo  _act_info;
 };
 } // namespace arm_compute
 #endif /*__ARM_COMPUTE_NEBATCHNORMALIZATIONLAYERKERNEL_H__ */
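The private section above replaces the old free-function pointer with a pointer-to-member (BatchNormFunctionPtr), selected once in configure_non_fused()/configure_fused() from the data type and act_info, so run() performs no per-call dispatch. A self-contained sketch of that pattern (illustrative names, not the library's code):

#include <iostream>

class Kernel
{
public:
    // Choose the specialization once, when the kernel is configured.
    void configure(bool fused)
    {
        _func = fused ? &Kernel::normalize<true> : &Kernel::normalize<false>;
    }
    // Hot path: a single indirect call, no per-run branching.
    void run()
    {
        (this->*_func)();
    }

private:
    template <bool fused_activation>
    void normalize()
    {
        std::cout << (fused_activation ? "fused\n" : "plain\n");
    }

    using FunctionPtr = void (Kernel::*)();
    FunctionPtr _func{ nullptr };
};

int main()
{
    Kernel k;
    k.configure(true);
    k.run(); // prints "fused"
}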
diff --git a/arm_compute/core/NEON/kernels/detail/NEActivationFunctionDetail.h b/arm_compute/core/NEON/kernels/detail/NEActivationFunctionDetail.h
new file mode 100644
index 0000000000..e4d3f54943
--- /dev/null
+++ b/arm_compute/core/NEON/kernels/detail/NEActivationFunctionDetail.h
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_DETAIL_NEACTIVATION_FUNCTION_DETAIL_H__
+#define __ARM_COMPUTE_DETAIL_NEACTIVATION_FUNCTION_DETAIL_H__
+
+#include "arm_compute/core/NEON/wrapper/wrapper.h"
+
+namespace arm_compute
+{
+namespace detail
+{
+// Dummy activation object
+/** Dummy activation object */
+template <typename T, int S>
+struct dummy
+{
+    using ExactType = typename wrapper::traits::neon_vector<T, S>::type;
+
+    explicit dummy(ActivationLayerInfo act_info)
+    {
+        ARM_COMPUTE_UNUSED(act_info);
+    }
+    void operator()(ExactType &vval)
+    {
+        ARM_COMPUTE_UNUSED(vval);
+    }
+};
+/** RELU activation object */
+template <typename T, int S>
+struct relu
+{
+    using ExactType    = typename wrapper::traits::neon_vector<T, S>::type;
+    using ExactTagType = typename wrapper::traits::neon_vector<T, S>::tag_type;
+
+    explicit relu(ActivationLayerInfo act_info)
+        : vzero(wrapper::vdup_n(static_cast<T>(0.f), ExactTagType{}))
+    {
+        ARM_COMPUTE_UNUSED(act_info);
+    }
+
+    void operator()(ExactType &vval)
+    {
+        vval = wrapper::vmax(vzero, vval);
+    }
+
+    const ExactType vzero;
+};
+/** Bounded RELU activation object */
+template <typename T, int S>
+struct brelu
+{
+    using ExactType    = typename wrapper::traits::neon_vector<T, S>::type;
+    using ExactTagType = typename wrapper::traits::neon_vector<T, S>::tag_type;
+
+    explicit brelu(ActivationLayerInfo act_info)
+        : vzero(wrapper::vdup_n(static_cast<T>(0.f), ExactTagType{})),
+          valpha(wrapper::vdup_n(static_cast<T>(act_info.a()), ExactTagType{}))
+    {
+    }
+
+    void operator()(ExactType &vval)
+    {
+        vval = wrapper::vmin(valpha, wrapper::vmax(vzero, vval));
+    }
+
+    const ExactType vzero;
+    const ExactType valpha;
+};
+/** Lower-Upper Bounded RELU activation object */
+template <typename T, int S>
+struct lubrelu
+{
+    using ExactType    = typename wrapper::traits::neon_vector<T, S>::type;
+    using ExactTagType = typename wrapper::traits::neon_vector<T, S>::tag_type;
+
+    explicit lubrelu(ActivationLayerInfo act_info)
+        : valpha(wrapper::vdup_n(static_cast<T>(act_info.a()), ExactTagType{})),
+          vbeta(wrapper::vdup_n(static_cast<T>(act_info.b()), ExactTagType{}))
+    {
+    }
+
+    void operator()(ExactType &vval)
+    {
+        vval = wrapper::vmin(valpha, wrapper::vmax(vbeta, vval));
+    }
+
+    const ExactType valpha;
+    const ExactType vbeta;
+};
+} // namespace detail
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_DETAIL_NEACTIVATION_FUNCTION_DETAIL_H__ */
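These functors bake the activation constants into registers at construction time, so the hot loop only pays the min/max instructions per vector. A standalone sketch of the same idea written directly against arm_neon.h (float, 4-lane case; assumes a NEON target — the library version is generic over (T, S) via the wrapper calls):

#include <arm_neon.h>

// Same shape as detail::relu<float, 4>: state captured once, applied per vector.
struct relu_f32x4
{
    relu_f32x4() : vzero(vdupq_n_f32(0.f)) {}
    void operator()(float32x4_t &vval) const
    {
        vval = vmaxq_f32(vzero, vval);
    }
    const float32x4_t vzero;
};

void apply_relu(float *data, int n) // n assumed to be a multiple of 4
{
    relu_f32x4 act; // constants are materialised once, outside the loop
    for(int i = 0; i < n; i += 4)
    {
        float32x4_t v = vld1q_f32(data + i);
        act(v);
        vst1q_f32(data + i, v);
    }
}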
diff --git a/arm_compute/core/NEON/wrapper/intrinsics/and.h b/arm_compute/core/NEON/wrapper/intrinsics/and.h
index 9b5cfd6b89..4910738e86 100644
--- a/arm_compute/core/NEON/wrapper/intrinsics/and.h
+++ b/arm_compute/core/NEON/wrapper/intrinsics/and.h
@@ -24,8 +24,6 @@
 #ifndef __ARM_COMPUTE_WRAPPER_AND_H__
 #define __ARM_COMPUTE_WRAPPER_AND_H__
 
-#include "arm_compute/core/NEON/wrapper/traits.h"
-
 #include <arm_neon.h>
 
 namespace arm_compute
@@ -55,6 +53,8 @@ VAND_IMPL(uint32_t, uint32x4_t, vandq, u32)
 VAND_IMPL(int32_t, int32x4_t, vandq, s32)
 VAND_IMPL(uint64_t, uint64x2_t, vandq, u64)
 VAND_IMPL(int64_t, int64x2_t, vandq, s64)
-}
-}
+
+#undef VAND_IMPL
+} // namespace wrapper
+} // namespace arm_compute
 #endif /* __ARM_COMPUTE_WRAPPER_AND_H__ */
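The VAND_IMPL macro stamps out one vand overload per type. Its body is not visible in this hunk, but by analogy with the vmax/vmin headers added below, VAND_IMPL(uint8_t, uint8x8_t, vand, u8) presumably expands to something like:

#include <arm_neon.h>

// prefix##_##postfix token-pastes the intrinsic name, here vand_u8.
inline uint8x8_t vand(const uint8x8_t &a, const uint8x8_t &b)
{
    return vand_u8(a, b);
}

The new trailing #undef keeps the helper macro from leaking into every translation unit that includes the header.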
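vdup_n broadcasts a scalar, so its argument types alone cannot say whether a 64-bit or a 128-bit vector is wanted — C++ does not overload on return type. The extra tag parameter settles it at zero runtime cost, since the tags are empty structs. A self-contained sketch of the idiom, with stand-in tag types:

#include <arm_neon.h>

struct tag64 {};  // stands in for wrapper::traits::vector_64_tag
struct tag128 {}; // stands in for wrapper::traits::vector_128_tag

inline float32x2_t dup_n(float value, tag64)  { return vdup_n_f32(value); }
inline float32x4_t dup_n(float value, tag128) { return vdupq_n_f32(value); }

// The caller picks the width with a tag object; the value argument alone
// could never select between the two overloads.
float32x4_t four_halves()
{
    return dup_n(0.5f, tag128{});
}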
diff --git a/arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h b/arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h
new file mode 100644
index 0000000000..b302b366cd
--- /dev/null
+++ b/arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_WRAPPER_INTRINSICS_H__
+#define __ARM_COMPUTE_WRAPPER_INTRINSICS_H__
+
+#include "arm_compute/core/NEON/wrapper/intrinsics/and.h"
+#include "arm_compute/core/NEON/wrapper/intrinsics/dup_n.h"
+#include "arm_compute/core/NEON/wrapper/intrinsics/load.h"
+#include "arm_compute/core/NEON/wrapper/intrinsics/max.h"
+#include "arm_compute/core/NEON/wrapper/intrinsics/min.h"
+#include "arm_compute/core/NEON/wrapper/intrinsics/store.h"
+
+#endif /* __ARM_COMPUTE_WRAPPER_INTRINSICS_H__ */
diff --git a/arm_compute/core/NEON/wrapper/intrinsics/load.h b/arm_compute/core/NEON/wrapper/intrinsics/load.h
index 9629f2b4e0..442d857497 100644
--- a/arm_compute/core/NEON/wrapper/intrinsics/load.h
+++ b/arm_compute/core/NEON/wrapper/intrinsics/load.h
@@ -24,8 +24,6 @@
 #ifndef __ARM_COMPUTE_WRAPPER_LOAD_H__
 #define __ARM_COMPUTE_WRAPPER_LOAD_H__
 
-#include "arm_compute/core/NEON/wrapper/traits.h"
-
 #include <arm_neon.h>
 
 namespace arm_compute
@@ -63,6 +61,8 @@ VLOADQ_IMPL(int32_t, int32x4_t, s32)
 //VLOAD_IMPL(uint64_t, uint64x1_t, u64)
 //VLOAD_IMPL(int64_t, int64x1_t, s64)
 VLOADQ_IMPL(float, float32x4_t, f32)
-}
-}
+
+#undef VLOAD_IMPL
+} // namespace wrapper
+} // namespace arm_compute
 #endif /* __ARM_COMPUTE_WRAPPER_LOAD_H__ */
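load.h faces the same return-type ambiguity as dup_n — a const float * cannot select a width — but, judging from the VLOAD_IMPL/VLOADQ_IMPL names above, it resolves it with two function names rather than a tag. Roughly (a sketch, float case only; the exact signatures are not visible in this hunk):

#include <arm_neon.h>

// Width lives in the function name here, not in a tag argument.
inline float32x2_t vload(const float *ptr)  { return vld1_f32(ptr); }
inline float32x4_t vloadq(const float *ptr) { return vld1q_f32(ptr); }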
diff --git a/arm_compute/core/NEON/wrapper/intrinsics/max.h b/arm_compute/core/NEON/wrapper/intrinsics/max.h
new file mode 100644
index 0000000000..1a8e95de87
--- /dev/null
+++ b/arm_compute/core/NEON/wrapper/intrinsics/max.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_WRAPPER_MAX_H__
+#define __ARM_COMPUTE_WRAPPER_MAX_H__
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VMAX_IMPL(stype, vtype, prefix, postfix)      \
+    inline vtype vmax(const vtype &a, const vtype &b) \
+    {                                                 \
+        return prefix##_##postfix(a, b);              \
+    }
+
+VMAX_IMPL(uint8_t, uint8x8_t, vmax, u8)
+VMAX_IMPL(int8_t, int8x8_t, vmax, s8)
+VMAX_IMPL(uint16_t, uint16x4_t, vmax, u16)
+VMAX_IMPL(int16_t, int16x4_t, vmax, s16)
+VMAX_IMPL(uint32_t, uint32x2_t, vmax, u32)
+VMAX_IMPL(int32_t, int32x2_t, vmax, s32)
+VMAX_IMPL(float, float32x2_t, vmax, f32)
+
+VMAX_IMPL(uint8_t, uint8x16_t, vmaxq, u8)
+VMAX_IMPL(int8_t, int8x16_t, vmaxq, s8)
+VMAX_IMPL(uint16_t, uint16x8_t, vmaxq, u16)
+VMAX_IMPL(int16_t, int16x8_t, vmaxq, s16)
+VMAX_IMPL(uint32_t, uint32x4_t, vmaxq, u32)
+VMAX_IMPL(int32_t, int32x4_t, vmaxq, s32)
+VMAX_IMPL(float, float32x4_t, vmaxq, f32)
+
+#undef VMAX_IMPL
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_WRAPPER_MAX_H__ */
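Because vmax is one overloaded name across every element type and both widths, kernel code can be a template whose body never mentions a concrete intrinsic; overload resolution picks vmax_u8, vmaxq_f32, and so on per instantiation. A reduced sketch (two float overloads standing in for the full set above):

#include <arm_neon.h>

// Overloads in the style of the wrapper (sketch, float only).
inline float32x2_t vmax(const float32x2_t &a, const float32x2_t &b) { return vmax_f32(a, b); }
inline float32x4_t vmax(const float32x4_t &a, const float32x4_t &b) { return vmaxq_f32(a, b); }

// One template body serves both widths because the overload set
// resolves per type -- this is what the umbrella header enables.
template <typename V>
V elementwise_max(V a, V b)
{
    return vmax(a, b);
}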
diff --git a/arm_compute/core/NEON/wrapper/intrinsics/min.h b/arm_compute/core/NEON/wrapper/intrinsics/min.h
new file mode 100644
index 0000000000..ae79631190
--- /dev/null
+++ b/arm_compute/core/NEON/wrapper/intrinsics/min.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_WRAPPER_MIN_H__
+#define __ARM_COMPUTE_WRAPPER_MIN_H__
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VMIN_IMPL(stype, vtype, prefix, postfix)      \
+    inline vtype vmin(const vtype &a, const vtype &b) \
+    {                                                 \
+        return prefix##_##postfix(a, b);              \
+    }
+
+VMIN_IMPL(uint8_t, uint8x8_t, vmin, u8)
+VMIN_IMPL(int8_t, int8x8_t, vmin, s8)
+VMIN_IMPL(uint16_t, uint16x4_t, vmin, u16)
+VMIN_IMPL(int16_t, int16x4_t, vmin, s16)
+VMIN_IMPL(uint32_t, uint32x2_t, vmin, u32)
+VMIN_IMPL(int32_t, int32x2_t, vmin, s32)
+VMIN_IMPL(float, float32x2_t, vmin, f32)
+
+VMIN_IMPL(uint8_t, uint8x16_t, vminq, u8)
+VMIN_IMPL(int8_t, int8x16_t, vminq, s8)
+VMIN_IMPL(uint16_t, uint16x8_t, vminq, u16)
+VMIN_IMPL(int16_t, int16x8_t, vminq, s16)
+VMIN_IMPL(uint32_t, uint32x4_t, vminq, u32)
+VMIN_IMPL(int32_t, int32x4_t, vminq, s32)
+VMIN_IMPL(float, float32x4_t, vminq, f32)
+
+#undef VMIN_IMPL
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_WRAPPER_MIN_H__ */
diff --git a/arm_compute/core/NEON/wrapper/intrinsics/store.h b/arm_compute/core/NEON/wrapper/intrinsics/store.h
index de57b7350f..be89602c09 100644
--- a/arm_compute/core/NEON/wrapper/intrinsics/store.h
+++ b/arm_compute/core/NEON/wrapper/intrinsics/store.h
@@ -24,8 +24,6 @@
 #ifndef __ARM_COMPUTE_WRAPPER_STORE_H__
 #define __ARM_COMPUTE_WRAPPER_STORE_H__
 
-#include "arm_compute/core/NEON/wrapper/traits.h"
-
 #include <arm_neon.h>
 
 namespace arm_compute
@@ -57,6 +55,8 @@ VSTORE_IMPL(int32_t, int32x4_t, vst1q, s32)
 //VSTORE_IMPL(uint64_t, 2, vst1q, u64)
 //VSTORE_IMPL(int64_t, 2, vst1q, s64)
 VSTORE_IMPL(float, float32x4_t, vst1q, f32)
-}
-}
+
+#undef VSTORE_IMPL
+} // namespace wrapper
+} // namespace arm_compute
 #endif /* __ARM_COMPUTE_WRAPPER_STORE_H__ */
diff --git a/arm_compute/core/NEON/wrapper/traits.h b/arm_compute/core/NEON/wrapper/traits.h
index 045839cf48..08b2c9b48f 100644
--- a/arm_compute/core/NEON/wrapper/traits.h
+++ b/arm_compute/core/NEON/wrapper/traits.h
@@ -35,31 +35,40 @@ namespace traits
 // *INDENT-OFF*
 // clang-format off
 
+/** 64-bit vector tag */
+struct vector_64_tag {};
+/** 128-bit vector tag */
+struct vector_128_tag {};
+
 /** Create the appropriate NEON vector given its type and size */
 template <typename T, int S> struct neon_vector;
 /** Specializations */
-template <> struct neon_vector<uint8_t, 8>{ using type = uint8x8_t; };
-template <> struct neon_vector<int8_t, 8>{ using type = int8x8_t; };
-template <> struct neon_vector<uint8_t, 16>{ using type = uint8x16_t; };
-template <> struct neon_vector<int8_t, 16>{ using type = int8x16_t; };
-template <> struct neon_vector<uint16_t, 4>{ using type = uint16x4_t; };
-template <> struct neon_vector<int16_t, 4>{ using type = int16x4_t; };
-template <> struct neon_vector<uint16_t, 8>{ using type = uint16x8_t; };
-template <> struct neon_vector<int16_t, 8>{ using type = int16x8_t; };
-template <> struct neon_vector<uint32_t, 2>{ using type = uint32x2_t; };
-template <> struct neon_vector<int32_t, 2>{ using type = int32x2_t; };
-template <> struct neon_vector<uint32_t, 4>{ using type = uint32x4_t; };
-template <> struct neon_vector<int32_t, 4>{ using type = int32x4_t; };
-template <> struct neon_vector<uint64_t, 1>{ using type = uint64x1_t; };
-template <> struct neon_vector<int64_t, 1>{ using type = int64x1_t; };
-template <> struct neon_vector<uint64_t, 2>{ using type = uint64x2_t; };
-template <> struct neon_vector<int64_t, 2>{ using type = int64x2_t; };
-template <> struct neon_vector<float, 2>{ using type = float32x2_t; };
-template <> struct neon_vector<float, 4>{ using type = float32x4_t; };
+template <> struct neon_vector<uint8_t, 8>{ using type = uint8x8_t; using tag_type = vector_64_tag; };
+template <> struct neon_vector<int8_t, 8>{ using type = int8x8_t; using tag_type = vector_64_tag; };
+template <> struct neon_vector<uint8_t, 16>{ using type = uint8x16_t; using tag_type = vector_128_tag; };
+template <> struct neon_vector<int8_t, 16>{ using type = int8x16_t; using tag_type = vector_128_tag; };
+template <> struct neon_vector<uint16_t, 4>{ using type = uint16x4_t; using tag_type = vector_64_tag; };
+template <> struct neon_vector<int16_t, 4>{ using type = int16x4_t; using tag_type = vector_64_tag; };
+template <> struct neon_vector<uint16_t, 8>{ using type = uint16x8_t; using tag_type = vector_128_tag; };
+template <> struct neon_vector<int16_t, 8>{ using type = int16x8_t; using tag_type = vector_128_tag; };
+template <> struct neon_vector<uint32_t, 2>{ using type = uint32x2_t; using tag_type = vector_64_tag; };
+template <> struct neon_vector<int32_t, 2>{ using type = int32x2_t; using tag_type = vector_64_tag; };
+template <> struct neon_vector<uint32_t, 4>{ using type = uint32x4_t; using tag_type = vector_128_tag; };
+template <> struct neon_vector<int32_t, 4>{ using type = int32x4_t; using tag_type = vector_128_tag; };
+template <> struct neon_vector<uint64_t, 1>{ using type = uint64x1_t; using tag_type = vector_64_tag; };
+template <> struct neon_vector<int64_t, 1>{ using type = int64x1_t; using tag_type = vector_64_tag; };
+template <> struct neon_vector<uint64_t, 2>{ using type = uint64x2_t; using tag_type = vector_128_tag; };
+template <> struct neon_vector<int64_t, 2>{ using type = int64x2_t; using tag_type = vector_128_tag; };
+template <> struct neon_vector<float, 2>{ using type = float32x2_t; using tag_type = vector_64_tag; };
+template <> struct neon_vector<float, 4>{ using type = float32x4_t; using tag_type = vector_128_tag; };
+
+/** Helper type template to get the type of a neon vector */
 template <typename T, int S> using neon_vector_t = typename neon_vector<T, S>::type;
+/** Helper type template to get the tag type of a neon vector */
+template <typename T, int S> using neon_vector_tag_t = typename neon_vector<T, S>::tag_type;
 // clang-format on
 // *INDENT-ON*
-}
-}
-}
+} // namespace traits
+} // namespace wrapper
+} // namespace arm_compute
 #endif /* __ARM_COMPUTE_WRAPPER_TRAITS_H__ */
diff --git a/arm_compute/core/NEON/wrapper/wrapper.h b/arm_compute/core/NEON/wrapper/wrapper.h
index 9676d04d71..61dc42a69b 100644
--- a/arm_compute/core/NEON/wrapper/wrapper.h
+++ b/arm_compute/core/NEON/wrapper/wrapper.h
@@ -24,10 +24,10 @@
 #ifndef __ARM_COMPUTE_WRAPPER_H__
 #define __ARM_COMPUTE_WRAPPER_H__
 
+// Traits
 #include "arm_compute/core/NEON/wrapper/traits.h"
 
-#include "arm_compute/core/NEON/wrapper/intrinsics/and.h"
-#include "arm_compute/core/NEON/wrapper/intrinsics/load.h"
-#include "arm_compute/core/NEON/wrapper/intrinsics/store.h"
+// Intrinsics Overloads
+#include "arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h"
 
 #endif /* __ARM_COMPUTE_WRAPPER_H__ */
-- 
cgit v1.2.1
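The new tag_type member is what connects the traits to the tag-dispatched vdup_n: generic code looks up both the vector type and its width tag from one (T, S) pair, exactly as the ExactType/ExactTagType aliases in the activation functors do. A reduced, self-contained sketch:

#include <arm_neon.h>

struct vector_64_tag {};
struct vector_128_tag {};

// Reduced version of wrapper::traits::neon_vector (float only).
template <typename T, int S> struct neon_vector;
template <> struct neon_vector<float, 2> { using type = float32x2_t; using tag_type = vector_64_tag; };
template <> struct neon_vector<float, 4> { using type = float32x4_t; using tag_type = vector_128_tag; };

// Generic code recovers both the type and the width tag from (T, S):
using V   = neon_vector<float, 4>::type;     // float32x4_t
using Tag = neon_vector<float, 4>::tag_type; // vector_128_tag

static_assert(sizeof(V) == 16, "128-bit vector expected");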
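Taken together, the pieces compose as follows: configure() picks a batch_normalization_fp32<fused, F> instantiation, F is one of the detail:: functors, and the functor's constants come from vdup_n via the traits. A compressed, illustrative sketch of the fp32 path (not the library's actual kernel body; scale and offset fold the statistics in the usual way, scale = gamma / sqrt(var + epsilon), offset = beta - mean * scale):

#include <arm_neon.h>

struct dummy_act { void operator()(float32x4_t &) const {} };
struct relu_act
{
    relu_act() : vzero(vdupq_n_f32(0.f)) {}
    void operator()(float32x4_t &v) const { v = vmaxq_f32(vzero, v); }
    const float32x4_t vzero;
};

// fused_activation selects at compile time whether the functor call is emitted.
template <bool fused_activation, typename F>
void batch_norm_fp32(float *data, int n, float32x4_t scale, float32x4_t offset)
{
    F activation; // e.g. relu_act, mirroring detail::relu<float, 4>
    for(int i = 0; i < n; i += 4) // n assumed to be a multiple of 4
    {
        // vmlaq_f32(a, b, c) computes a + b * c, i.e. offset + data * scale.
        float32x4_t v = vmlaq_f32(offset, vld1q_f32(data + i), scale);
        if(fused_activation) // constant expression; the dead branch is removed
        {
            activation(v);
        }
        vst1q_f32(data + i, v);
    }
}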