author     Georgios Pinitas <georgios.pinitas@arm.com>  2018-02-15 12:29:44 +0000
committer  Anthony Barbier <anthony.barbier@arm.com>    2018-11-02 16:47:18 +0000
commit     57c033bb5400ef19e5952f191da3e878e21bba91 (patch)
tree       b325e4a0beba35bcdf29c4ae6dea874d7cd26b9f
parent     02ee4291795f64fb510a71c6c754671438635186 (diff)
COMPMID-906: Use fused activation in NEON Batch normalization
Change-Id: I5a6413548b2c9b8972c91ddba57395509dffd87e
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/120656
Tested-by: Jenkins <bsgcomp@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
-rw-r--r--  arm_compute/core/NEON/kernels/NEBatchNormalizationLayerKernel.h     107
-rw-r--r--  arm_compute/core/NEON/kernels/detail/NEActivationFunctionDetail.h   113
-rw-r--r--  arm_compute/core/NEON/wrapper/intrinsics/and.h                         8
-rw-r--r--  arm_compute/core/NEON/wrapper/intrinsics/dup_n.h                      60
-rw-r--r--  arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h                 34
-rw-r--r--  arm_compute/core/NEON/wrapper/intrinsics/load.h                        8
-rw-r--r--  arm_compute/core/NEON/wrapper/intrinsics/max.h                        58
-rw-r--r--  arm_compute/core/NEON/wrapper/intrinsics/min.h                        58
-rw-r--r--  arm_compute/core/NEON/wrapper/intrinsics/store.h                       8
-rw-r--r--  arm_compute/core/NEON/wrapper/traits.h                                51
-rw-r--r--  arm_compute/core/NEON/wrapper/wrapper.h                                6
-rw-r--r--  arm_compute/runtime/NEON/functions/NEBatchNormalizationLayer.h        5
-rw-r--r--  src/core/NEON/kernels/NEBatchNormalizationLayerKernel.cpp           277
-rw-r--r--  src/runtime/NEON/functions/NEBatchNormalizationLayer.cpp             21
-rw-r--r--  tests/benchmark/NEON/BatchNormalizationLayer.cpp                       4
-rw-r--r--  tests/validation/NEON/BatchNormalizationLayer.cpp                      2
16 files changed, 631 insertions, 189 deletions
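For context, a minimal sketch of how a caller requests the fused path through the public runtime API after this change. The tensor setup here is hypothetical and elided; only the configure()/run() calls reflect the signatures in this patch:

    #include "arm_compute/runtime/NEON/functions/NEBatchNormalizationLayer.h"
    #include "arm_compute/runtime/Tensor.h"

    using namespace arm_compute;

    void example()
    {
        // Hypothetical F32 tensors; shape and allocator initialisation elided
        Tensor in, out, mean, var, beta, gamma;

        NEBatchNormalizationLayer bn;
        bn.configure(&in, &out, &mean, &var, &beta, &gamma, 0.001f,
                     ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
        bn.run(); // batch normalization and ReLU execute as a single fused kernel
    }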
diff --git a/arm_compute/core/NEON/kernels/NEBatchNormalizationLayerKernel.h b/arm_compute/core/NEON/kernels/NEBatchNormalizationLayerKernel.h
index f748830b81..63eb739487 100644
--- a/arm_compute/core/NEON/kernels/NEBatchNormalizationLayerKernel.h
+++ b/arm_compute/core/NEON/kernels/NEBatchNormalizationLayerKernel.h
@@ -55,49 +55,98 @@ public:
*
* @note If the output tensor is a nullptr, the batch normalization function will be performed in-place
*
- * @param[in, out] input Source tensor. In case of @p output tensor = nullptr, this tensor will store the result.
- * 3 lower dimensions represent a single input with dimensions [width, height, FM].
- * The rest are optional and used for representing batches. Data types supported: QS8/QS16/F16/F32.
- * @param[out] output Destination tensor. Output will have the same number of dimensions as input. Data type supported: same as @p input
- * @param[in] mean Mean values tensor. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
- * @param[in] var Variance values tensor. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
- * @param[in] beta Beta values tensor. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
- * @param[in] gamma Gamma values tensor. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
- * @param[in] epsilon Small value to avoid division with zero.
+ * @param[in, out] input Source tensor. In case of @p output tensor = nullptr, this tensor will store the result.
+ * 3 lower dimensions represent a single input with dimensions [width, height, FM].
+ * The rest are optional and used for representing batches. Data types supported: QS8/QS16/F16/F32.
+ * @param[out] output Destination tensor. Output will have the same number of dimensions as input. Data type supported: same as @p input
+ * @param[in] mean Mean values tensor. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
+ * @param[in] var Variance values tensor. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
+ * @param[in] beta Beta values tensor. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
+ * @param[in] gamma Gamma values tensor. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
+ * @param[in] epsilon Small value to avoid division with zero.
+ * @param[in] act_info (Optional) Activation layer information in case of a fused activation. Only RELU, BOUNDED_RELU and LU_BOUNDED_RELU supported.
+ * Data types supported: F32
*/
- void configure(ITensor *input, ITensor *output, const ITensor *mean, const ITensor *var, const ITensor *beta, const ITensor *gamma, float epsilon);
+ void configure(ITensor *input, ITensor *output, const ITensor *mean, const ITensor *var, const ITensor *beta, const ITensor *gamma, float epsilon,
+ ActivationLayerInfo act_info = ActivationLayerInfo());
/** Static function to check if given info will lead to a valid configuration of @ref NEBatchNormalizationLayerKernel
*
- * @param[in] input Source tensor info. In case of @p output tensor = nullptr, this tensor will store the result.
- * 3 lower dimensions represent a single input with dimensions [width, height, FM].
- * The rest are optional and used for representing batches. Data types supported: QS8/QS16/F16/F32.
- * @param[in] output Destination tensor info. Output will have the same number of dimensions as input. Data type supported: same as @p input
- * @param[in] mean Mean values tensor info. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
- * @param[in] var Variance values tensor info. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
- * @param[in] beta Beta values tensor info. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
- * @param[in] gamma Gamma values tensor info. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
- * @param[in] epsilon Small value to avoid division with zero.
+ * @param[in] input Source tensor info. In case of @p output tensor = nullptr, this tensor will store the result.
+ * 3 lower dimensions represent a single input with dimensions [width, height, FM].
+ * The rest are optional and used for representing batches. Data types supported: QS8/QS16/F16/F32.
+ * @param[in] output Destination tensor info. Output will have the same number of dimensions as input. Data type supported: same as @p input
+ * @param[in] mean Mean values tensor info. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
+ * @param[in] var Variance values tensor info. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
+ * @param[in] beta Beta values tensor info. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
+ * @param[in] gamma Gamma values tensor info. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
+ * @param[in] epsilon Small value to avoid division with zero.
+ * @param[in] act_info (Optional) Activation layer information in case of a fused activation. Only RELU, BOUNDED_RELU and LU_BOUNDED_RELU supported.
+ * Data types supported: F32
*
* @return a status
*/
static Status validate(const ITensorInfo *input, const ITensorInfo *output,
const ITensorInfo *mean, const ITensorInfo *var,
const ITensorInfo *beta, const ITensorInfo *gamma,
- float epsilon);
+ float epsilon, ActivationLayerInfo act_info);
// Inherited methods overridden:
void run(const Window &window, const ThreadInfo &info) override;
private:
- using BatchNormFunction = void(ITensor *input, ITensor *output, const ITensor *mean, const ITensor *var, const ITensor *beta, const ITensor *gamma, float epsilon, const Window &window);
- BatchNormFunction *_func;
- ITensor *_input;
- ITensor *_output;
- const ITensor *_mean;
- const ITensor *_var;
- const ITensor *_gamma;
- const ITensor *_beta;
- float _epsilon;
+ /** Configure execution function in case of non-fused activation **/
+ void configure_non_fused();
+ /** Configure execution function in case of fused activation **/
+ void configure_fused();
+ /** Template function to run batch normalization on 8-bit fixed point
+ *
+ * @tparam fused_activation Boolean that flags if it's a fused activation or not
+ *
+ * @param[in] window Region on which to execute the kernel. (Must be a valid region of the window returned by window()).
+ */
+ template <bool fused_activation>
+ void batch_normalization_qs8(const Window &window);
+ /** Template function to run batch normalization on 16-bit fixed point
+ *
+ * @tparam fused_activation Boolean that flags if it's a fused activation or not
+ *
+ * @param[in] window Region on which to execute the kernel. (Must be a valid region of the window returned by window()).
+ */
+ template <bool fused_activation>
+ void batch_normalization_qs16(const Window &window);
+ /** Template function to run batch normalization on fp16
+ *
+ * @tparam fused_activation Boolean that flags if it's a fused activation or not
+ *
+ * @param[in] window Region on which to execute the kernel. (Must be a valid region of the window returned by window()).
+ */
+ template <bool fused_activation>
+ void batch_normalization_fp16(const Window &window);
+ /** Template function to run batch normalization on fp32
+ *
+ * @tparam fused_activation Boolean that flags if it's a fused activation or not
+ * @tparam F Activation function functor to run
+ *
+ * @param[in] window Region on which to execute the kernel. (Must be a valid region of the window returned by window()).
+ */
+ template <bool fused_activation, typename F>
+ void batch_normalization_fp32(const Window &window);
+ /** Common signature for all the batch normalization functions
+ *
+ * @param[in] window Region on which to execute the kernel.
+ */
+ using BatchNormFunctionPtr = void (NEBatchNormalizationLayerKernel::*)(const Window &window);
+
+private:
+ BatchNormFunctionPtr _func;
+ ITensor *_input;
+ ITensor *_output;
+ const ITensor *_mean;
+ const ITensor *_var;
+ const ITensor *_gamma;
+ const ITensor *_beta;
+ float _epsilon;
+ ActivationLayerInfo _act_info;
};
} // namespace arm_compute
#endif /*__ARM_COMPUTE_NEBATCHNORMALIZATIONLAYERKERNEL_H__ */
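For reference, the per-element computation the kernel implements, in the standard batch-normalization inference form, where act is the identity when no activation is fused:

    \hat{x} = \frac{x - \mu}{\sqrt{\sigma^2 + \epsilon}}, \qquad y = \operatorname{act}(\gamma\,\hat{x} + \beta)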
diff --git a/arm_compute/core/NEON/kernels/detail/NEActivationFunctionDetail.h b/arm_compute/core/NEON/kernels/detail/NEActivationFunctionDetail.h
new file mode 100644
index 0000000000..e4d3f54943
--- /dev/null
+++ b/arm_compute/core/NEON/kernels/detail/NEActivationFunctionDetail.h
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_DETAIL_NEACTIVATION_FUNCTION_DETAIL_H__
+#define __ARM_COMPUTE_DETAIL_NEACTIVATION_FUNCTION_DETAIL_H__
+
+#include "arm_compute/core/NEON/wrapper/wrapper.h"
+
+namespace arm_compute
+{
+namespace detail
+{
+/** Dummy activation object */
+template <typename T, int S>
+struct dummy
+{
+ using ExactType = typename wrapper::traits::neon_vector<T, S>::type;
+
+ explicit dummy(ActivationLayerInfo act_info)
+ {
+ ARM_COMPUTE_UNUSED(act_info);
+ }
+ void operator()(ExactType &vval)
+ {
+ ARM_COMPUTE_UNUSED(vval);
+ }
+};
+/** RELU activation object */
+template <typename T, int S>
+struct relu
+{
+ using ExactType = typename wrapper::traits::neon_vector<T, S>::type;
+ using ExactTagType = typename wrapper::traits::neon_vector<T, S>::tag_type;
+
+ explicit relu(ActivationLayerInfo act_info)
+ : vzero(wrapper::vdup_n(static_cast<T>(0.f), ExactTagType{}))
+ {
+ ARM_COMPUTE_UNUSED(act_info);
+ }
+
+ void operator()(ExactType &vval)
+ {
+ vval = wrapper::vmax(vzero, vval);
+ }
+
+ const ExactType vzero;
+};
+/** Bounded RELU activation object */
+template <typename T, int S>
+struct brelu
+{
+ using ExactType = typename wrapper::traits::neon_vector<T, S>::type;
+ using ExactTagType = typename wrapper::traits::neon_vector<T, S>::tag_type;
+
+ explicit brelu(ActivationLayerInfo act_info)
+ : vzero(wrapper::vdup_n(static_cast<T>(0.f), ExactTagType{})),
+ valpha(wrapper::vdup_n(static_cast<T>(act_info.a()), ExactTagType{}))
+ {
+ }
+
+ void operator()(ExactType &vval)
+ {
+ vval = wrapper::vmin(valpha, wrapper::vmax(vzero, vval));
+ }
+
+ const ExactType vzero;
+ const ExactType valpha;
+};
+/** Lower-Upper Bounded RELU activation object */
+template <typename T, int S>
+struct lubrelu
+{
+ using ExactType = typename wrapper::traits::neon_vector<T, S>::type;
+ using ExactTagType = typename wrapper::traits::neon_vector<T, S>::tag_type;
+
+ explicit lubrelu(ActivationLayerInfo act_info)
+ : valpha(wrapper::vdup_n(static_cast<T>(act_info.a()), ExactTagType{})),
+ vbeta(wrapper::vdup_n(static_cast<T>(act_info.b()), ExactTagType{}))
+ {
+ }
+
+ void operator()(ExactType &vval)
+ {
+ vval = wrapper::vmin(valpha, wrapper::vmax(vbeta, vval));
+ }
+
+ const ExactType valpha;
+ const ExactType vbeta;
+};
+} // namespace detail
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_DETAIL_NEACTIVATION_FUNCTION_DETAIL_H__ */
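A minimal sketch of applying one of these functors to a NEON register; the BOUNDED_RELU bound of 6.f is an illustrative assumption:

    #include "arm_compute/core/NEON/kernels/detail/NEActivationFunctionDetail.h"
    #include "arm_compute/core/Types.h"
    #include <arm_neon.h>

    void example()
    {
        using namespace arm_compute;
        // Bounded ReLU clamping each lane to [0, 6]
        detail::brelu<float, 4> act(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f));
        float32x4_t v = vdupq_n_f32(7.f);
        act(v); // each lane becomes min(6, max(0, 7)) == 6
    }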
diff --git a/arm_compute/core/NEON/wrapper/intrinsics/and.h b/arm_compute/core/NEON/wrapper/intrinsics/and.h
index 9b5cfd6b89..4910738e86 100644
--- a/arm_compute/core/NEON/wrapper/intrinsics/and.h
+++ b/arm_compute/core/NEON/wrapper/intrinsics/and.h
@@ -24,8 +24,6 @@
#ifndef __ARM_COMPUTE_WRAPPER_AND_H__
#define __ARM_COMPUTE_WRAPPER_AND_H__
-#include "arm_compute/core/NEON/wrapper/traits.h"
-
#include <arm_neon.h>
namespace arm_compute
@@ -55,6 +53,8 @@ VAND_IMPL(uint32_t, uint32x4_t, vandq, u32)
VAND_IMPL(int32_t, int32x4_t, vandq, s32)
VAND_IMPL(uint64_t, uint64x2_t, vandq, u64)
VAND_IMPL(int64_t, int64x2_t, vandq, s64)
-}
-}
+
+#undef VAND_IMPL
+} // namespace wrapper
+} // namespace arm_compute
#endif /* __ARM_COMPUTE_WRAPPER_AND_H__ */
diff --git a/arm_compute/core/NEON/wrapper/intrinsics/dup_n.h b/arm_compute/core/NEON/wrapper/intrinsics/dup_n.h
new file mode 100644
index 0000000000..1c07b4f3ff
--- /dev/null
+++ b/arm_compute/core/NEON/wrapper/intrinsics/dup_n.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_WRAPPER_DUP_N_H__
+#define __ARM_COMPUTE_WRAPPER_DUP_N_H__
+
+#include "arm_compute/core/NEON/wrapper/traits.h"
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VDUP_N_IMPL(stype, vtype, prefix, postfix, tag) \
+ inline vtype vdup_n(stype value, tag) \
+ { \
+ return prefix##_##postfix(value); \
+ }
+
+VDUP_N_IMPL(uint8_t, uint8x8_t, vdup_n, u8, traits::vector_64_tag)
+VDUP_N_IMPL(int8_t, int8x8_t, vdup_n, s8, traits::vector_64_tag)
+VDUP_N_IMPL(uint16_t, uint16x4_t, vdup_n, u16, traits::vector_64_tag)
+VDUP_N_IMPL(int16_t, int16x4_t, vdup_n, s16, traits::vector_64_tag)
+VDUP_N_IMPL(uint32_t, uint32x2_t, vdup_n, u32, traits::vector_64_tag)
+VDUP_N_IMPL(int32_t, int32x2_t, vdup_n, s32, traits::vector_64_tag)
+VDUP_N_IMPL(float, float32x2_t, vdup_n, f32, traits::vector_64_tag)
+
+VDUP_N_IMPL(uint8_t, uint8x16_t, vdupq_n, u8, traits::vector_128_tag)
+VDUP_N_IMPL(int8_t, int8x16_t, vdupq_n, s8, traits::vector_128_tag)
+VDUP_N_IMPL(uint16_t, uint16x8_t, vdupq_n, u16, traits::vector_128_tag)
+VDUP_N_IMPL(int16_t, int16x8_t, vdupq_n, s16, traits::vector_128_tag)
+VDUP_N_IMPL(uint32_t, uint32x4_t, vdupq_n, u32, traits::vector_128_tag)
+VDUP_N_IMPL(int32_t, int32x4_t, vdupq_n, s32, traits::vector_128_tag)
+VDUP_N_IMPL(float, float32x4_t, vdupq_n, f32, traits::vector_128_tag)
+
+#undef VDUP_N_IMPL
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_WRAPPER_DUP_N_H__ */
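The tag argument selects between the 64-bit and 128-bit overloads at compile time; a brief sketch:

    #include "arm_compute/core/NEON/wrapper/intrinsics/dup_n.h"

    void example()
    {
        using namespace arm_compute::wrapper;
        float32x2_t half_reg = vdup_n(1.f, traits::vector_64_tag{});  // resolves to vdup_n_f32
        float32x4_t full_reg = vdup_n(1.f, traits::vector_128_tag{}); // resolves to vdupq_n_f32
        (void)half_reg;
        (void)full_reg;
    }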
diff --git a/arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h b/arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h
new file mode 100644
index 0000000000..b302b366cd
--- /dev/null
+++ b/arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_WRAPPER_INTRINSICS_H__
+#define __ARM_COMPUTE_WRAPPER_INTRINSICS_H__
+
+#include "arm_compute/core/NEON/wrapper/intrinsics/and.h"
+#include "arm_compute/core/NEON/wrapper/intrinsics/dup_n.h"
+#include "arm_compute/core/NEON/wrapper/intrinsics/load.h"
+#include "arm_compute/core/NEON/wrapper/intrinsics/max.h"
+#include "arm_compute/core/NEON/wrapper/intrinsics/min.h"
+#include "arm_compute/core/NEON/wrapper/intrinsics/store.h"
+
+#endif /* __ARM_COMPUTE_WRAPPER_INTRINSICS_H__ */
diff --git a/arm_compute/core/NEON/wrapper/intrinsics/load.h b/arm_compute/core/NEON/wrapper/intrinsics/load.h
index 9629f2b4e0..442d857497 100644
--- a/arm_compute/core/NEON/wrapper/intrinsics/load.h
+++ b/arm_compute/core/NEON/wrapper/intrinsics/load.h
@@ -24,8 +24,6 @@
#ifndef __ARM_COMPUTE_WRAPPER_LOAD_H__
#define __ARM_COMPUTE_WRAPPER_LOAD_H__
-#include "arm_compute/core/NEON/wrapper/traits.h"
-
#include <arm_neon.h>
namespace arm_compute
@@ -63,6 +61,8 @@ VLOADQ_IMPL(int32_t, int32x4_t, s32)
//VLOAD_IMPL(uint64_t, uint64x1_t, u64)
//VLOAD_IMPL(int64_t, int64x1_t, s64)
VLOADQ_IMPL(float, float32x4_t, f32)
-}
-}
+
+#undef VLOAD_IMPL
+} // namespace wrapper
+} // namespace arm_compute
#endif /* __ARM_COMPUTE_WRAPPER_LOAD_H__ */
diff --git a/arm_compute/core/NEON/wrapper/intrinsics/max.h b/arm_compute/core/NEON/wrapper/intrinsics/max.h
new file mode 100644
index 0000000000..1a8e95de87
--- /dev/null
+++ b/arm_compute/core/NEON/wrapper/intrinsics/max.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_WRAPPER_MAX_H__
+#define __ARM_COMPUTE_WRAPPER_MAX_H__
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VMAX_IMPL(stype, vtype, prefix, postfix) \
+ inline vtype vmax(const vtype &a, const vtype &b) \
+ { \
+ return prefix##_##postfix(a, b); \
+ }
+
+VMAX_IMPL(uint8_t, uint8x8_t, vmax, u8)
+VMAX_IMPL(int8_t, int8x8_t, vmax, s8)
+VMAX_IMPL(uint16_t, uint16x4_t, vmax, u16)
+VMAX_IMPL(int16_t, int16x4_t, vmax, s16)
+VMAX_IMPL(uint32_t, uint32x2_t, vmax, u32)
+VMAX_IMPL(int32_t, int32x2_t, vmax, s32)
+VMAX_IMPL(float, float32x2_t, vmax, f32)
+
+VMAX_IMPL(uint8_t, uint8x16_t, vmaxq, u8)
+VMAX_IMPL(int8_t, int8x16_t, vmaxq, s8)
+VMAX_IMPL(uint16_t, uint16x8_t, vmaxq, u16)
+VMAX_IMPL(int16_t, int16x8_t, vmaxq, s16)
+VMAX_IMPL(uint32_t, uint32x4_t, vmaxq, u32)
+VMAX_IMPL(int32_t, int32x4_t, vmaxq, s32)
+VMAX_IMPL(float, float32x4_t, vmaxq, f32)
+
+#undef VMAX_IMPL
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_WRAPPER_MAX_H__ */
diff --git a/arm_compute/core/NEON/wrapper/intrinsics/min.h b/arm_compute/core/NEON/wrapper/intrinsics/min.h
new file mode 100644
index 0000000000..ae79631190
--- /dev/null
+++ b/arm_compute/core/NEON/wrapper/intrinsics/min.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_WRAPPER_MIN_H__
+#define __ARM_COMPUTE_WRAPPER_MIN_H__
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VMIN_IMPL(stype, vtype, prefix, postfix) \
+ inline vtype vmin(const vtype &a, const vtype &b) \
+ { \
+ return prefix##_##postfix(a, b); \
+ }
+
+VMIN_IMPL(uint8_t, uint8x8_t, vmin, u8)
+VMIN_IMPL(int8_t, int8x8_t, vmin, s8)
+VMIN_IMPL(uint16_t, uint16x4_t, vmin, u16)
+VMIN_IMPL(int16_t, int16x4_t, vmin, s16)
+VMIN_IMPL(uint32_t, uint32x2_t, vmin, u32)
+VMIN_IMPL(int32_t, int32x2_t, vmin, s32)
+VMIN_IMPL(float, float32x2_t, vmin, f32)
+
+VMIN_IMPL(uint8_t, uint8x16_t, vminq, u8)
+VMIN_IMPL(int8_t, int8x16_t, vminq, s8)
+VMIN_IMPL(uint16_t, uint16x8_t, vminq, u16)
+VMIN_IMPL(int16_t, int16x8_t, vminq, s16)
+VMIN_IMPL(uint32_t, uint32x4_t, vminq, u32)
+VMIN_IMPL(int32_t, int32x4_t, vminq, s32)
+VMIN_IMPL(float, float32x4_t, vminq, f32)
+
+#undef VMIN_IMPL
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_WRAPPER_MIN_H__ */
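Together with vmax above, these overloads express a type-agnostic lane-wise clamp, the same vmin(vmax(...)) shape the activation functors use; a sketch:

    #include "arm_compute/core/NEON/wrapper/wrapper.h"

    void example()
    {
        using namespace arm_compute::wrapper;
        // Clamp every lane of x into [0, 6]
        const float32x4_t lo = vdup_n(0.f, traits::vector_128_tag{});
        const float32x4_t hi = vdup_n(6.f, traits::vector_128_tag{});
        float32x4_t x        = vdup_n(7.5f, traits::vector_128_tag{});
        x = vmin(hi, vmax(lo, x)); // every lane is now 6.f
    }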
diff --git a/arm_compute/core/NEON/wrapper/intrinsics/store.h b/arm_compute/core/NEON/wrapper/intrinsics/store.h
index de57b7350f..be89602c09 100644
--- a/arm_compute/core/NEON/wrapper/intrinsics/store.h
+++ b/arm_compute/core/NEON/wrapper/intrinsics/store.h
@@ -24,8 +24,6 @@
#ifndef __ARM_COMPUTE_WRAPPER_STORE_H__
#define __ARM_COMPUTE_WRAPPER_STORE_H__
-#include "arm_compute/core/NEON/wrapper/traits.h"
-
#include <arm_neon.h>
namespace arm_compute
@@ -57,6 +55,8 @@ VSTORE_IMPL(int32_t, int32x4_t, vst1q, s32)
//VSTORE_IMPL(uint64_t, 2, vst1q, u64)
//VSTORE_IMPL(int64_t, 2, vst1q, s64)
VSTORE_IMPL(float, float32x4_t, vst1q, f32)
-}
-}
+
+#undef VSTORE_IMPL
+} // namespace wrapper
+} // namespace arm_compute
#endif /* __ARM_COMPUTE_WRAPPER_STORE_H__ */
diff --git a/arm_compute/core/NEON/wrapper/traits.h b/arm_compute/core/NEON/wrapper/traits.h
index 045839cf48..08b2c9b48f 100644
--- a/arm_compute/core/NEON/wrapper/traits.h
+++ b/arm_compute/core/NEON/wrapper/traits.h
@@ -35,31 +35,40 @@ namespace traits
// *INDENT-OFF*
// clang-format off
+/** 64-bit vector tag */
+struct vector_64_tag {};
+/** 128-bit vector tag */
+struct vector_128_tag {};
+
/** Create the appropriate NEON vector given its type and size */
template <typename T, int S> struct neon_vector;
/** Specializations */
-template <> struct neon_vector<uint8_t, 8>{ using type = uint8x8_t; };
-template <> struct neon_vector<int8_t, 8>{ using type = int8x8_t; };
-template <> struct neon_vector<uint8_t, 16>{ using type = uint8x16_t; };
-template <> struct neon_vector<int8_t, 16>{ using type = int8x16_t; };
-template <> struct neon_vector<uint16_t, 4>{ using type = uint16x4_t; };
-template <> struct neon_vector<int16_t, 4>{ using type = int16x4_t; };
-template <> struct neon_vector<uint16_t, 8>{ using type = uint16x8_t; };
-template <> struct neon_vector<int16_t, 8>{ using type = int16x8_t; };
-template <> struct neon_vector<uint32_t, 2>{ using type = uint32x2_t; };
-template <> struct neon_vector<int32_t, 2>{ using type = int32x2_t; };
-template <> struct neon_vector<uint32_t, 4>{ using type = uint32x4_t; };
-template <> struct neon_vector<int32_t, 4>{ using type = int32x4_t; };
-template <> struct neon_vector<uint64_t, 1>{ using type = uint64x1_t; };
-template <> struct neon_vector<int64_t, 1>{ using type = int64x1_t; };
-template <> struct neon_vector<uint64_t, 2>{ using type = uint64x2_t; };
-template <> struct neon_vector<int64_t, 2>{ using type = int64x2_t; };
-template <> struct neon_vector<float_t, 2>{ using type = float32x2_t; };
-template <> struct neon_vector<float_t, 4>{ using type = float32x4_t; };
+template <> struct neon_vector<uint8_t, 8>{ using type = uint8x8_t; using tag_type = vector_64_tag; };
+template <> struct neon_vector<int8_t, 8>{ using type = int8x8_t; using tag_type = vector_64_tag; };
+template <> struct neon_vector<uint8_t, 16>{ using type = uint8x16_t; using tag_type = vector_128_tag; };
+template <> struct neon_vector<int8_t, 16>{ using type = int8x16_t; using tag_type = vector_128_tag; };
+template <> struct neon_vector<uint16_t, 4>{ using type = uint16x4_t; using tag_type = vector_64_tag; };
+template <> struct neon_vector<int16_t, 4>{ using type = int16x4_t; using tag_type = vector_64_tag; };
+template <> struct neon_vector<uint16_t, 8>{ using type = uint16x8_t; using tag_type = vector_128_tag; };
+template <> struct neon_vector<int16_t, 8>{ using type = int16x8_t; using tag_type = vector_128_tag; };
+template <> struct neon_vector<uint32_t, 2>{ using type = uint32x2_t; using tag_type = vector_64_tag; };
+template <> struct neon_vector<int32_t, 2>{ using type = int32x2_t; using tag_type = vector_64_tag; };
+template <> struct neon_vector<uint32_t, 4>{ using type = uint32x4_t; using tag_type = vector_128_tag; };
+template <> struct neon_vector<int32_t, 4>{ using type = int32x4_t; using tag_type = vector_128_tag; };
+template <> struct neon_vector<uint64_t, 1>{ using type = uint64x1_t; using tag_type = vector_64_tag; };
+template <> struct neon_vector<int64_t, 1>{ using type = int64x1_t; using tag_type = vector_64_tag; };
+template <> struct neon_vector<uint64_t, 2>{ using type = uint64x2_t; using tag_type = vector_128_tag; };
+template <> struct neon_vector<int64_t, 2>{ using type = int64x2_t; using tag_type = vector_128_tag; };
+template <> struct neon_vector<float_t, 2>{ using type = float32x2_t; using tag_type = vector_64_tag; };
+template <> struct neon_vector<float_t, 4>{ using type = float32x4_t; using tag_type = vector_128_tag; };
+
+/** Helper type template to get the type of a neon vector */
template <typename T, int S> using neon_vector_t = typename neon_vector<T, S>::type;
+/** Helper type template to get the tag type of a neon vector */
+template <typename T, int S> using neon_vector_tag_t = typename neon_vector<T, S>::tag_type;
// clang-format on
// *INDENT-ON*
-}
-}
-}
+} // namespace traits
+} // namespace wrapper
+} // namespace arm_compute
#endif /* __ARM_COMPUTE_WRAPPER_TRAITS_H__ */
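A short sketch of how the trait and its new tag type combine with the vdup_n overloads; the alias names are local to the example:

    #include "arm_compute/core/NEON/wrapper/wrapper.h"

    namespace wt = arm_compute::wrapper::traits;

    using Vec = wt::neon_vector_t<float, 4>;     // float32x4_t
    using Tag = wt::neon_vector_tag_t<float, 4>; // wt::vector_128_tag

    void example()
    {
        Vec zeros = arm_compute::wrapper::vdup_n(0.f, Tag{});
        (void)zeros;
    }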
diff --git a/arm_compute/core/NEON/wrapper/wrapper.h b/arm_compute/core/NEON/wrapper/wrapper.h
index 9676d04d71..61dc42a69b 100644
--- a/arm_compute/core/NEON/wrapper/wrapper.h
+++ b/arm_compute/core/NEON/wrapper/wrapper.h
@@ -24,10 +24,10 @@
#ifndef __ARM_COMPUTE_WRAPPER_H__
#define __ARM_COMPUTE_WRAPPER_H__
+// Traits
#include "arm_compute/core/NEON/wrapper/traits.h"
-#include "arm_compute/core/NEON/wrapper/intrinsics/and.h"
-#include "arm_compute/core/NEON/wrapper/intrinsics/load.h"
-#include "arm_compute/core/NEON/wrapper/intrinsics/store.h"
+// Intrinsics Overloads
+#include "arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h"
#endif /* __ARM_COMPUTE_WRAPPER_H__ */
diff --git a/arm_compute/runtime/NEON/functions/NEBatchNormalizationLayer.h b/arm_compute/runtime/NEON/functions/NEBatchNormalizationLayer.h
index 5c8200beda..242144c987 100644
--- a/arm_compute/runtime/NEON/functions/NEBatchNormalizationLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEBatchNormalizationLayer.h
@@ -58,6 +58,7 @@ public:
* @param[in] gamma Gamma values tensor. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
* @param[in] epsilon Small value to avoid division with zero.
* @param[in] act_info (Optional) Activation layer information in case of a fused activation. Only RELU, BOUNDED_RELU and LU_BOUNDED_RELU supported.
+ * Data types supported: F32
*/
void configure(ITensor *input, ITensor *output, const ITensor *mean, const ITensor *var, const ITensor *beta, const ITensor *gamma, float epsilon,
ActivationLayerInfo act_info = ActivationLayerInfo());
@@ -73,6 +74,7 @@ public:
* @param[in] gamma Gamma values tensor info. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
* @param[in] epsilon Small value to avoid division with zero.
* @param[in] act_info (Optional) Activation layer information in case of a fused activation. Only RELU, BOUNDED_RELU and LU_BOUNDED_RELU supported.
+ * Data types supported: F32
*
* @return a status
*/
@@ -86,9 +88,6 @@ public:
private:
NEBatchNormalizationLayerKernel _norm_kernel; /**< Batch normalization layer kernel */
- // COMPMID-906 Use fused activation in NEON Batch normalization
- NEActivationLayer _act_func;
- bool _act_info_enabled;
};
}
#endif /* __ARM_COMPUTE_NEBATCHNORMALIZATIONLAYER_H__ */
diff --git a/src/core/NEON/kernels/NEBatchNormalizationLayerKernel.cpp b/src/core/NEON/kernels/NEBatchNormalizationLayerKernel.cpp
index f5144c6bf3..1f730a2c3c 100644
--- a/src/core/NEON/kernels/NEBatchNormalizationLayerKernel.cpp
+++ b/src/core/NEON/kernels/NEBatchNormalizationLayerKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -26,19 +26,34 @@
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/NEON/NEFixedPoint.h"
#include "arm_compute/core/NEON/NEMath.h"
+#include "arm_compute/core/NEON/kernels/detail/NEActivationFunctionDetail.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Utils.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/Window.h"
+#include <map>
+
using namespace arm_compute;
namespace
{
-Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, const ITensorInfo *mean, const ITensorInfo *var, const ITensorInfo *beta, const ITensorInfo *gamma, float epsilon)
+Status
+validate_arguments(const ITensorInfo *input, const ITensorInfo *output, const ITensorInfo *mean, const ITensorInfo *var,
+ const ITensorInfo *beta, const ITensorInfo *gamma, float epsilon, ActivationLayerInfo act_info)
{
ARM_COMPUTE_UNUSED(epsilon);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QS8, DataType::QS16, DataType::F16, DataType::F32);
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QS8, DataType::QS16, DataType::F16,
+ DataType::F32);
+
+ if(act_info.enabled())
+ {
+ ActivationLayerInfo::ActivationFunction act = act_info.activation();
+ ARM_COMPUTE_RETURN_ERROR_ON(input->data_type() != DataType::F32);
+ ARM_COMPUTE_RETURN_ERROR_ON(act != ActivationLayerInfo::ActivationFunction::RELU && act != ActivationLayerInfo::ActivationFunction::BOUNDED_RELU
+ && act != ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU);
+ ARM_COMPUTE_RETURN_ERROR_ON(act_info.b() > act_info.a());
+ }
if(nullptr != output)
{
@@ -67,28 +82,32 @@ std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITen
Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
return std::make_pair(err, win);
}
+} //namespace
-void batch_normalization_q8(ITensor *in, ITensor *out, const ITensor *mean, const ITensor *var, const ITensor *beta, const ITensor *gamma, float epsilon, const Window &window)
+template <bool fused_activation>
+void NEBatchNormalizationLayerKernel::batch_normalization_qs8(const Window &window)
{
- Iterator input(in, window);
- Iterator output(out, window);
+ static_assert(!fused_activation, "Activation is not supported for QS8");
+
+ Iterator input(_input, window);
+ Iterator output(_output, window);
// Hold information about the current feature map we are iterating.
// Only compute denominator and NEON vectors once per feature map.
int slice = -1;
- const int fixed_point_position = in->info()->fixed_point_position();
- const auto input_mean = reinterpret_cast<const qint8_t *>(mean->ptr_to_element(Coordinates(0, 0)));
- const auto input_var = reinterpret_cast<const qint8_t *>(var->ptr_to_element(Coordinates(0, 0)));
- const auto input_gamma = reinterpret_cast<const qint8_t *>(gamma->ptr_to_element(Coordinates(0, 0)));
- const auto input_beta = reinterpret_cast<const qint8_t *>(beta->ptr_to_element(Coordinates(0, 0)));
+ const int fixed_point_position = _input->info()->fixed_point_position();
+ const auto input_mean = reinterpret_cast<const qint8_t *>(_mean->ptr_to_element(Coordinates(0, 0)));
+ const auto input_var = reinterpret_cast<const qint8_t *>(_var->ptr_to_element(Coordinates(0, 0)));
+ const auto input_gamma = reinterpret_cast<const qint8_t *>(_gamma->ptr_to_element(Coordinates(0, 0)));
+ const auto input_beta = reinterpret_cast<const qint8_t *>(_beta->ptr_to_element(Coordinates(0, 0)));
qint8x16_t mean_vec = vdupq_n_qs8(0);
qint8x16_t var_vec = vdupq_n_qs8(0);
qint8x16_t gamma_vec = vdupq_n_qs8(0);
qint8x16_t beta_vec = vdupq_n_qs8(0);
qint8x16_t denominator = vdupq_n_qs8(0);
- const qint8x16_t epsilon_vec = vdupq_n_qs8(sqcvt_qs8_f32(epsilon, fixed_point_position));
+ const qint8x16_t epsilon_vec = vdupq_n_qs8(sqcvt_qs8_f32(_epsilon, fixed_point_position));
execute_window_loop(window, [&](const Coordinates & id)
{
if(slice != id.z())
@@ -112,27 +131,30 @@ void batch_normalization_q8(ITensor *in, ITensor *out, const ITensor *mean, cons
input, output);
}
-void batch_normalization_q16(ITensor *in, ITensor *out, const ITensor *mean, const ITensor *var, const ITensor *beta, const ITensor *gamma, float epsilon, const Window &window)
+template <bool fused_activation>
+void NEBatchNormalizationLayerKernel::batch_normalization_qs16(const Window &window)
{
- Iterator input(in, window);
- Iterator output(out, window);
+ static_assert(!fused_activation, "Activation is not supported for QS16");
+
+ Iterator input(_input, window);
+ Iterator output(_output, window);
// Hold information about the current feature map we are iterating.
// Only compute denominator and NEON vectors once per feature map.
int slice = -1;
- const int fixed_point_position = in->info()->fixed_point_position();
- const auto input_mean = reinterpret_cast<const qint16_t *>(mean->ptr_to_element(Coordinates(0, 0)));
- const auto input_var = reinterpret_cast<const qint16_t *>(var->ptr_to_element(Coordinates(0, 0)));
- const auto input_gamma = reinterpret_cast<const qint16_t *>(gamma->ptr_to_element(Coordinates(0, 0)));
- const auto input_beta = reinterpret_cast<const qint16_t *>(beta->ptr_to_element(Coordinates(0, 0)));
+ const int fixed_point_position = _input->info()->fixed_point_position();
+ const auto input_mean = reinterpret_cast<const qint16_t *>(_mean->ptr_to_element(Coordinates(0, 0)));
+ const auto input_var = reinterpret_cast<const qint16_t *>(_var->ptr_to_element(Coordinates(0, 0)));
+ const auto input_gamma = reinterpret_cast<const qint16_t *>(_gamma->ptr_to_element(Coordinates(0, 0)));
+ const auto input_beta = reinterpret_cast<const qint16_t *>(_beta->ptr_to_element(Coordinates(0, 0)));
qint16x8_t mean_vec = vdupq_n_qs16(0);
qint16x8_t var_vec = vdupq_n_qs16(0);
qint16x8_t gamma_vec = vdupq_n_qs16(0);
qint16x8_t beta_vec = vdupq_n_qs16(0);
qint16x8_t denominator = vdupq_n_qs16(0);
- const qint16x8_t epsilon_vec = vdupq_n_qs16(sqcvt_qs16_f32(epsilon, fixed_point_position));
+ const qint16x8_t epsilon_vec = vdupq_n_qs16(sqcvt_qs16_f32(_epsilon, fixed_point_position));
execute_window_loop(window, [&](const Coordinates & id)
{
if(slice != id.z())
@@ -156,101 +178,162 @@ void batch_normalization_q16(ITensor *in, ITensor *out, const ITensor *mean, con
input, output);
}
-void batch_normalization_fp32(ITensor *in, ITensor *out, const ITensor *mean, const ITensor *var, const ITensor *beta, const ITensor *gamma, float epsilon, const Window &window)
+template <bool fused_activation>
+void NEBatchNormalizationLayerKernel::batch_normalization_fp16(const Window &window)
{
- Iterator input(in, window);
- Iterator output(out, window);
+ static_assert(!fused_activation, "Activation is not supported for FP16");
+
+ ARM_COMPUTE_UNUSED(window);
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+ Iterator input(_input, window);
+ Iterator output(_output, window);
// Hold information about the current feature map we are iterating.
// Only compute denominator and NEON vectors once per feature map.
int slice = -1;
- const auto input_mean = reinterpret_cast<const float *>(mean->ptr_to_element(Coordinates(0, 0)));
- const auto input_var = reinterpret_cast<const float *>(var->ptr_to_element(Coordinates(0, 0)));
- const auto input_gamma = reinterpret_cast<const float *>(gamma->ptr_to_element(Coordinates(0, 0)));
- const auto input_beta = reinterpret_cast<const float *>(beta->ptr_to_element(Coordinates(0, 0)));
+ const auto input_mean = reinterpret_cast<const float16_t *>(_mean->ptr_to_element(Coordinates(0, 0)));
+ const auto input_var = reinterpret_cast<const float16_t *>(_var->ptr_to_element(Coordinates(0, 0)));
+ const auto input_gamma = reinterpret_cast<const float16_t *>(_gamma->ptr_to_element(Coordinates(0, 0)));
+ const auto input_beta = reinterpret_cast<const float16_t *>(_beta->ptr_to_element(Coordinates(0, 0)));
- float32x4_t mean_vec = vdupq_n_f32(0.0);
- float32x4_t var_vec = vdupq_n_f32(0.0);
- float32x4_t gamma_vec = vdupq_n_f32(0.0);
- float32x4_t beta_vec = vdupq_n_f32(0.0);
- float32x4_t denominator = vdupq_n_f32(0.0);
- const float32x4_t epsilon_vec = vdupq_n_f32(epsilon);
+ float16x8_t mean_vec = vdupq_n_f16(0.0);
+ float16x8_t var_vec = vdupq_n_f16(0.0);
+ float16x8_t gamma_vec = vdupq_n_f16(0.0);
+ float16x8_t beta_vec = vdupq_n_f16(0.0);
+ float16x8_t denominator = vdupq_n_f16(0.0);
+ const float16x8_t epsilon_vec = vdupq_n_f16(_epsilon);
execute_window_loop(window, [&](const Coordinates & id)
{
if(slice != id.z())
{
// Construct vectors
- mean_vec = vdupq_n_f32(*(input_mean + id.z()));
- var_vec = vdupq_n_f32(*(input_var + id.z()));
- gamma_vec = vdupq_n_f32(*(input_gamma + id.z()));
- beta_vec = vdupq_n_f32(*(input_beta + id.z()));
+ mean_vec = vdupq_n_f16(*(input_mean + id.z()));
+ var_vec = vdupq_n_f16(*(input_var + id.z()));
+ gamma_vec = vdupq_n_f16(*(input_gamma + id.z()));
+ beta_vec = vdupq_n_f16(*(input_beta + id.z()));
// Calculate denominator
- denominator = vinvsqrtq_f32(vaddq_f32(var_vec, epsilon_vec));
+ denominator = vinvsqrtq_f16(vaddq_f16(var_vec, epsilon_vec));
slice = id.z();
}
// Calculate x bar and store results
- const float32x4_t numerator = vsubq_f32(vld1q_f32(reinterpret_cast<const float *>(input.ptr())), mean_vec);
- const float32x4_t x_bar = vmulq_f32(numerator, denominator);
- vst1q_f32(reinterpret_cast<float *>(output.ptr()), vmlaq_f32(beta_vec, x_bar, gamma_vec));
+ const float16x8_t numerator = vsubq_f16(vld1q_f16(reinterpret_cast<const float16_t *>(input.ptr())), mean_vec);
+ const float16x8_t x_bar = vmulq_f16(numerator, denominator);
+ vst1q_f16(reinterpret_cast<float16_t *>(output.ptr()), vaddq_f16(beta_vec, vmulq_f16(x_bar, gamma_vec)));
},
input, output);
+#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
}
-#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-void batch_normalization_fp16(ITensor *in, ITensor *out, const ITensor *mean, const ITensor *var, const ITensor *beta, const ITensor *gamma, float epsilon, const Window &window)
+template <bool fused_activation, typename F>
+void NEBatchNormalizationLayerKernel::batch_normalization_fp32(const Window &window)
{
- Iterator input(in, window);
- Iterator output(out, window);
+ Iterator input(_input, window);
+ Iterator output(_output, window);
+
+ F activation_functor(_act_info);
// Hold information about the current feature map we are iterating.
// Only compute denominator and NEON vectors once per feature map.
int slice = -1;
- const auto input_mean = reinterpret_cast<const float16_t *>(mean->ptr_to_element(Coordinates(0, 0)));
- const auto input_var = reinterpret_cast<const float16_t *>(var->ptr_to_element(Coordinates(0, 0)));
- const auto input_gamma = reinterpret_cast<const float16_t *>(gamma->ptr_to_element(Coordinates(0, 0)));
- const auto input_beta = reinterpret_cast<const float16_t *>(beta->ptr_to_element(Coordinates(0, 0)));
+ const auto input_mean = reinterpret_cast<const float *>(_mean->ptr_to_element(Coordinates(0, 0)));
+ const auto input_var = reinterpret_cast<const float *>(_var->ptr_to_element(Coordinates(0, 0)));
+ const auto input_gamma = reinterpret_cast<const float *>(_gamma->ptr_to_element(Coordinates(0, 0)));
+ const auto input_beta = reinterpret_cast<const float *>(_beta->ptr_to_element(Coordinates(0, 0)));
- float16x8_t mean_vec = vdupq_n_f16(0.0);
- float16x8_t var_vec = vdupq_n_f16(0.0);
- float16x8_t gamma_vec = vdupq_n_f16(0.0);
- float16x8_t beta_vec = vdupq_n_f16(0.0);
- float16x8_t denominator = vdupq_n_f16(0.0);
- const float16x8_t epsilon_vec = vdupq_n_f16(epsilon);
+ float32x4_t mean_vec = vdupq_n_f32(0.0);
+ float32x4_t var_vec = vdupq_n_f32(0.0);
+ float32x4_t gamma_vec = vdupq_n_f32(0.0);
+ float32x4_t beta_vec = vdupq_n_f32(0.0);
+ float32x4_t denominator = vdupq_n_f32(0.0);
+ const float32x4_t epsilon_vec = vdupq_n_f32(_epsilon);
execute_window_loop(window, [&](const Coordinates & id)
{
if(slice != id.z())
{
// Construct vectors
- mean_vec = vdupq_n_f16(*(input_mean + id.z()));
- var_vec = vdupq_n_f16(*(input_var + id.z()));
- gamma_vec = vdupq_n_f16(*(input_gamma + id.z()));
- beta_vec = vdupq_n_f16(*(input_beta + id.z()));
+ mean_vec = vdupq_n_f32(*(input_mean + id.z()));
+ var_vec = vdupq_n_f32(*(input_var + id.z()));
+ gamma_vec = vdupq_n_f32(*(input_gamma + id.z()));
+ beta_vec = vdupq_n_f32(*(input_beta + id.z()));
// Calculate denominator
- denominator = vinvsqrtq_f16(vaddq_f16(var_vec, epsilon_vec));
+ denominator = vinvsqrtq_f32(vaddq_f32(var_vec, epsilon_vec));
slice = id.z();
}
- // Calculate x bar and store results
- const float16x8_t numerator = vsubq_f16(vld1q_f16(reinterpret_cast<const float16_t *>(input.ptr())), mean_vec);
- const float16x8_t x_bar = vmulq_f16(numerator, denominator);
- vst1q_f16(reinterpret_cast<float16_t *>(output.ptr()), vaddq_f16(beta_vec, vmulq_f16(x_bar, gamma_vec)));
+ // Calculate x bar
+ const float32x4_t numerator = vsubq_f32(vld1q_f32(reinterpret_cast<const float *>(input.ptr())), mean_vec);
+ const float32x4_t x_bar = vmulq_f32(numerator, denominator);
+ float32x4_t res = vmlaq_f32(beta_vec, x_bar, gamma_vec);
+
+ // Perform fused activation
+ if(fused_activation)
+ {
+ activation_functor(res);
+ }
+
+ // Store results
+ vst1q_f32(reinterpret_cast<float *>(output.ptr()), res);
},
input, output);
}
-#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
-} // namespace
+
+void NEBatchNormalizationLayerKernel::configure_non_fused()
+{
+ switch(_input->info()->data_type())
+ {
+ case DataType::QS8:
+ _func = &NEBatchNormalizationLayerKernel::batch_normalization_qs8<false>;
+ break;
+ case DataType::QS16:
+ _func = &NEBatchNormalizationLayerKernel::batch_normalization_qs16<false>;
+ break;
+ case DataType::F16:
+ _func = &NEBatchNormalizationLayerKernel::batch_normalization_fp16<false>;
+ break;
+ case DataType::F32:
+ _func = &NEBatchNormalizationLayerKernel::batch_normalization_fp32<false, ::detail::dummy<float, 4>>;
+ break;
+ default:
+ ARM_COMPUTE_ERROR("Element size not supported");
+ break;
+ }
+}
+
+void NEBatchNormalizationLayerKernel::configure_fused()
+{
+ // Fused Batched Normalization with activation functions : FP32
+ static std::map<ActivationLayerInfo::ActivationFunction, BatchNormFunctionPtr> bn_fused_map_f32 =
+ {
+ { ActivationLayerInfo::ActivationFunction::RELU, &NEBatchNormalizationLayerKernel::batch_normalization_fp32<true, ::detail::relu<float, 4>> },
+ { ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, &NEBatchNormalizationLayerKernel::batch_normalization_fp32<true, ::detail::brelu<float, 4>> },
+ { ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, &NEBatchNormalizationLayerKernel::batch_normalization_fp32<true, ::detail::lubrelu<float, 4>> }
+ };
+
+ switch(_input->info()->data_type())
+ {
+ case DataType::F32:
+ _func = bn_fused_map_f32[_act_info.activation()];
+ break;
+ default:
+ ARM_COMPUTE_ERROR("Element size not supported");
+ break;
+ }
+}
NEBatchNormalizationLayerKernel::NEBatchNormalizationLayerKernel()
- : _func(nullptr), _input(nullptr), _output(nullptr), _mean(nullptr), _var(nullptr), _gamma(nullptr), _beta(nullptr), _epsilon()
+ : _func(nullptr), _input(nullptr), _output(nullptr), _mean(nullptr), _var(nullptr), _gamma(nullptr), _beta(nullptr), _epsilon(), _act_info()
{
}
-void NEBatchNormalizationLayerKernel::configure(ITensor *input, ITensor *output, const ITensor *mean, const ITensor *var, const ITensor *beta, const ITensor *gamma, float epsilon)
+void NEBatchNormalizationLayerKernel::configure(ITensor *input, ITensor *output,
+ const ITensor *mean, const ITensor *var,
+ const ITensor *beta, const ITensor *gamma,
+ float epsilon, ActivationLayerInfo act_info)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, mean, var, beta, gamma);
@@ -264,40 +347,33 @@ void NEBatchNormalizationLayerKernel::configure(ITensor *input, ITensor *output,
output_info = output->info();
}
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output_info, mean->info(), var->info(), beta->info(), gamma->info(), epsilon));
+ ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output_info,
+ mean->info(), var->info(),
+ beta->info(), gamma->info(),
+ epsilon, act_info));
- _input = input;
- _output = input;
- _mean = mean;
- _var = var;
- _gamma = gamma;
- _beta = beta;
- _epsilon = epsilon;
+ _input = input;
+ _output = input;
+ _mean = mean;
+ _var = var;
+ _gamma = gamma;
+ _beta = beta;
+ _epsilon = epsilon;
+ _act_info = act_info;
if(output != nullptr)
{
_output = output;
}
- switch(input->info()->data_type())
+ // Configure activation function to run
+ if(_act_info.enabled())
{
- case DataType::QS8:
- _func = &batch_normalization_q8;
- break;
- case DataType::QS16:
- _func = &batch_normalization_q16;
- break;
- case DataType::F32:
- _func = &batch_normalization_fp32;
- break;
- case DataType::F16:
-#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
- _func = &batch_normalization_fp16;
- break;
-#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
- default:
- ARM_COMPUTE_ERROR("Element size not supported");
- break;
+ configure_fused();
+ }
+ else
+ {
+ configure_non_fused();
}
// Configure kernel window
@@ -306,11 +382,12 @@ void NEBatchNormalizationLayerKernel::configure(ITensor *input, ITensor *output,
INEKernel::configure(win_config.second);
}
-Status NEBatchNormalizationLayerKernel::validate(const ITensorInfo *input, const ITensorInfo *output, const ITensorInfo *mean, const ITensorInfo *var, const ITensorInfo *beta,
- const ITensorInfo *gamma,
- float epsilon)
+Status NEBatchNormalizationLayerKernel::validate(const ITensorInfo *input, const ITensorInfo *output,
+ const ITensorInfo *mean, const ITensorInfo *var,
+ const ITensorInfo *beta, const ITensorInfo *gamma,
+ float epsilon, ActivationLayerInfo act_info)
{
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, mean, var, beta, gamma, epsilon));
+ ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, mean, var, beta, gamma, epsilon, act_info));
ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(), output ? output->clone().get() : nullptr).first);
return Status{};
@@ -323,5 +400,5 @@ void NEBatchNormalizationLayerKernel::run(const Window &window, const ThreadInfo
ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window);
ARM_COMPUTE_ERROR_ON(_func == nullptr);
- (*_func)(_input, _output, _mean, _var, _beta, _gamma, _epsilon, window);
+ (this->*_func)(window);
}
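The dispatch change above swaps free-function pointers for pointers to member functions, so the per-type kernels can read the configured state (_input, _epsilon, _act_info) directly. A self-contained sketch of that pattern, with hypothetical names independent of the library:

    #include <iostream>

    struct Kernel
    {
        using FnPtr = void (Kernel::*)(int);

        template <bool Fused>
        void run_fp32(int n)
        {
            std::cout << (Fused ? "fused" : "plain") << " run over " << n << " elements\n";
        }

        void configure(bool fused)
        {
            _func = fused ? &Kernel::run_fp32<true> : &Kernel::run_fp32<false>;
        }

        void run(int n)
        {
            (this->*_func)(n); // same call syntax as the kernel's run()
        }

        FnPtr _func = nullptr;
    };

    int main()
    {
        Kernel k;
        k.configure(true);
        k.run(1024); // prints "fused run over 1024 elements"
    }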
diff --git a/src/runtime/NEON/functions/NEBatchNormalizationLayer.cpp b/src/runtime/NEON/functions/NEBatchNormalizationLayer.cpp
index b3753e842d..bb224db163 100644
--- a/src/runtime/NEON/functions/NEBatchNormalizationLayer.cpp
+++ b/src/runtime/NEON/functions/NEBatchNormalizationLayer.cpp
@@ -33,40 +33,25 @@
using namespace arm_compute;
NEBatchNormalizationLayer::NEBatchNormalizationLayer()
- : _norm_kernel(), _act_func(), _act_info_enabled(false)
+ : _norm_kernel()
{
}
void NEBatchNormalizationLayer::configure(ITensor *input, ITensor *output, const ITensor *mean, const ITensor *var, const ITensor *beta, const ITensor *gamma, float epsilon,
ActivationLayerInfo act_info)
{
- _act_info_enabled = act_info.enabled();
-
// Configure kernel
- _norm_kernel.configure(input, output, mean, var, beta, gamma, epsilon);
- if(_act_info_enabled)
- {
- _act_func.configure(output, nullptr, act_info);
- }
+ _norm_kernel.configure(input, output, mean, var, beta, gamma, epsilon, act_info);
}
Status NEBatchNormalizationLayer::validate(const ITensorInfo *input, const ITensorInfo *output, const ITensorInfo *mean, const ITensorInfo *var, const ITensorInfo *beta, const ITensorInfo *gamma,
float epsilon, ActivationLayerInfo act_info)
{
- ARM_COMPUTE_RETURN_ON_ERROR(NEBatchNormalizationLayerKernel::validate(input, output, mean, var, beta, gamma, epsilon));
- if(act_info.enabled())
- {
- ARM_COMPUTE_RETURN_ON_ERROR(NEActivationLayer::validate(output, nullptr, act_info));
- }
-
+ ARM_COMPUTE_RETURN_ON_ERROR(NEBatchNormalizationLayerKernel::validate(input, output, mean, var, beta, gamma, epsilon, act_info));
return Status{};
}
void NEBatchNormalizationLayer::run()
{
NEScheduler::get().schedule(&_norm_kernel, Window::DimY);
- if(_act_info_enabled)
- {
- _act_func.run();
- }
}
diff --git a/tests/benchmark/NEON/BatchNormalizationLayer.cpp b/tests/benchmark/NEON/BatchNormalizationLayer.cpp
index 2aae3a480b..25200374f3 100644
--- a/tests/benchmark/NEON/BatchNormalizationLayer.cpp
+++ b/tests/benchmark/NEON/BatchNormalizationLayer.cpp
@@ -55,7 +55,7 @@ TEST_SUITE(NEON)
REGISTER_FIXTURE_DATA_TEST_CASE(MobileNetBatchNormalizationLayer, NEBatchNormalizationLayerFixture, framework::DatasetMode::ALL,
framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::MobileNetBatchNormalizationLayerDataset(),
- framework::dataset::make("ActivationInfo", ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f))),
+ framework::dataset::make("ActivationInfo", ActivationLayerInfo())),
data_types),
framework::dataset::make("Batches", 1)));
REGISTER_FIXTURE_DATA_TEST_CASE(YOLOV2BatchNormalizationLayer, NEBatchNormalizationLayerFixture, framework::DatasetMode::ALL,
@@ -73,7 +73,7 @@ REGISTER_FIXTURE_DATA_TEST_CASE(GoogLeNetInceptionV4BatchNormalizationLayer, NEB
TEST_SUITE(NIGHTLY)
REGISTER_FIXTURE_DATA_TEST_CASE(MobileNetBatchNormalizationLayer, NEBatchNormalizationLayerFixture, framework::DatasetMode::NIGHTLY,
framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::MobileNetBatchNormalizationLayerDataset(),
- framework::dataset::make("ActivationInfo", ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f))),
+ framework::dataset::make("ActivationInfo", ActivationLayerInfo())),
data_types),
framework::dataset::make("Batches", { 4, 8 })));
REGISTER_FIXTURE_DATA_TEST_CASE(YOLOV2BatchNormalizationLayer, NEBatchNormalizationLayerFixture, framework::DatasetMode::NIGHTLY,
diff --git a/tests/validation/NEON/BatchNormalizationLayer.cpp b/tests/validation/NEON/BatchNormalizationLayer.cpp
index 3501c359db..054ed278a2 100644
--- a/tests/validation/NEON/BatchNormalizationLayer.cpp
+++ b/tests/validation/NEON/BatchNormalizationLayer.cpp
@@ -162,7 +162,7 @@ TEST_SUITE_END()
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
TEST_SUITE(Float16)
FIXTURE_DATA_TEST_CASE(Random, NEBatchNormalizationLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::RandomBatchNormalizationLayerDataset(),
- act_infos),
+ framework::dataset::make("ActivationInfo", ActivationLayerInfo())),
framework::dataset::make("DataType", DataType::F16)))
{
// Validate output