From b49a7153c901b5c523a3d07815b79a4f460533b1 Mon Sep 17 00:00:00 2001
From: Pablo Tello
Date: Tue, 11 Jul 2017 16:31:35 +0100
Subject: COMPMID-421: Added FP16 support to Softmax.

Change-Id: If48178689e7cdadf1858556438c7292128be5b92
Reviewed-on: http://mpd-gerrit.cambridge.arm.com/80436
Tested-by: Kaizen
Reviewed-by: Moritz Pflanzer
---
 .../core/NEON/kernels/NESoftmaxLayerKernel.h       |   6 +-
 .../runtime/NEON/functions/NESoftmaxLayer.h        |   2 +-
 scripts/check_clang-tidy.py                        |   1 +
 scripts/clang-tidy.h                               |   2 +-
 src/core/NEON/kernels/NESoftmaxLayerKernel.cpp     | 154 ++++++++++++++++++++-
 src/runtime/NEON/functions/NESoftmaxLayer.cpp      |   2 +-
 tests/validation/NEON/ActivationLayer.cpp          |   2 +-
 tests/validation/NEON/ConvolutionLayerDirect.cpp   |   4 +-
 tests/validation/NEON/PoolingLayer.cpp             |   2 +-
 tests/validation/NEON/SoftmaxLayer.cpp             |  26 +++-
 10 files changed, 185 insertions(+), 16 deletions(-)

diff --git a/arm_compute/core/NEON/kernels/NESoftmaxLayerKernel.h b/arm_compute/core/NEON/kernels/NESoftmaxLayerKernel.h
index 53eef8d665..2caef55498 100644
--- a/arm_compute/core/NEON/kernels/NESoftmaxLayerKernel.h
+++ b/arm_compute/core/NEON/kernels/NESoftmaxLayerKernel.h
@@ -39,7 +39,7 @@ public:
     NELogits1DMaxKernel();
     /** Set the input and output tensors.
      *
-     * @param[in]  input  Source tensor. Data types supported: QS8/QS16/F32.
+     * @param[in]  input  Source tensor. Data types supported: QS8/QS16/F16/F32.
      * @param[out] output Destination tensor. Data types supported: same as @p input
      */
     void configure(const ITensor *input, ITensor *output);
@@ -74,7 +74,7 @@ public:
     ~NELogits1DShiftExpSumKernel() = default;
     /** Set the input and output tensors.
      *
-     * @param[in]  input  Source tensor. Data types supported: QS8/QS16/F32.
+     * @param[in]  input  Source tensor. Data types supported: QS8/QS16/F16/F32.
      * @param[in]  max    Max values tensor. Data types supported: same as @p input.
      * @param[out] output Destination tensor. Data types supported: same as @p input.
      * @param[out] sum    Sum of 1D logits tensor. Data types supported: same as @p input.
@@ -113,7 +113,7 @@ public:
     ~NELogits1DNormKernel() = default;
     /** Set the input and output tensors.
      *
-     * @param[in]  input  Source tensor. Data types supported: QS8/QS16/F32.
+     * @param[in]  input  Source tensor. Data types supported: QS8/QS16/F16/F32.
      * @param[in]  sum    Sum tensor. The number of dimensions should be dim(input)-1. Data types supported: same as @p input.
      * @param[out] output Destination tensor. Data types supported: same as @p input.
      */
diff --git a/arm_compute/runtime/NEON/functions/NESoftmaxLayer.h b/arm_compute/runtime/NEON/functions/NESoftmaxLayer.h
index 44a69d8c19..01402aee63 100644
--- a/arm_compute/runtime/NEON/functions/NESoftmaxLayer.h
+++ b/arm_compute/runtime/NEON/functions/NESoftmaxLayer.h
@@ -50,7 +50,7 @@ public:
     NESoftmaxLayer();
     /** Set the input and output tensors.
      *
-     * @param[in]  input  Source tensor. Data types supported: QS8/QS16/F32.
+     * @param[in]  input  Source tensor. Data types supported: QS8/QS16/F16/F32.
      * @param[out] output Destination tensor. Data types supported: same as @p input.
      */
     void configure(ITensor *input, ITensor *output);
diff --git a/scripts/check_clang-tidy.py b/scripts/check_clang-tidy.py
index 6c2173b6fe..30bfca133d 100755
--- a/scripts/check_clang-tidy.py
+++ b/scripts/check_clang-tidy.py
@@ -47,6 +47,7 @@ if __name__ == "__main__":
                 ("ReferenceCPP.cpp" in line and re.search(r"parameter '[^']+' is unused", line)) or
                 ("NEGEMMMatrixMultiplyKernel.cpp" in line and "do not use C-style cast to convert between unrelated types" in line) or
                 ("NEPoolingLayerKernel.cpp" in line and "do not use C-style cast to convert between unrelated types" in line) or
+                ("NESoftmaxLayerKernel.cpp" in line and "do not use C-style cast to convert between unrelated types" in line) or
                 "3rdparty" in line):
                 continue
diff --git a/scripts/clang-tidy.h b/scripts/clang-tidy.h
index a780ab7218..7a629e6250 100644
--- a/scripts/clang-tidy.h
+++ b/scripts/clang-tidy.h
@@ -10,7 +10,7 @@ inline float16x4_t vpmax_f16 (float16x4_t, float16x4_t)
 {
     return vdup_n_f16(0);
 }
-inline float16x4_t vpadd_f16 (float16x4_t, float16x4_t)
+inline float16x4_t vpadd_f16(float16x4_t, float16x4_t)
 {
     return vdup_n_f16(0);
 }
diff --git a/src/core/NEON/kernels/NESoftmaxLayerKernel.cpp b/src/core/NEON/kernels/NESoftmaxLayerKernel.cpp
index fe62d7b575..79fcba1dfb 100644
--- a/src/core/NEON/kernels/NESoftmaxLayerKernel.cpp
+++ b/src/core/NEON/kernels/NESoftmaxLayerKernel.cpp
@@ -106,6 +106,41 @@ void logits_1d_max_qs16(const ITensor *in, ITensor *out, const Window &window)
     }
     while(window.slide_window_slice_1D(in_slice) && window.slide_window_slice_1D(max_slice));
 }
+
+#ifdef ARM_COMPUTE_ENABLE_FP16
+void logits_1d_max_f16(const ITensor *in, ITensor *out, const Window &window)
+{
+    Window in_slice = window.first_slice_window_1D();
+
+    Window window_max(window);
+    window_max.set(Window::DimX, Window::Dimension(0, 0, 0));
+    Window max_slice = window_max.first_slice_window_1D();
+
+    do
+    {
+        Iterator input(in, in_slice);
+        Iterator output(out, max_slice);
+
+        float16x8_t vec_max = vdupq_n_f16(std::numeric_limits<float16_t>::lowest());
+
+        execute_window_loop(in_slice, [&](const Coordinates & id)
+        {
+            const auto        in_ptr        = reinterpret_cast<const float16_t *>(input.ptr());
+            const float16x8_t current_value = vld1q_f16(in_ptr);
+            vec_max                         = vmaxq_f16(vec_max, current_value);
+        },
+        input);
+
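+        // Reduce the eight f16 lane maxima to a single scalar via successive pairwise max operations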
+        float16x4_t carry_max = vpmax_f16(vget_high_f16(vec_max), vget_low_f16(vec_max));
+        carry_max             = vpmax_f16(carry_max, carry_max);
+        carry_max             = vpmax_f16(carry_max, carry_max);
+
+        *(reinterpret_cast<float16_t *>(output.ptr())) = vget_lane_f16(carry_max, 0);
+    }
+    while(window.slide_window_slice_1D(in_slice) && window.slide_window_slice_1D(max_slice));
+}
+#endif /* ARM_COMPUTE_ENABLE_FP16 */
+
 void logits_1d_max_f32(const ITensor *in, ITensor *out, const Window &window)
 {
     Window in_slice = window.first_slice_window_1D();
@@ -150,7 +185,7 @@ BorderSize NELogits1DMaxKernel::border_size() const
 
 void NELogits1DMaxKernel::configure(const ITensor *input, ITensor *output)
 {
-    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QS8, DataType::QS16, DataType::F32);
+    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QS8, DataType::QS16, DataType::F16, DataType::F32);
     ARM_COMPUTE_ERROR_ON_NULLPTR(output);
 
     // Softmax across the x dimension
@@ -178,6 +213,11 @@ void NELogits1DMaxKernel::configure(const ITensor *input, ITensor *output)
         case DataType::F32:
            _func = &logits_1d_max_f32;
            break;
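+        // F16 is only serviced when FP16 support is compiled in; otherwise it falls through to the error below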
+        case DataType::F16:
+#ifdef ARM_COMPUTE_ENABLE_FP16
+            _func = &logits_1d_max_f16;
+            break;
+#endif /* ARM_COMPUTE_ENABLE_FP16 */
         default:
             ARM_COMPUTE_ERROR("Unsupported data type.");
     }
@@ -333,6 +373,69 @@ void logits_1d_shift_exp_sum_qs16(const ITensor *in, const ITensor *max, ITensor
     }
     while(window.slide_window_slice_1D(in_slice) && window.slide_window_slice_1D(max_slice));
 }
+
+#ifdef ARM_COMPUTE_ENABLE_FP16
+void logits_1d_shift_exp_sum_f16(const ITensor *in, const ITensor *max, ITensor *out, ITensor *sum, const Window &window)
+{
+    Window window_max(window);
+    window_max.set(Window::DimX, Window::Dimension(0, 0, 0));
+
+    Window max_slice = window_max.first_slice_window_1D();
+    Window in_slice  = window.first_slice_window_1D();
+
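+    // Eight fp16 elements are processed per vector iteration; leftovers go through the scalar tail loop below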
+    constexpr int step        = 8;
+    const int     long_steps  = in->info()->valid_region().shape.x() / step;
+    const int     small_steps = in->info()->valid_region().shape.x() % step;
+
+    do
+    {
+        Iterator input(in, in_slice);
+        Iterator exp(out, in_slice);
+        Iterator _max(max, max_slice);
+        Iterator _sum(sum, max_slice);
+
+        // Get pointers
+        auto in_ptr  = reinterpret_cast<const float16_t *>(input.ptr());
+        auto exp_ptr = reinterpret_cast<float16_t *>(exp.ptr());
+
+        // Init sum to zero
+        float16x8_t vec_sum_value = vdupq_n_f16(0);
+
+        // Get max value
+        const auto        max_ptr = reinterpret_cast<const float16_t *>(_max.ptr());
+        const float16x8_t vec_max = vdupq_n_f16(*max_ptr);
+
+        // Run neon loop
+        for(int i = 0; i < long_steps; ++i)
+        {
+            float16x8_t vec_elements = vld1q_f16(in_ptr);
+            vec_elements             = vsubq_f16(vec_elements, vec_max);
+            vec_elements             = vexpq_f16(vec_elements);
+
+            vst1q_f16(exp_ptr, vec_elements);
+            vec_sum_value = vaddq_f16(vec_sum_value, vec_elements);
+
+            in_ptr += step;
+            exp_ptr += step;
+        }
+        // Reduce sum
+        const float16x4_t sum_red        = vadd_f16(vget_low_f16(vec_sum_value), vget_high_f16(vec_sum_value));
+        const float16x4_t carry_addition = vpadd_f16(sum_red, sum_red);
+        float16_t         sum            = vget_lane_f16(carry_addition, 0) + vget_lane_f16(carry_addition, 1);
+
+        // Run remaining elements
+        for(int i = 0; i < small_steps; ++i)
+        {
+            const float16_t element = std::exp(static_cast<float>(in_ptr[i] - *max_ptr));
+            exp_ptr[i]              = element;
+            sum += element;
+        }
+        *(reinterpret_cast<float16_t *>(_sum.ptr())) = sum;
+    }
+    while(window.slide_window_slice_1D(in_slice) && window.slide_window_slice_1D(max_slice));
+}
+#endif /* ARM_COMPUTE_ENABLE_FP16 */
+
 void logits_1d_shift_exp_sum_f32(const ITensor *in, const ITensor *max, ITensor *out, ITensor *sum, const Window &window)
 {
     Window window_max(window);
@@ -403,7 +506,7 @@ NELogits1DShiftExpSumKernel::NELogits1DShiftExpSumKernel()
 
 void NELogits1DShiftExpSumKernel::configure(const ITensor *input, const ITensor *max, ITensor *output, ITensor *sum)
 {
-    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QS8, DataType::QS16, DataType::F32);
+    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QS8, DataType::QS16, DataType::F16, DataType::F32);
     ARM_COMPUTE_ERROR_ON_NULLPTR(max, sum, output);
 
     // Output auto initialization if not yet initialized
@@ -428,8 +531,14 @@ void NELogits1DShiftExpSumKernel::configure(const ITensor *input, const ITensor
         case DataType::F32:
             _func = &logits_1d_shift_exp_sum_f32;
             break;
+        case DataType::F16:
+#ifdef ARM_COMPUTE_ENABLE_FP16
+            _func = &logits_1d_shift_exp_sum_f16;
+            break;
+#endif /* ARM_COMPUTE_ENABLE_FP16 */
         default:
             ARM_COMPUTE_ERROR("Unsupported data type.");
+            break;
     }
 
     _input = input;
@@ -527,6 +636,39 @@ void logits_1d_norm_qs16(const ITensor *in, const ITensor *sum, ITensor *out, co
     }
     while(window.slide_window_slice_1D(in_slice) && window.slide_window_slice_1D(sum_slice));
 }
+#ifdef ARM_COMPUTE_ENABLE_FP16
+void logits_1d_norm_f16(const ITensor *in, const ITensor *sum, ITensor *out, const Window &window)
+{
+    Window window_sum(window);
+    window_sum.set(Window::DimX, Window::Dimension(0, 0, 0));
+    Window sum_slice = window_sum.first_slice_window_1D();
+    Window in_slice  = window.first_slice_window_1D();
+
+    do
+    {
+        Iterator input(in, in_slice);
+        Iterator _sum(sum, sum_slice);
+        Iterator output(out, in_slice);
+
+        const float16_t sum_value = *reinterpret_cast<const float16_t *>(_sum.ptr());
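+        // Multiply by the reciprocal of the row sum so each element needs one multiply instead of a divide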
+        const float16x8_t vec_sum_inversed = vdupq_n_f16(1.0f / sum_value);
+
+        execute_window_loop(in_slice, [&](const Coordinates & id)
+        {
+            const auto in_ptr  = reinterpret_cast<const float16_t *>(input.ptr());
+            const auto out_ptr = reinterpret_cast<float16_t *>(output.ptr());
+
+            const float16x8_t vec_in           = vld1q_f16(in_ptr);
+            const float16x8_t normalized_value = vmulq_f16(vec_in, vec_sum_inversed);
+
+            vst1q_f16(out_ptr, normalized_value);
+        },
+        input, output);
+    }
+    while(window.slide_window_slice_1D(in_slice) && window.slide_window_slice_1D(sum_slice));
+}
+#endif /* ARM_COMPUTE_ENABLE_FP16 */
+
 void logits_1d_norm_f32(const ITensor *in, const ITensor *sum, ITensor *out, const Window &window)
 {
     Window window_sum(window);
@@ -566,7 +708,7 @@ NELogits1DNormKernel::NELogits1DNormKernel()
 
 void NELogits1DNormKernel::configure(const ITensor *input, const ITensor *sum, ITensor *output)
 {
-    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QS8, DataType::QS16, DataType::F32);
+    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QS8, DataType::QS16, DataType::F16, DataType::F32);
     ARM_COMPUTE_ERROR_ON_NULLPTR(sum, output);
 
     // Output auto initialization if not yet initialized
@@ -594,8 +736,14 @@ void NELogits1DNormKernel::configure(const ITensor *input, const ITensor *sum, I
         case DataType::F32:
             _func = &logits_1d_norm_f32;
             break;
+        case DataType::F16:
+#ifdef ARM_COMPUTE_ENABLE_FP16
+            _func = &logits_1d_norm_f16;
+            break;
+#endif /* ARM_COMPUTE_ENABLE_FP16 */
         default:
             ARM_COMPUTE_ERROR("Unsupported data type.");
+            break;
     }
 
     Window win = calculate_max_window(*input->info(), Steps(num_elems_processed_per_iteration));
diff --git a/src/runtime/NEON/functions/NESoftmaxLayer.cpp b/src/runtime/NEON/functions/NESoftmaxLayer.cpp
index 7dfa927981..13dfa4a51e 100644
--- a/src/runtime/NEON/functions/NESoftmaxLayer.cpp
+++ b/src/runtime/NEON/functions/NESoftmaxLayer.cpp
@@ -38,7 +38,7 @@ NESoftmaxLayer::NESoftmaxLayer()
 
 void NESoftmaxLayer::configure(ITensor *input, ITensor *output)
 {
-    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QS8, DataType::QS16, DataType::F32);
+    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QS8, DataType::QS16, DataType::F16, DataType::F32);
 
     // Create intermediate tensors shapes
     TensorInfo tensor_info_tmp(input->info()->tensor_shape(), input->info()->num_channels(), input->info()->data_type(), input->info()->fixed_point_position());
diff --git a/tests/validation/NEON/ActivationLayer.cpp b/tests/validation/NEON/ActivationLayer.cpp
index fbb5d17540..5f1a2c6fb6 100644
--- a/tests/validation/NEON/ActivationLayer.cpp
+++ b/tests/validation/NEON/ActivationLayer.cpp
@@ -238,7 +238,7 @@ BOOST_DATA_TEST_CASE(RunSmall, boost::unit_test::data::make({ false, true }) * S
     RawTensor ref_dst = Reference::compute_reference_activation_layer(shape, dt, act_info);
 
     // Validate output
-    validate(NEAccessor(dst), ref_dst, activation_layer_tolerance(dt, act_function));
+    validate(Accessor(dst), ref_dst, activation_layer_tolerance(dt, act_function));
 }
 BOOST_AUTO_TEST_SUITE_END()
 #endif /* ARM_COMPUTE_ENABLE_FP16 */
diff --git a/tests/validation/NEON/ConvolutionLayerDirect.cpp b/tests/validation/NEON/ConvolutionLayerDirect.cpp
index 034a8b2045..effb898428 100644
--- a/tests/validation/NEON/ConvolutionLayerDirect.cpp
+++ b/tests/validation/NEON/ConvolutionLayerDirect.cpp
@@ -150,7 +150,7 @@ BOOST_DATA_TEST_CASE(W1x1,
     RawTensor ref = Reference::compute_reference_convolution_layer(input_shape, w_shape, b_shape, d_shape, dt, conv_info, 0);
 
     // Validate output
-    validate(NEAccessor(dst), ref);
+    validate(Accessor(dst), ref);
 }
 
 BOOST_TEST_DECORATOR(*boost::unit_test::label("precommit"))
@@ -172,7 +172,7 @@ BOOST_DATA_TEST_CASE(W3x3, DirectConvolutionShapes() * boost::unit_test::data::m
     RawTensor ref = Reference::compute_reference_convolution_layer(input_shape, w_shape, b_shape, d_shape, dt, conv_info, 0);
 
     // Validate output
-    validate(NEAccessor(dst), ref, tolerance_fp16);
+    validate(Accessor(dst), ref, tolerance_fp16);
 }
 BOOST_AUTO_TEST_SUITE_END()
 #endif /* ARM_COMPUTE_ENABLE_FP16 */
diff --git a/tests/validation/NEON/PoolingLayer.cpp b/tests/validation/NEON/PoolingLayer.cpp
index 3961770310..0d2f285dff 100644
--- a/tests/validation/NEON/PoolingLayer.cpp
+++ b/tests/validation/NEON/PoolingLayer.cpp
@@ -162,7 +162,7 @@ BOOST_DATA_TEST_CASE(RandomDataset,
     RawTensor ref_dst = Reference::compute_reference_pooling_layer(obj.src_shape, obj.dst_shape, dt, obj.info);
 
     // Validate output
-    validate(NEAccessor(dst), ref_dst, tolerance_f16, 0);
+    validate(Accessor(dst), ref_dst, tolerance_f16, 0);
 }
 BOOST_AUTO_TEST_SUITE_END()
 #endif /* ARM_COMPUTE_ENABLE_FP16 */
diff --git a/tests/validation/NEON/SoftmaxLayer.cpp b/tests/validation/NEON/SoftmaxLayer.cpp
index 92ca673f17..8422ba363c 100644
--- a/tests/validation/NEON/SoftmaxLayer.cpp
+++ b/tests/validation/NEON/SoftmaxLayer.cpp
@@ -49,7 +49,10 @@ using namespace arm_compute::test::validation;
 namespace
 {
 /** Tolerance for float operations */
-const float tolerance = 0.000001f;
+const float tolerance_f32 = 0.000001f;
+#ifdef ARM_COMPUTE_ENABLE_FP16
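+/** Tolerance for half-precision float operations */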
+const float tolerance_f16 = 0.0001f;
+#endif /* ARM_COMPUTE_ENABLE_FP16*/
 
 /** Tolerance for fixed point operations */
 const float tolerance_fixed_point = 2.f;
@@ -102,6 +105,23 @@ Tensor compute_softmax_layer(const TensorShape &shape, DataType dt, int fixed_po
 BOOST_AUTO_TEST_SUITE(NEON)
 BOOST_AUTO_TEST_SUITE(SoftmaxLayer)
 
+#ifdef ARM_COMPUTE_ENABLE_FP16
+BOOST_AUTO_TEST_SUITE(Float16)
+BOOST_TEST_DECORATOR(*boost::unit_test::label("precommit"))
+BOOST_DATA_TEST_CASE(RunSmall, SmallShapes(), shape)
+{
+    // Compute function
+    Tensor dst = compute_softmax_layer(shape, DataType::F16);
+
+    // Compute reference
+    RawTensor ref_dst = Reference::compute_reference_softmax_layer(shape, DataType::F16);
+
+    // Validate output
+    validate(Accessor(dst), ref_dst, tolerance_f16);
+}
+BOOST_AUTO_TEST_SUITE_END()
+#endif /* ARM_COMPUTE_ENABLE_FP16*/
+
 BOOST_TEST_DECORATOR(*boost::unit_test::label("precommit") * boost::unit_test::label("nightly"))
 BOOST_DATA_TEST_CASE(Configuration, (SmallShapes() + LargeShapes()) * CNNDataTypes(), shape, dt)
 {
@@ -142,7 +162,7 @@ BOOST_DATA_TEST_CASE(RunSmall, SmallShapes() * CNNFloatDataTypes(), shape, dt)
     RawTensor ref_dst = Reference::compute_reference_softmax_layer(shape, dt);
 
     // Validate output
-    validate(Accessor(dst), ref_dst, tolerance);
+    validate(Accessor(dst), ref_dst, tolerance_f32);
 }
 
 BOOST_TEST_DECORATOR(*boost::unit_test::label("nightly"))
@@ -155,7 +175,7 @@ BOOST_DATA_TEST_CASE(RunLarge, LargeShapes() * CNNFloatDataTypes(), shape, dt)
     RawTensor ref_dst = Reference::compute_reference_softmax_layer(shape, dt);
 
     // Validate output
-    validate(Accessor(dst), ref_dst, tolerance);
+    validate(Accessor(dst), ref_dst, tolerance_f32);
 }
 BOOST_AUTO_TEST_SUITE_END()
-- 
cgit v1.2.1