author    Pablo Marquez Tello <pablo.tello@arm.com>   2023-01-11 09:54:00 +0000
committer Pablo Marquez Tello <pablo.tello@arm.com>   2023-01-11 13:57:06 +0000
commit    6bcdc578a388782f5ec80ec348c5dd3f5c1f8363 (patch)
tree      07221f61b69faa7efb3280bf053667ef1906a470 /src
parent    1b2f868b7b55e3e952520f0380e9174696c3ad1b (diff)
download  ComputeLibrary-6bcdc578a388782f5ec80ec348c5dd3f5c1f8363.tar.gz
Deprecated BF16 support in DepthConvert
* Removed BF16 validation tests for DepthConvert
* Revert back to using inline assembly to convert to/from BF16
* Resolves COMPMID-5800

Change-Id: I803b2ad19ead297417f780c97c5b724cca6b394c
Signed-off-by: Pablo Marquez Tello <pablo.tello@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/8929
Reviewed-by: Jakub Sujak <jakub.sujak@arm.com>
Reviewed-by: Viet-Hoa Do <viet-hoa.do@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Benchmark: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'src')
-rw-r--r--  src/core/NEON/wrapper/intrinsics/cvt.h          22
-rw-r--r--  src/cpu/kernels/CpuCastKernel.h                  4
-rw-r--r--  src/cpu/kernels/cast/generic/neon/bfloat16.cpp  94
-rw-r--r--  src/cpu/operators/CpuCast.h                      5
4 files changed, 88 insertions, 37 deletions
diff --git a/src/core/NEON/wrapper/intrinsics/cvt.h b/src/core/NEON/wrapper/intrinsics/cvt.h
index c75d43dbf2..1c77a9e9f0 100644
--- a/src/core/NEON/wrapper/intrinsics/cvt.h
+++ b/src/core/NEON/wrapper/intrinsics/cvt.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020, 2022 Arm Limited.
+ * Copyright (c) 2020, 2022-2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -87,6 +87,26 @@ vcvta(const float32x4_t &a)
return vcvtaq_s32_f32(a);
}
#endif //__aarch64__
+
+#if defined(ARM_COMPUTE_ENABLE_BF16)
+/** Convert 2x128-bit floating point vectors into 1x128-bit bfloat16 vector
+ *
+ * @param[in] inptr Pointer to the input memory to load values from
+ * @param[in,out] outptr Pointer to the output memory to store values to
+ */
+inline void vcvt_bf16_f32(const float *inptr, uint16_t *outptr)
+{
+ __asm __volatile(
+ "ldp q0, q1, [%[inptr]]\n"
+ ".inst 0xea16800\n" // BFCVTN v0, v0
+ ".inst 0x4ea16820\n" // BFCVTN2 v0, v1
+ "str q0, [%[outptr]]\n"
+ : [inptr] "+r"(inptr)
+ : [outptr] "r"(outptr)
+ : "v0", "v1", "memory");
+}
+#endif /* defined(ARM_COMPUTE_ENABLE_BF16) */
+
} // namespace wrapper
} // namespace arm_compute
#endif /* ARM_COMPUTE_WRAPPER_CVT_H */
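
For context, the new wrapper::vcvt_bf16_f32 helper converts eight packed F32 values (two 128-bit vectors) into eight BF16 values (one 128-bit vector) by emitting the BFCVTN/BFCVTN2 instructions as raw .inst encodings, so no BF16 intrinsics or special compiler support are required. A minimal usage sketch, assuming ARM_COMPUTE_ENABLE_BF16 is defined and the CPU implements the BF16 extension; the function and buffer names are illustrative, not part of this patch:

    // Illustrative sketch: convert one block of 8 packed F32 values to BF16
    // with the helper added in this patch. Assumes ARM_COMPUTE_ENABLE_BF16.
    #include "src/core/NEON/wrapper/intrinsics/cvt.h"

    #include <cstdint>

    void convert_block_f32_to_bf16(const float *src, uint16_t *dst)
    {
        // Reads src[0..7] (2 x 128-bit F32 vectors) and writes dst[0..7]
        // (1 x 128-bit vector holding 8 BF16 values as raw 16-bit patterns).
        arm_compute::wrapper::vcvt_bf16_f32(src, dst);
    }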
diff --git a/src/cpu/kernels/CpuCastKernel.h b/src/cpu/kernels/CpuCastKernel.h
index 95d46fad23..de4ace2140 100644
--- a/src/cpu/kernels/CpuCastKernel.h
+++ b/src/cpu/kernels/CpuCastKernel.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2022 Arm Limited.
+ * Copyright (c) 2016-2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -62,6 +62,8 @@ public:
* @param[in] src The src tensor to convert. Data types supported: QASYMM8_SIGNED/QASYMM8/U8/U16/S16/BFLOAT16/F16/F32.
* @param[out] dst The dst tensor. Data types supported: QASYMM8_SIGNED/QASYMM8/U8/U16/S16/U32/S32/BFLOAT16/F16/F32.
* @param[in] policy Conversion policy.
+ *
+ * @deprecated Support for BFLOAT16 will be removed in 23.05 release
*/
void configure(const ITensorInfo *src, ITensorInfo *dst, ConvertPolicy policy);
/** Static function to check if given info will lead to a valid configuration
diff --git a/src/cpu/kernels/cast/generic/neon/bfloat16.cpp b/src/cpu/kernels/cast/generic/neon/bfloat16.cpp
index 942bdfae61..91c15be279 100644
--- a/src/cpu/kernels/cast/generic/neon/bfloat16.cpp
+++ b/src/cpu/kernels/cast/generic/neon/bfloat16.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2022 Arm Limited.
+ * Copyright (c) 2016-2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -24,8 +24,10 @@
#if defined(ARM_COMPUTE_ENABLE_BF16)
#include "arm_compute/core/TensorInfo.h"
+#include "src/core/NEON/wrapper/wrapper.h"
#include "src/cpu/kernels/CpuCastKernel.h"
#include "src/cpu/kernels/cast/list.h"
+#include "support/SaturateCast.h"
namespace arm_compute
{
@@ -36,9 +38,9 @@ void neon_fp32_to_bfloat16_cast(const ITensor *_src, ITensor *_dst, const Thread
ARM_COMPUTE_UNUSED(info);
ARM_COMPUTE_UNUSED(_policy);
- const auto window_start_x = static_cast<int>(window.x().start());
- const auto window_end_x = static_cast<int>(window.x().end());
- constexpr int window_step_x = 8;
+ const auto window_start_x = static_cast<int>(window.x().start());
+ const auto window_end_x = static_cast<int>(window.x().end());
+ const int window_step_x = 16;
ARM_COMPUTE_ERROR_ON_NULLPTR(_src, _dst);
ARM_COMPUTE_ERROR_ON(_src == _dst);
@@ -50,23 +52,25 @@ void neon_fp32_to_bfloat16_cast(const ITensor *_src, ITensor *_dst, const Thread
Iterator src(_src, win);
Iterator dst(_dst, win);
+
/* Down-conversion F32 -> BFLOAT16 */
execute_window_loop(win, [&](const Coordinates &)
{
- const auto src_ptr = reinterpret_cast<const float *>(src.ptr());
- const auto dst_ptr = reinterpret_cast<bfloat16_t *>(dst.ptr());
- int x = window_start_x;
- const int right_bound = (window_end_x - window_step_x);
- for(; x <= right_bound; x += window_step_x)
+ const auto src_ptr = reinterpret_cast<const float *>(src.ptr());
+ const auto dst_ptr = reinterpret_cast<bfloat16 *>(dst.ptr());
+
+ int x = window_start_x;
+ for(; x <= (window_end_x - window_step_x); x += window_step_x)
{
- const auto vbf16_0 = vcombine_bf16(
- vcvt_bf16_f32(vld1q_f32(src_ptr + x)),
- vcvt_bf16_f32(vld1q_f32(src_ptr + x + 4)));
- vst1q_bf16(dst_ptr + x, vbf16_0);
+ wrapper::vcvt_bf16_f32(reinterpret_cast<float *>(src.ptr()),
+ reinterpret_cast<uint16_t *>(dst.ptr()));
+ wrapper::vcvt_bf16_f32(reinterpret_cast<float *>(src.ptr()) + 8,
+ reinterpret_cast<uint16_t *>(dst.ptr()) + 8);
}
+
for(; x < window_end_x; ++x)
{
- *(reinterpret_cast<bfloat16 *>(dst.ptr()) + x) = *(src_ptr + x);
+ *(dst_ptr + x) = *(src_ptr + x);
}
},
src, dst);
@@ -77,9 +81,9 @@ void neon_bfloat16_to_fp32_cast(const ITensor *_src, ITensor *_dst, const Thread
ARM_COMPUTE_UNUSED(info);
ARM_COMPUTE_UNUSED(_policy);
- const auto window_start_x = static_cast<int>(window.x().start());
- const auto window_end_x = static_cast<int>(window.x().end());
- constexpr int window_step_x = 8;
+ const auto window_start_x = static_cast<int>(window.x().start());
+ const auto window_end_x = static_cast<int>(window.x().end());
+ const int window_step_x = 16;
ARM_COMPUTE_ERROR_ON_NULLPTR(_src, _dst);
ARM_COMPUTE_ERROR_ON(_src == _dst);
@@ -91,26 +95,48 @@ void neon_bfloat16_to_fp32_cast(const ITensor *_src, ITensor *_dst, const Thread
Iterator src(_src, win);
Iterator dst(_dst, win);
- /* Up-conversion BFLOAT16 -> F32 */
- execute_window_loop(win, [&](const Coordinates &)
+ switch(_dst->info()->data_type())
{
- const auto src_ptr = reinterpret_cast<const bfloat16_t *>(src.ptr());
- const auto dst_ptr = reinterpret_cast<float *>(dst.ptr());
-
- int x = window_start_x;
- const int right_bound(window_end_x - window_step_x);
- for(; x <= right_bound; x += window_step_x)
+ case DataType::F32:
{
- const bfloat16x8_t vinput = vld1q_bf16(src_ptr + x);
- vst1q_f32(dst_ptr + x, vcvt_f32_bf16(vget_low_bf16(vinput)));
- vst1q_f32(dst_ptr + x + 4, vcvt_f32_bf16(vget_high_bf16(vinput)));
+ /* Up-conversion BFLOAT16 -> F32 */
+ execute_window_loop(win, [&](const Coordinates &)
+ {
+ const auto src_ptr = reinterpret_cast<const bfloat16 *>(src.ptr());
+ const auto dst_ptr = reinterpret_cast<float *>(dst.ptr());
+
+ int x = window_start_x;
+ for(; x <= (window_end_x - window_step_x); x += window_step_x)
+ {
+ const uint16x8x2_t texels =
+ {
+ {
+ vld1q_u16(reinterpret_cast<uint16_t *>(src.ptr())),
+ vld1q_u16(reinterpret_cast<uint16_t *>(src.ptr()) + 8)
+ }
+ };
+
+ vst1q_f32(reinterpret_cast<float *>(dst.ptr()),
+ vreinterpretq_f32_u32(vshlq_n_u32(vmovl_u16(vget_low_u16(texels.val[0])), 16)));
+ vst1q_f32(reinterpret_cast<float *>(dst.ptr()) + 4,
+ vreinterpretq_f32_u32(vshlq_n_u32(vmovl_u16(vget_high_u16(texels.val[0])), 16)));
+ vst1q_f32(reinterpret_cast<float *>(dst.ptr()) + 8,
+ vreinterpretq_f32_u32(vshlq_n_u32(vmovl_u16(vget_low_u16(texels.val[1])), 16)));
+ vst1q_f32(reinterpret_cast<float *>(dst.ptr()) + 12,
+ vreinterpretq_f32_u32(vshlq_n_u32(vmovl_u16(vget_high_u16(texels.val[1])), 16)));
+ }
+
+ for(; x < window_end_x; ++x)
+ {
+ *(dst_ptr + x) = float(*(src_ptr + x));
+ }
+ },
+ src, dst);
+ break;
}
- for(; x < window_end_x; ++x)
- {
- *(dst_ptr + x) = float(*(reinterpret_cast<const bfloat16 *>(src_ptr) + x));
- }
- },
- src, dst);
+ default:
+ ARM_COMPUTE_ERROR("dst data type unsupported");
+ }
}
} // namespace cpu
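
The rewritten up-conversion path no longer uses the bfloat16 intrinsics: since a BF16 value is just the upper 16 bits of the corresponding F32 bit pattern, widening each 16-bit lane and shifting it left by 16 (the vmovl_u16/vshlq_n_u32 sequence above) reconstructs the F32 value exactly. A scalar sketch of the same idea, using a hypothetical helper name for illustration:

    // Scalar equivalent of the vectorised BF16 -> F32 up-conversion above
    // (hypothetical helper, for illustration only).
    #include <cstdint>
    #include <cstring>

    float bf16_bits_to_f32(uint16_t bf16_bits)
    {
        // BF16 shares the sign, exponent and top 7 mantissa bits with F32,
        // so placing the pattern in the upper half-word restores the float.
        const uint32_t f32_bits = static_cast<uint32_t>(bf16_bits) << 16;
        float result;
        std::memcpy(&result, &f32_bits, sizeof(result)); // bit-cast, no numeric conversion
        return result;
    }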
diff --git a/src/cpu/operators/CpuCast.h b/src/cpu/operators/CpuCast.h
index 5e5f3e0ca6..a8342581cb 100644
--- a/src/cpu/operators/CpuCast.h
+++ b/src/cpu/operators/CpuCast.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -56,6 +56,9 @@ public:
* @param[in] src The source tensor to convert. Data types supported: U8/S8/U16/S16/U32/S32/F16/F32.
* @param[out] dst The destination tensor. Data types supported: U8/S8/U16/S16/U32/S32/F16/F32.
* @param[in] policy Conversion policy.
+ *
+ * @deprecated Support for BFLOAT16 will be removed in 23.05 release
+ *
*/
void configure(const ITensorInfo *src, ITensorInfo *dst, ConvertPolicy policy);
/** Static function to check if given info will lead to a valid configuration
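
Note that the BFLOAT16 paths in CpuCast and CpuCastKernel remain functional for now; the @deprecated tags only announce the planned removal in 23.05. A hedged configuration sketch using only the configure() signature shown above; the tensor shapes and the SATURATE policy are assumptions made for the example, not taken from this patch:

    // Illustrative sketch: configure the cast operator for F32 -> BFLOAT16
    // while the deprecated path is still available. Shapes and policy are
    // assumptions for the example only.
    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/core/Types.h"
    #include "src/cpu/operators/CpuCast.h"

    void configure_f32_to_bf16_cast()
    {
        arm_compute::TensorInfo src(arm_compute::TensorShape(16U, 16U), 1, arm_compute::DataType::F32);
        arm_compute::TensorInfo dst(arm_compute::TensorShape(16U, 16U), 1, arm_compute::DataType::BFLOAT16);

        arm_compute::cpu::CpuCast cast;
        cast.configure(&src, &dst, arm_compute::ConvertPolicy::SATURATE);
    }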