From 81870c05533cba03373d5e51fed95cd5e74f741d Mon Sep 17 00:00:00 2001
From: Michele Di Giorgio
Date: Thu, 30 Apr 2020 12:02:20 +0100
Subject: IVGCVSW-4743: Fix CpuAcc Hal 1.3 Comparison Failures

Broadcast for QASYMM8_SIGNED was not handled.

Change-Id: Id5dbb0dce78838319218de94551bba52d697f4a4
Signed-off-by: Michele Di Giorgio
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/3131
Reviewed-by: Georgios Pinitas
Comments-Addressed: Arm Jenkins
Tested-by: Arm Jenkins
---
 .../NEON/kernels/NEElementwiseOperationKernel.cpp | 98 ++++++++++++++++------
 tests/validation/NEON/Comparisons.cpp             | 37 ++++----
 tests/validation/fixtures/ComparisonFixture.h     | 13 ++-
 3 files changed, 102 insertions(+), 46 deletions(-)

diff --git a/src/core/NEON/kernels/NEElementwiseOperationKernel.cpp b/src/core/NEON/kernels/NEElementwiseOperationKernel.cpp
index 0579dc67f4..3fd5f39e9f 100644
--- a/src/core/NEON/kernels/NEElementwiseOperationKernel.cpp
+++ b/src/core/NEON/kernels/NEElementwiseOperationKernel.cpp
@@ -116,24 +116,6 @@ void store_quantized_signed(int8_t *output_ptr, const float32x4x4_t &rf, const f
     store_quantized_signed(output_ptr, out);
 }
-float32x4x4_t dup_quantized(qasymm8_t broadcast_value, int offset, float scale)
-{
-    const qasymm8x16_t broadcast_value_vec = vdupq_n_u8(broadcast_value);
-    const int32x4_t    voffset             = vdupq_n_s32(offset);
-    const float32x4_t  vscale              = vdupq_n_f32(scale);
-
-    const float32x4x4_t broadcast_vector =
-    {
-        {
-            vmulq_f32(vcvtq_f32_s32(vsubq_s32(vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(vmovl_u8(vget_low_u8(broadcast_value_vec))))), voffset)), vscale),
-            vmulq_f32(vcvtq_f32_s32(vsubq_s32(vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(vmovl_u8(vget_low_u8(broadcast_value_vec))))), voffset)), vscale),
-            vmulq_f32(vcvtq_f32_s32(vsubq_s32(vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(vmovl_u8(vget_high_u8(broadcast_value_vec))))), voffset)), vscale),
-            vmulq_f32(vcvtq_f32_s32(vsubq_s32(vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(vmovl_u8(vget_high_u8(broadcast_value_vec))))), voffset)), vscale),
-        }
-    };
-    return broadcast_vector;
-}
-
 template
 inline ScalarType elementwise_arithm_op_scalar(const ScalarType &a, const ScalarType &b)
 {
@@ -596,6 +578,23 @@ inline int elementwise_comp_op_quantized_broadcast_loop(int window_start_x, int
     return x;
 }
+template
+inline int elementwise_comp_op_quantized_signed_broadcast_loop(int window_start_x, int window_end_x, int window_step_x,
+                                                               const int8_t *non_broadcast_input_ptr, float32x4x4_t broadcast_vector, uint8_t *output_ptr,
+                                                               int32x4_t voffset_non_broadcast, float32x4_t vscale_non_broadcast,
+                                                               float32x4_t voffseto, float32x4_t invvscaleo, bool reorder)
+{
+    ARM_COMPUTE_UNUSED(voffseto, invvscaleo);
+    int x = window_start_x;
+    for(; x <= (window_end_x - window_step_x); x += window_step_x)
+    {
+        const float32x4x4_t af = load_quantized_signed(non_broadcast_input_ptr + x, voffset_non_broadcast, vscale_non_broadcast);
+        const uint32x4x4_t  rf = elementwise_comp_op(reorder ? broadcast_vector : af, reorder ? af : broadcast_vector);
+        store_quantized(output_ptr + x, rf);
+    }
+    return x;
+}
+
 template
 void elementwise_op(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window,
                     OutputScalarType (*scalar_func)(const InputScalarType &, const InputScalarType &),
@@ -728,7 +727,7 @@ void elementwise_op_quantized(const ITensor *in1, const ITensor *in2, ITensor *o
             const auto output_ptr = reinterpret_cast(output.ptr());
             const uint8_t broadcast_value = *reinterpret_cast(broadcast_input.ptr());
-            const float32x4x4_t broadcast_vector = dup_quantized(broadcast_value, broadcast_qinfo.offset, broadcast_qinfo.scale);
+            const float32x4x4_t broadcast_vector = vdequantize(vdupq_n_u8(broadcast_value), broadcast_qinfo);
             int x = (*broadcast_func)(window_start_x, window_end_x, window_step_x, non_broadcast_input_ptr, broadcast_vector, output_ptr,
                                       voffset_non_broadcast, vscale_non_broadcast, voffseto, invvscaleo, !is_broadcast_input_2);
@@ -783,6 +782,8 @@ void elementwise_op_quantized(const ITensor *in1, const ITensor *in2, ITensor *o
 void elementwise_comp_quantized_signed(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window,
                                        uint8_t (*scalar_func)(const float &, const float &, UniformQuantizationInfo),
+                                       int (*broadcast_func)(int, int, int, const int8_t *, float32x4x4_t, uint8_t *, int32x4_t, float32x4_t,
+                                                             float32x4_t, float32x4_t, const bool),
                                        int (*neon_func)(int, int, int, const int8_t *, const int8_t *, uint8_t *,
                                                         int32x4_t, int32x4_t, float32x4_t, float32x4_t,
                                                         float32x4_t, float32x4_t))
@@ -795,13 +796,58 @@ void elementwise_comp_quantized_signed(const ITensor *in1, const ITensor *in2, I
     Window win = window;
     win.set(Window::DimX, Window::Dimension(0, 1, 1));
-    const int window_step_x = 16;
-    const auto window_start_x = static_cast(window.x().start());
-    const auto window_end_x = static_cast(window.x().end());
-    const UniformQuantizationInfo output_qinfo = out->info()->quantization_info().uniform();
+    const int  window_step_x         = 16;
+    const auto window_start_x        = static_cast(window.x().start());
+    const auto window_end_x          = static_cast(window.x().end());
+    const bool is_broadcast_across_x = (input1_win.x().step() == 0) || (input2_win.x().step() == 0);
+
+    const UniformQuantizationInfo output_qinfo = out->info()->quantization_info().uniform();
     const float32x4_t voffseto   = vdupq_n_f32(output_qinfo.offset);
     const float32x4_t invvscaleo = vdupq_n_f32(1.f / output_qinfo.scale);
+
+    if(is_broadcast_across_x)
+    {
+        // Select the broadcast input on the X axis
+        const bool     is_broadcast_input_2 = input2_win.x().step() == 0;
+        Window         broadcast_win        = is_broadcast_input_2 ? input2_win : input1_win;
+        Window         non_broadcast_win    = !is_broadcast_input_2 ? input2_win : input1_win;
+        const ITensor *broadcast_tensor     = is_broadcast_input_2 ? in2 : in1;
+        const ITensor *non_broadcast_tensor = !is_broadcast_input_2 ? in2 : in1;
+
+        const UniformQuantizationInfo broadcast_qinfo     = broadcast_tensor->info()->quantization_info().uniform();
+        const UniformQuantizationInfo non_broadcast_qinfo = non_broadcast_tensor->info()->quantization_info().uniform();
+
+        const int32x4_t   voffset_non_broadcast = vdupq_n_s32(non_broadcast_qinfo.offset);
+        const float32x4_t vscale_non_broadcast  = vdupq_n_f32(non_broadcast_qinfo.scale);
+
+        // Clear X Dimension on execution window as we handle manually
+        non_broadcast_win.set(Window::DimX, Window::Dimension(0, 1, 1));
+
+        Iterator broadcast_input(broadcast_tensor, broadcast_win);
+        Iterator non_broadcast_input(non_broadcast_tensor, non_broadcast_win);
+        Iterator output(out, win);
+
+        execute_window_loop(win, [&](const Coordinates &)
+        {
+            const auto non_broadcast_input_ptr = reinterpret_cast(non_broadcast_input.ptr());
+            const auto output_ptr              = reinterpret_cast(output.ptr());
+
+            const int8_t        broadcast_value  = *reinterpret_cast(broadcast_input.ptr());
+            const float32x4x4_t broadcast_vector = vdequantize(vdupq_n_s8(broadcast_value), broadcast_qinfo);
+
+            int x = (*broadcast_func)(window_start_x, window_end_x, window_step_x, non_broadcast_input_ptr, broadcast_vector, output_ptr,
+                                      voffset_non_broadcast, vscale_non_broadcast, voffseto, invvscaleo, !is_broadcast_input_2);
+            for(; x < window_end_x; ++x)
+            {
+                const float afs = dequantize_qasymm8_signed(*(non_broadcast_input_ptr + x), non_broadcast_qinfo);
+                const float bfs = dequantize_qasymm8_signed(broadcast_value, broadcast_qinfo);
+                *(output_ptr + x) = (*scalar_func)(!is_broadcast_input_2 ? bfs : afs, !is_broadcast_input_2 ? afs : bfs, output_qinfo);
+            }
+        },
+        broadcast_input, non_broadcast_input, output);
+    }
+    else
     {
         const UniformQuantizationInfo input1_qinfo = in1->info()->quantization_info().uniform();
         const UniformQuantizationInfo input2_qinfo = in2->info()->quantization_info().uniform();
@@ -895,7 +941,7 @@ void elementwise_op_quantized_signed(const ITensor *in1, const ITensor *in2, ITe
             const auto output_ptr = reinterpret_cast(output.ptr());
             const int8_t broadcast_value = *reinterpret_cast(broadcast_input.ptr());
-            const float32x4x4_t broadcast_vector = dup_quantized(broadcast_value, broadcast_qinfo.offset, broadcast_qinfo.scale);
+            const float32x4x4_t broadcast_vector = vdequantize(vdupq_n_u8(broadcast_value), broadcast_qinfo);
             int x = (*broadcast_func)(window_start_x, window_end_x, window_step_x, non_broadcast_input_ptr, broadcast_vector, output_ptr,
                                       voffset_non_broadcast, vscale_non_broadcast, voffseto, invvscaleo, !is_broadcast_input_2);
@@ -1003,7 +1049,9 @@ void elementwise_comp_op_quantized(const ITensor *in1, const ITensor *in2, ITens
 template
 void elementwise_comp_op_quantized_signed(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window)
 {
-    elementwise_comp_quantized_signed(in1, in2, out, window, &elementwise_comp_op_quantized_scalar, &elementwise_comp_op_quantized_signed_loop);
+    elementwise_comp_quantized_signed(in1, in2, out, window, &elementwise_comp_op_quantized_scalar,
+                                      &elementwise_comp_op_quantized_signed_broadcast_loop,
+                                      &elementwise_comp_op_quantized_signed_loop);
 }
 std::function
diff --git a/tests/validation/NEON/Comparisons.cpp b/tests/validation/NEON/Comparisons.cpp
index 38e440e649..f080c834e5 100644
--- a/tests/validation/NEON/Comparisons.cpp
+++ b/tests/validation/NEON/Comparisons.cpp
@@ -52,8 +52,9 @@ const auto configure_dataset = combine(datasets::SmallShapes(),
                                        DataType::F32
                                        }));
-const auto run_small_dataset = combine(datasets::ComparisonOperations(), datasets::SmallShapes());
-const auto run_large_dataset = combine(datasets::ComparisonOperations(), datasets::LargeShapes());
+const auto run_small_dataset           = combine(datasets::ComparisonOperations(), datasets::SmallShapes());
+const auto run_small_broadcast_dataset = combine(datasets::ComparisonOperations(), datasets::SmallShapesBroadcast());
+const auto run_large_dataset           = combine(datasets::ComparisonOperations(), datasets::LargeShapes());
 } // namespace
@@ -90,23 +91,6 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
 // clang-format on
 // *INDENT-ON*
-DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, configure_dataset, shape, data_type)
-{
-    // Create tensors
-    Tensor ref_src1 = create_tensor(shape, data_type);
-    Tensor ref_src2 = create_tensor(shape, data_type);
-    Tensor dst      = create_tensor(shape, DataType::U8);
-
-    // Create and Configure function
-    NEElementwiseComparison compare;
-    compare.configure(&ref_src1, &ref_src2, &dst, ComparisonOperation::Equal);
-
-    // Validate valid region
-    const ValidRegion valid_region = shape_to_valid_region(shape);
-    validate(dst.info()->valid_region(), valid_region);
-}
-
 template
 using NEComparisonFixture = ComparisonValidationFixture;
@@ -154,6 +138,8 @@ TEST_SUITE_END() // Float
 template
 using NEComparisonQuantizedFixture = ComparisonValidationQuantizedFixture;
+template
+using NEComparisonQuantizedBroadcastFixture = ComparisonQuantizedBroadcastValidationFixture;
 TEST_SUITE(Quantized)
 TEST_SUITE(QASYMM8)
@@ -169,11 +155,22 @@ FIXTURE_DATA_TEST_CASE(RunSmall,
 }
 TEST_SUITE_END()
 TEST_SUITE(QASYMM8_SIGNED)
+FIXTURE_DATA_TEST_CASE(RunSmallBroadcast,
+                       NEComparisonQuantizedBroadcastFixture,
+                       framework::DatasetMode::ALL,
+                       combine(combine(combine(run_small_broadcast_dataset, framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)),
+                                       framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.1, -30) })),
+                               framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.3f, 2) })))
+{
+    // Validate output
+    validate(Accessor(_target), _reference);
+}
+
 FIXTURE_DATA_TEST_CASE(RunSmall,
                        NEComparisonQuantizedFixture,
                        framework::DatasetMode::PRECOMMIT,
                        combine(combine(combine(run_small_dataset, framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)),
-                                       framework::dataset::make("QuantizationInfo", { QuantizationInfo() })),
+                                       framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.1, -30) })),
                                framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.3f, 2) })))
 {
     // Validate output
diff --git a/tests/validation/fixtures/ComparisonFixture.h b/tests/validation/fixtures/ComparisonFixture.h
index b2fe42de26..d1e1a539c7 100644
--- a/tests/validation/fixtures/ComparisonFixture.h
+++ b/tests/validation/fixtures/ComparisonFixture.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2020 ARM Limited.
  *
 * SPDX-License-Identifier: MIT
 *
@@ -146,6 +146,17 @@ public:
         ComparisonValidationGenericFixture::setup(op, shape, shape, data_type, qinfo0, qinfo1);
     }
 };
+
+template
+class ComparisonQuantizedBroadcastValidationFixture : public ComparisonValidationGenericFixture
+{
+public:
+    template
+    void setup(ComparisonOperation op, const TensorShape &shape0, const TensorShape &shape1, DataType data_type, QuantizationInfo qinfo0, QuantizationInfo qinfo1)
+    {
+        ComparisonValidationGenericFixture::setup(op, shape0, shape1, data_type, qinfo0, qinfo1);
+    }
+};
 } // namespace validation
 } // namespace test
 } // namespace arm_compute
-- 
cgit v1.2.1
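
For readers unfamiliar with the kernel internals, the broadcast path added above reduces, per element, to dequantising both signed quantized operands and comparing them in float. Below is a minimal standalone C++ sketch of that scalar tail only; it is not the library code. The QuantInfo struct, the helper names and the choice of a GREATER comparison are assumptions made for illustration, and the real kernel handles most elements with NEON vector loads, using a loop like this only for the leftovers.

#include <cstdint>
#include <iostream>
#include <vector>

// Illustrative stand-ins for the library's UniformQuantizationInfo and
// dequantize_qasymm8_signed(); names and layout are assumptions, not the ACL API.
struct QuantInfo
{
    float scale;
    int   offset;
};

inline float dequantize_s8(int8_t v, const QuantInfo &qi)
{
    // value = (quantized - offset) * scale
    return (static_cast<int>(v) - qi.offset) * qi.scale;
}

// Scalar tail of a GREATER comparison where one input is a broadcast scalar:
// dequantize both sides, compare in float, write 0x00 / 0xFF as the kernel does.
void compare_greater_broadcast(const std::vector<int8_t> &non_broadcast, const QuantInfo &nb_qi,
                               int8_t broadcast_value, const QuantInfo &b_qi,
                               bool broadcast_is_input2, std::vector<uint8_t> &out)
{
    const float bfs = dequantize_s8(broadcast_value, b_qi);
    out.resize(non_broadcast.size());
    for(size_t x = 0; x < non_broadcast.size(); ++x)
    {
        const float afs = dequantize_s8(non_broadcast[x], nb_qi);
        // When input1 is the broadcast tensor the operand order must be swapped,
        // mirroring the !is_broadcast_input_2 handling in the kernel above.
        const float lhs = broadcast_is_input2 ? afs : bfs;
        const float rhs = broadcast_is_input2 ? bfs : afs;
        out[x] = (lhs > rhs) ? 255 : 0;
    }
}

int main()
{
    const QuantInfo in_qi{ 0.1f, -30 }; // same values as the test's QuantizationInfo(0.1, -30)
    const QuantInfo b_qi{ 0.1f, -30 };

    std::vector<int8_t>  values{ -40, -30, -20, 10 };
    std::vector<uint8_t> result;

    compare_greater_broadcast(values, in_qi, /*broadcast_value=*/-30, b_qi,
                              /*broadcast_is_input2=*/true, result);

    for(uint8_t r : result)
    {
        std::cout << static_cast<int>(r) << ' '; // expected: 0 0 255 255
    }
    std::cout << '\n';
    return 0;
}

Swapping the operand order when input1 is the broadcast tensor is what keeps non-commutative comparisons (Greater, Less, etc.) correct; that is the same reason the kernel passes !is_broadcast_input_2 down to both the vector broadcast loop and the scalar tail.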