From 7f0f790ae7f5dd044a5d7564492583b8df974a11 Mon Sep 17 00:00:00 2001
From: Gian Marco
Date: Thu, 7 Dec 2017 09:26:56 +0000
Subject: COMPMID-731 - Remove padding requirements for NEGEMMLowpOutputStage

Used a left-over for loop in:
- NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel
- NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel

in order to remove the padding requirements for AndroidNN

Change-Id: I8ef529fc3d1adecf15fbe42002d99bc0030f131f
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/112266
Reviewed-by: Anthony Barbier
Tested-by: BSG Visual Compute Jenkins server to access repositories on http://mpd-gerrit.cambridge.arm.com
---
 ...tizeDownInt32ToUint8ScaleByFixedPointKernel.cpp | 154 +++++++++++++++------
 ...GEMMLowpQuantizeDownInt32ToUint8ScaleKernel.cpp | 143 +++++++++++++------
 tests/datasets/ShapeDatasets.h                     |   8 +-
 tests/validation/NEON/GEMMLowp.cpp                 |  78 ++++++++++-
 4 files changed, 295 insertions(+), 88 deletions(-)

diff --git a/src/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel.cpp b/src/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel.cpp
index 1e369ab03a..8b3f2383ab 100644
--- a/src/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel.cpp
+++ b/src/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel.cpp
@@ -66,7 +66,10 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *bias, con
 
 std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *bias, ITensorInfo *output)
 {
-    constexpr unsigned int num_elems_processed_per_iteration = 16;
+    // Note: This kernel performs 16 elements per iteration.
+    // However, since we use a left-over for loop, we cannot have any read or write out of memory
+    // For this reason num_elems_processed_per_iteration is set to 1
+    constexpr unsigned int num_elems_processed_per_iteration = 1;
 
     // Configure kernel window
     Window win = calculate_max_window(*output, Steps(num_elems_processed_per_iteration));
@@ -86,7 +89,7 @@ std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITen
 
     if(bias != nullptr)
     {
-        AccessWindowStatic bias_access(bias, 0, 0, ceil_to_multiple(bias->dimension(0), num_elems_processed_per_iteration), bias->tensor_shape()[1]);
+        AccessWindowStatic bias_access(bias, 0, 0, bias->dimension(0), bias->dimension(1));
         window_changed = window_changed || update_window_and_padding(win, bias_access);
     }
 
@@ -144,6 +147,37 @@ inline uint8x16_t finalize_quantization(int32x4x4_t &in_s32, int result_fixedpoi
 
     return out_u8;
 }
+
+/* Function used by the left-over for loop to perform the quantization */
+template <bool is_bounded_relu>
+inline uint8_t finalize_quantization(int32x4_t in_s32, int result_fixedpoint_multiplier, int32_t result_shift, int32x4_t result_offset_after_shift_s32, uint8_t min_u8, uint8_t max_u8)
+{
+    const static int32x4_t zero_s32      = vdupq_n_s32(0);
+    const static int32x4_t sat_value_s32 = vdupq_n_s32(255);
+
+    // Fixed point multiplication with vector saturating rounding doubling multiply high with scalar
+    in_s32 = vqrdmulhq_n_s32(in_s32, result_fixedpoint_multiplier);
+
+    // Round to the nearest division by a power-of-two using result_shift_s32
+    in_s32 = rounding_divide_by_pow2(in_s32, result_shift);
+
+    // Add the offset terms
+    in_s32 = vaddq_s32(in_s32, result_offset_after_shift_s32);
+
+    // Saturate negative values
+    in_s32 = vmaxq_s32(in_s32, zero_s32);
+    in_s32 = vminq_s32(in_s32, sat_value_s32);
+
+    auto out_u8 = static_cast<uint8_t>(vgetq_lane_s32(in_s32, 0));
+
+    if(is_bounded_relu)
+    {
+        out_u8 = std::max(out_u8, min_u8);
+        out_u8 = std::min(out_u8, max_u8);
+    }
+
+    return out_u8;
+}
 } // namespace
 
 namespace arm_compute
@@ -161,63 +195,103 @@ void NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel::run(const Window
     ARM_COMPUTE_UNUSED(min_u8);
     ARM_COMPUTE_UNUSED(max_u8);
 
-    Iterator in(_input, window);
-    Iterator out(_output, window);
+    const int  window_step_x  = 16;
+    const auto window_start_x = static_cast<int>(window.x().start());
+    const auto window_end_x   = static_cast<int>(window.x().end());
+
+    Window win(window);
+    win.set(Window::DimX, Window::Dimension(0, 1, 1));
+
+    Iterator in(_input, win);
+    Iterator out(_output, win);
 
     if(_bias != nullptr)
    {
         Window win_biases;
-        win_biases.set(Window::DimX, Window::Dimension(window.x().start(), window.x().end(), window.x().step()));
+        win_biases.set(Window::DimX, Window::Dimension(0, 1, 1));
         win_biases.set(Window::DimY, Window::Dimension(0, 1, 1));
 
         Iterator bias(_bias, win_biases);
-        execute_window_loop(window, [&](const Coordinates & id)
+        execute_window_loop(win, [&](const Coordinates & id)
         {
-            int32x4x4_t in_s32 =
+            // Compute 16 elements per iteration
+            int x = window_start_x;
+            for(; x <= (window_end_x - window_step_x); x += window_step_x)
             {
+                int32x4x4_t in_s32 =
                 {
-                    vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + 0),
-                    vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + 4),
-                    vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + 8),
-                    vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + 12)
-                }
-            };
-
-            const int32x4x4_t bias_s32 =
-            {
+                    {
+                        vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + x + 0),
+                        vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + x + 4),
+                        vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + x + 8),
+                        vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + x + 12)
+                    }
+                };
+
+                const int32x4x4_t bias_s32 =
                 {
-                    vld1q_s32(reinterpret_cast<const int32_t *>(bias.ptr()) + 0),
-                    vld1q_s32(reinterpret_cast<const int32_t *>(bias.ptr()) + 4),
-                    vld1q_s32(reinterpret_cast<const int32_t *>(bias.ptr()) + 8),
-                    vld1q_s32(reinterpret_cast<const int32_t *>(bias.ptr()) + 12)
-                }
-            };
-
-            // Add the bias to GEMM's result
-            in_s32.val[0] = vaddq_s32(in_s32.val[0], bias_s32.val[0]);
-            in_s32.val[1] = vaddq_s32(in_s32.val[1], bias_s32.val[1]);
-            in_s32.val[2] = vaddq_s32(in_s32.val[2], bias_s32.val[2]);
-            in_s32.val[3] = vaddq_s32(in_s32.val[3], bias_s32.val[3]);
-
-            vst1q_u8(out.ptr(), finalize_quantization<is_bounded_relu>(in_s32, _result_fixedpoint_multiplier, _result_shift, result_offset_after_shift_s32, min_u8, max_u8));
+                    {
+                        vld1q_s32(reinterpret_cast<const int32_t *>(bias.ptr()) + x + 0),
+                        vld1q_s32(reinterpret_cast<const int32_t *>(bias.ptr()) + x + 4),
+                        vld1q_s32(reinterpret_cast<const int32_t *>(bias.ptr()) + x + 8),
+                        vld1q_s32(reinterpret_cast<const int32_t *>(bias.ptr()) + x + 12)
+                    }
+                };
+
+                // Add the bias to GEMM's result
+                in_s32.val[0] = vaddq_s32(in_s32.val[0], bias_s32.val[0]);
+                in_s32.val[1] = vaddq_s32(in_s32.val[1], bias_s32.val[1]);
+                in_s32.val[2] = vaddq_s32(in_s32.val[2], bias_s32.val[2]);
+                in_s32.val[3] = vaddq_s32(in_s32.val[3], bias_s32.val[3]);
+
+                vst1q_u8(out.ptr() + x, finalize_quantization<is_bounded_relu>(in_s32, _result_fixedpoint_multiplier, _result_shift, result_offset_after_shift_s32, min_u8, max_u8));
+            }
+
+            // Compute left-over elements
+            for(; x < window_end_x; ++x)
+            {
+                const int32_t bias_value = *(reinterpret_cast<const int32_t *>(bias.ptr()) + x);
+                int32_t       in_value   = *(reinterpret_cast<const int32_t *>(in.ptr()) + x);
+
+                // Add bias
+                in_value += bias_value;
+
+                // Finalize and store the result
+                *(out.ptr() + x) = finalize_quantization<is_bounded_relu>(vdupq_n_s32(in_value), _result_fixedpoint_multiplier, _result_shift, result_offset_after_shift_s32, static_cast<uint8_t>(_min),
+                                                                          static_cast<uint8_t>(_max));
+            }
         },
         in, bias, out);
     }
    else
    {
-        execute_window_loop(window, [&](const Coordinates & id)
+        execute_window_loop(win, [&](const Coordinates & id)
         {
-            int32x4x4_t in_s32 =
+            // Compute 16 elements per iteration
+            int x = window_start_x;
+            for(; x <= (window_end_x - window_step_x); x += window_step_x)
             {
+                int32x4x4_t in_s32 =
                 {
-                    vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + 0),
-                    vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + 4),
-                    vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + 8),
-                    vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + 12)
-                }
-            };
-
-            vst1q_u8(out.ptr(), finalize_quantization<is_bounded_relu>(in_s32, _result_fixedpoint_multiplier, _result_shift, result_offset_after_shift_s32, min_u8, max_u8));
+                    {
+                        vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + x + 0),
+                        vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + x + 4),
+                        vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + x + 8),
+                        vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + x + 12)
+                    }
+                };
+
+                vst1q_u8(out.ptr() + x, finalize_quantization<is_bounded_relu>(in_s32, _result_fixedpoint_multiplier, _result_shift, result_offset_after_shift_s32, min_u8, max_u8));
+            }
+
+            // Compute left-over elements
+            for(; x < window_end_x; ++x)
+            {
+                const int32x4_t in_s32 = vld1q_dup_s32(reinterpret_cast<const int32_t *>(in.ptr()) + x);
+
+                // Finalize and store the result
+                *(out.ptr() + x) = finalize_quantization<is_bounded_relu>(in_s32, _result_fixedpoint_multiplier, _result_shift, result_offset_after_shift_s32, static_cast<uint8_t>(_min), static_cast<uint8_t>(_max));
+            }
         },
         in, out);
     }
diff --git a/src/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel.cpp b/src/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel.cpp
index 7f351020b9..54513d8cdb 100644
--- a/src/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel.cpp
+++ b/src/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel.cpp
@@ -65,7 +65,10 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *bias, con
 
 std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *bias, ITensorInfo *output)
 {
-    constexpr unsigned int num_elems_processed_per_iteration = 16;
+    // Note: This kernel performs 16 elements per iteration.
+    // However, since we use a left-over for loop, we cannot have any read or write out of memory
+    // For this reason num_elems_processed_per_iteration is set to 1
+    constexpr unsigned int num_elems_processed_per_iteration = 1;
 
     // Configure kernel window
     Window win = calculate_max_window(*output, Steps(num_elems_processed_per_iteration));
@@ -85,7 +88,7 @@ std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITen
 
     if(bias != nullptr)
     {
-        AccessWindowStatic bias_access(bias, 0, 0, ceil_to_multiple(bias->dimension(0), num_elems_processed_per_iteration), bias->tensor_shape()[1]);
+        AccessWindowStatic bias_access(bias, 0, 0, bias->dimension(0), bias->dimension(1));
         window_changed = window_changed || update_window_and_padding(win, bias_access);
     }
 
@@ -163,69 +166,125 @@ void NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel::run(const Window &window)
     ARM_COMPUTE_UNUSED(min_u8);
     ARM_COMPUTE_UNUSED(max_u8);
 
-    Iterator in(_input, window);
-    Iterator out(_output, window);
+    const int  window_step_x  = 16;
+    const auto window_start_x = static_cast<int>(window.x().start());
+    const auto window_end_x   = static_cast<int>(window.x().end());
+
+    Window win(window);
+    win.set(Window::DimX, Window::Dimension(0, 1, 1));
+
+    Iterator in(_input, win);
+    Iterator out(_output, win);
 
     if(_bias != nullptr)
     {
         Window win_biases;
-        win_biases.set(Window::DimX, Window::Dimension(window.x().start(), window.x().end(), window.x().step()));
+        win_biases.set(Window::DimX, Window::Dimension(0, 1, 1));
         win_biases.set(Window::DimY, Window::Dimension(0, 1, 1));
 
         Iterator bias(_bias, win_biases);
-        execute_window_loop(window, [&](const Coordinates & id)
+        execute_window_loop(win, [&](const Coordinates & id)
         {
-            int32x4x4_t in_s32 =
+            // Compute 16 elements per iteration
+            int x = window_start_x;
+            for(; x <= (window_end_x - window_step_x); x += window_step_x)
             {
+                int32x4x4_t in_s32 =
                 {
-                    vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + 0),
-                    vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + 4),
-                    vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + 8),
-                    vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + 12)
-                }
-            };
-
-            const int32x4x4_t bias_s32 =
-            {
+                    {
+                        vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + x + 0),
+                        vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + x + 4),
+                        vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + x + 8),
+                        vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + x + 12)
+                    }
+                };
+
+                const int32x4x4_t bias_s32 =
                 {
-                    vld1q_s32(reinterpret_cast<const int32_t *>(bias.ptr()) + 0),
-                    vld1q_s32(reinterpret_cast<const int32_t *>(bias.ptr()) + 4),
-                    vld1q_s32(reinterpret_cast<const int32_t *>(bias.ptr()) + 8),
-                    vld1q_s32(reinterpret_cast<const int32_t *>(bias.ptr()) + 12)
-                }
-            };
-
-            // Add the bias to GEMM's result
-            in_s32.val[0] = vaddq_s32(in_s32.val[0], bias_s32.val[0]);
-            in_s32.val[1] = vaddq_s32(in_s32.val[1], bias_s32.val[1]);
-            in_s32.val[2] = vaddq_s32(in_s32.val[2], bias_s32.val[2]);
-            in_s32.val[3] = vaddq_s32(in_s32.val[3], bias_s32.val[3]);
+                    {
+                        vld1q_s32(reinterpret_cast<const int32_t *>(bias.ptr()) + x + 0),
+                        vld1q_s32(reinterpret_cast<const int32_t *>(bias.ptr()) + x + 4),
+                        vld1q_s32(reinterpret_cast<const int32_t *>(bias.ptr()) + x + 8),
+                        vld1q_s32(reinterpret_cast<const int32_t *>(bias.ptr()) + x + 12)
+                    }
+                };
+
+                // Add the bias to GEMM's result
+                in_s32.val[0] = vaddq_s32(in_s32.val[0], bias_s32.val[0]);
+                in_s32.val[1] = vaddq_s32(in_s32.val[1], bias_s32.val[1]);
+                in_s32.val[2] = vaddq_s32(in_s32.val[2], bias_s32.val[2]);
+                in_s32.val[3] = vaddq_s32(in_s32.val[3], bias_s32.val[3]);
+
+                // Add the offset terms to GEMM's result and multiply by result_mult_int
+                scale_input(in_s32, result_offset_s32, _result_mult_int);
+
+                vst1q_u8(out.ptr() + x, finalize_quantization<is_bounded_relu>(in_s32, result_shift_s32, min_u8, max_u8));
+            }
+
+            // Compute left-over elements
+            for(; x < window_end_x; ++x)
+            {
+                const int32_t bias_value = *(reinterpret_cast<const int32_t *>(bias.ptr()) + x);
+                int32_t       in_value   = *(reinterpret_cast<const int32_t *>(in.ptr()) + x);
 
-            // Add the offset terms to GEMM's result and multiply by result_mult_int
-            scale_input(in_s32, result_offset_s32, _result_mult_int);
+                // Quantize
+                in_value = ((in_value + bias_value + _result_offset) * _result_mult_int) >> _result_shift;
 
-            vst1q_u8(out.ptr(), finalize_quantization<is_bounded_relu>(in_s32, result_shift_s32, min_u8, max_u8));
+                // Finalize and store the result
+                if(is_bounded_relu)
+                {
+                    *(out.ptr() + x) = static_cast<uint8_t>(std::max(_min, std::min(_max, in_value)));
+                }
+                else
+                {
+                    *(out.ptr() + x) = static_cast<uint8_t>(std::max(0, std::min(255, in_value)));
+                }
+            }
         },
         in, bias, out);
     }
     else
     {
-        execute_window_loop(window, [&](const Coordinates & id)
+        execute_window_loop(win, [&](const Coordinates & id)
         {
-            int32x4x4_t in_s32 =
+            // Compute 16 elements per iteration
+            int x = window_start_x;
+            for(; x <= (window_end_x - window_step_x); x += window_step_x)
             {
+                int32x4x4_t in_s32 =
                 {
-                    vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + 0),
-                    vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + 4),
-                    vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + 8),
-                    vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + 12)
-                }
-            };
+                    {
+                        vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + x + 0),
+                        vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + x + 4),
+                        vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + x + 8),
+                        vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + x + 12)
+                    }
+                };
+
+                // Add the offset terms to GEMM's result and multiply by result_mult_int
+                scale_input(in_s32, result_offset_s32, _result_mult_int);
+
+                vst1q_u8(out.ptr() + x, finalize_quantization<is_bounded_relu>(in_s32, result_shift_s32, min_u8, max_u8));
+            }
+
+            // Compute left-over elements
+            for(; x < window_end_x; ++x)
+            {
+                int32_t in_value = *(reinterpret_cast<const int32_t *>(in.ptr()) + x);
 
-            // Add the offset terms to GEMM's result and multiply by result_mult_int
-            scale_input(in_s32, result_offset_s32, _result_mult_int);
+                // Quantize
+                in_value = ((in_value + _result_offset) * _result_mult_int) >> _result_shift;
 
-            vst1q_u8(out.ptr(), finalize_quantization<is_bounded_relu>(in_s32, result_shift_s32, min_u8, max_u8));
+                // Finalize and store the result
+                if(is_bounded_relu)
+                {
+                    *(out.ptr() + x) = static_cast<uint8_t>(std::max(_min, std::min(_max, in_value)));
+                }
+                else
+                {
+                    *(out.ptr() + x) = static_cast<uint8_t>(std::max(0, std::min(255, in_value)));
+                }
+            }
         },
         in, out);
     }
diff --git a/tests/datasets/ShapeDatasets.h b/tests/datasets/ShapeDatasets.h
index 02a71aa7b5..c9e5510760 100644
--- a/tests/datasets/ShapeDatasets.h
+++ b/tests/datasets/ShapeDatasets.h
@@ -162,11 +162,11 @@ public:
         : ShapeDataset("Shape",
                        {
                            // Batch size 1
-                           TensorShape{ 1920U, 1080U },
-                           TensorShape{ 640U, 480U, 2U, 3U },
-                           TensorShape{ 4160U, 3120U },
+                           TensorShape{ 1921U, 1083U },
+                           TensorShape{ 641U, 485U, 2U, 3U },
+                           TensorShape{ 4159U, 3117U },
                            // Batch size 4
-                           TensorShape{ 800U, 600U, 1U, 4U },
+                           TensorShape{ 799U, 595U, 1U, 4U },
                        })
     {
     }
diff --git a/tests/validation/NEON/GEMMLowp.cpp b/tests/validation/NEON/GEMMLowp.cpp
index 7616df9eaa..a901b442ab 100644
--- a/tests/validation/NEON/GEMMLowp.cpp
+++ b/tests/validation/NEON/GEMMLowp.cpp
@@ -187,6 +187,43 @@ const auto quantize_down_int32_to_uint8_scale_relu_cases = framework::dataset::m
 
 using NEGEMMLowpQuantizeDownInt32ToUint8ScaleFixture = GEMMLowpQuantizeDownInt32ToUint8ScaleValidationFixture<Tensor, Accessor, NEGEMMLowpQuantizeDownInt32ToUint8Scale>;
 
+// *INDENT-OFF*
+// clang-format off
+DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(
+    framework::dataset::make("InputAInfo", { TensorInfo(TensorShape(21U, 13U), 1, DataType::S32), // Input not a multiple of 16
+                                             TensorInfo(TensorShape(21U, 13U), 1, DataType::S32), // Invalid min and max
+                                             TensorInfo(TensorShape(20U, 13U), 1, DataType::S32), // Wrong output data type
+                                          }),
+    framework::dataset::make("InputBInfo",{ TensorInfo(TensorShape(21U), 1, DataType::S32),
+                                            TensorInfo(TensorShape(21U), 1, DataType::S32),
+                                            TensorInfo(TensorShape(20U), 1, DataType::S32),
+                                          })),
+    framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(21U, 13U), 1, DataType::QASYMM8),
+                                            TensorInfo(TensorShape(21U, 13U), 1, DataType::QASYMM8),
+                                            TensorInfo(TensorShape(20U, 13U), 1, DataType::S32),
+                                          })),
+    framework::dataset::make("Min",{ 0,
+                                     8,
+                                     13,
+                                   })),
+    framework::dataset::make("Max",{ 205,
+                                     300,
+                                     180,
+                                   })),
+    framework::dataset::make("Expected", { true, false, false })),
+    a_info, b_info, output_info, min, max, expected)
+{
+    // Lock tensors
+    Status status = NEGEMMLowpQuantizeDownInt32ToUint8Scale::validate(&a_info.clone()->set_is_resizable(false),
+                                                                      &b_info.clone()->set_is_resizable(false),
+                                                                      &output_info.clone()->set_is_resizable(false),
+                                                                      min,
+                                                                      max);
+    ARM_COMPUTE_EXPECT(bool(status) == expected, framework::LogLevel::ERRORS);
+}
+// clang-format on
+// *INDENT-ON*
+
 DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(framework::dataset::concat(datasets::SmallShapes(), datasets::LargeShapes()), quantize_down_int32_to_uint8_scale_cases),
                shape, result_offset, result_mult_int, result_shift, min, max, add_bias)
 {
@@ -218,7 +255,7 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(framework::da
     }
 
     // Validate padding
-    const PaddingSize padding = PaddingCalculator(shape.x(), 16).required_padding();
+    const PaddingSize padding(0);
     validate(in.info()->padding(), padding);
     validate(out.info()->padding(), padding);
 
@@ -269,6 +306,43 @@ const auto quantize_down_int32_to_uint8_scale_by_fixedpoint_relu_cases = framewo
 
 using NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointFixture = GEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointValidationFixture<Tensor, Accessor, NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint>;
 
+// *INDENT-OFF*
+// clang-format off
+DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(
+    framework::dataset::make("InputAInfo", { TensorInfo(TensorShape(21U, 13U), 1, DataType::S32), // Input not a multiple of 16
+                                             TensorInfo(TensorShape(21U, 13U), 1, DataType::S32), // Invalid min and max
+                                             TensorInfo(TensorShape(20U, 13U), 1, DataType::S32), // Wrong output data type
+                                          }),
+    framework::dataset::make("InputBInfo",{ TensorInfo(TensorShape(21U), 1, DataType::S32),
+                                            TensorInfo(TensorShape(21U), 1, DataType::S32),
+                                            TensorInfo(TensorShape(20U), 1, DataType::S32),
+                                          })),
+    framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(21U, 13U), 1, DataType::QASYMM8),
+                                            TensorInfo(TensorShape(21U, 13U), 1, DataType::QASYMM8),
+                                            TensorInfo(TensorShape(20U, 13U), 1, DataType::S32),
+                                          })),
+    framework::dataset::make("Min",{ 0,
+                                     8,
+                                     13,
+                                   })),
+    framework::dataset::make("Max",{ 205,
+                                     300,
+                                     180,
+                                   })),
+    framework::dataset::make("Expected", { true, false, false })),
+    a_info, b_info, output_info, min, max, expected)
+{
+    // Lock tensors
+    Status status = NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint::validate(&a_info.clone()->set_is_resizable(false),
+                                                                                  &b_info.clone()->set_is_resizable(false),
+                                                                                  &output_info.clone()->set_is_resizable(false),
+                                                                                  min,
+                                                                                  max);
+    ARM_COMPUTE_EXPECT(bool(status) == expected, framework::LogLevel::ERRORS);
+}
+// clang-format on
+// *INDENT-ON*
+
 DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(framework::dataset::concat(datasets::SmallShapes(), datasets::LargeShapes()),
                quantize_down_int32_to_uint8_scale_by_fixedpoint_cases),
                shape, result_fixedpoint_multiplier, result_shift, result_offset_after_shift, min, max, add_bias)
@@ -301,7 +375,7 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(framework::da
     }
 
     // Validate padding
-    const PaddingSize padding = PaddingCalculator(shape.x(), 16).required_padding();
+    const PaddingSize padding(0);
     validate(in.info()->padding(), padding);
     validate(out.info()->padding(), padding);
 
-- 
cgit v1.2.1
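
Note on the technique, for readers who want the gist without walking the diff: instead of requiring each output row to be padded to a multiple of 16, the kernels now run a vectorized main loop over full 16-element chunks and finish the row with a scalar "left-over" loop, so no load or store ever crosses the end of the buffer. The sketch below shows the same pattern in plain, self-contained C++. It mirrors the `((value + offset) * mult) >> shift` quantization of the scale kernel's left-over path, but the function and parameter names are illustrative stand-ins, not Compute Library APIs; a real NEON build would implement the chunk body with vld1q_s32/vst1q_u8 intrinsics.

#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <vector>

// Stand-in for the vectorized body: quantizes one full 16-element chunk.
// A NEON kernel would use vld1q_s32/vqrdmulhq_n_s32/vst1q_u8 here instead.
static void quantize_chunk_of_16(const int32_t *in, uint8_t *out, int32_t offset, int32_t mult, int32_t shift)
{
    for(int i = 0; i < 16; ++i)
    {
        const int32_t v = ((in[i] + offset) * mult) >> shift;
        out[i]          = static_cast<uint8_t>(std::max(0, std::min(255, v)));
    }
}

static void quantize_row(const int32_t *in, uint8_t *out, int width, int32_t offset, int32_t mult, int32_t shift)
{
    constexpr int step = 16;
    int           x    = 0;

    // Main loop: full 16-element chunks only, so it never reads or writes past 'width'
    for(; x <= width - step; x += step)
    {
        quantize_chunk_of_16(in + x, out + x, offset, mult, shift);
    }

    // Left-over loop: the remaining 0..15 elements, one at a time -> no padding required
    for(; x < width; ++x)
    {
        const int32_t v = ((in[x] + offset) * mult) >> shift;
        out[x]          = static_cast<uint8_t>(std::max(0, std::min(255, v)));
    }
}

int main()
{
    std::vector<int32_t> in(21); // deliberately not a multiple of 16, like the new Validate tests
    for(std::size_t i = 0; i < in.size(); ++i)
    {
        in[i] = static_cast<int32_t>(i) * 100;
    }
    std::vector<uint8_t> out(in.size());

    quantize_row(in.data(), out.data(), static_cast<int>(in.size()), 5, 3, 4);
    std::printf("first=%u last=%u\n", out.front(), out.back());
    return 0;
}

Because the main loop's bound is width - 16, a 21-element row takes one vector iteration for elements 0-15 and five scalar iterations for elements 16-20. That is what lets the kernels declare num_elems_processed_per_iteration = 1 and the Configuration tests assert zero required padding.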