From 0b1452dec577f30e01b6a11cbded9f9cb0b072a8 Mon Sep 17 00:00:00 2001
From: Michalis Spyrou
Date: Thu, 27 Feb 2020 16:20:19 +0000
Subject: COMPMID-3178: Remove padding from NETransposeKernel

Change-Id: I7d59f3f44bce0ab45ddaf00fd4347b632e07e2d6
Signed-off-by: Michalis Spyrou
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/2803
Tested-by: Arm Jenkins
Reviewed-by: Michele Di Giorgio
Comments-Addressed: Arm Jenkins
---
 src/core/NEON/kernels/NETransposeKernel.cpp | 69 ++++++++++-------------------
 tests/validation/NEON/Transpose.cpp         | 44 ++----------------
 2 files changed, 28 insertions(+), 85 deletions(-)

diff --git a/src/core/NEON/kernels/NETransposeKernel.cpp b/src/core/NEON/kernels/NETransposeKernel.cpp
index 8fc24949ed..2951a16d0b 100644
--- a/src/core/NEON/kernels/NETransposeKernel.cpp
+++ b/src/core/NEON/kernels/NETransposeKernel.cpp
@@ -54,25 +54,6 @@ TensorShape transposed_tensor_shape(const TensorShape &in)
     return output_shape;
 }
 
-unsigned int num_elems_processed(size_t element_size)
-{
-    switch(element_size)
-    {
-        case 1:
-            return 8;
-            break;
-        case 2:
-            return 4;
-            break;
-        case 4:
-            return 4;
-            break;
-        default:
-            break;
-    }
-    ARM_COMPUTE_ERROR("Element size not supported");
-}
-
 Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output)
 {
     ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input);
@@ -90,32 +71,20 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output)
 
     return Status{};
 }
-
-std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *output)
+unsigned int num_elems_processed(size_t element_size)
 {
-    // Note: This kernel performs 16 elements per iteration.
-    // However, since we use a left-over for loop on both dimensions (X and Y), we cannot have any read or write out of memory
-    // For this reason num_elems_processed_per_iteration_x is set to 1
-    const unsigned int num_elems_processed_per_iteration_x = 1;
-    const unsigned int num_elems_processed_per_iteration_y = num_elems_processed(input->element_size());
-
-    // Configure kernel window
-    Window win = calculate_max_window(*input, Steps(num_elems_processed_per_iteration_x, num_elems_processed_per_iteration_y));
-
-    AccessWindowRectangle input_access(input, 0, 0, num_elems_processed_per_iteration_x, num_elems_processed_per_iteration_y);
-    bool window_changed = update_window_and_padding(win, input_access);
-
-    if(output->total_size() != 0)
+    switch(element_size)
     {
-        AccessWindowTranspose output_access(output, 0, 0, num_elems_processed_per_iteration_y, num_elems_processed_per_iteration_x);
-
-        window_changed = window_changed || update_window_and_padding(win, output_access);
-
-        output_access.set_valid_region(win, ValidRegion(Coordinates(), output->tensor_shape()));
+        case 1:
+            return 8;
+        case 2:
+        case 4:
+            return 4;
+        default:
+            break;
     }
 
-    Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
-    return std::make_pair(err, win);
+    ARM_COMPUTE_ERROR("Element size not supported");
 }
 
 void transpose_8bit_elements(const ITensor *in, ITensor *out, const Window &window)
@@ -487,7 +456,6 @@ Status NETransposeKernel::validate(const ITensorInfo *input, const ITensorInfo *
 {
     ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
     ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output));
-    ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(), output->clone().get()).first);
 
     return Status{};
 }
@@ -525,9 +493,20 @@ void NETransposeKernel::configure(const ITensor *input, ITensor *output)
     }
 
     // Configure kernel window
-    auto win_config = validate_and_configure_window(input->info(), output->info());
-    ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
-    INEKernel::configure(win_config.second);
+    Coordinates coord;
+    coord.set_num_dimensions(output->info()->num_dimensions());
+    output->info()->set_valid_region(ValidRegion(coord, output->info()->tensor_shape()));
+
+    // Note: This kernel performs 16 elements per iteration.
+    // However, since we use a left-over for loop on both dimensions (X and Y), we cannot have any read or write out of memory
+    // For this reason num_elems_processed_per_iteration_x is set to 1
+    const unsigned int num_elems_processed_per_iteration_x = 1;
+    const unsigned int num_elems_processed_per_iteration_y = num_elems_processed(input->info()->element_size());
+
+    // Configure kernel window
+    Window win = calculate_max_window(*input->info(), Steps(num_elems_processed_per_iteration_x, num_elems_processed_per_iteration_y));
+
+    INEKernel::configure(win);
 }
 
 void NETransposeKernel::run(const Window &window, const ThreadInfo &info)
diff --git a/tests/validation/NEON/Transpose.cpp b/tests/validation/NEON/Transpose.cpp
index c9ebdd5182..1f38fcc677 100644
--- a/tests/validation/NEON/Transpose.cpp
+++ b/tests/validation/NEON/Transpose.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2019 ARM Limited.
+ * Copyright (c) 2017-2020 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -46,21 +46,17 @@ TEST_SUITE(Transpose)
 // *INDENT-OFF*
 // clang-format off
 DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(
-               framework::dataset::make("InputInfo", { TensorInfo(TensorShape(21U, 13U), 1, DataType::U8),  // Input not a multiple of 8
-                                                       TensorInfo(TensorShape(21U, 13U), 1, DataType::U16), // Invalid shape
-                                                       TensorInfo(TensorShape(20U, 13U), 1, DataType::U32), // Window shrink
+               framework::dataset::make("InputInfo", { TensorInfo(TensorShape(21U, 13U), 1, DataType::U16), // Invalid shape
                                                        TensorInfo(TensorShape(20U, 13U), 1, DataType::U8),  // Wrong data type
                                                        TensorInfo(TensorShape(20U, 16U), 1, DataType::U16),
                                                        TensorInfo(TensorShape(20U, 16U), 1, DataType::U32),
                                                      }),
-               framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(13U, 21U), 1, DataType::U8),
-                                                       TensorInfo(TensorShape(21U, 13U), 1, DataType::U16),
-                                                       TensorInfo(TensorShape(13U, 20U), 1, DataType::U32),
+               framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(21U, 13U), 1, DataType::U16),
                                                        TensorInfo(TensorShape(31U, 20U), 1, DataType::U16),
                                                        TensorInfo(TensorShape(16U, 20U), 1, DataType::U16),
                                                        TensorInfo(TensorShape(16U, 20U), 1, DataType::U32),
                                                      })),
-               framework::dataset::make("Expected", { false, false, false, false, true, true })),
+               framework::dataset::make("Expected", { false, false, true, true })),
                a_info, output_info, expected)
 {
     // Lock tensors
@@ -71,38 +67,6 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(
 // clang-format on
 // *INDENT-ON*
 
-DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(datasets::Small2DShapes(), framework::dataset::make("DataType", { DataType::S8, DataType::U8, DataType::S16, DataType::U16, DataType::U32, DataType::S32, DataType::F16, DataType::F32 })),
-               shape, data_type)
-{
-    // Make rows the columns of the original shape
-    TensorShape output_shape{ shape[1], shape[0] };
-
-    // Create tensors
-    Tensor src = create_tensor<Tensor>(shape, data_type);
-    Tensor dst = create_tensor<Tensor>(output_shape, data_type);
-
-    // Create and Configure function
-    NETranspose trans;
-    trans.configure(&src, &dst);
-
-    // Validate valid region
-    const ValidRegion valid_region = shape_to_valid_region(output_shape);
-    validate(dst.info()->valid_region(), valid_region);
-
-    // Validate padding
-    const unsigned int num_elems_processed_per_iteration_x = 1;
-    const unsigned int num_elems_processed_per_iteration_y = std::max(4, static_cast<int>(8 / src.info()->element_size()));
-    const unsigned int max_in_x  = ceil_to_multiple(shape[0], num_elems_processed_per_iteration_x);
-    const unsigned int max_in_y  = ceil_to_multiple(shape[1], num_elems_processed_per_iteration_y);
-    const unsigned int max_out_x = ceil_to_multiple(output_shape[0], num_elems_processed_per_iteration_y);
-    const unsigned int max_out_y = ceil_to_multiple(output_shape[1], num_elems_processed_per_iteration_x);
-
-    const PaddingSize in_padding(0, max_in_x - shape[0], max_in_y - shape[1], 0);
-    const PaddingSize out_padding(0, max_out_x - output_shape[0], max_out_y - output_shape[1], 0);
-    validate(src.info()->padding(), in_padding);
-    validate(dst.info()->padding(), out_padding);
-}
-
 template <typename T>
 using NETransposeFixture = TransposeValidationFixture<Tensor, Accessor, NETranspose, T>;
 
-- 
cgit v1.2.1