From 3b162e53267d13d18891baf3372f971f1d4213d3 Mon Sep 17 00:00:00 2001
From: David Svantesson
Date: Tue, 28 Mar 2023 14:13:32 +0000
Subject: Reorder added

Adds Reorder kernel exposing blocking reorders from arm_gemm

Resolves ONCPUML-1232

Change-Id: I42bf4166311fe1771565134d3ed7039fc8e30230
Signed-off-by: David Svantesson
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/9500
Comments-Addressed: Arm Jenkins
Reviewed-by: SiCong Li
Tested-by: Arm Jenkins
Benchmark: Arm Jenkins
---
 tests/validation/reference/Reorder.cpp | 156 +++++++++++++++++++++++++++++++++
 tests/validation/reference/Reorder.h   |  44 ++++++++++
 2 files changed, 200 insertions(+)
 create mode 100644 tests/validation/reference/Reorder.cpp
 create mode 100644 tests/validation/reference/Reorder.h

(limited to 'tests/validation/reference')

diff --git a/tests/validation/reference/Reorder.cpp b/tests/validation/reference/Reorder.cpp
new file mode 100644
index 0000000000..8abb372596
--- /dev/null
+++ b/tests/validation/reference/Reorder.cpp
@@ -0,0 +1,156 @@
+/*
+ * Copyright (c) 2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "Reorder.h"
+#include "src/core/NEON/kernels/arm_gemm/utils.hpp"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+namespace reference
+{
+
+/*
+ * Generic transform.
+ *
+ * Assuming the untransposed case, this works by first reading <BlockBy>
+ * consecutive values from the first input row. This same number of values
+ * are then read from the next <IntBy-1> rows. Now return to the first
+ * input row and repeat.
+ *
+ * Need to cope with the work requested in either dimension not actually
+ * being a multiple of the block sizes.
+ */
+template <unsigned int tIntBy, unsigned int BlockBy, bool Transposed, size_t TOutSize, size_t TInSize, typename d_type, arm_gemm::VLType vlt>
+struct Transform_ref
+{
+    template <typename TOut, typename TIn>
+    static void Transform(TOut &out, const TIn in, const int stride,
+                          const int y0, const int ymax, const int x0, const int xmax)
+    {
+        // NOTE: This code is disabled to avoid the call to get_vector_length(), so templated transforms will not be
+        // correct for SVE. This is not an issue as we have specializations for all SVE cases.
+        // For SVE cases we multiply the interleave factor by the vector length.
+        // const unsigned int IntBy = tIntBy * (vlt == VLType::SVE ? get_vector_length<TOut>() / BlockBy : 1);
+        const unsigned int IntBy = tIntBy;
+        int out_index = 0;
+
+        const int n_whole_y_blocks = (ymax - y0) / IntBy;
+        const int y_remainders = (ymax - y0) % IntBy;
+        const int n_y_blocks = n_whole_y_blocks + (y_remainders ? 1 : 0);
+
+        const int n_whole_x_blocks = (xmax - x0) / BlockBy;
+        const int x_remainders = (xmax - x0) % BlockBy;
+        const int n_x_blocks = n_whole_x_blocks + (x_remainders ? 1 : 0);
+
+        // "Y" loop: advance down the rows of the source IntBy rows at a time.
+        // Set up fill_rows to show the number of rows to copy from, and blank_rows
+        // for the number of blank rows to add.
+        for(int y_block = 0; y_block < n_y_blocks; y_block++)
+        {
+            const int fill_rows  = (y_block < n_whole_y_blocks) ? IntBy : y_remainders;
+            const int blank_rows = IntBy - fill_rows;
+
+            const int y_base = y0 + (y_block * IntBy);
+
+            // So now advance along this block of rows, BlockBy columns at a time.
+            for(int x_block = 0; x_block < n_x_blocks; x_block++)
+            {
+                const int fill_cols  = (x_block < n_whole_x_blocks) ? BlockBy : x_remainders;
+                const int blank_cols = BlockBy - fill_cols;
+
+                const int x_base = x0 + (x_block * BlockBy);
+
+                for(int row = 0; row < fill_rows; row++)
+                {
+                    for(int col = 0; col < fill_cols; col++)
+                    {
+                        // In-range copy. If it's transposed, we reverse the sense of rows and columns here.
+                        if(Transposed)
+                        {
+                            out[out_index] = in[(x_base + col) * stride + y_base + row];
+                            out_index++;
+                        }
+                        else
+                        {
+                            out[out_index] = in[(y_base + row) * stride + x_base + col];
+                            out_index++;
+                        }
+                    }
+                    // "col" tail - row is in range but column is out of range.
+                    for(int col = 0; col < blank_cols; col++)
+                    {
+                        out[out_index] = 0;
+                        out_index++;
+                    }
+                }
+                // "row" tail - row is out of range so fill with zeros always.
+                const d_type zeroval = 0;
+                const int pads = blank_rows * (fill_cols + blank_cols);
+
+                for(int i = 0; i < pads; i++)
+                {
+                    out[out_index + i] = zeroval;
+                }
+
+                out_index += pads;
+            }
+        }
+    }
+};
+
+template <typename T>
+SimpleTensor<T> reorder_layer(const SimpleTensor<T> &src, const TensorShape &output_shape, WeightFormat output_wf)
+{
+    SimpleTensor<T> dst{ output_shape, src.data_type() };
+    const int cols = src.shape()[0];
+    const int rows = src.shape()[1];
+
+    switch(output_wf)
+    {
+        case WeightFormat::OHWIo4:
+        {
+            Transform_ref<4, 1, true, sizeof(float), sizeof(float), float, arm_gemm::VLType::None>::Transform<SimpleTensor<T> &, SimpleTensor<T>>(dst, src, rows, 0, rows, 0, cols);
+            break;
+        }
+        case WeightFormat::OHWIo8:
+        {
+            Transform_ref<8, 1, true, sizeof(float), sizeof(float), float, arm_gemm::VLType::None>::Transform<SimpleTensor<T> &, SimpleTensor<T>>(dst, src, rows, 0, rows, 0, cols);
+            break;
+        }
+        default:
+            break;
+    }
+
+    return dst;
+}
+
+template SimpleTensor<float> reorder_layer(const SimpleTensor<float> &src, const TensorShape &output_shape, WeightFormat output_wf);
+
+} // namespace reference
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
diff --git a/tests/validation/reference/Reorder.h b/tests/validation/reference/Reorder.h
new file mode 100644
index 0000000000..94ee5078f8
--- /dev/null
+++ b/tests/validation/reference/Reorder.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ACL_TESTS_VALIDATION_REFERENCE_REORDER
+#define ACL_TESTS_VALIDATION_REFERENCE_REORDER
+
+#include "tests/SimpleTensor.h"
+#include "tests/Types.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+namespace reference
+{
+template <typename T>
+SimpleTensor<T> reorder_layer(const SimpleTensor<T> &src, const TensorShape &output_shape, WeightFormat output_wf);
+} // namespace reference
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
+#endif /* ACL_TESTS_VALIDATION_REFERENCE_REORDER */
--
cgit v1.2.1
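
For readers unfamiliar with the blocking described in the Transform_ref comment, below is a minimal standalone sketch of the untransposed IntBy = 4, BlockBy = 1 case (the patch's OHWIo4/OHWIo8 paths additionally apply the transposed indexing). It is not part of the patch, does not use SimpleTensor or any other ACL API, and the helper name interleave4 is invented purely for this illustration.

// Standalone sketch (hypothetical, not part of the patch) of the 4-way row
// interleave that Transform_ref performs in the untransposed case: walk the
// source four rows at a time, emit one value from each of those rows per
// column, and zero-pad when the row count is not a multiple of four.
#include <cstdio>
#include <vector>

std::vector<float> interleave4(const std::vector<float> &in, int rows, int cols)
{
    const int IntBy    = 4;
    const int y_blocks = (rows + IntBy - 1) / IntBy; // number of 4-row groups, last one may be partial
    std::vector<float> out;
    out.reserve(static_cast<size_t>(y_blocks) * IntBy * cols);

    for(int y_block = 0; y_block < y_blocks; y_block++)
    {
        const int y_base = y_block * IntBy;
        for(int col = 0; col < cols; col++)      // BlockBy == 1: one column at a time
        {
            for(int row = 0; row < IntBy; row++) // four consecutive source rows
            {
                const int y = y_base + row;
                out.push_back(y < rows ? in[y * cols + col] : 0.0f); // zero-pad the row tail
            }
        }
    }
    return out;
}

int main()
{
    // 6x3 source holding the values 0..17 in row-major order.
    const int rows = 6, cols = 3;
    std::vector<float> src(rows * cols);
    for(int i = 0; i < rows * cols; i++)
    {
        src[i] = static_cast<float>(i);
    }

    // Expected: 0 3 6 9 | 1 4 7 10 | 2 5 8 11 | 12 15 0 0 | 13 16 0 0 | 14 17 0 0
    for(float v : interleave4(src, rows, cols))
    {
        std::printf("%g ", v);
    }
    std::printf("\n");
    return 0;
}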