From afd38f0c617d6f89b2b4532c6c44f116617e2b6f Mon Sep 17 00:00:00 2001
From: Felix Thomasmathibalan
Date: Wed, 27 Sep 2023 17:46:17 +0100
Subject: Apply clang-format on repository

Code is formatted as per a revised clang-format configuration file
(not part of this delivery). Version 14.0.6 is used.

Exclusion List:
- files with .cl extension
- files that are not strictly C/C++ (e.g. Android.bp, Sconscript ...)
And the following directories
- compute_kernel_writer/validation/
- tests/
- include/
- src/core/NEON/kernels/convolution/
- src/core/NEON/kernels/arm_gemm/
- src/core/NEON/kernels/arm_conv/
- data/

There will be a follow-up for formatting of .cl files and the files
under tests/ and compute_kernel_writer/validation/.

Signed-off-by: Felix Thomasmathibalan
Change-Id: Ib7eb1fcf4e7537b9feaefcfc15098a804a3fde0a
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/10391
Benchmark: Arm Jenkins
Tested-by: Arm Jenkins
Reviewed-by: Gunes Bayir
---
 arm_compute/core/Helpers.inl | 57 ++++++++++++++++++++++----------------------
 1 file changed, 29 insertions(+), 28 deletions(-)

diff --git a/arm_compute/core/Helpers.inl b/arm_compute/core/Helpers.inl
index ff902bba20..60a21e9418 100644
--- a/arm_compute/core/Helpers.inl
+++ b/arm_compute/core/Helpers.inl
@@ -32,12 +32,9 @@ template <size_t dimension>
 struct IncrementIterators
 {
     template <typename T, typename... Ts>
-    static void unroll(T &&it, Ts &&... iterators)
+    static void unroll(T &&it, Ts &&...iterators)
     {
-        auto increment = [](T && it)
-        {
-            it.increment(dimension);
-        };
+        auto increment = [](T &&it) { it.increment(dimension); };
         utility::for_each(increment, std::forward<T>(it), std::forward<Ts>(iterators)...);
     }
     static void unroll()
@@ -50,14 +47,14 @@ template <std::size_t dim>
 struct ForEachDimension
 {
     template <typename L, typename... Ts>
-    static void unroll(const Window &w, Coordinates &id, L &&lambda_function, Ts &&... iterators)
+    static void unroll(const Window &w, Coordinates &id, L &&lambda_function, Ts &&...iterators)
     {
         const auto &d = w[dim - 1];
 
-        for(auto v = d.start(); v < d.end(); v += d.step(), IncrementIterators < dim - 1 >::unroll(iterators...))
+        for (auto v = d.start(); v < d.end(); v += d.step(), IncrementIterators<dim - 1>::unroll(iterators...))
         {
             id.set(dim - 1, v);
-            ForEachDimension < dim - 1 >::unroll(w, id, lambda_function, iterators...);
+            ForEachDimension<dim - 1>::unroll(w, id, lambda_function, iterators...);
         }
     }
 };
@@ -66,7 +63,7 @@ template <>
 struct ForEachDimension<0>
 {
     template <typename L, typename... Ts>
-    static void unroll(const Window &w, Coordinates &id, L &&lambda_function, Ts &&... iterators)
+    static void unroll(const Window &w, Coordinates &id, L &&lambda_function, Ts &&...iterators)
     {
         ARM_COMPUTE_UNUSED(w, iterators...);
         lambda_function(id);
@@ -74,31 +71,31 @@ struct ForEachDimension<0>
 };
 
 template <typename L, typename... Ts>
-inline void execute_window_loop(const Window &w, L &&lambda_function, Ts &&... iterators)
+inline void execute_window_loop(const Window &w, L &&lambda_function, Ts &&...iterators)
 {
     w.validate();
 
-    for(unsigned int i = 0; i < Coordinates::num_max_dimensions; ++i)
+    for (unsigned int i = 0; i < Coordinates::num_max_dimensions; ++i)
     {
         ARM_COMPUTE_ERROR_ON(w[i].step() == 0);
     }
 
     Coordinates id;
-    ForEachDimension<Coordinates::num_max_dimensions>::unroll(w, id, std::forward<L>(lambda_function), std::forward<Ts>(iterators)...);
+    ForEachDimension<Coordinates::num_max_dimensions>::unroll(w, id, std::forward<L>(lambda_function),
+                                                              std::forward<Ts>(iterators)...);
 }
 
-inline constexpr Iterator::Iterator()
-    : _ptr(nullptr), _dims()
+inline constexpr Iterator::Iterator() : _ptr(nullptr), _dims()
 {
 }
 
-inline Iterator::Iterator(const ITensor *tensor, const Window &win)
-    : Iterator()
+inline Iterator::Iterator(const ITensor *tensor, const Window &win) : Iterator()
 {
     ARM_COMPUTE_ERROR_ON(tensor == nullptr);
     ARM_COMPUTE_ERROR_ON(tensor->info() == nullptr);
 
-    initialize(tensor->info()->num_dimensions(), tensor->info()->strides_in_bytes(), tensor->buffer(), tensor->info()->offset_first_element_in_bytes(), win);
+    initialize(tensor->info()->num_dimensions(), tensor->info()->strides_in_bytes(), tensor->buffer(),
+               tensor->info()->offset_first_element_in_bytes(), win);
 }
 
 inline Iterator::Iterator(size_t num_dims, const Strides &strides, uint8_t *buffer, size_t offset, const Window &win)
@@ -107,21 +104,22 @@ inline Iterator::Iterator(size_t num_dims, const Strides &strides, uint8_t *buff
 {
     initialize(num_dims, strides, buffer, offset, win);
 }
 
-inline void Iterator::initialize(size_t num_dims, const Strides &strides, uint8_t *buffer, size_t offset, const Window &win)
+inline void
+Iterator::initialize(size_t num_dims, const Strides &strides, uint8_t *buffer, size_t offset, const Window &win)
 {
     ARM_COMPUTE_ERROR_ON(buffer == nullptr);
 
     _ptr = buffer + offset;
 
     //Initialize the stride for each dimension and calculate the position of the first element of the iteration:
-    for(unsigned int n = 0; n < num_dims; ++n)
+    for (unsigned int n = 0; n < num_dims; ++n)
     {
         _dims[n]._stride = win[n].step() * strides[n];
         std::get<0>(_dims)._dim_start += static_cast<size_t>(strides[n]) * win[n].start();
     }
 
     //Copy the starting point to all the dimensions:
-    for(unsigned int n = 1; n < Coordinates::num_max_dimensions; ++n)
+    for (unsigned int n = 1; n < Coordinates::num_max_dimensions; ++n)
     {
         _dims[n]._dim_start = std::get<0>(_dims)._dim_start;
     }
@@ -135,7 +133,7 @@ inline void Iterator::increment(const size_t dimension)
 
     _dims[dimension]._dim_start += _dims[dimension]._stride;
 
-    for(unsigned int n = 0; n < dimension; ++n)
+    for (unsigned int n = 0; n < dimension; ++n)
     {
         _dims[n]._dim_start = _dims[dimension]._dim_start;
     }
@@ -157,7 +155,7 @@ inline void Iterator::reset(const size_t dimension)
 
     _dims[dimension]._dim_start = _dims[dimension + 1]._dim_start;
 
-    for(unsigned int n = 0; n < dimension; ++n)
+    for (unsigned int n = 0; n < dimension; ++n)
     {
         _dims[n]._dim_start = _dims[dimension]._dim_start;
     }
@@ -170,9 +168,9 @@ inline Coordinates index2coords(const TensorShape &shape, int index)
     ARM_COMPUTE_ERROR_ON_MSG(index < 0 || index >= num_elements, "Index has to be in [0, num_elements]!");
     ARM_COMPUTE_ERROR_ON_MSG(num_elements == 0, "Cannot create coordinate from empty shape!");
 
-    Coordinates coord{ 0 };
+    Coordinates coord{0};
 
-    for(int d = shape.num_dimensions() - 1; d >= 0; --d)
+    for (int d = shape.num_dimensions() - 1; d >= 0; --d)
     {
         num_elements /= shape[d];
         coord.set(d, index / num_elements);
@@ -191,7 +189,7 @@ inline int coords2index(const TensorShape &shape, const Coordinates &coord)
     int index = 0;
     int stride = 1;
 
-    for(unsigned int d = 0; d < coord.num_dimensions(); ++d)
+    for (unsigned int d = 0; d < coord.num_dimensions(); ++d)
     {
         index += coord[d] * stride;
         stride *= shape[d];
@@ -200,9 +198,11 @@ inline int coords2index(const TensorShape &shape, const Coordinates &coord)
     return index;
 }
 
-inline size_t get_data_layout_dimension_index(const DataLayout &data_layout, const DataLayoutDimension &data_layout_dimension)
+inline size_t get_data_layout_dimension_index(const DataLayout &data_layout,
+                                              const DataLayoutDimension &data_layout_dimension)
 {
-    ARM_COMPUTE_ERROR_ON_MSG(data_layout == DataLayout::UNKNOWN, "Cannot retrieve the dimension index for an unknown layout!");
+    ARM_COMPUTE_ERROR_ON_MSG(data_layout == DataLayout::UNKNOWN,
+                             "Cannot retrieve the dimension index for an unknown layout!");
     const auto &dims = get_layout_map().at(data_layout);
     const auto &it   = std::find(dims.cbegin(), dims.cend(), data_layout_dimension);
     ARM_COMPUTE_ERROR_ON_MSG(it == dims.cend(), "Invalid dimension for the given layout.");
@@ -211,7 +211,8 @@ inline size_t get_data_layout_dimension_index(const DataLayout &data_layout, con
 
 inline DataLayoutDimension get_index_data_layout_dimension(const DataLayout &data_layout, const size_t index)
 {
-    ARM_COMPUTE_ERROR_ON_MSG(data_layout == DataLayout::UNKNOWN, "Cannot retrieve the layout dimension for an unknown layout!");
+    ARM_COMPUTE_ERROR_ON_MSG(data_layout == DataLayout::UNKNOWN,
+                             "Cannot retrieve the layout dimension for an unknown layout!");
     const auto &dims = get_layout_map().at(data_layout);
     ARM_COMPUTE_ERROR_ON_MSG(index >= dims.size(), "Invalid index for the given layout.");
     return dims[index];
-- 
cgit v1.2.1
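Note: the revised .clang-format file is explicitly not part of this delivery, so the
fragment below is only an illustrative sketch. The option names are real clang-format
14 options, but the values are guesses inferred from the changes visible in this patch;
they are not the actual Compute Library configuration.

# Hypothetical .clang-format fragment consistent with the hunks above (illustration only)
Language:                       Cpp
ColumnLimit:                    120                # long signatures and calls are wrapped near 120 columns
SpaceBeforeParens:              ControlStatements  # 'for(' becomes 'for (' while function calls keep 'f('
AllowShortLambdasOnASingleLine: All                # the increment lambda collapses onto a single line
BreakBeforeBraces:              Allman             # braces stay on their own lines
PointerAlignment:               Right              # 'uint8_t *buffer' placement is preserved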