author     Felix Thomasmathibalan <felixjohnny.thomasmathibalan@arm.com>    2023-09-27 17:46:17 +0100
committer  felixjohnny.thomasmathibalan <felixjohnny.thomasmathibalan@arm.com>    2023-09-28 12:08:05 +0000
commit     afd38f0c617d6f89b2b4532c6c44f116617e2b6f (patch)
tree       03bc7d5a762099989b16a656fa8d397b490ed70e /src/core/utils/helpers
parent     bdcb4c148ee2fdeaaddf4cf1e57bbb0de02bb894 (diff)
download   ComputeLibrary-afd38f0c617d6f89b2b4532c6c44f116617e2b6f.tar.gz
Apply clang-format on repository
Code is formatted as per a revised clang-format configuration file (not part of this delivery). Version 14.0.6 is used.

Exclusion List:
- files with .cl extension
- files that are not strictly C/C++ (e.g. Android.bp, Sconscript ...)

And the following directories:
- compute_kernel_writer/validation/
- tests/
- include/
- src/core/NEON/kernels/convolution/
- src/core/NEON/kernels/arm_gemm/
- src/core/NEON/kernels/arm_conv/
- data/

There will be a follow-up for formatting of .cl files and the files under tests/ and compute_kernel_writer/validation/.

Signed-off-by: Felix Thomasmathibalan <felixjohnny.thomasmathibalan@arm.com>
Change-Id: Ib7eb1fcf4e7537b9feaefcfc15098a804a3fde0a
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/10391
Benchmark: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Gunes Bayir <gunes.bayir@arm.com>
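The revised .clang-format file is explicitly not part of this delivery, so the exact settings are unknown. As a purely illustrative sketch, options along the following lines (clang-format 14 syntax) would produce the kinds of changes visible in the hunks below: a space after control-statement keywords, Allman braces, one parameter per line for over-long signatures, and a wider column limit. Every value here is a guess for illustration, not taken from the delivered configuration.

    # Hypothetical .clang-format sketch; illustrative guesses only, not the configuration used by this commit
    Language: Cpp
    BasedOnStyle: LLVM
    ColumnLimit: 120                      # long declarations in the diff wrap near 120 columns
    SpaceBeforeParens: ControlStatements  # if(x)/while(x)/for(x) become if (x)/while (x)/for (x)
    BreakBeforeBraces: Allman             # opening braces stay on their own line, as in the hunks
    BinPackParameters: false              # one parameter per line once a signature exceeds the limit
    AlignAfterOpenBracket: Align          # wrapped parameters align under the opening parenthesis

A configuration along these lines would typically be applied with clang-format -i over the non-excluded C/C++ sources; the exclusion handling for .cl files and the directories listed above would come from the project's own tooling and is not reproduced here.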
Diffstat (limited to 'src/core/utils/helpers')
-rw-r--r--   src/core/utils/helpers/fft.cpp                19
-rw-r--r--   src/core/utils/helpers/float_ops.h              3
-rw-r--r--   src/core/utils/helpers/tensor_info.h           14
-rw-r--r--   src/core/utils/helpers/tensor_transform.cpp    63
4 files changed, 58 insertions, 41 deletions
diff --git a/src/core/utils/helpers/fft.cpp b/src/core/utils/helpers/fft.cpp
index 64633c643d..edc8d0eacc 100644
--- a/src/core/utils/helpers/fft.cpp
+++ b/src/core/utils/helpers/fft.cpp
@@ -37,7 +37,7 @@ std::vector<unsigned int> decompose_stages(unsigned int N, const std::set<unsign
unsigned int res = N;
// Early exit if no supported factors are provided
- if(supported_factors.empty())
+ if (supported_factors.empty())
{
return stages;
}
@@ -46,10 +46,10 @@ std::vector<unsigned int> decompose_stages(unsigned int N, const std::set<unsign
auto rfactor_it = supported_factors.rbegin();
// Decomposition step
- while(res != 0)
+ while (res != 0)
{
const unsigned int factor = *rfactor_it;
- if(0 == (res % factor) && res >= factor)
+ if (0 == (res % factor) && res >= factor)
{
stages.push_back(factor);
res /= factor;
@@ -57,9 +57,9 @@ std::vector<unsigned int> decompose_stages(unsigned int N, const std::set<unsign
else
{
++rfactor_it;
- if(rfactor_it == supported_factors.rend())
+ if (rfactor_it == supported_factors.rend())
{
- if(res > 1)
+ if (res > 1)
{
// Couldn't decompose with given factors
stages.clear();
@@ -81,8 +81,9 @@ std::vector<unsigned int> digit_reverse_indices(unsigned int N, const std::vecto
std::vector<unsigned int> idx_digit_reverse;
// Early exit in case N and fft stages do not match
- const float stages_prod = std::accumulate(std::begin(fft_stages), std::end(fft_stages), 1, std::multiplies<unsigned int>());
- if(stages_prod != N)
+ const float stages_prod =
+ std::accumulate(std::begin(fft_stages), std::end(fft_stages), 1, std::multiplies<unsigned int>());
+ if (stages_prod != N)
{
return idx_digit_reverse;
}
@@ -94,13 +95,13 @@ std::vector<unsigned int> digit_reverse_indices(unsigned int N, const std::vecto
unsigned int n_stages = fft_stages.size();
// Scan elements
- for(unsigned int n = 0; n < N; ++n)
+ for (unsigned int n = 0; n < N; ++n)
{
unsigned int k = n;
unsigned int Nx = fft_stages[0];
// Scan stages
- for(unsigned int s = 1; s < n_stages; ++s)
+ for (unsigned int s = 1; s < n_stages; ++s)
{
// radix of stage i-th
unsigned int Ny = fft_stages[s];
diff --git a/src/core/utils/helpers/float_ops.h b/src/core/utils/helpers/float_ops.h
index 99e1ea54ee..7f7fbd13bf 100644
--- a/src/core/utils/helpers/float_ops.h
+++ b/src/core/utils/helpers/float_ops.h
@@ -39,8 +39,7 @@ union RawFloat
*
* @param[in] val Floating-point value
*/
- explicit RawFloat(float val)
- : f32(val)
+ explicit RawFloat(float val) : f32(val)
{
}
/** Extract sign of floating point number
diff --git a/src/core/utils/helpers/tensor_info.h b/src/core/utils/helpers/tensor_info.h
index 9279532e2a..fd4745a453 100644
--- a/src/core/utils/helpers/tensor_info.h
+++ b/src/core/utils/helpers/tensor_info.h
@@ -41,15 +41,17 @@ namespace tensor_info
* @return True if tensors have mismatching quantization info else false.
*/
template <typename... Ts>
-inline bool tensors_have_different_quantization_info(const ITensorInfo *tensor_info_1, const ITensorInfo *tensor_info_2, Ts... tensor_infos)
+inline bool tensors_have_different_quantization_info(const ITensorInfo *tensor_info_1,
+ const ITensorInfo *tensor_info_2,
+ Ts... tensor_infos)
{
const QuantizationInfo first_quantization_info = tensor_info_1->quantization_info();
- const std::array < const ITensorInfo *, 1 + sizeof...(Ts) > tensor_infos_array{ { tensor_info_2, std::forward<Ts>(tensor_infos)... } };
- return std::any_of(tensor_infos_array.begin(), tensor_infos_array.end(), [&](const ITensorInfo * tensor_info)
- {
- return tensor_info->quantization_info() != first_quantization_info;
- });
+ const std::array<const ITensorInfo *, 1 + sizeof...(Ts)> tensor_infos_array{
+ {tensor_info_2, std::forward<Ts>(tensor_infos)...}};
+ return std::any_of(tensor_infos_array.begin(), tensor_infos_array.end(),
+ [&](const ITensorInfo *tensor_info)
+ { return tensor_info->quantization_info() != first_quantization_info; });
}
} // namespace tensor_info
} // namespace helpers
diff --git a/src/core/utils/helpers/tensor_transform.cpp b/src/core/utils/helpers/tensor_transform.cpp
index f2216995a9..19d0badd74 100644
--- a/src/core/utils/helpers/tensor_transform.cpp
+++ b/src/core/utils/helpers/tensor_transform.cpp
@@ -36,10 +36,11 @@ int calculate_stride_on_index(int index, Coordinates strides)
return index >= static_cast<int>(strides.num_dimensions()) ? 1 : strides[index];
}
-int calculate_start_on_index(TensorShape input_shape, int index, Coordinates starts, Coordinates strides, int32_t begin_mask)
+int calculate_start_on_index(
+ TensorShape input_shape, int index, Coordinates starts, Coordinates strides, int32_t begin_mask)
{
// Early exit
- if(index >= static_cast<int>(starts.num_dimensions()))
+ if (index >= static_cast<int>(starts.num_dimensions()))
{
return 0;
}
@@ -51,14 +52,14 @@ int calculate_start_on_index(TensorShape input_shape, int index, Coordinates sta
int start = starts[index];
// Reset in case of begin mask present
- if(arm_compute::helpers::bit_ops::is_bit_set(begin_mask, index))
+ if (arm_compute::helpers::bit_ops::is_bit_set(begin_mask, index))
{
start = stride > 0 ? std::numeric_limits<int>::lowest() : std::numeric_limits<int>::max();
}
// Account negative start points
const int dim_size = input_shape[index];
- if(start < 0)
+ if (start < 0)
{
start += dim_size;
}
@@ -69,12 +70,16 @@ int calculate_start_on_index(TensorShape input_shape, int index, Coordinates sta
return start;
}
-int calculate_end_on_index(TensorShape input_shape, int index, int start_on_index,
- Coordinates ends, Coordinates strides,
- int32_t end_mask, int32_t shrink_axis_mask)
+int calculate_end_on_index(TensorShape input_shape,
+ int index,
+ int start_on_index,
+ Coordinates ends,
+ Coordinates strides,
+ int32_t end_mask,
+ int32_t shrink_axis_mask)
{
// Early exit
- if(index >= static_cast<int>(ends.num_dimensions()))
+ if (index >= static_cast<int>(ends.num_dimensions()))
{
return input_shape[index];
}
@@ -86,9 +91,9 @@ int calculate_end_on_index(TensorShape input_shape, int index, int start_on_inde
int stop = ends[index];
// Shrink dimension
- if(shrink_axis)
+ if (shrink_axis)
{
- if(start_on_index == std::numeric_limits<int>::max())
+ if (start_on_index == std::numeric_limits<int>::max())
{
stop = start_on_index;
}
@@ -99,14 +104,14 @@ int calculate_end_on_index(TensorShape input_shape, int index, int start_on_inde
}
// Reset in case of begin mask present
- if(arm_compute::helpers::bit_ops::is_bit_set(end_mask, index) && !shrink_axis)
+ if (arm_compute::helpers::bit_ops::is_bit_set(end_mask, index) && !shrink_axis)
{
stop = (stride > 0) ? std::numeric_limits<int>::max() : std::numeric_limits<int>::lowest();
}
// Account negative end points
const int dim_size = input_shape[index];
- if(stop < 0)
+ if (stop < 0)
{
stop += dim_size;
}
@@ -118,14 +123,18 @@ int calculate_end_on_index(TensorShape input_shape, int index, int start_on_inde
}
std::tuple<Coordinates, Coordinates, Coordinates> calculate_strided_slice_coords(TensorShape input_shape,
- Coordinates starts, Coordinates ends, Coordinates strides,
- int32_t begin_mask, int32_t end_mask, int32_t shrink_axis_mask)
+ Coordinates starts,
+ Coordinates ends,
+ Coordinates strides,
+ int32_t begin_mask,
+ int32_t end_mask,
+ int32_t shrink_axis_mask)
{
Coordinates starts_abs{};
Coordinates ends_abs{};
Coordinates final_strides{};
- for(unsigned int i = 0; i < input_shape.num_dimensions(); ++i)
+ for (unsigned int i = 0; i < input_shape.num_dimensions(); ++i)
{
const int start_i = calculate_start_on_index(input_shape, i, starts, strides, begin_mask);
starts_abs.set(i, start_i);
@@ -136,13 +145,19 @@ std::tuple<Coordinates, Coordinates, Coordinates> calculate_strided_slice_coords
return std::make_tuple(starts_abs, ends_abs, final_strides);
}
-TensorShape compute_strided_slice_output_shape(TensorShape input_shape, Coordinates starts, Coordinates ends, Coordinates strides,
- int32_t begin_mask, int32_t end_mask, int32_t shrink_axis_mask, bool return_unshrinked)
+TensorShape compute_strided_slice_output_shape(TensorShape input_shape,
+ Coordinates starts,
+ Coordinates ends,
+ Coordinates strides,
+ int32_t begin_mask,
+ int32_t end_mask,
+ int32_t shrink_axis_mask,
+ bool return_unshrinked)
{
unsigned int index = 0;
TensorShape output_shape;
- for(unsigned int i = 0; i < input_shape.num_dimensions(); ++i)
+ for (unsigned int i = 0; i < input_shape.num_dimensions(); ++i)
{
const int stride = calculate_stride_on_index(index, strides);
const int start = calculate_start_on_index(input_shape, i, starts, strides, begin_mask);
@@ -150,11 +165,11 @@ TensorShape compute_strided_slice_output_shape(TensorShape input_shape, Coordina
const int range = end - start;
const bool is_shrink = arm_compute::helpers::bit_ops::is_bit_set(shrink_axis_mask, i);
- if(return_unshrinked || !is_shrink)
+ if (return_unshrinked || !is_shrink)
{
- if((range == 0) || // Zero range
- (range < 0 && stride >= 0) || // Negative range with positive stride
- (range > 0 && stride <= 0)) // Positive range with negative stride
+ if ((range == 0) || // Zero range
+ (range < 0 && stride >= 0) || // Negative range with positive stride
+ (range > 0 && stride <= 0)) // Positive range with negative stride
{
output_shape.set(index, 0);
return output_shape;
@@ -173,9 +188,9 @@ int32_t construct_slice_end_mask(Coordinates ends)
{
// Create end mask
int32_t end_mask = 0;
- for(unsigned int i = 0; i < ends.num_dimensions(); ++i)
+ for (unsigned int i = 0; i < ends.num_dimensions(); ++i)
{
- if(ends[i] < 0)
+ if (ends[i] < 0)
{
end_mask |= 1 << i;
}