author    Felix Thomasmathibalan <felixjohnny.thomasmathibalan@arm.com>    2023-09-27 17:46:17 +0100
committer felixjohnny.thomasmathibalan <felixjohnny.thomasmathibalan@arm.com>    2023-09-28 12:08:05 +0000
commit    afd38f0c617d6f89b2b4532c6c44f116617e2b6f (patch)
tree      03bc7d5a762099989b16a656fa8d397b490ed70e /arm_compute/core/utils
parent    bdcb4c148ee2fdeaaddf4cf1e57bbb0de02bb894 (diff)
download  ComputeLibrary-afd38f0c617d6f89b2b4532c6c44f116617e2b6f.tar.gz
Apply clang-format on repository
Code is formatted as per a revised clang-format configuration file (not part of
this delivery). Version 14.0.6 is used.

Exclusion list:
- files with .cl extension
- files that are not strictly C/C++ (e.g. Android.bp, Sconscript ...)

And the following directories:
- compute_kernel_writer/validation/
- tests/
- include/
- src/core/NEON/kernels/convolution/
- src/core/NEON/kernels/arm_gemm/
- src/core/NEON/kernels/arm_conv/
- data/

There will be a follow-up for formatting of .cl files and the files under
tests/ and compute_kernel_writer/validation/.

Signed-off-by: Felix Thomasmathibalan <felixjohnny.thomasmathibalan@arm.com>
Change-Id: Ib7eb1fcf4e7537b9feaefcfc15098a804a3fde0a
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/10391
Benchmark: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Gunes Bayir <gunes.bayir@arm.com>
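The revised .clang-format file is not part of this delivery, so the exact options are unknown. The hunks below do, however, consistently show four rules: a space after control-flow keywords, no padding inside braced initializers, long signatures broken one parameter per line, and aligned consecutive declarations. A minimal C++ sketch of the resulting style; the option names in the comments are inferences, not the published configuration:

#include <cstddef>

// SpaceBeforeParens: ControlStatements -> "if (", "while (", "switch (".
inline size_t halve_until_fits(size_t vec_size, size_t dim0)
{
    while (vec_size > dim0) // was: while(vec_size > dim0)
    {
        vec_size >>= 1;
    }
    return vec_size;
}

// BinPackParameters: false with a ColumnLimit around 120: long signatures break
// one parameter per line, with types, names and default values aligned
// (AlignConsecutiveDeclarations / AlignConsecutiveAssignments).
int example_long_signature(const char  *name,
                           unsigned int index,
                           int          start_on_index,
                           int          end_mask         = 0,
                           int          shrink_axis_mask = 0);

// Cpp11BracedListStyle: braced initializers lose the inner padding.
const int buf[4] = {0}; // was: const int buf[4] = { 0 };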
Diffstat (limited to 'arm_compute/core/utils')
-rw-r--r--  arm_compute/core/utils/ActivationFunctionUtils.h    |   2
-rw-r--r--  arm_compute/core/utils/DataLayoutUtils.h            |   2
-rw-r--r--  arm_compute/core/utils/DataTypeUtils.h              |  46
-rw-r--r--  arm_compute/core/utils/FormatUtils.h                |  30
-rw-r--r--  arm_compute/core/utils/InterpolationPolicyUtils.h   |   2
-rw-r--r--  arm_compute/core/utils/StringUtils.h                |   2
-rw-r--r--  arm_compute/core/utils/helpers/AdjustVecSize.h      |   6
-rw-r--r--  arm_compute/core/utils/helpers/tensor_transform.h   |  33
-rw-r--r--  arm_compute/core/utils/logging/FilePrinter.h        |   3
-rw-r--r--  arm_compute/core/utils/logging/Helpers.h            |   3
-rw-r--r--  arm_compute/core/utils/logging/IPrinter.h           |   3
-rw-r--r--  arm_compute/core/utils/logging/LogMsgDecorators.h   |   5
-rw-r--r--  arm_compute/core/utils/logging/Logger.h             |   6
-rw-r--r--  arm_compute/core/utils/logging/LoggerRegistry.h     |  13
-rw-r--r--  arm_compute/core/utils/logging/Macros.h             |  16
-rw-r--r--  arm_compute/core/utils/logging/Types.h              |   6
-rw-r--r--  arm_compute/core/utils/math/Math.h                  |   2
-rw-r--r--  arm_compute/core/utils/math/SafeOps.h               |  23
-rw-r--r--  arm_compute/core/utils/misc/InfoHelpers.h           |  54
-rw-r--r--  arm_compute/core/utils/misc/Macros.h                |   9
-rw-r--r--  arm_compute/core/utils/misc/ShapeCalculator.h       | 373
-rw-r--r--  arm_compute/core/utils/misc/Traits.h                |   1
-rw-r--r--  arm_compute/core/utils/misc/Utility.h               |  35
-rw-r--r--  arm_compute/core/utils/quantization/AsymmHelpers.h  |  24
24 files changed, 408 insertions, 291 deletions
diff --git a/arm_compute/core/utils/ActivationFunctionUtils.h b/arm_compute/core/utils/ActivationFunctionUtils.h
index 1cb66da13d..c988efa256 100644
--- a/arm_compute/core/utils/ActivationFunctionUtils.h
+++ b/arm_compute/core/utils/ActivationFunctionUtils.h
@@ -37,5 +37,5 @@ namespace arm_compute
* @return The string describing the activation function.
*/
const std::string &string_from_activation_func(const ActivationFunction &act);
-}
+} // namespace arm_compute
#endif /*ARM_COMPUTE_CORE_UTILS_ACTIVATIONFUNCTIONUTILS_H */
diff --git a/arm_compute/core/utils/DataLayoutUtils.h b/arm_compute/core/utils/DataLayoutUtils.h
index 399f55c63f..61839c9f91 100644
--- a/arm_compute/core/utils/DataLayoutUtils.h
+++ b/arm_compute/core/utils/DataLayoutUtils.h
@@ -36,5 +36,5 @@ namespace arm_compute
* @return The string describing the data layout.
*/
const std::string &string_from_data_layout(DataLayout dl);
-}
+} // namespace arm_compute
#endif /*ARM_COMPUTE_CORE_UTILS_DATALAYOUTUTILS_H */
diff --git a/arm_compute/core/utils/DataTypeUtils.h b/arm_compute/core/utils/DataTypeUtils.h
index cbb409c8a1..7ea5eb7670 100644
--- a/arm_compute/core/utils/DataTypeUtils.h
+++ b/arm_compute/core/utils/DataTypeUtils.h
@@ -37,7 +37,7 @@ namespace arm_compute
*/
inline size_t data_size_from_type(DataType data_type)
{
- switch(data_type)
+ switch (data_type)
{
case DataType::U8:
case DataType::S8:
@@ -77,7 +77,7 @@ inline size_t data_size_from_type(DataType data_type)
*/
inline size_t element_size_from_data_type(DataType dt)
{
- switch(dt)
+ switch (dt)
{
case DataType::S8:
case DataType::U8:
@@ -114,7 +114,7 @@ inline size_t element_size_from_data_type(DataType dt)
*/
inline DataType data_type_from_format(Format format)
{
- switch(format)
+ switch (format)
{
case Format::U8:
case Format::UV88:
@@ -158,7 +158,7 @@ inline DataType data_type_from_format(Format format)
*/
inline DataType get_promoted_data_type(DataType dt)
{
- switch(dt)
+ switch (dt)
{
case DataType::U8:
return DataType::U16;
@@ -196,7 +196,7 @@ inline std::tuple<PixelValue, PixelValue> get_min_max(DataType dt)
{
PixelValue min{};
PixelValue max{};
- switch(dt)
+ switch (dt)
{
case DataType::U8:
case DataType::QASYMM8:
@@ -303,7 +303,7 @@ inline ::std::istream &operator>>(::std::istream &stream, DataType &data_type)
*/
inline bool is_data_type_float(DataType dt)
{
- switch(dt)
+ switch (dt)
{
case DataType::F16:
case DataType::F32:
@@ -323,7 +323,7 @@ inline bool is_data_type_float(DataType dt)
*/
inline bool is_data_type_quantized(DataType dt)
{
- switch(dt)
+ switch (dt)
{
case DataType::QSYMM8:
case DataType::QASYMM8:
@@ -345,7 +345,7 @@ inline bool is_data_type_quantized(DataType dt)
*/
inline bool is_data_type_quantized_asymmetric(DataType dt)
{
- switch(dt)
+ switch (dt)
{
case DataType::QASYMM8:
case DataType::QASYMM8_SIGNED:
@@ -364,7 +364,7 @@ inline bool is_data_type_quantized_asymmetric(DataType dt)
*/
inline bool is_data_type_quantized_asymmetric_signed(DataType dt)
{
- switch(dt)
+ switch (dt)
{
case DataType::QASYMM8_SIGNED:
return true;
@@ -381,7 +381,7 @@ inline bool is_data_type_quantized_asymmetric_signed(DataType dt)
*/
inline bool is_data_type_quantized_symmetric(DataType dt)
{
- switch(dt)
+ switch (dt)
{
case DataType::QSYMM8:
case DataType::QSYMM8_PER_CHANNEL:
@@ -400,7 +400,7 @@ inline bool is_data_type_quantized_symmetric(DataType dt)
*/
inline bool is_data_type_quantized_per_channel(DataType dt)
{
- switch(dt)
+ switch (dt)
{
case DataType::QSYMM8_PER_CHANNEL:
return true;
@@ -420,12 +420,13 @@ inline bool is_data_type_quantized_per_channel(DataType dt)
template <typename T>
bool check_value_range(T val, DataType dt, QuantizationInfo qinfo = QuantizationInfo())
{
- switch(dt)
+ switch (dt)
{
case DataType::U8:
{
const auto val_u8 = static_cast<uint8_t>(val);
- return ((val_u8 == val) && val >= std::numeric_limits<uint8_t>::lowest() && val <= std::numeric_limits<uint8_t>::max());
+ return ((val_u8 == val) && val >= std::numeric_limits<uint8_t>::lowest() &&
+ val <= std::numeric_limits<uint8_t>::max());
}
case DataType::QASYMM8:
{
@@ -436,29 +437,34 @@ bool check_value_range(T val, DataType dt, QuantizationInfo qinfo = Quantization
case DataType::S8:
{
const auto val_s8 = static_cast<int8_t>(val);
- return ((val_s8 == val) && val >= std::numeric_limits<int8_t>::lowest() && val <= std::numeric_limits<int8_t>::max());
+ return ((val_s8 == val) && val >= std::numeric_limits<int8_t>::lowest() &&
+ val <= std::numeric_limits<int8_t>::max());
}
case DataType::U16:
{
const auto val_u16 = static_cast<uint16_t>(val);
- return ((val_u16 == val) && val >= std::numeric_limits<uint16_t>::lowest() && val <= std::numeric_limits<uint16_t>::max());
+ return ((val_u16 == val) && val >= std::numeric_limits<uint16_t>::lowest() &&
+ val <= std::numeric_limits<uint16_t>::max());
}
case DataType::S16:
{
const auto val_s16 = static_cast<int16_t>(val);
- return ((val_s16 == val) && val >= std::numeric_limits<int16_t>::lowest() && val <= std::numeric_limits<int16_t>::max());
+ return ((val_s16 == val) && val >= std::numeric_limits<int16_t>::lowest() &&
+ val <= std::numeric_limits<int16_t>::max());
}
case DataType::U32:
{
const auto val_d64 = static_cast<double>(val);
const auto val_u32 = static_cast<uint32_t>(val);
- return ((val_u32 == val_d64) && val_d64 >= std::numeric_limits<uint32_t>::lowest() && val_d64 <= std::numeric_limits<uint32_t>::max());
+ return ((val_u32 == val_d64) && val_d64 >= std::numeric_limits<uint32_t>::lowest() &&
+ val_d64 <= std::numeric_limits<uint32_t>::max());
}
case DataType::S32:
{
const auto val_d64 = static_cast<double>(val);
const auto val_s32 = static_cast<int32_t>(val);
- return ((val_s32 == val_d64) && val_d64 >= std::numeric_limits<int32_t>::lowest() && val_d64 <= std::numeric_limits<int32_t>::max());
+ return ((val_s32 == val_d64) && val_d64 >= std::numeric_limits<int32_t>::lowest() &&
+ val_d64 <= std::numeric_limits<int32_t>::max());
}
case DataType::BFLOAT16:
return (val >= bfloat16::lowest() && val <= bfloat16::max());
@@ -482,7 +488,7 @@ inline std::string cpu_impl_dt(const DataType &data_type)
{
std::string ret = "";
- switch(data_type)
+ switch (data_type)
{
case DataType::F32:
ret = "fp32";
@@ -521,5 +527,5 @@ inline std::string cpu_impl_dt(const DataType &data_type)
return ret;
}
-}
+} // namespace arm_compute
#endif /*ARM_COMPUTE_CORE_UTILS_DATATYPEUTILS_H */
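A minimal usage sketch of the range helper above (header path as in this diff; values illustrative): check_value_range casts the value to the target type and accepts it only if the round trip is lossless and the value sits inside numeric_limits, with U32/S32 compared through double so the check itself cannot overflow.

#include "arm_compute/core/utils/DataTypeUtils.h"

void range_examples()
{
    using namespace arm_compute;

    bool a = check_value_range(200, DataType::U8); // true: 200 survives the uint8_t round trip
    bool b = check_value_range(300, DataType::U8); // false: truncates to 44, round trip fails
    bool c = check_value_range(-5, DataType::S8);  // true: inside [-128, 127]
    (void)a;
    (void)b;
    (void)c;
}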
diff --git a/arm_compute/core/utils/FormatUtils.h b/arm_compute/core/utils/FormatUtils.h
index afb0f78255..a8e96bd361 100644
--- a/arm_compute/core/utils/FormatUtils.h
+++ b/arm_compute/core/utils/FormatUtils.h
@@ -37,7 +37,7 @@ namespace arm_compute
*/
inline size_t pixel_size_from_format(Format format)
{
- switch(format)
+ switch (format)
{
case Format::U8:
return 1;
@@ -77,7 +77,7 @@ inline size_t pixel_size_from_format(Format format)
*/
inline int plane_idx_from_channel(Format format, Channel channel)
{
- switch(format)
+ switch (format)
{
// Single planar formats have a single plane
case Format::U8:
@@ -99,7 +99,7 @@ inline int plane_idx_from_channel(Format format, Channel channel)
case Format::NV21:
{
// Channel U and V share the same plane of format UV88
- switch(channel)
+ switch (channel)
{
case Channel::Y:
return 0;
@@ -114,7 +114,7 @@ inline int plane_idx_from_channel(Format format, Channel channel)
case Format::IYUV:
case Format::YUV444:
{
- switch(channel)
+ switch (channel)
{
case Channel::Y:
return 0;
@@ -142,11 +142,11 @@ inline int plane_idx_from_channel(Format format, Channel channel)
*/
inline int channel_idx_from_format(Format format, Channel channel)
{
- switch(format)
+ switch (format)
{
case Format::RGB888:
{
- switch(channel)
+ switch (channel)
{
case Channel::R:
return 0;
@@ -161,7 +161,7 @@ inline int channel_idx_from_format(Format format, Channel channel)
}
case Format::RGBA8888:
{
- switch(channel)
+ switch (channel)
{
case Channel::R:
return 0;
@@ -178,7 +178,7 @@ inline int channel_idx_from_format(Format format, Channel channel)
}
case Format::YUYV422:
{
- switch(channel)
+ switch (channel)
{
case Channel::Y:
return 0;
@@ -193,7 +193,7 @@ inline int channel_idx_from_format(Format format, Channel channel)
}
case Format::UYVY422:
{
- switch(channel)
+ switch (channel)
{
case Channel::Y:
return 1;
@@ -208,7 +208,7 @@ inline int channel_idx_from_format(Format format, Channel channel)
}
case Format::NV12:
{
- switch(channel)
+ switch (channel)
{
case Channel::Y:
return 0;
@@ -223,7 +223,7 @@ inline int channel_idx_from_format(Format format, Channel channel)
}
case Format::NV21:
{
- switch(channel)
+ switch (channel)
{
case Channel::Y:
return 0;
@@ -239,7 +239,7 @@ inline int channel_idx_from_format(Format format, Channel channel)
case Format::YUV444:
case Format::IYUV:
{
- switch(channel)
+ switch (channel)
{
case Channel::Y:
return 0;
@@ -266,7 +266,7 @@ inline int channel_idx_from_format(Format format, Channel channel)
*/
inline size_t num_planes_from_format(Format format)
{
- switch(format)
+ switch (format)
{
case Format::U8:
case Format::S16:
@@ -301,7 +301,7 @@ inline size_t num_planes_from_format(Format format)
*/
inline size_t num_channels_from_format(Format format)
{
- switch(format)
+ switch (format)
{
case Format::U8:
case Format::U16:
@@ -340,5 +340,5 @@ inline size_t num_channels_from_format(Format format)
* @return The string describing the format.
*/
const std::string &string_from_format(Format format);
-}
+} // namespace arm_compute
#endif /*ARM_COMPUTE_CORE_UTILS_FORMATUTILS_H */
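The lookups above encode the memory layout of each format: NV12/NV21 keep luma in plane 0 and one interleaved UV88 chroma plane at index 1, while IYUV/YUV444 are fully planar. A short sketch (header path as in this diff):

#include "arm_compute/core/utils/FormatUtils.h"

void format_examples()
{
    using namespace arm_compute;

    int p1 = plane_idx_from_channel(Format::NV12, Channel::U); // 1: U and V share the UV88 plane
    int p2 = plane_idx_from_channel(Format::IYUV, Channel::V); // 2: one plane per channel
    size_t n = num_planes_from_format(Format::RGB888);         // 1: single interleaved plane
    (void)p1;
    (void)p2;
    (void)n;
}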
diff --git a/arm_compute/core/utils/InterpolationPolicyUtils.h b/arm_compute/core/utils/InterpolationPolicyUtils.h
index 79f6e3aa5f..8d4ae4321c 100644
--- a/arm_compute/core/utils/InterpolationPolicyUtils.h
+++ b/arm_compute/core/utils/InterpolationPolicyUtils.h
@@ -37,5 +37,5 @@ namespace arm_compute
* @return The string describing the interpolation policy.
*/
const std::string &string_from_interpolation_policy(InterpolationPolicy policy);
-}
+} // namespace arm_compute
#endif /*ARM_COMPUTE_CORE_UTILS_INTERPOLATIONPOLICYUTILS_H */
diff --git a/arm_compute/core/utils/StringUtils.h b/arm_compute/core/utils/StringUtils.h
index 41f29b0901..c13cbaa334 100644
--- a/arm_compute/core/utils/StringUtils.h
+++ b/arm_compute/core/utils/StringUtils.h
@@ -61,5 +61,5 @@ std::string float_to_string_with_full_precision(float val);
* @return std::string
*/
std::string join(const std::vector<std::string> strings, const std::string &sep);
-}
+} // namespace arm_compute
#endif /*ARM_COMPUTE_CORE_UTILS_STRINGUTILS_H */
diff --git a/arm_compute/core/utils/helpers/AdjustVecSize.h b/arm_compute/core/utils/helpers/AdjustVecSize.h
index bbb3048b84..842e3b57d6 100644
--- a/arm_compute/core/utils/helpers/AdjustVecSize.h
+++ b/arm_compute/core/utils/helpers/AdjustVecSize.h
@@ -39,17 +39,17 @@ inline unsigned int adjust_vec_size(unsigned int vec_size, size_t dim0)
{
ARM_COMPUTE_ERROR_ON(vec_size > 16);
- if((vec_size >= dim0) && (dim0 == 3))
+ if ((vec_size >= dim0) && (dim0 == 3))
{
return dim0;
}
- while(vec_size > dim0)
+ while (vec_size > dim0)
{
vec_size >>= 1;
}
return vec_size;
}
-}
+} // namespace arm_compute
#endif /*ARM_COMPUTE_UTILS_H */
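adjust_vec_size halves the requested vector width until it fits dim0, except that a dimension of exactly 3 is returned as-is so 3-channel data is not forced down to 2. Worked values, assuming the header above:

#include "arm_compute/core/utils/helpers/AdjustVecSize.h"

// adjust_vec_size(16, 3) -> 3 (special case: dim0 == 3)
// adjust_vec_size(16, 7) -> 4 (16 -> 8 -> 4, the first width <= 7)
// adjust_vec_size(8, 20) -> 8 (already fits)
unsigned int vec_size_example()
{
    return arm_compute::adjust_vec_size(16U, 7U);
}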
diff --git a/arm_compute/core/utils/helpers/tensor_transform.h b/arm_compute/core/utils/helpers/tensor_transform.h
index faa5b4433c..7a61fa192a 100644
--- a/arm_compute/core/utils/helpers/tensor_transform.h
+++ b/arm_compute/core/utils/helpers/tensor_transform.h
@@ -52,7 +52,8 @@ int calculate_stride_on_index(int index, Coordinates strides);
*
* @return Absolute start position of a given index
*/
-int calculate_start_on_index(TensorShape input_shape, int index, Coordinates starts, Coordinates strides, int32_t begin_mask);
+int calculate_start_on_index(
+ TensorShape input_shape, int index, Coordinates starts, Coordinates strides, int32_t begin_mask);
/** Returns the absolute end position of a given index for a strided slice operation
*
@@ -68,8 +69,13 @@ int calculate_start_on_index(TensorShape input_shape, int index, Coordinates sta
*
* @return Absolute end position of a given index
*/
-int calculate_end_on_index(TensorShape input_shape, int index, int start_on_index, Coordinates ends, Coordinates strides,
- int32_t end_mask = 0, int32_t shrink_axis_mask = 0);
+int calculate_end_on_index(TensorShape input_shape,
+ int index,
+ int start_on_index,
+ Coordinates ends,
+ Coordinates strides,
+ int32_t end_mask = 0,
+ int32_t shrink_axis_mask = 0);
/** Calculate start, end and stride coordinates for a strided slice
*
@@ -87,8 +93,12 @@ int calculate_end_on_index(TensorShape input_shape, int index, int start_on_inde
* @return A tuple with <Start,End,Strides>
*/
std::tuple<Coordinates, Coordinates, Coordinates> calculate_strided_slice_coords(TensorShape input_shape,
- Coordinates starts, Coordinates ends, Coordinates strides,
- int32_t begin_mask = 0, int32_t end_mask = 0, int32_t shrink_axis_mask = 0);
+ Coordinates starts,
+ Coordinates ends,
+ Coordinates strides,
+ int32_t begin_mask = 0,
+ int32_t end_mask = 0,
+ int32_t shrink_axis_mask = 0);
/** Computes output shape of strided slice
*
@@ -109,9 +119,14 @@ std::tuple<Coordinates, Coordinates, Coordinates> calculate_strided_slice_coords
*
* @return The output tensor shape
*/
-TensorShape compute_strided_slice_output_shape(TensorShape input_shape, Coordinates starts, Coordinates ends, Coordinates strides,
- int32_t begin_mask = 0, int32_t end_mask = 0, int32_t shrink_axis_mask = 0,
- bool return_unshrinked = false);
+TensorShape compute_strided_slice_output_shape(TensorShape input_shape,
+ Coordinates starts,
+ Coordinates ends,
+ Coordinates strides,
+ int32_t begin_mask = 0,
+ int32_t end_mask = 0,
+ int32_t shrink_axis_mask = 0,
+ bool return_unshrinked = false);
/** Constructs end mask in case we want to perform a slice operation using the strided slice interface
*
@@ -122,7 +137,7 @@ TensorShape compute_strided_slice_output_shape(TensorShape input_shape, Coordina
* @return End mask
*/
int32_t construct_slice_end_mask(Coordinates ends);
-} // namespace tensor_tranform
+} // namespace tensor_transform
} // namespace helpers
} // namespace arm_compute
#endif /* ARM_COMPUTE_UTILS_HELPERS_TENSOR_TRANSFORM_H */
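These helpers follow the TensorFlow strided-slice semantics: bits in begin_mask/end_mask make the corresponding start/end fall back to the full range, and shrink_axis_mask removes the shrunk dimensions from the result. A hedged usage sketch (shape and coordinates are illustrative only):

#include "arm_compute/core/Types.h"
#include "arm_compute/core/utils/helpers/tensor_transform.h"

arm_compute::TensorShape slice_example()
{
    using namespace arm_compute;

    const TensorShape in_shape(10, 10, 3); // W = 10, H = 10, C = 3
    const Coordinates starts(2, 0, 0);
    const Coordinates ends(8, 10, 3);
    const Coordinates strides(2, 1, 1);

    // Dimension 0 becomes ceil((8 - 2) / 2) = 3, so the result is [3, 10, 3].
    return helpers::tensor_transform::compute_strided_slice_output_shape(in_shape, starts, ends, strides);
}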
diff --git a/arm_compute/core/utils/logging/FilePrinter.h b/arm_compute/core/utils/logging/FilePrinter.h
index 0e5b84f084..a865aadddb 100644
--- a/arm_compute/core/utils/logging/FilePrinter.h
+++ b/arm_compute/core/utils/logging/FilePrinter.h
@@ -24,9 +24,8 @@
#ifndef ARM_COMPUTE_LOGGING_FILE_PRINTER_H
#define ARM_COMPUTE_LOGGING_FILE_PRINTER_H
-#include "arm_compute/core/utils/logging/IPrinter.h"
-
#include "arm_compute/core/utils/io/FileHandler.h"
+#include "arm_compute/core/utils/logging/IPrinter.h"
namespace arm_compute
{
diff --git a/arm_compute/core/utils/logging/Helpers.h b/arm_compute/core/utils/logging/Helpers.h
index 5f8b948592..c3c2f0f0b8 100644
--- a/arm_compute/core/utils/logging/Helpers.h
+++ b/arm_compute/core/utils/logging/Helpers.h
@@ -25,6 +25,7 @@
#define ARM_COMPUTE_LOGGING_HELPERS_H
#include "arm_compute/core/utils/logging/Types.h"
+
#include "support/ToolchainSupport.h"
#include <cstddef>
@@ -45,7 +46,7 @@ namespace logging
* @return The formatted string
*/
template <typename... Ts>
-inline std::string string_with_format(const std::string &fmt, Ts &&... args)
+inline std::string string_with_format(const std::string &fmt, Ts &&...args)
{
size_t size = support::cpp11::snprintf(nullptr, 0, fmt.c_str(), args...) + 1;
auto char_str = std::make_unique<char[]>(size);
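string_with_format is a printf-style formatter built on snprintf: a first pass with a null buffer measures the required length, then the message is rendered into an exactly-sized buffer. A minimal usage sketch:

#include <string>

#include "arm_compute/core/utils/logging/Helpers.h"

std::string format_example()
{
    // Yields "tensor 3 of 24".
    return arm_compute::logging::string_with_format("tensor %d of %d", 3, 24);
}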
diff --git a/arm_compute/core/utils/logging/IPrinter.h b/arm_compute/core/utils/logging/IPrinter.h
index 42dca58ea1..7fde4d9302 100644
--- a/arm_compute/core/utils/logging/IPrinter.h
+++ b/arm_compute/core/utils/logging/IPrinter.h
@@ -35,8 +35,7 @@ class Printer
{
public:
/** Default Constructor */
- Printer() noexcept
- : _mtx()
+ Printer() noexcept : _mtx()
{
}
/** Prevent instances of this class from being copied */
diff --git a/arm_compute/core/utils/logging/LogMsgDecorators.h b/arm_compute/core/utils/logging/LogMsgDecorators.h
index 9c9e62740f..66a8180e21 100644
--- a/arm_compute/core/utils/logging/LogMsgDecorators.h
+++ b/arm_compute/core/utils/logging/LogMsgDecorators.h
@@ -63,8 +63,7 @@ public:
*
* @param str Sting to append
*/
- StringDecorator(const std::string &str)
- : _str(str)
+ StringDecorator(const std::string &str) : _str(str)
{
_str = angle_wrap_value(str);
}
@@ -103,7 +102,7 @@ private:
auto time = std::chrono::system_clock::to_time_t(now);
// TODO: use put_time for gcc > 4.9
- char buf[100] = { 0 };
+ char buf[100] = {0};
std::strftime(buf, sizeof(buf), "%d-%m-%Y %I:%M:%S", std::localtime(&time));
return buf;
}
diff --git a/arm_compute/core/utils/logging/Logger.h b/arm_compute/core/utils/logging/Logger.h
index 4fc9bb7dbf..608db39138 100644
--- a/arm_compute/core/utils/logging/Logger.h
+++ b/arm_compute/core/utils/logging/Logger.h
@@ -88,7 +88,7 @@ public:
* @param[in] args Message arguments
*/
template <typename... Ts>
- void log(LogLevel log_level, const std::string &fmt, Ts &&... args);
+ void log(LogLevel log_level, const std::string &fmt, Ts &&...args);
/** Sets log level of the logger
*
* @warning Not thread-safe
@@ -159,11 +159,11 @@ private:
};
template <typename... Ts>
-inline void Logger::log(LogLevel log_level, const std::string &fmt, Ts &&... args)
+inline void Logger::log(LogLevel log_level, const std::string &fmt, Ts &&...args)
{
// Return if message shouldn't be logged
// i.e. if log level does not match the logger's
- if(!is_loggable(log_level))
+ if (!is_loggable(log_level))
{
return;
}
diff --git a/arm_compute/core/utils/logging/LoggerRegistry.h b/arm_compute/core/utils/logging/LoggerRegistry.h
index 7c9931a260..4e52a10935 100644
--- a/arm_compute/core/utils/logging/LoggerRegistry.h
+++ b/arm_compute/core/utils/logging/LoggerRegistry.h
@@ -27,6 +27,7 @@
#include "arm_compute/core/utils/logging/Logger.h"
#include "arm_compute/core/utils/logging/Printers.h"
#include "arm_compute/core/utils/logging/Types.h"
+
#include "support/Mutex.h"
#include <memory>
@@ -54,8 +55,9 @@ public:
* @param[in] log_level Logger's log level. Defaults to INFO
* @param[in] printers Printers to attach to the system loggers. Defaults with a @ref StdPrinter.
*/
- void create_logger(const std::string &name, LogLevel log_level = LogLevel::INFO,
- const std::vector<std::shared_ptr<Printer>> &printers = { std::make_shared<StdPrinter>() });
+ void create_logger(const std::string &name,
+ LogLevel log_level = LogLevel::INFO,
+ const std::vector<std::shared_ptr<Printer>> &printers = {std::make_shared<StdPrinter>()});
/** Remove a logger
*
* @param name Logger's name
@@ -74,16 +76,17 @@ public:
* @param[in] printers (Optional) Printers to attach to the system loggers. Defaults with a @ref StdPrinter.
*/
void create_reserved_loggers(LogLevel log_level = LogLevel::INFO,
- const std::vector<std::shared_ptr<Printer>> &printers = { std::make_shared<StdPrinter>() });
+ const std::vector<std::shared_ptr<Printer>> &printers = {
+ std::make_shared<StdPrinter>()});
private:
/** Default constructor */
LoggerRegistry();
private:
- arm_compute::Mutex _mtx;
+ arm_compute::Mutex _mtx;
std::unordered_map<std::string, std::shared_ptr<Logger>> _loggers;
- static std::set<std::string> _reserved_loggers;
+ static std::set<std::string> _reserved_loggers;
};
} // namespace logging
} // namespace arm_compute
diff --git a/arm_compute/core/utils/logging/Macros.h b/arm_compute/core/utils/logging/Macros.h
index 0ab17c4464..4d5aa5fe2c 100644
--- a/arm_compute/core/utils/logging/Macros.h
+++ b/arm_compute/core/utils/logging/Macros.h
@@ -48,48 +48,48 @@ inline std::string signature_name(const std::string &pretty_func)
do \
{ \
auto __logger = arm_compute::logging::LoggerRegistry::get().logger(logger_name); \
- if(__logger != nullptr) \
+ if (__logger != nullptr) \
{ \
__logger->log(log_level, msg); \
} \
- } while(false)
+ } while (false)
#define ARM_COMPUTE_LOG_MSG_WITH_FUNCNAME(logger_name, log_level, msg) \
do \
{ \
auto __logger = arm_compute::logging::LoggerRegistry::get().logger(logger_name); \
- if(__logger != nullptr) \
+ if (__logger != nullptr) \
{ \
std::ostringstream s; \
s << ARM_COMPUTE_SIGNATURE_NAME << " : " << msg; \
__logger->log(log_level, s.str()); \
} \
- } while(false)
+ } while (false)
#define ARM_COMPUTE_LOG_MSG_WITH_FORMAT(logger_name, log_level, fmt, ...) \
do \
{ \
auto __logger = arm_compute::logging::LoggerRegistry::get().logger(logger_name); \
- if(__logger != nullptr) \
+ if (__logger != nullptr) \
{ \
size_t size = ::snprintf(nullptr, 0, fmt, __VA_ARGS__) + 1; \
auto char_str = std::make_unique<char[]>(size); \
::snprintf(char_str.get(), size, fmt, __VA_ARGS__); \
__logger->log(log_level, std::string(char_str.get(), char_str.get() + size - 1)); \
} \
- } while(false)
+ } while (false)
#define ARM_COMPUTE_LOG_STREAM(logger_name, log_level, stream) \
do \
{ \
auto __logger = arm_compute::logging::LoggerRegistry::get().logger(logger_name); \
- if(__logger != nullptr) \
+ if (__logger != nullptr) \
{ \
std::ostringstream s; \
s << stream; \
__logger->log(log_level, s.str()); \
} \
- } while(false)
+ } while (false)
#else /* ARM_COMPUTE_LOGGING_ENABLED */
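Each macro expands to the do { ... } while (false) idiom so a log call behaves as a single statement (safe in an unbraced if/else), and it becomes a no-op when the named logger was never registered. A usage sketch; the logger name "CORE" is assumed to be one of the registry's reserved loggers:

#include "arm_compute/core/utils/logging/LoggerRegistry.h"
#include "arm_compute/core/utils/logging/Macros.h"

void log_example()
{
    using namespace arm_compute::logging;

    LoggerRegistry::get().create_reserved_loggers(LogLevel::INFO); // attaches a StdPrinter by default

    ARM_COMPUTE_LOG_MSG("CORE", LogLevel::INFO, "pipeline configured");
    ARM_COMPUTE_LOG_MSG_WITH_FORMAT("CORE", LogLevel::WARN, "fallback after %d tries", 3);
}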
diff --git a/arm_compute/core/utils/logging/Types.h b/arm_compute/core/utils/logging/Types.h
index f0ddae6c84..64c567b984 100644
--- a/arm_compute/core/utils/logging/Types.h
+++ b/arm_compute/core/utils/logging/Types.h
@@ -44,8 +44,7 @@ enum class LogLevel
struct LogMsg
{
/** Default constructor */
- LogMsg()
- : raw_(), log_level_(LogLevel::OFF)
+ LogMsg() : raw_(), log_level_(LogLevel::OFF)
{
}
/** Construct a log message
@@ -53,8 +52,7 @@ struct LogMsg
* @param[in] msg Message to log.
* @param[in] log_level Logging level. Default: OFF
*/
- LogMsg(std::string msg, LogLevel log_level = LogLevel::OFF)
- : raw_(msg), log_level_(log_level)
+ LogMsg(std::string msg, LogLevel log_level = LogLevel::OFF) : raw_(msg), log_level_(log_level)
{
}
diff --git a/arm_compute/core/utils/math/Math.h b/arm_compute/core/utils/math/Math.h
index c1dce7ff08..e70337ba0f 100644
--- a/arm_compute/core/utils/math/Math.h
+++ b/arm_compute/core/utils/math/Math.h
@@ -67,5 +67,5 @@ inline auto floor_to_multiple(S value, T divisor) -> decltype((value / divisor)
return (value / divisor) * divisor;
}
-}
+} // namespace arm_compute
#endif /*ARM_COMPUTE_UTILS_MATH_H */
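floor_to_multiple rounds value down to the nearest multiple of divisor through truncating integer division, (value / divisor) * divisor. A worked example, assuming the header above:

#include "arm_compute/core/utils/math/Math.h"

int floor_example()
{
    // (37 / 8) * 8 == 32; exact multiples come back unchanged.
    return arm_compute::floor_to_multiple(37, 8); // -> 32
}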
diff --git a/arm_compute/core/utils/math/SafeOps.h b/arm_compute/core/utils/math/SafeOps.h
index dc928a0e5d..ef8bcf7e14 100644
--- a/arm_compute/core/utils/math/SafeOps.h
+++ b/arm_compute/core/utils/math/SafeOps.h
@@ -25,6 +25,7 @@
#define ARM_COMPUTE_UTILS_MATH_SAFE_OPS
#include "arm_compute/core/Error.h"
+
#include "support/AclRequires.h"
#include <limits>
@@ -51,11 +52,11 @@ T safe_integer_add(T val_a, T val_b)
{
T result = 0;
- if((val_b > 0) && (val_a > std::numeric_limits<T>::max() - val_b))
+ if ((val_b > 0) && (val_a > std::numeric_limits<T>::max() - val_b))
{
result = std::numeric_limits<T>::max();
}
- else if((val_b < 0) && (val_a < std::numeric_limits<T>::min() - val_b))
+ else if ((val_b < 0) && (val_a < std::numeric_limits<T>::min() - val_b))
{
result = std::numeric_limits<T>::min();
}
@@ -83,11 +84,11 @@ T safe_integer_sub(T val_a, T val_b)
{
T result = 0;
- if((val_b < 0) && (val_a > std::numeric_limits<T>::max() + val_b))
+ if ((val_b < 0) && (val_a > std::numeric_limits<T>::max() + val_b))
{
result = std::numeric_limits<T>::max();
}
- else if((val_b > 0) && (val_a < std::numeric_limits<T>::min() + val_b))
+ else if ((val_b > 0) && (val_a < std::numeric_limits<T>::min() + val_b))
{
result = std::numeric_limits<T>::min();
}
@@ -115,13 +116,13 @@ T safe_integer_mul(T val_a, T val_b)
{
T result = 0;
- if(val_a > 0)
+ if (val_a > 0)
{
- if((val_b > 0) && (val_a > (std::numeric_limits<T>::max() / val_b)))
+ if ((val_b > 0) && (val_a > (std::numeric_limits<T>::max() / val_b)))
{
result = std::numeric_limits<T>::max();
}
- else if(val_b < (std::numeric_limits<T>::min() / val_a))
+ else if (val_b < (std::numeric_limits<T>::min() / val_a))
{
result = std::numeric_limits<T>::min();
}
@@ -132,11 +133,11 @@ T safe_integer_mul(T val_a, T val_b)
}
else
{
- if((val_b > 0) && (val_a < (std::numeric_limits<T>::min() / val_b)))
+ if ((val_b > 0) && (val_a < (std::numeric_limits<T>::min() / val_b)))
{
result = std::numeric_limits<T>::max();
}
- else if((val_a != 0) && (val_b < (std::numeric_limits<T>::max() / val_a)))
+ else if ((val_a != 0) && (val_b < (std::numeric_limits<T>::max() / val_a)))
{
result = std::numeric_limits<T>::min();
}
@@ -165,7 +166,7 @@ T safe_integer_div(T val_a, T val_b)
{
T result = 0;
- if((val_b == 0) || ((val_a == std::numeric_limits<T>::min()) && (val_b == -1)))
+ if ((val_b == 0) || ((val_a == std::numeric_limits<T>::min()) && (val_b == -1)))
{
result = std::numeric_limits<T>::min();
}
@@ -176,7 +177,7 @@ T safe_integer_div(T val_a, T val_b)
return result;
}
-} // namespace cast
+} // namespace math
} // namespace utils
} // namespace arm_compute
#endif /* ARM_COMPUTE_UTILS_MATH_SAFE_OPS */
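These helpers clamp instead of overflowing: each operation pre-checks its operands against numeric_limits and returns the saturated bound whenever the exact result would not fit, and division additionally saturates on x / 0 and INT_MIN / -1. A worked sketch, assuming the header above:

#include <cstdint>
#include <limits>

#include "arm_compute/core/utils/math/SafeOps.h"

void safe_ops_examples()
{
    using namespace arm_compute::utils::math;

    const int32_t int_max = std::numeric_limits<int32_t>::max();

    int32_t a = safe_integer_add<int32_t>(int_max, 5);       // saturates to INT32_MAX
    int32_t b = safe_integer_sub<int32_t>(-int_max, 5);      // saturates to INT32_MIN
    int32_t c = safe_integer_mul<int32_t>(1 << 20, 1 << 20); // 2^40 saturates to INT32_MAX
    int32_t d = safe_integer_div<int32_t>(7, 0);             // saturates to INT32_MIN
    (void)a;
    (void)b;
    (void)c;
    (void)d;
}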
diff --git a/arm_compute/core/utils/misc/InfoHelpers.h b/arm_compute/core/utils/misc/InfoHelpers.h
index ced0d24b56..1d1b4ea8d7 100644
--- a/arm_compute/core/utils/misc/InfoHelpers.h
+++ b/arm_compute/core/utils/misc/InfoHelpers.h
@@ -53,10 +53,12 @@ inline bool is_relu(ActivationLayerInfo activation_info)
*/
inline bool is_relu6(ActivationLayerInfo activation_info)
{
- const bool is_lu_bounded_relu = activation_info.activation() == ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU
- && activation_info.a() == 6.f && activation_info.b() == 0.f;
- const bool is_bounded_relu = activation_info.activation() == ActivationLayerInfo::ActivationFunction::BOUNDED_RELU
- && activation_info.a() == 6.f;
+ const bool is_lu_bounded_relu =
+ activation_info.activation() == ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU &&
+ activation_info.a() == 6.f && activation_info.b() == 0.f;
+ const bool is_bounded_relu =
+ activation_info.activation() == ActivationLayerInfo::ActivationFunction::BOUNDED_RELU &&
+ activation_info.a() == 6.f;
return activation_info.enabled() && (is_lu_bounded_relu || is_bounded_relu);
}
@@ -68,34 +70,37 @@ inline bool is_relu6(ActivationLayerInfo activation_info)
*
*/
template <typename T>
-inline void build_lstm_params_tensor_info(const LSTMParams<T> &lstm_params,
- LSTMParams<ITensorInfo> *lstm_params_info)
+inline void build_lstm_params_tensor_info(const LSTMParams<T> &lstm_params, LSTMParams<ITensorInfo> *lstm_params_info)
{
- if(lstm_params.has_peephole_opt())
+ if (lstm_params.has_peephole_opt())
{
ARM_COMPUTE_ERROR_ON_NULLPTR(lstm_params.cell_to_forget_weights(), lstm_params.cell_to_output_weights());
- lstm_params_info->set_peephole_params(lstm_params.cell_to_forget_weights()->info(), lstm_params.cell_to_output_weights()->info());
+ lstm_params_info->set_peephole_params(lstm_params.cell_to_forget_weights()->info(),
+ lstm_params.cell_to_output_weights()->info());
}
- if(lstm_params.has_projection())
+ if (lstm_params.has_projection())
{
ARM_COMPUTE_ERROR_ON_NULLPTR(lstm_params.projection_weights());
- lstm_params_info->set_projection_params(lstm_params.projection_weights()->info(),
- lstm_params.projection_bias() != nullptr ? lstm_params.projection_bias()->info() : nullptr);
+ lstm_params_info->set_projection_params(
+ lstm_params.projection_weights()->info(),
+ lstm_params.projection_bias() != nullptr ? lstm_params.projection_bias()->info() : nullptr);
}
- if(!lstm_params.has_cifg_opt())
+ if (!lstm_params.has_cifg_opt())
{
- ARM_COMPUTE_ERROR_ON_NULLPTR(lstm_params.input_to_input_weights(), lstm_params.recurrent_to_input_weights(), lstm_params.input_gate_bias());
+ ARM_COMPUTE_ERROR_ON_NULLPTR(lstm_params.input_to_input_weights(), lstm_params.recurrent_to_input_weights(),
+ lstm_params.input_gate_bias());
- ITensorInfo *cell_to_input_weights_info = (lstm_params.has_peephole_opt()) ? lstm_params.cell_to_input_weights()->info() : nullptr;
- lstm_params_info->set_cifg_params(lstm_params.input_to_input_weights()->info(), lstm_params.recurrent_to_input_weights()->info(),
- cell_to_input_weights_info, lstm_params.input_gate_bias()->info());
+ ITensorInfo *cell_to_input_weights_info =
+ (lstm_params.has_peephole_opt()) ? lstm_params.cell_to_input_weights()->info() : nullptr;
+ lstm_params_info->set_cifg_params(lstm_params.input_to_input_weights()->info(),
+ lstm_params.recurrent_to_input_weights()->info(), cell_to_input_weights_info,
+ lstm_params.input_gate_bias()->info());
}
- if(lstm_params.use_layer_norm())
+ if (lstm_params.use_layer_norm())
{
- ARM_COMPUTE_ERROR_ON_NULLPTR(lstm_params.forget_layer_norm_weights(),
- lstm_params.output_layer_norm_weights(),
+ ARM_COMPUTE_ERROR_ON_NULLPTR(lstm_params.forget_layer_norm_weights(), lstm_params.output_layer_norm_weights(),
lstm_params.cell_layer_norm_weights());
- if(!lstm_params.has_cifg_opt())
+ if (!lstm_params.has_cifg_opt())
{
ARM_COMPUTE_ERROR_ON_NULLPTR(lstm_params.input_layer_norm_weights());
}
@@ -103,15 +108,14 @@ inline void build_lstm_params_tensor_info(const LSTMParams<T> &lstm_params,
ITensorInfo *forget_info = lstm_params.forget_layer_norm_weights()->info();
ITensorInfo *cell_info = lstm_params.cell_layer_norm_weights()->info();
ITensorInfo *output_info = lstm_params.output_layer_norm_weights()->info();
- ITensorInfo *input_info = lstm_params.has_cifg_opt() ? nullptr : lstm_params.input_layer_norm_weights()->info();
+ ITensorInfo *input_info = lstm_params.has_cifg_opt() ? nullptr : lstm_params.input_layer_norm_weights()->info();
lstm_params_info->set_layer_normalization_params(input_info, forget_info, cell_info, output_info);
}
- lstm_params_info->set_matmul_scale_params(lstm_params.input_intermediate_scale(),
- lstm_params.forget_intermediate_scale(),
- lstm_params.cell_intermediate_scale(),
- lstm_params.output_intermediate_scale());
+ lstm_params_info->set_matmul_scale_params(
+ lstm_params.input_intermediate_scale(), lstm_params.forget_intermediate_scale(),
+ lstm_params.cell_intermediate_scale(), lstm_params.output_intermediate_scale());
lstm_params_info->set_hidden_state_params(lstm_params.hidden_state_zero(), lstm_params.hidden_state_scale());
}
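is_relu6 accepts both encodings of the same clamp: LU_BOUNDED_RELU with a = 6 and b = 0, and BOUNDED_RELU with a = 6 (whose lower bound is implicitly zero). A short sketch; the utils::info_helpers namespace is taken from this header:

#include "arm_compute/core/utils/misc/InfoHelpers.h"

void relu6_examples()
{
    using namespace arm_compute;

    const ActivationLayerInfo lu(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.f, 0.f);
    const ActivationLayerInfo bounded(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f);
    const ActivationLayerInfo plain(ActivationLayerInfo::ActivationFunction::RELU);

    bool r1 = utils::info_helpers::is_relu6(lu);      // true
    bool r2 = utils::info_helpers::is_relu6(bounded); // true
    bool r3 = utils::info_helpers::is_relu6(plain);   // false: unbounded ReLU
    (void)r1;
    (void)r2;
    (void)r3;
}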
diff --git a/arm_compute/core/utils/misc/Macros.h b/arm_compute/core/utils/misc/Macros.h
index de66b6a52f..fa861fa442 100644
--- a/arm_compute/core/utils/misc/Macros.h
+++ b/arm_compute/core/utils/misc/Macros.h
@@ -26,15 +26,16 @@
#if defined(__cplusplus) && (__cplusplus >= 201402L)
-#define ARM_COMPUTE_DEPRECATED [[deprecated]]
-#define ARM_COMPUTE_DEPRECATED_REL(rel) [[deprecated("Deprecated in : " #rel)]]
+#define ARM_COMPUTE_DEPRECATED [[deprecated]]
+#define ARM_COMPUTE_DEPRECATED_REL(rel) [[deprecated("Deprecated in : " #rel)]]
#define ARM_COMPUTE_DEPRECATED_REL_REPLACE(rel, replace) [[deprecated("Deprecated in : " #rel " - Use : " #replace)]]
#elif defined(__GNUC__) || defined(__clang__)
-#define ARM_COMPUTE_DEPRECATED __attribute__((deprecated))
+#define ARM_COMPUTE_DEPRECATED __attribute__((deprecated))
#define ARM_COMPUTE_DEPRECATED_REL(rel) __attribute__((deprecated("Deprecated in : " #rel)))
-#define ARM_COMPUTE_DEPRECATED_REL_REPLACE(rel, replace) __attribute__((deprecated("Deprecated in : " #rel " - Use : " #replace)))
+#define ARM_COMPUTE_DEPRECATED_REL_REPLACE(rel, replace) \
+ __attribute__((deprecated("Deprecated in : " #rel " - Use : " #replace)))
#else // defined(__cplusplus) && (__cplusplus >= 201402L)
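On C++14 the macros map to [[deprecated]], on older GCC/Clang to __attribute__((deprecated)); the _REL and _REL_REPLACE variants stringize the release tag and the suggested replacement into the warning text. A usage sketch (both function names are placeholders):

#include "arm_compute/core/utils/misc/Macros.h"

ARM_COMPUTE_DEPRECATED_REL_REPLACE(23.08, new_configure)
void old_configure(); // warning reads: Deprecated in : 23.08 - Use : new_configure

ARM_COMPUTE_DEPRECATED
void legacy_helper(); // plain deprecation warning at each call site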
diff --git a/arm_compute/core/utils/misc/ShapeCalculator.h b/arm_compute/core/utils/misc/ShapeCalculator.h
index 4c2037ab8d..31362f1ac4 100644
--- a/arm_compute/core/utils/misc/ShapeCalculator.h
+++ b/arm_compute/core/utils/misc/ShapeCalculator.h
@@ -28,11 +28,10 @@
#include "arm_compute/core/ITensorInfo.h"
#include "arm_compute/core/KernelDescriptors.h"
#include "arm_compute/core/Utils.h"
+#include "arm_compute/core/utils/helpers/tensor_transform.h"
#include "arm_compute/function_info/ConvolutionInfo.h"
#include "arm_compute/runtime/FunctionDescriptors.h"
-#include "arm_compute/core/utils/helpers/tensor_transform.h"
-
#include <cmath>
namespace arm_compute
@@ -57,12 +56,12 @@ inline TensorShape calculate_reduce_mean_shape(ITensorInfo *input, const Coordin
convert_negative_axis(axis_local, input_dims);
TensorShape out_shape = input->tensor_shape();
// Configure reshape layer if we want to drop the dimensions
- if(!keep_dims)
+ if (!keep_dims)
{
// We have to sort the reduction axis vectors in order for remove_dimension
// to work properly
std::sort(axis_local.begin(), axis_local.begin() + reduction_ops);
- for(int i = 0; i < reduction_ops; ++i)
+ for (int i = 0; i < reduction_ops; ++i)
{
out_shape.remove_dimension(axis_local[i] - i, false);
}
@@ -70,7 +69,7 @@ inline TensorShape calculate_reduce_mean_shape(ITensorInfo *input, const Coordin
}
else
{
- for(int i = 0; i < reduction_ops; ++i)
+ for (int i = 0; i < reduction_ops; ++i)
{
out_shape.set(axis_local[i], 1);
}
@@ -86,7 +85,10 @@ inline TensorShape calculate_reduce_mean_shape(ITensorInfo *input, const Coordin
*
* @return the calculated shape
*/
-inline TensorShape compute_vector_to_tensor_output_shape(const TensorShape &input, size_t conv_w, size_t conv_h, const DataLayout &data_layout)
+inline TensorShape compute_vector_to_tensor_output_shape(const TensorShape &input,
+ size_t conv_w,
+ size_t conv_h,
+ const DataLayout &data_layout)
{
const size_t idx_w = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
const size_t idx_h = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
@@ -128,10 +130,12 @@ inline TensorShape compute_reorg_output_shape(const ITensorInfo &input, int32_t
const size_t idx_channel = get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::CHANNEL);
ARM_COMPUTE_ERROR_ON(stride <= 0);
- ARM_COMPUTE_ERROR_ON_MSG((input.tensor_shape()[idx_width] % stride != 0), "The width of the input tensor must be a multiple of stride");
- ARM_COMPUTE_ERROR_ON_MSG((input.tensor_shape()[idx_height] % stride != 0), "The height of the input tensor must be a multiple of stride");
+ ARM_COMPUTE_ERROR_ON_MSG((input.tensor_shape()[idx_width] % stride != 0),
+ "The width of the input tensor must be a multiple of stride");
+ ARM_COMPUTE_ERROR_ON_MSG((input.tensor_shape()[idx_height] % stride != 0),
+ "The height of the input tensor must be a multiple of stride");
- TensorShape output_shape{ input.tensor_shape() };
+ TensorShape output_shape{input.tensor_shape()};
output_shape.set(idx_width, output_shape[idx_width] / stride);
output_shape.set(idx_height, output_shape[idx_height] / stride);
@@ -148,7 +152,8 @@ inline TensorShape compute_reorg_output_shape(const ITensorInfo &input, int32_t
*
* @return the calculated shape of the reshaped weights
*/
-inline TensorShape compute_weights_reshaped_shape(const ITensorInfo &weights, bool has_bias = false, unsigned int num_groups = 1)
+inline TensorShape
+compute_weights_reshaped_shape(const ITensorInfo &weights, bool has_bias = false, unsigned int num_groups = 1)
{
// Number of groups greater than one are only supported for NCHW data layout, and the number of weights must be a multiple of it.
ARM_COMPUTE_ERROR_ON(num_groups == 0);
@@ -156,14 +161,14 @@ inline TensorShape compute_weights_reshaped_shape(const ITensorInfo &weights, bo
ARM_COMPUTE_ERROR_ON((weights.dimension(3) % num_groups) != 0);
// Calculate output shape
- TensorShape weights_reshaped{ weights.tensor_shape() };
+ TensorShape weights_reshaped{weights.tensor_shape()};
weights_reshaped.set(3, weights_reshaped[3] / num_groups);
weights_reshaped.collapse(3);
const size_t tmp_dim = weights_reshaped[0];
weights_reshaped.set(0, weights_reshaped[1]);
weights_reshaped.set(1, tmp_dim + (has_bias ? 1 : 0));
- if(weights.num_dimensions() < 5)
+ if (weights.num_dimensions() < 5)
{
weights_reshaped.set(2, num_groups);
}
@@ -179,7 +184,9 @@ inline TensorShape compute_weights_reshaped_shape(const ITensorInfo &weights, bo
*
* @return the calculated shape
*/
-inline TensorShape compute_lhs_reshaped_shape(const ITensorInfo &a, const GEMMLHSMatrixInfo &lhs_info, bool reinterpret_input_as_3d = false)
+inline TensorShape compute_lhs_reshaped_shape(const ITensorInfo &a,
+ const GEMMLHSMatrixInfo &lhs_info,
+ bool reinterpret_input_as_3d = false)
{
ARM_COMPUTE_ERROR_ON(lhs_info.m0 == 0);
ARM_COMPUTE_ERROR_ON(lhs_info.k0 == 0);
@@ -200,11 +207,11 @@ inline TensorShape compute_lhs_reshaped_shape(const ITensorInfo &a, const GEMMLH
const unsigned int output_width = block_size * num_horiz_blocks * lhs_info.v0;
const unsigned int output_height = std::ceil(num_vert_blocks / static_cast<float>(lhs_info.v0));
- TensorShape lhs_shape{ a.tensor_shape() };
+ TensorShape lhs_shape{a.tensor_shape()};
lhs_shape.set(0, output_width);
lhs_shape.set(1, output_height);
- if((reinterpret_input_as_3d) && (lhs_shape.num_dimensions() > 2))
+ if ((reinterpret_input_as_3d) && (lhs_shape.num_dimensions() > 2))
{
// When the data format is NHWC and the shapes are Nx1x1
// the tensor shape num_dimensions is automatically set to 1 instead of 3.
@@ -244,7 +251,7 @@ inline TensorShape compute_rhs_reshaped_shape(const ITensorInfo &a, const GEMMRH
const unsigned int output_width = block_size * num_vert_blocks * rhs_info.h0;
const unsigned int output_height = std::ceil(num_horiz_blocks / static_cast<float>(rhs_info.h0));
- TensorShape rhs_shape{ a.tensor_shape() };
+ TensorShape rhs_shape{a.tensor_shape()};
rhs_shape.set(0, output_width);
rhs_shape.set(1, output_height);
@@ -259,14 +266,15 @@ inline TensorShape compute_rhs_reshaped_shape(const ITensorInfo &a, const GEMMRH
*
* @return the calculated shape
*/
-inline TensorShape compute_interleaved_shape(const ITensorInfo &a, int mult_interleave4x4_height = 1, bool reinterpret_input_as_3d = false)
+inline TensorShape
+compute_interleaved_shape(const ITensorInfo &a, int mult_interleave4x4_height = 1, bool reinterpret_input_as_3d = false)
{
// The interleaved output matrix will have the following shape: [ a_height * W, ceil(a_width / W) ] where W = 4 * mult_interleave4x4_height
ARM_COMPUTE_ERROR_ON(mult_interleave4x4_height < 1);
const int interleave_width = 4 * mult_interleave4x4_height;
- TensorShape shape_interleaved_a{ a.tensor_shape() };
+ TensorShape shape_interleaved_a{a.tensor_shape()};
shape_interleaved_a.set(0, a.dimension(0) * interleave_width);
- if(reinterpret_input_as_3d)
+ if (reinterpret_input_as_3d)
{
const int M = a.dimension(1) * a.dimension(2);
const int height = std::ceil(M / static_cast<float>(interleave_width));
@@ -276,7 +284,7 @@ inline TensorShape compute_interleaved_shape(const ITensorInfo &a, int mult_inte
// the tensor shape num_dimensions is automatically set to 1 instead of 3.
// To avoid failures by removing a dimension that doesn't exist
// check if the number of dimensions is greater than 2.
- if(shape_interleaved_a.num_dimensions() > 2)
+ if (shape_interleaved_a.num_dimensions() > 2)
{
shape_interleaved_a.remove_dimension(2);
}
@@ -298,7 +306,7 @@ inline TensorShape compute_interleaved_shape(const ITensorInfo &a, int mult_inte
inline TensorShape compute_transpose1xW_shape(const ITensorInfo &b)
{
// The transpose1xW output matrix will have the following shape: [ b_height * 16, ceil(b_width / 16.0f) ]
- TensorShape shape_transposed1xW_b{ b.tensor_shape() };
+ TensorShape shape_transposed1xW_b{b.tensor_shape()};
shape_transposed1xW_b.set(0, b.dimension(1) * 16);
shape_transposed1xW_b.set(1, std::ceil(b.dimension(0) / 16.f));
@@ -318,7 +326,7 @@ inline TensorShape compute_transpose1xW_with_element_size_shape(const ITensorInf
// The transpose1xW output matrix will have the following shape:
// [ b_height * W, ceil(b_width / W) ] where W = (16 / element size of the tensor) * mult_transpose1xW_width
ARM_COMPUTE_ERROR_ON(mult_transpose1xW_width < 1);
- TensorShape shape_transposed1xW_b{ b.tensor_shape() };
+ TensorShape shape_transposed1xW_b{b.tensor_shape()};
const size_t transpose_width = (16 / b.element_size()) * mult_transpose1xW_width;
shape_transposed1xW_b.set(0, b.dimension(1) * transpose_width);
shape_transposed1xW_b.set(1, static_cast<size_t>(std::ceil(b.dimension(0) / static_cast<float>(transpose_width))));
@@ -334,8 +342,8 @@ inline TensorShape compute_transpose1xW_with_element_size_shape(const ITensorInf
*/
inline TensorShape compute_reductionA_shape(const ITensorInfo &b)
{
- TensorShape shape_vector_sum_col{ b.tensor_shape() };
- if(shape_vector_sum_col.num_dimensions() > 1)
+ TensorShape shape_vector_sum_col{b.tensor_shape()};
+ if (shape_vector_sum_col.num_dimensions() > 1)
{
shape_vector_sum_col.remove_dimension(1);
}
@@ -351,9 +359,9 @@ inline TensorShape compute_reductionA_shape(const ITensorInfo &b)
*/
inline TensorShape compute_reductionB_shape(const ITensorInfo &a)
{
- TensorShape shape_vector_sum_row{ a.tensor_shape() };
+ TensorShape shape_vector_sum_row{a.tensor_shape()};
shape_vector_sum_row.set(Window::DimX, a.dimension(1));
- if(shape_vector_sum_row.num_dimensions() > 1)
+ if (shape_vector_sum_row.num_dimensions() > 1)
{
shape_vector_sum_row.remove_dimension(1);
}
@@ -370,7 +378,10 @@ inline TensorShape compute_reductionB_shape(const ITensorInfo &a)
*
* @return the calculated shape
*/
-inline TensorShape compute_col2im_shape(const ITensorInfo &input, const Size2D &convolved_dims, bool batch_size_on_z, unsigned int num_groups = 1)
+inline TensorShape compute_col2im_shape(const ITensorInfo &input,
+ const Size2D &convolved_dims,
+ bool batch_size_on_z,
+ unsigned int num_groups = 1)
{
ARM_COMPUTE_ERROR_ON(num_groups == 0);
ARM_COMPUTE_ERROR_ON(input.tensor_shape()[1] != (convolved_dims.area()));
@@ -381,10 +392,10 @@ inline TensorShape compute_col2im_shape(const ITensorInfo &input, const Size2D &
const int height_idx = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
const int channel_idx = get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL);
- TensorShape col2im_shape{ input.tensor_shape() };
+ TensorShape col2im_shape{input.tensor_shape()};
// If batches start on 3rd dimension shift dimensions right by 1 to retain upper tensor shape,
// as first three will be override by H,W,C data
- if(batch_size_on_z && num_groups == 1)
+ if (batch_size_on_z && num_groups == 1)
{
col2im_shape.shift_right(1);
}
@@ -403,7 +414,7 @@ inline TensorShape compute_col2im_shape(const ITensorInfo &input, const Size2D &
*/
inline TensorShape compute_transposed_shape(const ITensorInfo &input)
{
- TensorShape shape_transposed{ input.tensor_shape() };
+ TensorShape shape_transposed{input.tensor_shape()};
shape_transposed.set(0, input.dimension(1), false);
shape_transposed.set(1, input.dimension(0), false);
@@ -419,10 +430,11 @@ inline TensorShape compute_transposed_shape(const ITensorInfo &input)
*
* @return the calculated shape
*/
-inline TensorShape compute_depthwise_convolution_shape(const ITensorInfo &input, const ITensorInfo &weights, const ConvolutionInfo &info)
+inline TensorShape
+compute_depthwise_convolution_shape(const ITensorInfo &input, const ITensorInfo &weights, const ConvolutionInfo &info)
{
- const TensorShape input_shape{ input.tensor_shape() };
- const TensorShape weights_shape{ weights.tensor_shape() };
+ const TensorShape input_shape{input.tensor_shape()};
+ const TensorShape weights_shape{weights.tensor_shape()};
const DataLayout data_layout = input.data_layout();
const int width_idx = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
@@ -430,16 +442,16 @@ inline TensorShape compute_depthwise_convolution_shape(const ITensorInfo &input,
const int channel_idx = get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL);
const DataLayout weights_data_layout = weights.data_layout();
- const int weights_width_idx = get_data_layout_dimension_index(weights_data_layout, DataLayoutDimension::WIDTH);
- const int weights_height_idx = get_data_layout_dimension_index(weights_data_layout, DataLayoutDimension::HEIGHT);
+ const int weights_width_idx = get_data_layout_dimension_index(weights_data_layout, DataLayoutDimension::WIDTH);
+ const int weights_height_idx = get_data_layout_dimension_index(weights_data_layout, DataLayoutDimension::HEIGHT);
unsigned int output_width = 0;
unsigned int output_height = 0;
- std::tie(output_width, output_height) = scaled_dimensions(input_shape[width_idx], input_shape[height_idx],
- weights_shape[weights_width_idx], weights_shape[weights_height_idx],
- info.pad_stride_info, info.dilation);
+ std::tie(output_width, output_height) =
+ scaled_dimensions(input_shape[width_idx], input_shape[height_idx], weights_shape[weights_width_idx],
+ weights_shape[weights_height_idx], info.pad_stride_info, info.dilation);
- TensorShape output_shape{ input_shape };
+ TensorShape output_shape{input_shape};
output_shape.set(width_idx, output_width);
output_shape.set(height_idx, output_height);
output_shape.set(channel_idx, input_shape[channel_idx] * info.depth_multiplier);
@@ -459,8 +471,13 @@ inline TensorShape compute_depthwise_convolution_shape(const ITensorInfo &input,
*
* @return the calculated shape
*/
-inline TensorShape compute_deconvolution_upsampled_shape(const ITensorInfo &input, const ITensorInfo &weights, unsigned int sx, unsigned int sy,
- std::pair<unsigned int, unsigned int> &out_dims, uint32_t &padx, uint32_t &pady)
+inline TensorShape compute_deconvolution_upsampled_shape(const ITensorInfo &input,
+ const ITensorInfo &weights,
+ unsigned int sx,
+ unsigned int sy,
+ std::pair<unsigned int, unsigned int> &out_dims,
+ uint32_t &padx,
+ uint32_t &pady)
{
const DataLayout data_layout = input.data_layout();
const size_t idx_w = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
@@ -491,10 +508,12 @@ inline TensorShape compute_deconvolution_upsampled_shape(const ITensorInfo &inpu
*
* @return the calculated shape
*/
-inline TensorShape compute_deconvolution_output_shape(const std::pair<unsigned int, unsigned int> &out_dims, const ITensorInfo &input, const ITensorInfo &weights)
+inline TensorShape compute_deconvolution_output_shape(const std::pair<unsigned int, unsigned int> &out_dims,
+ const ITensorInfo &input,
+ const ITensorInfo &weights)
{
- const TensorShape input_shape{ input.tensor_shape() };
- const TensorShape weights_shape{ weights.tensor_shape() };
+ const TensorShape input_shape{input.tensor_shape()};
+ const TensorShape weights_shape{weights.tensor_shape()};
const DataLayout data_layout = input.data_layout();
const int width_idx = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
@@ -502,7 +521,7 @@ inline TensorShape compute_deconvolution_output_shape(const std::pair<unsigned i
const int channel_idx = get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL);
const int batch_idx = get_data_layout_dimension_index(data_layout, DataLayoutDimension::BATCHES);
- TensorShape out_shape{ input_shape };
+ TensorShape out_shape{input_shape};
out_shape.set(width_idx, out_dims.first);
out_shape.set(height_idx, out_dims.second);
out_shape.set(channel_idx, weights_shape[batch_idx]);
@@ -522,8 +541,14 @@ inline TensorShape compute_deconvolution_output_shape(const std::pair<unsigned i
*
* @return the calculated shape
*/
-inline TensorShape compute_im2col_conv_shape(const ITensorInfo *input, const Size2D &kernel_dims, const PadStrideInfo &conv_info, bool has_bias, const Size2D &dilation, bool batch_size_on_z,
- unsigned int num_groups = 1, unsigned int input_pad_right = 0)
+inline TensorShape compute_im2col_conv_shape(const ITensorInfo *input,
+ const Size2D &kernel_dims,
+ const PadStrideInfo &conv_info,
+ bool has_bias,
+ const Size2D &dilation,
+ bool batch_size_on_z,
+ unsigned int num_groups = 1,
+ unsigned int input_pad_right = 0)
{
// The output shape will be the 3D shape [ out_channels * kernel_area, num_elems_per_out_channel, batches ] if batch_size_on_z == true
// or the 4D shape [ out_channels * kernel_area / num_groups, num_elems_per_out_channel, num_groups, batches ] if batch_size_on_z == false
@@ -532,17 +557,19 @@ inline TensorShape compute_im2col_conv_shape(const ITensorInfo *input, const Siz
ARM_COMPUTE_ERROR_ON(num_groups > 1 && input->data_layout() != DataLayout::NCHW);
ARM_COMPUTE_ERROR_ON(num_groups > 1 && batch_size_on_z);
- TensorShape output_shape{ input->tensor_shape() };
+ TensorShape output_shape{input->tensor_shape()};
const DataLayout data_layout = input->data_layout();
const int width_idx = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
const int height_idx = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
const int channel_idx = get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL);
- std::pair<unsigned int, unsigned int> out_dims = scaled_dimensions(output_shape[width_idx], output_shape[height_idx], kernel_dims.width, kernel_dims.height, conv_info, dilation);
- output_shape.set(0, ((output_shape[channel_idx] + input_pad_right) / num_groups * kernel_dims.area() + (has_bias ? 1 : 0))); // NOLINT
+ std::pair<unsigned int, unsigned int> out_dims = scaled_dimensions(
+ output_shape[width_idx], output_shape[height_idx], kernel_dims.width, kernel_dims.height, conv_info, dilation);
+ output_shape.set(0, ((output_shape[channel_idx] + input_pad_right) / num_groups * kernel_dims.area() +
+ (has_bias ? 1 : 0))); // NOLINT
output_shape.set(1, (out_dims.first * out_dims.second));
- if(batch_size_on_z && output_shape.num_dimensions() >= 3)
+ if (batch_size_on_z && output_shape.num_dimensions() >= 3)
{
output_shape.remove_dimension(2);
}
@@ -564,7 +591,7 @@ inline TensorShape compute_flatten_shape(const ITensorInfo *input)
{
// The output shape will be the flatten version of the input (i.e. [ width * height * channels, num_batches, ... ] ). Used for FlattenLayer and FullyConnectedLayer.
- TensorShape output_shape{ input->tensor_shape() };
+ TensorShape output_shape{input->tensor_shape()};
output_shape.collapse(3);
@@ -586,7 +613,7 @@ inline TensorShape compute_softmax_shape(const ITensorInfo *input, size_t axis =
// - [x,y,z,w] and axis 3 will return [x*y*z, w]
TensorShape shape2D = input->tensor_shape();
- if(axis < input->num_dimensions())
+ if (axis < input->num_dimensions())
{
// Collapse from axis onward (this changes the shape)
shape2D.collapse_from(axis);
@@ -600,7 +627,7 @@ inline TensorShape compute_softmax_shape(const ITensorInfo *input, size_t axis =
shape2D.collapse(shape2D.num_dimensions());
}
- if(axis == 0)
+ if (axis == 0)
{
// If axis is zero the first dim should be one. Since
// collapse is an inclusive operation we need to shift
@@ -619,15 +646,17 @@ inline TensorShape compute_softmax_shape(const ITensorInfo *input, size_t axis =
*/
inline TensorShape compute_winograd_filter_transform_shape(const ITensorInfo &input, const WinogradInfo &winograd_info)
{
- TensorShape tensor_shape{ input.tensor_shape() };
+ TensorShape tensor_shape{input.tensor_shape()};
const Size2D kernel_size = winograd_info.kernel_size;
const Size2D output_tile_size = winograd_info.output_tile_size;
- const Size2D input_tile_size = Size2D(output_tile_size.width + kernel_size.width - 1, output_tile_size.height + kernel_size.height - 1);
+ const Size2D input_tile_size =
+ Size2D(output_tile_size.width + kernel_size.width - 1, output_tile_size.height + kernel_size.height - 1);
tensor_shape.remove_dimension(get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::WIDTH));
tensor_shape.set(Window::DimX, input.dimension(3));
- tensor_shape.set(Window::DimY, input.dimension(get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::CHANNEL)));
+ tensor_shape.set(Window::DimY, input.dimension(get_data_layout_dimension_index(input.data_layout(),
+ DataLayoutDimension::CHANNEL)));
tensor_shape.set(Window::DimZ, input_tile_size.area());
return tensor_shape;
@@ -645,23 +674,22 @@ inline TensorShape compute_winograd_input_transform_shape(const ITensorInfo &inp
const PadStrideInfo conv_info = winograd_info.convolution_info;
const Size2D kernel_size = winograd_info.kernel_size;
const Size2D output_tile_size = winograd_info.output_tile_size;
- const Size2D input_tile_size = Size2D(output_tile_size.width + kernel_size.width - 1, output_tile_size.height + kernel_size.height - 1);
+ const Size2D input_tile_size =
+ Size2D(output_tile_size.width + kernel_size.width - 1, output_tile_size.height + kernel_size.height - 1);
const size_t idx_w = get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::WIDTH);
const size_t idx_h = get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::HEIGHT);
const size_t idx_c = get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::CHANNEL);
// Compute the number of output tiles along the x and y direction of size "output_tile_size"
- const Size2D num_tiles = compute_winograd_convolution_tiles(Size2D(input.tensor_shape()[idx_w], input.tensor_shape()[idx_h]),
- kernel_size,
- output_tile_size,
- conv_info);
+ const Size2D num_tiles = compute_winograd_convolution_tiles(
+ Size2D(input.tensor_shape()[idx_w], input.tensor_shape()[idx_h]), kernel_size, output_tile_size, conv_info);
const unsigned int width = input.tensor_shape()[idx_c];
const unsigned int height = num_tiles.area();
const unsigned int depth = input_tile_size.area();
- TensorShape output_shape{ input.tensor_shape() };
+ TensorShape output_shape{input.tensor_shape()};
output_shape.set(0, width);
output_shape.set(1, height);
output_shape.set(2, depth);
@@ -684,12 +712,12 @@ inline TensorShape compute_winograd_output_transform_shape(const ITensorInfo &in
const DataLayout data_layout = winograd_info.output_data_layout;
// Compute output shape
- unsigned int output_width = 0;
- unsigned int output_height = 0;
+ unsigned int output_width = 0;
+ unsigned int output_height = 0;
std::tie(output_width, output_height) = scaled_dimensions(input_dimensions.width, input_dimensions.height,
kernel_size.width, kernel_size.height, conv_info);
- TensorShape tensor_shape{ input.tensor_shape() };
+ TensorShape tensor_shape{input.tensor_shape()};
// Output dimension
const unsigned int out_w = output_width;
@@ -712,7 +740,10 @@ inline TensorShape compute_winograd_output_transform_shape(const ITensorInfo &in
*
* @return the calculated shape
*/
-inline TensorShape compute_deep_convolution_shape(const TensorShape &input_shape, DataLayout input_data_layout, const TensorShape &weights_shape, const PadStrideInfo &conv_info)
+inline TensorShape compute_deep_convolution_shape(const TensorShape &input_shape,
+ DataLayout input_data_layout,
+ const TensorShape &weights_shape,
+ const PadStrideInfo &conv_info)
{
const size_t idx_width = get_data_layout_dimension_index(input_data_layout, DataLayoutDimension::WIDTH);
const size_t idx_height = get_data_layout_dimension_index(input_data_layout, DataLayoutDimension::HEIGHT);
@@ -725,9 +756,10 @@ inline TensorShape compute_deep_convolution_shape(const TensorShape &input_shape
const unsigned int weights_out_channel = weights_shape[3];
unsigned int output_width = 0;
unsigned int output_height = 0;
- std::tie(output_width, output_height) = scaled_dimensions(input_width, input_height, weights_width, weights_height, conv_info);
+ std::tie(output_width, output_height) =
+ scaled_dimensions(input_width, input_height, weights_width, weights_height, conv_info);
- TensorShape output_shape{ input_shape };
+ TensorShape output_shape{input_shape};
output_shape.set(idx_width, output_width);
output_shape.set(idx_height, output_height);
output_shape.set(idx_channel, weights_out_channel);
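For reference, scaled_dimensions applies the usual convolution sizing, out = (in + pad_before + pad_after - kernel) / stride + 1, here assumed with FLOOR rounding and unit dilation; an assumed ResNet-style first layer illustrates it:

    // Illustrative: 224x224 input, 7x7 kernel, stride 2, padding 3 on each side.
    constexpr unsigned out_w = (224 + 3 + 3 - 7) / 2 + 1; // floor(223 / 2) + 1 = 112
    static_assert(out_w == 112, "spatial extent becomes 112x112; channels become weights_out_channel");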
@@ -743,7 +775,8 @@ inline TensorShape compute_deep_convolution_shape(const TensorShape &input_shape
*
* @return the calculated shape
*/
-inline TensorShape compute_deep_convolution_shape(const ITensorInfo &input, const ITensorInfo &weights, const PadStrideInfo &conv_info)
+inline TensorShape
+compute_deep_convolution_shape(const ITensorInfo &input, const ITensorInfo &weights, const PadStrideInfo &conv_info)
{
return compute_deep_convolution_shape(input.tensor_shape(), input.data_layout(), weights.tensor_shape(), conv_info);
}
@@ -758,7 +791,10 @@ inline TensorShape compute_deep_convolution_shape(const ITensorInfo &input, cons
*
* @return the calculated shape
*/
-inline TensorShape compute_indirect_buffer_shape(const TensorShape &input_shape, DataLayout input_data_layout, const TensorShape &weights_shape, const PadStrideInfo &conv_info,
+inline TensorShape compute_indirect_buffer_shape(const TensorShape &input_shape,
+ DataLayout input_data_layout,
+ const TensorShape &weights_shape,
+ const PadStrideInfo &conv_info,
const DirectConvComputeKernelInfo &desc)
{
ARM_COMPUTE_ERROR_ON_MSG(input_data_layout != DataLayout::NHWC, "The data layout can only be NHWC");
@@ -768,7 +804,8 @@ inline TensorShape compute_indirect_buffer_shape(const TensorShape &input_shape,
const unsigned int kw = weights_shape[1];
const unsigned int kh = weights_shape[2];
- TensorShape output_conv2d_shape = compute_deep_convolution_shape(input_shape, input_data_layout, weights_shape, conv_info);
+ TensorShape output_conv2d_shape =
+ compute_deep_convolution_shape(input_shape, input_data_layout, weights_shape, conv_info);
const unsigned int output_w = m0 * kw * kh;
const unsigned int output_h = DIV_CEIL(output_conv2d_shape[1] * output_conv2d_shape[2], m0);
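With assumed numbers, the two extents computed above behave like this (m0 is the row-blocking factor taken from the desc argument):

    // Illustrative: 3x3 kernel, m0 = 4, conv2d output 56x56 (NHWC shape[1] * shape[2]).
    constexpr unsigned output_w = 4 * 3 * 3;             // m0 * kw * kh = 36
    constexpr unsigned output_h = (56 * 56 + 4 - 1) / 4; // DIV_CEIL(3136, 4) = 784
    static_assert(output_w == 36 && output_h == 784, "indirect buffer extent is 36 x 784 here");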
@@ -785,7 +822,7 @@ inline TensorShape compute_indirect_buffer_shape(const TensorShape &input_shape,
*/
inline TensorShape compute_min_max_shape(const ITensorInfo *input)
{
- TensorShape output_shape{ input->tensor_shape() };
+ TensorShape output_shape{input->tensor_shape()};
output_shape.set(Window::DimX, 2);
output_shape.remove_dimension(1);
output_shape.remove_dimension(1);
@@ -805,7 +842,7 @@ inline TensorShape compute_pool_shape(const ITensorInfo &input, PoolingLayerInfo
int pooled_w = 0;
int pooled_h = 0;
- TensorShape output_shape{ input.tensor_shape() };
+ TensorShape output_shape{input.tensor_shape()};
const bool is_global_pooling = pool_info.is_global_pooling;
const int idx_width = get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::WIDTH);
@@ -815,9 +852,8 @@ inline TensorShape compute_pool_shape(const ITensorInfo &input, PoolingLayerInfo
const int pool_size_x = is_global_pooling ? output_shape[idx_width] : pool_info.pool_size.width;
const int pool_size_y = is_global_pooling ? output_shape[idx_height] : pool_info.pool_size.height;
- std::tie(pooled_w, pooled_h) = scaled_dimensions_signed(input_width, input_height,
- pool_size_x, pool_size_y,
- pool_info.pad_stride_info);
+ std::tie(pooled_w, pooled_h) =
+ scaled_dimensions_signed(input_width, input_height, pool_size_x, pool_size_y, pool_info.pad_stride_info);
ARM_COMPUTE_ERROR_ON_MSG((pooled_w < 1 || pooled_h < 1), "Calculated output dimension size is invalid");
@@ -850,8 +886,10 @@ inline TensorShape compute_unpool_shape(const ITensorInfo &input, PoolingLayerIn
const int pad_bottom = pad_stride_info.pad_bottom();
TensorShape output_shape = input_shape;
- const unsigned int out_width = (input_shape[idx_width] - 1) * stride_x - pad_left - pad_right + pool_info.pool_size.width;
- const unsigned int out_height = (input_shape[idx_height] - 1) * stride_y - pad_top - pad_bottom + pool_info.pool_size.height;
+ const unsigned int out_width =
+ (input_shape[idx_width] - 1) * stride_x - pad_left - pad_right + pool_info.pool_size.width;
+ const unsigned int out_height =
+ (input_shape[idx_height] - 1) * stride_y - pad_top - pad_bottom + pool_info.pool_size.height;
output_shape.set(idx_width, out_width);
output_shape.set(idx_height, out_height);
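The unpool extents invert the pooling formula. A worked case with assumed values:

    // Illustrative: 56-wide pooled input, stride 2, no padding, 2x2 pool size.
    constexpr unsigned out_width = (56 - 1) * 2 - 0 - 0 + 2; // 110 + 2
    static_assert(out_width == 112, "max-unpooling restores the pre-pooling 112-wide extent");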
@@ -866,9 +904,10 @@ inline TensorShape compute_unpool_shape(const ITensorInfo &input, PoolingLayerIn
*
* @return the calculated shape
*/
-inline TensorShape compute_roi_align_shape(const ITensorInfo &input, const ITensorInfo &rois, ROIPoolingLayerInfo pool_info)
+inline TensorShape
+compute_roi_align_shape(const ITensorInfo &input, const ITensorInfo &rois, ROIPoolingLayerInfo pool_info)
{
- TensorShape output_shape{ input.tensor_shape() };
+ TensorShape output_shape{input.tensor_shape()};
const unsigned int idx_width = get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::WIDTH);
const unsigned int idx_height = get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::HEIGHT);
@@ -889,7 +928,7 @@ inline TensorShape compute_roi_align_shape(const ITensorInfo &input, const ITens
*/
inline TensorShape compute_rnn_shape(const ITensorInfo *input, const unsigned int batch_size)
{
- TensorShape output_shape{ input->tensor_shape() };
+ TensorShape output_shape{input->tensor_shape()};
output_shape.set(1, batch_size);
return output_shape;
@@ -904,15 +943,21 @@ inline TensorShape compute_rnn_shape(const ITensorInfo *input, const unsigned in
*
* @return the calculated shape
*/
-inline TensorShape compute_mm_shape(const ITensorInfo &input0, const ITensorInfo &input1, bool is_interleaved_transposed, const GEMMReshapeInfo &reshape_info)
+inline TensorShape compute_mm_shape(const ITensorInfo &input0,
+ const ITensorInfo &input1,
+ bool is_interleaved_transposed,
+ const GEMMReshapeInfo &reshape_info)
{
ARM_COMPUTE_ERROR_ON_MSG(input0.num_dimensions() > 4, "The number of dimensions for the matrix A must be <= 4");
- ARM_COMPUTE_ERROR_ON_MSG(is_interleaved_transposed && reshape_info.reinterpret_input_as_3d(), "The first input tensor cannot be reinterpreted as 3D if is_interleaved_transposed is true");
+ ARM_COMPUTE_ERROR_ON_MSG(
+ is_interleaved_transposed && reshape_info.reinterpret_input_as_3d(),
+ "The first input tensor cannot be reinterpreted as 3D if is_interleaved_transposed is true");
const bool reinterpret_input_as_3d = reshape_info.reinterpret_input_as_3d();
const bool reinterpret_output_as_3d = reshape_info.depth_output_gemm3d() != 0;
const int depth_output_gemm3d = reinterpret_output_as_3d ? reshape_info.depth_output_gemm3d() : 1;
- const int m = reshape_info.reinterpret_input_as_3d() ? input0.dimension(1) * input0.dimension(2) : input0.dimension(1);
+ const int m =
+ reshape_info.reinterpret_input_as_3d() ? input0.dimension(1) * input0.dimension(2) : input0.dimension(1);
// If the output of GEMM has to be reinterpreted as 3D, the number of input0 rows (M) is obtained by collapsing the second and third
// dimension of the output tensor
@@ -921,7 +966,7 @@ inline TensorShape compute_mm_shape(const ITensorInfo &input0, const ITensorInfo
const int dim2 = reinterpret_input_as_3d ? input0.tensor_shape()[3] : input0.tensor_shape()[2];
const int dim3 = reinterpret_input_as_3d ? 1 : input0.tensor_shape()[3];
- TensorShape output_shape{ input0.tensor_shape() };
+ TensorShape output_shape{input0.tensor_shape()};
output_shape.set(0, dim0);
output_shape.set(1, dim1);
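The M collapse guarded above can be pictured with assumed dimensions; when input0 is reinterpreted as 3D, the row count is the product of its second and third dimensions:

    // Illustrative: input0 shape [K, 14, 14, N] (arm_compute dim 0 is innermost).
    constexpr int m_reinterpreted = 14 * 14; // dimension(1) * dimension(2) = 196
    constexpr int m_plain = 14;              // dimension(1) only
    static_assert(m_reinterpreted == 196 && m_plain == 14, "M = 196 when input0 is 3D-reinterpreted");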
@@ -940,7 +985,8 @@ inline TensorShape compute_mm_shape(const ITensorInfo &input0, const ITensorInfo
*
* @return the calculated shape
*/
-inline TensorShape compute_mm_shape(const ITensorInfo &input0, const ITensorInfo &input1, const GEMMReshapeInfo &gemm_info)
+inline TensorShape
+compute_mm_shape(const ITensorInfo &input0, const ITensorInfo &input1, const GEMMReshapeInfo &gemm_info)
{
ARM_COMPUTE_UNUSED(input1);
ARM_COMPUTE_ERROR_ON_MSG(input0.num_dimensions() > 4, "The number of dimensions for the matrix A must be <= 4");
@@ -949,9 +995,9 @@ inline TensorShape compute_mm_shape(const ITensorInfo &input0, const ITensorInfo
const bool reinterpret_output_as_3d = gemm_info.depth_output_gemm3d() != 0;
const int depth_output_gemm3d = reinterpret_output_as_3d ? gemm_info.depth_output_gemm3d() : 1;
- TensorShape output_shape{ input0.tensor_shape() };
+ TensorShape output_shape{input0.tensor_shape()};
- if(!reinterpret_input_as_3d && !reinterpret_output_as_3d)
+ if (!reinterpret_input_as_3d && !reinterpret_output_as_3d)
{
output_shape.set(0, gemm_info.n());
output_shape.set(1, gemm_info.m());
@@ -978,7 +1024,8 @@ inline TensorShape compute_mm_shape(const ITensorInfo &input0, const ITensorInfo
*
* @return the calculated shape
*/
-inline TensorShape compute_mm_shape(const ITensorInfo &input0, const ITensorInfo &input1, const GEMMKernelInfo &gemm_info)
+inline TensorShape
+compute_mm_shape(const ITensorInfo &input0, const ITensorInfo &input1, const GEMMKernelInfo &gemm_info)
{
ARM_COMPUTE_UNUSED(input1);
ARM_COMPUTE_ERROR_ON_MSG(input0.num_dimensions() > 4, "The number of dimensions for the matrix A must be <= 4");
@@ -987,9 +1034,9 @@ inline TensorShape compute_mm_shape(const ITensorInfo &input0, const ITensorInfo
const bool reinterpret_output_as_3d = gemm_info.depth_output_gemm3d != 0;
const unsigned int depth_output_gemm3d = reinterpret_output_as_3d ? gemm_info.depth_output_gemm3d : 1;
- TensorShape output_shape{ input0.tensor_shape() };
+ TensorShape output_shape{input0.tensor_shape()};
- if(!reinterpret_input_as_3d && !reinterpret_output_as_3d)
+ if (!reinterpret_input_as_3d && !reinterpret_output_as_3d)
{
output_shape.set(0, gemm_info.n);
output_shape.set(1, gemm_info.m);
@@ -1016,16 +1063,17 @@ inline TensorShape compute_mm_shape(const ITensorInfo &input0, const ITensorInfo
*
* @return the calculated shape
*/
-inline TensorShape compute_matmul_shape(const TensorShape &input0, const TensorShape &input1, const MatMulKernelInfo &matmul_info)
+inline TensorShape
+compute_matmul_shape(const TensorShape &input0, const TensorShape &input1, const MatMulKernelInfo &matmul_info)
{
- TensorShape output_shape{ input0 };
+ TensorShape output_shape{input0};
- if(matmul_info.adj_lhs)
+ if (matmul_info.adj_lhs)
{
output_shape.set(1, input0[0]); // The vertical (M) dimension
}
- if(matmul_info.adj_rhs)
+ if (matmul_info.adj_rhs)
{
output_shape.set(0, input1[1]); // The horizontal (N) dimension
}
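A shape sketch for the two adjoint flags, using arm_compute's innermost-first dimension order and assumed extents (the non-adjoint dimensions are filled in by code past this hunk):

    // lhs [K=8, M=4]: adj_lhs means the stored tensor is [M, K], so M = input0[0] = 4.
    // rhs [N=6, K=8]: adj_rhs means the stored tensor is [K, N], so N = input1[1] = 6.
    // Either way the result carries N in dim 0 and M in dim 1: [6, 4, ...].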
@@ -1044,14 +1092,15 @@ inline TensorShape compute_matmul_shape(const TensorShape &input0, const TensorS
*
* @return the calculated shape
*/
-inline TensorShape compute_output_stage_shape(const ITensorInfo &input, unsigned int gemm_3d_depth = 1, bool batch_size_on_z = false)
+inline TensorShape
+compute_output_stage_shape(const ITensorInfo &input, unsigned int gemm_3d_depth = 1, bool batch_size_on_z = false)
{
ARM_COMPUTE_ERROR_ON(input.data_layout() != DataLayout::NHWC && gemm_3d_depth > 1);
TensorShape output_shape = input.tensor_shape();
- if(gemm_3d_depth > 1)
+ if (gemm_3d_depth > 1)
{
- if(batch_size_on_z)
+ if (batch_size_on_z)
{
output_shape.shift_right(1);
}
@@ -1076,11 +1125,16 @@ inline TensorShape compute_output_stage_shape(const ITensorInfo &input, unsigned
* @return the calculated shape
*/
inline TensorShape compute_strided_slice_shape(const ITensorInfo &input,
- const Coordinates &starts, const Coordinates &ends, const Coordinates &strides,
- int32_t begin_mask, int32_t end_mask, int32_t shrink_axis_mask)
+ const Coordinates &starts,
+ const Coordinates &ends,
+ const Coordinates &strides,
+ int32_t begin_mask,
+ int32_t end_mask,
+ int32_t shrink_axis_mask)
{
using namespace arm_compute::helpers::tensor_transform;
- return compute_strided_slice_output_shape(input.tensor_shape(), starts, ends, strides, begin_mask, end_mask, shrink_axis_mask);
+ return compute_strided_slice_output_shape(input.tensor_shape(), starts, ends, strides, begin_mask, end_mask,
+ shrink_axis_mask);
}
/** Calculate the slice output shape of a tensor
@@ -1091,13 +1145,13 @@ inline TensorShape compute_strided_slice_shape(const ITensorInfo &input,
*
* @return the calculated shape
*/
-inline TensorShape compute_slice_shape(const TensorShape &input_shape, const Coordinates &starts, const Coordinates &ends)
+inline TensorShape
+compute_slice_shape(const TensorShape &input_shape, const Coordinates &starts, const Coordinates &ends)
{
using namespace arm_compute::helpers::tensor_transform;
- return compute_strided_slice_output_shape(input_shape,
- starts, ends, BiStrides(),
- 0, construct_slice_end_mask(ends), 0);
+ return compute_strided_slice_output_shape(input_shape, starts, ends, BiStrides(), 0, construct_slice_end_mask(ends),
+ 0);
}
/** Calculate the batch to space output shape of a tensor
@@ -1110,7 +1164,8 @@ inline TensorShape compute_slice_shape(const TensorShape &input_shape, const Coo
*
* @return the calculated shape
*/
-inline TensorShape compute_batch_to_space_shape(DataLayout data_layout, const TensorShape &input, int block_x, int block_y, const CropInfo &crop_info = CropInfo{})
+inline TensorShape compute_batch_to_space_shape(
+ DataLayout data_layout, const TensorShape &input, int block_x, int block_y, const CropInfo &crop_info = CropInfo{})
{
ARM_COMPUTE_ERROR_ON(block_x < 1 || block_y < 1);
@@ -1118,7 +1173,7 @@ inline TensorShape compute_batch_to_space_shape(DataLayout data_layout, const Te
const int idx_height = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
const int idx_batch = get_data_layout_dimension_index(data_layout, DataLayoutDimension::BATCHES);
- TensorShape output_shape{ input };
+ TensorShape output_shape{input};
unsigned int new_width = input[idx_width] * static_cast<unsigned int>(block_x);
unsigned int new_height = input[idx_height] * static_cast<unsigned int>(block_y);
@@ -1152,7 +1207,7 @@ inline TensorShape compute_depth_to_space_shape(const TensorShape &input_shape,
const int idx_height = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
const int idx_channel = get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL);
- TensorShape output_shape{ input_shape };
+ TensorShape output_shape{input_shape};
output_shape.set(idx_width, input_shape[idx_width] * block);
output_shape.set(idx_height, input_shape[idx_height] * block);
output_shape.set(idx_channel, input_shape[idx_channel] / (block * block));
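Depth-to-space is volume preserving, which the three set() calls above make easy to check with assumed extents:

    // Illustrative NHWC tensor [C=64, W=28, H=28, N] with block = 2.
    static_assert(64 / (2 * 2) == 16 && 28 * 2 == 56, "output is [16, 56, 56, N]");
    static_assert(64 * 28 * 28 == 16 * 56 * 56, "element count is unchanged");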
@@ -1173,10 +1228,10 @@ inline TensorShape compute_split_shape(const ITensorInfo *input, unsigned int ax
TensorShape empty_shape;
empty_shape.set(0, 0);
- TensorShape out_shape{ input->tensor_shape() };
+ TensorShape out_shape{input->tensor_shape()};
// Return empty shape if axis is invalid
- if(axis > input->tensor_shape().num_dimensions())
+ if (axis > input->tensor_shape().num_dimensions())
{
return empty_shape;
}
@@ -1184,7 +1239,7 @@ inline TensorShape compute_split_shape(const ITensorInfo *input, unsigned int ax
size_t axis_size = out_shape[axis];
// Return empty shape if num_split is not valid
- if(axis_size % num_splits)
+ if (axis_size % num_splits)
{
return empty_shape;
}
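Past this hunk the surviving shape keeps axis_size / num_splits along the split axis; the two early-outs above reject everything else. A small numeric check with assumed values:

    constexpr size_t axis_size = 12, num_splits = 3;
    static_assert(axis_size % num_splits == 0, "12 splits evenly 3 ways");
    static_assert(axis_size / num_splits == 4, "each output keeps 4 along the split axis");
    // num_splits = 5 would instead hit the empty-shape return above.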
@@ -1203,9 +1258,10 @@ inline TensorShape compute_split_shape(const ITensorInfo *input, unsigned int ax
*
* @return the calculated shape
*/
-inline TensorShape compute_space_to_batch_shape(const ITensorInfo *input, int block_x, int block_y, const Size2D &padding_left, const Size2D &padding_right)
+inline TensorShape compute_space_to_batch_shape(
+ const ITensorInfo *input, int block_x, int block_y, const Size2D &padding_left, const Size2D &padding_right)
{
- TensorShape output_shape{ input->tensor_shape() };
+ TensorShape output_shape{input->tensor_shape()};
const DataLayout data_layout = input->data_layout();
const int idx_width = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
@@ -1231,7 +1287,7 @@ inline TensorShape compute_space_to_batch_shape(const ITensorInfo *input, int bl
*/
inline TensorShape compute_space_to_depth_shape(const ITensorInfo *input, int32_t block_shape)
{
- TensorShape output_shape{ input->tensor_shape() };
+ TensorShape output_shape{input->tensor_shape()};
const DataLayout data_layout = input->data_layout();
const int idx_width = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
@@ -1276,7 +1332,7 @@ inline TensorShape compute_prior_box_shape(const ITensorInfo &input, const Prior
inline TensorShape compute_padded_shape(const TensorShape &input_shape, const PaddingList &padding)
{
TensorShape padded_shape = input_shape;
- for(size_t dim = 0; dim < padding.size(); ++dim)
+ for (size_t dim = 0; dim < padding.size(); ++dim)
{
const auto &padding_pair = padding[dim];
const uint32_t shape_on_index = (padded_shape.num_dimensions() <= dim) ? 1 : input_shape[dim];
@@ -1295,7 +1351,7 @@ inline TensorShape compute_padded_shape(const TensorShape &input_shape, const Pa
inline TensorShape compute_tiled_shape(const TensorShape &input_shape, const Multiples &multiples)
{
TensorShape tiled_shape = input_shape;
- for(size_t dim = 0; dim < multiples.size(); ++dim)
+ for (size_t dim = 0; dim < multiples.size(); ++dim)
{
tiled_shape.set(dim, input_shape[dim] * multiples[dim]);
}
@@ -1312,9 +1368,9 @@ inline TensorShape compute_tiled_shape(const TensorShape &input_shape, const Mul
*/
inline TensorShape compute_reduced_shape(const TensorShape &input, unsigned int axis, bool keep_dims = true)
{
- TensorShape output_shape{ input };
+ TensorShape output_shape{input};
- if(!keep_dims)
+ if (!keep_dims)
{
output_shape.remove_dimension(axis);
}
@@ -1407,14 +1463,14 @@ inline TensorShape calculate_concatenate_shape(const std::vector<T *> &input, si
#if defined(ARM_COMPUTE_ASSERTS_ENABLED)
// All dimensions must match except the axis one
- for(unsigned int i = 0; i < MAX_DIMS; ++i)
+ for (unsigned int i = 0; i < MAX_DIMS; ++i)
{
- if(i == axis)
+ if (i == axis)
{
continue;
}
- for(const auto &tensor : input)
+ for (const auto &tensor : input)
{
ARM_COMPUTE_ERROR_ON(tensor == nullptr);
const TensorShape shape = extract_shape(tensor);
@@ -1425,7 +1481,7 @@ inline TensorShape calculate_concatenate_shape(const std::vector<T *> &input, si
// Calculate output shape
size_t new_size = 0;
- for(const auto &tensor : input)
+ for (const auto &tensor : input)
{
const TensorShape shape = extract_shape(tensor);
new_size += shape[axis];
@@ -1448,14 +1504,14 @@ inline TensorShape compute_stack_shape(const ITensorInfo &a, unsigned int axis,
ARM_COMPUTE_ERROR_ON(axis > a.num_dimensions());
ARM_COMPUTE_ERROR_ON(a.num_dimensions() > 4);
- TensorShape shape_out{ a.tensor_shape() };
+ TensorShape shape_out{a.tensor_shape()};
shape_out.set(axis, num_tensors);
unsigned int i_shift = 0;
- for(unsigned int i = 0; i < a.num_dimensions(); ++i)
+ for (unsigned int i = 0; i < a.num_dimensions(); ++i)
{
- if(i == axis)
+ if (i == axis)
{
i_shift++;
}
@@ -1473,7 +1529,8 @@ inline TensorShape compute_stack_shape(const ITensorInfo &a, unsigned int axis,
*
* @return the calculated shape
*/
-inline TensorShape compute_conv3d_shape(const TensorShape &src, const TensorShape &weights, const Conv3dInfo &conv3d_info)
+inline TensorShape
+compute_conv3d_shape(const TensorShape &src, const TensorShape &weights, const Conv3dInfo &conv3d_info)
{
// Weight tensor shape indices (D H W Cin Cout)
constexpr unsigned int weights_depth_dim = 4u;
@@ -1488,7 +1545,7 @@ inline TensorShape compute_conv3d_shape(const TensorShape &src, const TensorShap
constexpr unsigned int width_dim = 1u;
constexpr unsigned int channel_dim = 0u;
- TensorShape output_shape{ src };
+ TensorShape output_shape{src};
const size_t pad_left = conv3d_info.padding.left;
const size_t pad_right = conv3d_info.padding.right;
const size_t pad_top = conv3d_info.padding.top;
@@ -1506,17 +1563,41 @@ inline TensorShape compute_conv3d_shape(const TensorShape &src, const TensorShap
int output_height_size = 0;
int output_depth_size = 0;
- switch(conv3d_info.round_type)
+ switch (conv3d_info.round_type)
{
case DimensionRoundingType::FLOOR:
- output_width_size = static_cast<int>(std::floor((static_cast<float>(src[width_dim] + pad_left + pad_right - (dilation_x * (weights[weights_width_dim] - 1) + 1)) / stride_x) + 1));
- output_height_size = static_cast<int>(std::floor((static_cast<float>(src[height_dim] + pad_top + pad_bottom - (dilation_y * (weights[weights_height_dim] - 1) + 1)) / stride_y) + 1));
- output_depth_size = static_cast<int>(std::floor((static_cast<float>(src[depth_dim] + pad_front + pad_back - (dilation_z * (weights[weights_depth_dim] - 1) + 1)) / stride_z) + 1));
+ output_width_size =
+ static_cast<int>(std::floor((static_cast<float>(src[width_dim] + pad_left + pad_right -
+ (dilation_x * (weights[weights_width_dim] - 1) + 1)) /
+ stride_x) +
+ 1));
+ output_height_size =
+ static_cast<int>(std::floor((static_cast<float>(src[height_dim] + pad_top + pad_bottom -
+ (dilation_y * (weights[weights_height_dim] - 1) + 1)) /
+ stride_y) +
+ 1));
+ output_depth_size =
+ static_cast<int>(std::floor((static_cast<float>(src[depth_dim] + pad_front + pad_back -
+ (dilation_z * (weights[weights_depth_dim] - 1) + 1)) /
+ stride_z) +
+ 1));
break;
case DimensionRoundingType::CEIL:
- output_width_size = static_cast<int>(std::ceil((static_cast<float>(src[width_dim] + pad_left + pad_right - (dilation_x * (weights[weights_width_dim] - 1) + 1)) / stride_x) + 1));
- output_height_size = static_cast<int>(std::ceil((static_cast<float>(src[height_dim] + pad_top + pad_bottom - (dilation_y * (weights[weights_height_dim] - 1) + 1)) / stride_y) + 1));
- output_depth_size = static_cast<int>(std::ceil((static_cast<float>(src[depth_dim] + pad_front + pad_back - (dilation_z * (weights[weights_depth_dim] - 1) + 1)) / stride_z) + 1));
+ output_width_size =
+ static_cast<int>(std::ceil((static_cast<float>(src[width_dim] + pad_left + pad_right -
+ (dilation_x * (weights[weights_width_dim] - 1) + 1)) /
+ stride_x) +
+ 1));
+ output_height_size =
+ static_cast<int>(std::ceil((static_cast<float>(src[height_dim] + pad_top + pad_bottom -
+ (dilation_y * (weights[weights_height_dim] - 1) + 1)) /
+ stride_y) +
+ 1));
+ output_depth_size =
+ static_cast<int>(std::ceil((static_cast<float>(src[depth_dim] + pad_front + pad_back -
+ (dilation_z * (weights[weights_depth_dim] - 1) + 1)) /
+ stride_z) +
+ 1));
break;
default:
ARM_COMPUTE_ERROR("Unsupported rounding type");
@@ -1539,7 +1620,7 @@ inline TensorShape compute_conv3d_shape(const TensorShape &src, const TensorShap
*/
inline TensorShape compute_pool3d_shape(const TensorShape &src, Pooling3dLayerInfo pool3d_info)
{
- TensorShape output_shape{ src };
+ TensorShape output_shape{src};
const auto data_layout = DataLayout::NDHWC;
const int idx_width = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
@@ -1552,10 +1633,12 @@ inline TensorShape compute_pool3d_shape(const TensorShape &src, Pooling3dLayerIn
int output_height = 0;
int output_depth = 0;
- std::tie(output_width, output_height, output_depth) = scaled_3d_dimensions_signed(src[idx_width], src[idx_height], src[idx_depth], pool_size_width, pool_size_height,
- pool_size_depth, pool3d_info);
+ std::tie(output_width, output_height, output_depth) =
+ scaled_3d_dimensions_signed(src[idx_width], src[idx_height], src[idx_depth], pool_size_width, pool_size_height,
+ pool_size_depth, pool3d_info);
- ARM_COMPUTE_ERROR_ON_MSG((output_width < 1 || output_height < 1 || output_depth < 1), "Calculated output dimension size is invalid");
+ ARM_COMPUTE_ERROR_ON_MSG((output_width < 1 || output_height < 1 || output_depth < 1),
+ "Calculated output dimension size is invalid");
output_shape.set(idx_width, static_cast<size_t>(output_width));
output_shape.set(idx_height, static_cast<size_t>(output_height));
@@ -1576,7 +1659,8 @@ inline TensorShape compute_pool3d_shape(const TensorShape &src, Pooling3dLayerIn
*
* @return the calculated shape
*/
-inline TensorShape compute_gather_shape(const TensorShape &input_shape, const TensorShape &indices_shape, uint32_t actual_axis)
+inline TensorShape
+compute_gather_shape(const TensorShape &input_shape, const TensorShape &indices_shape, uint32_t actual_axis)
{
const auto input_num_dims = input_shape.num_dimensions();
const auto indices_num_dims = indices_shape.num_dimensions();
@@ -1587,22 +1671,23 @@ inline TensorShape compute_gather_shape(const TensorShape &input_shape, const Te
TensorShape output_shape;
size_t dim_no = 0;
- for(; dim_no < actual_axis; ++dim_no)
+ for (; dim_no < actual_axis; ++dim_no)
{
output_shape.set(dim_no, input_shape[dim_no]);
}
- for(; dim_no < actual_axis + indices_num_dims; ++dim_no)
+ for (; dim_no < actual_axis + indices_num_dims; ++dim_no)
{
output_shape.set(dim_no, indices_shape[dim_no - actual_axis]);
}
- for(; dim_no < input_num_dims + indices_num_dims - 1; ++dim_no)
+ for (; dim_no < input_num_dims + indices_num_dims - 1; ++dim_no)
{
output_shape.set(dim_no, input_shape[dim_no + 1 - indices_num_dims]);
}
- ARM_COMPUTE_ERROR_ON(input_shape.total_size() * indices_shape.total_size() != output_shape.total_size() * input_shape[actual_axis]);
+ ARM_COMPUTE_ERROR_ON(input_shape.total_size() * indices_shape.total_size() !=
+ output_shape.total_size() * input_shape[actual_axis]);
return output_shape;
}
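The three loops splice the indices shape into the input shape at actual_axis, and the final assertion is a pure element-count identity; both are easy to verify on assumed shapes:

    // Illustrative: input [10, 20, 30], indices [5], actual_axis = 1 -> output [10, 5, 30].
    static_assert(10 * 20 * 30 * 5 == (10 * 5 * 30) * 20, "total-size invariant of the gather shape");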
diff --git a/arm_compute/core/utils/misc/Traits.h b/arm_compute/core/utils/misc/Traits.h
index 933922f63c..944fcb95f9 100644
--- a/arm_compute/core/utils/misc/Traits.h
+++ b/arm_compute/core/utils/misc/Traits.h
@@ -25,6 +25,7 @@
#define ARM_COMPUTE_UTILS_TRAITS_TRAITS_H
#include "arm_compute/core/Types.h"
+
#include <type_traits>
namespace arm_compute
diff --git a/arm_compute/core/utils/misc/Utility.h b/arm_compute/core/utils/misc/Utility.h
index e3e20d719f..22f10d74cc 100644
--- a/arm_compute/core/utils/misc/Utility.h
+++ b/arm_compute/core/utils/misc/Utility.h
@@ -44,7 +44,7 @@ struct index_sequence
};
template <std::size_t N, std::size_t... S>
-struct index_sequence_generator : index_sequence_generator < N - 1, N - 1, S... >
+struct index_sequence_generator : index_sequence_generator<N - 1, N - 1, S...>
{
};
@@ -58,17 +58,17 @@ template <std::size_t N>
using index_sequence_t = typename index_sequence_generator<N>::type;
template <typename T, std::size_t N, T val, T... vals>
-struct generate_array : generate_array < T, N - 1, val, val, vals... >
+struct generate_array : generate_array<T, N - 1, val, val, vals...>
{
};
template <typename T, T val, T... vals>
struct generate_array<T, 0, val, vals...>
{
- static constexpr std::array<T, sizeof...(vals)> value{ vals... };
+ static constexpr std::array<T, sizeof...(vals)> value{vals...};
};
-template <typename T, T val, T... vals>
+template <typename T, T val, T... vals>
constexpr std::array<T, sizeof...(vals)> generate_array<T, 0, val, vals...>::value;
/** @endcond */
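As a usage sketch (assuming the surrounding arm_compute::utility namespace), generate_array materialises N copies of a value at compile time:

    // Three 7s, checked at compile time; both asserts are valid C++14.
    static_assert(arm_compute::utility::generate_array<int, 3, 7>::value.size() == 3, "N elements");
    static_assert(arm_compute::utility::generate_array<int, 3, 7>::value[0] == 7, "each equal to val");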
@@ -79,7 +79,7 @@ template <std::size_t... S,
typename T = std::array<typename std::iterator_traits<Iterator>::value_type, sizeof...(S)>>
T make_array(Iterator first, index_sequence<S...>)
{
- return T{ { first[S]... } };
+ return T{{first[S]...}};
}
} // namespace detail
@@ -87,7 +87,7 @@ template <std::size_t N, typename Iterator>
std::array<typename std::iterator_traits<Iterator>::value_type, N> make_array(Iterator first, Iterator last)
{
ARM_COMPUTE_UNUSED(last);
- return detail::make_array(first, index_sequence_t<N> {});
+ return detail::make_array(first, index_sequence_t<N>{});
}
/** Performs clamping among a lower and upper value.
@@ -119,7 +119,7 @@ inline void for_each(F &&)
* @param[in] args Remaining arguments
*/
template <typename F, typename T, typename... Ts>
-inline void for_each(F &&func, T &&arg, Ts &&... args)
+inline void for_each(F &&func, T &&arg, Ts &&...args)
{
func(std::forward<T>(arg));
for_each(std::forward<F>(func), std::forward<Ts>(args)...);
@@ -143,9 +143,11 @@ inline T &&foldl(F &&, T &&value)
* @param[in] values Remaining arguments
*/
template <typename F, typename T, typename U, typename... Us>
-inline auto foldl(F &&func, T &&initial, U &&value, Us &&... values) -> decltype(func(std::forward<T>(initial), std::forward<U>(value)))
+inline auto foldl(F &&func, T &&initial, U &&value, Us &&...values)
+ -> decltype(func(std::forward<T>(initial), std::forward<U>(value)))
{
- return foldl(std::forward<F>(func), func(std::forward<T>(initial), std::forward<U>(value)), std::forward<Us>(values)...);
+ return foldl(std::forward<F>(func), func(std::forward<T>(initial), std::forward<U>(value)),
+ std::forward<Us>(values)...);
}
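foldl is a left fold, so the reduction below associates as ((0 + 1) + 2) + 3; a minimal usage sketch, assuming the arm_compute::utility namespace:

    #include <functional> // for std::plus
    // Strictly left-to-right: ((0 + 1) + 2) + 3 == 6.
    const int sum = arm_compute::utility::foldl(std::plus<int>{}, 0, 1, 2, 3);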
/** Perform an index sort of a given vector.
@@ -160,11 +162,7 @@ std::vector<size_t> sort_indices(const std::vector<T> &v)
std::vector<size_t> idx(v.size());
std::iota(idx.begin(), idx.end(), 0);
- std::sort(idx.begin(), idx.end(),
- [&v](size_t i1, size_t i2)
- {
- return v[i1] < v[i2];
- });
+ std::sort(idx.begin(), idx.end(), [&v](size_t i1, size_t i2) { return v[i1] < v[i2]; });
return idx;
}
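A usage sketch of the simplified lambda above (assumed data, arm_compute::utility namespace):

    #include <vector>
    const std::vector<float> v{3.f, 1.f, 2.f};
    const std::vector<size_t> idx = arm_compute::utility::sort_indices(v); // {1, 2, 0}: ascending order of v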
@@ -178,7 +176,7 @@ std::vector<size_t> sort_indices(const std::vector<T> &v)
*/
inline bool endswith(const std::string &str, const std::string &suffix)
{
- if(str.size() < suffix.size())
+ if (str.size() < suffix.size())
{
return false;
}
@@ -205,10 +203,7 @@ inline bool check_aligned(void *ptr, const size_t alignment)
*/
inline std::string tolower(std::string string)
{
- std::transform(string.begin(), string.end(), string.begin(), [](unsigned char c)
- {
- return std::tolower(c);
- });
+ std::transform(string.begin(), string.end(), string.begin(), [](unsigned char c) { return std::tolower(c); });
return string;
}
@@ -227,7 +222,7 @@ inline std::string getenv(const std::string &env_name)
return std::string{};
#else // BARE_METAL
const auto env_chr = std::getenv(env_name.c_str());
- return env_chr == nullptr ? std::string{} : std::string{ env_chr };
+ return env_chr == nullptr ? std::string{} : std::string{env_chr};
#endif // BARE_METAL
}
} // namespace utility
diff --git a/arm_compute/core/utils/quantization/AsymmHelpers.h b/arm_compute/core/utils/quantization/AsymmHelpers.h
index a15f3e5cde..2324fe1838 100644
--- a/arm_compute/core/utils/quantization/AsymmHelpers.h
+++ b/arm_compute/core/utils/quantization/AsymmHelpers.h
@@ -41,7 +41,10 @@ namespace quantization
*
* @return a status
*/
-Status calculate_quantized_multiplier(float multiplier, int32_t *quant_multiplier, int32_t *shift, bool ignore_epsilon = false);
+Status calculate_quantized_multiplier(float multiplier,
+ int32_t *quant_multiplier,
+ int32_t *shift,
+ bool ignore_epsilon = false);
/** Calculate the quantized representation of a multiplier whose value is less than one.
*
* @param[in] multiplier Real multiplier.
@@ -51,7 +54,10 @@ Status calculate_quantized_multiplier(float multiplier, int32_t *quant_multiplie
*
* @return a status
*/
-Status calculate_quantized_multiplier_less_than_one(float multiplier, int32_t *quant_multiplier, int32_t *right_shift, bool ignore_epsilon = false);
+Status calculate_quantized_multiplier_less_than_one(float multiplier,
+ int32_t *quant_multiplier,
+ int32_t *right_shift,
+ bool ignore_epsilon = false);
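These helpers produce the usual gemmlowp-style fixed-point pair, real_multiplier ~= (quant_multiplier / 2^31) * 2^(-right_shift) with quant_multiplier normalised into [2^30, 2^31); the decomposition below is worked by hand, not output captured from the library:

    // 0.3 = 0.6 * 2^-1 and round(0.6 * 2^31) = 1288490189,
    // so quant_multiplier = 1288490189, right_shift = 1.
    // Check: 1288490189 / 2147483648.0 * 0.5 ~= 0.3000000001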
/** Calculate the quantized representation of a multiplier whose value is greater than one.
*
* @param[in] multiplier Real multiplier.
@@ -60,7 +66,8 @@ Status calculate_quantized_multiplier_less_than_one(float multiplier, int32_t *q
*
* @return a status
*/
-Status calculate_quantized_multiplier_greater_than_one(float multiplier, int32_t *quantized_multiplier, int32_t *left_shift);
+Status
+calculate_quantized_multiplier_greater_than_one(float multiplier, int32_t *quantized_multiplier, int32_t *left_shift);
/** Calculate the quantized representations of per-channel multipliers
*
@@ -71,9 +78,9 @@ Status calculate_quantized_multiplier_greater_than_one(float multiplier, int32_t
*
* @return a status
*/
-Status calculate_quantized_multipliers(const QuantizationInfo &iq_info,
- const QuantizationInfo &wq_info,
- const QuantizationInfo &oq_info,
+Status calculate_quantized_multipliers(const QuantizationInfo &iq_info,
+ const QuantizationInfo &wq_info,
+ const QuantizationInfo &oq_info,
GEMMLowpOutputStageInfo &stage_info);
/** Get minimum and maximum values for the input quantized data type
@@ -147,7 +154,10 @@ int32_t saturating_rounding_multiply_by_pow2(int32_t exponent, int32_t v);
* @param[out] output_shift Shift for inverse square root
*
*/
-void get_invsqrt_quantized_multiplier_exp(int32_t input, int32_t reverse_shift, int32_t &output_inv_sqrt, int32_t &output_shift);
+void get_invsqrt_quantized_multiplier_exp(int32_t input,
+ int32_t reverse_shift,
+ int32_t &output_inv_sqrt,
+ int32_t &output_shift);
} // namespace quantization
} // namespace arm_compute