Diffstat (limited to 'src/core/Utils.cpp')
-rw-r--r--  src/core/Utils.cpp | 508
 1 file changed, 274 insertions(+), 234 deletions(-)
diff --git a/src/core/Utils.cpp b/src/core/Utils.cpp
index bdde082a1f..532d08de92 100644
--- a/src/core/Utils.cpp
+++ b/src/core/Utils.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2020 ARM Limited.
+ * Copyright (c) 2016-2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -22,10 +22,12 @@
* SOFTWARE.
*/
-#include "arm_compute/core/Helpers.h"
-
#include "arm_compute/core/Utils.h"
+#include "arm_compute/core/Helpers.h"
+#include "arm_compute/core/utils/StringUtils.h"
+#include "arm_compute/function_info/ActivationLayerInfo.h"
+
#include <algorithm>
#include <cmath>
#include <cstdint>
@@ -33,9 +35,9 @@
#include <map>
#include <string>
-using namespace arm_compute;
-
-std::string arm_compute::read_file(const std::string &filename, bool binary)
+namespace arm_compute
+{
+std::string read_file(const std::string &filename, bool binary)
{
std::string out;
std::ifstream fs;
@@ -47,7 +49,7 @@ std::string arm_compute::read_file(const std::string &filename, bool binary)
fs.exceptions(std::ifstream::failbit | std::ifstream::badbit);
std::ios_base::openmode mode = std::ios::in;
- if(binary)
+ if (binary)
{
mode |= std::ios::binary;
}
@@ -64,7 +66,7 @@ std::string arm_compute::read_file(const std::string &filename, bool binary)
out.assign(std::istreambuf_iterator<char>(fs), std::istreambuf_iterator<char>());
#ifndef ARM_COMPUTE_EXCEPTIONS_DISABLED
}
- catch(const std::ifstream::failure &e)
+ catch (const std::ifstream::failure &e)
{
ARM_COMPUTE_ERROR_VAR("Accessing %s: %s", filename.c_str(), e.what());
}
@@ -73,209 +75,99 @@ std::string arm_compute::read_file(const std::string &filename, bool binary)
return out;
}
-const std::string &arm_compute::string_from_format(Format format)
-{
- static std::map<Format, const std::string> formats_map =
- {
- { Format::UNKNOWN, "UNKNOWN" },
- { Format::U8, "U8" },
- { Format::S16, "S16" },
- { Format::U16, "U16" },
- { Format::S32, "S32" },
- { Format::U32, "U32" },
- { Format::F16, "F16" },
- { Format::F32, "F32" },
- { Format::UV88, "UV88" },
- { Format::RGB888, "RGB888" },
- { Format::RGBA8888, "RGBA8888" },
- { Format::YUV444, "YUV444" },
- { Format::YUYV422, "YUYV422" },
- { Format::NV12, "NV12" },
- { Format::NV21, "NV21" },
- { Format::IYUV, "IYUV" },
- { Format::UYVY422, "UYVY422" }
- };
-
- return formats_map[format];
-}
-
-const std::string &arm_compute::string_from_channel(Channel channel)
+const std::string &string_from_channel(Channel channel)
{
- static std::map<Channel, const std::string> channels_map =
- {
- { Channel::UNKNOWN, "UNKNOWN" },
- { Channel::R, "R" },
- { Channel::G, "G" },
- { Channel::B, "B" },
- { Channel::A, "A" },
- { Channel::Y, "Y" },
- { Channel::U, "U" },
- { Channel::V, "V" },
- { Channel::C0, "C0" },
- { Channel::C1, "C1" },
- { Channel::C2, "C2" },
- { Channel::C3, "C3" }
- };
+ static std::map<Channel, const std::string> channels_map = {{Channel::UNKNOWN, "UNKNOWN"},
+ {Channel::R, "R"},
+ {Channel::G, "G"},
+ {Channel::B, "B"},
+ {Channel::A, "A"},
+ {Channel::Y, "Y"},
+ {Channel::U, "U"},
+ {Channel::V, "V"},
+ {Channel::C0, "C0"},
+ {Channel::C1, "C1"},
+ {Channel::C2, "C2"},
+ {Channel::C3, "C3"}};
return channels_map[channel];
}
-const std::string &arm_compute::string_from_data_layout(DataLayout dl)
-{
- static std::map<DataLayout, const std::string> dl_map =
- {
- { DataLayout::UNKNOWN, "UNKNOWN" },
- { DataLayout::NCHW, "NCHW" },
- { DataLayout::NHWC, "NHWC" },
- };
-
- return dl_map[dl];
-}
-
-const std::string &arm_compute::string_from_data_type(DataType dt)
-{
- static std::map<DataType, const std::string> dt_map =
- {
- { DataType::UNKNOWN, "UNKNOWN" },
- { DataType::S8, "S8" },
- { DataType::U8, "U8" },
- { DataType::S16, "S16" },
- { DataType::U16, "U16" },
- { DataType::S32, "S32" },
- { DataType::U32, "U32" },
- { DataType::S64, "S64" },
- { DataType::U64, "U64" },
- { DataType::F16, "F16" },
- { DataType::F32, "F32" },
- { DataType::F64, "F64" },
- { DataType::SIZET, "SIZET" },
- { DataType::QSYMM8, "QSYMM8" },
- { DataType::QSYMM8_PER_CHANNEL, "QSYMM8_PER_CHANNEL" },
- { DataType::QASYMM8, "QASYMM8" },
- { DataType::QASYMM8_SIGNED, "QASYMM8_SIGNED" },
- { DataType::QSYMM16, "QSYMM16" },
- { DataType::QASYMM16, "QASYMM16" },
- };
-
- return dt_map[dt];
-}
-
-const std::string &arm_compute::string_from_activation_func(ActivationLayerInfo::ActivationFunction act)
-{
- static std::map<ActivationLayerInfo::ActivationFunction, const std::string> act_map =
- {
- { ActivationLayerInfo::ActivationFunction::ABS, "ABS" },
- { ActivationLayerInfo::ActivationFunction::LINEAR, "LINEAR" },
- { ActivationLayerInfo::ActivationFunction::LOGISTIC, "LOGISTIC" },
- { ActivationLayerInfo::ActivationFunction::RELU, "RELU" },
- { ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, "BRELU" },
- { ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, "LU_BRELU" },
- { ActivationLayerInfo::ActivationFunction::LEAKY_RELU, "LRELU" },
- { ActivationLayerInfo::ActivationFunction::SOFT_RELU, "SRELU" },
- { ActivationLayerInfo::ActivationFunction::ELU, "ELU" },
- { ActivationLayerInfo::ActivationFunction::SQRT, "SQRT" },
- { ActivationLayerInfo::ActivationFunction::SQUARE, "SQUARE" },
- { ActivationLayerInfo::ActivationFunction::TANH, "TANH" },
- { ActivationLayerInfo::ActivationFunction::IDENTITY, "IDENTITY" },
- { ActivationLayerInfo::ActivationFunction::HARD_SWISH, "HARD_SWISH" }
-
- };
-
- return act_map[act];
-}
-
-const std::string &arm_compute::string_from_matrix_pattern(MatrixPattern pattern)
-{
- static std::map<MatrixPattern, const std::string> pattern_map =
- {
- { MatrixPattern::BOX, "BOX" },
- { MatrixPattern::CROSS, "CROSS" },
- { MatrixPattern::DISK, "DISK" },
- { MatrixPattern::OTHER, "OTHER" },
- };
-
- return pattern_map[pattern];
-}
-
-const std::string &arm_compute::string_from_non_linear_filter_function(NonLinearFilterFunction function)
+const std::string &string_from_border_mode(BorderMode border_mode)
{
- static std::map<NonLinearFilterFunction, const std::string> func_map =
- {
- { NonLinearFilterFunction::MAX, "MAX" },
- { NonLinearFilterFunction::MEDIAN, "MEDIAN" },
- { NonLinearFilterFunction::MIN, "MIN" },
+ static std::map<BorderMode, const std::string> border_mode_map = {
+ {BorderMode::UNDEFINED, "UNDEFINED"},
+ {BorderMode::CONSTANT, "CONSTANT"},
+ {BorderMode::REPLICATE, "REPLICATE"},
};
- return func_map[function];
+ return border_mode_map[border_mode];
}
-const std::string &arm_compute::string_from_interpolation_policy(InterpolationPolicy policy)
+const std::string &string_from_norm_type(NormType type)
{
- static std::map<InterpolationPolicy, const std::string> interpolation_policy_map =
- {
- { InterpolationPolicy::AREA, "AREA" },
- { InterpolationPolicy::BILINEAR, "BILINEAR" },
- { InterpolationPolicy::NEAREST_NEIGHBOR, "NEAREST_NEIGHBOUR" },
+ static std::map<NormType, const std::string> norm_type_map = {
+ {NormType::IN_MAP_1D, "IN_MAP_1D"},
+ {NormType::IN_MAP_2D, "IN_MAP_2D"},
+ {NormType::CROSS_MAP, "CROSS_MAP"},
};
- return interpolation_policy_map[policy];
+ return norm_type_map[type];
}
-const std::string &arm_compute::string_from_border_mode(BorderMode border_mode)
+const std::string &string_from_pooling_type(PoolingType type)
{
- static std::map<BorderMode, const std::string> border_mode_map =
- {
- { BorderMode::UNDEFINED, "UNDEFINED" },
- { BorderMode::CONSTANT, "CONSTANT" },
- { BorderMode::REPLICATE, "REPLICATE" },
+ static std::map<PoolingType, const std::string> pool_type_map = {
+ {PoolingType::MAX, "MAX"},
+ {PoolingType::AVG, "AVG"},
+ {PoolingType::L2, "L2"},
};
- return border_mode_map[border_mode];
+ return pool_type_map[type];
}
-const std::string &arm_compute::string_from_norm_type(NormType type)
+bool is_pool_region_entirely_outside_input(const PoolingLayerInfo &info)
{
- static std::map<NormType, const std::string> norm_type_map =
+ if (info.is_global_pooling || info.exclude_padding || info.pool_size.x() == 0 || info.pool_size.y() == 0)
{
- { NormType::IN_MAP_1D, "IN_MAP_1D" },
- { NormType::IN_MAP_2D, "IN_MAP_2D" },
- { NormType::CROSS_MAP, "CROSS_MAP" },
- };
-
- return norm_type_map[type];
+ return false;
+ }
+ const auto ps = info.pad_stride_info;
+ const auto pool_le_padding_x = info.pool_size.x() <= std::max({ps.pad_left(), ps.pad_right()});
+ const auto pool_le_padding_y = info.pool_size.y() <= std::max({ps.pad_top(), ps.pad_bottom()});
+ return pool_le_padding_x || pool_le_padding_y;
}
-const std::string &arm_compute::string_from_pooling_type(PoolingType type)
+bool is_pool_3d_region_entirely_outside_input(const Pooling3dLayerInfo &info)
{
- static std::map<PoolingType, const std::string> pool_type_map =
+ if (info.is_global_pooling || info.pool_size.x() == 0 || info.pool_size.y() == 0 || info.pool_size.z() == 0)
{
- { PoolingType::MAX, "MAX" },
- { PoolingType::AVG, "AVG" },
- { PoolingType::L2, "L2" },
- };
-
- return pool_type_map[type];
+ return false;
+ }
+ const auto ps = info.padding;
+ const auto pool_le_padding_x = info.pool_size.x() <= std::max({ps.left, ps.right});
+ const auto pool_le_padding_y = info.pool_size.y() <= std::max({ps.top, ps.bottom});
+ const auto pool_le_padding_z = info.pool_size.z() <= std::max({ps.front, ps.back});
+ return pool_le_padding_x || pool_le_padding_y || pool_le_padding_z;
}
-const std::string &arm_compute::string_from_gemmlowp_output_stage(GEMMLowpOutputStageType output_stage)
+const std::string &string_from_gemmlowp_output_stage(GEMMLowpOutputStageType output_stage)
{
- static std::map<GEMMLowpOutputStageType, const std::string> output_stage_map =
- {
- { GEMMLowpOutputStageType::NONE, "" },
- { GEMMLowpOutputStageType::QUANTIZE_DOWN, "quantize_down" },
- { GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT, "quantize_down_fixedpoint" },
- { GEMMLowpOutputStageType::QUANTIZE_DOWN_FLOAT, "quantize_down_float" }
- };
+ static std::map<GEMMLowpOutputStageType, const std::string> output_stage_map = {
+ {GEMMLowpOutputStageType::NONE, ""},
+ {GEMMLowpOutputStageType::QUANTIZE_DOWN, "quantize_down"},
+ {GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT, "quantize_down_fixedpoint"},
+ {GEMMLowpOutputStageType::QUANTIZE_DOWN_FLOAT, "quantize_down_float"}};
return output_stage_map[output_stage];
}
-std::string arm_compute::string_from_pixel_value(const PixelValue &value, const DataType data_type)
+std::string string_from_pixel_value(const PixelValue &value, const DataType data_type)
{
std::stringstream ss;
std::string converted_string;
- switch(data_type)
+ switch (data_type)
{
case DataType::U8:
case DataType::QASYMM8:
@@ -323,21 +215,19 @@ std::string arm_compute::string_from_pixel_value(const PixelValue &value, const
return converted_string;
}
-std::string arm_compute::lower_string(const std::string &val)
-{
- std::string res = val;
- std::transform(res.begin(), res.end(), res.begin(), ::tolower);
- return res;
-}
-
-PadStrideInfo arm_compute::calculate_same_pad(TensorShape input_shape, TensorShape weights_shape, PadStrideInfo conv_info, DataLayout data_layout, const Size2D &dilation,
- const DimensionRoundingType &rounding_type)
+PadStrideInfo calculate_same_pad(TensorShape input_shape,
+ TensorShape weights_shape,
+ PadStrideInfo conv_info,
+ DataLayout data_layout,
+ const Size2D &dilation,
+ const DimensionRoundingType &rounding_type)
{
const auto &strides = conv_info.stride();
- ARM_COMPUTE_ERROR_ON_MSG((strides.first < 1 || strides.second < 1), "Stride values should be greater than or equal to 1.");
+ ARM_COMPUTE_ERROR_ON_MSG((strides.first < 1 || strides.second < 1),
+ "Stride values should be greater than or equal to 1.");
- const unsigned int width_idx = arm_compute::get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
- const unsigned int height_idx = arm_compute::get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
+ const unsigned int width_idx = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
+ const unsigned int height_idx = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
const unsigned int in_width = input_shape[width_idx];
const unsigned int in_height = input_shape[height_idx];
const unsigned int kernel_width = weights_shape[width_idx];
@@ -353,8 +243,9 @@ PadStrideInfo arm_compute::calculate_same_pad(TensorShape input_shape, TensorSha
const int real_weight_height = (kernel_height - 1) * dilation.y() + 1;
// Calculate total pad
- const int pad_width = std::max(0, static_cast<int>((out_width - 1) * strides.first + real_weight_width - in_width));
- const int pad_height = std::max(0, static_cast<int>((out_height - 1) * strides.second + real_weight_height - in_height));
+ const int pad_width = std::max(0, static_cast<int>((out_width - 1) * strides.first + real_weight_width - in_width));
+ const int pad_height =
+ std::max(0, static_cast<int>((out_height - 1) * strides.second + real_weight_height - in_height));
// Calculate individual paddings
const unsigned int pad_left = pad_width / 2;
@@ -372,9 +263,11 @@ PadStrideInfo arm_compute::calculate_same_pad(TensorShape input_shape, TensorSha
return same_info;
}
-std::pair<unsigned int, unsigned int> arm_compute::deconvolution_output_dimensions(unsigned int in_width, unsigned int in_height,
- unsigned int kernel_width, unsigned int kernel_height,
- const PadStrideInfo &pad_stride_info)
+std::pair<unsigned int, unsigned int> deconvolution_output_dimensions(unsigned int in_width,
+ unsigned int in_height,
+ unsigned int kernel_width,
+ unsigned int kernel_height,
+ const PadStrideInfo &pad_stride_info)
{
const unsigned int pad_left = pad_stride_info.pad_left();
const unsigned int pad_top = pad_stride_info.pad_top();
@@ -392,10 +285,12 @@ std::pair<unsigned int, unsigned int> arm_compute::deconvolution_output_dimensio
return std::make_pair<unsigned int, unsigned int>(w, h);
}
-std::pair<unsigned int, unsigned int> arm_compute::scaled_dimensions(int width, int height,
- int kernel_width, int kernel_height,
- const PadStrideInfo &pad_stride_info,
- const Size2D &dilation)
+std::pair<unsigned int, unsigned int> scaled_dimensions(int width,
+ int height,
+ int kernel_width,
+ int kernel_height,
+ const PadStrideInfo &pad_stride_info,
+ const Size2D &dilation)
{
const int dilation_x = dilation.x();
const int dilation_y = dilation.y();
@@ -407,15 +302,25 @@ std::pair<unsigned int, unsigned int> arm_compute::scaled_dimensions(int width,
const int stride_y = pad_stride_info.stride().second;
int w = 0;
int h = 0;
- switch(pad_stride_info.round())
+ switch (pad_stride_info.round())
{
case DimensionRoundingType::FLOOR:
- w = static_cast<int>(std::floor((static_cast<float>(width + pad_left + pad_right - (dilation_x * (kernel_width - 1) + 1)) / stride_x) + 1));
- h = static_cast<int>(std::floor((static_cast<float>(height + pad_top + pad_bottom - (dilation_y * (kernel_height - 1) + 1)) / stride_y) + 1));
+ w = static_cast<int>(std::floor(
+ (static_cast<float>(width + pad_left + pad_right - (dilation_x * (kernel_width - 1) + 1)) / stride_x) +
+ 1));
+ h = static_cast<int>(
+ std::floor((static_cast<float>(height + pad_top + pad_bottom - (dilation_y * (kernel_height - 1) + 1)) /
+ stride_y) +
+ 1));
break;
case DimensionRoundingType::CEIL:
- w = static_cast<int>(std::ceil((static_cast<float>(width + pad_left + pad_right - (dilation_x * (kernel_width - 1) + 1)) / stride_x) + 1));
- h = static_cast<int>(std::ceil((static_cast<float>(height + pad_top + pad_bottom - (dilation_y * (kernel_height - 1) + 1)) / stride_y) + 1));
+ w = static_cast<int>(std::ceil(
+ (static_cast<float>(width + pad_left + pad_right - (dilation_x * (kernel_width - 1) + 1)) / stride_x) +
+ 1));
+ h = static_cast<int>(
+ std::ceil((static_cast<float>(height + pad_top + pad_bottom - (dilation_y * (kernel_height - 1) + 1)) /
+ stride_y) +
+ 1));
break;
default:
ARM_COMPUTE_ERROR("Unsupported rounding type");
@@ -426,25 +331,103 @@ std::pair<unsigned int, unsigned int> arm_compute::scaled_dimensions(int width,
return std::make_pair(static_cast<unsigned int>(w), static_cast<unsigned int>(h));
}
-bool arm_compute::needs_serialized_reduction(ReductionOperation op, DataType dt, unsigned int axis)
+std::pair<int, int> scaled_dimensions_signed(
+ int width, int height, int kernel_width, int kernel_height, const PadStrideInfo &pad_stride_info)
+{
+ const int pad_left = pad_stride_info.pad_left();
+ const int pad_top = pad_stride_info.pad_top();
+ const int pad_right = pad_stride_info.pad_right();
+ const int pad_bottom = pad_stride_info.pad_bottom();
+ const int stride_x = pad_stride_info.stride().first;
+ const int stride_y = pad_stride_info.stride().second;
+ int w = 0;
+ int h = 0;
+ switch (pad_stride_info.round())
+ {
+ case DimensionRoundingType::FLOOR:
+ w = static_cast<int>(
+ std::floor((static_cast<float>(width + pad_left + pad_right - kernel_width) / stride_x) + 1));
+ h = static_cast<int>(
+ std::floor((static_cast<float>(height + pad_top + pad_bottom - kernel_height) / stride_y) + 1));
+ break;
+ case DimensionRoundingType::CEIL:
+ w = static_cast<int>(
+ std::ceil((static_cast<float>(width + pad_left + pad_right - kernel_width) / stride_x) + 1));
+ h = static_cast<int>(
+ std::ceil((static_cast<float>(height + pad_top + pad_bottom - kernel_height) / stride_y) + 1));
+ break;
+ default:
+ ARM_COMPUTE_ERROR("Unsupported rounding type");
+ }
+
+ return std::make_pair(static_cast<int>(w), static_cast<int>(h));
+}
+
+std::tuple<int, int, int> scaled_3d_dimensions_signed(int width,
+ int height,
+ int depth,
+ int kernel_width,
+ int kernel_height,
+ int kernel_depth,
+ const Pooling3dLayerInfo &pool3d_info)
+{
+ const int pad_left = pool3d_info.padding.left;
+ const int pad_top = pool3d_info.padding.top;
+ const int pad_right = pool3d_info.padding.right;
+ const int pad_bottom = pool3d_info.padding.bottom;
+ const int pad_front = pool3d_info.padding.front;
+ const int pad_back = pool3d_info.padding.back;
+ const int stride_x = pool3d_info.stride.x();
+ const int stride_y = pool3d_info.stride.y();
+ const int stride_z = pool3d_info.stride.z();
+ int w = 0;
+ int h = 0;
+ int d = 0;
+
+ switch (pool3d_info.round_type)
+ {
+ case DimensionRoundingType::FLOOR:
+ w = static_cast<int>(
+ std::floor((static_cast<float>(width + pad_left + pad_right - kernel_width) / stride_x) + 1));
+ h = static_cast<int>(
+ std::floor((static_cast<float>(height + pad_top + pad_bottom - kernel_height) / stride_y) + 1));
+ d = static_cast<int>(
+ std::floor((static_cast<float>(depth + pad_front + pad_back - kernel_depth) / stride_z) + 1));
+ break;
+ case DimensionRoundingType::CEIL:
+ w = static_cast<int>(
+ std::ceil((static_cast<float>(width + pad_left + pad_right - kernel_width) / stride_x) + 1));
+ h = static_cast<int>(
+ std::ceil((static_cast<float>(height + pad_top + pad_bottom - kernel_height) / stride_y) + 1));
+ d = static_cast<int>(
+ std::ceil((static_cast<float>(depth + pad_front + pad_back - kernel_depth) / stride_z) + 1));
+ break;
+ default:
+ ARM_COMPUTE_ERROR("Unsupported rounding type");
+ }
+
+ return std::make_tuple(static_cast<int>(w), static_cast<int>(h), static_cast<int>(d));
+}
+
+bool needs_serialized_reduction(ReductionOperation op, DataType dt, unsigned int axis)
{
const bool is_min_max = (op == ReductionOperation::MAX || op == ReductionOperation::MIN);
const bool is_quantized_type = is_data_type_quantized(dt);
const bool is_first_dim = (axis == 0);
- return !is_first_dim || is_min_max || is_quantized_type;
+ return !is_first_dim || (is_quantized_type && !is_min_max);
}
-QuantizationInfo arm_compute::get_softmax_output_quantization_info(DataType input_type, bool is_log)
+QuantizationInfo get_softmax_output_quantization_info(DataType input_type, bool is_log)
{
// Note: Output quantization info for softmax should always have
// * Softmax with QASYMM8: scale = 1/256, offset = 0
// * Softmax with QASYMM8_SIGNED: scale = 1/256, offset = -128
// * LogSoftmax with QASYMM8: scale = 1/256, offset = 0
// * LogSoftmax with QASYMM8_SIGNED: scale = 16/256, offset = 127
- if(is_data_type_quantized_asymmetric_signed(input_type))
+ if (is_data_type_quantized_asymmetric_signed(input_type))
{
- if(is_log)
+ if (is_log)
{
return QuantizationInfo(16.f / 256, 127);
}
@@ -456,37 +439,72 @@ QuantizationInfo arm_compute::get_softmax_output_quantization_info(DataType inpu
return QuantizationInfo(1.f / 256, 0);
}
-float arm_compute::calculate_resize_ratio(size_t input_size, size_t output_size, bool align_corners)
+std::pair<int32_t, int32_t> get_quantized_activation_min_max(const ActivationLayerInfo &act_info,
+ DataType data_type,
+ UniformQuantizationInfo oq_info)
{
- const size_t offset = align_corners ? 1 : 0;
- const auto in = input_size - offset;
- const auto out = output_size - offset;
+ const bool is_qasymm8_signed = is_data_type_quantized_asymmetric_signed(data_type);
+ const auto a = act_info.a();
+ const auto b = act_info.b();
+ const int a_int = is_qasymm8_signed ? quantize_qasymm8_signed(a, oq_info) : quantize_qasymm8(a, oq_info);
+ const int b_int = is_qasymm8_signed ? quantize_qasymm8_signed(b, oq_info) : quantize_qasymm8(b, oq_info);
+ const auto type_max_value = std::get<1>(get_min_max(data_type)).get<int32_t>();
- ARM_COMPUTE_ERROR_ON((input_size == 0 || output_size == 0) && offset == 1);
- ARM_COMPUTE_ERROR_ON(out == 0);
+ const int32_t min_activation = act_info.activation() != ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU
+ ? std::min(oq_info.offset, type_max_value)
+ : b_int;
+ const int32_t max_activation =
+ act_info.activation() == ActivationLayerInfo::ActivationFunction::RELU ? type_max_value : a_int;
- return static_cast<float>(in) / static_cast<float>(out);
+ return std::make_pair(min_activation, max_activation);
}
-std::pair<int32_t, int32_t> arm_compute::get_quantized_activation_min_max(ActivationLayerInfo act_info, DataType data_type, UniformQuantizationInfo oq_info)
+std::unordered_map<const ITensorInfo *, PaddingSize> get_padding_info(std::initializer_list<const ITensor *> tensors)
{
- const bool is_qasymm8_signed = is_data_type_quantized_asymmetric_signed(data_type);
- const auto a = act_info.a();
- const auto b = act_info.b();
- const int a_int = is_qasymm8_signed ? quantize_qasymm8_signed(a, oq_info) : quantize_qasymm8(a, oq_info);
- const int b_int = is_qasymm8_signed ? quantize_qasymm8_signed(b, oq_info) : quantize_qasymm8(b, oq_info);
- const auto type_max_value = std::get<1>(get_min_max(data_type)).get<int32_t>();
+ std::unordered_map<const ITensorInfo *, PaddingSize> res;
- const int32_t min_activation = act_info.activation() != ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU ? oq_info.offset : b_int;
- const int32_t max_activation = act_info.activation() == ActivationLayerInfo::ActivationFunction::RELU ? type_max_value : a_int;
+ for (const ITensor *tensor : tensors)
+ {
+ if (tensor)
+ {
+ res.insert({tensor->info(), tensor->info()->padding()});
+ }
+ }
- return std::make_pair(min_activation, max_activation);
+ return res;
+}
+
+std::unordered_map<const ITensorInfo *, PaddingSize> get_padding_info(std::initializer_list<const ITensorInfo *> infos)
+{
+ std::unordered_map<const ITensorInfo *, PaddingSize> res;
+
+ for (const ITensorInfo *info : infos)
+ {
+ if (info)
+ {
+ res.insert({info, info->padding()});
+ }
+ }
+
+ return res;
+}
+
+bool has_padding_changed(const std::unordered_map<const ITensorInfo *, PaddingSize> &padding_map)
+{
+ return std::find_if(padding_map.begin(), padding_map.end(),
+ [](const std::pair<const ITensorInfo *, PaddingSize> &padding_info)
+ { return (padding_info.first->padding() != padding_info.second); }) != padding_map.end();
}
#ifdef ARM_COMPUTE_ASSERTS_ENABLED
-void arm_compute::print_consecutive_elements(std::ostream &s, DataType dt, const uint8_t *ptr, unsigned int n, int stream_width, const std::string &element_delim)
+void print_consecutive_elements(std::ostream &s,
+ DataType dt,
+ const uint8_t *ptr,
+ unsigned int n,
+ int stream_width,
+ const std::string &element_delim)
{
- switch(dt)
+ switch (dt)
{
case DataType::U8:
case DataType::QASYMM8:
@@ -496,39 +514,55 @@ void arm_compute::print_consecutive_elements(std::ostream &s, DataType dt, const
case DataType::QSYMM8:
case DataType::QASYMM8_SIGNED:
case DataType::QSYMM8_PER_CHANNEL:
- print_consecutive_elements_impl<int8_t>(s, reinterpret_cast<const int8_t *>(ptr), n, stream_width, element_delim);
+ print_consecutive_elements_impl<int8_t>(s, reinterpret_cast<const int8_t *>(ptr), n, stream_width,
+ element_delim);
break;
case DataType::U16:
case DataType::QASYMM16:
- print_consecutive_elements_impl<uint16_t>(s, reinterpret_cast<const uint16_t *>(ptr), n, stream_width, element_delim);
+ print_consecutive_elements_impl<uint16_t>(s, reinterpret_cast<const uint16_t *>(ptr), n, stream_width,
+ element_delim);
break;
case DataType::S16:
case DataType::QSYMM16:
- print_consecutive_elements_impl<int16_t>(s, reinterpret_cast<const int16_t *>(ptr), n, stream_width, element_delim);
+ print_consecutive_elements_impl<int16_t>(s, reinterpret_cast<const int16_t *>(ptr), n, stream_width,
+ element_delim);
break;
case DataType::U32:
- print_consecutive_elements_impl<uint32_t>(s, reinterpret_cast<const uint32_t *>(ptr), n, stream_width, element_delim);
+ print_consecutive_elements_impl<uint32_t>(s, reinterpret_cast<const uint32_t *>(ptr), n, stream_width,
+ element_delim);
break;
case DataType::S32:
- print_consecutive_elements_impl<int32_t>(s, reinterpret_cast<const int32_t *>(ptr), n, stream_width, element_delim);
+ print_consecutive_elements_impl<int32_t>(s, reinterpret_cast<const int32_t *>(ptr), n, stream_width,
+ element_delim);
+ break;
+ case DataType::U64:
+ print_consecutive_elements_impl<uint64_t>(s, reinterpret_cast<const uint64_t *>(ptr), n, stream_width,
+ element_delim);
+ break;
+ case DataType::S64:
+ print_consecutive_elements_impl<int64_t>(s, reinterpret_cast<const int64_t *>(ptr), n, stream_width,
+ element_delim);
break;
case DataType::BFLOAT16:
- print_consecutive_elements_impl<bfloat16>(s, reinterpret_cast<const bfloat16 *>(ptr), n, stream_width, element_delim);
+ print_consecutive_elements_impl<bfloat16>(s, reinterpret_cast<const bfloat16 *>(ptr), n, stream_width,
+ element_delim);
break;
case DataType::F16:
- print_consecutive_elements_impl<half>(s, reinterpret_cast<const half *>(ptr), n, stream_width, element_delim);
+ print_consecutive_elements_impl<half>(s, reinterpret_cast<const half *>(ptr), n, stream_width,
+ element_delim);
break;
case DataType::F32:
- print_consecutive_elements_impl<float>(s, reinterpret_cast<const float *>(ptr), n, stream_width, element_delim);
+ print_consecutive_elements_impl<float>(s, reinterpret_cast<const float *>(ptr), n, stream_width,
+ element_delim);
break;
default:
ARM_COMPUTE_ERROR("Undefined element size for given data type");
}
}
-int arm_compute::max_consecutive_elements_display_width(std::ostream &s, DataType dt, const uint8_t *ptr, unsigned int n)
+int max_consecutive_elements_display_width(std::ostream &s, DataType dt, const uint8_t *ptr, unsigned int n)
{
- switch(dt)
+ switch (dt)
{
case DataType::U8:
case DataType::QASYMM8:
@@ -548,6 +582,10 @@ int arm_compute::max_consecutive_elements_display_width(std::ostream &s, DataTyp
return max_consecutive_elements_display_width_impl<uint32_t>(s, reinterpret_cast<const uint32_t *>(ptr), n);
case DataType::S32:
return max_consecutive_elements_display_width_impl<int32_t>(s, reinterpret_cast<const int32_t *>(ptr), n);
+ case DataType::U64:
+ return max_consecutive_elements_display_width_impl<uint64_t>(s, reinterpret_cast<const uint64_t *>(ptr), n);
+ case DataType::S64:
+ return max_consecutive_elements_display_width_impl<int64_t>(s, reinterpret_cast<const int64_t *>(ptr), n);
case DataType::BFLOAT16:
return max_consecutive_elements_display_width_impl<bfloat16>(s, reinterpret_cast<const bfloat16 *>(ptr), n);
case DataType::F16:
@@ -560,3 +598,5 @@ int arm_compute::max_consecutive_elements_display_width(std::ostream &s, DataTyp
return 0;
}
#endif /* ARM_COMPUTE_ASSERTS_ENABLED */
+
+} // namespace arm_compute
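
Beyond the clang-format restyling and the new helpers, the one behavioural change worth calling out is in needs_serialized_reduction: the old predicate serialized every quantized reduction and every MIN/MAX reduction, while the new one lets MIN/MAX run in parallel even on quantized types, serializing only non-MIN/MAX quantized reductions (plus anything not on axis 0). Below is a minimal standalone sketch of the two predicates taken directly from the deleted and added lines of this hunk; the stand-in enum and the bool parameter replacing is_data_type_quantized(dt) are simplifications for illustration, not the real arm_compute API.

```cpp
// Hedged sketch: side-by-side restatement of the predicate change in this
// patch, using a stand-in enum instead of the real arm_compute headers.
#include <iostream>

enum class ReductionOperation { MAX, MIN, SUM };

// Old behaviour (deleted line): any MIN/MAX reduction is serialized.
bool needs_serialized_reduction_old(ReductionOperation op, bool is_quantized, unsigned int axis)
{
    const bool is_min_max   = (op == ReductionOperation::MAX || op == ReductionOperation::MIN);
    const bool is_first_dim = (axis == 0);
    return !is_first_dim || is_min_max || is_quantized;
}

// New behaviour (added line): quantized MIN/MAX may now run in parallel;
// only other quantized reductions on the first dimension stay serialized.
bool needs_serialized_reduction_new(ReductionOperation op, bool is_quantized, unsigned int axis)
{
    const bool is_min_max   = (op == ReductionOperation::MAX || op == ReductionOperation::MIN);
    const bool is_first_dim = (axis == 0);
    return !is_first_dim || (is_quantized && !is_min_max);
}

int main()
{
    // Quantized MAX on axis 0: serialized before this patch, parallel after.
    std::cout << needs_serialized_reduction_old(ReductionOperation::MAX, true, 0) << '\n'; // 1
    std::cout << needs_serialized_reduction_new(ReductionOperation::MAX, true, 0) << '\n'; // 0
}
```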