Diffstat (limited to 'utils')
-rw-r--r-- | utils/BUILD.bazel                      |   48
-rw-r--r-- | utils/CommonGraphOptions.cpp           |   76
-rw-r--r-- | utils/CommonGraphOptions.h             |   30
-rw-r--r-- | utils/GraphUtils.cpp                   |  257
-rw-r--r-- | utils/GraphUtils.h                     |  107
-rw-r--r-- | utils/ImageLoader.h                    |  146
-rw-r--r-- | utils/TypePrinter.h                    | 1851
-rw-r--r-- | utils/Utils.cpp                        |   72
-rw-r--r-- | utils/Utils.h                          |  246
-rw-r--r-- | utils/command_line/CommandLineParser.h |   57
-rw-r--r-- | utils/command_line/EnumListOption.h    |   25
-rw-r--r-- | utils/command_line/EnumOption.h        |   17
-rw-r--r-- | utils/command_line/ListOption.h        |   13
-rw-r--r-- | utils/command_line/Option.h            |    9
-rw-r--r-- | utils/command_line/SimpleOption.h      |    7
-rw-r--r-- | utils/command_line/ToggleOption.h      |   12
16 files changed, 2293 insertions, 680 deletions
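
For readers of the CommonGraphOptions hunks below: a minimal usage sketch (not part of this patch) showing how the options class is typically wired into a graph example. It relies only on the CommonGraphOptions(CommandLineParser &) constructor, consume_common_graph_parameters() and the CommonGraphParams stream operator that appear in the diff; the main() scaffold is illustrative, and parser.parse() is assumed to have its usual (argc, argv) signature.

#include "utils/CommonGraphOptions.h"
#include "utils/command_line/CommandLineParser.h"

#include <iostream>

int main(int argc, char **argv)
{
    arm_compute::utils::CommandLineParser  parser;
    arm_compute::utils::CommonGraphOptions options(parser); // registers --target, --type and the other common options

    parser.parse(argc, argv);

    // Translate the parsed options into the plain CommonGraphParams struct.
    arm_compute::utils::CommonGraphParams params = consume_common_graph_parameters(options);

    // CommonGraphParams has a formatted stream operator (see CommonGraphOptions.cpp).
    std::cout << params << std::endl;
    return 0;
}
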
diff --git a/utils/BUILD.bazel b/utils/BUILD.bazel new file mode 100644 index 0000000000..2be7ee193b --- /dev/null +++ b/utils/BUILD.bazel @@ -0,0 +1,48 @@ +# Copyright (c) 2023 Arm Limited. +# +# SPDX-License-Identifier: MIT +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to +# deal in the Software without restriction, including without limitation the +# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or +# sell copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +cc_library( + name = "utils", + srcs = glob( + [ + "**/*.cpp" + ], + exclude = glob( + [ + "CommonGraphOptions.cpp", + ]), + ), + hdrs = glob(["**/*.h"]), + visibility = ["//visibility:public"], + deps = [ + "//arm_compute:core_headers", + "//arm_compute:graph_headers", + "//arm_compute:runtime_headers", + "//include", + "//support", + ], +) + +exports_files( + ["CommonGraphOptions.cpp"], + visibility = ["//visibility:public"] +) diff --git a/utils/CommonGraphOptions.cpp b/utils/CommonGraphOptions.cpp index c0270726da..42524d802d 100644 --- a/utils/CommonGraphOptions.cpp +++ b/utils/CommonGraphOptions.cpp @@ -37,15 +37,15 @@ namespace { std::pair<unsigned int, unsigned int> parse_validation_range(const std::string &validation_range) { - std::pair<unsigned int /* start */, unsigned int /* end */> range = { 0, std::numeric_limits<unsigned int>::max() }; - if(!validation_range.empty()) + std::pair<unsigned int /* start */, unsigned int /* end */> range = {0, std::numeric_limits<unsigned int>::max()}; + if (!validation_range.empty()) { std::string str; std::stringstream stream(validation_range); // Get first value std::getline(stream, str, ','); - if(stream.fail()) + if (stream.fail()) { return range; } @@ -56,7 +56,7 @@ std::pair<unsigned int, unsigned int> parse_validation_range(const std::string & // Get second value std::getline(stream, str); - if(stream.fail()) + if (stream.fail()) { range.second = range.first; return range; @@ -88,24 +88,26 @@ namespace utils os << "Tuner mode : " << common_params.tuner_mode << std::endl; os << "Tuner file : " << common_params.tuner_file << std::endl; os << "MLGO file : " << common_params.mlgo_file << std::endl; - os << "Fast math enabled? : " << (common_params.fast_math_hint == FastMathHint::Enabled ? true_str : false_str) << std::endl; - if(!common_params.data_path.empty()) + os << "Fast math enabled? : " << (common_params.fast_math_hint == FastMathHint::Enabled ? 
true_str : false_str) + << std::endl; + if (!common_params.data_path.empty()) { os << "Data path : " << common_params.data_path << std::endl; } - if(!common_params.image.empty()) + if (!common_params.image.empty()) { os << "Image file : " << common_params.image << std::endl; } - if(!common_params.labels.empty()) + if (!common_params.labels.empty()) { os << "Labels file : " << common_params.labels << std::endl; } - if(!common_params.validation_file.empty()) + if (!common_params.validation_file.empty()) { - os << "Validation range : " << common_params.validation_range_start << "-" << common_params.validation_range_end << std::endl; + os << "Validation range : " << common_params.validation_range_start << "-" << common_params.validation_range_end + << std::endl; os << "Validation file : " << common_params.validation_file << std::endl; - if(!common_params.validation_path.empty()) + if (!common_params.validation_path.empty()) { os << "Validation path : " << common_params.validation_path << std::endl; } @@ -134,33 +136,25 @@ CommonGraphOptions::CommonGraphOptions(CommandLineParser &parser) tuner_file(parser.add_option<SimpleOption<std::string>>("tuner-file")), mlgo_file(parser.add_option<SimpleOption<std::string>>("mlgo-file")) { - std::set<arm_compute::graph::Target> supported_targets - { + std::set<arm_compute::graph::Target> supported_targets{ Target::NEON, Target::CL, Target::CLVK, }; - std::set<arm_compute::DataType> supported_data_types - { + std::set<arm_compute::DataType> supported_data_types{ DataType::F16, DataType::F32, DataType::QASYMM8, DataType::QASYMM8_SIGNED, }; - std::set<DataLayout> supported_data_layouts - { + std::set<DataLayout> supported_data_layouts{ DataLayout::NHWC, DataLayout::NCHW, }; - const std::set<CLTunerMode> supported_tuner_modes - { - CLTunerMode::EXHAUSTIVE, - CLTunerMode::NORMAL, - CLTunerMode::RAPID - }; + const std::set<CLTunerMode> supported_tuner_modes{CLTunerMode::EXHAUSTIVE, CLTunerMode::NORMAL, CLTunerMode::RAPID}; target = parser.add_option<EnumOption<Target>>("target", supported_targets, Target::NEON); data_type = parser.add_option<EnumOption<DataType>>("type", supported_data_types, DataType::F32); @@ -175,11 +169,10 @@ CommonGraphOptions::CommonGraphOptions(CommandLineParser &parser) data_layout->set_help("Data layout to use"); enable_tuner->set_help("Enable OpenCL dynamic tuner"); enable_cl_cache->set_help("Enable OpenCL program caches"); - tuner_mode->set_help( - "Configures the time taken by the tuner to tune. " - "Exhaustive: slowest but produces the most performant LWS configuration. " - "Normal: slow but produces the LWS configurations on par with Exhaustive most of the time. " - "Rapid: fast but produces less performant LWS configurations"); + tuner_mode->set_help("Configures the time taken by the tuner to tune. " + "Exhaustive: slowest but produces the most performant LWS configuration. " + "Normal: slow but produces the LWS configurations on par with Exhaustive most of the time. " + "Rapid: fast but produces less performant LWS configurations"); fast_math_hint->set_help("Enable fast math"); data_path->set_help("Path where graph parameters reside"); image->set_help("Input image for the graph"); @@ -193,8 +186,9 @@ CommonGraphOptions::CommonGraphOptions(CommandLineParser &parser) CommonGraphParams consume_common_graph_parameters(CommonGraphOptions &options) { - FastMathHint fast_math_hint_value = options.fast_math_hint->value() ? 
FastMathHint::Enabled : FastMathHint::Disabled; - auto validation_range = parse_validation_range(options.validation_range->value()); + FastMathHint fast_math_hint_value = + options.fast_math_hint->value() ? FastMathHint::Enabled : FastMathHint::Disabled; + auto validation_range = parse_validation_range(options.validation_range->value()); CommonGraphParams common_params; common_params.help = options.help->is_set() ? options.help->value() : false; @@ -202,19 +196,21 @@ CommonGraphParams consume_common_graph_parameters(CommonGraphOptions &options) common_params.batches = options.batches->value(); common_params.target = options.target->value(); common_params.data_type = options.data_type->value(); - if(options.data_layout->is_set()) + if (options.data_layout->is_set()) { common_params.data_layout = options.data_layout->value(); } - common_params.enable_tuner = options.enable_tuner->is_set() ? options.enable_tuner->value() : false; - common_params.enable_cl_cache = common_params.target == arm_compute::graph::Target::NEON ? false : (options.enable_cl_cache->is_set() ? options.enable_cl_cache->value() : true); - common_params.tuner_mode = options.tuner_mode->value(); - common_params.fast_math_hint = options.fast_math_hint->is_set() ? fast_math_hint_value : FastMathHint::Disabled; - common_params.data_path = options.data_path->value(); - common_params.image = options.image->value(); - common_params.labels = options.labels->value(); - common_params.validation_file = options.validation_file->value(); - common_params.validation_path = options.validation_path->value(); + common_params.enable_tuner = options.enable_tuner->is_set() ? options.enable_tuner->value() : false; + common_params.enable_cl_cache = common_params.target == arm_compute::graph::Target::NEON + ? false + : (options.enable_cl_cache->is_set() ? options.enable_cl_cache->value() : true); + common_params.tuner_mode = options.tuner_mode->value(); + common_params.fast_math_hint = options.fast_math_hint->is_set() ? 
fast_math_hint_value : FastMathHint::Disabled; + common_params.data_path = options.data_path->value(); + common_params.image = options.image->value(); + common_params.labels = options.labels->value(); + common_params.validation_file = options.validation_file->value(); + common_params.validation_path = options.validation_path->value(); common_params.validation_range_start = validation_range.first; common_params.validation_range_end = validation_range.second; common_params.tuner_file = options.tuner_file->value(); diff --git a/utils/CommonGraphOptions.h b/utils/CommonGraphOptions.h index afdb78b1be..c42e06cb84 100644 --- a/utils/CommonGraphOptions.h +++ b/utils/CommonGraphOptions.h @@ -24,13 +24,13 @@ #ifndef ARM_COMPUTE_EXAMPLES_UTILS_COMMON_GRAPH_OPTIONS #define ARM_COMPUTE_EXAMPLES_UTILS_COMMON_GRAPH_OPTIONS -#include "utils/command_line/CommandLineOptions.h" -#include "utils/command_line/CommandLineParser.h" - #include "arm_compute/graph/TypeLoader.h" #include "arm_compute/graph/TypePrinter.h" #include "arm_compute/runtime/CL/CLTunerTypes.h" +#include "utils/command_line/CommandLineOptions.h" +#include "utils/command_line/CommandLineParser.h" + namespace arm_compute { namespace utils @@ -92,16 +92,16 @@ namespace utils /** Structure holding all the common graph parameters */ struct CommonGraphParams { - bool help{ false }; - int threads{ 0 }; - int batches{ 1 }; - arm_compute::graph::Target target{ arm_compute::graph::Target::NEON }; - arm_compute::DataType data_type{ DataType::F32 }; - arm_compute::DataLayout data_layout{ DataLayout::NHWC }; - bool enable_tuner{ false }; - bool enable_cl_cache{ false }; - arm_compute::CLTunerMode tuner_mode{ CLTunerMode::NORMAL }; - arm_compute::graph::FastMathHint fast_math_hint{ arm_compute::graph::FastMathHint::Disabled }; + bool help{false}; + int threads{0}; + int batches{1}; + arm_compute::graph::Target target{arm_compute::graph::Target::NEON}; + arm_compute::DataType data_type{DataType::F32}; + arm_compute::DataLayout data_layout{DataLayout::NHWC}; + bool enable_tuner{false}; + bool enable_cl_cache{false}; + arm_compute::CLTunerMode tuner_mode{CLTunerMode::NORMAL}; + arm_compute::graph::FastMathHint fast_math_hint{arm_compute::graph::FastMathHint::Disabled}; std::string data_path{}; std::string image{}; std::string labels{}; @@ -109,8 +109,8 @@ struct CommonGraphParams std::string validation_path{}; std::string tuner_file{}; std::string mlgo_file{}; - unsigned int validation_range_start{ 0 }; - unsigned int validation_range_end{ std::numeric_limits<unsigned int>::max() }; + unsigned int validation_range_start{0}; + unsigned int validation_range_end{std::numeric_limits<unsigned int>::max()}; }; /** Formatted output of the CommonGraphParams type diff --git a/utils/GraphUtils.cpp b/utils/GraphUtils.cpp index 20e1369e73..7e618c9de5 100644 --- a/utils/GraphUtils.cpp +++ b/utils/GraphUtils.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2021 Arm Limited. + * Copyright (c) 2017-2021, 2024 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -43,18 +43,21 @@ using namespace arm_compute::graph_utils; namespace { -std::pair<arm_compute::TensorShape, arm_compute::PermutationVector> compute_permutation_parameters(const arm_compute::TensorShape &shape, - arm_compute::DataLayout data_layout) +std::pair<arm_compute::TensorShape, arm_compute::PermutationVector> +compute_permutation_parameters(const arm_compute::TensorShape &shape, arm_compute::DataLayout data_layout) { // Set permutation parameters if needed arm_compute::TensorShape permuted_shape = shape; arm_compute::PermutationVector perm; // Permute only if num_dimensions greater than 2 - if(shape.num_dimensions() > 2) + if (shape.num_dimensions() > 2) { - perm = (data_layout == arm_compute::DataLayout::NHWC) ? arm_compute::PermutationVector(2U, 0U, 1U) : arm_compute::PermutationVector(1U, 2U, 0U); + perm = (data_layout == arm_compute::DataLayout::NHWC) ? arm_compute::PermutationVector(2U, 0U, 1U) + : arm_compute::PermutationVector(1U, 2U, 0U); - arm_compute::PermutationVector perm_shape = (data_layout == arm_compute::DataLayout::NCHW) ? arm_compute::PermutationVector(2U, 0U, 1U) : arm_compute::PermutationVector(1U, 2U, 0U); + arm_compute::PermutationVector perm_shape = (data_layout == arm_compute::DataLayout::NCHW) + ? arm_compute::PermutationVector(2U, 0U, 1U) + : arm_compute::PermutationVector(1U, 2U, 0U); arm_compute::permute(permuted_shape, perm_shape); } @@ -62,17 +65,16 @@ std::pair<arm_compute::TensorShape, arm_compute::PermutationVector> compute_perm } } // namespace -TFPreproccessor::TFPreproccessor(float min_range, float max_range) - : _min_range(min_range), _max_range(max_range) +TFPreproccessor::TFPreproccessor(float min_range, float max_range) : _min_range(min_range), _max_range(max_range) { } void TFPreproccessor::preprocess(ITensor &tensor) { - if(tensor.info()->data_type() == DataType::F32) + if (tensor.info()->data_type() == DataType::F32) { preprocess_typed<float>(tensor); } - else if(tensor.info()->data_type() == DataType::F16) + else if (tensor.info()->data_type() == DataType::F16) { preprocess_typed<half>(tensor); } @@ -89,19 +91,20 @@ void TFPreproccessor::preprocess_typed(ITensor &tensor) window.use_tensor_dimensions(tensor.info()->tensor_shape()); const float range = _max_range - _min_range; - execute_window_loop(window, [&](const Coordinates & id) - { - const T value = *reinterpret_cast<T *>(tensor.ptr_to_element(id)); - float res = value / 255.f; // Normalize to [0, 1] - res = res * range + _min_range; // Map to [min_range, max_range] - *reinterpret_cast<T *>(tensor.ptr_to_element(id)) = res; - }); + execute_window_loop(window, + [&](const Coordinates &id) + { + const T value = *reinterpret_cast<T *>(tensor.ptr_to_element(id)); + float res = value / 255.f; // Normalize to [0, 1] + res = res * range + _min_range; // Map to [min_range, max_range] + *reinterpret_cast<T *>(tensor.ptr_to_element(id)) = res; + }); } CaffePreproccessor::CaffePreproccessor(std::array<float, 3> mean, bool bgr, float scale) : _mean(mean), _bgr(bgr), _scale(scale) { - if(_bgr) + if (_bgr) { std::swap(_mean[0], _mean[2]); } @@ -109,11 +112,11 @@ CaffePreproccessor::CaffePreproccessor(std::array<float, 3> mean, bool bgr, floa void CaffePreproccessor::preprocess(ITensor &tensor) { - if(tensor.info()->data_type() == DataType::F32) + if (tensor.info()->data_type() == DataType::F32) { preprocess_typed<float>(tensor); } - else if(tensor.info()->data_type() == DataType::F16) + else if (tensor.info()->data_type() == DataType::F16) { 
preprocess_typed<half>(tensor); } @@ -130,15 +133,16 @@ void CaffePreproccessor::preprocess_typed(ITensor &tensor) window.use_tensor_dimensions(tensor.info()->tensor_shape()); const int channel_idx = get_data_layout_dimension_index(tensor.info()->data_layout(), DataLayoutDimension::CHANNEL); - execute_window_loop(window, [&](const Coordinates & id) - { - const T value = *reinterpret_cast<T *>(tensor.ptr_to_element(id)) - T(_mean[id[channel_idx]]); - *reinterpret_cast<T *>(tensor.ptr_to_element(id)) = value * T(_scale); - }); + execute_window_loop(window, + [&](const Coordinates &id) + { + const T value = + *reinterpret_cast<T *>(tensor.ptr_to_element(id)) - T(_mean[id[channel_idx]]); + *reinterpret_cast<T *>(tensor.ptr_to_element(id)) = value * T(_scale); + }); } -PPMWriter::PPMWriter(std::string name, unsigned int maximum) - : _name(std::move(name)), _iterator(0), _maximum(maximum) +PPMWriter::PPMWriter(std::string name, unsigned int maximum) : _name(std::move(name)), _iterator(0), _maximum(maximum) { } @@ -150,23 +154,27 @@ bool PPMWriter::access_tensor(ITensor &tensor) arm_compute::utils::save_to_ppm(tensor, ss.str()); _iterator++; - if(_maximum == 0) + if (_maximum == 0) { return true; } return _iterator < _maximum; } -DummyAccessor::DummyAccessor(unsigned int maximum) - : _iterator(0), _maximum(maximum) +DummyAccessor::DummyAccessor(unsigned int maximum) : _iterator(0), _maximum(maximum) { } +bool DummyAccessor::access_tensor_data() +{ + return false; +} + bool DummyAccessor::access_tensor(ITensor &tensor) { ARM_COMPUTE_UNUSED(tensor); bool ret = _maximum == 0 || _iterator < _maximum; - if(_iterator == _maximum) + if (_iterator == _maximum) { _iterator = 0; } @@ -177,7 +185,8 @@ bool DummyAccessor::access_tensor(ITensor &tensor) return ret; } -NumPyAccessor::NumPyAccessor(std::string npy_path, TensorShape shape, DataType data_type, DataLayout data_layout, std::ostream &output_stream) +NumPyAccessor::NumPyAccessor( + std::string npy_path, TensorShape shape, DataType data_type, DataLayout data_layout, std::ostream &output_stream) : _npy_tensor(), _filename(std::move(npy_path)), _output_stream(output_stream) { NumPyBinLoader loader(_filename, data_layout); @@ -198,8 +207,10 @@ void NumPyAccessor::access_numpy_tensor(ITensor &tensor, T tolerance) int num_mismatches = utils::compare_tensor<T>(tensor, _npy_tensor, tolerance); float percentage_mismatches = static_cast<float>(num_mismatches) / num_elements; - _output_stream << "Results: " << 100.f - (percentage_mismatches * 100) << " % matches with the provided output[" << _filename << "]." << std::endl; - _output_stream << " " << num_elements - num_mismatches << " out of " << num_elements << " matches with the provided output[" << _filename << "]." << std::endl + _output_stream << "Results: " << 100.f - (percentage_mismatches * 100) << " % matches with the provided output[" + << _filename << "]." << std::endl; + _output_stream << " " << num_elements - num_mismatches << " out of " << num_elements + << " matches with the provided output[" << _filename << "]." 
<< std::endl << std::endl; } @@ -208,7 +219,7 @@ bool NumPyAccessor::access_tensor(ITensor &tensor) ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&tensor, 1, DataType::F32, DataType::QASYMM8); ARM_COMPUTE_ERROR_ON(_npy_tensor.info()->dimension(0) != tensor.info()->dimension(0)); - switch(tensor.info()->data_type()) + switch (tensor.info()->data_type()) { case DataType::QASYMM8: access_numpy_tensor<qasymm8_t>(tensor, 0); @@ -257,7 +268,7 @@ ImageAccessor::ImageAccessor(std::string filename, bool bgr, std::unique_ptr<IPr bool ImageAccessor::access_tensor(ITensor &tensor) { - if(!_already_loaded) + if (!_already_loaded) { auto image_loader = utils::ImageLoaderFactory::create(_filename); ARM_COMPUTE_EXIT_ON_MSG(image_loader == nullptr, "Unsupported image type"); @@ -268,27 +279,30 @@ bool ImageAccessor::access_tensor(ITensor &tensor) // Get permutated shape and permutation parameters TensorShape permuted_shape = tensor.info()->tensor_shape(); arm_compute::PermutationVector perm; - if(tensor.info()->data_layout() != DataLayout::NCHW) + if (tensor.info()->data_layout() != DataLayout::NCHW) { - std::tie(permuted_shape, perm) = compute_permutation_parameters(tensor.info()->tensor_shape(), tensor.info()->data_layout()); + std::tie(permuted_shape, perm) = + compute_permutation_parameters(tensor.info()->tensor_shape(), tensor.info()->data_layout()); } #ifdef __arm__ - ARM_COMPUTE_EXIT_ON_MSG_VAR(image_loader->width() != permuted_shape.x() || image_loader->height() != permuted_shape.y(), - "Failed to load image file: dimensions [%d,%d] not correct, expected [%" PRIu32 ",%" PRIu32 "].", - image_loader->width(), image_loader->height(), permuted_shape.x(), permuted_shape.y()); + ARM_COMPUTE_EXIT_ON_MSG_VAR( + image_loader->width() != permuted_shape.x() || image_loader->height() != permuted_shape.y(), + "Failed to load image file: dimensions [%d,%d] not correct, expected [%" PRIu32 ",%" PRIu32 "].", + image_loader->width(), image_loader->height(), permuted_shape.x(), permuted_shape.y()); #else // __arm__ - ARM_COMPUTE_EXIT_ON_MSG_VAR(image_loader->width() != permuted_shape.x() || image_loader->height() != permuted_shape.y(), - "Failed to load image file: dimensions [%d,%d] not correct, expected [%" PRIu64 ",%" PRIu64 "].", - image_loader->width(), image_loader->height(), - static_cast<uint64_t>(permuted_shape.x()), static_cast<uint64_t>(permuted_shape.y())); + ARM_COMPUTE_EXIT_ON_MSG_VAR( + image_loader->width() != permuted_shape.x() || image_loader->height() != permuted_shape.y(), + "Failed to load image file: dimensions [%d,%d] not correct, expected [%" PRIu64 ",%" PRIu64 "].", + image_loader->width(), image_loader->height(), static_cast<uint64_t>(permuted_shape.x()), + static_cast<uint64_t>(permuted_shape.y())); #endif // __arm__ // Fill the tensor with the PPM content (BGR) image_loader->fill_planar_tensor(tensor, _bgr); // Preprocess tensor - if(_preprocessor) + if (_preprocessor) { _preprocessor->preprocess(tensor); } @@ -305,7 +319,12 @@ ValidationInputAccessor::ValidationInputAccessor(const std::string & unsigned int start, unsigned int end, std::ostream &output_stream) - : _path(std::move(images_path)), _images(), _preprocessor(std::move(preprocessor)), _bgr(bgr), _offset(0), _output_stream(output_stream) + : _path(std::move(images_path)), + _images(), + _preprocessor(std::move(preprocessor)), + _bgr(bgr), + _offset(0), + _output_stream(output_stream) { ARM_COMPUTE_EXIT_ON_MSG(start > end, "Invalid validation range!"); @@ -317,10 +336,10 @@ ValidationInputAccessor::ValidationInputAccessor(const 
std::string & // Parse image names unsigned int counter = 0; - for(std::string line; !std::getline(ifs, line).fail() && counter <= end; ++counter) + for (std::string line; !std::getline(ifs, line).fail() && counter <= end; ++counter) { // Add image to process if withing range - if(counter >= start) + if (counter >= start) { std::stringstream linestream(line); std::string image_name; @@ -330,7 +349,7 @@ ValidationInputAccessor::ValidationInputAccessor(const std::string & } } } - catch(const std::ifstream::failure &e) + catch (const std::ifstream::failure &e) { ARM_COMPUTE_ERROR_VAR("Accessing %s: %s", image_list.c_str(), e.what()); } @@ -339,7 +358,7 @@ ValidationInputAccessor::ValidationInputAccessor(const std::string & bool ValidationInputAccessor::access_tensor(arm_compute::ITensor &tensor) { bool ret = _offset < _images.size(); - if(ret) + if (ret) { utils::JPEGLoader jpeg; @@ -351,28 +370,30 @@ bool ValidationInputAccessor::access_tensor(arm_compute::ITensor &tensor) // Get permutated shape and permutation parameters TensorShape permuted_shape = tensor.info()->tensor_shape(); arm_compute::PermutationVector perm; - if(tensor.info()->data_layout() != DataLayout::NCHW) + if (tensor.info()->data_layout() != DataLayout::NCHW) { - std::tie(permuted_shape, perm) = compute_permutation_parameters(tensor.info()->tensor_shape(), - tensor.info()->data_layout()); + std::tie(permuted_shape, perm) = + compute_permutation_parameters(tensor.info()->tensor_shape(), tensor.info()->data_layout()); } #ifdef __arm__ ARM_COMPUTE_EXIT_ON_MSG_VAR(jpeg.width() != permuted_shape.x() || jpeg.height() != permuted_shape.y(), - "Failed to load image file: dimensions [%d,%d] not correct, expected [%" PRIu32 ",%" PRIu32 "].", + "Failed to load image file: dimensions [%d,%d] not correct, expected [%" PRIu32 + ",%" PRIu32 "].", jpeg.width(), jpeg.height(), permuted_shape.x(), permuted_shape.y()); #else // __arm__ ARM_COMPUTE_EXIT_ON_MSG_VAR(jpeg.width() != permuted_shape.x() || jpeg.height() != permuted_shape.y(), - "Failed to load image file: dimensions [%d,%d] not correct, expected [%" PRIu64 ",%" PRIu64 "].", - jpeg.width(), jpeg.height(), - static_cast<uint64_t>(permuted_shape.x()), static_cast<uint64_t>(permuted_shape.y())); + "Failed to load image file: dimensions [%d,%d] not correct, expected [%" PRIu64 + ",%" PRIu64 "].", + jpeg.width(), jpeg.height(), static_cast<uint64_t>(permuted_shape.x()), + static_cast<uint64_t>(permuted_shape.y())); #endif // __arm__ // Fill the tensor with the JPEG content (BGR) jpeg.fill_planar_tensor(tensor, _bgr); // Preprocess tensor - if(_preprocessor) + if (_preprocessor) { _preprocessor->preprocess(tensor); } @@ -397,10 +418,10 @@ ValidationOutputAccessor::ValidationOutputAccessor(const std::string &image_list // Parse image correctly classified labels unsigned int counter = 0; - for(std::string line; !std::getline(ifs, line).fail() && counter <= end; ++counter) + for (std::string line; !std::getline(ifs, line).fail() && counter <= end; ++counter) { // Add label if within range - if(counter >= start) + if (counter >= start) { std::stringstream linestream(line); std::string image_name; @@ -411,7 +432,7 @@ ValidationOutputAccessor::ValidationOutputAccessor(const std::string &image_list } } } - catch(const std::ifstream::failure &e) + catch (const std::ifstream::failure &e) { ARM_COMPUTE_ERROR_VAR("Accessing %s: %s", image_list.c_str(), e.what()); } @@ -427,11 +448,11 @@ void ValidationOutputAccessor::reset() bool ValidationOutputAccessor::access_tensor(arm_compute::ITensor &tensor) 
{ bool ret = _offset < _results.size(); - if(ret) + if (ret) { // Get results std::vector<size_t> tensor_results; - switch(tensor.info()->data_type()) + switch (tensor.info()->data_type()) { case DataType::QASYMM8: tensor_results = access_predictions_tensor<uint8_t>(tensor); @@ -454,7 +475,7 @@ bool ValidationOutputAccessor::access_tensor(arm_compute::ITensor &tensor) } // Report top_n accuracy - if(_offset >= _results.size()) + if (_offset >= _results.size()) { report_top_n(1, _results.size(), _positive_samples_top1); report_top_n(5, _results.size(), _positive_samples_top5); @@ -476,23 +497,19 @@ std::vector<size_t> ValidationOutputAccessor::access_predictions_tensor(arm_comp // Sort results std::iota(std::begin(index), std::end(index), static_cast<size_t>(0)); - std::sort(std::begin(index), std::end(index), - [&](size_t a, size_t b) - { - return output_net[a] > output_net[b]; - }); + std::sort(std::begin(index), std::end(index), [&](size_t a, size_t b) { return output_net[a] > output_net[b]; }); return index; } -void ValidationOutputAccessor::aggregate_sample(const std::vector<size_t> &res, size_t &positive_samples, size_t top_n, size_t correct_label) +void ValidationOutputAccessor::aggregate_sample(const std::vector<size_t> &res, + size_t &positive_samples, + size_t top_n, + size_t correct_label) { - auto is_valid_label = [correct_label](size_t label) - { - return label == correct_label; - }; + auto is_valid_label = [correct_label](size_t label) { return label == correct_label; }; - if(std::any_of(std::begin(res), std::begin(res) + top_n, is_valid_label)) + if (std::any_of(std::begin(res), std::begin(res) + top_n, is_valid_label)) { ++positive_samples; } @@ -503,14 +520,15 @@ void ValidationOutputAccessor::report_top_n(size_t top_n, size_t total_samples, size_t negative_samples = total_samples - positive_samples; float accuracy = positive_samples / static_cast<float>(total_samples); - _output_stream << "----------Top " << top_n << " accuracy ----------" << std::endl - << std::endl; + _output_stream << "----------Top " << top_n << " accuracy ----------" << std::endl << std::endl; _output_stream << "Positive samples : " << positive_samples << std::endl; _output_stream << "Negative samples : " << negative_samples << std::endl; _output_stream << "Accuracy : " << accuracy << std::endl; } -DetectionOutputAccessor::DetectionOutputAccessor(const std::string &labels_path, std::vector<TensorShape> &imgs_tensor_shapes, std::ostream &output_stream) +DetectionOutputAccessor::DetectionOutputAccessor(const std::string &labels_path, + std::vector<TensorShape> &imgs_tensor_shapes, + std::ostream &output_stream) : _labels(), _tensor_shapes(std::move(imgs_tensor_shapes)), _output_stream(output_stream) { _labels.clear(); @@ -522,12 +540,12 @@ DetectionOutputAccessor::DetectionOutputAccessor(const std::string &labels_path, ifs.exceptions(std::ifstream::badbit); ifs.open(labels_path, std::ios::in | std::ios::binary); - for(std::string line; !std::getline(ifs, line).fail();) + for (std::string line; !std::getline(ifs, line).fail();) { _labels.emplace_back(line); } } - catch(const std::ifstream::failure &e) + catch (const std::ifstream::failure &e) { ARM_COMPUTE_ERROR_VAR("Accessing %s: %s", labels_path.c_str(), e.what()); } @@ -537,26 +555,24 @@ template <typename T> void DetectionOutputAccessor::access_predictions_tensor(ITensor &tensor) { const size_t num_detection = tensor.info()->valid_region().shape.y(); - const auto output_prt = reinterpret_cast<T *>(tensor.buffer() + 
tensor.info()->offset_first_element_in_bytes()); + const auto output_prt = reinterpret_cast<T *>(tensor.buffer() + tensor.info()->offset_first_element_in_bytes()); - if(num_detection > 0) + if (num_detection > 0) { - _output_stream << "---------------------- Detections ----------------------" << std::endl - << std::endl; + _output_stream << "---------------------- Detections ----------------------" << std::endl << std::endl; - _output_stream << std::left << std::setprecision(4) << std::setw(8) << "Image | " << std::setw(8) << "Label | " << std::setw(12) << "Confidence | " + _output_stream << std::left << std::setprecision(4) << std::setw(8) << "Image | " << std::setw(8) << "Label | " + << std::setw(12) << "Confidence | " << "[ xmin, ymin, xmax, ymax ]" << std::endl; - for(size_t i = 0; i < num_detection; ++i) + for (size_t i = 0; i < num_detection; ++i) { auto im = static_cast<const int>(output_prt[i * 7]); - _output_stream << std::setw(8) << im << std::setw(8) - << _labels[output_prt[i * 7 + 1]] << std::setw(12) << output_prt[i * 7 + 2] - << " [" << (output_prt[i * 7 + 3] * _tensor_shapes[im].x()) - << ", " << (output_prt[i * 7 + 4] * _tensor_shapes[im].y()) - << ", " << (output_prt[i * 7 + 5] * _tensor_shapes[im].x()) - << ", " << (output_prt[i * 7 + 6] * _tensor_shapes[im].y()) - << "]" << std::endl; + _output_stream << std::setw(8) << im << std::setw(8) << _labels[output_prt[i * 7 + 1]] << std::setw(12) + << output_prt[i * 7 + 2] << " [" << (output_prt[i * 7 + 3] * _tensor_shapes[im].x()) << ", " + << (output_prt[i * 7 + 4] * _tensor_shapes[im].y()) << ", " + << (output_prt[i * 7 + 5] * _tensor_shapes[im].x()) << ", " + << (output_prt[i * 7 + 6] * _tensor_shapes[im].y()) << "]" << std::endl; } } else @@ -569,7 +585,7 @@ bool DetectionOutputAccessor::access_tensor(ITensor &tensor) { ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&tensor, 1, DataType::F32); - switch(tensor.info()->data_type()) + switch (tensor.info()->data_type()) { case DataType::F32: access_predictions_tensor<float>(tensor); @@ -581,7 +597,9 @@ bool DetectionOutputAccessor::access_tensor(ITensor &tensor) return false; } -TopNPredictionsAccessor::TopNPredictionsAccessor(const std::string &labels_path, size_t top_n, std::ostream &output_stream) +TopNPredictionsAccessor::TopNPredictionsAccessor(const std::string &labels_path, + size_t top_n, + std::ostream &output_stream) : _labels(), _output_stream(output_stream), _top_n(top_n) { _labels.clear(); @@ -593,12 +611,12 @@ TopNPredictionsAccessor::TopNPredictionsAccessor(const std::string &labels_path, ifs.exceptions(std::ifstream::badbit); ifs.open(labels_path, std::ios::in | std::ios::binary); - for(std::string line; !std::getline(ifs, line).fail();) + for (std::string line; !std::getline(ifs, line).fail();) { _labels.emplace_back(line); } } - catch(const std::ifstream::failure &e) + catch (const std::ifstream::failure &e) { ARM_COMPUTE_ERROR_VAR("Accessing %s: %s", labels_path.c_str(), e.what()); } @@ -622,18 +640,13 @@ void TopNPredictionsAccessor::access_predictions_tensor(ITensor &tensor) // Sort results std::iota(std::begin(index), std::end(index), static_cast<size_t>(0)); std::sort(std::begin(index), std::end(index), - [&](size_t a, size_t b) - { - return classes_prob[a] > classes_prob[b]; - }); + [&](size_t a, size_t b) { return classes_prob[a] > classes_prob[b]; }); - _output_stream << "---------- Top " << _top_n << " predictions ----------" << std::endl - << std::endl; - for(size_t i = 0; i < _top_n; ++i) + _output_stream << "---------- Top " << _top_n << " 
predictions ----------" << std::endl << std::endl; + for (size_t i = 0; i < _top_n; ++i) { - _output_stream << std::fixed << std::setprecision(4) - << +classes_prob[index.at(i)] - << " - [id = " << index.at(i) << "]" + _output_stream << std::fixed << std::setprecision(4) << +classes_prob[index.at(i)] << " - [id = " << index.at(i) + << "]" << ", " << _labels[index.at(i)] << std::endl; } } @@ -643,7 +656,7 @@ bool TopNPredictionsAccessor::access_tensor(ITensor &tensor) ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&tensor, 1, DataType::F32, DataType::QASYMM8); ARM_COMPUTE_ERROR_ON(_labels.size() != tensor.info()->dimension(0)); - switch(tensor.info()->data_type()) + switch (tensor.info()->data_type()) { case DataType::QASYMM8: access_predictions_tensor<uint8_t>(tensor); @@ -668,9 +681,9 @@ void RandomAccessor::fill(ITensor &tensor, D &&distribution) { std::mt19937 gen(_seed); - if(tensor.info()->padding().empty() && (dynamic_cast<SubTensor *>(&tensor) == nullptr)) + if (tensor.info()->padding().empty() && (dynamic_cast<SubTensor *>(&tensor) == nullptr)) { - for(size_t offset = 0; offset < tensor.info()->total_size(); offset += tensor.info()->element_size()) + for (size_t offset = 0; offset < tensor.info()->total_size(); offset += tensor.info()->element_size()) { const auto value = static_cast<T>(distribution(gen)); *reinterpret_cast<T *>(tensor.buffer() + offset) = value; @@ -682,28 +695,29 @@ void RandomAccessor::fill(ITensor &tensor, D &&distribution) Window window; window.use_tensor_dimensions(tensor.info()->tensor_shape()); - execute_window_loop(window, [&](const Coordinates & id) - { - const auto value = static_cast<T>(distribution(gen)); - *reinterpret_cast<T *>(tensor.ptr_to_element(id)) = value; - }); + execute_window_loop(window, + [&](const Coordinates &id) + { + const auto value = static_cast<T>(distribution(gen)); + *reinterpret_cast<T *>(tensor.ptr_to_element(id)) = value; + }); } } bool RandomAccessor::access_tensor(ITensor &tensor) { - switch(tensor.info()->data_type()) + switch (tensor.info()->data_type()) { case DataType::QASYMM8: case DataType::U8: { - std::uniform_int_distribution<uint8_t> distribution_u8(_lower.get<uint8_t>(), _upper.get<uint8_t>()); + std::uniform_int_distribution<uint32_t> distribution_u8(_lower.get<uint8_t>(), _upper.get<uint8_t>()); fill<uint8_t>(tensor, distribution_u8); break; } case DataType::S8: { - std::uniform_int_distribution<int8_t> distribution_s8(_lower.get<int8_t>(), _upper.get<int8_t>()); + std::uniform_int_distribution<int32_t> distribution_s8(_lower.get<int8_t>(), _upper.get<int8_t>()); fill<int8_t>(tensor, distribution_s8); break; } @@ -745,7 +759,8 @@ bool RandomAccessor::access_tensor(ITensor &tensor) } case DataType::F16: { - arm_compute::utils::uniform_real_distribution_16bit<half> distribution_f16(_lower.get<float>(), _upper.get<float>()); + arm_compute::utils::uniform_real_distribution_16bit<half> distribution_f16(_lower.get<float>(), + _upper.get<float>()); fill<half>(tensor, distribution_f16); break; } @@ -774,7 +789,7 @@ NumPyBinLoader::NumPyBinLoader(std::string filename, DataLayout file_layout) bool NumPyBinLoader::access_tensor(ITensor &tensor) { - if(!_already_loaded) + if (!_already_loaded) { utils::NPYLoader loader; loader.open(_filename, _file_layout); diff --git a/utils/GraphUtils.h b/utils/GraphUtils.h index d2b05f27c6..b48300bd01 100644 --- a/utils/GraphUtils.h +++ b/utils/GraphUtils.h @@ -66,7 +66,7 @@ public: * @param[in] bgr Boolean specifying if the preprocessing should assume BGR format * @param[in] scale Scale 
value */ - CaffePreproccessor(std::array<float, 3> mean = std::array<float, 3> { { 0, 0, 0 } }, bool bgr = true, float scale = 1.f); + CaffePreproccessor(std::array<float, 3> mean = std::array<float, 3>{{0, 0, 0}}, bool bgr = true, float scale = 1.f); void preprocess(ITensor &tensor) override; private: @@ -74,8 +74,8 @@ private: void preprocess_typed(ITensor &tensor); std::array<float, 3> _mean; - bool _bgr; - float _scale; + bool _bgr; + float _scale; }; /** TF preproccessor */ @@ -135,6 +135,7 @@ public: DummyAccessor(DummyAccessor &&) = default; // Inherited methods overriden: + bool access_tensor_data() override; bool access_tensor(ITensor &tensor) override; private: @@ -154,7 +155,11 @@ public: * @param[in] data_layout (Optional) DataLayout of the numpy tensor data. * @param[out] output_stream (Optional) Output stream */ - NumPyAccessor(std::string npy_path, TensorShape shape, DataType data_type, DataLayout data_layout = DataLayout::NCHW, std::ostream &output_stream = std::cout); + NumPyAccessor(std::string npy_path, + TensorShape shape, + DataType data_type, + DataLayout data_layout = DataLayout::NCHW, + std::ostream &output_stream = std::cout); /** Allow instances of this class to be move constructed */ NumPyAccessor(NumPyAccessor &&) = default; /** Prevent instances of this class from being copied (As this class contains pointers) */ @@ -352,7 +357,9 @@ public: * @param[in] imgs_tensor_shapes Network input images tensor shapes. * @param[out] output_stream (Optional) Output stream */ - DetectionOutputAccessor(const std::string &labels_path, std::vector<TensorShape> &imgs_tensor_shapes, std::ostream &output_stream = std::cout); + DetectionOutputAccessor(const std::string &labels_path, + std::vector<TensorShape> &imgs_tensor_shapes, + std::ostream &output_stream = std::cout); /** Allow instances of this class to be move constructed */ DetectionOutputAccessor(DetectionOutputAccessor &&) = default; /** Prevent instances of this class from being copied (As this class contains pointers) */ @@ -421,7 +428,7 @@ public: private: template <typename T, typename D> - void fill(ITensor &tensor, D &&distribution); + void fill(ITensor &tensor, D &&distribution); PixelValue _lower; PixelValue _upper; std::random_device::result_type _seed; @@ -457,7 +464,8 @@ private: * * @return A ramdom accessor */ -inline std::unique_ptr<graph::ITensorAccessor> get_random_accessor(PixelValue lower, PixelValue upper, const std::random_device::result_type seed = 0) +inline std::unique_ptr<graph::ITensorAccessor> +get_random_accessor(PixelValue lower, PixelValue upper, const std::random_device::result_type seed = 0) { return std::make_unique<RandomAccessor>(lower, upper, seed); } @@ -472,11 +480,10 @@ inline std::unique_ptr<graph::ITensorAccessor> get_random_accessor(PixelValue lo * * @return An appropriate tensor accessor */ -inline std::unique_ptr<graph::ITensorAccessor> get_weights_accessor(const std::string &path, - const std::string &data_file, - DataLayout file_layout = DataLayout::NCHW) +inline std::unique_ptr<graph::ITensorAccessor> +get_weights_accessor(const std::string &path, const std::string &data_file, DataLayout file_layout = DataLayout::NCHW) { - if(path.empty()) + if (path.empty()) { return std::make_unique<DummyAccessor>(); } @@ -494,30 +501,28 @@ inline std::unique_ptr<graph::ITensorAccessor> get_weights_accessor(const std::s * * @return An appropriate tensor accessor */ -inline std::unique_ptr<graph::ITensorAccessor> get_input_accessor(const arm_compute::utils::CommonGraphParams 
&graph_parameters, - std::unique_ptr<IPreprocessor> preprocessor = nullptr, - bool bgr = true) +inline std::unique_ptr<graph::ITensorAccessor> +get_input_accessor(const arm_compute::utils::CommonGraphParams &graph_parameters, + std::unique_ptr<IPreprocessor> preprocessor = nullptr, + bool bgr = true) { - if(!graph_parameters.validation_file.empty()) + if (!graph_parameters.validation_file.empty()) { - return std::make_unique<ValidationInputAccessor>(graph_parameters.validation_file, - graph_parameters.validation_path, - std::move(preprocessor), - bgr, - graph_parameters.validation_range_start, - graph_parameters.validation_range_end); + return std::make_unique<ValidationInputAccessor>( + graph_parameters.validation_file, graph_parameters.validation_path, std::move(preprocessor), bgr, + graph_parameters.validation_range_start, graph_parameters.validation_range_end); } else { const std::string &image_file = graph_parameters.image; const std::string &image_file_lower = lower_string(image_file); - if(arm_compute::utility::endswith(image_file_lower, ".npy")) + if (arm_compute::utility::endswith(image_file_lower, ".npy")) { return std::make_unique<NumPyBinLoader>(image_file, graph_parameters.data_layout); } - else if(arm_compute::utility::endswith(image_file_lower, ".jpeg") - || arm_compute::utility::endswith(image_file_lower, ".jpg") - || arm_compute::utility::endswith(image_file_lower, ".ppm")) + else if (arm_compute::utility::endswith(image_file_lower, ".jpeg") || + arm_compute::utility::endswith(image_file_lower, ".jpg") || + arm_compute::utility::endswith(image_file_lower, ".ppm")) { return std::make_unique<ImageAccessor>(image_file, bgr, std::move(preprocessor)); } @@ -540,20 +545,20 @@ inline std::unique_ptr<graph::ITensorAccessor> get_input_accessor(const arm_comp * * @return An appropriate tensor accessor */ -inline std::unique_ptr<graph::ITensorAccessor> get_output_accessor(const arm_compute::utils::CommonGraphParams &graph_parameters, - size_t top_n = 5, - bool is_validation = false, - std::ostream &output_stream = std::cout) +inline std::unique_ptr<graph::ITensorAccessor> +get_output_accessor(const arm_compute::utils::CommonGraphParams &graph_parameters, + size_t top_n = 5, + bool is_validation = false, + std::ostream &output_stream = std::cout) { ARM_COMPUTE_UNUSED(is_validation); - if(!graph_parameters.validation_file.empty()) + if (!graph_parameters.validation_file.empty()) { - return std::make_unique<ValidationOutputAccessor>(graph_parameters.validation_file, - output_stream, + return std::make_unique<ValidationOutputAccessor>(graph_parameters.validation_file, output_stream, graph_parameters.validation_range_start, graph_parameters.validation_range_end); } - else if(graph_parameters.labels.empty()) + else if (graph_parameters.labels.empty()) { return std::make_unique<DummyAccessor>(0); } @@ -574,20 +579,20 @@ inline std::unique_ptr<graph::ITensorAccessor> get_output_accessor(const arm_com * * @return An appropriate tensor accessor */ -inline std::unique_ptr<graph::ITensorAccessor> get_detection_output_accessor(const arm_compute::utils::CommonGraphParams &graph_parameters, - std::vector<TensorShape> tensor_shapes, - bool is_validation = false, - std::ostream &output_stream = std::cout) +inline std::unique_ptr<graph::ITensorAccessor> +get_detection_output_accessor(const arm_compute::utils::CommonGraphParams &graph_parameters, + std::vector<TensorShape> tensor_shapes, + bool is_validation = false, + std::ostream &output_stream = std::cout) { ARM_COMPUTE_UNUSED(is_validation); - 
if(!graph_parameters.validation_file.empty()) + if (!graph_parameters.validation_file.empty()) { - return std::make_unique<ValidationOutputAccessor>(graph_parameters.validation_file, - output_stream, + return std::make_unique<ValidationOutputAccessor>(graph_parameters.validation_file, output_stream, graph_parameters.validation_range_start, graph_parameters.validation_range_end); } - else if(graph_parameters.labels.empty()) + else if (graph_parameters.labels.empty()) { return std::make_unique<DummyAccessor>(0); } @@ -608,10 +613,13 @@ inline std::unique_ptr<graph::ITensorAccessor> get_detection_output_accessor(con * * @return An appropriate tensor accessor */ -inline std::unique_ptr<graph::ITensorAccessor> get_npy_output_accessor(const std::string &npy_path, TensorShape shape, DataType data_type, DataLayout data_layout = DataLayout::NCHW, +inline std::unique_ptr<graph::ITensorAccessor> get_npy_output_accessor(const std::string &npy_path, + TensorShape shape, + DataType data_type, + DataLayout data_layout = DataLayout::NCHW, std::ostream &output_stream = std::cout) { - if(npy_path.empty()) + if (npy_path.empty()) { return std::make_unique<DummyAccessor>(0); } @@ -630,9 +638,10 @@ inline std::unique_ptr<graph::ITensorAccessor> get_npy_output_accessor(const std * * @return An appropriate tensor accessor */ -inline std::unique_ptr<graph::ITensorAccessor> get_save_npy_output_accessor(const std::string &npy_name, const bool is_fortran = false) +inline std::unique_ptr<graph::ITensorAccessor> get_save_npy_output_accessor(const std::string &npy_name, + const bool is_fortran = false) { - if(npy_name.empty()) + if (npy_name.empty()) { return std::make_unique<DummyAccessor>(0); } @@ -663,9 +672,11 @@ inline std::unique_ptr<graph::ITensorAccessor> get_print_output_accessor(std::os */ inline TensorShape permute_shape(TensorShape tensor_shape, DataLayout in_data_layout, DataLayout out_data_layout) { - if(in_data_layout != out_data_layout) + if (in_data_layout != out_data_layout) { - arm_compute::PermutationVector perm_vec = (in_data_layout == DataLayout::NCHW) ? arm_compute::PermutationVector(2U, 0U, 1U) : arm_compute::PermutationVector(1U, 2U, 0U); + arm_compute::PermutationVector perm_vec = (in_data_layout == DataLayout::NCHW) + ? arm_compute::PermutationVector(2U, 0U, 1U) + : arm_compute::PermutationVector(1U, 2U, 0U); arm_compute::permute(tensor_shape, perm_vec); } return tensor_shape; @@ -680,7 +691,7 @@ inline TensorShape permute_shape(TensorShape tensor_shape, DataLayout in_data_la inline graph::Target set_target_hint(int target) { ARM_COMPUTE_ERROR_ON_MSG(target > 2, "Invalid target. 
Target must be 0 (NEON), 1 (OpenCL), 2 (OpenCL + Tuner)"); - if((target == 1 || target == 2)) + if ((target == 1 || target == 2)) { return graph::Target::CL; } diff --git a/utils/ImageLoader.h b/utils/ImageLoader.h index aab0f5e770..2ae1a416e2 100644 --- a/utils/ImageLoader.h +++ b/utils/ImageLoader.h @@ -68,8 +68,7 @@ public: * * @param[in] fs Image file stream */ - FileImageFeeder(std::ifstream &fs) - : _fs(fs) + FileImageFeeder(std::ifstream &fs) : _fs(fs) { } // Inherited overridden methods @@ -94,8 +93,7 @@ public: * * @param[in] data Pointer to data */ - MemoryImageFeeder(const uint8_t *data) - : _data(data) + MemoryImageFeeder(const uint8_t *data) : _data(data) { } /** Prevent instances of this class from being copied (As this class contains pointers) */ @@ -127,8 +125,7 @@ class IImageLoader { public: /** Default Constructor */ - IImageLoader() - : _feeder(nullptr), _width(0), _height(0) + IImageLoader() : _feeder(nullptr), _width(0), _height(0) { } /** Virtual base destructor */ @@ -188,7 +185,7 @@ public: // Validate feeding data validate_info(image.info()); - switch(image.info()->format()) + switch (image.info()->format()) { case Format::U8: { @@ -204,15 +201,17 @@ public: unsigned char green = 0; unsigned char blue = 0; - execute_window_loop(window, [&](const Coordinates &) - { - red = _feeder->get(); - green = _feeder->get(); - blue = _feeder->get(); + execute_window_loop( + window, + [&](const Coordinates &) + { + red = _feeder->get(); + green = _feeder->get(); + blue = _feeder->get(); - *out.ptr() = 0.2126f * red + 0.7152f * green + 0.0722f * blue; - }, - out); + *out.ptr() = 0.2126f * red + 0.7152f * green + 0.0722f * blue; + }, + out); break; } @@ -226,11 +225,8 @@ public: Iterator out(&image, window); size_t row_size = _width * image.info()->element_size(); - execute_window_loop(window, [&](const Coordinates &) - { - _feeder->get_row(out.ptr(), row_size); - }, - out); + execute_window_loop( + window, [&](const Coordinates &) { _feeder->get_row(out.ptr(), row_size); }, out); break; } @@ -241,7 +237,7 @@ public: // Unmap buffer if creating a CLTensor unmap(image); } - catch(const std::ifstream::failure &e) + catch (const std::ifstream::failure &e) { ARM_COMPUTE_ERROR_VAR("Loading image file: %s", e.what()); } @@ -257,15 +253,19 @@ public: void fill_planar_tensor(T &tensor, bool bgr = false) { ARM_COMPUTE_ERROR_ON(!is_open()); - ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&tensor, 1, DataType::U8, DataType::QASYMM8, DataType::F32, DataType::F16); + ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&tensor, 1, DataType::U8, DataType::QASYMM8, DataType::F32, + DataType::F16); const DataLayout data_layout = tensor.info()->data_layout(); const TensorShape tensor_shape = tensor.info()->tensor_shape(); ARM_COMPUTE_UNUSED(tensor_shape); - ARM_COMPUTE_ERROR_ON(tensor_shape[get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH)] != _width); - ARM_COMPUTE_ERROR_ON(tensor_shape[get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT)] != _height); - ARM_COMPUTE_ERROR_ON(tensor_shape[get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL)] != 3); + ARM_COMPUTE_ERROR_ON(tensor_shape[get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH)] != + _width); + ARM_COMPUTE_ERROR_ON(tensor_shape[get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT)] != + _height); + ARM_COMPUTE_ERROR_ON(tensor_shape[get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL)] != + 3); 
ARM_COMPUTE_ERROR_ON(_feeder.get() == nullptr); @@ -282,7 +282,7 @@ public: // Iterate through every pixel of the image Window window; - if(data_layout == DataLayout::NCHW) + if (data_layout == DataLayout::NCHW) { window.set(Window::DimX, Window::Dimension(0, _width, 1)); window.set(Window::DimY, Window::Dimension(0, _height, 1)); @@ -303,48 +303,50 @@ public: unsigned char green = 0; unsigned char blue = 0; - execute_window_loop(window, [&](const Coordinates &) - { - red = _feeder->get(); - green = _feeder->get(); - blue = _feeder->get(); - - switch(tensor.info()->data_type()) + execute_window_loop( + window, + [&](const Coordinates &) { - case DataType::U8: - case DataType::QASYMM8: - { - *(out.ptr() + 0 * stride_z) = bgr ? blue : red; - *(out.ptr() + 1 * stride_z) = green; - *(out.ptr() + 2 * stride_z) = bgr ? red : blue; - break; - } - case DataType::F32: - { - *reinterpret_cast<float *>(out.ptr() + 0 * stride_z) = static_cast<float>(bgr ? blue : red); - *reinterpret_cast<float *>(out.ptr() + 1 * stride_z) = static_cast<float>(green); - *reinterpret_cast<float *>(out.ptr() + 2 * stride_z) = static_cast<float>(bgr ? red : blue); - break; - } - case DataType::F16: - { - *reinterpret_cast<half *>(out.ptr() + 0 * stride_z) = static_cast<half>(bgr ? blue : red); - *reinterpret_cast<half *>(out.ptr() + 1 * stride_z) = static_cast<half>(green); - *reinterpret_cast<half *>(out.ptr() + 2 * stride_z) = static_cast<half>(bgr ? red : blue); - break; - } - default: + red = _feeder->get(); + green = _feeder->get(); + blue = _feeder->get(); + + switch (tensor.info()->data_type()) { - ARM_COMPUTE_ERROR("Unsupported data type"); + case DataType::U8: + case DataType::QASYMM8: + { + *(out.ptr() + 0 * stride_z) = bgr ? blue : red; + *(out.ptr() + 1 * stride_z) = green; + *(out.ptr() + 2 * stride_z) = bgr ? red : blue; + break; + } + case DataType::F32: + { + *reinterpret_cast<float *>(out.ptr() + 0 * stride_z) = static_cast<float>(bgr ? blue : red); + *reinterpret_cast<float *>(out.ptr() + 1 * stride_z) = static_cast<float>(green); + *reinterpret_cast<float *>(out.ptr() + 2 * stride_z) = static_cast<float>(bgr ? red : blue); + break; + } + case DataType::F16: + { + *reinterpret_cast<half *>(out.ptr() + 0 * stride_z) = static_cast<half>(bgr ? blue : red); + *reinterpret_cast<half *>(out.ptr() + 1 * stride_z) = static_cast<half>(green); + *reinterpret_cast<half *>(out.ptr() + 2 * stride_z) = static_cast<half>(bgr ? 
red : blue); + break; + } + default: + { + ARM_COMPUTE_ERROR("Unsupported data type"); + } } - } - }, - out); + }, + out); // Unmap buffer if creating a CLTensor unmap(tensor); } - catch(const std::ifstream::failure &e) + catch (const std::ifstream::failure &e) { ARM_COMPUTE_ERROR_VAR("Loading image file: %s", e.what()); } @@ -368,8 +370,7 @@ class PPMLoader : public IImageLoader { public: /** Default Constructor */ - PPMLoader() - : IImageLoader(), _fs() + PPMLoader() : IImageLoader(), _fs() { } @@ -386,7 +387,7 @@ public: _fs.exceptions(std::ifstream::failbit | std::ifstream::badbit); _fs.open(filename, std::ios::in | std::ios::binary); - unsigned int max_val = 0; + unsigned int max_val = 0; std::tie(_width, _height, max_val) = parse_ppm_header(_fs); ARM_COMPUTE_ERROR_ON_MSG_VAR(max_val >= 256, "2 bytes per colour channel not supported in file %s", @@ -394,14 +395,14 @@ public: _feeder = std::make_unique<FileImageFeeder>(_fs); } - catch(std::runtime_error &e) + catch (std::runtime_error &e) { ARM_COMPUTE_ERROR_VAR("Accessing %s: %s", filename.c_str(), e.what()); } } void close() override { - if(is_open()) + if (is_open()) { _fs.close(); _feeder = nullptr; @@ -443,8 +444,7 @@ private: public: /** Default Constructor */ - JPEGLoader() - : IImageLoader(), _is_loaded(false), _data(nullptr) + JPEGLoader() : IImageLoader(), _is_loaded(false), _data(nullptr) { } @@ -457,7 +457,7 @@ public: { int bpp, width, height; uint8_t *rgb_image = stbi_load(filename.c_str(), &width, &height, &bpp, 3); - if(rgb_image == NULL) + if (rgb_image == NULL) { ARM_COMPUTE_ERROR_VAR("Accessing %s failed", filename.c_str()); } @@ -472,7 +472,7 @@ public: } void close() override { - if(is_open()) + if (is_open()) { _width = 0; _height = 0; @@ -483,7 +483,7 @@ public: /** Explicitly Releases the memory of the loaded data */ void release() { - if(_is_loaded) + if (_is_loaded) { _data.reset(); _is_loaded = false; @@ -492,7 +492,7 @@ public: } private: - bool _is_loaded; + bool _is_loaded; std::unique_ptr<uint8_t, malloc_deleter> _data; }; @@ -509,7 +509,7 @@ public: static std::unique_ptr<IImageLoader> create(const std::string &filename) { ImageType type = arm_compute::utils::get_image_type_from_file(filename); - switch(type) + switch (type) { case ImageType::PPM: return std::make_unique<PPMLoader>(); diff --git a/utils/TypePrinter.h b/utils/TypePrinter.h index e8cb6e85b7..2d106d849a 100644 --- a/utils/TypePrinter.h +++ b/utils/TypePrinter.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2021 Arm Limited. + * Copyright (c) 2017-2024 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -21,8 +21,13 @@ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ -#ifndef __ARM_COMPUTE_TYPE_PRINTER_H__ -#define __ARM_COMPUTE_TYPE_PRINTER_H__ + +#ifndef ACL_UTILS_TYPEPRINTER_H +#define ACL_UTILS_TYPEPRINTER_H + +#ifdef ARM_COMPUTE_OPENCL_ENABLED +#include "arm_compute/core/CL/ICLTensor.h" +#endif /* ARM_COMPUTE_OPENCL_ENABLED */ #include "arm_compute/core/Dimensions.h" #include "arm_compute/core/Error.h" @@ -32,8 +37,26 @@ #include "arm_compute/core/Strides.h" #include "arm_compute/core/TensorInfo.h" #include "arm_compute/core/Types.h" +#include "arm_compute/dynamic_fusion/sketch/attributes/CastAttributes.h" +#include "arm_compute/dynamic_fusion/sketch/attributes/ClampAttributes.h" +#include "arm_compute/dynamic_fusion/sketch/attributes/Conv2dAttributes.h" +#include "arm_compute/dynamic_fusion/sketch/attributes/DepthwiseConv2dAttributes.h" +#include "arm_compute/dynamic_fusion/sketch/attributes/Pool2dAttributes.h" +#include "arm_compute/dynamic_fusion/sketch/attributes/ResizeAttributes.h" +#include "arm_compute/dynamic_fusion/sketch/attributes/SoftmaxAttributes.h" +#include "arm_compute/dynamic_fusion/sketch/gpu/operators/GpuPool2d.h" +#include "arm_compute/function_info/ConvolutionInfo.h" +#include "arm_compute/function_info/FullyConnectedLayerInfo.h" +#include "arm_compute/function_info/GEMMInfo.h" +#include "arm_compute/function_info/MatMulInfo.h" +#include "arm_compute/function_info/ScatterInfo.h" #include "arm_compute/runtime/CL/CLTunerTypes.h" #include "arm_compute/runtime/CL/CLTypes.h" +#include "arm_compute/runtime/common/LSTMParams.h" +#include "arm_compute/runtime/FunctionDescriptors.h" +#include "arm_compute/runtime/NEON/functions/NEMatMul.h" + +#include "support/Cast.h" #include "support/StringSupport.h" #include <ostream> @@ -51,7 +74,7 @@ namespace arm_compute template <typename T> std::string to_string_if_not_null(T *arg) { - if(arg == nullptr) + if (arg == nullptr) { return "nullptr"; } @@ -61,6 +84,74 @@ std::string to_string_if_not_null(T *arg) } } +/** Fallback method: try to use std::to_string: + * + * @param[in] val Value to convert to string + * + * @return String representing val. + */ +template <typename T> +inline std::string to_string(const T &val) +{ + return support::cpp11::to_string(val); +} + +/** Formatted output of a vector of objects. + * + * @note: Using the overloaded to_string() instead of overloaded operator<<(), because to_string() functions are + * overloaded for all types, where two or more of them can use the same operator<<(), ITensor is an example. + * + * @param[out] os Output stream + * @param[in] args Vector of objects to print + * + * @return Modified output stream. + */ +template <typename T> +::std::ostream &operator<<(::std::ostream &os, const std::vector<T> &args) +{ + const size_t max_print_size = 5U; + + os << "["; + bool first = true; + size_t i; + for (i = 0; i < args.size(); ++i) + { + if (i == max_print_size) + { + break; + } + if (first) + { + first = false; + } + else + { + os << ", "; + } + os << to_string(args[i]); + } + if (i < args.size()) + { + os << ", ..."; + } + os << "]"; + return os; +} + +/** Formatted output of a vector of objects. + * + * @param[in] args Vector of objects to print + * + * @return String representing args. + */ +template <typename T> +std::string to_string(const std::vector<T> &args) +{ + std::stringstream str; + str << args; + return str.str(); +} + /** Formatted output of the Dimensions type. * * @param[out] os Output stream. 
@@ -71,13 +162,13 @@ std::string to_string_if_not_null(T *arg) template <typename T> inline ::std::ostream &operator<<(::std::ostream &os, const Dimensions<T> &dimensions) { - if(dimensions.num_dimensions() > 0) + if (dimensions.num_dimensions() > 0) { os << dimensions[0]; - for(unsigned int d = 1; d < dimensions.num_dimensions(); ++d) + for (unsigned int d = 1; d < dimensions.num_dimensions(); ++d) { - os << "x" << dimensions[d]; + os << "," << dimensions[d]; } } @@ -93,7 +184,7 @@ inline ::std::ostream &operator<<(::std::ostream &os, const Dimensions<T> &dimen */ inline ::std::ostream &operator<<(::std::ostream &os, const RoundingPolicy &rounding_policy) { - switch(rounding_policy) + switch (rounding_policy) { case RoundingPolicy::TO_ZERO: os << "TO_ZERO"; @@ -121,7 +212,8 @@ inline ::std::ostream &operator<<(::std::ostream &os, const RoundingPolicy &roun inline ::std::ostream &operator<<(::std::ostream &os, const WeightsInfo &weights_info) { os << weights_info.are_reshaped() << ";"; - os << weights_info.num_kernels() << ";" << weights_info.kernel_size().first << "," << weights_info.kernel_size().second; + os << weights_info.num_kernels() << ";" << weights_info.kernel_size().first << "," + << weights_info.kernel_size().second; return os; } @@ -161,17 +253,17 @@ inline std::string to_string(const ROIPoolingLayerInfo &pool_info) */ inline ::std::ostream &operator<<(::std::ostream &os, const GEMMKernelInfo &gemm_info) { - os << "( m= " << gemm_info.m; - os << " n= " << gemm_info.n; - os << " k= " << gemm_info.k; - os << " depth_output_gemm3d= " << gemm_info.depth_output_gemm3d; - os << " reinterpret_input_as_3d= " << gemm_info.reinterpret_input_as_3d; - os << " broadcast_bias= " << gemm_info.broadcast_bias; - os << " fp_mixed_precision= " << gemm_info.fp_mixed_precision; - os << " mult_transpose1xW_width= " << gemm_info.mult_transpose1xW_width; - os << " mult_interleave4x4_height= " << gemm_info.mult_interleave4x4_height; - os << " a_offset = " << gemm_info.a_offset; - os << " b_offset = " << gemm_info.b_offset; + os << "( m=" << gemm_info.m; + os << " n=" << gemm_info.n; + os << " k=" << gemm_info.k; + os << " depth_output_gemm3d=" << gemm_info.depth_output_gemm3d; + os << " reinterpret_input_as_3d=" << gemm_info.reinterpret_input_as_3d; + os << " broadcast_bias=" << gemm_info.broadcast_bias; + os << " fp_mixed_precision=" << gemm_info.fp_mixed_precision; + os << " mult_transpose1xW_width=" << gemm_info.mult_transpose1xW_width; + os << " mult_interleave4x4_height=" << gemm_info.mult_interleave4x4_height; + os << " a_offset=" << gemm_info.a_offset; + os << " b_offset=" << gemm_info.b_offset; os << ")"; return os; } @@ -185,7 +277,8 @@ inline ::std::ostream &operator<<(::std::ostream &os, const GEMMKernelInfo &gemm */ inline ::std::ostream &operator<<(::std::ostream &os, const GEMMLHSMatrixInfo &gemm_info) { - os << "( m0= " << (unsigned int)gemm_info.m0 << " k0= " << gemm_info.k0 << " v0= " << gemm_info.v0 << " trans= " << gemm_info.transpose << " inter= " << gemm_info.interleave << "})"; + os << "( m0=" << (unsigned int)gemm_info.m0 << " k0=" << gemm_info.k0 << " v0=" << gemm_info.v0 + << " trans=" << gemm_info.transpose << " inter=" << gemm_info.interleave << "})"; return os; } @@ -198,8 +291,9 @@ inline ::std::ostream &operator<<(::std::ostream &os, const GEMMLHSMatrixInfo &g */ inline ::std::ostream &operator<<(::std::ostream &os, const GEMMRHSMatrixInfo &gemm_info) { - os << "( n0= " << (unsigned int)gemm_info.n0 << " k0= " << gemm_info.k0 << " h0= " << gemm_info.h0 << " trans= " << 
gemm_info.transpose << " inter= " << gemm_info.interleave << " exp_img=" << - gemm_info.export_to_cl_image << "})"; + os << "( n0=" << (unsigned int)gemm_info.n0 << " k0=" << gemm_info.k0 << " h0=" << gemm_info.h0 + << " trans=" << gemm_info.transpose << " inter=" << gemm_info.interleave + << " exp_img=" << gemm_info.export_to_cl_image << "})"; return os; } @@ -252,11 +346,18 @@ inline std::string to_string(const GEMMKernelInfo &gemm_info) inline ::std::ostream &operator<<(::std::ostream &os, const BoundingBoxTransformInfo &bbox_info) { auto weights = bbox_info.weights(); - os << "(" << bbox_info.img_width() << "x" << bbox_info.img_height() << ")~" << bbox_info.scale() << "(weights = {" << weights[0] << ", " << weights[1] << ", " << weights[2] << ", " << weights[3] << - "})"; + os << "(" << bbox_info.img_width() << "x" << bbox_info.img_height() << ")~" << bbox_info.scale() << "(weights={" + << weights[0] << ", " << weights[1] << ", " << weights[2] << ", " << weights[3] << "})"; return os; } +#if defined(ARM_COMPUTE_ENABLE_BF16) +inline ::std::ostream &operator<<(::std::ostream &os, const bfloat16 &v) +{ + return os << float(v); +} +#endif /* defined(ARM_COMPUTE_ENABLE_BF16) */ + /** Formatted output of the BoundingBoxTransformInfo type. * * @param[in] bbox_info Type to output. @@ -359,7 +460,7 @@ inline std::string to_string(const QuantizationInfo &quantization_info) */ inline ::std::ostream &operator<<(::std::ostream &os, const ActivationLayerInfo::ActivationFunction &act_function) { - switch(act_function) + switch (act_function) { case ActivationLayerInfo::ActivationFunction::ABS: os << "ABS"; @@ -403,6 +504,12 @@ inline ::std::ostream &operator<<(::std::ostream &os, const ActivationLayerInfo: case ActivationLayerInfo::ActivationFunction::HARD_SWISH: os << "HARD_SWISH"; break; + case ActivationLayerInfo::ActivationFunction::SWISH: + os << "SWISH"; + break; + case ActivationLayerInfo::ActivationFunction::GELU: + os << "GELU"; + break; default: ARM_COMPUTE_ERROR("NOT_SUPPORTED!"); @@ -413,20 +520,51 @@ inline ::std::ostream &operator<<(::std::ostream &os, const ActivationLayerInfo: /** Formatted output of the activation function info type. * - * @param[in] info Type to output. + * @param[in] info ActivationLayerInfo to output. * * @return Formatted string. */ inline std::string to_string(const arm_compute::ActivationLayerInfo &info) { std::stringstream str; - if(info.enabled()) + if (info.enabled()) { str << info.activation(); } return str.str(); } +/** Formatted output of the activation function info. + * + * @param[out] os Output stream. + * @param[in] info ActivationLayerInfo to output. + * + * @return Formatted string. + */ +inline ::std::ostream &operator<<(::std::ostream &os, const ActivationLayerInfo *info) +{ + if (info != nullptr) + { + if (info->enabled()) + { + os << info->activation(); + os << "("; + os << "VAL_A=" << info->a() << ","; + os << "VAL_B=" << info->b(); + os << ")"; + } + else + { + os << "disabled"; + } + } + else + { + os << "nullptr"; + } + return os; +} + /** Formatted output of the activation function type. * * @param[in] function Type to output. 
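The activation-info printers above now cover SWISH and GELU and add a pointer overload that also shows the parameters. A small sketch of how the two forms behave, not part of the patch (header path and values are assumptions):

    // Sketch only: assumes utils/TypePrinter.h is on the include path.
    #include <iostream>
    #include "utils/TypePrinter.h"

    int main()
    {
        using arm_compute::ActivationLayerInfo;
        ActivationLayerInfo act(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f);
        // The reference form prints only the function name, e.g. "BOUNDED_RELU".
        std::cout << arm_compute::to_string(act) << std::endl;
        // The new pointer overload also prints the parameters,
        // e.g. "BOUNDED_RELU(VAL_A=6,VAL_B=0)".
        std::cout << &act << std::endl;
        return 0;
    }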
@@ -449,7 +587,7 @@ inline std::string to_string(const arm_compute::ActivationLayerInfo::ActivationF */ inline ::std::ostream &operator<<(::std::ostream &os, const NormType &norm_type) { - switch(norm_type) + switch (norm_type) { case NormType::CROSS_MAP: os << "CROSS_MAP"; @@ -502,7 +640,7 @@ inline ::std::ostream &operator<<(::std::ostream &os, const NormalizationLayerIn */ inline ::std::ostream &operator<<(::std::ostream &os, const PoolingType &pool_type) { - switch(pool_type) + switch (pool_type) { case PoolingType::AVG: os << "AVG"; @@ -557,7 +695,7 @@ inline std::string to_string(const RoundingPolicy &rounding_policy) */ inline ::std::ostream &operator<<(::std::ostream &os, const DataLayout &data_layout) { - switch(data_layout) + switch (data_layout) { case DataLayout::UNKNOWN: os << "UNKNOWN"; @@ -568,6 +706,12 @@ inline ::std::ostream &operator<<(::std::ostream &os, const DataLayout &data_lay case DataLayout::NCHW: os << "NCHW"; break; + case DataLayout::NDHWC: + os << "NDHWC"; + break; + case DataLayout::NCDHW: + os << "NCDHW"; + break; default: ARM_COMPUTE_ERROR("NOT_SUPPORTED!"); } @@ -598,7 +742,7 @@ inline std::string to_string(const arm_compute::DataLayout &data_layout) */ inline ::std::ostream &operator<<(::std::ostream &os, const DataLayoutDimension &data_layout_dim) { - switch(data_layout_dim) + switch (data_layout_dim) { case DataLayoutDimension::WIDTH: os << "WIDTH"; @@ -609,6 +753,9 @@ inline ::std::ostream &operator<<(::std::ostream &os, const DataLayoutDimension case DataLayoutDimension::CHANNEL: os << "CHANNEL"; break; + case DataLayoutDimension::DEPTH: + os << "DEPTH"; + break; case DataLayoutDimension::BATCHES: os << "BATCHES"; break; @@ -627,7 +774,7 @@ inline ::std::ostream &operator<<(::std::ostream &os, const DataLayoutDimension */ inline ::std::ostream &operator<<(::std::ostream &os, const DataType &data_type) { - switch(data_type) + switch (data_type) { case DataType::UNKNOWN: os << "UNKNOWN"; @@ -718,7 +865,7 @@ inline std::string to_string(const arm_compute::DataType &data_type) */ inline ::std::ostream &operator<<(::std::ostream &os, const Format &format) { - switch(format) + switch (format) { case Format::UNKNOWN: os << "UNKNOWN"; @@ -800,7 +947,7 @@ inline std::string to_string(const Format &format) */ inline ::std::ostream &operator<<(::std::ostream &os, const Channel &channel) { - switch(channel) + switch (channel) { case Channel::UNKNOWN: os << "UNKNOWN"; @@ -867,7 +1014,7 @@ inline std::string to_string(const Channel &channel) */ inline ::std::ostream &operator<<(::std::ostream &os, const BorderMode &mode) { - switch(mode) + switch (mode) { case BorderMode::UNDEFINED: os << "UNDEFINED"; @@ -894,10 +1041,7 @@ inline ::std::ostream &operator<<(::std::ostream &os, const BorderMode &mode) */ inline ::std::ostream &operator<<(::std::ostream &os, const BorderSize &border) { - os << border.top << "," - << border.right << "," - << border.bottom << "," - << border.left; + os << border.top << "," << border.right << "," << border.bottom << "," << border.left; return os; } @@ -912,7 +1056,7 @@ inline ::std::ostream &operator<<(::std::ostream &os, const BorderSize &border) inline ::std::ostream &operator<<(::std::ostream &os, const PaddingList &padding) { os << "{"; - for(auto const &p : padding) + for (auto const &p : padding) { os << "{" << p.first << "," << p.second << "}"; } @@ -930,7 +1074,7 @@ inline ::std::ostream &operator<<(::std::ostream &os, const PaddingList &padding inline ::std::ostream &operator<<(::std::ostream &os, const Multiples &multiples) { 
os << "("; - for(size_t i = 0; i < multiples.size() - 1; i++) + for (size_t i = 0; i < multiples.size() - 1; i++) { os << multiples[i] << ", "; } @@ -947,7 +1091,7 @@ inline ::std::ostream &operator<<(::std::ostream &os, const Multiples &multiples */ inline ::std::ostream &operator<<(::std::ostream &os, const InterpolationPolicy &policy) { - switch(policy) + switch (policy) { case InterpolationPolicy::NEAREST_NEIGHBOR: os << "NEAREST_NEIGHBOR"; @@ -974,7 +1118,7 @@ inline ::std::ostream &operator<<(::std::ostream &os, const InterpolationPolicy */ inline ::std::ostream &operator<<(::std::ostream &os, const SamplingPolicy &policy) { - switch(policy) + switch (policy) { case SamplingPolicy::CENTER: os << "CENTER"; @@ -1003,34 +1147,23 @@ inline ::std::ostream &operator<<(std::ostream &os, const ITensorInfo *info) os << "Shape=" << info->tensor_shape() << "," << "DataLayout=" << string_from_data_layout(data_layout) << "," - << "DataType=" << string_from_data_type(data_type) << ","; + << "DataType=" << string_from_data_type(data_type); - if(is_data_type_quantized(data_type)) + if (is_data_type_quantized(data_type)) { - const QuantizationInfo qinfo = info->quantization_info(); - os << "QuantizationInfo="; - if(is_data_type_quantized_per_channel(data_type)) - { - os << "["; - const auto scales = qinfo.scale(); - const auto offsets = qinfo.offset(); - os << "(" << scales[0] << ", " << offsets[0] << ")"; - for(size_t i = 1; i < scales.size(); ++i) - { - os << ",(" << scales[i] << ", " << offsets[i] << ")"; - } - os << "]"; - } - else - { - os << "(" << qinfo.uniform().scale << ", " - << qinfo.uniform().offset << ")"; - } + const QuantizationInfo qinfo = info->quantization_info(); + const auto scales = qinfo.scale(); + const auto offsets = qinfo.offset(); + + os << ", QuantizationInfo={" + << "scales.size=" << scales.size() << ", scale(s)=" << scales << ", "; + + os << "offsets.size=" << offsets.size() << ", offset(s)=" << offsets << "}"; } return os; } -/** Formatted output of the TensorInfo type. +/** Formatted output of the const TensorInfo& type. * * @param[out] os Output stream. * @param[in] info Type to output. @@ -1043,7 +1176,7 @@ inline ::std::ostream &operator<<(::std::ostream &os, const TensorInfo &info) return os; } -/** Formatted output of the TensorInfo type. +/** Formatted output of the const TensorInfo& type. * * @param[in] info Type to output. * @@ -1056,6 +1189,147 @@ inline std::string to_string(const TensorInfo &info) return str.str(); } +/** Formatted output of the const ITensorInfo& type. + * + * @param[in] info Type to output. + * + * @return Formatted string. + */ +inline std::string to_string(const ITensorInfo &info) +{ + std::stringstream str; + str << &info; + return str.str(); +} + +/** Formatted output of the const ITensorInfo* type. + * + * @param[in] info Type to output. + * + * @return Formatted string. + */ +inline std::string to_string(const ITensorInfo *info) +{ + std::string ret_str = "nullptr"; + if (info != nullptr) + { + std::stringstream str; + str << info; + ret_str = str.str(); + } + return ret_str; +} + +/** Formatted output of the ITensorInfo* type. + * + * @param[in] info Type to output. + * + * @return Formatted string. + */ +inline std::string to_string(ITensorInfo *info) +{ + return to_string(static_cast<const ITensorInfo *>(info)); +} + +/** Formatted output of the ITensorInfo type obtained from const ITensor* type. + * + * @param[in] tensor Type to output. + * + * @return Formatted string. 
+ */ +inline std::string to_string(const ITensor *tensor) +{ + std::string ret_str = "nullptr"; + if (tensor != nullptr) + { + std::stringstream str; + str << "ITensor->info(): " << tensor->info(); + ret_str = str.str(); + } + return ret_str; +} + +/** Formatted output of the ITensorInfo type obtained from the ITensor* type. + * + * @param[in] tensor Type to output. + * + * @return Formatted string. + */ +inline std::string to_string(ITensor *tensor) +{ + return to_string(static_cast<const ITensor *>(tensor)); +} + +/** Formatted output of the ITensorInfo type obtained from the ITensor& type. + * + * @param[in] tensor Type to output. + * + * @return Formatted string. + */ +inline std::string to_string(ITensor &tensor) +{ + std::stringstream str; + str << "ITensor.info(): " << tensor.info(); + return str.str(); +} + +#ifdef ARM_COMPUTE_OPENCL_ENABLED +/** Formatted output of the ITensorInfo type obtained from the const ICLTensor& type. + * + * @param[in] cl_tensor Type to output. + * + * @return Formatted string. + */ +inline std::string to_string(const ICLTensor *cl_tensor) +{ + std::string ret_str = "nullptr"; + if (cl_tensor != nullptr) + { + std::stringstream str; + str << "ICLTensor->info(): " << cl_tensor->info(); + ret_str = str.str(); + } + return ret_str; +} + +/** Formatted output of the ITensorInfo type obtained from the ICLTensor& type. + * + * @param[in] cl_tensor Type to output. + * + * @return Formatted string. + */ +inline std::string to_string(ICLTensor *cl_tensor) +{ + return to_string(static_cast<const ICLTensor *>(cl_tensor)); +} + +/** Formatted output of the cl::NDRange type. + * + * @param[out] os Output stream. + * @param[in] nd_range cl::NDRange to output. + * + * @return Modified output stream. + */ +inline ::std::ostream &operator<<(::std::ostream &os, const cl::NDRange &nd_range) +{ + os << "{" << nd_range[0] << "," << nd_range[1] << "," << nd_range[2] << "}"; + return os; +} + +/** Formatted output of the cl::NDRange type + * + * @param[in] nd_Range Type to output. + * + * @return Formatted string. + */ +inline std::string to_string(const cl::NDRange &nd_range) +{ + std::stringstream str; + str << nd_range; + return str.str(); +} +#endif /* ARM_COMPUTE_OPENCL_ENABLED */ + /** Formatted output of the Dimensions type. * * @param[in] dimensions Type to output. @@ -1145,7 +1419,8 @@ inline ::std::ostream &operator<<(::std::ostream &os, const GEMMInfo &info) os << "retain_internal_weights=" << info.retain_internal_weights() << ","; os << "fp_mixed_precision=" << info.fp_mixed_precision() << ","; os << "broadcast_bias=" << info.broadcast_bias() << ","; - os << "pretranpose_B=" << info.pretranpose_B() << ","; + os << "pretranspose_B=" << info.pretranspose_B() << ","; + os << "}"; return os; } @@ -1173,9 +1448,9 @@ inline ::std::ostream &operator<<(::std::ostream &os, const Window::Dimension &d inline ::std::ostream &operator<<(::std::ostream &os, const Window &win) { os << "{"; - for(unsigned int i = 0; i < Coordinates::num_max_dimensions; i++) + for (unsigned int i = 0; i < Coordinates::num_max_dimensions; i++) { - if(i > 0) + if (i > 0) { os << ", "; } @@ -1237,7 +1512,7 @@ inline std::string to_string(const Window::Dimension &dim) str << dim; return str.str(); } -/** Formatted output of the Window type. +/** Formatted output of the Window& type. * * @param[in] win Type to output. * @@ -1250,6 +1525,24 @@ inline std::string to_string(const Window &win) return str.str(); } +/** Formatted output of the Window* type. + * + * @param[in] win Type to output. 
+ * + * @return Formatted string. + */ +inline std::string to_string(Window *win) +{ + std::string ret_str = "nullptr"; + if (win != nullptr) + { + std::stringstream str; + str << *win; + ret_str = str.str(); + } + return ret_str; +} + /** Formatted output of the Rectangle type. * * @param[out] os Output stream. @@ -1274,7 +1567,7 @@ inline ::std::ostream &operator<<(::std::ostream &os, const Rectangle &rect) */ inline ::std::ostream &operator<<(::std::ostream &os, const PaddingMode &mode) { - switch(mode) + switch (mode) { case PaddingMode::CONSTANT: os << "CONSTANT"; @@ -1316,8 +1609,8 @@ inline ::std::ostream &operator<<(::std::ostream &os, const PadStrideInfo &pad_s { os << pad_stride_info.stride().first << "," << pad_stride_info.stride().second; os << ";"; - os << pad_stride_info.pad_left() << "," << pad_stride_info.pad_right() << "," - << pad_stride_info.pad_top() << "," << pad_stride_info.pad_bottom(); + os << pad_stride_info.pad_left() << "," << pad_stride_info.pad_right() << "," << pad_stride_info.pad_top() << "," + << pad_stride_info.pad_bottom(); return os; } @@ -1422,7 +1715,7 @@ inline std::string to_string(const SamplingPolicy &policy) */ inline ::std::ostream &operator<<(::std::ostream &os, const ConvertPolicy &policy) { - switch(policy) + switch (policy) { case ConvertPolicy::WRAP: os << "WRAP"; @@ -1453,7 +1746,7 @@ inline std::string to_string(const ConvertPolicy &policy) */ inline ::std::ostream &operator<<(::std::ostream &os, const ArithmeticOperation &op) { - switch(op) + switch (op) { case ArithmeticOperation::ADD: os << "ADD"; @@ -1476,6 +1769,9 @@ inline ::std::ostream &operator<<(::std::ostream &os, const ArithmeticOperation case ArithmeticOperation::POWER: os << "POWER"; break; + case ArithmeticOperation::PRELU: + os << "PRELU"; + break; default: ARM_COMPUTE_ERROR("NOT_SUPPORTED!"); } @@ -1505,7 +1801,7 @@ inline std::string to_string(const ArithmeticOperation &op) */ inline ::std::ostream &operator<<(::std::ostream &os, const ReductionOperation &op) { - switch(op) + switch (op) { case ReductionOperation::SUM: os << "SUM"; @@ -1560,7 +1856,7 @@ inline std::string to_string(const ReductionOperation &op) */ inline ::std::ostream &operator<<(::std::ostream &os, const ComparisonOperation &op) { - switch(op) + switch (op) { case ComparisonOperation::Equal: os << "Equal"; @@ -1596,7 +1892,7 @@ inline ::std::ostream &operator<<(::std::ostream &os, const ComparisonOperation */ inline ::std::ostream &operator<<(::std::ostream &os, const ElementWiseUnary &op) { - switch(op) + switch (op) { case ElementWiseUnary::RSQRT: os << "RSQRT"; @@ -1610,9 +1906,18 @@ inline ::std::ostream &operator<<(::std::ostream &os, const ElementWiseUnary &op case ElementWiseUnary::LOG: os << "LOG"; break; + case ElementWiseUnary::SIN: + os << "SIN"; + break; + case ElementWiseUnary::ABS: + os << "ABS"; + break; case ElementWiseUnary::ROUND: os << "ROUND"; break; + case ElementWiseUnary::LOGICAL_NOT: + os << "LOGICAL_NOT"; + break; default: ARM_COMPUTE_ERROR("NOT_SUPPORTED!"); } @@ -1684,7 +1989,7 @@ inline std::string to_string(const PoolingLayerInfo &info) str << "{Type=" << info.pool_type << "," << "DataLayout=" << info.data_layout << "," << "IsGlobalPooling=" << info.is_global_pooling; - if(!info.is_global_pooling) + if (!info.is_global_pooling) { str << "," << "PoolSize=" << info.pool_size.width << "," << info.pool_size.height << "," @@ -1694,6 +1999,121 @@ inline std::string to_string(const PoolingLayerInfo &info) return str.str(); } +/** Formatted output of the Size3D type. 
+ * + * @param[out] os Output stream + * @param[in] size Type to output + * + * @return Modified output stream. + */ +inline ::std::ostream &operator<<(::std::ostream &os, const Size3D &size) +{ + os << size.width << "x" << size.height << "x" << size.depth; + + return os; +} + +/** Formatted output of the Size3D type. + * + * @param[in] type Type to output + * + * @return Formatted string. + */ +inline std::string to_string(const Size3D &type) +{ + std::stringstream str; + str << type; + return str.str(); +} + +/** Formatted output of the Padding3D type. + * + * @param[out] os Output stream. + * @param[in] padding3d Padding info for 3D spatial dimension shape. + * + * @return Modified output stream. + */ +inline ::std::ostream &operator<<(::std::ostream &os, const Padding3D &padding3d) +{ + os << padding3d.left << "," << padding3d.right << "," << padding3d.top << "," << padding3d.bottom << "," + << padding3d.front << "," << padding3d.back; + return os; +} + +/** Converts a @ref Padding3D to string + * + * @param[in] padding3d Padding3D value to be converted + * + * @return String representing the corresponding Padding3D + */ +inline std::string to_string(const Padding3D &padding3d) +{ + std::stringstream str; + str << padding3d; + return str.str(); +} + +/** Formatted output of the DimensionRoundingType type. + * + * @param[out] os Output stream. + * @param[in] rounding_type DimensionRoundingType Dimension rounding type when down-scaling, or compute output shape of pooling(2D or 3D). + * + * @return Modified output stream. + */ +inline ::std::ostream &operator<<(::std::ostream &os, const DimensionRoundingType &rounding_type) +{ + switch (rounding_type) + { + case DimensionRoundingType::CEIL: + os << "CEIL"; + break; + case DimensionRoundingType::FLOOR: + os << "FLOOR"; + break; + default: + ARM_COMPUTE_ERROR("NOT_SUPPORTED!"); + } + return os; +} + +/** Formatted output of the Pooling 3d Layer Info. + * + * @param[out] os Output stream. + * @param[in] info Pooling 3D layer info to print to output stream. + * + * @return Modified output stream. + */ +inline ::std::ostream &operator<<(::std::ostream &os, const Pooling3dLayerInfo &info) +{ + os << "{Type=" << info.pool_type << "," + << "IsGlobalPooling=" << info.is_global_pooling; + if (!info.is_global_pooling) + { + os << "," + << "PoolSize=" << info.pool_size << ", " + << "Stride=" << info.stride << ", " + << "Padding=" << info.padding << ", " + << "Exclude Padding=" << info.exclude_padding << ", " + << "fp_mixed_precision=" << info.fp_mixed_precision << ", " + << "DimensionRoundingType=" << info.round_type; + } + os << "}"; + return os; +} + +/** Formatted output of the Pooling 3d Layer Info. + * + * @param[in] info Type to output. + * + * @return Formatted string. + */ +inline std::string to_string(const Pooling3dLayerInfo &info) +{ + std::stringstream str; + str << info; + return str.str(); +} + /** Formatted output of the PriorBoxLayerInfo. * * @param[in] info Type to output. 
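The hunk above adds printers for the 3D pooling helper types (Size3D, Padding3D, DimensionRoundingType, Pooling3dLayerInfo). A brief sketch of the resulting formatting, not part of the patch (constructor arguments and include path are assumptions):

    // Sketch only: assumes utils/TypePrinter.h is on the include path.
    #include <iostream>
    #include "utils/TypePrinter.h"

    int main()
    {
        arm_compute::Size3D    pool_size(3, 3, 3);
        arm_compute::Padding3D padding; // defaults to all-zero padding
        // Expected output with the formatters above: "3x3x3" and "0,0,0,0,0,0".
        std::cout << arm_compute::to_string(pool_size) << std::endl;
        std::cout << arm_compute::to_string(padding) << std::endl;
        return 0;
    }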
@@ -1704,16 +2124,10 @@ inline std::string to_string(const PriorBoxLayerInfo &info) { std::stringstream str; str << "{"; - str << "Clip:" << info.clip() - << "Flip:" << info.flip() - << "StepX:" << info.steps()[0] - << "StepY:" << info.steps()[1] - << "MinSizes:" << info.min_sizes().size() - << "MaxSizes:" << info.max_sizes().size() - << "ImgSizeX:" << info.img_size().x - << "ImgSizeY:" << info.img_size().y - << "Offset:" << info.offset() - << "Variances:" << info.variances().size(); + str << "Clip:" << info.clip() << "Flip:" << info.flip() << "StepX:" << info.steps()[0] + << "StepY:" << info.steps()[1] << "MinSizes:" << info.min_sizes().size() + << "MaxSizes:" << info.max_sizes().size() << "ImgSizeX:" << info.img_size().x + << "ImgSizeY:" << info.img_size().y << "Offset:" << info.offset() << "Variances:" << info.variances().size(); str << "}"; return str.str(); } @@ -1754,7 +2168,7 @@ inline std::string to_string(const Size2D &type) */ inline ::std::ostream &operator<<(::std::ostream &os, const ConvolutionMethod &conv_method) { - switch(conv_method) + switch (conv_method) { case ConvolutionMethod::GEMM: os << "GEMM"; @@ -1765,6 +2179,12 @@ inline ::std::ostream &operator<<(::std::ostream &os, const ConvolutionMethod &c case ConvolutionMethod::WINOGRAD: os << "WINOGRAD"; break; + case ConvolutionMethod::FFT: + os << "FFT"; + break; + case ConvolutionMethod::GEMM_CONV2D: + os << "GEMM_CONV2D"; + break; default: ARM_COMPUTE_ERROR("NOT_SUPPORTED!"); } @@ -1794,11 +2214,14 @@ inline std::string to_string(const ConvolutionMethod &conv_method) */ inline ::std::ostream &operator<<(::std::ostream &os, const GPUTarget &gpu_target) { - switch(gpu_target) + switch (gpu_target) { case GPUTarget::GPU_ARCH_MASK: os << "GPU_ARCH_MASK"; break; + case GPUTarget::GPU_GENERATION_MASK: + os << "GPU_GENERATION_MASK"; + break; case GPUTarget::MIDGARD: os << "MIDGARD"; break; @@ -1808,6 +2231,9 @@ inline ::std::ostream &operator<<(::std::ostream &os, const GPUTarget &gpu_targe case GPUTarget::VALHALL: os << "VALHALL"; break; + case GPUTarget::FIFTHGEN: + os << "FIFTHGEN"; + break; case GPUTarget::T600: os << "T600"; break; @@ -1832,17 +2258,56 @@ inline ::std::ostream &operator<<(::std::ostream &os, const GPUTarget &gpu_targe case GPUTarget::G51LIT: os << "G51LIT"; break; + case GPUTarget::G31: + os << "G31"; + break; case GPUTarget::G76: os << "G76"; break; + case GPUTarget::G52: + os << "G52"; + break; + case GPUTarget::G52LIT: + os << "G52LIT"; + break; case GPUTarget::G77: os << "G77"; break; + case GPUTarget::G57: + os << "G57"; + break; case GPUTarget::G78: os << "G78"; break; - case GPUTarget::TODX: - os << "TODX"; + case GPUTarget::G68: + os << "G68"; + break; + case GPUTarget::G78AE: + os << "G78AE"; + break; + case GPUTarget::G710: + os << "G710"; + break; + case GPUTarget::G610: + os << "G610"; + break; + case GPUTarget::G510: + os << "G510"; + break; + case GPUTarget::G310: + os << "G310"; + break; + case GPUTarget::G715: + os << "G715"; + break; + case GPUTarget::G615: + os << "G615"; + break; + case GPUTarget::G720: + os << "G720"; + break; + case GPUTarget::G620: + os << "G620"; break; default: ARM_COMPUTE_ERROR("NOT_SUPPORTED!"); @@ -1892,7 +2357,7 @@ inline ::std::ostream &operator<<(::std::ostream &os, const DetectionWindow &det */ inline ::std::ostream &operator<<(::std::ostream &os, const DetectionOutputLayerCodeType &detection_code) { - switch(detection_code) + switch (detection_code) { case DetectionOutputLayerCodeType::CENTER_SIZE: os << "CENTER_SIZE"; @@ -1944,8 +2409,7 @@ inline 
::std::ostream &operator<<(::std::ostream &os, const DetectionOutputLayer << "BackgroundLabelId=" << detection_info.background_label_id() << "," << "ConfidenceThreshold=" << detection_info.confidence_threshold() << "," << "TopK=" << detection_info.top_k() << "," - << "NumLocClasses=" << detection_info.num_loc_classes() - << "}"; + << "NumLocClasses=" << detection_info.num_loc_classes() << "}"; return os; } @@ -1981,8 +2445,7 @@ inline ::std::ostream &operator<<(::std::ostream &os, const DetectionPostProcess << "ScaleValue_h=" << detection_info.scale_value_h() << "," << "ScaleValue_w=" << detection_info.scale_value_w() << "," << "UseRegularNms=" << detection_info.use_regular_nms() << "," - << "DetectionPerClass=" << detection_info.detection_per_class() - << "}"; + << "DetectionPerClass=" << detection_info.detection_per_class() << "}"; return os; } @@ -2013,34 +2476,6 @@ inline std::string to_string(const DetectionWindow &detection_window) return str.str(); } -/** Formatted output of a vector of objects. - * - * @param[out] os Output stream - * @param[in] args Vector of objects to print - * - * @return Modified output stream. - */ -template <typename T> -inline ::std::ostream &operator<<(::std::ostream &os, const std::vector<T> &args) -{ - os << "["; - bool first = true; - for(auto &arg : args) - { - if(first) - { - first = false; - } - else - { - os << ", "; - } - os << arg; - } - os << "]"; - return os; -} - /** Formatted output of @ref PriorBoxLayerInfo. * * @param[out] os Output stream. @@ -2050,34 +2485,13 @@ inline ::std::ostream &operator<<(::std::ostream &os, const std::vector<T> &args */ inline ::std::ostream &operator<<(::std::ostream &os, const PriorBoxLayerInfo &info) { - os << "Clip:" << info.clip() - << "Flip:" << info.flip() - << "StepX:" << info.steps()[0] - << "StepY:" << info.steps()[1] - << "MinSizes:" << info.min_sizes() - << "MaxSizes:" << info.max_sizes() - << "ImgSizeX:" << info.img_size().x - << "ImgSizeY:" << info.img_size().y - << "Offset:" << info.offset() - << "Variances:" << info.variances(); + os << "Clip:" << info.clip() << "Flip:" << info.flip() << "StepX:" << info.steps()[0] << "StepY:" << info.steps()[1] + << "MinSizes:" << info.min_sizes() << "MaxSizes:" << info.max_sizes() << "ImgSizeX:" << info.img_size().x + << "ImgSizeY:" << info.img_size().y << "Offset:" << info.offset() << "Variances:" << info.variances(); return os; } -/** Formatted output of a vector of objects. - * - * @param[in] args Vector of objects to print - * - * @return String representing args. - */ -template <typename T> -std::string to_string(const std::vector<T> &args) -{ - std::stringstream str; - str << args; - return str.str(); -} - /** Formatted output of the WinogradInfo type. */ inline ::std::ostream &operator<<(::std::ostream &os, const WinogradInfo &info) { @@ -2096,18 +2510,6 @@ inline std::string to_string(const WinogradInfo &type) return str.str(); } -/** Fallback method: try to use std::to_string: - * - * @param[in] val Value to convert to string - * - * @return String representing val. 
- */ -template <typename T> -inline std::string to_string(const T &val) -{ - return support::cpp11::to_string(val); -} - /** Convert a CLTunerMode value to a string * * @param val CLTunerMode value to be converted @@ -2116,7 +2518,7 @@ inline std::string to_string(const T &val) */ inline std::string to_string(const CLTunerMode val) { - switch(val) + switch (val) { case CLTunerMode::EXHAUSTIVE: { @@ -2145,16 +2547,8 @@ inline std::string to_string(const CLTunerMode val) */ inline std::string to_string(CLGEMMKernelType val) { - switch(val) + switch (val) { - case CLGEMMKernelType::NATIVE_V1: - { - return "Native_V1"; - } - case CLGEMMKernelType::RESHAPED_V1: - { - return "Reshaped_V1"; - } case CLGEMMKernelType::NATIVE: { return "Native"; @@ -2187,6 +2581,1115 @@ inline ::std::ostream &operator<<(::std::ostream &os, const CLTunerMode &val) return os; } +/** Formatted output of the ConvolutionInfo type. + * + * @param[out] os Output stream. + * @param[in] conv_info ConvolutionInfo to output. + * + * @return Modified output stream. + */ +inline ::std::ostream &operator<<(::std::ostream &os, const ConvolutionInfo &conv_info) +{ + os << "{PadStrideInfo=" << conv_info.pad_stride_info << ", " + << "depth_multiplier=" << conv_info.depth_multiplier << ", " + << "act_info=" << to_string(conv_info.act_info) << ", " + << "dilation=" << conv_info.dilation << "}"; + return os; +} + +/** Converts a @ref ConvolutionInfo to string + * + * @param[in] info ConvolutionInfo value to be converted + * + * @return String representing the corresponding ConvolutionInfo + */ +inline std::string to_string(const ConvolutionInfo &info) +{ + std::stringstream str; + str << info; + return str.str(); +} + +/** Formatted output of the FullyConnectedLayerInfo type. + * + * @param[out] os Output stream. + * @param[in] layer_info FullyConnectedLayerInfo to output. + * + * @return Modified output stream. + */ +inline ::std::ostream &operator<<(::std::ostream &os, const FullyConnectedLayerInfo &layer_info) +{ + os << "{activation_info=" << to_string(layer_info.activation_info) << ", " + << "weights_trained_layout=" << layer_info.weights_trained_layout << ", " + << "transpose_weights=" << layer_info.transpose_weights << ", " + << "are_weights_reshaped=" << layer_info.are_weights_reshaped << ", " + << "retain_internal_weights=" << layer_info.retain_internal_weights << ", " + << "fp_mixed_precision=" << layer_info.fp_mixed_precision << "}"; + return os; +} + +/** Converts a @ref FullyConnectedLayerInfo to string + * + * @param[in] info FullyConnectedLayerInfo value to be converted + * + * @return String representing the corresponding FullyConnectedLayerInfo + */ +inline std::string to_string(const FullyConnectedLayerInfo &info) +{ + std::stringstream str; + str << info; + return str.str(); +} + +/** Formatted output of the GEMMLowpOutputStageType type. + * + * @param[out] os Output stream. + * @param[in] gemm_type GEMMLowpOutputStageType to output. + * + * @return Modified output stream. 
+ */ +inline ::std::ostream &operator<<(::std::ostream &os, const GEMMLowpOutputStageType &gemm_type) +{ + switch (gemm_type) + { + case GEMMLowpOutputStageType::NONE: + os << "NONE"; + break; + case GEMMLowpOutputStageType::QUANTIZE_DOWN: + os << "QUANTIZE_DOWN"; + break; + case GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT: + os << "QUANTIZE_DOWN_FIXEDPOINT"; + break; + case GEMMLowpOutputStageType::QUANTIZE_DOWN_FLOAT: + os << "QUANTIZE_DOWN_FLOAT"; + break; + default: + ARM_COMPUTE_ERROR("NOT_SUPPORTED!"); + } + return os; +} + +/** Converts a @ref GEMMLowpOutputStageType to string + * + * @param[in] gemm_type GEMMLowpOutputStageType value to be converted + * + * @return String representing the corresponding GEMMLowpOutputStageType + */ +inline std::string to_string(const GEMMLowpOutputStageType &gemm_type) +{ + std::stringstream str; + str << gemm_type; + return str.str(); +} + +/** Formatted output of the GEMMLowpOutputStageInfo type. + * + * @param[out] os Output stream. + * @param[in] gemm_info GEMMLowpOutputStageInfo to output. + * + * @return Modified output stream. + */ +inline ::std::ostream &operator<<(::std::ostream &os, const GEMMLowpOutputStageInfo &gemm_info) +{ + os << "{type=" << gemm_info.type << ", " + << "gemlowp_offset=" << gemm_info.gemmlowp_offset << ", " + << "gemmlowp_multiplier=" << gemm_info.gemmlowp_multiplier << ", " + << "gemmlowp_shift=" << gemm_info.gemmlowp_shift << ", " + << "gemmlowp_min_bound=" << gemm_info.gemmlowp_min_bound << ", " + << "gemmlowp_max_bound=" << gemm_info.gemmlowp_max_bound << ", " + << "gemmlowp_multipliers=" << gemm_info.gemmlowp_multiplier << ", " + << "gemmlowp_shifts=" << gemm_info.gemmlowp_shift << ", " + << "gemmlowp_real_multiplier=" << gemm_info.gemmlowp_real_multiplier << ", " + << "is_quantized_per_channel=" << gemm_info.is_quantized_per_channel << ", " + << "output_data_type=" << gemm_info.output_data_type << "}"; + return os; +} + +/** Converts a @ref GEMMLowpOutputStageInfo to string + * + * @param[in] gemm_info GEMMLowpOutputStageInfo value to be converted + * + * @return String representing the corresponding GEMMLowpOutputStageInfo + */ +inline std::string to_string(const GEMMLowpOutputStageInfo &gemm_info) +{ + std::stringstream str; + str << gemm_info; + return str.str(); +} + +/** Formatted output of the Conv2dInfo type. + * + * @param[out] os Output stream. + * @param[in] conv_info Conv2dInfo to output. + * + * @return Modified output stream. + */ +inline ::std::ostream &operator<<(::std::ostream &os, const Conv2dInfo &conv_info) +{ + os << "{conv_info=" << conv_info.conv_info << ", " + << "dilation=" << conv_info.dilation << ", " + << "act_info=" << to_string(conv_info.act_info) << ", " + << "enable_fast_math=" << conv_info.enable_fast_math << ", " + << "num_groups=" << conv_info.num_groups << "," + << "}"; + return os; +} + +/** Converts a @ref Conv2dInfo to string + * + * @param[in] conv_info Conv2dInfo value to be converted + * + * @return String representing the corresponding Conv2dInfo + */ +inline std::string to_string(const Conv2dInfo &conv_info) +{ + std::stringstream str; + str << conv_info; + return str.str(); +} + +/** Formatted output of the PixelValue type. + * + * @param[out] os Output stream. + * @param[in] pixel_value PixelValue to output. + * + * @return Modified output stream. 
+ */
+inline ::std::ostream &operator<<(::std::ostream &os, const PixelValue &pixel_value)
+{
+    os << "{value.u64=" << pixel_value.get<uint64_t>() << "}";
+    return os;
+}
+
+/** Converts a @ref PixelValue to string
+ *
+ * @param[in] pixel_value PixelValue value to be converted
+ *
+ * @return String representing the corresponding PixelValue
+ */
+inline std::string to_string(const PixelValue &pixel_value)
+{
+    std::stringstream str;
+    str << pixel_value;
+    return str.str();
+}
+
+/** Formatted output of the ScaleKernelInfo type.
+ *
+ * @param[out] os         Output stream.
+ * @param[in]  scale_info ScaleKernelInfo to output.
+ *
+ * @return Modified output stream.
+ */
+inline ::std::ostream &operator<<(::std::ostream &os, const ScaleKernelInfo &scale_info)
+{
+    os << "{interpolation_policy=" << scale_info.interpolation_policy << ", "
+       << "BorderMode=" << scale_info.border_mode << ", "
+       << "PixelValue=" << scale_info.constant_border_value << ", "
+       << "SamplingPolicy=" << scale_info.sampling_policy << ", "
+       << "use_padding=" << scale_info.use_padding << ", "
+       << "align_corners=" << scale_info.align_corners << ", "
+       << "data_layout=" << scale_info.data_layout << "}";
+    return os;
+}
+
+/** Converts a @ref ScaleKernelInfo to string
+ *
+ * @param[in] scale_info ScaleKernelInfo value to be converted
+ *
+ * @return String representing the corresponding ScaleKernelInfo
+ */
+inline std::string to_string(const ScaleKernelInfo &scale_info)
+{
+    std::stringstream str;
+    str << scale_info;
+    return str.str();
+}
+
+/** Formatted output of the FFTDirection type.
+ *
+ * @param[out] os      Output stream.
+ * @param[in]  fft_dir FFTDirection to output.
+ *
+ * @return Modified output stream.
+ */
+inline ::std::ostream &operator<<(::std::ostream &os, const FFTDirection &fft_dir)
+{
+    switch (fft_dir)
+    {
+        case FFTDirection::Forward:
+            os << "Forward";
+            break;
+        case FFTDirection::Inverse:
+            os << "Inverse";
+            break;
+        default:
+            ARM_COMPUTE_ERROR("NOT_SUPPORTED!");
+    }
+    return os;
+}
+
+/** Converts a @ref FFTDirection to string
+ *
+ * @param[in] fft_dir FFTDirection value to be converted
+ *
+ * @return String representing the corresponding FFTDirection
+ */
+inline std::string to_string(const FFTDirection &fft_dir)
+{
+    std::stringstream str;
+    str << "{" << fft_dir << "}";
+    return str.str();
+}
+
+/** Formatted output of the FFT1DInfo type.
+ *
+ * @param[out] os         Output stream.
+ * @param[in]  fft1d_info FFT1DInfo to output.
+ *
+ * @return Modified output stream.
+ */
+inline ::std::ostream &operator<<(::std::ostream &os, const FFT1DInfo &fft1d_info)
+{
+    os << "{axis=" << fft1d_info.axis << ", "
+       << "direction=" << fft1d_info.direction << "}";
+    return os;
+}
+
+/** Converts a @ref FFT1DInfo to string
+ *
+ * @param[in] fft1d_info FFT1DInfo value to be converted
+ *
+ * @return String representing the corresponding FFT1DInfo
+ */
+inline std::string to_string(const FFT1DInfo &fft1d_info)
+{
+    std::stringstream str;
+    str << fft1d_info;
+    return str.str();
+}
+
+/** Formatted output of the FFT2DInfo type.
+ *
+ * @param[out] os         Output stream.
+ * @param[in]  fft2d_info FFT2DInfo to output.
+ *
+ * @return Modified output stream.
+ */
+inline ::std::ostream &operator<<(::std::ostream &os, const FFT2DInfo &fft2d_info)
+{
+    os << "{axis=" << fft2d_info.axis0 << ", "
+       << "axis=" << fft2d_info.axis1 << ", "
+       << "direction=" << fft2d_info.direction << "}";
+    return os;
+}
+
+/** Converts a @ref FFT2DInfo to string
+ *
+ * @param[in] fft2d_info FFT2DInfo value to be converted
+ *
+ * @return String representing the corresponding FFT2DInfo
+ */
+inline std::string to_string(const FFT2DInfo &fft2d_info)
+{
+    std::stringstream str;
+    str << fft2d_info;
+    return str.str();
+}
+
+/** Formatted output of the Coordinates2D type.
+ *
+ * @param[out] os       Output stream.
+ * @param[in]  coord_2d Coordinates2D to output.
+ *
+ * @return Modified output stream.
+ */
+inline ::std::ostream &operator<<(::std::ostream &os, const Coordinates2D &coord_2d)
+{
+    os << "{x=" << coord_2d.x << ", "
+       << "y=" << coord_2d.y << "}";
+    return os;
+}
+
+/** Converts a @ref Coordinates2D to string
+ *
+ * @param[in] coord_2d Coordinates2D value to be converted
+ *
+ * @return String representing the corresponding Coordinates2D
+ */
+inline std::string to_string(const Coordinates2D &coord_2d)
+{
+    std::stringstream str;
+    str << coord_2d;
+    return str.str();
+}
+
+/** Formatted output of the FuseBatchNormalizationType type.
+ *
+ * @param[out] os        Output stream.
+ * @param[in]  fuse_type FuseBatchNormalizationType to output.
+ *
+ * @return Modified output stream.
+ */
+inline ::std::ostream &operator<<(::std::ostream &os, const FuseBatchNormalizationType &fuse_type)
+{
+    switch (fuse_type)
+    {
+        case FuseBatchNormalizationType::CONVOLUTION:
+            os << "CONVOLUTION";
+            break;
+        case FuseBatchNormalizationType::DEPTHWISECONVOLUTION:
+            os << "DEPTHWISECONVOLUTION";
+            break;
+        default:
+            ARM_COMPUTE_ERROR("NOT_SUPPORTED!");
+    }
+    return os;
+}
+
+/** Converts a @ref FuseBatchNormalizationType to string
+ *
+ * @param[in] fuse_type FuseBatchNormalizationType value to be converted
+ *
+ * @return String representing the corresponding FuseBatchNormalizationType
+ */
+inline std::string to_string(const FuseBatchNormalizationType &fuse_type)
+{
+    std::stringstream str;
+    str << fuse_type;
+    return str.str();
+}
+
+/** Formatted output of the SoftmaxKernelInfo type.
+ *
+ * @param[out] os   Output stream.
+ * @param[in]  info SoftmaxKernelInfo to output.
+ *
+ * @return Modified output stream.
+ */
+inline ::std::ostream &operator<<(::std::ostream &os, const SoftmaxKernelInfo &info)
+{
+    os << "{beta=" << info.beta << ", "
+       << "is_log=" << info.is_log << ", "
+       << "input_data_type=" << info.input_data_type << ", "
+       << "axis=" << info.axis << "}";
+    return os;
+}
+
+/** Converts a @ref SoftmaxKernelInfo to string
+ *
+ * @param[in] info SoftmaxKernelInfo value to be converted
+ *
+ * @return String representing the corresponding SoftmaxKernelInfo
+ */
+inline std::string to_string(const SoftmaxKernelInfo &info)
+{
+    std::stringstream str;
+    str << info;
+    return str.str();
+}
+
+/** Formatted output of the LSTMParams type.
+ *
+ * @param[out] os          Output stream.
+ * @param[in]  lstm_params LSTMParams to output.
+ *
+ * @return Modified output stream.
+ */
+template <typename T>
+::std::ostream &operator<<(::std::ostream &os, const LSTMParams<T> &lstm_params)
+{
+    os << "{input_to_input_weights=" << to_string(lstm_params.input_to_input_weights()) << ", "
+       << "recurrent_to_input_weights=" << to_string(lstm_params.recurrent_to_input_weights()) << ", "
+       << "cell_to_input_weights=" << to_string(lstm_params.cell_to_input_weights()) << ", "
+       << "input_gate_bias=" << to_string(lstm_params.input_gate_bias()) << ", "
+       << "cell_to_forget_weights=" << to_string(lstm_params.cell_to_forget_weights()) << ", "
+       << "cell_to_output_weights=" << to_string(lstm_params.cell_to_output_weights()) << ", "
+       << "projection_weights=" << to_string(lstm_params.projection_weights()) << ", "
+       << "projection_bias=" << to_string(lstm_params.projection_bias()) << ", "
+       << "input_layer_norm_weights=" << to_string(lstm_params.input_layer_norm_weights()) << ", "
+       << "forget_layer_norm_weights=" << to_string(lstm_params.forget_layer_norm_weights()) << ", "
+       << "cell_layer_norm_weights=" << to_string(lstm_params.cell_layer_norm_weights()) << ", "
+       << "output_layer_norm_weights=" << to_string(lstm_params.output_layer_norm_weights()) << ", "
+       << "cell_clip=" << lstm_params.cell_clip() << ", "
+       << "projection_clip=" << lstm_params.projection_clip() << ", "
+       << "input_intermediate_scale=" << lstm_params.input_intermediate_scale() << ", "
+       << "forget_intermediate_scale=" << lstm_params.forget_intermediate_scale() << ", "
+       << "cell_intermediate_scale=" << lstm_params.cell_intermediate_scale() << ", "
+       << "hidden_state_zero=" << lstm_params.hidden_state_zero() << ", "
+       << "hidden_state_scale=" << lstm_params.hidden_state_scale() << ", "
+       << "has_peephole_opt=" << lstm_params.has_peephole_opt() << ", "
+       << "has_projection=" << lstm_params.has_projection() << ", "
+       << "has_cifg_opt=" << lstm_params.has_cifg_opt() << ", "
+       << "use_layer_norm=" << lstm_params.use_layer_norm() << "}";
+    return os;
+}
+
+/** Converts a @ref LSTMParams to string
+ *
+ * @param[in] lstm_params LSTMParams<T> value to be converted
+ *
+ * @return String representing the corresponding LSTMParams
+ */
+template <typename T>
+std::string to_string(const LSTMParams<T> &lstm_params)
+{
+    std::stringstream str;
+    str << lstm_params;
+    return str.str();
+}
+
+/** Converts a uint8_t to string
+ *
+ * @param[in] num uint8_t value to be converted
+ *
+ * @return String representing the corresponding uint8_t
+ */
+inline std::string to_string(const uint8_t num)
+{
+    // Explicitly cast the uint8_t to a signed integer and call std::to_string() on it,
+    // so the value is printed as a number rather than as a character.
+    return ::std::to_string(static_cast<int>(num));
+}
+
+/** Available non maxima suppression types */
+/** Formatted output of the NMSType type.
+ *
+ * @param[out] os       Output stream.
+ * @param[in]  nms_type NMSType to output.
+ *
+ * @return Modified output stream.
+ */ +inline ::std::ostream &operator<<(::std::ostream &os, const NMSType &nms_type) +{ + switch (nms_type) + { + case NMSType::LINEAR: + os << "LINEAR"; + break; + case NMSType::GAUSSIAN: + os << "GAUSSIAN"; + break; + case NMSType::ORIGINAL: + os << "ORIGINAL"; + break; + default: + ARM_COMPUTE_ERROR("NOT_SUPPORTED!"); + } + return os; +} + +/** Converts a @ref NMSType to string + * + * @param[in] nms_type NMSType value to be converted + * + * @return String representing the corresponding NMSType + */ +inline std::string to_string(const NMSType nms_type) +{ + std::stringstream str; + str << nms_type; + return str.str(); +} + +/** Formatted output of the BoxNMSLimitInfo type. + * + * @param[out] os Output stream. + * @param[in] info BoxNMSLimitInfo to output. + * + * @return Modified output stream. + */ +inline ::std::ostream &operator<<(::std::ostream &os, const BoxNMSLimitInfo &info) +{ + os << "{score_thresh=" << info.score_thresh() << ", " + << "nms=" << info.nms() << ", " + << "detections_per_im=" << info.detections_per_im() << ", " + << "soft_nms_enabled=" << info.soft_nms_enabled() << ", " + << "soft_nms_min_score_thres=" << info.soft_nms_min_score_thres() << ", " + << "suppress_size=" << info.suppress_size() << ", " + << "min_size=" << info.min_size() << ", " + << "im_width=" << info.im_width() << ", " + << "im_height=" << info.im_height() << "}"; + return os; +} + +/** Converts a @ref BoxNMSLimitInfo to string + * + * @param[in] info BoxNMSLimitInfo value to be converted + * + * @return String representing the corresponding BoxNMSLimitInfo + */ +inline std::string to_string(const BoxNMSLimitInfo &info) +{ + std::stringstream str; + str << info; + return str.str(); +} + +/** Converts a @ref DimensionRoundingType to string + * + * @param[in] rounding_type DimensionRoundingType value to be converted + * + * @return String representing the corresponding DimensionRoundingType + */ +inline std::string to_string(const DimensionRoundingType &rounding_type) +{ + std::stringstream str; + str << rounding_type; + return str.str(); +} + +/** Formatted output of the Conv3dInfo type. + * + * @param[out] os Output stream. + * @param[in] conv3d_info Type to output. + * + * @return Modified output stream. + */ +inline ::std::ostream &operator<<(::std::ostream &os, const Conv3dInfo &conv3d_info) +{ + os << conv3d_info.stride; + os << ";"; + os << conv3d_info.padding; + os << ";"; + os << to_string(conv3d_info.act_info); + os << ";"; + os << conv3d_info.dilation; + os << ";"; + os << conv3d_info.round_type; + os << ";"; + os << conv3d_info.enable_fast_math; + + return os; +} + +/** Formatted output of the Conv3dInfo type. + * + * @param[in] conv3d_info Type to output. + * + * @return Formatted string. + */ +inline std::string to_string(const Conv3dInfo &conv3d_info) +{ + std::stringstream str; + str << conv3d_info; + return str.str(); +} + +/** Formatted output of the arm_compute::WeightFormat type. + * + * @param[in] wf arm_compute::WeightFormat Type to output. + * + * @return Formatted string. 
+ */ +inline std::string to_string(const WeightFormat wf) +{ +#define __CASE_WEIGHT_FORMAT(wf) \ + case WeightFormat::wf: \ + return #wf; + switch (wf) + { + __CASE_WEIGHT_FORMAT(UNSPECIFIED) + __CASE_WEIGHT_FORMAT(ANY) + __CASE_WEIGHT_FORMAT(OHWI) + __CASE_WEIGHT_FORMAT(OHWIo2) + __CASE_WEIGHT_FORMAT(OHWIo4) + __CASE_WEIGHT_FORMAT(OHWIo8) + __CASE_WEIGHT_FORMAT(OHWIo16) + __CASE_WEIGHT_FORMAT(OHWIo32) + __CASE_WEIGHT_FORMAT(OHWIo64) + __CASE_WEIGHT_FORMAT(OHWIo128) + __CASE_WEIGHT_FORMAT(OHWIo4i2) + __CASE_WEIGHT_FORMAT(OHWIo4i2_bf16) + __CASE_WEIGHT_FORMAT(OHWIo8i2) + __CASE_WEIGHT_FORMAT(OHWIo8i2_bf16) + __CASE_WEIGHT_FORMAT(OHWIo16i2) + __CASE_WEIGHT_FORMAT(OHWIo16i2_bf16) + __CASE_WEIGHT_FORMAT(OHWIo32i2) + __CASE_WEIGHT_FORMAT(OHWIo32i2_bf16) + __CASE_WEIGHT_FORMAT(OHWIo64i2) + __CASE_WEIGHT_FORMAT(OHWIo64i2_bf16) + __CASE_WEIGHT_FORMAT(OHWIo4i4) + __CASE_WEIGHT_FORMAT(OHWIo4i4_bf16) + __CASE_WEIGHT_FORMAT(OHWIo8i4) + __CASE_WEIGHT_FORMAT(OHWIo8i4_bf16) + __CASE_WEIGHT_FORMAT(OHWIo16i4) + __CASE_WEIGHT_FORMAT(OHWIo16i4_bf16) + __CASE_WEIGHT_FORMAT(OHWIo32i4) + __CASE_WEIGHT_FORMAT(OHWIo32i4_bf16) + __CASE_WEIGHT_FORMAT(OHWIo64i4) + __CASE_WEIGHT_FORMAT(OHWIo64i4_bf16) + __CASE_WEIGHT_FORMAT(OHWIo2i8) + __CASE_WEIGHT_FORMAT(OHWIo4i8) + __CASE_WEIGHT_FORMAT(OHWIo8i8) + __CASE_WEIGHT_FORMAT(OHWIo16i8) + __CASE_WEIGHT_FORMAT(OHWIo32i8) + __CASE_WEIGHT_FORMAT(OHWIo64i8) + default: + return "invalid value"; + } +#undef __CASE_WEIGHT_FORMAT +} + +/** Formatted output of the arm_compute::WeightFormat type. + * + * @param[out] os Output stream. + * @param[in] wf WeightFormat to output. + * + * @return Modified output stream. + */ +inline ::std::ostream &operator<<(::std::ostream &os, const arm_compute::WeightFormat &wf) +{ + os << to_string(wf); + return os; +} + +/** Formatted output of the std::tuple<TensorShape, TensorShape, arm_compute::WeightFormat> tuple. + * + * @param[in] values tuple of input and output tensor shapes and WeightFormat used. + * + * @return Formatted string. + */ +inline std::string to_string(const std::tuple<TensorShape, TensorShape, arm_compute::WeightFormat> values) +{ + std::stringstream str; + str << "[Input shape = " << std::get<0>(values); + str << ", "; + str << "Expected output shape = " << std::get<1>(values); + + str << ", "; + str << "WeightFormat = " << std::get<2>(values) << "]"; + return str.str(); +} + +/** Formatted output of the Padding2D type. + * + * @param[out] os Output stream. + * @param[in] padding2d Padding info for 2D dimension shape. + * + * @return Modified output stream. + */ +inline ::std::ostream &operator<<(::std::ostream &os, const Padding2D &padding2d) +{ + os << padding2d.left << "," << padding2d.right << "," << padding2d.top << "," << padding2d.bottom; + return os; +} + +/** Converts a @ref Padding2D to string + * + * @param[in] padding2d Padding2D value to be converted + * + * @return String representing the corresponding Padding2D + */ +inline std::string to_string(const Padding2D &padding2d) +{ + std::stringstream str; + str << padding2d; + return str.str(); +} + +/** Formatted output of the arm_compute::experimental::dynamic_fusion::Pool2dAttributes type. + * + * @param[out] os Output stream. + * @param[in] pool2d_attr arm_compute::experimental::dynamic_fusion::Pool2dAttributes type to output. + * + * @return Modified output stream. 
+ */
+inline ::std::ostream &operator<<(::std::ostream &os, const experimental::dynamic_fusion::Pool2dAttributes &pool2d_attr)
+{
+    os << "Pool2dAttributes="
+       << "["
+       << "PoolingType=" << pool2d_attr.pool_type() << ","
+       << "PoolSize=" << pool2d_attr.pool_size() << ","
+       << "Padding=" << pool2d_attr.pad() << ","
+       << "Stride=" << pool2d_attr.stride() << ","
+       << "ExcludePadding=" << pool2d_attr.exclude_padding() << "]";
+
+    return os;
+}
+
+/** Formatted output of the arm_compute::experimental::dynamic_fusion::Pool2dAttributes type.
+ *
+ * @param[in] pool2d_attr arm_compute::experimental::dynamic_fusion::Pool2dAttributes type to output.
+ *
+ * @return Formatted string.
+ */
+inline std::string to_string(const experimental::dynamic_fusion::Pool2dAttributes &pool2d_attr)
+{
+    std::stringstream str;
+    str << pool2d_attr;
+    return str.str();
+}
+
+/** Formatted output of the arm_compute::experimental::dynamic_fusion::GpuPool2dSettings type.
+ *
+ * @param[out] os       Output stream.
+ * @param[in]  settings arm_compute::experimental::dynamic_fusion::GpuPool2dSettings type to output.
+ *
+ * @return Modified output stream.
+ */
+inline ::std::ostream &operator<<(::std::ostream &os, const experimental::dynamic_fusion::GpuPool2dSettings &settings)
+{
+    os << "Settings="
+       << "["
+       << "UseInfAsLimit=" << settings.use_inf_as_limit() << "]";
+    return os;
+}
+
+/** Formatted output of the arm_compute::experimental::dynamic_fusion::GpuPool2dSettings type.
+ *
+ * @param[in] settings arm_compute::experimental::dynamic_fusion::GpuPool2dSettings type to output.
+ *
+ * @return Formatted string.
+ */
+inline std::string to_string(const experimental::dynamic_fusion::GpuPool2dSettings &settings)
+{
+    std::stringstream str;
+    str << settings;
+    return str.str();
+}
+
+/** Formatted output of the arm_compute::experimental::dynamic_fusion::Conv2dAttributes type.
+ *
+ * @param[out] os Output stream.
+ * @param[in] conv2d_attr arm_compute::experimental::dynamic_fusion::Conv2dAttributes type to output.
+ *
+ * @return Modified output stream.
+ */
+inline ::std::ostream &operator<<(::std::ostream &os, const experimental::dynamic_fusion::Conv2dAttributes &conv2d_attr)
+{
+    os << "Conv2dAttributes="
+       << "["
+       << "Padding=" << conv2d_attr.pad() << ", "
+       << "Size2D=" << conv2d_attr.stride() << ", "
+       << "Dilation=" << conv2d_attr.dilation() << "]";
+
+    return os;
+}
+
+/** Formatted output of the arm_compute::experimental::dynamic_fusion::Conv2dAttributes type.
+ *
+ * @param[in] conv2d_attr arm_compute::experimental::dynamic_fusion::Conv2dAttributes type to output.
+ *
+ * @return Formatted string.
+ */
+inline std::string to_string(const experimental::dynamic_fusion::Conv2dAttributes &conv2d_attr)
+{
+    std::stringstream str;
+    str << conv2d_attr;
+    return str.str();
+}
+
+/** Formatted output of the arm_compute::experimental::dynamic_fusion::CastAttributes type.
+ *
+ * @param[out] os Output stream.
+ * @param[in] cast_attr arm_compute::experimental::dynamic_fusion::CastAttributes type to output.
+ *
+ * @return Modified output stream.
+ */
+inline ::std::ostream &operator<<(::std::ostream &os, const experimental::dynamic_fusion::CastAttributes &cast_attr)
+{
+    os << "CastAttributes="
+       << "["
+       << "Data Type=" << cast_attr.data_type() << ", "
+       << "Convert Policy=" << cast_attr.convert_policy() << "]";
+
+    return os;
+}
+/** Formatted output of the arm_compute::experimental::dynamic_fusion::CastAttributes type.
+ *
+ * @param[in] cast_attr arm_compute::experimental::dynamic_fusion::CastAttributes type to output.
+ *
+ * @return Formatted string.
+ */
+inline std::string to_string(const experimental::dynamic_fusion::CastAttributes &cast_attr)
+{
+    std::stringstream str;
+    str << cast_attr;
+    return str.str();
+}
+
+/** Formatted output of the arm_compute::experimental::dynamic_fusion::DepthwiseConv2dAttributes type.
+ *
+ * @param[out] os Output stream.
+ * @param[in] dw_conv2d_attr arm_compute::experimental::dynamic_fusion::DepthwiseConv2dAttributes type to output.
+ *
+ * @return Modified output stream.
+ */
+inline ::std::ostream &operator<<(::std::ostream &os,
+                                  const experimental::dynamic_fusion::DepthwiseConv2dAttributes &dw_conv2d_attr)
+{
+    os << "DepthwiseConv2dAttributes="
+       << "["
+       << "Padding=" << dw_conv2d_attr.pad() << ", "
+       << "Size2D=" << dw_conv2d_attr.stride() << ", "
+       << "Depth Multiplier=" << dw_conv2d_attr.depth_multiplier() << ", "
+       << "Dilation=" << dw_conv2d_attr.dilation() << ", "
+       << "DimensionRoundingType=" << dw_conv2d_attr.dimension_rounding_type() << "]";
+
+    return os;
+}
+/** Formatted output of the arm_compute::experimental::dynamic_fusion::DepthwiseConv2dAttributes type.
+ *
+ * @param[in] dw_conv2d_attr arm_compute::experimental::dynamic_fusion::DepthwiseConv2dAttributes type to output.
+ *
+ * @return Formatted string.
+ */
+inline std::string to_string(const experimental::dynamic_fusion::DepthwiseConv2dAttributes &dw_conv2d_attr)
+{
+    std::stringstream str;
+    str << dw_conv2d_attr;
+    return str.str();
+}
+
+/** Formatted output of the arm_compute::experimental::dynamic_fusion::ClampAttributes type.
+ *
+ * @param[out] os Output stream.
+ * @param[in] clamp_attr arm_compute::experimental::dynamic_fusion::ClampAttributes type to output.
+ *
+ * @return Modified output stream.
+ */
+inline ::std::ostream &operator<<(::std::ostream &os, const experimental::dynamic_fusion::ClampAttributes &clamp_attr)
+{
+    os << "ClampAttributes="
+       << "["
+       << "Min value=" << clamp_attr.min_val() << ", "
+       << "Max value=" << clamp_attr.max_val() << "]";
+    return os;
+}
+/** Formatted output of the arm_compute::experimental::dynamic_fusion::ClampAttributes type.
+ *
+ * @param[in] clamp_attr arm_compute::experimental::dynamic_fusion::ClampAttributes type to output.
+ *
+ * @return Formatted string.
+ */
+inline std::string to_string(const experimental::dynamic_fusion::ClampAttributes &clamp_attr)
+{
+    std::stringstream str;
+    str << clamp_attr;
+    return str.str();
+}
+
+/** Formatted output of the arm_compute::experimental::dynamic_fusion::ResizeAttributes type.
+ *
+ * @param[out] os Output stream.
+ * @param[in] resize_attr arm_compute::experimental::dynamic_fusion::ResizeAttributes type to output.
+ *
+ * @return Modified output stream.
+ */
+inline ::std::ostream &operator<<(::std::ostream &os, const experimental::dynamic_fusion::ResizeAttributes &resize_attr)
+{
+    os << "ResizeAttributes="
+       << "["
+       << "AlignCorners=" << resize_attr.align_corners() << ", "
+       << "InterpolationPolicy=" << resize_attr.interpolation_policy() << ", "
+       << "OutputHeight=" << resize_attr.output_height() << ", "
+       << "OutputWidth=" << resize_attr.output_width() << ", "
+       << "SamplingPolicy=" << resize_attr.sampling_policy() << "]";
+    return os;
+}
+
+/** Formatted output of the arm_compute::experimental::dynamic_fusion::ResizeAttributes type.
+ *
+ * @param[in] resize_attr arm_compute::experimental::dynamic_fusion::ResizeAttributes type to output.
+ *
+ * @return Formatted string.
+ */
+inline std::string to_string(const experimental::dynamic_fusion::ResizeAttributes &resize_attr)
+{
+    std::stringstream str;
+    str << resize_attr;
+    return str.str();
+}
+
+/** Formatted output of the arm_compute::experimental::dynamic_fusion::SoftmaxAttributes type.
+ *
+ * @param[out] os Output stream.
+ * @param[in] softmax_attr arm_compute::experimental::dynamic_fusion::SoftmaxAttributes type to output.
+ *
+ * @return Modified output stream.
+ */
+inline ::std::ostream &operator<<(::std::ostream &os,
+                                  const experimental::dynamic_fusion::SoftmaxAttributes &softmax_attr)
+{
+    os << "SoftmaxAttributes="
+       << "["
+       << "Beta=" << softmax_attr.beta() << ", "
+       << "Is Log Softmax=" << softmax_attr.is_log_softmax() << ", "
+       << "Axis=" << softmax_attr.axis() << "]";
+    return os;
+}
+/** Formatted output of the arm_compute::experimental::dynamic_fusion::SoftmaxAttributes type.
+ *
+ * @param[in] softmax_attr arm_compute::experimental::dynamic_fusion::SoftmaxAttributes type to output.
+ *
+ * @return Formatted string.
+ */
+inline std::string to_string(const experimental::dynamic_fusion::SoftmaxAttributes &softmax_attr)
+{
+    std::stringstream str;
+    str << softmax_attr;
+    return str.str();
+}
+/** Formatted output of the arm_compute::MatMulInfo type.
+ *
+ * @param[out] os Output stream.
+ * @param[in] matmul_info arm_compute::MatMulInfo type to output.
+ *
+ * @return Modified output stream.
+ */
+inline ::std::ostream &operator<<(::std::ostream &os, const arm_compute::MatMulInfo &matmul_info)
+{
+    os << "MatMulInfo="
+       << "["
+       << "adj_lhs=" << matmul_info.adj_lhs() << ", "
+       << "adj_rhs=" << matmul_info.adj_rhs() << "] ";
+    return os;
+}
+/** Formatted output of the arm_compute::MatMulInfo type.
+ *
+ * @param[in] matmul_info arm_compute::MatMulInfo type to output.
+ *
+ * @return Formatted string.
+ */
+inline std::string to_string(const arm_compute::MatMulInfo &matmul_info)
+{
+    std::stringstream str;
+    str << matmul_info;
+    return str.str();
+}
+
+/** Formatted output of the arm_compute::MatMulKernelInfo type.
+ *
+ * @param[out] os Output stream.
+ * @param[in] matmul_info arm_compute::MatMulKernelInfo type to output.
+ *
+ * @return Modified output stream.
+ */
+inline ::std::ostream &operator<<(::std::ostream &os, const arm_compute::MatMulKernelInfo &matmul_info)
+{
+    os << "MatMulKernelInfo="
+       << "["
+       << "adj_lhs=" << matmul_info.adj_lhs << ", "
+       << "adj_rhs=" << matmul_info.adj_rhs << ", "
+       << "M0=" << matmul_info.m0 << ", "
+       << "N0=" << matmul_info.n0 << ", "
+       << "K0=" << matmul_info.k0 << ", "
+       << "export_rhs_to_cl_image=" << matmul_info.export_rhs_to_cl_image << "]";
+
+    return os;
+}
+/** Formatted output of the arm_compute::MatMulKernelInfo type.
+ *
+ * @param[in] matmul_info arm_compute::MatMulKernelInfo type to output.
+ *
+ * @return Formatted string.
+ */
+inline std::string to_string(const arm_compute::MatMulKernelInfo &matmul_info)
+{
+    std::stringstream str;
+    str << matmul_info;
+    return str.str();
+}
+
+/** Formatted output of the arm_compute::CpuMatMulSettings type.
+ *
+ * @param[out] os Output stream.
+ * @param[in] settings arm_compute::CpuMatMulSettings type to output.
+ *
+ * @return Modified output stream.
+ */
+inline ::std::ostream &operator<<(::std::ostream &os, const arm_compute::CpuMatMulSettings &settings)
+{
+    os << "CpuMatMulSettings="
+       << "["
+       << "fast_math=" << settings.fast_math() << ", fixed_format=" << settings.fixed_format() << "]";
+
+    return os;
+}
+/** Formatted output of the arm_compute::CpuMatMulSettings type.
+ * + * @param[in] settings arm_compute::CpuMatMulSettings type to output. + * + * @return Formatted string. + */ +inline std::string to_string(const arm_compute::CpuMatMulSettings &settings) +{ + std::stringstream str; + str << settings; + return str.str(); +} + +/** Formatted output of the scatter function type. + * + * @param[out] os Output stream. + * @param[in] function arm_compute::ScatterFunction type to output. + * + * @return Modified output stream. + */ +inline ::std::ostream &operator<<(::std::ostream &os, const ScatterFunction &function) +{ + switch (function) + { + case ScatterFunction::Update: + os << "UPDATE"; + break; + case ScatterFunction::Add: + os << "ADD"; + break; + case ScatterFunction::Sub: + os << "SUB"; + break; + case ScatterFunction::Max: + os << "MAX"; + break; + case ScatterFunction::Min: + os << "MIN"; + break; + default: + ARM_COMPUTE_ERROR("NOT_SUPPORTED!"); + } + return os; +} +/** Formatted output of the arm_compute::ScatterFunction type. + * + * @param[in] func arm_compute::ScatterFunction type to output. + * + * @return Formatted string. + */ +inline std::string to_string(const arm_compute::ScatterFunction &func) +{ + std::stringstream str; + str << func; + return str.str(); +} +/** Formatted output of the arm_compute::ScatterInfo type. + * + * @param[out] os Output stream. + * @param[in] info arm_compute::ScatterInfo type to output. + * + * @return Modified output stream. + */ +inline ::std::ostream &operator<<(::std::ostream &os, const arm_compute::ScatterInfo &info) +{ + os << "ScatterInfo=" + << "[" + << "Function=" << info.func << ", " + << "InitialiseZero=" << info.zero_initialization << "] "; + return os; +} +/** Formatted output of the arm_compute::ScatterInfo type. + * + * @param[in] info arm_compute::ScatterInfo type to output. + * + * @return Formatted string. + */ +inline std::string to_string(const arm_compute::ScatterInfo &info) +{ + std::stringstream str; + str << info; + return str.str(); +} + } // namespace arm_compute -#endif /* __ARM_COMPUTE_TYPE_PRINTER_H__ */ +#endif // ACL_UTILS_TYPEPRINTER_H diff --git a/utils/Utils.cpp b/utils/Utils.cpp index 7380ad7909..a143dc497f 100644 --- a/utils/Utils.cpp +++ b/utils/Utils.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2019 Arm Limited. + * Copyright (c) 2017-2019, 2023 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -59,7 +59,7 @@ namespace */ void discard_comments(std::ifstream &fs) { - while(fs.peek() == '#') + while (fs.peek() == '#') { fs.ignore(std::numeric_limits<std::streamsize>::max(), '\n'); } @@ -71,11 +71,11 @@ void discard_comments(std::ifstream &fs) */ void discard_comments_and_spaces(std::ifstream &fs) { - while(true) + while (true) { discard_comments(fs); - if(isspace(fs.peek()) == 0) + if (isspace(fs.peek()) == 0) { break; } @@ -88,13 +88,12 @@ void discard_comments_and_spaces(std::ifstream &fs) #ifndef BENCHMARK_EXAMPLES int run_example(int argc, char **argv, std::unique_ptr<Example> example) { - std::cout << "\n" - << argv[0] << "\n\n"; + std::cout << "\n" << argv[0] << "\n\n"; try { bool status = example->do_setup(argc, argv); - if(!status) + if (!status) { return 1; } @@ -105,19 +104,17 @@ int run_example(int argc, char **argv, std::unique_ptr<Example> example) return 0; } #ifdef ARM_COMPUTE_CL - catch(cl::Error &err) + catch (cl::Error &err) { std::cerr << "!!!!!!!!!!!!!!!!!!!!!!!!!!!" 
<< std::endl; - std::cerr << std::endl - << "ERROR " << err.what() << "(" << err.err() << ")" << std::endl; + std::cerr << std::endl << "ERROR " << err.what() << "(" << err.err() << ")" << std::endl; std::cerr << "!!!!!!!!!!!!!!!!!!!!!!!!!!!" << std::endl; } #endif /* ARM_COMPUTE_CL */ - catch(std::runtime_error &err) + catch (std::runtime_error &err) { std::cerr << "!!!!!!!!!!!!!!!!!!!!!!!!!!!" << std::endl; - std::cerr << std::endl - << "ERROR " << err.what() << " " << (errno ? strerror(errno) : "") << std::endl; + std::cerr << std::endl << "ERROR " << err.what() << " " << (errno ? strerror(errno) : "") << std::endl; std::cerr << "!!!!!!!!!!!!!!!!!!!!!!!!!!!" << std::endl; } @@ -131,13 +128,15 @@ void draw_detection_rectangle(ITensor *tensor, const DetectionWindow &rect, uint { ARM_COMPUTE_ERROR_ON_FORMAT_NOT_IN(tensor, Format::RGB888); - uint8_t *top = tensor->info()->offset_element_in_bytes(Coordinates(rect.x, rect.y)) + tensor->buffer(); - uint8_t *bottom = tensor->info()->offset_element_in_bytes(Coordinates(rect.x, rect.y + rect.height)) + tensor->buffer(); - uint8_t *left = top; - uint8_t *right = tensor->info()->offset_element_in_bytes(Coordinates(rect.x + rect.width, rect.y)) + tensor->buffer(); - size_t stride = tensor->info()->strides_in_bytes()[Window::DimY]; + uint8_t *top = tensor->info()->offset_element_in_bytes(Coordinates(rect.x, rect.y)) + tensor->buffer(); + uint8_t *bottom = + tensor->info()->offset_element_in_bytes(Coordinates(rect.x, rect.y + rect.height)) + tensor->buffer(); + uint8_t *left = top; + uint8_t *right = + tensor->info()->offset_element_in_bytes(Coordinates(rect.x + rect.width, rect.y)) + tensor->buffer(); + size_t stride = tensor->info()->strides_in_bytes()[Window::DimY]; - for(size_t x = 0; x < rect.width; ++x) + for (size_t x = 0; x < rect.width; ++x) { top[0] = r; top[1] = g; @@ -150,7 +149,7 @@ void draw_detection_rectangle(ITensor *tensor, const DetectionWindow &rect, uint bottom += 3; } - for(size_t y = 0; y < rect.height; ++y) + for (size_t y = 0; y < rect.height; ++y) { left[0] = r; left[1] = g; @@ -176,22 +175,22 @@ ImageType get_image_type_from_file(const std::string &filename) fs.open(filename, std::ios::in | std::ios::binary); // Identify type from magic number - std::array<unsigned char, 2> magic_number{ { 0 } }; + std::array<unsigned char, 2> magic_number{{0}}; fs >> magic_number[0] >> magic_number[1]; // PPM check - if(static_cast<char>(magic_number[0]) == 'P' && static_cast<char>(magic_number[1]) == '6') + if (static_cast<char>(magic_number[0]) == 'P' && static_cast<char>(magic_number[1]) == '6') { type = ImageType::PPM; } - else if(magic_number[0] == 0xFF && magic_number[1] == 0xD8) + else if (magic_number[0] == 0xFF && magic_number[1] == 0xD8) { type = ImageType::JPEG; } fs.close(); } - catch(std::runtime_error &e) + catch (std::runtime_error &e) { ARM_COMPUTE_ERROR_VAR("Accessing %s: %s", filename.c_str(), e.what()); } @@ -202,7 +201,7 @@ ImageType get_image_type_from_file(const std::string &filename) std::tuple<unsigned int, unsigned int, int> parse_ppm_header(std::ifstream &fs) { // Check the PPM magic number is valid - std::array<char, 2> magic_number{ { 0 } }; + std::array<char, 2> magic_number{{0}}; fs >> magic_number[0] >> magic_number[1]; ARM_COMPUTE_ERROR_ON_MSG(magic_number[0] != 'P' || magic_number[1] != '6', "Invalid file type"); ARM_COMPUTE_UNUSED(magic_number); @@ -230,21 +229,20 @@ std::tuple<unsigned int, unsigned int, int> parse_ppm_header(std::ifstream &fs) return std::make_tuple(width, height, max_val); } 
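
For readers following the image utilities touched above, the sketch below shows one way the magic-number probe in get_image_type_from_file might be driven from application code. It is not part of this patch: the describe_image helper and the filename are invented for illustration, and it relies only on the ImageType values visible in the hunk above, falling back to a default branch for anything else.

    #include "utils/Utils.h"

    #include <iostream>
    #include <string>

    // Hypothetical helper: report the format detected by the magic-number probe.
    void describe_image(const std::string &filename)
    {
        using arm_compute::utils::ImageType;

        switch (arm_compute::utils::get_image_type_from_file(filename))
        {
            case ImageType::PPM: // 'P' '6' header, handled by parse_ppm_header()
                std::cout << filename << ": PPM image\n";
                break;
            case ImageType::JPEG: // 0xFF 0xD8 marker
                std::cout << filename << ": JPEG image\n";
                break;
            default:
                std::cout << filename << ": unrecognised format\n";
                break;
        }
    }
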
-std::tuple<std::vector<unsigned long>, bool, std::string> parse_npy_header(std::ifstream &fs) //NOLINT +npy::header_t parse_npy_header(std::ifstream &fs) //NOLINT { - std::vector<unsigned long> shape; // NOLINT - // Read header - std::string header = npy::read_header(fs); + std::string header_s = npy::read_header(fs); // Parse header - bool fortran_order = false; - std::string typestr; - npy::parse_header(header, typestr, fortran_order, shape); + npy::header_t header = npy::parse_header(header_s); + + bool fortran_order = false; + std::vector<unsigned long> shape = header.shape; std::reverse(shape.begin(), shape.end()); - return std::make_tuple(shape, fortran_order, typestr); + return npy::header_t{header.dtype, fortran_order, shape}; } /** This function returns the amount of memory free reading from /proc/meminfo @@ -256,15 +254,15 @@ uint64_t get_mem_free_from_meminfo() std::string line_attribute; std::ifstream file_meminfo("/proc/meminfo"); - if(file_meminfo.is_open()) + if (file_meminfo.is_open()) { - while(!(file_meminfo >> line_attribute).fail()) + while (!(file_meminfo >> line_attribute).fail()) { //Test if is the line containing MemFree - if(line_attribute == "MemFree:") + if (line_attribute == "MemFree:") { uint64_t mem_available; - if(!(file_meminfo >> mem_available).fail()) + if (!(file_meminfo >> mem_available).fail()) { return mem_available; } diff --git a/utils/Utils.h b/utils/Utils.h index d46fbc3633..626cbcf07f 100644 --- a/utils/Utils.h +++ b/utils/Utils.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2021 Arm Limited. + * Copyright (c) 2016-2023 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -87,9 +87,9 @@ public: return true; }; /** Run the example. */ - virtual void do_run() {}; + virtual void do_run(){}; /** Teardown the example. */ - virtual void do_teardown() {}; + virtual void do_teardown(){}; /** Default destructor. */ virtual ~Example() = default; @@ -117,7 +117,8 @@ int run_example(int argc, char **argv) * @param[in] g Green colour to use * @param[in] b Blue colour to use */ -void draw_detection_rectangle(arm_compute::ITensor *tensor, const arm_compute::DetectionWindow &rect, uint8_t r, uint8_t g, uint8_t b); +void draw_detection_rectangle( + arm_compute::ITensor *tensor, const arm_compute::DetectionWindow &rect, uint8_t r, uint8_t g, uint8_t b); /** Gets image type given a file * @@ -143,7 +144,7 @@ std::tuple<unsigned int, unsigned int, int> parse_ppm_header(std::ifstream &fs); * * @return The width and height stored in the header of the NPY file */ -std::tuple<std::vector<unsigned long>, bool, std::string> parse_npy_header(std::ifstream &fs); +npy::header_t parse_npy_header(std::ifstream &fs); /** Obtain numpy type string from DataType. 
* @@ -157,7 +158,7 @@ inline std::string get_typestring(DataType data_type) const unsigned int i = 1; const char *c = reinterpret_cast<const char *>(&i); std::string endianness; - if(*c == 1) + if (*c == 1) { endianness = std::string("<"); } @@ -167,7 +168,7 @@ inline std::string get_typestring(DataType data_type) } const std::string no_endianness("|"); - switch(data_type) + switch (data_type) { case DataType::U8: case DataType::QASYMM8: @@ -253,7 +254,8 @@ inline void unmap(CLTensor &tensor) template <typename T> class uniform_real_distribution_16bit { - static_assert(std::is_same<T, half>::value || std::is_same<T, bfloat16>::value, "Only half and bfloat16 data types supported"); + static_assert(std::is_same<T, half>::value || std::is_same<T, bfloat16>::value, + "Only half and bfloat16 data types supported"); public: using result_type = T; @@ -262,8 +264,7 @@ public: * @param[in] min Minimum value of the distribution * @param[in] max Maximum value of the distribution */ - explicit uniform_real_distribution_16bit(float min = 0.f, float max = 1.0) - : dist(min, max) + explicit uniform_real_distribution_16bit(float min = 0.f, float max = 1.0) : dist(min, max) { } @@ -285,8 +286,7 @@ class NPYLoader { public: /** Default constructor */ - NPYLoader() - : _fs(), _shape(), _fortran_order(false), _typestring(), _file_layout(DataLayout::NCHW) + NPYLoader() : _fs(), _shape(), _fortran_order(false), _typestring(), _file_layout(DataLayout::NCHW) { } @@ -305,9 +305,12 @@ public: _fs.exceptions(std::ifstream::failbit | std::ifstream::badbit); _file_layout = file_layout; - std::tie(_shape, _fortran_order, _typestring) = parse_npy_header(_fs); + npy::header_t header = parse_npy_header(_fs); + _shape = header.shape; + _fortran_order = header.fortran_order; + _typestring = header.dtype.str(); } - catch(const std::ifstream::failure &e) + catch (const std::ifstream::failure &e) { ARM_COMPUTE_ERROR_VAR("Accessing %s: %s", npy_filename.c_str(), e.what()); } @@ -338,10 +341,10 @@ public: // Use the size of the input NPY tensor TensorShape shape; shape.set_num_dimensions(_shape.size()); - for(size_t i = 0; i < _shape.size(); ++i) + for (size_t i = 0; i < _shape.size(); ++i) { size_t src = i; - if(_fortran_order) + if (_fortran_order) { src = _shape.size() - 1 - i; } @@ -362,7 +365,8 @@ public: void fill_tensor(T &tensor) { ARM_COMPUTE_ERROR_ON(!is_open()); - ARM_COMPUTE_ERROR_ON_DATA_TYPE_NOT_IN(&tensor, arm_compute::DataType::QASYMM8, arm_compute::DataType::S32, arm_compute::DataType::F32, arm_compute::DataType::F16); + ARM_COMPUTE_ERROR_ON_DATA_TYPE_NOT_IN(&tensor, arm_compute::DataType::QASYMM8, arm_compute::DataType::S32, + arm_compute::DataType::F32, arm_compute::DataType::F16); try { // Map buffer if creating a CLTensor @@ -374,21 +378,37 @@ public: const size_t end_position = _fs.tellg(); _fs.seekg(current_position, std::ios_base::beg); - ARM_COMPUTE_ERROR_ON_MSG((end_position - current_position) < tensor.info()->tensor_shape().total_size() * tensor.info()->element_size(), + ARM_COMPUTE_ERROR_ON_MSG((end_position - current_position) < + tensor.info()->tensor_shape().total_size() * tensor.info()->element_size(), "Not enough data in file"); ARM_COMPUTE_UNUSED(end_position); // Check if the typestring matches the given one std::string expect_typestr = get_typestring(tensor.info()->data_type()); - ARM_COMPUTE_ERROR_ON_MSG(_typestring != expect_typestr, "Typestrings mismatch"); + + bool enable_f32_to_f16_conversion = false; + if (_typestring != expect_typestr) + { + const std::string f32_typestring = "<f4"; + 
const std::string f16_typestring = "<f2"; + // if typestring does not match, check whether _typestring is F32 and can be downcasted to expect_typestr + if (_typestring == f32_typestring && expect_typestr == f16_typestring) + { + enable_f32_to_f16_conversion = true; + } + else + { + ARM_COMPUTE_ERROR("Typestrings mismatch"); + } + } bool are_layouts_different = (_file_layout != tensor.info()->data_layout()); // Correct dimensions (Needs to match TensorShape dimension corrections) - if(_shape.size() != tensor.info()->tensor_shape().num_dimensions()) + if (_shape.size() != tensor.info()->tensor_shape().num_dimensions()) { - for(int i = static_cast<int>(_shape.size()) - 1; i > 0; --i) + for (int i = static_cast<int>(_shape.size()) - 1; i > 0; --i) { - if(_shape[i] == 1) + if (_shape[i] == 1) { _shape.pop_back(); } @@ -401,22 +421,28 @@ public: TensorShape permuted_shape = tensor.info()->tensor_shape(); arm_compute::PermutationVector perm; - if(are_layouts_different && tensor.info()->tensor_shape().num_dimensions() > 2) + if (are_layouts_different && tensor.info()->tensor_shape().num_dimensions() > 2) { - perm = (tensor.info()->data_layout() == arm_compute::DataLayout::NHWC) ? arm_compute::PermutationVector(2U, 0U, 1U) : arm_compute::PermutationVector(1U, 2U, 0U); - arm_compute::PermutationVector perm_vec = (tensor.info()->data_layout() == arm_compute::DataLayout::NCHW) ? arm_compute::PermutationVector(2U, 0U, 1U) : arm_compute::PermutationVector(1U, 2U, 0U); + perm = (tensor.info()->data_layout() == arm_compute::DataLayout::NHWC) + ? arm_compute::PermutationVector(2U, 0U, 1U) + : arm_compute::PermutationVector(1U, 2U, 0U); + arm_compute::PermutationVector perm_vec = + (tensor.info()->data_layout() == arm_compute::DataLayout::NCHW) + ? arm_compute::PermutationVector(2U, 0U, 1U) + : arm_compute::PermutationVector(1U, 2U, 0U); arm_compute::permute(permuted_shape, perm_vec); } // Validate tensor shape - ARM_COMPUTE_ERROR_ON_MSG(_shape.size() != tensor.info()->tensor_shape().num_dimensions(), "Tensor ranks mismatch"); - for(size_t i = 0; i < _shape.size(); ++i) + ARM_COMPUTE_ERROR_ON_MSG(_shape.size() != tensor.info()->tensor_shape().num_dimensions(), + "Tensor ranks mismatch"); + for (size_t i = 0; i < _shape.size(); ++i) { ARM_COMPUTE_ERROR_ON_MSG(permuted_shape[i] != _shape[i], "Tensor dimensions mismatch"); } - switch(tensor.info()->data_type()) + switch (tensor.info()->data_type()) { case arm_compute::DataType::QASYMM8: case arm_compute::DataType::S32: @@ -424,7 +450,8 @@ public: case arm_compute::DataType::F16: { // Read data - if(!are_layouts_different && !_fortran_order && tensor.info()->padding().empty()) + if (!are_layouts_different && !_fortran_order && tensor.info()->padding().empty() && + !enable_f32_to_f16_conversion) { // If tensor has no padding read directly from stream. _fs.read(reinterpret_cast<char *>(tensor.buffer()), tensor.info()->total_size()); @@ -434,19 +461,19 @@ public: // If tensor has padding or is in fortran order accessing tensor elements through execution window. 
Window window; const unsigned int num_dims = _shape.size(); - if(_fortran_order) + if (_fortran_order) { - for(unsigned int dim = 0; dim < num_dims; dim++) + for (unsigned int dim = 0; dim < num_dims; dim++) { permuted_shape.set(dim, _shape[num_dims - dim - 1]); perm.set(dim, num_dims - dim - 1); } - if(are_layouts_different) + if (are_layouts_different) { // Permute only if num_dimensions greater than 2 - if(num_dims > 2) + if (num_dims > 2) { - if(_file_layout == DataLayout::NHWC) // i.e destination is NCHW --> permute(1,2,0) + if (_file_layout == DataLayout::NHWC) // i.e destination is NCHW --> permute(1,2,0) { arm_compute::permute(perm, arm_compute::PermutationVector(1U, 2U, 0U)); } @@ -459,12 +486,25 @@ public: } window.use_tensor_dimensions(permuted_shape); - execute_window_loop(window, [&](const Coordinates & id) - { - Coordinates dst(id); - arm_compute::permute(dst, perm); - _fs.read(reinterpret_cast<char *>(tensor.ptr_to_element(dst)), tensor.info()->element_size()); - }); + execute_window_loop(window, + [&](const Coordinates &id) + { + Coordinates dst(id); + arm_compute::permute(dst, perm); + if (enable_f32_to_f16_conversion) + { + float f32_val = 0; + _fs.read(reinterpret_cast<char *>(&f32_val), 4u); + half f16_val = + half_float::half_cast<half, std::round_to_nearest>(f32_val); + *(reinterpret_cast<half *>(tensor.ptr_to_element(dst))) = f16_val; + } + else + { + _fs.read(reinterpret_cast<char *>(tensor.ptr_to_element(dst)), + tensor.info()->element_size()); + } + }); } break; @@ -476,7 +516,7 @@ public: // Unmap buffer if creating a CLTensor unmap(tensor); } - catch(const std::ifstream::failure &e) + catch (const std::ifstream::failure &e) { ARM_COMPUTE_ERROR_VAR("Loading NPY file: %s", e.what()); } @@ -515,13 +555,12 @@ void save_to_ppm(T &tensor, const std::string &ppm_filename) const unsigned int width = tensor.info()->tensor_shape()[0]; const unsigned int height = tensor.info()->tensor_shape()[1]; - fs << "P6\n" - << width << " " << height << " 255\n"; + fs << "P6\n" << width << " " << height << " 255\n"; // Map buffer if creating a CLTensor map(tensor, true); - switch(tensor.info()->format()) + switch (tensor.info()->format()) { case arm_compute::Format::U8: { @@ -531,13 +570,15 @@ void save_to_ppm(T &tensor, const std::string &ppm_filename) arm_compute::Iterator in(&tensor, window); - arm_compute::execute_window_loop(window, [&](const arm_compute::Coordinates &) - { - const unsigned char value = *in.ptr(); + arm_compute::execute_window_loop( + window, + [&](const arm_compute::Coordinates &) + { + const unsigned char value = *in.ptr(); - fs << value << value << value; - }, - in); + fs << value << value << value; + }, + in); break; } @@ -549,11 +590,13 @@ void save_to_ppm(T &tensor, const std::string &ppm_filename) arm_compute::Iterator in(&tensor, window); - arm_compute::execute_window_loop(window, [&](const arm_compute::Coordinates &) - { - fs.write(reinterpret_cast<std::fstream::char_type *>(in.ptr()), width * tensor.info()->element_size()); - }, - in); + arm_compute::execute_window_loop( + window, + [&](const arm_compute::Coordinates &) { + fs.write(reinterpret_cast<std::fstream::char_type *>(in.ptr()), + width * tensor.info()->element_size()); + }, + in); break; } @@ -564,7 +607,7 @@ void save_to_ppm(T &tensor, const std::string &ppm_filename) // Unmap buffer if creating a CLTensor unmap(tensor); } - catch(const std::ofstream::failure &e) + catch (const std::ofstream::failure &e) { ARM_COMPUTE_ERROR_VAR("Writing %s: (%s)", ppm_filename.c_str(), e.what()); } @@ -592,7 
+635,7 @@ void save_to_npy(T &tensor, const std::string &npy_filename, bool fortran_order) std::vector<npy::ndarray_len_t> shape(tensor.info()->num_dimensions()); - for(unsigned int i = 0, j = tensor.info()->num_dimensions() - 1; i < tensor.info()->num_dimensions(); ++i, --j) + for (unsigned int i = 0, j = tensor.info()->num_dimensions() - 1; i < tensor.info()->num_dimensions(); ++i, --j) { shape[i] = tensor.info()->tensor_shape()[!fortran_order ? j : i]; } @@ -603,27 +646,27 @@ void save_to_npy(T &tensor, const std::string &npy_filename, bool fortran_order) using typestring_type = typename std::conditional<std::is_floating_point<U>::value, float, qasymm8_t>::type; std::vector<typestring_type> tmp; /* Used only to get the typestring */ - npy::Typestring typestring_o{ tmp }; - std::string typestring = typestring_o.str(); + const npy::dtype_t dtype = npy::dtype_map.at(std::type_index(typeid(tmp))); std::ofstream stream(npy_filename, std::ofstream::binary); - npy::write_header(stream, typestring, fortran_order, shape); + npy::header_t header{dtype, fortran_order, shape}; + npy::write_header(stream, header); arm_compute::Window window; window.use_tensor_dimensions(tensor.info()->tensor_shape()); arm_compute::Iterator in(&tensor, window); - arm_compute::execute_window_loop(window, [&](const arm_compute::Coordinates &) - { - stream.write(reinterpret_cast<const char *>(in.ptr()), sizeof(typestring_type)); - }, - in); + arm_compute::execute_window_loop( + window, + [&](const arm_compute::Coordinates &) + { stream.write(reinterpret_cast<const char *>(in.ptr()), sizeof(typestring_type)); }, + in); // Unmap buffer if creating a CLTensor unmap(tensor); } - catch(const std::ofstream::failure &e) + catch (const std::ofstream::failure &e) { ARM_COMPUTE_ERROR_VAR("Writing %s: (%s)", npy_filename.c_str(), e.what()); } @@ -647,7 +690,7 @@ void load_trained_data(T &tensor, const std::string &filename) // Open file fs.open(filename, std::ios::in | std::ios::binary); - if(!fs.good()) + if (!fs.good()) { throw std::runtime_error("Could not load binary data: " + filename); } @@ -659,23 +702,26 @@ void load_trained_data(T &tensor, const std::string &filename) window.set(arm_compute::Window::DimX, arm_compute::Window::Dimension(0, 1, 1)); - for(unsigned int d = 1; d < tensor.info()->num_dimensions(); ++d) + for (unsigned int d = 1; d < tensor.info()->num_dimensions(); ++d) { window.set(d, Window::Dimension(0, tensor.info()->tensor_shape()[d], 1)); } arm_compute::Iterator in(&tensor, window); - execute_window_loop(window, [&](const Coordinates &) - { - fs.read(reinterpret_cast<std::fstream::char_type *>(in.ptr()), tensor.info()->tensor_shape()[0] * tensor.info()->element_size()); - }, - in); + execute_window_loop( + window, + [&](const Coordinates &) + { + fs.read(reinterpret_cast<std::fstream::char_type *>(in.ptr()), + tensor.info()->tensor_shape()[0] * tensor.info()->element_size()); + }, + in); // Unmap buffer if creating a CLTensor unmap(tensor); } - catch(const std::ofstream::failure &e) + catch (const std::ofstream::failure &e) { ARM_COMPUTE_ERROR_VAR("Writing %s: (%s)", filename.c_str(), e.what()); } @@ -690,11 +736,8 @@ void fill_tensor_value(TensorType &tensor, T value) window.use_tensor_dimensions(tensor.info()->tensor_shape()); Iterator it_tensor(&tensor, window); - execute_window_loop(window, [&](const Coordinates &) - { - *reinterpret_cast<T *>(it_tensor.ptr()) = value; - }, - it_tensor); + execute_window_loop( + window, [&](const Coordinates &) { *reinterpret_cast<T *>(it_tensor.ptr()) = value; }, 
it_tensor); unmap(tensor); } @@ -717,22 +760,23 @@ void fill_tensor_vector(TensorType &tensor, std::vector<T> vec) int i = 0; Iterator it_tensor(&tensor, window); - execute_window_loop(window, [&](const Coordinates &) - { - *reinterpret_cast<T *>(it_tensor.ptr()) = vec.at(i++); - }, - it_tensor); + execute_window_loop( + window, [&](const Coordinates &) { *reinterpret_cast<T *>(it_tensor.ptr()) = vec.at(i++); }, it_tensor); unmap(tensor); } template <typename T, typename TensorType> -void fill_random_tensor(TensorType &tensor, std::random_device::result_type seed, T lower_bound = std::numeric_limits<T>::lowest(), T upper_bound = std::numeric_limits<T>::max()) +void fill_random_tensor(TensorType &tensor, + std::random_device::result_type seed, + T lower_bound = std::numeric_limits<T>::lowest(), + T upper_bound = std::numeric_limits<T>::max()) { constexpr bool is_fp_16bit = std::is_same<T, half>::value || std::is_same<T, bfloat16>::value; constexpr bool is_integral = std::is_integral<T>::value && !is_fp_16bit; - using fp_dist_type = typename std::conditional<is_fp_16bit, arm_compute::utils::uniform_real_distribution_16bit<T>, std::uniform_real_distribution<T>>::type; + using fp_dist_type = typename std::conditional<is_fp_16bit, arm_compute::utils::uniform_real_distribution_16bit<T>, + std::uniform_real_distribution<T>>::type; using dist_type = typename std::conditional<is_integral, std::uniform_int_distribution<T>, fp_dist_type>::type; std::mt19937 gen(seed); @@ -744,17 +788,16 @@ void fill_random_tensor(TensorType &tensor, std::random_device::result_type seed window.use_tensor_dimensions(tensor.info()->tensor_shape()); Iterator it(&tensor, window); - execute_window_loop(window, [&](const Coordinates &) - { - *reinterpret_cast<T *>(it.ptr()) = dist(gen); - }, - it); + execute_window_loop( + window, [&](const Coordinates &) { *reinterpret_cast<T *>(it.ptr()) = dist(gen); }, it); unmap(tensor); } template <typename T, typename TensorType> -void fill_random_tensor(TensorType &tensor, T lower_bound = std::numeric_limits<T>::lowest(), T upper_bound = std::numeric_limits<T>::max()) +void fill_random_tensor(TensorType &tensor, + T lower_bound = std::numeric_limits<T>::lowest(), + T upper_bound = std::numeric_limits<T>::max()) { std::random_device rd; fill_random_tensor(tensor, rd(), lower_bound, upper_bound); @@ -763,7 +806,8 @@ void fill_random_tensor(TensorType &tensor, T lower_bound = std::numeric_limits< template <typename T> void init_sgemm_output(T &dst, T &src0, T &src1, arm_compute::DataType dt) { - dst.allocator()->init(TensorInfo(TensorShape(src1.info()->dimension(0), src0.info()->dimension(1), src0.info()->dimension(2)), 1, dt)); + dst.allocator()->init(TensorInfo( + TensorShape(src1.info()->dimension(0), src0.info()->dimension(1), src0.info()->dimension(2)), 1, dt)); } /** This function returns the amount of memory free reading from /proc/meminfo * @@ -795,14 +839,16 @@ int compare_tensor(ITensor &tensor1, ITensor &tensor2, T tolerance) Iterator itensor1(&tensor1, window); Iterator itensor2(&tensor2, window); - execute_window_loop(window, [&](const Coordinates &) - { - if(std::abs(*reinterpret_cast<T *>(itensor1.ptr()) - *reinterpret_cast<T *>(itensor2.ptr())) > tolerance) + execute_window_loop( + window, + [&](const Coordinates &) { - ++num_mismatches; - } - }, - itensor1, itensor2); + if (std::abs(*reinterpret_cast<T *>(itensor1.ptr()) - *reinterpret_cast<T *>(itensor2.ptr())) > tolerance) + { + ++num_mismatches; + } + }, + itensor1, itensor2); unmap(itensor1); unmap(itensor2); diff 
--git a/utils/command_line/CommandLineParser.h b/utils/command_line/CommandLineParser.h index e8fabc4251..57796bce73 100644 --- a/utils/command_line/CommandLineParser.h +++ b/utils/command_line/CommandLineParser.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2020 Arm Limited. + * Copyright (c) 2017-2020, 2022 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -24,13 +24,13 @@ #ifndef ARM_COMPUTE_UTILS_COMMANDLINEPARSER #define ARM_COMPUTE_UTILS_COMMANDLINEPARSER -#include "Option.h" #include "arm_compute/core/utils/misc/Utility.h" +#include "Option.h" +#include <cstring> #include <iostream> #include <map> #include <memory> -#include <memory> #include <regex> #include <string> #include <utility> @@ -55,7 +55,7 @@ public: * @return Pointer to the option. The option is owned by the parser. */ template <typename T, typename... As> - T *add_option(const std::string &name, As &&... args); + T *add_option(const std::string &name, As &&...args); /** Function to add a new positional argument to the parser. * @@ -64,7 +64,7 @@ public: * @return Pointer to the option. The option is owned by the parser. */ template <typename T, typename... As> - T *add_positional_option(As &&... args); + T *add_positional_option(As &&...args); /** Parses the command line arguments and updates the options accordingly. * @@ -100,14 +100,14 @@ private: }; template <typename T, typename... As> -inline T *CommandLineParser::add_option(const std::string &name, As &&... args) +inline T *CommandLineParser::add_option(const std::string &name, As &&...args) { auto result = _options.emplace(name, std::make_unique<T>(name, std::forward<As>(args)...)); return static_cast<T *>(result.first->second.get()); } template <typename T, typename... As> -inline T *CommandLineParser::add_positional_option(As &&... args) +inline T *CommandLineParser::add_positional_option(As &&...args) { _positional_options.emplace_back(std::make_unique<T>(std::forward<As>(args)...)); return static_cast<T *>(_positional_options.back().get()); @@ -115,11 +115,11 @@ inline T *CommandLineParser::add_positional_option(As &&... args) inline void CommandLineParser::parse(int argc, char **argv) { - const std::regex option_regex{ "--((?:no-)?)([^=]+)(?:=(.*))?" }; + const std::regex option_regex{"--((?:no-)?)([^=]+)(?:=(.*))?"}; - const auto set_option = [&](const std::string & option, const std::string & name, const std::string & value) + const auto set_option = [&](const std::string &option, const std::string &name, const std::string &value) { - if(_options.find(name) == _options.end()) + if (_options.find(name) == _options.end()) { _unknown_options.push_back(option); return; @@ -127,7 +127,7 @@ inline void CommandLineParser::parse(int argc, char **argv) const bool success = _options[name]->parse(value); - if(!success) + if (!success) { _invalid_options.push_back(option); } @@ -135,26 +135,27 @@ inline void CommandLineParser::parse(int argc, char **argv) unsigned int positional_index = 0; - for(int i = 1; i < argc; ++i) + for (int i = 1; i < argc; ++i) { - std::string mixed_case_opt{ argv[i] }; + std::string mixed_case_opt{argv[i]}; int equal_sign = mixed_case_opt.find('='); int pos = (equal_sign == -1) ? 
strlen(argv[i]) : equal_sign; - const std::string option = arm_compute::utility::tolower(mixed_case_opt.substr(0, pos)) + mixed_case_opt.substr(pos); - std::smatch option_matches; + const std::string option = + arm_compute::utility::tolower(mixed_case_opt.substr(0, pos)) + mixed_case_opt.substr(pos); + std::smatch option_matches; - if(std::regex_match(option, option_matches, option_regex)) + if (std::regex_match(option, option_matches, option_regex)) { // Boolean option - if(option_matches.str(3).empty()) + if (option_matches.str(3).empty()) { set_option(option, option_matches.str(2), option_matches.str(1).empty() ? "true" : "false"); } else { // Can't have "no-" and a value - if(!option_matches.str(1).empty()) + if (!option_matches.str(1).empty()) { _invalid_options.emplace_back(option); } @@ -166,7 +167,7 @@ inline void CommandLineParser::parse(int argc, char **argv) } else { - if(positional_index >= _positional_options.size()) + if (positional_index >= _positional_options.size()) { _invalid_options.push_back(mixed_case_opt); } @@ -183,30 +184,30 @@ inline bool CommandLineParser::validate() const { bool is_valid = true; - for(const auto &option : _options) + for (const auto &option : _options) { - if(option.second->is_required() && !option.second->is_set()) + if (option.second->is_required() && !option.second->is_set()) { is_valid = false; std::cerr << "ERROR: Option '" << option.second->name() << "' is required but not given!\n"; } } - for(const auto &option : _positional_options) + for (const auto &option : _positional_options) { - if(option->is_required() && !option->is_set()) + if (option->is_required() && !option->is_set()) { is_valid = false; std::cerr << "ERROR: Option '" << option->name() << "' is required but not given!\n"; } } - for(const auto &option : _unknown_options) + for (const auto &option : _unknown_options) { std::cerr << "WARNING: Skipping unknown option '" << option << "'!\n"; } - for(const auto &option : _invalid_options) + for (const auto &option : _invalid_options) { std::cerr << "WARNING: Skipping invalid option '" << option << "'!\n"; } @@ -218,19 +219,19 @@ inline void CommandLineParser::print_help(const std::string &program_name) const { std::cout << "usage: " << program_name << " \n"; - for(const auto &option : _options) + for (const auto &option : _options) { std::cout << option.second->help() << "\n"; } - for(const auto &option : _positional_options) + for (const auto &option : _positional_options) { std::string help_to_print; // Extract help sub-string const std::string help_str = option->help(); const size_t help_pos = help_str.find(" - "); - if(help_pos != std::string::npos) + if (help_pos != std::string::npos) { help_to_print = help_str.substr(help_pos); } diff --git a/utils/command_line/EnumListOption.h b/utils/command_line/EnumListOption.h index f4ee283528..6c4146fa75 100644 --- a/utils/command_line/EnumListOption.h +++ b/utils/command_line/EnumListOption.h @@ -25,7 +25,6 @@ #define ARM_COMPUTE_UTILS_ENUMLISTOPTION #include "Option.h" - #include <initializer_list> #include <set> #include <sstream> @@ -57,7 +56,7 @@ public: */ EnumListOption(std::string name, std::set<T> allowed_values, std::initializer_list<T> &&default_values); - bool parse(std::string value) override; + bool parse(std::string value) override; std::string help() const override; /** Get the values of the option. 
@@ -73,13 +72,17 @@ private: template <typename T> inline EnumListOption<T>::EnumListOption(std::string name, std::set<T> allowed_values) - : Option{ std::move(name) }, _allowed_values{ std::move(allowed_values) } + : Option{std::move(name)}, _allowed_values{std::move(allowed_values)} { } template <typename T> -inline EnumListOption<T>::EnumListOption(std::string name, std::set<T> allowed_values, std::initializer_list<T> &&default_values) - : Option{ std::move(name), false, true }, _values{ std::forward<std::initializer_list<T>>(default_values) }, _allowed_values{ std::move(allowed_values) } +inline EnumListOption<T>::EnumListOption(std::string name, + std::set<T> allowed_values, + std::initializer_list<T> &&default_values) + : Option{std::move(name), false, true}, + _values{std::forward<std::initializer_list<T>>(default_values)}, + _allowed_values{std::move(allowed_values)} { } @@ -90,10 +93,10 @@ bool EnumListOption<T>::parse(std::string value) _values.clear(); _is_set = true; - std::stringstream stream{ value }; + std::stringstream stream{value}; std::string item; - while(!std::getline(stream, item, ',').fail()) + while (!std::getline(stream, item, ',').fail()) { try { @@ -102,9 +105,9 @@ bool EnumListOption<T>::parse(std::string value) item_stream >> typed_value; - if(!item_stream.fail()) + if (!item_stream.fail()) { - if(_allowed_values.count(typed_value) == 0) + if (_allowed_values.count(typed_value) == 0) { _is_set = false; continue; @@ -115,7 +118,7 @@ bool EnumListOption<T>::parse(std::string value) _is_set = _is_set && !item_stream.fail(); } - catch(const std::invalid_argument &) + catch (const std::invalid_argument &) { _is_set = false; } @@ -130,7 +133,7 @@ std::string EnumListOption<T>::help() const std::stringstream msg; msg << "--" + name() + "={"; - for(const auto &value : _allowed_values) + for (const auto &value : _allowed_values) { msg << value << ","; } diff --git a/utils/command_line/EnumOption.h b/utils/command_line/EnumOption.h index 6bcfe5f14e..eb43b6c54e 100644 --- a/utils/command_line/EnumOption.h +++ b/utils/command_line/EnumOption.h @@ -25,7 +25,6 @@ #define ARM_COMPUTE_UTILS_ENUMOPTION #include "SimpleOption.h" - #include <set> #include <sstream> #include <stdexcept> @@ -55,7 +54,7 @@ public: */ EnumOption(std::string name, std::set<T> allowed_values, T default_value); - bool parse(std::string value) override; + bool parse(std::string value) override; std::string help() const override; /** Get the selected value. 
@@ -70,13 +69,13 @@ private: template <typename T> inline EnumOption<T>::EnumOption(std::string name, std::set<T> allowed_values) - : SimpleOption<T>{ std::move(name) }, _allowed_values{ std::move(allowed_values) } + : SimpleOption<T>{std::move(name)}, _allowed_values{std::move(allowed_values)} { } template <typename T> inline EnumOption<T>::EnumOption(std::string name, std::set<T> allowed_values, T default_value) - : SimpleOption<T>{ std::move(name), std::move(default_value) }, _allowed_values{ std::move(allowed_values) } + : SimpleOption<T>{std::move(name), std::move(default_value)}, _allowed_values{std::move(allowed_values)} { } @@ -85,14 +84,14 @@ bool EnumOption<T>::parse(std::string value) { try { - std::stringstream stream{ value }; + std::stringstream stream{value}; T typed_value{}; stream >> typed_value; - if(!stream.fail()) + if (!stream.fail()) { - if(_allowed_values.count(typed_value) == 0) + if (_allowed_values.count(typed_value) == 0) { return false; } @@ -104,7 +103,7 @@ bool EnumOption<T>::parse(std::string value) return false; } - catch(const std::invalid_argument &) + catch (const std::invalid_argument &) { return false; } @@ -116,7 +115,7 @@ std::string EnumOption<T>::help() const std::stringstream msg; msg << "--" + this->name() + "={"; - for(const auto &value : _allowed_values) + for (const auto &value : _allowed_values) { msg << value << ","; } diff --git a/utils/command_line/ListOption.h b/utils/command_line/ListOption.h index b290191e08..f318e1646a 100644 --- a/utils/command_line/ListOption.h +++ b/utils/command_line/ListOption.h @@ -25,7 +25,6 @@ #define ARM_COMPUTE_UTILS_LISTOPTION #include "Option.h" - #include <initializer_list> #include <sstream> #include <stdexcept> @@ -50,7 +49,7 @@ public: */ ListOption(std::string name, std::initializer_list<T> &&default_values); - bool parse(std::string value) override; + bool parse(std::string value) override; std::string help() const override; /** Get the list of option values. 
@@ -65,7 +64,7 @@ private: template <typename T> inline ListOption<T>::ListOption(std::string name, std::initializer_list<T> &&default_values) - : Option{ std::move(name), false, true }, _values{ std::forward<std::initializer_list<T>>(default_values) } + : Option{std::move(name), false, true}, _values{std::forward<std::initializer_list<T>>(default_values)} { } @@ -76,17 +75,17 @@ bool ListOption<T>::parse(std::string value) try { - std::stringstream stream{ value }; + std::stringstream stream{value}; std::string item; - while(!std::getline(stream, item, ',').fail()) + while (!std::getline(stream, item, ',').fail()) { std::stringstream item_stream(item); T typed_value{}; item_stream >> typed_value; - if(!item_stream.fail()) + if (!item_stream.fail()) { _values.emplace_back(typed_value); } @@ -96,7 +95,7 @@ bool ListOption<T>::parse(std::string value) return _is_set; } - catch(const std::invalid_argument &) + catch (const std::invalid_argument &) { return false; } diff --git a/utils/command_line/Option.h b/utils/command_line/Option.h index c845e5499f..b4288538b0 100644 --- a/utils/command_line/Option.h +++ b/utils/command_line/Option.h @@ -97,18 +97,17 @@ public: protected: std::string _name; - bool _is_required{ false }; - bool _is_set{ false }; + bool _is_required{false}; + bool _is_set{false}; std::string _help{}; }; -inline Option::Option(std::string name) - : _name{ std::move(name) } +inline Option::Option(std::string name) : _name{std::move(name)} { } inline Option::Option(std::string name, bool is_required, bool is_set) - : _name{ std::move(name) }, _is_required{ is_required }, _is_set{ is_set } + : _name{std::move(name)}, _is_required{is_required}, _is_set{is_set} { } diff --git a/utils/command_line/SimpleOption.h b/utils/command_line/SimpleOption.h index d76797375d..f6329c1790 100644 --- a/utils/command_line/SimpleOption.h +++ b/utils/command_line/SimpleOption.h @@ -25,7 +25,6 @@ #define ARM_COMPUTE_UTILS_SIMPLEOPTION #include "Option.h" - #include <sstream> #include <stdexcept> #include <string> @@ -74,7 +73,7 @@ protected: template <typename T> inline SimpleOption<T>::SimpleOption(std::string name, T default_value) - : Option{ std::move(name), false, true }, _value{ std::move(default_value) } + : Option{std::move(name), false, true}, _value{std::move(default_value)} { } @@ -83,12 +82,12 @@ bool SimpleOption<T>::parse(std::string value) { try { - std::stringstream stream{ std::move(value) }; + std::stringstream stream{std::move(value)}; stream >> _value; _is_set = !stream.fail(); return _is_set; } - catch(const std::invalid_argument &) + catch (const std::invalid_argument &) { return false; } diff --git a/utils/command_line/ToggleOption.h b/utils/command_line/ToggleOption.h index d3c68663b5..694b7bb9e6 100644 --- a/utils/command_line/ToggleOption.h +++ b/utils/command_line/ToggleOption.h @@ -25,7 +25,6 @@ #define ARM_COMPUTE_UTILS_TOGGLEOPTION #include "SimpleOption.h" - #include <string> namespace arm_compute @@ -45,26 +44,23 @@ public: */ ToggleOption(std::string name, bool default_value); - bool parse(std::string value) override; + bool parse(std::string value) override; std::string help() const override; }; inline ToggleOption::ToggleOption(std::string name, bool default_value) - : SimpleOption<bool> -{ - std::move(name), default_value -} + : SimpleOption<bool>{std::move(name), default_value} { } inline bool ToggleOption::parse(std::string value) { - if(value == "true") + if (value == "true") { _value = true; _is_set = true; } - else if(value == "false") + else if (value == 
"false") { _value = false; _is_set = true; |