/*
 * Copyright (c) 2019 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __GRAPH_VALIDATE_UTILS_H__
#define __GRAPH_VALIDATE_UTILS_H__

#include "arm_compute/graph.h"

#include "ValidateExample.h"
#include "utils/command_line/CommandLineParser.h"

namespace arm_compute
{
namespace utils
{
/** Available padding modes */
enum class ConvolutionPaddingMode
{
    Valid,
    Same,
    Manual
};

/** Stream Input operator for the ConvolutionPaddingMode type
 *
 * @param[in]  stream Input stream.
 * @param[out] Mode   Convolution padding mode to output
 *
 * @return Input stream.
 */
inline ::std::istream &operator>>(::std::istream &stream, ConvolutionPaddingMode &Mode)
{
    static const std::map<std::string, ConvolutionPaddingMode> modes =
    {
        { "valid", ConvolutionPaddingMode::Valid },
        { "same", ConvolutionPaddingMode::Same },
        { "manual", ConvolutionPaddingMode::Manual }
    };
    std::string value;
    stream >> value;
#ifndef ARM_COMPUTE_EXCEPTIONS_DISABLED
    try
    {
#endif /* ARM_COMPUTE_EXCEPTIONS_DISABLED */
        Mode = modes.at(arm_compute::utility::tolower(value));
#ifndef ARM_COMPUTE_EXCEPTIONS_DISABLED
    }
    catch(const std::out_of_range &)
    {
        throw std::invalid_argument(value);
    }
#endif /* ARM_COMPUTE_EXCEPTIONS_DISABLED */

    return stream;
}
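/* Illustrative usage sketch (not part of the original header): how the ConvolutionPaddingMode stream
 * operators are typically exercised, e.g. when the command line parser reads a "--padding_mode" value.
 * Only standard <sstream>/<iostream> facilities are assumed; variable names are placeholders.
 *
 *   std::istringstream option_value("same");
 *   ConvolutionPaddingMode mode = ConvolutionPaddingMode::Valid;
 *   option_value >> mode;      // mode is now ConvolutionPaddingMode::Same
 *   std::cout << mode << "\n"; // prints "Same" via the output operator below
 *   // An unknown token such as "reflect" throws std::invalid_argument (when exceptions are enabled).
 */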
/** Formatted output of the ConvolutionPaddingMode type
 *
 * @param[out] os   Output stream.
 * @param[in]  Mode ConvolutionPaddingMode to output
 *
 * @return Modified output stream.
 */
inline ::std::ostream &operator<<(::std::ostream &os, ConvolutionPaddingMode Mode)
{
    switch(Mode)
    {
        case ConvolutionPaddingMode::Valid:
            os << "Valid";
            break;
        case ConvolutionPaddingMode::Same:
            os << "Same";
            break;
        case ConvolutionPaddingMode::Manual:
            os << "Manual";
            break;
        default:
            throw std::invalid_argument("Unsupported padding mode format");
    }
    return os;
}

/** Structure holding all the input tensor graph parameters */
struct TensorParams
{
    int              width{ 1 };
    int              height{ 1 };
    int              fm{ 1 };
    int              batch{ 1 };
    QuantizationInfo quant_info{ 1.0f, 0 };
    std::string      npy{};
    uint64_t         range_low{ 0 };
    uint64_t         range_high{ 16 };
};

/** Structure holding all the verification graph parameters */
struct VerificationParams
{
    float absolute_tolerance{ -1.f };
    float relative_tolerance{ -1.f };
    float tolerance_number{ -1.f };
};

/** Structure holding all the common framework parameters */
struct FrameworkParams
{
    bool                       help{ false };
    int                        threads{ 0 };
    arm_compute::graph::Target target{ arm_compute::graph::Target::NEON };
};

/** Structure holding all the common graph Example parameters */
struct CommonParams
{
    FrameworkParams       common_params{};
    TensorParams          input{};
    TensorParams          weights{};
    TensorParams          bias{};
    TensorParams          output{};
    VerificationParams    verification{};
    arm_compute::DataType data_type{ DataType::F32 };
};

/** Structure holding all the Convolution layer graph parameters */
struct ConvolutionParams
{
    int depth_multiplier{ 1 };
    /** Padding graph parameters */
    int                    padding_top{ 0 };
    int                    padding_bottom{ 0 };
    int                    padding_left{ 0 };
    int                    padding_right{ 0 };
    int                    padding_stride_x{ 0 };
    int                    padding_stride_y{ 0 };
    ConvolutionPaddingMode padding_mode{ ConvolutionPaddingMode::Valid };
    struct
    {
        struct
        {
            int X{ 0 };
            int Y{ 0 };
        } stride{};
        ConvolutionPaddingMode mode{ ConvolutionPaddingMode::Valid };
    } padding{};
};

/** Structure holding all the fully_connected layer graph parameters */
struct FullyConnectedParams
{
    FullyConnectedLayerInfo info{};
    int                     num_outputs{ 1 };
};

/** Structure holding all the graph Example parameters */
struct ExampleParams : public CommonParams
{
    FullyConnectedParams fully_connected{};
    ConvolutionParams    convolution{};

    arm_compute::graph::DepthwiseConvolutionMethod depth_convolution_method{ arm_compute::graph::DepthwiseConvolutionMethod::Default };
    arm_compute::graph::ConvolutionMethod          convolution_method{ arm_compute::graph::ConvolutionMethod::Default };
    arm_compute::DataLayout                        data_layout{ DataLayout::NCHW };
};
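/* Illustrative sketch (not part of the original header): filling an ExampleParams instance by hand,
 * e.g. for a 3x3 convolution over a 32x32x3 input with "same" padding. All values are arbitrary
 * placeholders; in the validate examples these fields are normally populated from command line options.
 *
 *   ExampleParams params{};
 *   params.input.width    = 32;
 *   params.input.height   = 32;
 *   params.input.fm       = 3;   // input feature maps
 *   params.weights.width  = 3;
 *   params.weights.height = 3;
 *   params.weights.fm     = 8;   // output feature maps
 *   params.convolution.padding_mode     = ConvolutionPaddingMode::Same;
 *   params.convolution.padding_stride_x = 1;
 *   params.convolution.padding_stride_y = 1;
 *   params.data_type   = DataType::F32;
 *   params.data_layout = DataLayout::NCHW;
 */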
/** Calculate stride information.
 *
 * Depending on the selected padding mode create the desired PadStrideInfo
 *
 * @param[in] params Convolution parameters supplied by the user.
 *
 * @return PadStrideInfo with the correct padding mode.
 */
inline PadStrideInfo calculate_convolution_padding(ExampleParams params)
{
    switch(params.convolution.padding_mode)
    {
        case ConvolutionPaddingMode::Manual:
        {
            return PadStrideInfo(params.convolution.padding_stride_x, params.convolution.padding_stride_y,
                                 params.convolution.padding_left, params.convolution.padding_right,
                                 params.convolution.padding_top, params.convolution.padding_bottom,
                                 DimensionRoundingType::FLOOR);
        }
        case ConvolutionPaddingMode::Valid:
        {
            return PadStrideInfo();
        }
        case ConvolutionPaddingMode::Same:
        {
            return arm_compute::calculate_same_pad(TensorShape(params.input.width, params.input.height),
                                                   TensorShape(params.weights.width, params.weights.height),
                                                   PadStrideInfo(params.convolution.padding_stride_x, params.convolution.padding_stride_y));
        }
        default:
            ARM_COMPUTE_ERROR("NOT SUPPORTED!");
    }
}
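/* Illustrative sketch (not part of the original header): how the helper above maps the user's padding
 * mode onto a PadStrideInfo. Values are placeholders and reuse the hypothetical ExampleParams shown earlier.
 *
 *   ExampleParams params{};
 *   params.convolution.padding_stride_x = 1;
 *   params.convolution.padding_stride_y = 1;
 *
 *   params.convolution.padding_mode = ConvolutionPaddingMode::Valid;
 *   PadStrideInfo valid_info = calculate_convolution_padding(params); // stride 1, zero padding
 *
 *   params.convolution.padding_mode = ConvolutionPaddingMode::Same;
 *   params.input.width    = 32;
 *   params.input.height   = 32;
 *   params.weights.width  = 3;
 *   params.weights.height = 3;
 *   PadStrideInfo same_info = calculate_convolution_padding(params);  // padding chosen so the output stays 32x32
 */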
/** CommonGraphValidateOptions command line options used to configure the graph examples
 *
 * (Similar to common options)
 * The options in this object get populated when "parse()" is called on the parser used to construct it.
 * The expected workflow is:
 *
 * CommandLineParser parser;
 * CommonGraphValidateOptions options( parser );
 * parser.parse(argc, argv);
 */
class CommonGraphValidateOptions
{
public:
    explicit CommonGraphValidateOptions(CommandLineParser &parser) noexcept
        : help(parser.add_option<ToggleOption>("help")),
          threads(parser.add_option<SimpleOption<int>>("threads")),
          target(),
          data_type(),
          absolute_tolerance(parser.add_option<SimpleOption<float>>("abs_tolerance", -1.0f)),
          relative_tolerance(parser.add_option<SimpleOption<float>>("rel_tolerance", -1.0f)),
          tolerance_number(parser.add_option<SimpleOption<float>>("tolerance_num", -1.0f))
    {
        const std::set<arm_compute::graph::Target> supported_targets
        {
            arm_compute::graph::Target::NEON,
            arm_compute::graph::Target::CL,
            arm_compute::graph::Target::GC,
        };

        const std::set<arm_compute::DataType> supported_data_types
        {
            DataType::F16,
            DataType::F32,
            DataType::QASYMM8,
        };

        target    = parser.add_option<EnumOption<arm_compute::graph::Target>>("target", supported_targets, arm_compute::graph::Target::NEON);
        data_type = parser.add_option<EnumOption<arm_compute::DataType>>("type", supported_data_types, DataType::F32);

        target->set_help("Target to execute on");
        data_type->set_help("Data type to use");
        help->set_help("Show this help message");
        absolute_tolerance->set_help("Absolute tolerance used for verification");
        relative_tolerance->set_help("Relative tolerance used for verification");
        tolerance_number->set_help("Tolerance number used for verification");
    }
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CommonGraphValidateOptions(const CommonGraphValidateOptions &) = delete;
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CommonGraphValidateOptions &operator=(const CommonGraphValidateOptions &) = delete;
    /** Allow instances of this class to be moved */
    CommonGraphValidateOptions(CommonGraphValidateOptions &&) noexcept(true) = default;
    /** Allow instances of this class to be moved */
    CommonGraphValidateOptions &operator=(CommonGraphValidateOptions &&) noexcept(true) = default;
    /** Default destructor */
    virtual ~CommonGraphValidateOptions() = default;

    void consume_common_parameters(CommonParams &common_params)
    {
        common_params.common_params.help    = help->is_set() ? help->value() : false;
        common_params.common_params.threads = threads->value();
        common_params.common_params.target  = target->value();

        common_params.verification.absolute_tolerance = absolute_tolerance->value();
        common_params.verification.relative_tolerance = relative_tolerance->value();
        common_params.verification.tolerance_number   = tolerance_number->value();
    }

    /** Formatted output of the ExampleParams type
     *
     * @param[out] os            Output stream.
     * @param[in]  common_params Example parameters to output
     *
     * @return None.
     */
    virtual void print_parameters(::std::ostream &os, const ExampleParams &common_params)
    {
        os << "Threads : " << common_params.common_params.threads << std::endl;
        os << "Target : " << common_params.common_params.target << std::endl;
        os << "Data type : " << common_params.data_type << std::endl;
    }

    ToggleOption                           *help;               /**< Show help message */
    SimpleOption<int>                      *threads;            /**< Number of threads option */
    EnumOption<arm_compute::graph::Target> *target;             /**< Graph execution target */
    EnumOption<arm_compute::DataType>      *data_type;          /**< Graph data type */
    SimpleOption<float>                    *absolute_tolerance; /**< Absolute tolerance used in verification */
    SimpleOption<float>                    *relative_tolerance; /**< Relative tolerance used in verification */
    SimpleOption<float>                    *tolerance_number;   /**< Tolerance number used in verification */
};

/** Consumes the common graph options and populates the given parameters structure
 *
 * @param[in]  options       Options to consume
 * @param[out] common_params Parameters structure to populate.
 *
 * @return None.
 */
void consume_common_graph_parameters(CommonGraphValidateOptions &options, CommonParams &common_params)
{
    common_params.common_params.help    = options.help->is_set() ? options.help->value() : false;
    common_params.common_params.threads = options.threads->value();
    common_params.common_params.target  = options.target->value();

    common_params.verification.absolute_tolerance = options.absolute_tolerance->value();
    common_params.verification.relative_tolerance = options.relative_tolerance->value();
    common_params.verification.tolerance_number   = options.tolerance_number->value();
}

/** Generates appropriate accessor according to the specified graph parameters
 *
 * @param[in] tensor Tensor parameters
 * @param[in] lower  Lower random values bound
 * @param[in] upper  Upper random values bound
 * @param[in] seed   Random generator seed
 *
 * @return An appropriate tensor accessor
 */
inline std::unique_ptr<graph::ITensorAccessor> get_accessor(const TensorParams &tensor, PixelValue lower, PixelValue upper, const std::random_device::result_type seed = 0)
{
    if(!tensor.npy.empty())
    {
        return arm_compute::support::cpp14::make_unique<arm_compute::graph_utils::NumPyBinLoader>(tensor.npy);
    }
    else
    {
        return arm_compute::support::cpp14::make_unique<arm_compute::graph_utils::RandomAccessor>(lower, upper, seed);
    }
}
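/* Illustrative sketch (not part of the original header): wiring the pieces above together inside a
 * hypothetical example setup function. The bounds and variable names are placeholders.
 *
 *   CommandLineParser          parser;
 *   CommonGraphValidateOptions options(parser);
 *   parser.parse(argc, argv);
 *
 *   ExampleParams params{};
 *   consume_common_graph_parameters(options, params);
 *
 *   // Feed the input node either from an npy file (params.input.npy) or with random values.
 *   std::unique_ptr<graph::ITensorAccessor> input_accessor =
 *       get_accessor(params.input, PixelValue(0.f), PixelValue(1.f));
 */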
/** Graph example validation accessor class */
template <typename D>
class VerifyAccessor : public graph::ITensorAccessor
{
public:
    using TBias = typename std::conditional<std::is_same<typename std::decay<D>::type, uint8_t>::value, int32_t, D>::type;
    /** Constructor
     *
     * @param[in] params Example parameters
     */
    explicit VerifyAccessor(ExampleParams &params)
        : _params(std::move(params))
    {
    }
    // Inherited methods overridden:
    bool access_tensor(ITensor &tensor) override
    {
        if(_params.output.npy.empty())
        {
            arm_compute::test::SimpleTensor<D>     src;
            arm_compute::test::SimpleTensor<D>     weights;
            arm_compute::test::SimpleTensor<TBias> bias;

            //Create Input tensors
            create_tensors(src, weights, bias, tensor);

            //Fill the tensors with random values
            fill_tensor(src, 0, static_cast<D>(_params.input.range_low), static_cast<D>(_params.input.range_high));
            fill_tensor(weights, 1, static_cast<D>(_params.weights.range_low), static_cast<D>(_params.weights.range_high));
            fill_tensor(bias, 2, static_cast<TBias>(_params.input.range_low), static_cast<TBias>(_params.input.range_high));

            arm_compute::test::SimpleTensor<D> output = reference(src, weights, bias, output_shape(tensor));

            validate(tensor, output);
        }
        else
        {
            //The user provided a reference file, use an npy accessor to validate
            arm_compute::graph_utils::NumPyAccessor(_params.output.npy, tensor.info()->tensor_shape(), tensor.info()->data_type()).access_tensor(tensor);
        }
        return false;
    }

    /** Create reference tensors.
     *
     * Creates the source, weights and bias tensors used to compute the reference output.
     *
     * @param[out] src     The tensor with the source data.
     * @param[out] weights The tensor with the weights data.
     * @param[out] bias    The tensor with the bias data.
     * @param[in]  tensor  Tensor result of the actual operation passed into the Accessor.
     *
     * @return None.
     */
    virtual void create_tensors(arm_compute::test::SimpleTensor<D>     &src,
                                arm_compute::test::SimpleTensor<D>     &weights,
                                arm_compute::test::SimpleTensor<TBias> &bias,
                                ITensor                                &tensor)
    {
        ARM_COMPUTE_UNUSED(tensor);
        //Create Input tensors
        src     = arm_compute::test::SimpleTensor<D> { TensorShape(_params.input.width, _params.input.height, _params.input.fm, _params.input.batch), _params.data_type, 1, _params.input.quant_info };
        weights = arm_compute::test::SimpleTensor<D> { TensorShape(_params.weights.width, _params.weights.height, _params.weights.fm), _params.data_type, 1, _params.weights.quant_info };
        bias    = arm_compute::test::SimpleTensor<TBias> { TensorShape(_params.input.height), _params.data_type, 1, _params.input.quant_info };
    }

    /** Calculate reference output tensor shape.
     *
     * @param[in] tensor Tensor result of the actual operation passed into the Accessor.
     *
     * @return Output tensor shape.
     */
    virtual TensorShape output_shape(ITensor &tensor)
    {
        return arm_compute::graph_utils::permute_shape(tensor.info()->tensor_shape(), _params.data_layout, DataLayout::NCHW);
    }

    /** Calculate reference tensor.
     *
     * Computes the reference output for the given inputs.
     *
     * @param[in] src          The tensor with the source data.
     * @param[in] weights      The tensor with the weights data.
     * @param[in] bias         The tensor with the bias data.
     * @param[in] output_shape Shape of the output tensor.
     *
     * @return Tensor with the reference output.
     */
    virtual arm_compute::test::SimpleTensor<D> reference(arm_compute::test::SimpleTensor<D>     &src,
                                                         arm_compute::test::SimpleTensor<D>     &weights,
                                                         arm_compute::test::SimpleTensor<TBias> &bias,
                                                         const arm_compute::TensorShape         &output_shape) = 0;

    /** Fill QASYMM8 tensor with random values.
     *
     * @param[out] tensor The tensor we want to fill
     * @param[in]  seed   Seed for the randomization function
     * @param[in]  low    Lower bound for random values
     * @param[in]  high   Upper bound for random values
     *
     * @return None.
     */
    void fill_tensor(arm_compute::test::SimpleTensor<uint8_t> &tensor, std::random_device::result_type seed, uint8_t low, uint8_t high)
    {
        ARM_COMPUTE_ERROR_ON(tensor.data_type() != arm_compute::DataType::QASYMM8);

        const UniformQuantizationInfo qinfo = tensor.quantization_info().uniform();

        uint8_t qasymm8_low  = quantize_qasymm8(low, qinfo);
        uint8_t qasymm8_high = quantize_qasymm8(high, qinfo);

        std::mt19937                           gen(seed);
        std::uniform_int_distribution<uint8_t> distribution(qasymm8_low, qasymm8_high);

        for(int i = 0; i < tensor.num_elements(); ++i)
        {
            tensor[i] = quantize_qasymm8(distribution(gen), qinfo);
        }
    }
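    /* Worked example (illustrative, not part of the original header): with a hypothetical
     * UniformQuantizationInfo of scale = 0.1f and offset = 128, the bounds low = 0 and high = 16
     * quantize to 128 and 255 respectively (16 / 0.1 + 128 = 288, clamped to 255), so the
     * distribution above draws raw values in [128, 255] before each draw is passed through
     * quantize_qasymm8() once more in the loop.
     */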
    /** Fill S32 tensor with random values.
     *
     * @param[out] tensor The tensor we want to fill
     * @param[in]  seed   Seed for the randomization function
     * @param[in]  low    Lower bound for random values
     * @param[in]  high   Upper bound for random values
     *
     * @return None.
     */
    void fill_tensor(arm_compute::test::SimpleTensor<int32_t> &tensor, std::random_device::result_type seed, int32_t low, int32_t high)
    {
        std::mt19937                           gen(seed);
        std::uniform_int_distribution<int32_t> distribution(static_cast<int32_t>(low), static_cast<int32_t>(high));

        for(int i = 0; i < tensor.num_elements(); ++i)
        {
            tensor[i] = distribution(gen);
        }
    }

    /** Fill F32 tensor with random values.
     *
     * @param[out] tensor The tensor we want to fill
     * @param[in]  seed   Seed for the randomization function
     * @param[in]  low    Lower bound for random values
     * @param[in]  high   Upper bound for random values
     *
     * @return None.
     */
    void fill_tensor(arm_compute::test::SimpleTensor<float> &tensor, std::random_device::result_type seed, float low, float high)
    {
        ARM_COMPUTE_ERROR_ON(tensor.data_type() != arm_compute::DataType::F32);

        std::mt19937                          gen(seed);
        std::uniform_real_distribution<float> distribution(low, high);

        for(int i = 0; i < tensor.num_elements(); ++i)
        {
            tensor[i] = distribution(gen);
        }
    }

    /** Fill F16 tensor with random values.
     *
     * @param[out] tensor The tensor we want to fill
     * @param[in]  seed   Seed for the randomization function
     * @param[in]  low    Lower bound for random values
     * @param[in]  high   Upper bound for random values
     *
     * @return None.
     */
    void fill_tensor(arm_compute::test::SimpleTensor<half> &tensor, std::random_device::result_type seed, half low, half high)
    {
        ARM_COMPUTE_ERROR_ON(tensor.data_type() != arm_compute::DataType::F16);

        std::mt19937                          gen(seed);
        std::uniform_real_distribution<float> distribution(static_cast<float>(low), static_cast<float>(high));

        for(int i = 0; i < tensor.num_elements(); ++i)
        {
            tensor[i] = static_cast<half>(distribution(gen));
        }
    }

    /** Select relative tolerance.
     *
     * Select relative tolerance if not supplied by user.
     *
     * @return Appropriate relative tolerance.
     */
    virtual float relative_tolerance() = 0;

    /** Select absolute tolerance.
     *
     * Select absolute tolerance if not supplied by user.
     *
     * @return Appropriate absolute tolerance.
     */
    virtual float absolute_tolerance() = 0;

    /** Select tolerance number.
     *
     * Select tolerance number if not supplied by user.
     *
     * @return Appropriate tolerance number.
     */
    virtual float tolerance_number() = 0;
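    /* Illustrative sketch (not part of the original header): a hypothetical concrete accessor
     * would subclass VerifyAccessor for a given data type, compute its reference output and
     * provide the default tolerances used when the user passes none, e.g.:
     *
     *   class ExampleVerifyAccessor final : public VerifyAccessor<float>
     *   {
     *   public:
     *       using VerifyAccessor<float>::VerifyAccessor;
     *
     *       arm_compute::test::SimpleTensor<float> reference(arm_compute::test::SimpleTensor<float> &src,
     *                                                        arm_compute::test::SimpleTensor<float> &weights,
     *                                                        arm_compute::test::SimpleTensor<float> &bias,
     *                                                        const arm_compute::TensorShape         &output_shape) override
     *       {
     *           // Placeholder: real code would call the matching arm_compute::test::validation::reference kernel.
     *           return arm_compute::test::SimpleTensor<float>(output_shape, DataType::F32);
     *       }
     *
     *       float relative_tolerance() override { return 0.05f; }
     *       float absolute_tolerance() override { return 0.0001f; }
     *       float tolerance_number() override   { return 0.07f; }
     *   };
     */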
    /** Validate the output versus the reference.
     *
     * @param[in] tensor Tensor result of the actual operation passed into the Accessor.
     * @param[in] output Tensor result of the reference implementation.
     *
     * @return None.
     */
    void validate(ITensor &tensor, arm_compute::test::SimpleTensor<D> output)
    {
        float user_relative_tolerance = _params.verification.relative_tolerance;
        float user_absolute_tolerance = _params.verification.absolute_tolerance;
        float user_tolerance_num      = _params.verification.tolerance_number;
        /* If no user input was provided override with defaults. */
        if(user_relative_tolerance == -1)
        {
            user_relative_tolerance = relative_tolerance();
        }

        if(user_absolute_tolerance == -1)
        {
            user_absolute_tolerance = absolute_tolerance();
        }

        if(user_tolerance_num == -1)
        {
            user_tolerance_num = tolerance_number();
        }

        const arm_compute::test::validation::RelativeTolerance<float> rel_tolerance(user_relative_tolerance); /**< Relative tolerance */
        const arm_compute::test::validation::AbsoluteTolerance<float> abs_tolerance(user_absolute_tolerance); /**< Absolute tolerance */
        const float                                                   tolerance_num(user_tolerance_num);      /**< Tolerance number */

        arm_compute::test::validation::validate(arm_compute::test::Accessor(tensor), output, rel_tolerance, tolerance_num, abs_tolerance);
    }

    ExampleParams _params;
};

/** Generates appropriate convolution verify accessor
 *
 * @param[in] params User supplied parameters for convolution.
 *
 * @return A convolution verify accessor for the requested datatype.
 */
template