From 8d94269d7985b9cee67e52581e2f58b6c99d7f0d Mon Sep 17 00:00:00 2001 From: John Kesapides Date: Tue, 26 Feb 2019 14:52:12 +0000 Subject: COMPMID-1492 Create tests/validate_examples/graph_depthwise_convolution Add new validate graph example and unify common example code Change-Id: Ibfd7ae2067ad805d6c82d953fe3febfbea961149 Signed-off-by: John Kesapides Reviewed-on: https://review.mlplatform.org/c/825 Comments-Addressed: Arm Jenkins Tested-by: Arm Jenkins Reviewed-by: Georgios Pinitas --- arm_compute/graph/TypeLoader.h | 24 + src/graph/TypeLoader.cpp | 25 + tests/validate_examples/graph_convolution.cpp | 668 ++++---------------- .../graph_depthwiseconvolution.cpp | 396 ++++++++++++ tests/validate_examples/graph_fully_connected.cpp | 499 ++++----------- tests/validate_examples/graph_validate_utils.h | 695 +++++++++++++++++++++ .../reference/DepthwiseConvolutionLayer.cpp | 17 +- .../reference/DepthwiseConvolutionLayer.h | 2 +- 8 files changed, 1376 insertions(+), 950 deletions(-) create mode 100644 tests/validate_examples/graph_depthwiseconvolution.cpp create mode 100644 tests/validate_examples/graph_validate_utils.h diff --git a/arm_compute/graph/TypeLoader.h b/arm_compute/graph/TypeLoader.h index dcdc1736a7..41f382ad1d 100644 --- a/arm_compute/graph/TypeLoader.h +++ b/arm_compute/graph/TypeLoader.h @@ -123,6 +123,30 @@ inline ::std::istream &operator>>(::std::istream &stream, ConvolutionMethod &tar target = Convolution_method_from_name(value); return stream; } + +/** Converts a string to a strong types enumeration @ref DepthwiseConvolutionMethod + * + * @param[in] name String to convert + * + * @return Converted Target enumeration + */ +DepthwiseConvolutionMethod depthwise_convolution_method_from_name(const std::string &name); + +/** Input Stream operator for @ref DepthwiseConvolutionMethod + * + * @param[in] stream Stream to parse + * @param[out] target Output target + * + * @return Updated stream + */ +inline ::std::istream &operator>>(::std::istream &stream, DepthwiseConvolutionMethod &target) +{ + std::string value; + stream >> value; + target = depthwise_convolution_method_from_name(value); + return stream; +} + } // namespace graph } // namespace arm_compute #endif /* __ARM_COMPUTE_GRAPH_TYPE_LOADER_H__ */ diff --git a/src/graph/TypeLoader.cpp b/src/graph/TypeLoader.cpp index 0c1ce25b92..b63672b39b 100644 --- a/src/graph/TypeLoader.cpp +++ b/src/graph/TypeLoader.cpp @@ -125,5 +125,30 @@ ConvolutionMethod Convolution_method_from_name(const std::string &name) } #endif /* ARM_COMPUTE_EXCEPTIONS_DISABLED */ } + +DepthwiseConvolutionMethod depthwise_convolution_method_from_name(const std::string &name) +{ + static const std::map methods = + { + { "default", DepthwiseConvolutionMethod::Default }, + { "gemv", DepthwiseConvolutionMethod::GEMV }, + { "optimized3x3", DepthwiseConvolutionMethod::Optimized3x3 }, + }; + +#ifndef ARM_COMPUTE_EXCEPTIONS_DISABLED + try + { +#endif /* ARM_COMPUTE_EXCEPTIONS_DISABLED */ + return methods.at(arm_compute::utility::tolower(name)); + +#ifndef ARM_COMPUTE_EXCEPTIONS_DISABLED + } + catch(const std::out_of_range &) + { + throw std::invalid_argument(name); + } +#endif /* ARM_COMPUTE_EXCEPTIONS_DISABLED */ +} + } // namespace graph } // namespace arm_compute diff --git a/tests/validate_examples/graph_convolution.cpp b/tests/validate_examples/graph_convolution.cpp index 4f5ab0dc08..acc1e69544 100644 --- a/tests/validate_examples/graph_convolution.cpp +++ b/tests/validate_examples/graph_convolution.cpp @@ -35,6 +35,7 @@ #include "utils/Utils.h" #include 
"ValidateExample.h" +#include "graph_validate_utils.h" #include @@ -45,161 +46,9 @@ using namespace arm_compute::graph; using namespace arm_compute; using namespace arm_compute::test; using namespace arm_compute::test::validation; -namespace -{ -/*Available Padding modes */ -enum class PaddingMode -{ - Valid, - Same, - Manual -}; -/** Stream Input operator for the PaddingMode type - * - * @param[in] stream Input stream. - * @param[out] Mode Convolution parameters to output - * - * @return input stream. - */ -inline ::std::istream &operator>>(::std::istream &stream, PaddingMode &Mode) -{ - static const std::map modes = - { - { "valid", PaddingMode::Valid }, - { "same", PaddingMode::Same }, - { "manual", PaddingMode::Manual } - }; - std::string value; - stream >> value; - try - { - Mode = modes.at(arm_compute::utility::tolower(value)); - } - catch(const std::out_of_range &) - { - throw std::invalid_argument(value); - } - - return stream; -} - -/** Formatted output of the PaddingMode type - * - * @param[out] os Output stream. - * @param[in] Mode PaddingMode to output - * - * @return Modified output stream. - */ -inline ::std::ostream &operator<<(::std::ostream &os, PaddingMode Mode) -{ - switch(Mode) - { - case PaddingMode::Valid: - os << "Valid"; - break; - case PaddingMode::Same: - os << "Same"; - break; - case PaddingMode::Manual: - os << "Manual"; - break; - default: - throw std::invalid_argument("Unsupported padding mode format"); - } - - return os; -} -/** Structure holding all the input tensor graph parameters */ -struct TensorParams -{ - int width{ 0 }; - int height{ 0 }; - int fm{ 0 }; - int batch{ 0 }; - QuantizationInfo quant_info{ 1.0f, 0 }; - std::string npy{}; - uint64_t range_low{ 0 }; - uint64_t range_high{ 16 }; -}; -/** Structure holding all the verification graph parameters */ -struct VerificationParams -{ - float absolute_tolerance{ -1.f }; - float relative_tolerance{ -1.f }; - float tolerance_number{ -1.f }; -}; - -/** Structure holding all the common graph parameters */ -struct FrameworkParams -{ - bool help{ false }; - int threads{ 0 }; - arm_compute::graph::Target target{ arm_compute::graph::Target::NEON }; -}; - -/** Structure holding all the Convolution layer graph parameters */ -struct ConvolutionParams -{ - arm_compute::DataType data_type{ DataType::F32 }; - arm_compute::DataLayout data_layout{ DataLayout::NCHW }; - arm_compute::graph::ConvolutionMethod convolution_method{ arm_compute::graph::ConvolutionMethod::Default }; - - /** Padding graph parameters */ - int padding_top{ 0 }; - int padding_bottom{ 0 }; - int padding_left{ 0 }; - int padding_right{ 0 }; - int padding_stride_x{ 0 }; - int padding_stride_y{ 0 }; - PaddingMode padding_mode{ PaddingMode::Valid }; - struct - { - struct - { - int X{ 0 }; - int Y{ 0 }; - } stride{}; - PaddingMode mode{ PaddingMode::Valid }; - } padding{}; -}; - -/** Structure holding all the graph Example parameters */ -struct ExampleParams -{ - FrameworkParams common_params{}; - TensorParams input{}; - TensorParams weights{}; - TensorParams bias{}; - TensorParams output{}; - VerificationParams verification{}; - ConvolutionParams convolution{}; -}; - -/** Formatted output of the ConvolutionParams type - * - * @param[out] os Output stream. - * @param[in] common_params Convolution parameters to output - * - * @return Modified output stream. 
- */ -::std::ostream &operator<<(::std::ostream &os, const ExampleParams &common_params) +namespace { - os << "Threads : " << common_params.common_params.threads << std::endl; - os << "Target : " << common_params.common_params.target << std::endl; - os << "Data type : " << common_params.convolution.data_type << std::endl; - os << "Input dimensions(X,Y, Channels, Batch) : (" << common_params.input.width << "," << common_params.input.height << "," << common_params.input.fm << "," << common_params.input.batch << ")" - << std::endl; - os << "Weight dimensions(X,Y, Channels(same as input), OFM) : (" << common_params.weights.width << "," << common_params.weights.height << "," << common_params.input.fm << "," << - common_params.weights.fm << ")" << std::endl; - os << "Padding(top, bottom, left, right) (stride x, stride y) : (" << common_params.convolution.padding_top << "," << common_params.convolution.padding_bottom << "," << - common_params.convolution.padding_left << "," << common_params.convolution.padding_right << ") (" << common_params.convolution.padding_stride_x << "," << common_params.convolution.padding_stride_y << - ")" << std::endl; - os << "Padding Mode: " << common_params.convolution.padding_mode << std::endl; - os << "Convolution Method: " << common_params.convolution.convolution_method << std::endl; - return os; -} - /** Convolution command line options used to configure the graph examples * * (Similar to common options) @@ -210,11 +59,12 @@ struct ExampleParams * CommonOptions options( parser ); * parser.parse(argc, argv); */ -class ConvolutionOptions final +class ConvolutionOptions final : public CommonGraphValidateOptions { public: explicit ConvolutionOptions(CommandLineParser &parser) noexcept - : width(parser.add_option>("width", 9)), + : CommonGraphValidateOptions(parser), + width(parser.add_option>("width", 9)), height(parser.add_option>("height", 9)), channels(parser.add_option>("channels", 1)), batch(parser.add_option>("batch", 1)), @@ -227,16 +77,9 @@ public: padding_right(parser.add_option>("padding_right", 0)), stride_x(parser.add_option>("stride_x", 1)), stride_y(parser.add_option>("stride_y", 1)), - help(parser.add_option("help")), - threads(parser.add_option>("threads")), - target(), - data_type(), padding_mode(), conv_mode(), data_layout(), - absolute_tolerance(parser.add_option>("abs_tolerance", -1.0f)), - relative_tolerance(parser.add_option>("rel_tolerance", -1.0f)), - tolerance_number(parser.add_option>("tolerance_num", -1.0f)), scale(parser.add_option>("scale", 1.0f)), offset(parser.add_option>("offset", 0)), weights_scale(parser.add_option>("weights_scale", 1.0f)), @@ -252,24 +95,10 @@ public: weights_npy(parser.add_option>("weights_npy")), bias_npy(parser.add_option>("bias_image")) { - const std::set available_padding_modes - { - PaddingMode::Valid, - PaddingMode::Same - }; - - const std::set supported_targets + const std::set available_padding_modes { - Target::NEON, - Target::CL, - Target::GC, - }; - - const std::set supported_data_types - { - DataType::F16, - DataType::F32, - DataType::QASYMM8, + ConvolutionPaddingMode::Valid, + ConvolutionPaddingMode::Same }; const std::set supported_convolution_methods @@ -286,14 +115,10 @@ public: DataLayout::NCHW, }; - padding_mode = parser.add_option>("padding_mode", available_padding_modes, PaddingMode::Valid); - target = parser.add_option>("target", supported_targets, Target::NEON); - data_type = parser.add_option>("type", supported_data_types, DataType::F32); + padding_mode = parser.add_option>("padding_mode", 
available_padding_modes, ConvolutionPaddingMode::Valid); conv_mode = parser.add_option>("convolution_method", supported_convolution_methods, arm_compute::graph::ConvolutionMethod::Default); data_layout = parser.add_option>("layout", supported_data_layouts, DataLayout::NHWC); - target->set_help("Target to execute on"); - data_type->set_help("Data type to use"); padding_mode->set_help("Set padding mode"); help->set_help("Show this help message"); width->set_help("Set Input dimension width"); @@ -310,10 +135,6 @@ public: stride_x->set_help("Set padding stride x"); stride_y->set_help("Set padding stride y"); conv_mode->set_help("Set convolution method"); - data_layout->set_help("Data layout to use"); - absolute_tolerance->set_help("Absolute tolerance used for verification"); - relative_tolerance->set_help("Absolute tolerance used for verification"); - tolerance_number->set_help("Absolute tolerance used for verification"); scale->set_help("Quantization scale from QASYMM8"); offset->set_help("Quantization offset from QASYMM8"); weights_scale->set_help("Quantization scale from QASYMM8"); @@ -328,6 +149,69 @@ public: weights_range_high->set_help("Lower bound for input randomization range"); } + /** Fill out the supplied parameters with user supplied parameters + * + * @param[out] os Output stream. + * @param[in] common_params Example parameters to output + * + * @return None. + */ + void consume_parameters(ExampleParams &common_params) + { + common_params.input.width = width->value(); + common_params.input.height = height->value(); + common_params.input.fm = channels->value(); + common_params.input.batch = batch->value(); + common_params.input.quant_info.scale = scale->value(); + common_params.input.quant_info.offset = offset->value(); + common_params.input.npy = input_npy->value(); + common_params.input.range_low = input_range_low->value(); + common_params.input.range_high = input_range_high->value(); + + common_params.weights.width = weights_width->value(); + common_params.weights.height = weights_height->value(); + common_params.weights.fm = OFM->value(); + common_params.weights.npy = weights_npy->value(); + common_params.weights.quant_info.scale = weights_scale->value(); + common_params.weights.quant_info.offset = weights_offset->value(); + common_params.weights.range_low = weights_range_low->value(); + common_params.weights.range_high = weights_range_high->value(); + + common_params.bias.npy = bias_npy->value(); + + common_params.output.quant_info.scale = output_scale->value(); + common_params.output.quant_info.offset = output_offset->value(); + common_params.output.npy = output_npy->value(); + + common_params.convolution.padding_mode = padding_mode->value(); + common_params.convolution.padding_top = padding_top->value(); + common_params.convolution.padding_bottom = padding_bottom->value(); + common_params.convolution.padding_left = padding_left->value(); + common_params.convolution.padding_right = padding_right->value(); + common_params.convolution.padding_stride_x = stride_x->value(); + common_params.convolution.padding_stride_y = stride_y->value(); + + common_params.data_type = data_type->value(); + common_params.data_layout = data_layout->value(); + common_params.convolution_method = conv_mode->value(); + } + + void print_parameters(::std::ostream &os, const ExampleParams &common_params) override + { + os << "Threads : " << common_params.common_params.threads << std::endl; + os << "Target : " << common_params.common_params.target << std::endl; + os << "Data type : " << 
common_params.data_type << std::endl; + os << "Input dimensions(X,Y, Channels, Batch) : (" << common_params.input.width << "," << common_params.input.height << "," << common_params.input.fm << "," << common_params.input.batch << ")" + << std::endl; + os << "Weight dimensions(X,Y, Channels(same as input), OFM) : (" << common_params.weights.width << "," << common_params.weights.height << "," << common_params.input.fm << "," << + common_params.weights.fm << ")" << std::endl; + os << "Padding(top, bottom, left, right) (stride x, stride y) : (" << common_params.convolution.padding_top << "," << common_params.convolution.padding_bottom << "," << + common_params.convolution.padding_left << "," << common_params.convolution.padding_right << ") (" << common_params.convolution.padding_stride_x << "," << common_params.convolution.padding_stride_y << + ")" << std::endl; + os << "Padding Mode: " << common_params.convolution.padding_mode << std::endl; + os << "Convolution Method: " << common_params.convolution_method << std::endl; + } + /** Prevent instances of this class from being copied (As this class contains pointers) */ ConvolutionOptions(const ConvolutionOptions &) = delete; /** Prevent instances of this class from being copied (As this class contains pointers) */ @@ -337,7 +221,7 @@ public: /** Allow instances of this class to be moved */ ConvolutionOptions &operator=(ConvolutionOptions &&) noexcept(true) = default; /** Default destructor */ - ~ConvolutionOptions() = default; + ~ConvolutionOptions() override = default; SimpleOption *width; /**< Input width */ SimpleOption *height; /**< Input height */ @@ -352,16 +236,9 @@ public: SimpleOption *padding_right; /**< Padding right */ SimpleOption *stride_x; /**< Padding stride x */ SimpleOption *stride_y; /**< Padding stride y */ - ToggleOption *help; /**< show help message */ - SimpleOption *threads; /**< Number of threads option */ - EnumOption *target; /**< Graph execution target */ - EnumOption *data_type; /**< Graph data type */ - EnumOption *padding_mode; /**< Padding mode */ + EnumOption *padding_mode; /**< Padding mode */ EnumOption *conv_mode; /**< Convolution method */ EnumOption *data_layout; /**< Graph data layout */ - SimpleOption *absolute_tolerance; /**< Absolute tolerance used in verification */ - SimpleOption *relative_tolerance; /**< Relative tolerance used in verification */ - SimpleOption *tolerance_number; /**< Tolerance number used in verification */ SimpleOption *scale; /**< Input Quantization scale from QASYMM8 */ SimpleOption *offset; /**< Input Quantization offset from QASYMM8 */ SimpleOption *weights_scale; /**< Weights Quantization scale from QASYMM8 */ @@ -379,227 +256,26 @@ public: SimpleOption *bias_npy; /**< Use bias .npy image */ }; -/** Consumes the convolution graph options and creates a structure containing any information - * - * @param[in] options Options to consume - * - * @return Convolutionparams structure containing the common graph parameters - */ -ExampleParams consume_covolution_graph_parameters(ConvolutionOptions &options) -{ - ExampleParams common_params; - - common_params.common_params.help = options.help->is_set() ? 
options.help->value() : false; - common_params.common_params.threads = options.threads->value(); - common_params.common_params.target = options.target->value(); - - common_params.input.width = options.width->value(); - common_params.input.height = options.height->value(); - common_params.input.fm = options.channels->value(); - common_params.input.batch = options.batch->value(); - common_params.input.quant_info.scale = options.scale->value(); - common_params.input.quant_info.offset = options.offset->value(); - common_params.input.npy = options.input_npy->value(); - common_params.input.range_low = options.input_range_low->value(); - common_params.input.range_high = options.input_range_high->value(); - - common_params.weights.width = options.weights_width->value(); - common_params.weights.height = options.weights_height->value(); - common_params.weights.fm = options.OFM->value(); - common_params.weights.npy = options.weights_npy->value(); - common_params.weights.quant_info.scale = options.weights_scale->value(); - common_params.weights.quant_info.offset = options.weights_offset->value(); - common_params.weights.range_low = options.weights_range_low->value(); - common_params.weights.range_high = options.weights_range_high->value(); - - common_params.bias.npy = options.bias_npy->value(); - - common_params.output.quant_info.scale = options.output_scale->value(); - common_params.output.quant_info.offset = options.output_offset->value(); - common_params.output.npy = options.output_npy->value(); - - common_params.convolution.padding_mode = options.padding_mode->value(); - common_params.convolution.padding_top = options.padding_top->value(); - common_params.convolution.padding_bottom = options.padding_bottom->value(); - common_params.convolution.padding_left = options.padding_left->value(); - common_params.convolution.padding_right = options.padding_right->value(); - common_params.convolution.padding_stride_x = options.stride_x->value(); - common_params.convolution.padding_stride_y = options.stride_y->value(); - common_params.convolution.convolution_method = options.conv_mode->value(); - common_params.convolution.data_type = options.data_type->value(); - common_params.convolution.data_layout = options.data_layout->value(); - - common_params.verification.absolute_tolerance = options.absolute_tolerance->value(); - common_params.verification.relative_tolerance = options.relative_tolerance->value(); - common_params.verification.tolerance_number = options.tolerance_number->value(); - - return common_params; -} - -/** Calculate stride information. - * - * Depending on the selected padding mode create the desired PadStrideInfo - * - * @param[in] params Convolution parameters supplied by the user. - * - * @return PadStrideInfo with the correct padding mode. 
- */ -inline PadStrideInfo calculate_convolution_padding(ExampleParams params) -{ - switch(params.convolution.padding_mode) - { - case PaddingMode::Manual: - { - return PadStrideInfo(params.convolution.padding_stride_x, params.convolution.padding_stride_y, params.convolution.padding_left, params.convolution.padding_right, params.convolution.padding_top, - params.convolution.padding_bottom, DimensionRoundingType::FLOOR); - } - case PaddingMode::Valid: - { - return PadStrideInfo(); - } - case PaddingMode::Same: - { - return arm_compute::calculate_same_pad(TensorShape(params.input.width, params.input.height), TensorShape(params.weights.width, params.weights.height), - PadStrideInfo(params.convolution.padding_stride_x, - params.convolution.padding_stride_y)); - } - default: - ARM_COMPUTE_ERROR("NOT SUPPORTED!"); - } -} - /** ConvolutionLayer Graph example validation accessor class */ template -class ConvolutionVerifyAccessor final : public graph::ITensorAccessor +class ConvolutionVerifyAccessor final : public VerifyAccessor { -public: + using BaseClassType = VerifyAccessor; + using BaseClassType::BaseClassType; + using BaseClassType::_params; using TBias = typename std::conditional::type, uint8_t>::value, int32_t, D>::type; - /** Constructor - * - * @param[in] params Convolution parameters - */ - explicit ConvolutionVerifyAccessor(ExampleParams ¶ms) - : _params(std::move(params)) + SimpleTensor reference(SimpleTensor &src, SimpleTensor &weights, SimpleTensor &bias, const TensorShape &output_shape) override { - } + // Calculate padding information + const PadStrideInfo padding_info = calculate_convolution_padding(_params); - // Inherited methods overriden: - bool access_tensor(ITensor &tensor) override - { - if(_params.output.npy.empty()) - { - const RelativeTolerance rel_tolerance(relative_tolenace(_params.verification.relative_tolerance)); /**< Relative tolerance */ - const AbsoluteTolerance abs_tolerance(absolute_tolerance(_params.verification.absolute_tolerance)); /**< Absolute tolerance */ - const float tolerance_num(tolerance_number(_params.verification.tolerance_number)); /**< Tolerance number */ - - //Create Input tensors - SimpleTensor src{ TensorShape(_params.input.width, _params.input.height, _params.input.fm, _params.input.batch), _params.convolution.data_type, 1, _params.input.quant_info }; - SimpleTensor weights{ TensorShape(_params.weights.width, _params.weights.height, _params.weights.fm), _params.convolution.data_type, 1, _params.weights.quant_info }; - SimpleTensor bias{ TensorShape(_params.input.height), _params.convolution.data_type, 1, _params.input.quant_info }; - - //Fill the tenors with random values - fill_tensor(src, 0, static_cast(_params.input.range_low), static_cast(_params.input.range_high)); - fill_tensor(weights, 1, static_cast(_params.weights.range_low), static_cast(_params.weights.range_high)); - fill_tensor(bias, 2, static_cast(_params.input.range_low), static_cast(_params.input.range_high)); - - // Calculate padding information - const PadStrideInfo padding_info = calculate_convolution_padding(_params); - - //Calculate reference - SimpleTensor output = reference::convolution_layer(src, weights, bias, permute_shape(tensor.info()->tensor_shape(), _params.convolution.data_layout, DataLayout::NCHW), padding_info, Size2D(1, - 1), - 1, - _params.output.quant_info); - - arm_compute::test::validation::validate(Accessor(tensor), output, rel_tolerance, tolerance_num, abs_tolerance); - } - else - { - //The user provided a reference file use an npy accessor to validate - 
NumPyAccessor(_params.output.npy, tensor.info()->tensor_shape(), tensor.info()->data_type()).access_tensor(tensor); - } - return false; + //Calculate reference + return reference::convolution_layer(src, weights, bias, output_shape, padding_info, Size2D(1, 1), + 1, _params.output.quant_info); } -private: - /** Fill tensor with Random values. - * - * Validate the given tensor against the reference result. - * - * @param[out] tensor The tensor we want to file - * @param[in] seed seed for the randomization function - * @param[in] low lower bound for random values - * @param[in] high upper bound for random values - * - * @return None. - */ - template - void fill_tensor(arm_compute::test::SimpleTensor &tensor, std::random_device::result_type seed, T low, T high) - { - std::mt19937 gen(seed); - switch(tensor.data_type()) - { - case arm_compute::DataType::QASYMM8: - { - uint8_t qasymm8_low = tensor.quantization_info().quantize(low, RoundingPolicy::TO_NEAREST_UP); - uint8_t qasymm8_high = tensor.quantization_info().quantize(high, RoundingPolicy::TO_NEAREST_UP); - - std::uniform_int_distribution distribution(qasymm8_low, qasymm8_high); - - for(int i = 0; i < tensor.num_elements(); ++i) - { - tensor[i] = tensor.quantization_info().quantize(distribution(gen), RoundingPolicy::TO_NEAREST_UP); - } - - break; - } - case arm_compute::DataType::S32: - { - std::uniform_int_distribution distribution(static_cast(low), static_cast(high)); - - for(int i = 0; i < tensor.num_elements(); ++i) - { - tensor[i] = distribution(gen); - } - - break; - } - - case arm_compute::DataType::F16: - { - std::uniform_real_distribution distribution(static_cast(low), static_cast(high)); - - for(int i = 0; i < tensor.num_elements(); ++i) - { - tensor[i] = static_cast(distribution(gen)); - } - break; - } - case arm_compute::DataType::F32: - { - std::uniform_real_distribution distribution(static_cast(low), static_cast(high)); - - for(int i = 0; i < tensor.num_elements(); ++i) - { - tensor[i] = distribution(gen); - } - - break; - } - default: - ARM_COMPUTE_ERROR("NOT SUPPORTED!"); - } - } - /** Select relative tolerance. - * - * Select relative tolerance if not supplied by user. - * - * @param[in] user_value supplied relative tolerance. -1 designates no user input - * - * @return Appropriate relative tolerance. - */ - float relative_tolenace(float user_value) + float relative_tolerance() override { const std::map> relative_tolerance { @@ -618,32 +294,20 @@ private: } } }; - if(user_value == -1) + + if(_params.convolution_method == arm_compute::graph::ConvolutionMethod::Winograd + && _params.data_type == DataType::F32 + && _params.common_params.target == arm_compute::graph::Target::NEON) { - if(_params.convolution.convolution_method == arm_compute::graph::ConvolutionMethod::Winograd - && _params.convolution.data_type == DataType::F32 - && _params.common_params.target == arm_compute::graph::Target::NEON) - { - return 0.05f; - } - else - { - return relative_tolerance.at(_params.common_params.target).at(_params.convolution.data_type); - } + return 0.05f; + } + else + { + return relative_tolerance.at(_params.common_params.target).at(_params.data_type); } - - return user_value; } - /** Select absolute tolerance. - * - * Select absolute tolerance if not supplied by user. - * - * @param[in] user_value supplied absolute tolerance. -1 designates no user input - * - * @return Appropriate absolute tolerance. 
- */ - float absolute_tolerance(float user_value) + float absolute_tolerance() override { const std::map> absolute_tolerance { @@ -663,21 +327,10 @@ private: } }; - if(user_value == -1) - { - return absolute_tolerance.at(_params.common_params.target).at(_params.convolution.data_type); - } - return user_value; + return absolute_tolerance.at(_params.common_params.target).at(_params.data_type); } - /** Select tolerance number. - * - * Select tolerance number if not supplied by user. - * - * @param[in] user_value supplied tolerance number. -1 designates no user input - * - * @return Appropriate tolerance number. - */ - float tolerance_number(float user_value) + + float tolerance_number() override { const std::map> absolute_tolerance { @@ -697,133 +350,38 @@ private: } }; - if(user_value == -1) - { - return absolute_tolerance.at(_params.common_params.target).at(_params.convolution.data_type); - } - return user_value; + return absolute_tolerance.at(_params.common_params.target).at(_params.data_type); } - - ExampleParams _params; }; -/** Generates appropriate convolution verify accessor - * - * @param[in] params User supplied parameters for convolution. - * - * @return A convolution verify accessor for the requested datatype. - */ -inline std::unique_ptr get_convolution_verify_accessor(ExampleParams params) -{ - switch(params.convolution.data_type) - { - case DataType::QASYMM8: - { - return arm_compute::support::cpp14::make_unique>( - params); - } - case DataType::F16: - { - return arm_compute::support::cpp14::make_unique>( - params); - } - case DataType::F32: - { - return arm_compute::support::cpp14::make_unique>( - params); - } - default: - ARM_COMPUTE_ERROR("NOT SUPPORTED!"); - } -} -/** Generates appropriate accessor according to the specified graph parameters - * - * @param[in] graph_parameters Graph parameters - * @param[in] lower Lower random values bound - * @param[in] upper Upper random values bound - * @param[in] seed Random generator seed - * - * @return An appropriate tensor accessor - */ -inline std::unique_ptr get_accessor(const TensorParams &tensor, PixelValue lower, PixelValue upper, const std::random_device::result_type seed = 0) -{ - if(!tensor.npy.empty()) - { - return arm_compute::support::cpp14::make_unique(tensor.npy); - } - else - { - return arm_compute::support::cpp14::make_unique(lower, upper, seed); - } -} } // namespace -class GraphConvolutionValidateExample final : public ValidateExample +class GraphConvolutionValidateExample final : public GraphValidateExample { + using GraphValidateExample::graph; + public: GraphConvolutionValidateExample() - : graph(0, "Convolution Graph example") + : GraphValidateExample("Convolution Graph example") { } - bool do_setup(int argc, char **argv) override - { - CommandLineParser parser; - - ConvolutionOptions Options(parser); - parser.parse(argc, argv); - - ExampleParams params = consume_covolution_graph_parameters(Options); - - if(params.common_params.help) - { - parser.print_help(argv[0]); - return false; - } + ConvolutionLayer GraphFunctionLayer(ExampleParams ¶ms) override + { + const PixelValue lower = PixelValue(params.input.range_low, params.data_type, params.input.quant_info); + const PixelValue upper = PixelValue(params.input.range_high, params.data_type, params.input.quant_info); - std::cout << params << std::endl; + const PixelValue weights_lower = PixelValue(params.weights.range_low, params.data_type, params.weights.quant_info); + const PixelValue weights_upper = PixelValue(params.weights.range_high, params.data_type, 
params.weights.quant_info); // Calculate padding information const PadStrideInfo padding_info = calculate_convolution_padding(params); - // Create input descriptor - const TensorShape input_shape = permute_shape(TensorShape(params.input.width, params.input.height, params.input.fm, params.input.batch), DataLayout::NCHW, params.convolution.data_layout); - TensorDescriptor input_descriptor = TensorDescriptor(input_shape, params.convolution.data_type, params.input.quant_info, params.convolution.data_layout); - - const PixelValue lower = PixelValue(params.input.range_low, params.convolution.data_type, params.input.quant_info); - const PixelValue upper = PixelValue(params.input.range_high, params.convolution.data_type, params.input.quant_info); - - const PixelValue weights_lower = PixelValue(params.weights.range_low, params.convolution.data_type, params.weights.quant_info); - const PixelValue weights_upper = PixelValue(params.weights.range_high, params.convolution.data_type, params.weights.quant_info); - - graph << params.common_params.target - << params.convolution.convolution_method - << InputLayer(input_descriptor, get_accessor(params.input, lower, upper, 0)) - << ConvolutionLayer(params.weights.width, params.weights.height, params.weights.fm, - get_accessor(params.weights, weights_lower, weights_upper, 1), - get_accessor(params.bias, lower, upper, 2), - padding_info, 1, params.weights.quant_info, params.output.quant_info) - << OutputLayer(get_convolution_verify_accessor(params)); - - GraphConfig config; - config.num_threads = params.common_params.threads; - - graph.finalize(params.common_params.target, config); - - return true; - } - - void do_run() override - { - graph.run(); - } - - void do_teardown() override - { + return ConvolutionLayer(params.weights.width, params.weights.height, params.weights.fm, + get_accessor(params.weights, weights_lower, weights_upper, 1), + get_accessor(params.bias, lower, upper, 2), + padding_info, 1, params.weights.quant_info, params.output.quant_info); } - -private: - Stream graph; }; /** Main program for Graph Convolution test diff --git a/tests/validate_examples/graph_depthwiseconvolution.cpp b/tests/validate_examples/graph_depthwiseconvolution.cpp new file mode 100644 index 0000000000..cdad404dfa --- /dev/null +++ b/tests/validate_examples/graph_depthwiseconvolution.cpp @@ -0,0 +1,396 @@ +/* + * Copyright (c) 2019 ARM Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#include "arm_compute/graph.h" + +#include "support/ToolchainSupport.h" + +#include "tests/NEON/Accessor.h" +#include "tests/validation/Validation.h" +#include "tests/validation/reference/DepthwiseConvolutionLayer.h" +#include "tests/validation/reference/Permute.h" + +#include "utils/CommonGraphOptions.h" +#include "utils/GraphUtils.h" +#include "utils/Utils.h" + +#include "ValidateExample.h" +#include "graph_validate_utils.h" + +#include + +using namespace arm_compute::utils; +using namespace arm_compute::graph::frontend; +using namespace arm_compute::graph_utils; +using namespace arm_compute::graph; +using namespace arm_compute; +using namespace arm_compute::test; +using namespace arm_compute::test::validation; + +namespace +{ +/** Depthwise Convolution command line options used to configure the graph examples + * + * (Similar to common options) + * The options in this object get populated when "parse()" is called on the parser used to construct it. + * The expected workflow is: + * + * CommandLineParser parser; + * CommonOptions options( parser ); + * parser.parse(argc, argv); + */ +class DepthConvolutionOptions final : public CommonGraphValidateOptions +{ +public: + explicit DepthConvolutionOptions(CommandLineParser &parser) noexcept + : CommonGraphValidateOptions(parser), + width(parser.add_option>("width", 9)), + height(parser.add_option>("height", 9)), + channels(parser.add_option>("channels", 1)), + batch(parser.add_option>("batch", 1)), + weights_width(parser.add_option>("weights_width", 3)), + weights_height(parser.add_option>("weights_height", 3)), + padding_top(parser.add_option>("padding_top", 0)), + padding_left(parser.add_option>("padding_left", 0)), + padding_bottom(parser.add_option>("padding_bottom", 0)), + padding_right(parser.add_option>("padding_right", 0)), + stride_x(parser.add_option>("stride_x", 1)), + stride_y(parser.add_option>("stride_y", 1)), + padding_mode(), + conv_mode(), + depth_multiplier(parser.add_option>("depth_multiplier", 1)), + data_layout(), + scale(parser.add_option>("scale", 1.0f)), + offset(parser.add_option>("offset", 0)), + weights_scale(parser.add_option>("weights_scale", 1.0f)), + weights_offset(parser.add_option>("weights_offset", 0)), + output_scale(parser.add_option>("output_scale", 1.0f)), + output_offset(parser.add_option>("output_offset", 0)), + input_range_low(parser.add_option>("input_range_low")), + input_range_high(parser.add_option>("input_range_high")), + weights_range_low(parser.add_option>("weights_range_low")), + weights_range_high(parser.add_option>("weights_range_high")), + input_npy(parser.add_option>("input_image")), + output_npy(parser.add_option>("reference_image")), + weights_npy(parser.add_option>("weights_npy")), + bias_npy(parser.add_option>("bias_image")) + { + const std::set available_padding_modes + { + ConvolutionPaddingMode::Valid, + ConvolutionPaddingMode::Same + }; + + const std::set supported_convolution_methods + { + arm_compute::graph::DepthwiseConvolutionMethod::Default, + arm_compute::graph::DepthwiseConvolutionMethod::GEMV, + arm_compute::graph::DepthwiseConvolutionMethod::Optimized3x3, + }; + + const std::set supported_data_layouts + { + DataLayout::NHWC, + DataLayout::NCHW, + }; + + padding_mode = parser.add_option>("padding_mode", available_padding_modes, ConvolutionPaddingMode::Valid); + conv_mode = parser.add_option>("convolution_method", supported_convolution_methods, + arm_compute::graph::DepthwiseConvolutionMethod::Default); + data_layout = parser.add_option>("layout", 
supported_data_layouts, DataLayout::NHWC); + + padding_mode->set_help("Set padding mode"); + width->set_help("Set Input dimension width"); + height->set_help("Set Input dimension height"); + channels->set_help("Set Input dimension channels"); + batch->set_help("Set Input dimension batch"); + weights_width->set_help("Set weights_dimensions width"); + weights_height->set_help("Set weights_dimensions height"); + padding_top->set_help("Set padding top"); + padding_bottom->set_help("Set padding bottom"); + padding_left->set_help("Set padding left"); + padding_right->set_help("Set padding right"); + stride_x->set_help("Set padding stride x"); + stride_y->set_help("Set padding stride y"); + conv_mode->set_help("Set convolution method"); + data_layout->set_help("Data layout to use"); + scale->set_help("Quantization scale from QASYMM8"); + offset->set_help("Quantization offset from QASYMM8"); + output_scale->set_help("Quantization scale from QASYMM8"); + output_offset->set_help("Quantization offset from QASYMM8"); + input_npy->set_help("Use input .npy instead"); + output_npy->set_help("Use .npy as a reference"); + input_range_low->set_help("Lower bound for input randomization range"); + input_range_high->set_help("Lower bound for input randomization range"); + weights_scale->set_help("Quantization scale from QASYMM8"); + weights_offset->set_help("Quantization offset from QASYMM8"); + weights_range_low->set_help("Lower bound for input randomization range"); + weights_range_high->set_help("Lower bound for input randomization range"); + depth_multiplier->set_help("Depth multiplier"); + } + + /** Fill out the supplied parameters with user supplied parameters + * + * @param[out] os Output stream. + * @param[in] common_params Example parameters to output + * + * @return None. 
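+ *
+ * A typical call sequence, sketched for illustration only (the surrounding setup is
+ * expected to be driven by the shared GraphValidateExample harness introduced in
+ * graph_validate_utils.h, much like the do_setup() code it replaces):
+ *
+ * CommandLineParser parser;
+ * DepthConvolutionOptions options( parser );
+ * parser.parse(argc, argv);
+ * ExampleParams params;
+ * options.consume_parameters(params);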
+ */ + void consume_parameters(ExampleParams &common_params) + { + common_params.input.width = width->value(); + common_params.input.height = height->value(); + common_params.input.fm = channels->value(); + common_params.input.batch = batch->value(); + common_params.input.quant_info.scale = scale->value(); + common_params.input.quant_info.offset = offset->value(); + common_params.input.npy = input_npy->value(); + common_params.input.range_low = input_range_low->value(); + common_params.input.range_high = input_range_high->value(); + + common_params.weights.width = weights_width->value(); + common_params.weights.height = weights_height->value(); + common_params.weights.npy = weights_npy->value(); + common_params.weights.range_low = weights_range_low->value(); + common_params.weights.range_high = weights_range_high->value(); + common_params.weights.quant_info.scale = weights_scale->value(); + common_params.weights.quant_info.offset = weights_offset->value(); + + common_params.bias.npy = bias_npy->value(); + + common_params.output.quant_info.scale = output_scale->value(); + common_params.output.quant_info.offset = output_offset->value(); + common_params.output.npy = output_npy->value(); + + common_params.convolution.padding_mode = padding_mode->value(); + common_params.convolution.padding_top = padding_top->value(); + common_params.convolution.padding_bottom = padding_bottom->value(); + common_params.convolution.padding_left = padding_left->value(); + common_params.convolution.padding_right = padding_right->value(); + common_params.convolution.padding_stride_x = stride_x->value(); + common_params.convolution.padding_stride_y = stride_y->value(); + common_params.convolution.depth_multiplier = depth_multiplier->value(); + + common_params.data_type = data_type->value(); + common_params.data_layout = data_layout->value(); + common_params.depth_convolution_method = conv_mode->value(); + } + + void print_parameters(::std::ostream &os, const ExampleParams &common_params) override + { + os << "Threads : " << common_params.common_params.threads << std::endl; + os << "Target : " << common_params.common_params.target << std::endl; + os << "Data type : " << common_params.data_type << std::endl; + os << "Input dimensions(X,Y, Channels, Batch) : (" << common_params.input.width << "," << common_params.input.height << "," << common_params.input.fm << "," << common_params.input.batch << ")" + << std::endl; + os << "Weight dimensions(X,Y, Channels(same as input)) : (" << common_params.weights.width << "," << common_params.weights.height << "," << common_params.input.fm << "," + << ")" << std::endl; + os << "Padding(top, bottom, left, right) (stride x, stride y) : (" << common_params.convolution.padding_top << "," << common_params.convolution.padding_bottom << "," << + common_params.convolution.padding_left << "," << common_params.convolution.padding_right << ") (" << common_params.convolution.padding_stride_x << "," << common_params.convolution.padding_stride_y << + ")" << std::endl; + os << "Padding Mode: " << common_params.convolution.padding_mode << std::endl; + os << "Convolution Method: " << common_params.depth_convolution_method << std::endl; + os << "Depth multiplier: " << common_params.convolution.depth_multiplier; + } + + /** Prevent instances of this class from being copied (As this class contains pointers) */ + DepthConvolutionOptions(const DepthConvolutionOptions &) = delete; + /** Prevent instances of this class from being copied (As this class contains pointers) */ + DepthConvolutionOptions 
&operator=(const DepthConvolutionOptions &) = delete; + /** Allow instances of this class to be moved */ + DepthConvolutionOptions(DepthConvolutionOptions &&) noexcept(true) = default; + /** Allow instances of this class to be moved */ + DepthConvolutionOptions &operator=(DepthConvolutionOptions &&) noexcept(true) = default; + /** Default destructor */ + ~DepthConvolutionOptions() override = default; + + SimpleOption *width; /**< Input width */ + SimpleOption *height; /**< Input height */ + SimpleOption *channels; /**< Input channels */ + SimpleOption *batch; /**< Input batch */ + SimpleOption *weights_width; /**< weights width */ + SimpleOption *weights_height; /**< weights height */ + SimpleOption *padding_top; /**< Padding top */ + SimpleOption *padding_left; /**< Padding left */ + SimpleOption *padding_bottom; /**< Padding bottom */ + SimpleOption *padding_right; /**< Padding right */ + SimpleOption *stride_x; /**< Padding stride x */ + SimpleOption *stride_y; /**< Padding stride y */ + EnumOption *padding_mode; /**< Padding mode */ + EnumOption *conv_mode; /**< Convolution method */ + SimpleOption *depth_multiplier; /**< Depth multiplier */ + EnumOption *data_layout; /**< Graph data layout */ + SimpleOption *scale; /**< Input Quantization scale from QASYMM8 */ + SimpleOption *offset; /**< Input Quantization offset from QASYMM8 */ + SimpleOption *weights_scale; /**< Weights Quantization scale from QASYMM8 */ + SimpleOption *weights_offset; /**< Weights Quantization offset from QASYMM8 */ + SimpleOption *output_scale; /**< Output Quantization scale from QASYMM8 */ + SimpleOption *output_offset; /**< Output Quantization offset from QASYMM8 */ + SimpleOption *input_range_low; /**< Lower bound for input randomization range */ + SimpleOption *input_range_high; /**< Upper bound for input randomization range */ + SimpleOption *weights_range_low; /**< Lower bound for weights randomization range */ + SimpleOption *weights_range_high; /**< Upper bound for weights randomization range */ + + SimpleOption *input_npy; /**< Use input .npy image */ + SimpleOption *output_npy; /**< Use output .npy image to verify*/ + SimpleOption *weights_npy; /**< Use weights .npy image */ + SimpleOption *bias_npy; /**< Use bias .npy image */ +}; + +/** DepthwiseConvolutionLayer Graph example validation accessor class */ +template +class DepthConvolutionVerifyAccessor final : public VerifyAccessor +{ +public: + using BaseClassType = VerifyAccessor; + using BaseClassType::BaseClassType; + using BaseClassType::_params; + using TBias = typename std::conditional::type, uint8_t>::value, int32_t, D>::type; + +public: + SimpleTensor reference(SimpleTensor &src, SimpleTensor &weights, SimpleTensor &bias, const TensorShape &output_shape) override + { + // Calculate padding information + const PadStrideInfo padding_info = calculate_convolution_padding(_params); + + //Calculate reference + return reference::depthwise_convolution(src, weights, bias, output_shape, padding_info, + _params.convolution.depth_multiplier, + Size2D(1U, 1U), + _params.output.quant_info); + } + + float relative_tolerance() override + { + const std::map> relative_tolerance + { + { + arm_compute::graph::Target::CL, + { { DataType::F16, 0.01f }, + { DataType::F32, 0.01f }, + { DataType::QASYMM8, 0.0f } + } + }, + { + arm_compute::graph::Target::NEON, + { { DataType::F16, 0.01f }, + { DataType::F32, 0.01f }, + { DataType::QASYMM8, 1.0f } + } + } + }; + + return relative_tolerance.at(_params.common_params.target).at(_params.data_type); + } + + float 
absolute_tolerance() override + { + const std::map> absolute_tolerance + { + { + Target::CL, + { { DataType::F16, 0.0f }, + { DataType::F32, 0.0000f }, + { DataType::QASYMM8, 0.0f } + } + }, + { + Target::NEON, + { { DataType::F16, 0.2f }, + { DataType::F32, 0.002f }, + { DataType::QASYMM8, 0.0f } + } + } + }; + + return absolute_tolerance.at(_params.common_params.target).at(_params.data_type); + } + + float tolerance_number() override + { + const std::map> absolute_tolerance + { + { + Target::CL, + { { DataType::F16, 0.05f }, + { DataType::F32, 0.00f }, + { DataType::QASYMM8, 0.0f } + } + }, + { + Target::NEON, + { { DataType::F16, 0.05f }, + { DataType::F32, 0.0f }, + { DataType::QASYMM8, 0.0f } + } + } + }; + + return absolute_tolerance.at(_params.common_params.target).at(_params.data_type); + } +}; + +} // namespace + +class GraphDepthwiseConvolutionValidateExample final : public GraphValidateExample +{ + using GraphValidateExample::graph; + +public: + GraphDepthwiseConvolutionValidateExample() + : GraphValidateExample("DepthWiseConvolution Graph example") + { + } + + DepthwiseConvolutionLayer GraphFunctionLayer(ExampleParams ¶ms) override + { + const PixelValue lower = PixelValue(params.input.range_low, params.data_type, params.input.quant_info); + const PixelValue upper = PixelValue(params.input.range_high, params.data_type, params.input.quant_info); + + const PixelValue weights_lower = PixelValue(params.weights.range_low, params.data_type, params.weights.quant_info); + const PixelValue weights_upper = PixelValue(params.weights.range_high, params.data_type, params.weights.quant_info); + + // Calculate padding information + const PadStrideInfo padding_info = calculate_convolution_padding(params); + + return DepthwiseConvolutionLayer(params.weights.width, params.weights.height, + get_accessor(params.weights, weights_lower, weights_upper, 1), + get_accessor(params.bias, lower, upper, 2), + padding_info, params.convolution.depth_multiplier, params.weights.quant_info, params.output.quant_info); + } +}; + +/** Main program for Graph Depthwise Convolution test + * + * @param[in] argc Number of arguments + * @param[in] argv Arguments ( Input dimensions [width, height, channels, batch] + * Weights dimensions [width, height, channels] + * Padding [top,bottom,left,right, Stride x, Stride y, mode [Valid / Same / Manual] ) + * Convolution Method[ Default/GEMV/Optimized3x3] + * Verification[tolerance_number,absolute_tolerance,relative_tolerance] ) + * + */ +int main(int argc, char **argv) +{ + return arm_compute::utils::run_example(argc, argv); +} diff --git a/tests/validate_examples/graph_fully_connected.cpp b/tests/validate_examples/graph_fully_connected.cpp index e4f51175f0..085518c865 100644 --- a/tests/validate_examples/graph_fully_connected.cpp +++ b/tests/validate_examples/graph_fully_connected.cpp @@ -35,6 +35,7 @@ #include "utils/Utils.h" #include "ValidateExample.h" +#include "graph_validate_utils.h" #include @@ -45,77 +46,10 @@ using namespace arm_compute::graph; using namespace arm_compute; using namespace arm_compute::test; using namespace arm_compute::test::validation; -namespace -{ -/** Structure holding all the input tensor graph parameters */ -struct TensorParams -{ - int width{ 1 }; - int height{ 1 }; - int fm{ 1 }; - int batch{ 1 }; - QuantizationInfo quant_info{ 1.0f, 0 }; - uint64_t range_low{ 0 }; - uint64_t range_high{ 16 }; -}; -/** Structure holding all the verification graph parameters */ -struct VerificationParams -{ - float absolute_tolerance{ -1.f }; - float 
relative_tolerance{ -1.f }; - float tolerance_number{ -1.f }; -}; - -/** Structure holding all the common graph parameters */ -struct FrameworkParams -{ - bool help{ false }; - int threads{ 0 }; - arm_compute::graph::Target target{ arm_compute::graph::Target::NEON }; -}; -/** Structure holding all the fully_connected layer graph parameters */ -struct FullyConnectedParams -{ - arm_compute::DataType data_type{ DataType::F32 }; - arm_compute::DataLayout data_layout{ DataLayout::NCHW }; - FullyConnectedLayerInfo info{}; - int num_outputs{ 1 }; -}; - -/** Structure holding all the graph Example parameters */ -struct ExampleParams -{ - FrameworkParams common_params{}; - TensorParams input{}; - TensorParams weights{}; - TensorParams output{}; - VerificationParams verification{}; - FullyConnectedParams fully_connected{}; -}; - -/** Formatted output of the fully_connectedParams type - * - * @param[out] os Output stream. - * @param[in] common_params fully_connected parameters to output - * - * @return Modified output stream. - */ -::std::ostream &operator<<(::std::ostream &os, const ExampleParams &common_params) +namespace { - std::string false_str = std::string("false"); - std::string true_str = std::string("true"); - - os << "Threads : " << common_params.common_params.threads << std::endl; - os << "Target : " << common_params.common_params.target << std::endl; - os << "Data type : " << common_params.fully_connected.data_type << std::endl; - os << "Input dimensions(X,Y, Channels, Batch) : (" << common_params.input.width << "," << common_params.input.height << "," << common_params.input.fm << "," << common_params.input.batch << ")" - << std::endl; - os << "Number of outputs : " << common_params.fully_connected.num_outputs << std::endl; - return os; -} - -/** fully_connected command line options used to configure the graph examples +/** Fully connected command line options used to configure the graph examples * * (Similar to common options) * The options in this object get populated when "parse()" is called on the parser used to construct it. 
@@ -125,19 +59,13 @@ struct ExampleParams * CommonOptions options( parser ); * parser.parse(argc, argv); */ -class FullyConnectedOptions final +class FullyConnectedOptions final : public CommonGraphValidateOptions { public: explicit FullyConnectedOptions(CommandLineParser &parser) noexcept - : width(parser.add_option>("width", 3)), + : CommonGraphValidateOptions(parser), + width(parser.add_option>("width", 3)), batch(parser.add_option>("batch", 1)), - help(parser.add_option("help")), - threads(parser.add_option>("threads")), - target(), - data_type(), - absolute_tolerance(parser.add_option>("abs_tolerance", -1.0f)), - relative_tolerance(parser.add_option>("rel_tolerance", -1.0f)), - tolerance_number(parser.add_option>("tolerance_num", -1.0f)), input_scale(parser.add_option>("input_scale", 1.0f)), input_offset(parser.add_option>("input_offset", 0)), weights_scale(parser.add_option>("weights_scale", 1.0f)), @@ -150,31 +78,8 @@ public: weights_range_low(parser.add_option>("weights_range_low")), weights_range_high(parser.add_option>("weights_range_high")) { - const std::set supported_targets - { - Target::NEON, - Target::CL, - Target::GC, - }; - - const std::set supported_data_types - { - DataType::F16, - DataType::F32, - DataType::QASYMM8, - }; - - target = parser.add_option>("target", supported_targets, Target::NEON); - data_type = parser.add_option>("type", supported_data_types, DataType::F32); - - target->set_help("Target to execute on"); - data_type->set_help("Data type to use"); - help->set_help("Show this help message"); width->set_help("Set Input dimension width"); batch->set_help("Set Input dimension batch"); - absolute_tolerance->set_help("Absolute tolerance used for verification"); - relative_tolerance->set_help("Absolute tolerance used for verification"); - tolerance_number->set_help("Absolute tolerance used for verification"); input_scale->set_help("Quantization scale from QASYMM8"); input_offset->set_help("Quantization offset from QASYMM8"); weights_scale->set_help("Quantization scale from QASYMM8"); @@ -188,6 +93,44 @@ public: weights_range_high->set_help("Lower bound for input randomization range"); } + /** Fill out the supplied parameters with user supplied parameters + * + * @param[out] os Output stream. + * @param[in] common_params Example parameters to output + * + * @return None. 
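+ *
+ * A typical call sequence, sketched for illustration only (mirroring the convolution and
+ * depthwise examples; the surrounding setup is expected to be driven by the shared
+ * GraphValidateExample harness in graph_validate_utils.h):
+ *
+ * CommandLineParser parser;
+ * FullyConnectedOptions options( parser );
+ * parser.parse(argc, argv);
+ * ExampleParams params;
+ * options.consume_parameters(params);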
+ */ + void consume_parameters(ExampleParams &common_params) + { + common_params.input.width = width->value(); + common_params.input.batch = batch->value(); + common_params.input.quant_info.scale = input_scale->value(); + common_params.input.quant_info.offset = input_offset->value(); + common_params.input.range_low = input_range_low->value(); + common_params.input.range_high = input_range_high->value(); + + common_params.weights.quant_info.scale = weights_scale->value(); + common_params.weights.quant_info.offset = weights_offset->value(); + common_params.weights.range_low = weights_range_low->value(); + common_params.weights.range_high = weights_range_high->value(); + + common_params.output.quant_info.scale = output_scale->value(); + common_params.output.quant_info.offset = output_offset->value(); + + common_params.data_type = data_type->value(); + common_params.fully_connected.num_outputs = num_outputs->value(); + } + + void print_parameters(::std::ostream &os, const ExampleParams &common_params) override + { + os << "Threads : " << common_params.common_params.threads << std::endl; + os << "Target : " << common_params.common_params.target << std::endl; + os << "Data type : " << common_params.data_type << std::endl; + os << "Input dimensions(X,Y, Channels, Batch) : (" << common_params.input.width << "," << common_params.input.height << "," << common_params.input.fm << "," << common_params.input.batch << ")" + << std::endl; + os << "Number of outputs : " << common_params.fully_connected.num_outputs << std::endl; + } + /** Prevent instances of this class from being copied (As this class contains pointers) */ FullyConnectedOptions(const FullyConnectedOptions &) = delete; /** Prevent instances of this class from being copied (As this class contains pointers) */ @@ -197,95 +140,41 @@ public: /** Allow instances of this class to be moved */ FullyConnectedOptions &operator=(FullyConnectedOptions &&) noexcept(true) = default; /** Default destructor */ - ~FullyConnectedOptions() = default; - - SimpleOption *width; /**< Input width */ - SimpleOption *batch; /**< Input batch */ - ToggleOption *help; /**< show help message */ - SimpleOption *threads; /**< Number of threads option */ - EnumOption *target; /**< Graph execution target */ - EnumOption *data_type; /**< Graph data type */ - SimpleOption *absolute_tolerance; /**< Absolute tolerance used in verification */ - SimpleOption *relative_tolerance; /**< Relative tolerance used in verification */ - SimpleOption *tolerance_number; /**< Tolerance number used in verification */ - SimpleOption *input_scale; /**< Input Quantization scale from QASSYMM8 */ - SimpleOption *input_offset; /**< Input Quantization offset from QASSYMM8 */ - SimpleOption *weights_scale; /**< Weights Quantization scale from QASSYMM8 */ - SimpleOption *weights_offset; /**< Weights Quantization offset from QASSYMM8 */ - SimpleOption *output_scale; /**< Output Quantization scale from QASSYMM8 */ - SimpleOption *output_offset; /**< Output Quantization offset from QASSYMM8 */ - SimpleOption *num_outputs; /**< Number of outputs. 
*/ - SimpleOption *input_range_low; /**< Lower bound for input randomization range */ - SimpleOption *input_range_high; /**< Upper bound for input randomization range */ - SimpleOption *weights_range_low; /**< Lower bound for weights randomization range */ - SimpleOption *weights_range_high; /**< Upper bound for weights randomization range */ + ~FullyConnectedOptions() override = default; + + SimpleOption *width; /**< Input width */ + SimpleOption *batch; /**< Input batch */ + SimpleOption *input_scale; /**< Input Quantization scale from QASSYMM8 */ + SimpleOption *input_offset; /**< Input Quantization offset from QASSYMM8 */ + SimpleOption *weights_scale; /**< Weights Quantization scale from QASSYMM8 */ + SimpleOption *weights_offset; /**< Weights Quantization offset from QASSYMM8 */ + SimpleOption *output_scale; /**< Output Quantization scale from QASSYMM8 */ + SimpleOption *output_offset; /**< Output Quantization offset from QASSYMM8 */ + SimpleOption *num_outputs; /**< Number of outputs. */ + SimpleOption *input_range_low; /**< Lower bound for input randomization range */ + SimpleOption *input_range_high; /**< Upper bound for input randomization range */ + SimpleOption *weights_range_low; /**< Lower bound for weights randomization range */ + SimpleOption *weights_range_high; /**< Upper bound for weights randomization range */ }; -/** Consumes the fully_connected graph options and creates a structure containing any information - * - * @param[in] options Options to consume - * - * @return fully_connectedparams structure containing the common graph parameters - */ -ExampleParams consume_fully_connected_graph_parameters(FullyConnectedOptions &options) -{ - ExampleParams common_params; - - common_params.common_params.help = options.help->is_set() ? options.help->value() : false; - common_params.common_params.threads = options.threads->value(); - common_params.common_params.target = options.target->value(); - - common_params.input.width = options.width->value(); - common_params.input.batch = options.batch->value(); - common_params.input.quant_info.scale = options.input_scale->value(); - common_params.input.quant_info.offset = options.input_offset->value(); - common_params.input.range_low = options.input_range_low->value(); - common_params.input.range_high = options.input_range_high->value(); - - common_params.weights.quant_info.scale = options.weights_scale->value(); - common_params.weights.quant_info.offset = options.weights_offset->value(); - common_params.weights.range_low = options.weights_range_low->value(); - common_params.weights.range_high = options.weights_range_high->value(); - - common_params.output.quant_info.scale = options.output_scale->value(); - common_params.output.quant_info.offset = options.output_offset->value(); - - common_params.fully_connected.data_type = options.data_type->value(); - common_params.fully_connected.num_outputs = options.num_outputs->value(); - - common_params.verification.absolute_tolerance = options.absolute_tolerance->value(); - common_params.verification.relative_tolerance = options.relative_tolerance->value(); - common_params.verification.tolerance_number = options.tolerance_number->value(); - - return common_params; -} - -/** fully_connectedLayer Graph example validation accessor class */ +/** Fully Connected Layer Graph example validation accessor class */ template -class FullyConnectedVerifyAccessor final : public graph::ITensorAccessor +class FullyConnectedVerifyAccessor final : public VerifyAccessor { -public: + using BaseClassType = 
VerifyAccessor; + using BaseClassType::BaseClassType; + using BaseClassType::_params; using TBias = typename std::conditional::type, uint8_t>::value, int32_t, D>::type; - /** Constructor - * - * @param[in] params fully_connected parameters - */ - explicit FullyConnectedVerifyAccessor(ExampleParams ¶ms) - : _params(params) - { - } - - // Inherited methods overridden: - bool access_tensor(ITensor &tensor) override + // Inherited methods overriden: + void create_tensors(arm_compute::test::SimpleTensor &src, + arm_compute::test::SimpleTensor &weights, + arm_compute::test::SimpleTensor &bias, + ITensor &tensor) override { - const RelativeTolerance rel_tolerance(relative_tolenace(_params.verification.relative_tolerance)); /**< Relative tolerance */ - const AbsoluteTolerance abs_tolerance(absolute_tolerance(_params.verification.absolute_tolerance)); /**< Absolute tolerance */ - const float tolerance_num(tolerance_number(_params.verification.tolerance_number)); /**< Tolerance number */ - // Calculate Tensor shapes for verification const TensorShape input_shape = TensorShape(_params.input.width, _params.input.height, _params.input.fm, _params.input.batch); - const TensorDescriptor input_descriptor = TensorDescriptor(input_shape, _params.fully_connected.data_type, _params.input.quant_info); + const TensorDescriptor input_descriptor = TensorDescriptor(input_shape, _params.data_type, _params.input.quant_info); const TensorDescriptor weights_descriptor = FullyConnectedLayerNode::compute_weights_descriptor(input_descriptor, _params.fully_connected.num_outputs, _params.fully_connected.info, @@ -293,101 +182,31 @@ public: const TensorDescriptor output_desciptor = FullyConnectedLayerNode::compute_output_descriptor(input_descriptor, _params.fully_connected.num_outputs, _params.output.quant_info); //Create Input tensors - SimpleTensor src{ input_descriptor.shape, _params.fully_connected.data_type, 1, input_descriptor.quant_info }; - SimpleTensor weights{ weights_descriptor.shape, _params.fully_connected.data_type, 1, weights_descriptor.quant_info }; - SimpleTensor bias{ TensorShape(tensor.info()->tensor_shape().x()), _params.fully_connected.data_type, 1, _params.input.quant_info }; - - //Fill the tensors with random values - fill_tensor(src, 0, static_cast(_params.input.range_low), static_cast(_params.input.range_high)); - fill_tensor(weights, 1, static_cast(_params.weights.range_low), static_cast(_params.weights.range_high)); - fill_tensor(bias, 2, static_cast(_params.input.range_low), static_cast(_params.input.range_high)); - - //Calculate reference - SimpleTensor output = reference::fully_connected_layer(src, weights, bias, output_desciptor.shape, _params.output.quant_info); - - arm_compute::test::validation::validate(Accessor(tensor), output, rel_tolerance, tolerance_num, abs_tolerance); - - return false; + src = SimpleTensor { input_descriptor.shape, _params.data_type, 1, input_descriptor.quant_info }; + weights = SimpleTensor { weights_descriptor.shape, _params.data_type, 1, weights_descriptor.quant_info }; + bias = SimpleTensor { TensorShape(tensor.info()->tensor_shape().x()), _params.data_type, 1, _params.input.quant_info }; } -private: - /** Fill tensor with Random values. - * - * Validate the given tensor against the reference result. - * - * @param[out] tensor The tensor we want to file - * @param[in] seed seed for the randomization function - * @param[in] low lower bound for random values - * @param[in] high upper bound for random values - * - * @return None. 
- */ - template - void fill_tensor(arm_compute::test::SimpleTensor &tensor, std::random_device::result_type seed, T low, T high) + TensorShape output_shape(ITensor &tensor) override { - std::mt19937 gen(seed); - switch(tensor.data_type()) - { - case arm_compute::DataType::QASYMM8: - { - const uint8_t qasymm8_low = tensor.quantization_info().quantize(low, RoundingPolicy::TO_NEAREST_UP); - const uint8_t qasymm8_high = tensor.quantization_info().quantize(high, RoundingPolicy::TO_NEAREST_UP); + ARM_COMPUTE_UNUSED(tensor); - std::uniform_int_distribution distribution(qasymm8_low, qasymm8_high); - - for(int i = 0; i < tensor.num_elements(); ++i) - { - tensor[i] = tensor.quantization_info().quantize(distribution(gen), RoundingPolicy::TO_NEAREST_UP); - } - - break; - } - case arm_compute::DataType::S32: - { - std::uniform_int_distribution distribution(static_cast(low), static_cast(high)); - - for(int i = 0; i < tensor.num_elements(); ++i) - { - tensor[i] = distribution(gen); - } - - break; - } - - case arm_compute::DataType::F16: - { - std::uniform_real_distribution distribution(static_cast(low), static_cast(high)); - - for(int i = 0; i < tensor.num_elements(); ++i) - { - tensor[i] = static_cast(distribution(gen)); - } - break; - } - case arm_compute::DataType::F32: - { - std::uniform_real_distribution distribution(static_cast(low), static_cast(high)); + const TensorShape input_shape = TensorShape(_params.input.width, _params.input.height, _params.input.fm, _params.input.batch); + const TensorDescriptor input_descriptor = TensorDescriptor(input_shape, _params.data_type, _params.input.quant_info); + const TensorDescriptor output_desciptor = FullyConnectedLayerNode::compute_output_descriptor(input_descriptor, _params.fully_connected.num_outputs, _params.output.quant_info); - for(int i = 0; i < tensor.num_elements(); ++i) - { - tensor[i] = distribution(gen); - } + return output_desciptor.shape; + } - break; - } - default: - ARM_COMPUTE_ERROR("NOT SUPPORTED!"); - } + arm_compute::test::SimpleTensor reference(arm_compute::test::SimpleTensor &src, + arm_compute::test::SimpleTensor &weights, + arm_compute::test::SimpleTensor &bias, + const arm_compute::TensorShape &output_shape) override + { + return reference::fully_connected_layer(src, weights, bias, output_shape, _params.output.quant_info); } - /** Select relative tolerance. - * - * Select relative tolerance if not supplied by user. - * - * @param[in] user_value supplied relative tolerance. -1 designates no user input - * - * @return Appropriate relative tolerance. - */ - float relative_tolenace(float user_value) + + float relative_tolerance() override { const std::map> relative_tolerance { @@ -406,23 +225,11 @@ private: } } }; - if(user_value == -1) - { - return relative_tolerance.at(_params.common_params.target).at(_params.fully_connected.data_type); - } - return user_value; + return relative_tolerance.at(_params.common_params.target).at(_params.data_type); } - /** Select absolute tolerance. - * - * Select absolute tolerance if not supplied by user. - * - * @param[in] user_value supplied absolute tolerance. -1 designates no user input - * - * @return Appropriate absolute tolerance. 
- */ - float absolute_tolerance(float user_value) + float absolute_tolerance() override { const std::map> absolute_tolerance { @@ -442,21 +249,10 @@ private: } }; - if(user_value == -1) - { - return absolute_tolerance.at(_params.common_params.target).at(_params.fully_connected.data_type); - } - return user_value; + return absolute_tolerance.at(_params.common_params.target).at(_params.data_type); } - /** Select tolerance number. - * - * Select tolerance number if not supplied by user. - * - * @param[in] user_value supplied tolerance number. -1 designates no user input - * - * @return Appropriate tolerance number. - */ - float tolerance_number(float user_value) + + float tolerance_number() override { const std::map> absolute_tolerance { @@ -476,110 +272,35 @@ private: } }; - if(user_value == -1) - { - return absolute_tolerance.at(_params.common_params.target).at(_params.fully_connected.data_type); - } - return user_value; + return absolute_tolerance.at(_params.common_params.target).at(_params.data_type); } - - ExampleParams _params; }; -/** Generates appropriate fully_connected verify accessor - * - * @param[in] params User supplied parameters for fully_connected. - * - * @return A fully_connected verify accessor for the requested datatype. - */ -inline std::unique_ptr get_fully_connected_verify_accessor(ExampleParams params) -{ - switch(params.fully_connected.data_type) - { - case DataType::QASYMM8: - { - return arm_compute::support::cpp14::make_unique>( - params); - } - case DataType::F16: - { - return arm_compute::support::cpp14::make_unique>( - params); - } - case DataType::F32: - { - return arm_compute::support::cpp14::make_unique>( - params); - } - default: - ARM_COMPUTE_ERROR("NOT SUPPORTED!"); - } -} - } // namespace -class Graphfully_connectedValidateExample final : public ValidateExample +class GraphFullyConnectedValidateExample final : public GraphValidateExample { + using GraphValidateExample::graph; + public: - Graphfully_connectedValidateExample() - : graph(0, "fully_connected Graph example") - { - } - bool do_setup(int argc, char **argv) override + GraphFullyConnectedValidateExample() + : GraphValidateExample("Fully_connected Graph example") { - CommandLineParser parser; - - FullyConnectedOptions Options(parser); - - parser.parse(argc, argv); - - ExampleParams params = consume_fully_connected_graph_parameters(Options); - - if(params.common_params.help) - { - parser.print_help(argv[0]); - return false; - } - - std::cout << params << std::endl; - - // Create input descriptor - const TensorShape input_shape = TensorShape(params.input.width, params.input.height, params.input.fm, params.input.batch); - const TensorDescriptor input_descriptor = TensorDescriptor(input_shape, params.fully_connected.data_type, params.input.quant_info, params.fully_connected.data_layout); - - const PixelValue lower = PixelValue(params.input.range_low, params.fully_connected.data_type, params.input.quant_info); - const PixelValue upper = PixelValue(params.input.range_high, params.fully_connected.data_type, params.input.quant_info); - - const PixelValue weights_lower = PixelValue(params.weights.range_low, params.fully_connected.data_type, params.weights.quant_info); - const PixelValue weights_upper = PixelValue(params.weights.range_high, params.fully_connected.data_type, params.weights.quant_info); - - graph << params.common_params.target - << InputLayer(input_descriptor, get_random_accessor(lower, upper, 0)) - << FullyConnectedLayer(params.fully_connected.num_outputs, - get_random_accessor(weights_lower, 
weights_upper, 1), - get_random_accessor(lower, upper, 2), - params.fully_connected.info, params.weights.quant_info, params.output.quant_info) - << OutputLayer(get_fully_connected_verify_accessor(params)); - - GraphConfig config; - config.num_threads = params.common_params.threads; - - graph.finalize(params.common_params.target, config); - - return true; } - void do_run() override + FullyConnectedLayer GraphFunctionLayer(ExampleParams ¶ms) override { - graph.run(); - } + const PixelValue lower = PixelValue(params.input.range_low, params.data_type, params.input.quant_info); + const PixelValue upper = PixelValue(params.input.range_high, params.data_type, params.input.quant_info); - void do_teardown() override - { - } + const PixelValue weights_lower = PixelValue(params.weights.range_low, params.data_type, params.weights.quant_info); + const PixelValue weights_upper = PixelValue(params.weights.range_high, params.data_type, params.weights.quant_info); -private: - Stream graph; + return FullyConnectedLayer(params.fully_connected.num_outputs, + get_random_accessor(weights_lower, weights_upper, 1), + get_random_accessor(lower, upper, 2), + params.fully_connected.info, params.weights.quant_info, params.output.quant_info); + } }; /** Main program for Graph fully_connected test @@ -592,5 +313,5 @@ private: */ int main(int argc, char **argv) { - return arm_compute::utils::run_example(argc, argv); + return arm_compute::utils::run_example(argc, argv); } diff --git a/tests/validate_examples/graph_validate_utils.h b/tests/validate_examples/graph_validate_utils.h new file mode 100644 index 0000000000..485d3c1409 --- /dev/null +++ b/tests/validate_examples/graph_validate_utils.h @@ -0,0 +1,695 @@ +/* + * Copyright (c) 2019 ARM Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef __GRAPH_VALIDATE_UTILS_H__ +#define __GRAPH_VALIDATE_UTILS_H__ + +#include "arm_compute/graph.h" + +#include "ValidateExample.h" +#include "utils/command_line/CommandLineParser.h" + +namespace arm_compute +{ +namespace utils +{ +/*Available Padding modes */ +enum class ConvolutionPaddingMode +{ + Valid, + Same, + Manual +}; + +/** Stream Input operator for the ConvolutionPaddingMode type + * + * @param[in] stream Input stream. + * @param[out] Mode Convolution parameters to output + * + * @return input stream. 
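+ *
+ * A short usage sketch (hypothetical input value, for illustration only):
+ * @code
+ * std::istringstream     stream("same");
+ * ConvolutionPaddingMode mode;
+ * stream >> mode; // mode is now ConvolutionPaddingMode::Same
+ * @endcode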
+ */ +inline ::std::istream &operator>>(::std::istream &stream, ConvolutionPaddingMode &Mode) +{ + static const std::map modes = + { + { "valid", ConvolutionPaddingMode::Valid }, + { "same", ConvolutionPaddingMode::Same }, + { "manual", ConvolutionPaddingMode::Manual } + }; + std::string value; + stream >> value; +#ifndef ARM_COMPUTE_EXCEPTIONS_DISABLED + try + { +#endif /* ARM_COMPUTE_EXCEPTIONS_DISABLED */ + Mode = modes.at(arm_compute::utility::tolower(value)); +#ifndef ARM_COMPUTE_EXCEPTIONS_DISABLED + } + catch(const std::out_of_range &) + { + throw std::invalid_argument(value); + } +#endif /* ARM_COMPUTE_EXCEPTIONS_DISABLED */ + + return stream; +} + +/** Formatted output of the ConvolutionPaddingMode type + * + * @param[out] os Output stream. + * @param[in] Mode ConvolutionPaddingMode to output + * + * @return Modified output stream. + */ +inline ::std::ostream &operator<<(::std::ostream &os, ConvolutionPaddingMode Mode) +{ + switch(Mode) + { + case ConvolutionPaddingMode::Valid: + os << "Valid"; + break; + case ConvolutionPaddingMode::Same: + os << "Same"; + break; + case ConvolutionPaddingMode::Manual: + os << "Manual"; + break; + default: + throw std::invalid_argument("Unsupported padding mode format"); + } + + return os; +} + +/** Structure holding all the input tensor graph parameters */ +struct TensorParams +{ + int width{ 1 }; + int height{ 1 }; + int fm{ 1 }; + int batch{ 1 }; + QuantizationInfo quant_info{ 1.0f, 0 }; + std::string npy{}; + uint64_t range_low{ 0 }; + uint64_t range_high{ 16 }; +}; + +/** Structure holding all the verification graph parameters */ +struct VerificationParams +{ + float absolute_tolerance{ -1.f }; + float relative_tolerance{ -1.f }; + float tolerance_number{ -1.f }; +}; + +/** Structure holding all the common graph parameters */ +struct FrameworkParams +{ + bool help{ false }; + int threads{ 0 }; + arm_compute::graph::Target target{ arm_compute::graph::Target::NEON }; +}; + +/** Structure holding all the graph Example parameters */ +struct CommonParams +{ + FrameworkParams common_params{}; + TensorParams input{}; + TensorParams weights{}; + TensorParams bias{}; + TensorParams output{}; + VerificationParams verification{}; + arm_compute::DataType data_type{ DataType::F32 }; +}; + +/** Structure holding all the Convolution layer graph parameters */ +struct ConvolutionParams +{ + int depth_multiplier{ 1 }; + /** Padding graph parameters */ + int padding_top{ 0 }; + int padding_bottom{ 0 }; + int padding_left{ 0 }; + int padding_right{ 0 }; + int padding_stride_x{ 0 }; + int padding_stride_y{ 0 }; + ConvolutionPaddingMode padding_mode{ ConvolutionPaddingMode::Valid }; + struct + { + struct + { + int X{ 0 }; + int Y{ 0 }; + } stride{}; + ConvolutionPaddingMode mode{ ConvolutionPaddingMode::Valid }; + } padding{}; +}; + +/** Structure holding all the fully_connected layer graph parameters */ +struct FullyConnectedParams +{ + FullyConnectedLayerInfo info{}; + int num_outputs{ 1 }; +}; + +/** Structure holding all the graph Example parameters */ +struct ExampleParams : public CommonParams +{ + FullyConnectedParams fully_connected{}; + ConvolutionParams convolution{}; + arm_compute::graph::DepthwiseConvolutionMethod depth_convolution_method{ arm_compute::graph::DepthwiseConvolutionMethod::Default }; + arm_compute::graph::ConvolutionMethod convolution_method{ arm_compute::graph::ConvolutionMethod::Default }; + arm_compute::DataLayout data_layout{ DataLayout::NCHW }; +}; + +/** Calculate stride information. 
+ * + * Depending on the selected padding mode create the desired PadStrideInfo + * + * @param[in] params Convolution parameters supplied by the user. + * + * @return PadStrideInfo with the correct padding mode. + */ +inline PadStrideInfo calculate_convolution_padding(ExampleParams params) +{ + switch(params.convolution.padding_mode) + { + case ConvolutionPaddingMode::Manual: + { + return PadStrideInfo(params.convolution.padding_stride_x, params.convolution.padding_stride_y, params.convolution.padding_left, params.convolution.padding_right, params.convolution.padding_top, + params.convolution.padding_bottom, DimensionRoundingType::FLOOR); + } + case ConvolutionPaddingMode::Valid: + { + return PadStrideInfo(); + } + case ConvolutionPaddingMode::Same: + { + return arm_compute::calculate_same_pad(TensorShape(params.input.width, params.input.height), TensorShape(params.weights.width, params.weights.height), + PadStrideInfo(params.convolution.padding_stride_x, + params.convolution.padding_stride_y)); + } + default: + ARM_COMPUTE_ERROR("NOT SUPPORTED!"); + } +} +/** CommonGraphValidateOptions command line options used to configure the graph examples + * + * (Similar to common options) + * The options in this object get populated when "parse()" is called on the parser used to construct it. + * The expected workflow is: + * + * CommandLineParser parser; + * CommonOptions options( parser ); + * parser.parse(argc, argv); + */ +class CommonGraphValidateOptions +{ +public: + explicit CommonGraphValidateOptions(CommandLineParser &parser) noexcept + : help(parser.add_option("help")), + threads(parser.add_option>("threads")), + target(), + data_type(), + absolute_tolerance(parser.add_option>("abs_tolerance", -1.0f)), + relative_tolerance(parser.add_option>("rel_tolerance", -1.0f)), + tolerance_number(parser.add_option>("tolerance_num", -1.0f)) + { + const std::set supported_targets + { + arm_compute::graph::Target::NEON, + arm_compute::graph::Target::CL, + arm_compute::graph::Target::GC, + }; + + const std::set supported_data_types + { + DataType::F16, + DataType::F32, + DataType::QASYMM8, + }; + + target = parser.add_option>("target", supported_targets, arm_compute::graph::Target::NEON); + data_type = parser.add_option>("type", supported_data_types, DataType::F32); + + target->set_help("Target to execute on"); + data_type->set_help("Data type to use"); + help->set_help("Show this help message"); + absolute_tolerance->set_help("Absolute tolerance used for verification"); + relative_tolerance->set_help("Absolute tolerance used for verification"); + tolerance_number->set_help("Absolute tolerance used for verification"); + ; + } + + /** Prevent instances of this class from being copied (As this class contains pointers) */ + CommonGraphValidateOptions(const CommonGraphValidateOptions &) = delete; + /** Prevent instances of this class from being copied (As this class contains pointers) */ + CommonGraphValidateOptions &operator=(const CommonGraphValidateOptions &) = delete; + /** Allow instances of this class to be moved */ + CommonGraphValidateOptions(CommonGraphValidateOptions &&) noexcept(true) = default; + /** Allow instances of this class to be moved */ + CommonGraphValidateOptions &operator=(CommonGraphValidateOptions &&) noexcept(true) = default; + /** Default destructor */ + virtual ~CommonGraphValidateOptions() = default; + + void consume_common_parameters(CommonParams &common_params) + { + common_params.common_params.help = help->is_set() ? 
help->value() : false;
+        common_params.common_params.threads = threads->value();
+        common_params.common_params.target  = target->value();
+
+        common_params.verification.absolute_tolerance = absolute_tolerance->value();
+        common_params.verification.relative_tolerance = relative_tolerance->value();
+        common_params.verification.tolerance_number   = tolerance_number->value();
+    }
+
+    /** Formatted output of the ExampleParams type
+     *
+     * @param[out] os            Output stream.
+     * @param[in]  common_params Example parameters to output
+     *
+     * @return None.
+     */
+    virtual void print_parameters(::std::ostream &os, const ExampleParams &common_params)
+    {
+        os << "Threads : " << common_params.common_params.threads << std::endl;
+        os << "Target : " << common_params.common_params.target << std::endl;
+        os << "Data type : " << common_params.data_type << std::endl;
+    }
+
+    ToggleOption                           *help;               /**< show help message */
+    SimpleOption<int>                      *threads;            /**< Number of threads option */
+    EnumOption<arm_compute::graph::Target> *target;             /**< Graph execution target */
+    EnumOption<DataType>                   *data_type;          /**< Graph data type */
+    SimpleOption<float>                    *absolute_tolerance; /**< Absolute tolerance used in verification */
+    SimpleOption<float>                    *relative_tolerance; /**< Relative tolerance used in verification */
+    SimpleOption<float>                    *tolerance_number;   /**< Tolerance number used in verification */
+};
+
+/** Consumes the common graph options and populates the supplied parameters structure
+ *
+ * @param[in]  options       Options to consume
+ * @param[out] common_params Structure to fill with the common graph parameters
+ *
+ * @return None.
+ */
+void consume_common_graph_parameters(CommonGraphValidateOptions &options, CommonParams &common_params)
+{
+    common_params.common_params.help      = options.help->is_set() ?
options.help->value() : false; + common_params.common_params.threads = options.threads->value(); + common_params.common_params.target = options.target->value(); + + common_params.verification.absolute_tolerance = options.absolute_tolerance->value(); + common_params.verification.relative_tolerance = options.relative_tolerance->value(); + common_params.verification.tolerance_number = options.tolerance_number->value(); +} + +/** Generates appropriate accessor according to the specified graph parameters + * + * @param[in] tensor Tensor parameters + * @param[in] lower Lower random values bound + * @param[in] upper Upper random values bound + * @param[in] seed Random generator seed + * + * @return An appropriate tensor accessor + */ +inline std::unique_ptr get_accessor(const TensorParams &tensor, PixelValue lower, PixelValue upper, const std::random_device::result_type seed = 0) +{ + if(!tensor.npy.empty()) + { + return arm_compute::support::cpp14::make_unique(tensor.npy); + } + else + { + return arm_compute::support::cpp14::make_unique(lower, upper, seed); + } +} + +/** Graph example validation accessor class */ +template +class VerifyAccessor : public graph::ITensorAccessor +{ +public: + using TBias = typename std::conditional::type, uint8_t>::value, int32_t, D>::type; + /** Constructor + * + * @param[in] params Convolution parameters + */ + explicit VerifyAccessor(ExampleParams ¶ms) + : _params(std::move(params)) + { + } + // Inherited methods overriden: + bool access_tensor(ITensor &tensor) override + { + if(_params.output.npy.empty()) + { + arm_compute::test::SimpleTensor src; + arm_compute::test::SimpleTensor weights; + arm_compute::test::SimpleTensor bias; + + //Create Input tensors + create_tensors(src, weights, bias, tensor); + + //Fill the tensors with random values + fill_tensor(src, 0, static_cast(_params.input.range_low), static_cast(_params.input.range_high)); + fill_tensor(weights, 1, static_cast(_params.weights.range_low), static_cast(_params.weights.range_high)); + fill_tensor(bias, 2, static_cast(_params.input.range_low), static_cast(_params.input.range_high)); + + arm_compute::test::SimpleTensor output = reference(src, weights, bias, output_shape(tensor)); + + validate(tensor, output); + } + else + { + //The user provided a reference file use an npy accessor to validate + arm_compute::graph_utils::NumPyAccessor(_params.output.npy, tensor.info()->tensor_shape(), tensor.info()->data_type()).access_tensor(tensor); + } + return false; + } + + /** Create reference tensors. + * + * Validate the given tensor against the reference result. + * + * @param[out] src The tensor with the source data. + * @param[out] weights The tensor with the weigths data. + * @param[out] bias The tensor with the bias data. + * @param[in] tensor Tensor result of the actual operation passed into the Accessor. + * + * @return None. 
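+     *
+     * @note This base implementation builds the reference tensors directly from the
+     *       user-supplied TensorParams (source: width x height x fm x batch, weights:
+     *       width x height x fm); layer-specific accessors override it when the
+     *       reference tensors need different shapes.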
+     */
+    virtual void create_tensors(arm_compute::test::SimpleTensor<D>     &src,
+                                arm_compute::test::SimpleTensor<D>     &weights,
+                                arm_compute::test::SimpleTensor<TBias> &bias,
+                                ITensor &tensor)
+    {
+        //Create Input tensors
+        src     = arm_compute::test::SimpleTensor<D> { TensorShape(_params.input.width, _params.input.height, _params.input.fm, _params.input.batch), _params.data_type, 1, _params.input.quant_info };
+        weights = arm_compute::test::SimpleTensor<D> { TensorShape(_params.weights.width, _params.weights.height, _params.weights.fm), _params.data_type, 1, _params.weights.quant_info };
+        bias    = arm_compute::test::SimpleTensor<TBias> { TensorShape(_params.input.height), _params.data_type, 1, _params.input.quant_info };
+    }
+
+    /** Calculate reference output tensor shape.
+     *
+     * @param[in] tensor Tensor result of the actual operation passed into the Accessor.
+     *
+     * @return output tensor shape.
+     */
+    virtual TensorShape output_shape(ITensor &tensor)
+    {
+        return arm_compute::graph_utils::permute_shape(tensor.info()->tensor_shape(), _params.data_layout, DataLayout::NCHW);
+    }
+
+    /** Calculate the reference tensor.
+     *
+     * Computes the reference output for the given inputs.
+     *
+     * @param[in] src          The tensor with the source data.
+     * @param[in] weights      The tensor with the weights data.
+     * @param[in] bias         The tensor with the bias data.
+     * @param[in] output_shape Shape of the output tensor.
+     *
+     * @return Tensor with the reference output.
+     */
+    virtual arm_compute::test::SimpleTensor<D> reference(arm_compute::test::SimpleTensor<D>     &src,
+                                                         arm_compute::test::SimpleTensor<D>     &weights,
+                                                         arm_compute::test::SimpleTensor<TBias> &bias,
+                                                         const arm_compute::TensorShape         &output_shape) = 0;
+
+    /** Fill QASYMM8 tensor with Random values.
+     *
+     * Fills the tensor with pseudo-random values from the requested range.
+     *
+     * @param[out] tensor The tensor we want to fill
+     * @param[in]  seed   seed for the randomization function
+     * @param[in]  low    lower bound for random values
+     * @param[in]  high   upper bound for random values
+     *
+     * @return None.
+     */
+    void fill_tensor(arm_compute::test::SimpleTensor<uint8_t> &tensor, std::random_device::result_type seed, uint8_t low, uint8_t high)
+    {
+        ARM_COMPUTE_ERROR_ON(tensor.data_type() != arm_compute::DataType::QASYMM8);
+
+        std::mt19937 gen(seed);
+
+        uint8_t qasymm8_low  = tensor.quantization_info().quantize(low, RoundingPolicy::TO_NEAREST_UP);
+        uint8_t qasymm8_high = tensor.quantization_info().quantize(high, RoundingPolicy::TO_NEAREST_UP);
+
+        std::uniform_int_distribution<uint8_t> distribution(qasymm8_low, qasymm8_high);
+
+        for(int i = 0; i < tensor.num_elements(); ++i)
+        {
+            tensor[i] = tensor.quantization_info().quantize(distribution(gen), RoundingPolicy::TO_NEAREST_UP);
+        }
+    }
+    /** Fill S32 tensor with Random values.
+     *
+     * Fills the tensor with pseudo-random values from the requested range.
+     *
+     * @param[out] tensor The tensor we want to fill
+     * @param[in]  seed   seed for the randomization function
+     * @param[in]  low    lower bound for random values
+     * @param[in]  high   upper bound for random values
+     *
+     * @return None.
+     */
+    void fill_tensor(arm_compute::test::SimpleTensor<int32_t> &tensor, std::random_device::result_type seed, int32_t low, int32_t high)
+    {
+        std::mt19937 gen(seed);
+        std::uniform_int_distribution<int32_t> distribution(static_cast<int32_t>(low), static_cast<int32_t>(high));
+
+        for(int i = 0; i < tensor.num_elements(); ++i)
+        {
+            tensor[i] = distribution(gen);
+        }
+    }
+    /** Fill F32 tensor with Random values.
+     *
+     * Fills the tensor with pseudo-random values from the requested range.
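+     *
+     * A hypothetical call, seeding the generator with 0 and drawing values from [0, 16):
+     * @code
+     * fill_tensor(src, 0, 0.f, 16.f);
+     * @endcode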
+ * + * @param[out] tensor The tensor we want to file + * @param[in] seed seed for the randomization function + * @param[in] low lower bound for random values + * @param[in] high upper bound for random values + * + * @return None. + */ + void fill_tensor(arm_compute::test::SimpleTensor &tensor, std::random_device::result_type seed, float low, float high) + { + ARM_COMPUTE_ERROR_ON(tensor.data_type() != arm_compute::DataType::F32); + std::mt19937 gen(seed); + std::uniform_real_distribution distribution(low, high); + + for(int i = 0; i < tensor.num_elements(); ++i) + { + tensor[i] = distribution(gen); + } + } + /** Fill F16 tensor with Random values. + * + * Validate the given tensor against the reference result. + * + * @param[out] tensor The tensor we want to file + * @param[in] seed seed for the randomization function + * @param[in] low lower bound for random values + * @param[in] high upper bound for random values + * + * @return None. + */ + void fill_tensor(arm_compute::test::SimpleTensor &tensor, std::random_device::result_type seed, half low, half high) + { + ARM_COMPUTE_ERROR_ON(tensor.data_type() != arm_compute::DataType::F16); + std::mt19937 gen(seed); + std::uniform_real_distribution distribution(static_cast(low), static_cast(high)); + + for(int i = 0; i < tensor.num_elements(); ++i) + { + tensor[i] = static_cast(distribution(gen)); + } + } + + /** Select relative tolerance. + * + * Select relative tolerance if not supplied by user. + * + * @return Appropriate relative tolerance. + */ + virtual float relative_tolerance() = 0; + + /** Select absolute tolerance. + * + * Select absolute tolerance if not supplied by user. + * + * @return Appropriate absolute tolerance. + */ + virtual float absolute_tolerance() = 0; + + /** Select tolerance number. + * + * Select tolerance number if not supplied by user. + * + * @return Appropriate tolerance number. + */ + virtual float tolerance_number() = 0; + + /** Validate the output versus the reference. + * + * @param[in] tensor Tensor result of the actual operation passed into the Accessor. + * @param[in] output Tensor result of the reference implementation. + * + * @return None. + */ + void validate(ITensor &tensor, arm_compute::test::SimpleTensor output) + { + float user_relative_tolerance = _params.verification.relative_tolerance; + float user_absolute_tolerance = _params.verification.absolute_tolerance; + float user_tolerance_num = _params.verification.tolerance_number; + /* If no user input was provided override with defaults. */ + if(user_relative_tolerance == -1) + { + user_relative_tolerance = relative_tolerance(); + } + + if(user_absolute_tolerance == -1) + { + user_absolute_tolerance = absolute_tolerance(); + } + + if(user_tolerance_num == -1) + { + user_tolerance_num = tolerance_number(); + } + + const arm_compute::test::validation::RelativeTolerance rel_tolerance(user_relative_tolerance); /**< Relative tolerance */ + const arm_compute::test::validation::AbsoluteTolerance abs_tolerance(user_absolute_tolerance); /**< Absolute tolerance */ + const float tolerance_num(user_tolerance_num); /**< Tolerance number */ + + arm_compute::test::validation::validate(arm_compute::test::Accessor(tensor), output, rel_tolerance, tolerance_num, abs_tolerance); + } + + ExampleParams _params; +}; + +/** Generates appropriate convolution verify accessor + * + * @param[in] params User supplied parameters for convolution. + * + * @return A convolution verify accessor for the requested datatype. + */ +template