author     John Kesapides <john.kesapides@arm.com>   2019-02-26 14:52:12 +0000
committer  John Kesapides <john.kesapides@arm.com>   2019-04-10 10:42:53 +0000
commit     8d94269d7985b9cee67e52581e2f58b6c99d7f0d (patch)
tree       33d12c8ae7a6de559dae4a12f240b2e228cfe3ef
parent     165308cf6904f800206217ad2f09b8e5c8d5c286 (diff)
COMPMID-1492 Create tests/validate_examples/graph_depthwise_convolution

Add new validate graph example and unify common example code.

Change-Id: Ibfd7ae2067ad805d6c82d953fe3febfbea961149
Signed-off-by: John Kesapides <john.kesapides@arm.com>
Reviewed-on: https://review.mlplatform.org/c/825
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
 arm_compute/graph/TypeLoader.h                           |  24
 src/graph/TypeLoader.cpp                                 |  25
 tests/validate_examples/graph_convolution.cpp            | 668
 tests/validate_examples/graph_depthwiseconvolution.cpp   | 396
 tests/validate_examples/graph_fully_connected.cpp        | 499
 tests/validate_examples/graph_validate_utils.h           | 695
 tests/validation/reference/DepthwiseConvolutionLayer.cpp |  17
 tests/validation/reference/DepthwiseConvolutionLayer.h   |   2
 8 files changed, 1376 insertions(+), 950 deletions(-)
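
Most of the unification lives in tests/validate_examples/graph_validate_utils.h (695 added lines), which this page does not expand. Judging from how the example classes below use it, ExampleParams, TensorParams, calculate_convolution_padding() and get_accessor() appear to have moved there largely unchanged from the blocks removed from graph_convolution.cpp, and the shared verification accessor has roughly the declaration-level shape sketched here. This is an illustrative reconstruction, not the real header; the constructor signature and which members are pure virtual are assumptions.

    // Illustrative sketch of the shared VerifyAccessor assumed to live in
    // tests/validate_examples/graph_validate_utils.h; the real header may differ.
    template <typename D>
    class VerifyAccessor : public graph::ITensorAccessor
    {
    public:
        using TBias = typename std::conditional<std::is_same<typename std::decay<D>::type, uint8_t>::value, int32_t, D>::type;

        explicit VerifyAccessor(ExampleParams &params)
            : _params(std::move(params))
        {
        }

        // access_tensor() builds the reference input/weights/bias tensors, fills
        // them with random values, runs the layer-specific reference() and then
        // validates the graph output using the tolerances supplied by the subclass
        // (or an .npy reference file, when one is given on the command line).
        bool access_tensor(ITensor &tensor) override;

        // Hooks each example overrides (see ConvolutionVerifyAccessor and
        // DepthConvolutionVerifyAccessor below).
        virtual SimpleTensor<D> reference(SimpleTensor<D> &src, SimpleTensor<D> &weights, SimpleTensor<TBias> &bias, const TensorShape &output_shape) = 0;
        virtual float relative_tolerance() = 0;
        virtual float absolute_tolerance() = 0;
        virtual float tolerance_number() = 0;

    protected:
        ExampleParams _params;
    };
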
diff --git a/arm_compute/graph/TypeLoader.h b/arm_compute/graph/TypeLoader.h
index dcdc1736a7..41f382ad1d 100644
--- a/arm_compute/graph/TypeLoader.h
+++ b/arm_compute/graph/TypeLoader.h
@@ -123,6 +123,30 @@ inline ::std::istream &operator>>(::std::istream &stream, ConvolutionMethod &tar
target = Convolution_method_from_name(value);
return stream;
}
+
+/** Converts a string to a strongly typed enumeration @ref DepthwiseConvolutionMethod
+ *
+ * @param[in] name String to convert
+ *
+ * @return Converted DepthwiseConvolutionMethod enumeration
+ */
+DepthwiseConvolutionMethod depthwise_convolution_method_from_name(const std::string &name);
+
+/** Input Stream operator for @ref DepthwiseConvolutionMethod
+ *
+ * @param[in] stream Stream to parse
+ * @param[out] target Output target
+ *
+ * @return Updated stream
+ */
+inline ::std::istream &operator>>(::std::istream &stream, DepthwiseConvolutionMethod &target)
+{
+ std::string value;
+ stream >> value;
+ target = depthwise_convolution_method_from_name(value);
+ return stream;
+}
+
} // namespace graph
} // namespace arm_compute
#endif /* __ARM_COMPUTE_GRAPH_TYPE_LOADER_H__ */
diff --git a/src/graph/TypeLoader.cpp b/src/graph/TypeLoader.cpp
index 0c1ce25b92..b63672b39b 100644
--- a/src/graph/TypeLoader.cpp
+++ b/src/graph/TypeLoader.cpp
@@ -125,5 +125,30 @@ ConvolutionMethod Convolution_method_from_name(const std::string &name)
}
#endif /* ARM_COMPUTE_EXCEPTIONS_DISABLED */
}
+
+DepthwiseConvolutionMethod depthwise_convolution_method_from_name(const std::string &name)
+{
+ static const std::map<std::string, DepthwiseConvolutionMethod> methods =
+ {
+ { "default", DepthwiseConvolutionMethod::Default },
+ { "gemv", DepthwiseConvolutionMethod::GEMV },
+ { "optimized3x3", DepthwiseConvolutionMethod::Optimized3x3 },
+ };
+
+#ifndef ARM_COMPUTE_EXCEPTIONS_DISABLED
+ try
+ {
+#endif /* ARM_COMPUTE_EXCEPTIONS_DISABLED */
+ return methods.at(arm_compute::utility::tolower(name));
+
+#ifndef ARM_COMPUTE_EXCEPTIONS_DISABLED
+ }
+ catch(const std::out_of_range &)
+ {
+ throw std::invalid_argument(name);
+ }
+#endif /* ARM_COMPUTE_EXCEPTIONS_DISABLED */
+}
+
} // namespace graph
} // namespace arm_compute
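
As with ConvolutionMethod above, the new DepthwiseConvolutionMethod loader is reached through the stream extraction operator, which is what the "--convolution_method" EnumOption registered in the new example uses under the hood. A minimal standalone illustration follows; it is not part of the patch and the helper function name is made up:

    // Standalone sketch: convert a user-supplied string into a
    // DepthwiseConvolutionMethod via the operator>> added in this patch.
    // Accepted values are "default", "gemv" and "optimized3x3" (case-insensitive);
    // anything else makes the loader throw std::invalid_argument (when exceptions
    // are enabled).
    #include "arm_compute/graph/TypeLoader.h"
    #include <sstream>
    #include <string>

    arm_compute::graph::DepthwiseConvolutionMethod parse_depthwise_method(const std::string &text)
    {
        arm_compute::graph::DepthwiseConvolutionMethod method{};
        std::istringstream stream(text);
        stream >> method; // forwards to depthwise_convolution_method_from_name()
        return method;
    }
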
diff --git a/tests/validate_examples/graph_convolution.cpp b/tests/validate_examples/graph_convolution.cpp
index 4f5ab0dc08..acc1e69544 100644
--- a/tests/validate_examples/graph_convolution.cpp
+++ b/tests/validate_examples/graph_convolution.cpp
@@ -35,6 +35,7 @@
#include "utils/Utils.h"
#include "ValidateExample.h"
+#include "graph_validate_utils.h"
#include <utility>
@@ -45,161 +46,9 @@ using namespace arm_compute::graph;
using namespace arm_compute;
using namespace arm_compute::test;
using namespace arm_compute::test::validation;
-namespace
-{
-/*Available Padding modes */
-enum class PaddingMode
-{
- Valid,
- Same,
- Manual
-};
-/** Stream Input operator for the PaddingMode type
- *
- * @param[in] stream Input stream.
- * @param[out] Mode Convolution parameters to output
- *
- * @return input stream.
- */
-inline ::std::istream &operator>>(::std::istream &stream, PaddingMode &Mode)
-{
- static const std::map<std::string, PaddingMode> modes =
- {
- { "valid", PaddingMode::Valid },
- { "same", PaddingMode::Same },
- { "manual", PaddingMode::Manual }
- };
- std::string value;
- stream >> value;
- try
- {
- Mode = modes.at(arm_compute::utility::tolower(value));
- }
- catch(const std::out_of_range &)
- {
- throw std::invalid_argument(value);
- }
-
- return stream;
-}
-
-/** Formatted output of the PaddingMode type
- *
- * @param[out] os Output stream.
- * @param[in] Mode PaddingMode to output
- *
- * @return Modified output stream.
- */
-inline ::std::ostream &operator<<(::std::ostream &os, PaddingMode Mode)
-{
- switch(Mode)
- {
- case PaddingMode::Valid:
- os << "Valid";
- break;
- case PaddingMode::Same:
- os << "Same";
- break;
- case PaddingMode::Manual:
- os << "Manual";
- break;
- default:
- throw std::invalid_argument("Unsupported padding mode format");
- }
-
- return os;
-}
-/** Structure holding all the input tensor graph parameters */
-struct TensorParams
-{
- int width{ 0 };
- int height{ 0 };
- int fm{ 0 };
- int batch{ 0 };
- QuantizationInfo quant_info{ 1.0f, 0 };
- std::string npy{};
- uint64_t range_low{ 0 };
- uint64_t range_high{ 16 };
-};
-/** Structure holding all the verification graph parameters */
-struct VerificationParams
-{
- float absolute_tolerance{ -1.f };
- float relative_tolerance{ -1.f };
- float tolerance_number{ -1.f };
-};
-
-/** Structure holding all the common graph parameters */
-struct FrameworkParams
-{
- bool help{ false };
- int threads{ 0 };
- arm_compute::graph::Target target{ arm_compute::graph::Target::NEON };
-};
-
-/** Structure holding all the Convolution layer graph parameters */
-struct ConvolutionParams
-{
- arm_compute::DataType data_type{ DataType::F32 };
- arm_compute::DataLayout data_layout{ DataLayout::NCHW };
- arm_compute::graph::ConvolutionMethod convolution_method{ arm_compute::graph::ConvolutionMethod::Default };
-
- /** Padding graph parameters */
- int padding_top{ 0 };
- int padding_bottom{ 0 };
- int padding_left{ 0 };
- int padding_right{ 0 };
- int padding_stride_x{ 0 };
- int padding_stride_y{ 0 };
- PaddingMode padding_mode{ PaddingMode::Valid };
- struct
- {
- struct
- {
- int X{ 0 };
- int Y{ 0 };
- } stride{};
- PaddingMode mode{ PaddingMode::Valid };
- } padding{};
-};
-
-/** Structure holding all the graph Example parameters */
-struct ExampleParams
-{
- FrameworkParams common_params{};
- TensorParams input{};
- TensorParams weights{};
- TensorParams bias{};
- TensorParams output{};
- VerificationParams verification{};
- ConvolutionParams convolution{};
-};
-
-/** Formatted output of the ConvolutionParams type
- *
- * @param[out] os Output stream.
- * @param[in] common_params Convolution parameters to output
- *
- * @return Modified output stream.
- */
-::std::ostream &operator<<(::std::ostream &os, const ExampleParams &common_params)
+namespace
{
- os << "Threads : " << common_params.common_params.threads << std::endl;
- os << "Target : " << common_params.common_params.target << std::endl;
- os << "Data type : " << common_params.convolution.data_type << std::endl;
- os << "Input dimensions(X,Y, Channels, Batch) : (" << common_params.input.width << "," << common_params.input.height << "," << common_params.input.fm << "," << common_params.input.batch << ")"
- << std::endl;
- os << "Weight dimensions(X,Y, Channels(same as input), OFM) : (" << common_params.weights.width << "," << common_params.weights.height << "," << common_params.input.fm << "," <<
- common_params.weights.fm << ")" << std::endl;
- os << "Padding(top, bottom, left, right) (stride x, stride y) : (" << common_params.convolution.padding_top << "," << common_params.convolution.padding_bottom << "," <<
- common_params.convolution.padding_left << "," << common_params.convolution.padding_right << ") (" << common_params.convolution.padding_stride_x << "," << common_params.convolution.padding_stride_y <<
- ")" << std::endl;
- os << "Padding Mode: " << common_params.convolution.padding_mode << std::endl;
- os << "Convolution Method: " << common_params.convolution.convolution_method << std::endl;
- return os;
-}
-
/** Convolution command line options used to configure the graph examples
*
* (Similar to common options)
@@ -210,11 +59,12 @@ struct ExampleParams
* CommonOptions options( parser );
* parser.parse(argc, argv);
*/
-class ConvolutionOptions final
+class ConvolutionOptions final : public CommonGraphValidateOptions
{
public:
explicit ConvolutionOptions(CommandLineParser &parser) noexcept
- : width(parser.add_option<SimpleOption<int>>("width", 9)),
+ : CommonGraphValidateOptions(parser),
+ width(parser.add_option<SimpleOption<int>>("width", 9)),
height(parser.add_option<SimpleOption<int>>("height", 9)),
channels(parser.add_option<SimpleOption<int>>("channels", 1)),
batch(parser.add_option<SimpleOption<int>>("batch", 1)),
@@ -227,16 +77,9 @@ public:
padding_right(parser.add_option<SimpleOption<int>>("padding_right", 0)),
stride_x(parser.add_option<SimpleOption<int>>("stride_x", 1)),
stride_y(parser.add_option<SimpleOption<int>>("stride_y", 1)),
- help(parser.add_option<ToggleOption>("help")),
- threads(parser.add_option<SimpleOption<int>>("threads")),
- target(),
- data_type(),
padding_mode(),
conv_mode(),
data_layout(),
- absolute_tolerance(parser.add_option<SimpleOption<float>>("abs_tolerance", -1.0f)),
- relative_tolerance(parser.add_option<SimpleOption<float>>("rel_tolerance", -1.0f)),
- tolerance_number(parser.add_option<SimpleOption<float>>("tolerance_num", -1.0f)),
scale(parser.add_option<SimpleOption<float>>("scale", 1.0f)),
offset(parser.add_option<SimpleOption<int>>("offset", 0)),
weights_scale(parser.add_option<SimpleOption<float>>("weights_scale", 1.0f)),
@@ -252,24 +95,10 @@ public:
weights_npy(parser.add_option<SimpleOption<std::string>>("weights_npy")),
bias_npy(parser.add_option<SimpleOption<std::string>>("bias_image"))
{
- const std::set<PaddingMode> available_padding_modes
- {
- PaddingMode::Valid,
- PaddingMode::Same
- };
-
- const std::set<arm_compute::graph::Target> supported_targets
+ const std::set<ConvolutionPaddingMode> available_padding_modes
{
- Target::NEON,
- Target::CL,
- Target::GC,
- };
-
- const std::set<arm_compute::DataType> supported_data_types
- {
- DataType::F16,
- DataType::F32,
- DataType::QASYMM8,
+ ConvolutionPaddingMode::Valid,
+ ConvolutionPaddingMode::Same
};
const std::set<arm_compute::graph::ConvolutionMethod> supported_convolution_methods
@@ -286,14 +115,10 @@ public:
DataLayout::NCHW,
};
- padding_mode = parser.add_option<EnumOption<PaddingMode>>("padding_mode", available_padding_modes, PaddingMode::Valid);
- target = parser.add_option<EnumOption<Target>>("target", supported_targets, Target::NEON);
- data_type = parser.add_option<EnumOption<DataType>>("type", supported_data_types, DataType::F32);
+ padding_mode = parser.add_option<EnumOption<ConvolutionPaddingMode>>("padding_mode", available_padding_modes, ConvolutionPaddingMode::Valid);
conv_mode = parser.add_option<EnumOption<arm_compute::graph::ConvolutionMethod>>("convolution_method", supported_convolution_methods, arm_compute::graph::ConvolutionMethod::Default);
data_layout = parser.add_option<EnumOption<DataLayout>>("layout", supported_data_layouts, DataLayout::NHWC);
- target->set_help("Target to execute on");
- data_type->set_help("Data type to use");
padding_mode->set_help("Set padding mode");
help->set_help("Show this help message");
width->set_help("Set Input dimension width");
@@ -310,10 +135,6 @@ public:
stride_x->set_help("Set padding stride x");
stride_y->set_help("Set padding stride y");
conv_mode->set_help("Set convolution method");
- data_layout->set_help("Data layout to use");
- absolute_tolerance->set_help("Absolute tolerance used for verification");
- relative_tolerance->set_help("Absolute tolerance used for verification");
- tolerance_number->set_help("Absolute tolerance used for verification");
scale->set_help("Quantization scale from QASYMM8");
offset->set_help("Quantization offset from QASYMM8");
weights_scale->set_help("Quantization scale from QASYMM8");
@@ -328,6 +149,69 @@ public:
weights_range_high->set_help("Lower bound for input randomization range");
}
+ /** Fill out the supplied parameters with user supplied parameters
+ *
+ * @param[out] common_params Example parameters structure to fill
+ *                           with the values parsed from the command line
+ *
+ * @return None.
+ */
+ void consume_parameters(ExampleParams &common_params)
+ {
+ common_params.input.width = width->value();
+ common_params.input.height = height->value();
+ common_params.input.fm = channels->value();
+ common_params.input.batch = batch->value();
+ common_params.input.quant_info.scale = scale->value();
+ common_params.input.quant_info.offset = offset->value();
+ common_params.input.npy = input_npy->value();
+ common_params.input.range_low = input_range_low->value();
+ common_params.input.range_high = input_range_high->value();
+
+ common_params.weights.width = weights_width->value();
+ common_params.weights.height = weights_height->value();
+ common_params.weights.fm = OFM->value();
+ common_params.weights.npy = weights_npy->value();
+ common_params.weights.quant_info.scale = weights_scale->value();
+ common_params.weights.quant_info.offset = weights_offset->value();
+ common_params.weights.range_low = weights_range_low->value();
+ common_params.weights.range_high = weights_range_high->value();
+
+ common_params.bias.npy = bias_npy->value();
+
+ common_params.output.quant_info.scale = output_scale->value();
+ common_params.output.quant_info.offset = output_offset->value();
+ common_params.output.npy = output_npy->value();
+
+ common_params.convolution.padding_mode = padding_mode->value();
+ common_params.convolution.padding_top = padding_top->value();
+ common_params.convolution.padding_bottom = padding_bottom->value();
+ common_params.convolution.padding_left = padding_left->value();
+ common_params.convolution.padding_right = padding_right->value();
+ common_params.convolution.padding_stride_x = stride_x->value();
+ common_params.convolution.padding_stride_y = stride_y->value();
+
+ common_params.data_type = data_type->value();
+ common_params.data_layout = data_layout->value();
+ common_params.convolution_method = conv_mode->value();
+ }
+
+ void print_parameters(::std::ostream &os, const ExampleParams &common_params) override
+ {
+ os << "Threads : " << common_params.common_params.threads << std::endl;
+ os << "Target : " << common_params.common_params.target << std::endl;
+ os << "Data type : " << common_params.data_type << std::endl;
+ os << "Input dimensions(X,Y, Channels, Batch) : (" << common_params.input.width << "," << common_params.input.height << "," << common_params.input.fm << "," << common_params.input.batch << ")"
+ << std::endl;
+ os << "Weight dimensions(X,Y, Channels(same as input), OFM) : (" << common_params.weights.width << "," << common_params.weights.height << "," << common_params.input.fm << "," <<
+ common_params.weights.fm << ")" << std::endl;
+ os << "Padding(top, bottom, left, right) (stride x, stride y) : (" << common_params.convolution.padding_top << "," << common_params.convolution.padding_bottom << "," <<
+ common_params.convolution.padding_left << "," << common_params.convolution.padding_right << ") (" << common_params.convolution.padding_stride_x << "," << common_params.convolution.padding_stride_y <<
+ ")" << std::endl;
+ os << "Padding Mode: " << common_params.convolution.padding_mode << std::endl;
+ os << "Convolution Method: " << common_params.convolution_method << std::endl;
+ }
+
/** Prevent instances of this class from being copied (As this class contains pointers) */
ConvolutionOptions(const ConvolutionOptions &) = delete;
/** Prevent instances of this class from being copied (As this class contains pointers) */
@@ -337,7 +221,7 @@ public:
/** Allow instances of this class to be moved */
ConvolutionOptions &operator=(ConvolutionOptions &&) noexcept(true) = default;
/** Default destructor */
- ~ConvolutionOptions() = default;
+ ~ConvolutionOptions() override = default;
SimpleOption<int> *width; /**< Input width */
SimpleOption<int> *height; /**< Input height */
@@ -352,16 +236,9 @@ public:
SimpleOption<int> *padding_right; /**< Padding right */
SimpleOption<int> *stride_x; /**< Padding stride x */
SimpleOption<int> *stride_y; /**< Padding stride y */
- ToggleOption *help; /**< show help message */
- SimpleOption<int> *threads; /**< Number of threads option */
- EnumOption<arm_compute::graph::Target> *target; /**< Graph execution target */
- EnumOption<arm_compute::DataType> *data_type; /**< Graph data type */
- EnumOption<PaddingMode> *padding_mode; /**< Padding mode */
+ EnumOption<ConvolutionPaddingMode> *padding_mode; /**< Padding mode */
EnumOption<arm_compute::graph::ConvolutionMethod> *conv_mode; /**< Convolution method */
EnumOption<arm_compute::DataLayout> *data_layout; /**< Graph data layout */
- SimpleOption<float> *absolute_tolerance; /**< Absolute tolerance used in verification */
- SimpleOption<float> *relative_tolerance; /**< Relative tolerance used in verification */
- SimpleOption<float> *tolerance_number; /**< Tolerance number used in verification */
SimpleOption<float> *scale; /**< Input Quantization scale from QASYMM8 */
SimpleOption<int> *offset; /**< Input Quantization offset from QASYMM8 */
SimpleOption<float> *weights_scale; /**< Weights Quantization scale from QASYMM8 */
@@ -379,227 +256,26 @@ public:
SimpleOption<std::string> *bias_npy; /**< Use bias .npy image */
};
-/** Consumes the convolution graph options and creates a structure containing any information
- *
- * @param[in] options Options to consume
- *
- * @return Convolutionparams structure containing the common graph parameters
- */
-ExampleParams consume_covolution_graph_parameters(ConvolutionOptions &options)
-{
- ExampleParams common_params;
-
- common_params.common_params.help = options.help->is_set() ? options.help->value() : false;
- common_params.common_params.threads = options.threads->value();
- common_params.common_params.target = options.target->value();
-
- common_params.input.width = options.width->value();
- common_params.input.height = options.height->value();
- common_params.input.fm = options.channels->value();
- common_params.input.batch = options.batch->value();
- common_params.input.quant_info.scale = options.scale->value();
- common_params.input.quant_info.offset = options.offset->value();
- common_params.input.npy = options.input_npy->value();
- common_params.input.range_low = options.input_range_low->value();
- common_params.input.range_high = options.input_range_high->value();
-
- common_params.weights.width = options.weights_width->value();
- common_params.weights.height = options.weights_height->value();
- common_params.weights.fm = options.OFM->value();
- common_params.weights.npy = options.weights_npy->value();
- common_params.weights.quant_info.scale = options.weights_scale->value();
- common_params.weights.quant_info.offset = options.weights_offset->value();
- common_params.weights.range_low = options.weights_range_low->value();
- common_params.weights.range_high = options.weights_range_high->value();
-
- common_params.bias.npy = options.bias_npy->value();
-
- common_params.output.quant_info.scale = options.output_scale->value();
- common_params.output.quant_info.offset = options.output_offset->value();
- common_params.output.npy = options.output_npy->value();
-
- common_params.convolution.padding_mode = options.padding_mode->value();
- common_params.convolution.padding_top = options.padding_top->value();
- common_params.convolution.padding_bottom = options.padding_bottom->value();
- common_params.convolution.padding_left = options.padding_left->value();
- common_params.convolution.padding_right = options.padding_right->value();
- common_params.convolution.padding_stride_x = options.stride_x->value();
- common_params.convolution.padding_stride_y = options.stride_y->value();
- common_params.convolution.convolution_method = options.conv_mode->value();
- common_params.convolution.data_type = options.data_type->value();
- common_params.convolution.data_layout = options.data_layout->value();
-
- common_params.verification.absolute_tolerance = options.absolute_tolerance->value();
- common_params.verification.relative_tolerance = options.relative_tolerance->value();
- common_params.verification.tolerance_number = options.tolerance_number->value();
-
- return common_params;
-}
-
-/** Calculate stride information.
- *
- * Depending on the selected padding mode create the desired PadStrideInfo
- *
- * @param[in] params Convolution parameters supplied by the user.
- *
- * @return PadStrideInfo with the correct padding mode.
- */
-inline PadStrideInfo calculate_convolution_padding(ExampleParams params)
-{
- switch(params.convolution.padding_mode)
- {
- case PaddingMode::Manual:
- {
- return PadStrideInfo(params.convolution.padding_stride_x, params.convolution.padding_stride_y, params.convolution.padding_left, params.convolution.padding_right, params.convolution.padding_top,
- params.convolution.padding_bottom, DimensionRoundingType::FLOOR);
- }
- case PaddingMode::Valid:
- {
- return PadStrideInfo();
- }
- case PaddingMode::Same:
- {
- return arm_compute::calculate_same_pad(TensorShape(params.input.width, params.input.height), TensorShape(params.weights.width, params.weights.height),
- PadStrideInfo(params.convolution.padding_stride_x,
- params.convolution.padding_stride_y));
- }
- default:
- ARM_COMPUTE_ERROR("NOT SUPPORTED!");
- }
-}
-
/** ConvolutionLayer Graph example validation accessor class */
template <typename D>
-class ConvolutionVerifyAccessor final : public graph::ITensorAccessor
+class ConvolutionVerifyAccessor final : public VerifyAccessor<D>
{
-public:
+ using BaseClassType = VerifyAccessor<D>;
+ using BaseClassType::BaseClassType;
+ using BaseClassType::_params;
using TBias = typename std::conditional<std::is_same<typename std::decay<D>::type, uint8_t>::value, int32_t, D>::type;
- /** Constructor
- *
- * @param[in] params Convolution parameters
- */
- explicit ConvolutionVerifyAccessor(ExampleParams &params)
- : _params(std::move(params))
+ SimpleTensor<D> reference(SimpleTensor<D> &src, SimpleTensor<D> &weights, SimpleTensor<TBias> &bias, const TensorShape &output_shape) override
{
- }
+ // Calculate padding information
+ const PadStrideInfo padding_info = calculate_convolution_padding(_params);
- // Inherited methods overriden:
- bool access_tensor(ITensor &tensor) override
- {
- if(_params.output.npy.empty())
- {
- const RelativeTolerance<float> rel_tolerance(relative_tolenace(_params.verification.relative_tolerance)); /**< Relative tolerance */
- const AbsoluteTolerance<float> abs_tolerance(absolute_tolerance(_params.verification.absolute_tolerance)); /**< Absolute tolerance */
- const float tolerance_num(tolerance_number(_params.verification.tolerance_number)); /**< Tolerance number */
-
- //Create Input tensors
- SimpleTensor<D> src{ TensorShape(_params.input.width, _params.input.height, _params.input.fm, _params.input.batch), _params.convolution.data_type, 1, _params.input.quant_info };
- SimpleTensor<D> weights{ TensorShape(_params.weights.width, _params.weights.height, _params.weights.fm), _params.convolution.data_type, 1, _params.weights.quant_info };
- SimpleTensor<TBias> bias{ TensorShape(_params.input.height), _params.convolution.data_type, 1, _params.input.quant_info };
-
- //Fill the tenors with random values
- fill_tensor<D>(src, 0, static_cast<D>(_params.input.range_low), static_cast<D>(_params.input.range_high));
- fill_tensor<D>(weights, 1, static_cast<D>(_params.weights.range_low), static_cast<D>(_params.weights.range_high));
- fill_tensor<TBias>(bias, 2, static_cast<TBias>(_params.input.range_low), static_cast<TBias>(_params.input.range_high));
-
- // Calculate padding information
- const PadStrideInfo padding_info = calculate_convolution_padding(_params);
-
- //Calculate reference
- SimpleTensor<D> output = reference::convolution_layer<D>(src, weights, bias, permute_shape(tensor.info()->tensor_shape(), _params.convolution.data_layout, DataLayout::NCHW), padding_info, Size2D(1,
- 1),
- 1,
- _params.output.quant_info);
-
- arm_compute::test::validation::validate(Accessor(tensor), output, rel_tolerance, tolerance_num, abs_tolerance);
- }
- else
- {
- //The user provided a reference file use an npy accessor to validate
- NumPyAccessor(_params.output.npy, tensor.info()->tensor_shape(), tensor.info()->data_type()).access_tensor(tensor);
- }
- return false;
+ //Calculate reference
+ return reference::convolution_layer<D>(src, weights, bias, output_shape, padding_info, Size2D(1, 1),
+ 1, _params.output.quant_info);
}
-private:
- /** Fill tensor with Random values.
- *
- * Validate the given tensor against the reference result.
- *
- * @param[out] tensor The tensor we want to file
- * @param[in] seed seed for the randomization function
- * @param[in] low lower bound for random values
- * @param[in] high upper bound for random values
- *
- * @return None.
- */
- template <typename T>
- void fill_tensor(arm_compute::test::SimpleTensor<T> &tensor, std::random_device::result_type seed, T low, T high)
- {
- std::mt19937 gen(seed);
- switch(tensor.data_type())
- {
- case arm_compute::DataType::QASYMM8:
- {
- uint8_t qasymm8_low = tensor.quantization_info().quantize(low, RoundingPolicy::TO_NEAREST_UP);
- uint8_t qasymm8_high = tensor.quantization_info().quantize(high, RoundingPolicy::TO_NEAREST_UP);
-
- std::uniform_int_distribution<uint8_t> distribution(qasymm8_low, qasymm8_high);
-
- for(int i = 0; i < tensor.num_elements(); ++i)
- {
- tensor[i] = tensor.quantization_info().quantize(distribution(gen), RoundingPolicy::TO_NEAREST_UP);
- }
-
- break;
- }
- case arm_compute::DataType::S32:
- {
- std::uniform_int_distribution<int32_t> distribution(static_cast<int32_t>(low), static_cast<uint32_t>(high));
-
- for(int i = 0; i < tensor.num_elements(); ++i)
- {
- tensor[i] = distribution(gen);
- }
-
- break;
- }
-
- case arm_compute::DataType::F16:
- {
- std::uniform_real_distribution<float> distribution(static_cast<half>(low), static_cast<half>(high));
-
- for(int i = 0; i < tensor.num_elements(); ++i)
- {
- tensor[i] = static_cast<half>(distribution(gen));
- }
- break;
- }
- case arm_compute::DataType::F32:
- {
- std::uniform_real_distribution<float> distribution(static_cast<float>(low), static_cast<float>(high));
-
- for(int i = 0; i < tensor.num_elements(); ++i)
- {
- tensor[i] = distribution(gen);
- }
-
- break;
- }
- default:
- ARM_COMPUTE_ERROR("NOT SUPPORTED!");
- }
- }
- /** Select relative tolerance.
- *
- * Select relative tolerance if not supplied by user.
- *
- * @param[in] user_value supplied relative tolerance. -1 designates no user input
- *
- * @return Appropriate relative tolerance.
- */
- float relative_tolenace(float user_value)
+ float relative_tolerance() override
{
const std::map<arm_compute::graph::Target, const std::map<DataType, float>> relative_tolerance
{
@@ -618,32 +294,20 @@ private:
}
}
};
- if(user_value == -1)
+
+ if(_params.convolution_method == arm_compute::graph::ConvolutionMethod::Winograd
+ && _params.data_type == DataType::F32
+ && _params.common_params.target == arm_compute::graph::Target::NEON)
{
- if(_params.convolution.convolution_method == arm_compute::graph::ConvolutionMethod::Winograd
- && _params.convolution.data_type == DataType::F32
- && _params.common_params.target == arm_compute::graph::Target::NEON)
- {
- return 0.05f;
- }
- else
- {
- return relative_tolerance.at(_params.common_params.target).at(_params.convolution.data_type);
- }
+ return 0.05f;
+ }
+ else
+ {
+ return relative_tolerance.at(_params.common_params.target).at(_params.data_type);
}
-
- return user_value;
}
- /** Select absolute tolerance.
- *
- * Select absolute tolerance if not supplied by user.
- *
- * @param[in] user_value supplied absolute tolerance. -1 designates no user input
- *
- * @return Appropriate absolute tolerance.
- */
- float absolute_tolerance(float user_value)
+ float absolute_tolerance() override
{
const std::map<Target, const std::map<DataType, float>> absolute_tolerance
{
@@ -663,21 +327,10 @@ private:
}
};
- if(user_value == -1)
- {
- return absolute_tolerance.at(_params.common_params.target).at(_params.convolution.data_type);
- }
- return user_value;
+ return absolute_tolerance.at(_params.common_params.target).at(_params.data_type);
}
- /** Select tolerance number.
- *
- * Select tolerance number if not supplied by user.
- *
- * @param[in] user_value supplied tolerance number. -1 designates no user input
- *
- * @return Appropriate tolerance number.
- */
- float tolerance_number(float user_value)
+
+ float tolerance_number() override
{
const std::map<Target, const std::map<DataType, float>> absolute_tolerance
{
@@ -697,133 +350,38 @@ private:
}
};
- if(user_value == -1)
- {
- return absolute_tolerance.at(_params.common_params.target).at(_params.convolution.data_type);
- }
- return user_value;
+ return absolute_tolerance.at(_params.common_params.target).at(_params.data_type);
}
-
- ExampleParams _params;
};
-/** Generates appropriate convolution verify accessor
- *
- * @param[in] params User supplied parameters for convolution.
- *
- * @return A convolution verify accessor for the requested datatype.
- */
-inline std::unique_ptr<graph::ITensorAccessor> get_convolution_verify_accessor(ExampleParams params)
-{
- switch(params.convolution.data_type)
- {
- case DataType::QASYMM8:
- {
- return arm_compute::support::cpp14::make_unique<ConvolutionVerifyAccessor<uint8_t>>(
- params);
- }
- case DataType::F16:
- {
- return arm_compute::support::cpp14::make_unique<ConvolutionVerifyAccessor<half>>(
- params);
- }
- case DataType::F32:
- {
- return arm_compute::support::cpp14::make_unique<ConvolutionVerifyAccessor<float>>(
- params);
- }
- default:
- ARM_COMPUTE_ERROR("NOT SUPPORTED!");
- }
-}
-/** Generates appropriate accessor according to the specified graph parameters
- *
- * @param[in] graph_parameters Graph parameters
- * @param[in] lower Lower random values bound
- * @param[in] upper Upper random values bound
- * @param[in] seed Random generator seed
- *
- * @return An appropriate tensor accessor
- */
-inline std::unique_ptr<graph::ITensorAccessor> get_accessor(const TensorParams &tensor, PixelValue lower, PixelValue upper, const std::random_device::result_type seed = 0)
-{
- if(!tensor.npy.empty())
- {
- return arm_compute::support::cpp14::make_unique<NumPyBinLoader>(tensor.npy);
- }
- else
- {
- return arm_compute::support::cpp14::make_unique<RandomAccessor>(lower, upper, seed);
- }
-}
} // namespace
-class GraphConvolutionValidateExample final : public ValidateExample
+class GraphConvolutionValidateExample final : public GraphValidateExample<ConvolutionLayer, ConvolutionOptions, ConvolutionVerifyAccessor>
{
+ using GraphValidateExample::graph;
+
public:
GraphConvolutionValidateExample()
- : graph(0, "Convolution Graph example")
+ : GraphValidateExample("Convolution Graph example")
{
}
- bool do_setup(int argc, char **argv) override
- {
- CommandLineParser parser;
-
- ConvolutionOptions Options(parser);
- parser.parse(argc, argv);
-
- ExampleParams params = consume_covolution_graph_parameters(Options);
-
- if(params.common_params.help)
- {
- parser.print_help(argv[0]);
- return false;
- }
+ ConvolutionLayer GraphFunctionLayer(ExampleParams &params) override
+ {
+ const PixelValue lower = PixelValue(params.input.range_low, params.data_type, params.input.quant_info);
+ const PixelValue upper = PixelValue(params.input.range_high, params.data_type, params.input.quant_info);
- std::cout << params << std::endl;
+ const PixelValue weights_lower = PixelValue(params.weights.range_low, params.data_type, params.weights.quant_info);
+ const PixelValue weights_upper = PixelValue(params.weights.range_high, params.data_type, params.weights.quant_info);
// Calculate padding information
const PadStrideInfo padding_info = calculate_convolution_padding(params);
- // Create input descriptor
- const TensorShape input_shape = permute_shape(TensorShape(params.input.width, params.input.height, params.input.fm, params.input.batch), DataLayout::NCHW, params.convolution.data_layout);
- TensorDescriptor input_descriptor = TensorDescriptor(input_shape, params.convolution.data_type, params.input.quant_info, params.convolution.data_layout);
-
- const PixelValue lower = PixelValue(params.input.range_low, params.convolution.data_type, params.input.quant_info);
- const PixelValue upper = PixelValue(params.input.range_high, params.convolution.data_type, params.input.quant_info);
-
- const PixelValue weights_lower = PixelValue(params.weights.range_low, params.convolution.data_type, params.weights.quant_info);
- const PixelValue weights_upper = PixelValue(params.weights.range_high, params.convolution.data_type, params.weights.quant_info);
-
- graph << params.common_params.target
- << params.convolution.convolution_method
- << InputLayer(input_descriptor, get_accessor(params.input, lower, upper, 0))
- << ConvolutionLayer(params.weights.width, params.weights.height, params.weights.fm,
- get_accessor(params.weights, weights_lower, weights_upper, 1),
- get_accessor(params.bias, lower, upper, 2),
- padding_info, 1, params.weights.quant_info, params.output.quant_info)
- << OutputLayer(get_convolution_verify_accessor(params));
-
- GraphConfig config;
- config.num_threads = params.common_params.threads;
-
- graph.finalize(params.common_params.target, config);
-
- return true;
- }
-
- void do_run() override
- {
- graph.run();
- }
-
- void do_teardown() override
- {
+ return ConvolutionLayer(params.weights.width, params.weights.height, params.weights.fm,
+ get_accessor(params.weights, weights_lower, weights_upper, 1),
+ get_accessor(params.bias, lower, upper, 2),
+ padding_info, 1, params.weights.quant_info, params.output.quant_info);
}
-
-private:
- Stream graph;
};
/** Main program for Graph Convolution test
diff --git a/tests/validate_examples/graph_depthwiseconvolution.cpp b/tests/validate_examples/graph_depthwiseconvolution.cpp
new file mode 100644
index 0000000000..cdad404dfa
--- /dev/null
+++ b/tests/validate_examples/graph_depthwiseconvolution.cpp
@@ -0,0 +1,396 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/graph.h"
+
+#include "support/ToolchainSupport.h"
+
+#include "tests/NEON/Accessor.h"
+#include "tests/validation/Validation.h"
+#include "tests/validation/reference/DepthwiseConvolutionLayer.h"
+#include "tests/validation/reference/Permute.h"
+
+#include "utils/CommonGraphOptions.h"
+#include "utils/GraphUtils.h"
+#include "utils/Utils.h"
+
+#include "ValidateExample.h"
+#include "graph_validate_utils.h"
+
+#include <utility>
+
+using namespace arm_compute::utils;
+using namespace arm_compute::graph::frontend;
+using namespace arm_compute::graph_utils;
+using namespace arm_compute::graph;
+using namespace arm_compute;
+using namespace arm_compute::test;
+using namespace arm_compute::test::validation;
+
+namespace
+{
+/** Depthwise Convolution command line options used to configure the graph examples
+ *
+ * (Similar to common options)
+ * The options in this object get populated when "parse()" is called on the parser used to construct it.
+ * The expected workflow is:
+ *
+ * CommandLineParser parser;
+ * CommonOptions options( parser );
+ * parser.parse(argc, argv);
+ */
+class DepthConvolutionOptions final : public CommonGraphValidateOptions
+{
+public:
+ explicit DepthConvolutionOptions(CommandLineParser &parser) noexcept
+ : CommonGraphValidateOptions(parser),
+ width(parser.add_option<SimpleOption<int>>("width", 9)),
+ height(parser.add_option<SimpleOption<int>>("height", 9)),
+ channels(parser.add_option<SimpleOption<int>>("channels", 1)),
+ batch(parser.add_option<SimpleOption<int>>("batch", 1)),
+ weights_width(parser.add_option<SimpleOption<int>>("weights_width", 3)),
+ weights_height(parser.add_option<SimpleOption<int>>("weights_height", 3)),
+ padding_top(parser.add_option<SimpleOption<int>>("padding_top", 0)),
+ padding_left(parser.add_option<SimpleOption<int>>("padding_left", 0)),
+ padding_bottom(parser.add_option<SimpleOption<int>>("padding_bottom", 0)),
+ padding_right(parser.add_option<SimpleOption<int>>("padding_right", 0)),
+ stride_x(parser.add_option<SimpleOption<int>>("stride_x", 1)),
+ stride_y(parser.add_option<SimpleOption<int>>("stride_y", 1)),
+ padding_mode(),
+ conv_mode(),
+ depth_multiplier(parser.add_option<SimpleOption<int>>("depth_multiplier", 1)),
+ data_layout(),
+ scale(parser.add_option<SimpleOption<float>>("scale", 1.0f)),
+ offset(parser.add_option<SimpleOption<int>>("offset", 0)),
+ weights_scale(parser.add_option<SimpleOption<float>>("weights_scale", 1.0f)),
+ weights_offset(parser.add_option<SimpleOption<int>>("weights_offset", 0)),
+ output_scale(parser.add_option<SimpleOption<float>>("output_scale", 1.0f)),
+ output_offset(parser.add_option<SimpleOption<int>>("output_offset", 0)),
+ input_range_low(parser.add_option<SimpleOption<uint64_t>>("input_range_low")),
+ input_range_high(parser.add_option<SimpleOption<uint64_t>>("input_range_high")),
+ weights_range_low(parser.add_option<SimpleOption<uint64_t>>("weights_range_low")),
+ weights_range_high(parser.add_option<SimpleOption<uint64_t>>("weights_range_high")),
+ input_npy(parser.add_option<SimpleOption<std::string>>("input_image")),
+ output_npy(parser.add_option<SimpleOption<std::string>>("reference_image")),
+ weights_npy(parser.add_option<SimpleOption<std::string>>("weights_npy")),
+ bias_npy(parser.add_option<SimpleOption<std::string>>("bias_image"))
+ {
+ const std::set<ConvolutionPaddingMode> available_padding_modes
+ {
+ ConvolutionPaddingMode::Valid,
+ ConvolutionPaddingMode::Same
+ };
+
+ const std::set<arm_compute::graph::DepthwiseConvolutionMethod> supported_convolution_methods
+ {
+ arm_compute::graph::DepthwiseConvolutionMethod::Default,
+ arm_compute::graph::DepthwiseConvolutionMethod::GEMV,
+ arm_compute::graph::DepthwiseConvolutionMethod::Optimized3x3,
+ };
+
+ const std::set<DataLayout> supported_data_layouts
+ {
+ DataLayout::NHWC,
+ DataLayout::NCHW,
+ };
+
+ padding_mode = parser.add_option<EnumOption<ConvolutionPaddingMode>>("padding_mode", available_padding_modes, ConvolutionPaddingMode::Valid);
+ conv_mode = parser.add_option<EnumOption<arm_compute::graph::DepthwiseConvolutionMethod>>("convolution_method", supported_convolution_methods,
+ arm_compute::graph::DepthwiseConvolutionMethod::Default);
+ data_layout = parser.add_option<EnumOption<DataLayout>>("layout", supported_data_layouts, DataLayout::NHWC);
+
+ padding_mode->set_help("Set padding mode");
+ width->set_help("Set Input dimension width");
+ height->set_help("Set Input dimension height");
+ channels->set_help("Set Input dimension channels");
+ batch->set_help("Set Input dimension batch");
+ weights_width->set_help("Set weights_dimensions width");
+ weights_height->set_help("Set weights_dimensions height");
+ padding_top->set_help("Set padding top");
+ padding_bottom->set_help("Set padding bottom");
+ padding_left->set_help("Set padding left");
+ padding_right->set_help("Set padding right");
+ stride_x->set_help("Set padding stride x");
+ stride_y->set_help("Set padding stride y");
+ conv_mode->set_help("Set convolution method");
+ data_layout->set_help("Data layout to use");
+ scale->set_help("Quantization scale from QASYMM8");
+ offset->set_help("Quantization offset from QASYMM8");
+ output_scale->set_help("Quantization scale from QASYMM8");
+ output_offset->set_help("Quantization offset from QASYMM8");
+ input_npy->set_help("Use input .npy instead");
+ output_npy->set_help("Use .npy as a reference");
+ input_range_low->set_help("Lower bound for input randomization range");
+ input_range_high->set_help("Upper bound for input randomization range");
+ weights_scale->set_help("Quantization scale from QASYMM8");
+ weights_offset->set_help("Quantization offset from QASYMM8");
+ weights_range_low->set_help("Lower bound for weights randomization range");
+ weights_range_high->set_help("Upper bound for weights randomization range");
+ depth_multiplier->set_help("Depth multiplier");
+ }
+
+ /** Fill out the supplied parameters with user supplied parameters
+ *
+ * @param[out] common_params Example parameters structure to fill
+ *                           with the values parsed from the command line
+ *
+ * @return None.
+ */
+ void consume_parameters(ExampleParams &common_params)
+ {
+ common_params.input.width = width->value();
+ common_params.input.height = height->value();
+ common_params.input.fm = channels->value();
+ common_params.input.batch = batch->value();
+ common_params.input.quant_info.scale = scale->value();
+ common_params.input.quant_info.offset = offset->value();
+ common_params.input.npy = input_npy->value();
+ common_params.input.range_low = input_range_low->value();
+ common_params.input.range_high = input_range_high->value();
+
+ common_params.weights.width = weights_width->value();
+ common_params.weights.height = weights_height->value();
+ common_params.weights.npy = weights_npy->value();
+ common_params.weights.range_low = weights_range_low->value();
+ common_params.weights.range_high = weights_range_high->value();
+ common_params.weights.quant_info.scale = weights_scale->value();
+ common_params.weights.quant_info.offset = weights_offset->value();
+
+ common_params.bias.npy = bias_npy->value();
+
+ common_params.output.quant_info.scale = output_scale->value();
+ common_params.output.quant_info.offset = output_offset->value();
+ common_params.output.npy = output_npy->value();
+
+ common_params.convolution.padding_mode = padding_mode->value();
+ common_params.convolution.padding_top = padding_top->value();
+ common_params.convolution.padding_bottom = padding_bottom->value();
+ common_params.convolution.padding_left = padding_left->value();
+ common_params.convolution.padding_right = padding_right->value();
+ common_params.convolution.padding_stride_x = stride_x->value();
+ common_params.convolution.padding_stride_y = stride_y->value();
+ common_params.convolution.depth_multiplier = depth_multiplier->value();
+
+ common_params.data_type = data_type->value();
+ common_params.data_layout = data_layout->value();
+ common_params.depth_convolution_method = conv_mode->value();
+ }
+
+ void print_parameters(::std::ostream &os, const ExampleParams &common_params) override
+ {
+ os << "Threads : " << common_params.common_params.threads << std::endl;
+ os << "Target : " << common_params.common_params.target << std::endl;
+ os << "Data type : " << common_params.data_type << std::endl;
+ os << "Input dimensions(X,Y, Channels, Batch) : (" << common_params.input.width << "," << common_params.input.height << "," << common_params.input.fm << "," << common_params.input.batch << ")"
+ << std::endl;
+ os << "Weight dimensions(X,Y, Channels(same as input)) : (" << common_params.weights.width << "," << common_params.weights.height << "," << common_params.input.fm << ","
+ << ")" << std::endl;
+ os << "Padding(top, bottom, left, right) (stride x, stride y) : (" << common_params.convolution.padding_top << "," << common_params.convolution.padding_bottom << "," <<
+ common_params.convolution.padding_left << "," << common_params.convolution.padding_right << ") (" << common_params.convolution.padding_stride_x << "," << common_params.convolution.padding_stride_y <<
+ ")" << std::endl;
+ os << "Padding Mode: " << common_params.convolution.padding_mode << std::endl;
+ os << "Convolution Method: " << common_params.depth_convolution_method << std::endl;
+ os << "Depth multiplier: " << common_params.convolution.depth_multiplier;
+ }
+
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ DepthConvolutionOptions(const DepthConvolutionOptions &) = delete;
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ DepthConvolutionOptions &operator=(const DepthConvolutionOptions &) = delete;
+ /** Allow instances of this class to be moved */
+ DepthConvolutionOptions(DepthConvolutionOptions &&) noexcept(true) = default;
+ /** Allow instances of this class to be moved */
+ DepthConvolutionOptions &operator=(DepthConvolutionOptions &&) noexcept(true) = default;
+ /** Default destructor */
+ ~DepthConvolutionOptions() override = default;
+
+ SimpleOption<int> *width; /**< Input width */
+ SimpleOption<int> *height; /**< Input height */
+ SimpleOption<int> *channels; /**< Input channels */
+ SimpleOption<int> *batch; /**< Input batch */
+ SimpleOption<int> *weights_width; /**< weights width */
+ SimpleOption<int> *weights_height; /**< weights height */
+ SimpleOption<int> *padding_top; /**< Padding top */
+ SimpleOption<int> *padding_left; /**< Padding left */
+ SimpleOption<int> *padding_bottom; /**< Padding bottom */
+ SimpleOption<int> *padding_right; /**< Padding right */
+ SimpleOption<int> *stride_x; /**< Padding stride x */
+ SimpleOption<int> *stride_y; /**< Padding stride y */
+ EnumOption<ConvolutionPaddingMode> *padding_mode; /**< Padding mode */
+ EnumOption<arm_compute::graph::DepthwiseConvolutionMethod> *conv_mode; /**< Convolution method */
+ SimpleOption<int> *depth_multiplier; /**< Depth multiplier */
+ EnumOption<arm_compute::DataLayout> *data_layout; /**< Graph data layout */
+ SimpleOption<float> *scale; /**< Input Quantization scale from QASYMM8 */
+ SimpleOption<int> *offset; /**< Input Quantization offset from QASYMM8 */
+ SimpleOption<float> *weights_scale; /**< Weights Quantization scale from QASYMM8 */
+ SimpleOption<int> *weights_offset; /**< Weights Quantization offset from QASYMM8 */
+ SimpleOption<float> *output_scale; /**< Output Quantization scale from QASYMM8 */
+ SimpleOption<int> *output_offset; /**< Output Quantization offset from QASYMM8 */
+ SimpleOption<uint64_t> *input_range_low; /**< Lower bound for input randomization range */
+ SimpleOption<uint64_t> *input_range_high; /**< Upper bound for input randomization range */
+ SimpleOption<uint64_t> *weights_range_low; /**< Lower bound for weights randomization range */
+ SimpleOption<uint64_t> *weights_range_high; /**< Upper bound for weights randomization range */
+
+ SimpleOption<std::string> *input_npy; /**< Use input .npy image */
+ SimpleOption<std::string> *output_npy; /**< Use output .npy image to verify*/
+ SimpleOption<std::string> *weights_npy; /**< Use weights .npy image */
+ SimpleOption<std::string> *bias_npy; /**< Use bias .npy image */
+};
+
+/** DepthwiseConvolutionLayer Graph example validation accessor class */
+template <typename D>
+class DepthConvolutionVerifyAccessor final : public VerifyAccessor<D>
+{
+public:
+ using BaseClassType = VerifyAccessor<D>;
+ using BaseClassType::BaseClassType;
+ using BaseClassType::_params;
+ using TBias = typename std::conditional<std::is_same<typename std::decay<D>::type, uint8_t>::value, int32_t, D>::type;
+
+public:
+ SimpleTensor<D> reference(SimpleTensor<D> &src, SimpleTensor<D> &weights, SimpleTensor<TBias> &bias, const TensorShape &output_shape) override
+ {
+ // Calculate padding information
+ const PadStrideInfo padding_info = calculate_convolution_padding(_params);
+
+ //Calculate reference
+ return reference::depthwise_convolution<D>(src, weights, bias, output_shape, padding_info,
+ _params.convolution.depth_multiplier,
+ Size2D(1U, 1U),
+ _params.output.quant_info);
+ }
+
+ float relative_tolerance() override
+ {
+ const std::map<arm_compute::graph::Target, const std::map<DataType, float>> relative_tolerance
+ {
+ {
+ arm_compute::graph::Target::CL,
+ { { DataType::F16, 0.01f },
+ { DataType::F32, 0.01f },
+ { DataType::QASYMM8, 0.0f }
+ }
+ },
+ {
+ arm_compute::graph::Target::NEON,
+ { { DataType::F16, 0.01f },
+ { DataType::F32, 0.01f },
+ { DataType::QASYMM8, 1.0f }
+ }
+ }
+ };
+
+ return relative_tolerance.at(_params.common_params.target).at(_params.data_type);
+ }
+
+ float absolute_tolerance() override
+ {
+ const std::map<Target, const std::map<DataType, float>> absolute_tolerance
+ {
+ {
+ Target::CL,
+ { { DataType::F16, 0.0f },
+ { DataType::F32, 0.0000f },
+ { DataType::QASYMM8, 0.0f }
+ }
+ },
+ {
+ Target::NEON,
+ { { DataType::F16, 0.2f },
+ { DataType::F32, 0.002f },
+ { DataType::QASYMM8, 0.0f }
+ }
+ }
+ };
+
+ return absolute_tolerance.at(_params.common_params.target).at(_params.data_type);
+ }
+
+ float tolerance_number() override
+ {
+ const std::map<Target, const std::map<DataType, float>> absolute_tolerance
+ {
+ {
+ Target::CL,
+ { { DataType::F16, 0.05f },
+ { DataType::F32, 0.00f },
+ { DataType::QASYMM8, 0.0f }
+ }
+ },
+ {
+ Target::NEON,
+ { { DataType::F16, 0.05f },
+ { DataType::F32, 0.0f },
+ { DataType::QASYMM8, 0.0f }
+ }
+ }
+ };
+
+ return absolute_tolerance.at(_params.common_params.target).at(_params.data_type);
+ }
+};
+
+} // namespace
+
+class GraphDepthwiseConvolutionValidateExample final : public GraphValidateExample<DepthwiseConvolutionLayer, DepthConvolutionOptions, DepthConvolutionVerifyAccessor>
+{
+ using GraphValidateExample::graph;
+
+public:
+ GraphDepthwiseConvolutionValidateExample()
+ : GraphValidateExample("DepthWiseConvolution Graph example")
+ {
+ }
+
+ DepthwiseConvolutionLayer GraphFunctionLayer(ExampleParams &params) override
+ {
+ const PixelValue lower = PixelValue(params.input.range_low, params.data_type, params.input.quant_info);
+ const PixelValue upper = PixelValue(params.input.range_high, params.data_type, params.input.quant_info);
+
+ const PixelValue weights_lower = PixelValue(params.weights.range_low, params.data_type, params.weights.quant_info);
+ const PixelValue weights_upper = PixelValue(params.weights.range_high, params.data_type, params.weights.quant_info);
+
+ // Calculate padding information
+ const PadStrideInfo padding_info = calculate_convolution_padding(params);
+
+ return DepthwiseConvolutionLayer(params.weights.width, params.weights.height,
+ get_accessor(params.weights, weights_lower, weights_upper, 1),
+ get_accessor(params.bias, lower, upper, 2),
+ padding_info, params.convolution.depth_multiplier, params.weights.quant_info, params.output.quant_info);
+ }
+};
+
+/** Main program for Graph Depthwise Convolution test
+ *
+ * @param[in] argc Number of arguments
+ * @param[in] argv Arguments ( Input dimensions [width, height, channels, batch]
+ * Weights dimensions [width, height, channels]
+ * Padding [top,bottom,left,right, Stride x, Stride y, mode [Valid / Same / Manual] )
+ * Convolution Method[ Default/GEMV/Optimized3x3]
+ * Verification[tolerance_number,absolute_tolerance,relative_tolerance] )
+ *
+ */
+int main(int argc, char **argv)
+{
+ return arm_compute::utils::run_example<GraphDepthwiseConvolutionValidateExample>(argc, argv);
+}
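
For reference, a possible invocation of the new example once built. The binary name and the flags not visible in this patch (target, type, threads and the tolerance overrides are presumably registered by CommonGraphValidateOptions, whose source is not shown here) are assumptions; the remaining options are the ones registered by DepthConvolutionOptions above.

    ./graph_depthwiseconvolution --target=NEON --type=QASYMM8 --layout=NHWC \
        --width=56 --height=56 --channels=32 --weights_width=3 --weights_height=3 \
        --padding_mode=same --stride_x=1 --stride_y=1 --depth_multiplier=1 \
        --convolution_method=optimized3x3
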
diff --git a/tests/validate_examples/graph_fully_connected.cpp b/tests/validate_examples/graph_fully_connected.cpp
index e4f51175f0..085518c865 100644
--- a/tests/validate_examples/graph_fully_connected.cpp
+++ b/tests/validate_examples/graph_fully_connected.cpp
@@ -35,6 +35,7 @@
#include "utils/Utils.h"
#include "ValidateExample.h"
+#include "graph_validate_utils.h"
#include <utility>
@@ -45,77 +46,10 @@ using namespace arm_compute::graph;
using namespace arm_compute;
using namespace arm_compute::test;
using namespace arm_compute::test::validation;
-namespace
-{
-/** Structure holding all the input tensor graph parameters */
-struct TensorParams
-{
- int width{ 1 };
- int height{ 1 };
- int fm{ 1 };
- int batch{ 1 };
- QuantizationInfo quant_info{ 1.0f, 0 };
- uint64_t range_low{ 0 };
- uint64_t range_high{ 16 };
-};
-/** Structure holding all the verification graph parameters */
-struct VerificationParams
-{
- float absolute_tolerance{ -1.f };
- float relative_tolerance{ -1.f };
- float tolerance_number{ -1.f };
-};
-
-/** Structure holding all the common graph parameters */
-struct FrameworkParams
-{
- bool help{ false };
- int threads{ 0 };
- arm_compute::graph::Target target{ arm_compute::graph::Target::NEON };
-};
-/** Structure holding all the fully_connected layer graph parameters */
-struct FullyConnectedParams
-{
- arm_compute::DataType data_type{ DataType::F32 };
- arm_compute::DataLayout data_layout{ DataLayout::NCHW };
- FullyConnectedLayerInfo info{};
- int num_outputs{ 1 };
-};
-
-/** Structure holding all the graph Example parameters */
-struct ExampleParams
-{
- FrameworkParams common_params{};
- TensorParams input{};
- TensorParams weights{};
- TensorParams output{};
- VerificationParams verification{};
- FullyConnectedParams fully_connected{};
-};
-
-/** Formatted output of the fully_connectedParams type
- *
- * @param[out] os Output stream.
- * @param[in] common_params fully_connected parameters to output
- *
- * @return Modified output stream.
- */
-::std::ostream &operator<<(::std::ostream &os, const ExampleParams &common_params)
+namespace
{
- std::string false_str = std::string("false");
- std::string true_str = std::string("true");
-
- os << "Threads : " << common_params.common_params.threads << std::endl;
- os << "Target : " << common_params.common_params.target << std::endl;
- os << "Data type : " << common_params.fully_connected.data_type << std::endl;
- os << "Input dimensions(X,Y, Channels, Batch) : (" << common_params.input.width << "," << common_params.input.height << "," << common_params.input.fm << "," << common_params.input.batch << ")"
- << std::endl;
- os << "Number of outputs : " << common_params.fully_connected.num_outputs << std::endl;
- return os;
-}
-
-/** fully_connected command line options used to configure the graph examples
+/** Fully connected command line options used to configure the graph examples
*
* (Similar to common options)
* The options in this object get populated when "parse()" is called on the parser used to construct it.
@@ -125,19 +59,13 @@ struct ExampleParams
* CommonOptions options( parser );
* parser.parse(argc, argv);
*/
-class FullyConnectedOptions final
+class FullyConnectedOptions final : public CommonGraphValidateOptions
{
public:
explicit FullyConnectedOptions(CommandLineParser &parser) noexcept
- : width(parser.add_option<SimpleOption<int>>("width", 3)),
+ : CommonGraphValidateOptions(parser),
+ width(parser.add_option<SimpleOption<int>>("width", 3)),
batch(parser.add_option<SimpleOption<int>>("batch", 1)),
- help(parser.add_option<ToggleOption>("help")),
- threads(parser.add_option<SimpleOption<int>>("threads")),
- target(),
- data_type(),
- absolute_tolerance(parser.add_option<SimpleOption<float>>("abs_tolerance", -1.0f)),
- relative_tolerance(parser.add_option<SimpleOption<float>>("rel_tolerance", -1.0f)),
- tolerance_number(parser.add_option<SimpleOption<float>>("tolerance_num", -1.0f)),
input_scale(parser.add_option<SimpleOption<float>>("input_scale", 1.0f)),
input_offset(parser.add_option<SimpleOption<int>>("input_offset", 0)),
weights_scale(parser.add_option<SimpleOption<float>>("weights_scale", 1.0f)),
@@ -150,31 +78,8 @@ public:
weights_range_low(parser.add_option<SimpleOption<uint64_t>>("weights_range_low")),
weights_range_high(parser.add_option<SimpleOption<uint64_t>>("weights_range_high"))
{
- const std::set<arm_compute::graph::Target> supported_targets
- {
- Target::NEON,
- Target::CL,
- Target::GC,
- };
-
- const std::set<arm_compute::DataType> supported_data_types
- {
- DataType::F16,
- DataType::F32,
- DataType::QASYMM8,
- };
-
- target = parser.add_option<EnumOption<Target>>("target", supported_targets, Target::NEON);
- data_type = parser.add_option<EnumOption<DataType>>("type", supported_data_types, DataType::F32);
-
- target->set_help("Target to execute on");
- data_type->set_help("Data type to use");
- help->set_help("Show this help message");
width->set_help("Set Input dimension width");
batch->set_help("Set Input dimension batch");
- absolute_tolerance->set_help("Absolute tolerance used for verification");
- relative_tolerance->set_help("Absolute tolerance used for verification");
- tolerance_number->set_help("Absolute tolerance used for verification");
input_scale->set_help("Quantization scale from QASYMM8");
input_offset->set_help("Quantization offset from QASYMM8");
weights_scale->set_help("Quantization scale from QASYMM8");
@@ -188,6 +93,44 @@ public:
weights_range_high->set_help("Lower bound for input randomization range");
}
+ /** Fill out the supplied parameters with the user supplied parameters
+ *
+ * @param[out] common_params Example parameters to fill from the parsed options
+ *
+ * @return None.
+ */
+ void consume_parameters(ExampleParams &common_params)
+ {
+ common_params.input.width = width->value();
+ common_params.input.batch = batch->value();
+ common_params.input.quant_info.scale = input_scale->value();
+ common_params.input.quant_info.offset = input_offset->value();
+ common_params.input.range_low = input_range_low->value();
+ common_params.input.range_high = input_range_high->value();
+
+ common_params.weights.quant_info.scale = weights_scale->value();
+ common_params.weights.quant_info.offset = weights_offset->value();
+ common_params.weights.range_low = weights_range_low->value();
+ common_params.weights.range_high = weights_range_high->value();
+
+ common_params.output.quant_info.scale = output_scale->value();
+ common_params.output.quant_info.offset = output_offset->value();
+
+ common_params.data_type = data_type->value();
+ common_params.fully_connected.num_outputs = num_outputs->value();
+ }
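+ // Note: the common options (target, threads, tolerances) are consumed
+ // separately via CommonGraphValidateOptions::consume_common_parameters();
+ // GraphValidateExample::do_setup() in graph_validate_utils.h calls both.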
+
+ void print_parameters(::std::ostream &os, const ExampleParams &common_params) override
+ {
+ os << "Threads : " << common_params.common_params.threads << std::endl;
+ os << "Target : " << common_params.common_params.target << std::endl;
+ os << "Data type : " << common_params.data_type << std::endl;
+ os << "Input dimensions(X,Y, Channels, Batch) : (" << common_params.input.width << "," << common_params.input.height << "," << common_params.input.fm << "," << common_params.input.batch << ")"
+ << std::endl;
+ os << "Number of outputs : " << common_params.fully_connected.num_outputs << std::endl;
+ }
+
/** Prevent instances of this class from being copied (As this class contains pointers) */
FullyConnectedOptions(const FullyConnectedOptions &) = delete;
/** Prevent instances of this class from being copied (As this class contains pointers) */
@@ -197,95 +140,41 @@ public:
/** Allow instances of this class to be moved */
FullyConnectedOptions &operator=(FullyConnectedOptions &&) noexcept(true) = default;
/** Default destructor */
- ~FullyConnectedOptions() = default;
-
- SimpleOption<int> *width; /**< Input width */
- SimpleOption<int> *batch; /**< Input batch */
- ToggleOption *help; /**< show help message */
- SimpleOption<int> *threads; /**< Number of threads option */
- EnumOption<arm_compute::graph::Target> *target; /**< Graph execution target */
- EnumOption<arm_compute::DataType> *data_type; /**< Graph data type */
- SimpleOption<float> *absolute_tolerance; /**< Absolute tolerance used in verification */
- SimpleOption<float> *relative_tolerance; /**< Relative tolerance used in verification */
- SimpleOption<float> *tolerance_number; /**< Tolerance number used in verification */
- SimpleOption<float> *input_scale; /**< Input Quantization scale from QASSYMM8 */
- SimpleOption<int> *input_offset; /**< Input Quantization offset from QASSYMM8 */
- SimpleOption<float> *weights_scale; /**< Weights Quantization scale from QASSYMM8 */
- SimpleOption<int> *weights_offset; /**< Weights Quantization offset from QASSYMM8 */
- SimpleOption<float> *output_scale; /**< Output Quantization scale from QASSYMM8 */
- SimpleOption<int> *output_offset; /**< Output Quantization offset from QASSYMM8 */
- SimpleOption<int> *num_outputs; /**< Number of outputs. */
- SimpleOption<uint64_t> *input_range_low; /**< Lower bound for input randomization range */
- SimpleOption<uint64_t> *input_range_high; /**< Upper bound for input randomization range */
- SimpleOption<uint64_t> *weights_range_low; /**< Lower bound for weights randomization range */
- SimpleOption<uint64_t> *weights_range_high; /**< Upper bound for weights randomization range */
+ ~FullyConnectedOptions() override = default;
+
+ SimpleOption<int> *width; /**< Input width */
+ SimpleOption<int> *batch; /**< Input batch */
+ SimpleOption<float> *input_scale; /**< Input Quantization scale for QASYMM8 */
+ SimpleOption<int> *input_offset; /**< Input Quantization offset for QASYMM8 */
+ SimpleOption<float> *weights_scale; /**< Weights Quantization scale for QASYMM8 */
+ SimpleOption<int> *weights_offset; /**< Weights Quantization offset for QASYMM8 */
+ SimpleOption<float> *output_scale; /**< Output Quantization scale for QASYMM8 */
+ SimpleOption<int> *output_offset; /**< Output Quantization offset for QASYMM8 */
+ SimpleOption<int> *num_outputs; /**< Number of outputs. */
+ SimpleOption<uint64_t> *input_range_low; /**< Lower bound for input randomization range */
+ SimpleOption<uint64_t> *input_range_high; /**< Upper bound for input randomization range */
+ SimpleOption<uint64_t> *weights_range_low; /**< Lower bound for weights randomization range */
+ SimpleOption<uint64_t> *weights_range_high; /**< Upper bound for weights randomization range */
};
-/** Consumes the fully_connected graph options and creates a structure containing any information
- *
- * @param[in] options Options to consume
- *
- * @return fully_connectedparams structure containing the common graph parameters
- */
-ExampleParams consume_fully_connected_graph_parameters(FullyConnectedOptions &options)
-{
- ExampleParams common_params;
-
- common_params.common_params.help = options.help->is_set() ? options.help->value() : false;
- common_params.common_params.threads = options.threads->value();
- common_params.common_params.target = options.target->value();
-
- common_params.input.width = options.width->value();
- common_params.input.batch = options.batch->value();
- common_params.input.quant_info.scale = options.input_scale->value();
- common_params.input.quant_info.offset = options.input_offset->value();
- common_params.input.range_low = options.input_range_low->value();
- common_params.input.range_high = options.input_range_high->value();
-
- common_params.weights.quant_info.scale = options.weights_scale->value();
- common_params.weights.quant_info.offset = options.weights_offset->value();
- common_params.weights.range_low = options.weights_range_low->value();
- common_params.weights.range_high = options.weights_range_high->value();
-
- common_params.output.quant_info.scale = options.output_scale->value();
- common_params.output.quant_info.offset = options.output_offset->value();
-
- common_params.fully_connected.data_type = options.data_type->value();
- common_params.fully_connected.num_outputs = options.num_outputs->value();
-
- common_params.verification.absolute_tolerance = options.absolute_tolerance->value();
- common_params.verification.relative_tolerance = options.relative_tolerance->value();
- common_params.verification.tolerance_number = options.tolerance_number->value();
-
- return common_params;
-}
-
-/** fully_connectedLayer Graph example validation accessor class */
+/** Fully Connected Layer Graph example validation accessor class */
template <typename D>
-class FullyConnectedVerifyAccessor final : public graph::ITensorAccessor
+class FullyConnectedVerifyAccessor final : public VerifyAccessor<D>
{
-public:
+ using BaseClassType = VerifyAccessor<D>;
+ using BaseClassType::BaseClassType;
+ using BaseClassType::_params;
using TBias = typename std::conditional<std::is_same<typename std::decay<D>::type, uint8_t>::value, int32_t, D>::type;
- /** Constructor
- *
- * @param[in] params fully_connected parameters
- */
- explicit FullyConnectedVerifyAccessor(ExampleParams &params)
- : _params(params)
- {
- }
-
- // Inherited methods overridden:
- bool access_tensor(ITensor &tensor) override
+ // Inherited methods overridden:
+ void create_tensors(arm_compute::test::SimpleTensor<D> &src,
+ arm_compute::test::SimpleTensor<D> &weights,
+ arm_compute::test::SimpleTensor<TBias> &bias,
+ ITensor &tensor) override
{
- const RelativeTolerance<float> rel_tolerance(relative_tolenace(_params.verification.relative_tolerance)); /**< Relative tolerance */
- const AbsoluteTolerance<float> abs_tolerance(absolute_tolerance(_params.verification.absolute_tolerance)); /**< Absolute tolerance */
- const float tolerance_num(tolerance_number(_params.verification.tolerance_number)); /**< Tolerance number */
-
// Calculate Tensor shapes for verification
const TensorShape input_shape = TensorShape(_params.input.width, _params.input.height, _params.input.fm, _params.input.batch);
- const TensorDescriptor input_descriptor = TensorDescriptor(input_shape, _params.fully_connected.data_type, _params.input.quant_info);
+ const TensorDescriptor input_descriptor = TensorDescriptor(input_shape, _params.data_type, _params.input.quant_info);
const TensorDescriptor weights_descriptor = FullyConnectedLayerNode::compute_weights_descriptor(input_descriptor,
_params.fully_connected.num_outputs,
_params.fully_connected.info,
@@ -293,101 +182,31 @@ public:
const TensorDescriptor output_desciptor = FullyConnectedLayerNode::compute_output_descriptor(input_descriptor, _params.fully_connected.num_outputs, _params.output.quant_info);
//Create Input tensors
- SimpleTensor<D> src{ input_descriptor.shape, _params.fully_connected.data_type, 1, input_descriptor.quant_info };
- SimpleTensor<D> weights{ weights_descriptor.shape, _params.fully_connected.data_type, 1, weights_descriptor.quant_info };
- SimpleTensor<TBias> bias{ TensorShape(tensor.info()->tensor_shape().x()), _params.fully_connected.data_type, 1, _params.input.quant_info };
-
- //Fill the tensors with random values
- fill_tensor<D>(src, 0, static_cast<D>(_params.input.range_low), static_cast<D>(_params.input.range_high));
- fill_tensor<D>(weights, 1, static_cast<D>(_params.weights.range_low), static_cast<D>(_params.weights.range_high));
- fill_tensor<TBias>(bias, 2, static_cast<TBias>(_params.input.range_low), static_cast<TBias>(_params.input.range_high));
-
- //Calculate reference
- SimpleTensor<D> output = reference::fully_connected_layer<D>(src, weights, bias, output_desciptor.shape, _params.output.quant_info);
-
- arm_compute::test::validation::validate(Accessor(tensor), output, rel_tolerance, tolerance_num, abs_tolerance);
-
- return false;
+ src = SimpleTensor<D> { input_descriptor.shape, _params.data_type, 1, input_descriptor.quant_info };
+ weights = SimpleTensor<D> { weights_descriptor.shape, _params.data_type, 1, weights_descriptor.quant_info };
+ bias = SimpleTensor<TBias> { TensorShape(tensor.info()->tensor_shape().x()), _params.data_type, 1, _params.input.quant_info };
}
-private:
- /** Fill tensor with Random values.
- *
- * Validate the given tensor against the reference result.
- *
- * @param[out] tensor The tensor we want to file
- * @param[in] seed seed for the randomization function
- * @param[in] low lower bound for random values
- * @param[in] high upper bound for random values
- *
- * @return None.
- */
- template <typename T>
- void fill_tensor(arm_compute::test::SimpleTensor<T> &tensor, std::random_device::result_type seed, T low, T high)
+ TensorShape output_shape(ITensor &tensor) override
{
- std::mt19937 gen(seed);
- switch(tensor.data_type())
- {
- case arm_compute::DataType::QASYMM8:
- {
- const uint8_t qasymm8_low = tensor.quantization_info().quantize(low, RoundingPolicy::TO_NEAREST_UP);
- const uint8_t qasymm8_high = tensor.quantization_info().quantize(high, RoundingPolicy::TO_NEAREST_UP);
+ ARM_COMPUTE_UNUSED(tensor);
- std::uniform_int_distribution<uint8_t> distribution(qasymm8_low, qasymm8_high);
-
- for(int i = 0; i < tensor.num_elements(); ++i)
- {
- tensor[i] = tensor.quantization_info().quantize(distribution(gen), RoundingPolicy::TO_NEAREST_UP);
- }
-
- break;
- }
- case arm_compute::DataType::S32:
- {
- std::uniform_int_distribution<int32_t> distribution(static_cast<int32_t>(low), static_cast<uint32_t>(high));
-
- for(int i = 0; i < tensor.num_elements(); ++i)
- {
- tensor[i] = distribution(gen);
- }
-
- break;
- }
-
- case arm_compute::DataType::F16:
- {
- std::uniform_real_distribution<float> distribution(static_cast<half>(low), static_cast<half>(high));
-
- for(int i = 0; i < tensor.num_elements(); ++i)
- {
- tensor[i] = static_cast<half>(distribution(gen));
- }
- break;
- }
- case arm_compute::DataType::F32:
- {
- std::uniform_real_distribution<float> distribution(static_cast<float>(low), static_cast<float>(high));
+ const TensorShape input_shape = TensorShape(_params.input.width, _params.input.height, _params.input.fm, _params.input.batch);
+ const TensorDescriptor input_descriptor = TensorDescriptor(input_shape, _params.data_type, _params.input.quant_info);
+ const TensorDescriptor output_descriptor = FullyConnectedLayerNode::compute_output_descriptor(input_descriptor, _params.fully_connected.num_outputs, _params.output.quant_info);
- for(int i = 0; i < tensor.num_elements(); ++i)
- {
- tensor[i] = distribution(gen);
- }
+ return output_descriptor.shape;
+ }
- break;
- }
- default:
- ARM_COMPUTE_ERROR("NOT SUPPORTED!");
- }
+ arm_compute::test::SimpleTensor<D> reference(arm_compute::test::SimpleTensor<D> &src,
+ arm_compute::test::SimpleTensor<D> &weights,
+ arm_compute::test::SimpleTensor<TBias> &bias,
+ const arm_compute::TensorShape &output_shape) override
+ {
+ return reference::fully_connected_layer<D>(src, weights, bias, output_shape, _params.output.quant_info);
}
- /** Select relative tolerance.
- *
- * Select relative tolerance if not supplied by user.
- *
- * @param[in] user_value supplied relative tolerance. -1 designates no user input
- *
- * @return Appropriate relative tolerance.
- */
- float relative_tolenace(float user_value)
+
+ float relative_tolerance() override
{
const std::map<arm_compute::graph::Target, const std::map<DataType, float>> relative_tolerance
{
@@ -406,23 +225,11 @@ private:
}
}
};
- if(user_value == -1)
- {
- return relative_tolerance.at(_params.common_params.target).at(_params.fully_connected.data_type);
- }
- return user_value;
+ return relative_tolerance.at(_params.common_params.target).at(_params.data_type);
}
- /** Select absolute tolerance.
- *
- * Select absolute tolerance if not supplied by user.
- *
- * @param[in] user_value supplied absolute tolerance. -1 designates no user input
- *
- * @return Appropriate absolute tolerance.
- */
- float absolute_tolerance(float user_value)
+ float absolute_tolerance() override
{
const std::map<Target, const std::map<DataType, float>> absolute_tolerance
{
@@ -442,21 +249,10 @@ private:
}
};
- if(user_value == -1)
- {
- return absolute_tolerance.at(_params.common_params.target).at(_params.fully_connected.data_type);
- }
- return user_value;
+ return absolute_tolerance.at(_params.common_params.target).at(_params.data_type);
}
- /** Select tolerance number.
- *
- * Select tolerance number if not supplied by user.
- *
- * @param[in] user_value supplied tolerance number. -1 designates no user input
- *
- * @return Appropriate tolerance number.
- */
- float tolerance_number(float user_value)
+
+ float tolerance_number() override
{
const std::map<Target, const std::map<DataType, float>> absolute_tolerance
{
@@ -476,110 +272,35 @@ private:
}
};
- if(user_value == -1)
- {
- return absolute_tolerance.at(_params.common_params.target).at(_params.fully_connected.data_type);
- }
- return user_value;
+ return absolute_tolerance.at(_params.common_params.target).at(_params.data_type);
}
-
- ExampleParams _params;
};
-/** Generates appropriate fully_connected verify accessor
- *
- * @param[in] params User supplied parameters for fully_connected.
- *
- * @return A fully_connected verify accessor for the requested datatype.
- */
-inline std::unique_ptr<graph::ITensorAccessor> get_fully_connected_verify_accessor(ExampleParams params)
-{
- switch(params.fully_connected.data_type)
- {
- case DataType::QASYMM8:
- {
- return arm_compute::support::cpp14::make_unique<FullyConnectedVerifyAccessor<uint8_t>>(
- params);
- }
- case DataType::F16:
- {
- return arm_compute::support::cpp14::make_unique<FullyConnectedVerifyAccessor<half>>(
- params);
- }
- case DataType::F32:
- {
- return arm_compute::support::cpp14::make_unique<FullyConnectedVerifyAccessor<float>>(
- params);
- }
- default:
- ARM_COMPUTE_ERROR("NOT SUPPORTED!");
- }
-}
-
} // namespace
-class Graphfully_connectedValidateExample final : public ValidateExample
+class GraphFullyConnectedValidateExample final : public GraphValidateExample<FullyConnectedLayer, FullyConnectedOptions, FullyConnectedVerifyAccessor>
{
+ using GraphValidateExample::graph;
+
public:
- Graphfully_connectedValidateExample()
- : graph(0, "fully_connected Graph example")
- {
- }
- bool do_setup(int argc, char **argv) override
+ GraphFullyConnectedValidateExample()
+ : GraphValidateExample("Fully Connected Graph example")
{
- CommandLineParser parser;
-
- FullyConnectedOptions Options(parser);
-
- parser.parse(argc, argv);
-
- ExampleParams params = consume_fully_connected_graph_parameters(Options);
-
- if(params.common_params.help)
- {
- parser.print_help(argv[0]);
- return false;
- }
-
- std::cout << params << std::endl;
-
- // Create input descriptor
- const TensorShape input_shape = TensorShape(params.input.width, params.input.height, params.input.fm, params.input.batch);
- const TensorDescriptor input_descriptor = TensorDescriptor(input_shape, params.fully_connected.data_type, params.input.quant_info, params.fully_connected.data_layout);
-
- const PixelValue lower = PixelValue(params.input.range_low, params.fully_connected.data_type, params.input.quant_info);
- const PixelValue upper = PixelValue(params.input.range_high, params.fully_connected.data_type, params.input.quant_info);
-
- const PixelValue weights_lower = PixelValue(params.weights.range_low, params.fully_connected.data_type, params.weights.quant_info);
- const PixelValue weights_upper = PixelValue(params.weights.range_high, params.fully_connected.data_type, params.weights.quant_info);
-
- graph << params.common_params.target
- << InputLayer(input_descriptor, get_random_accessor(lower, upper, 0))
- << FullyConnectedLayer(params.fully_connected.num_outputs,
- get_random_accessor(weights_lower, weights_upper, 1),
- get_random_accessor(lower, upper, 2),
- params.fully_connected.info, params.weights.quant_info, params.output.quant_info)
- << OutputLayer(get_fully_connected_verify_accessor(params));
-
- GraphConfig config;
- config.num_threads = params.common_params.threads;
-
- graph.finalize(params.common_params.target, config);
-
- return true;
}
- void do_run() override
+ FullyConnectedLayer GraphFunctionLayer(ExampleParams &params) override
{
- graph.run();
- }
+ const PixelValue lower = PixelValue(params.input.range_low, params.data_type, params.input.quant_info);
+ const PixelValue upper = PixelValue(params.input.range_high, params.data_type, params.input.quant_info);
- void do_teardown() override
- {
- }
+ const PixelValue weights_lower = PixelValue(params.weights.range_low, params.data_type, params.weights.quant_info);
+ const PixelValue weights_upper = PixelValue(params.weights.range_high, params.data_type, params.weights.quant_info);
-private:
- Stream graph;
+ return FullyConnectedLayer(params.fully_connected.num_outputs,
+ get_random_accessor(weights_lower, weights_upper, 1),
+ get_random_accessor(lower, upper, 2),
+ params.fully_connected.info, params.weights.quant_info, params.output.quant_info);
+ }
};
/** Main program for Graph fully_connected test
@@ -592,5 +313,5 @@ private:
*/
int main(int argc, char **argv)
{
- return arm_compute::utils::run_example<Graphfully_connectedValidateExample>(argc, argv);
+ return arm_compute::utils::run_example<GraphFullyConnectedValidateExample>(argc, argv);
}
diff --git a/tests/validate_examples/graph_validate_utils.h b/tests/validate_examples/graph_validate_utils.h
new file mode 100644
index 0000000000..485d3c1409
--- /dev/null
+++ b/tests/validate_examples/graph_validate_utils.h
@@ -0,0 +1,695 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __GRAPH_VALIDATE_UTILS_H__
+#define __GRAPH_VALIDATE_UTILS_H__
+
+#include "arm_compute/graph.h"
+
+#include "ValidateExample.h"
+#include "utils/command_line/CommandLineParser.h"
+
+namespace arm_compute
+{
+namespace utils
+{
+/** Available padding modes */
+enum class ConvolutionPaddingMode
+{
+ Valid,
+ Same,
+ Manual
+};
+
+/** Stream input operator for the ConvolutionPaddingMode type
+ *
+ * @param[in] stream Input stream.
+ * @param[out] Mode Convolution padding mode to populate
+ *
+ * @return Updated input stream.
+ */
+inline ::std::istream &operator>>(::std::istream &stream, ConvolutionPaddingMode &Mode)
+{
+ static const std::map<std::string, ConvolutionPaddingMode> modes =
+ {
+ { "valid", ConvolutionPaddingMode::Valid },
+ { "same", ConvolutionPaddingMode::Same },
+ { "manual", ConvolutionPaddingMode::Manual }
+ };
+ std::string value;
+ stream >> value;
+#ifndef ARM_COMPUTE_EXCEPTIONS_DISABLED
+ try
+ {
+#endif /* ARM_COMPUTE_EXCEPTIONS_DISABLED */
+ Mode = modes.at(arm_compute::utility::tolower(value));
+#ifndef ARM_COMPUTE_EXCEPTIONS_DISABLED
+ }
+ catch(const std::out_of_range &)
+ {
+ throw std::invalid_argument(value);
+ }
+#endif /* ARM_COMPUTE_EXCEPTIONS_DISABLED */
+
+ return stream;
+}
+
+/** Formatted output of the ConvolutionPaddingMode type
+ *
+ * @param[out] os Output stream.
+ * @param[in] Mode ConvolutionPaddingMode to output
+ *
+ * @return Modified output stream.
+ */
+inline ::std::ostream &operator<<(::std::ostream &os, ConvolutionPaddingMode Mode)
+{
+ switch(Mode)
+ {
+ case ConvolutionPaddingMode::Valid:
+ os << "Valid";
+ break;
+ case ConvolutionPaddingMode::Same:
+ os << "Same";
+ break;
+ case ConvolutionPaddingMode::Manual:
+ os << "Manual";
+ break;
+ default:
+ throw std::invalid_argument("Unsupported padding mode format");
+ }
+
+ return os;
+}
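+// Illustrative round trip through the stream operators above (usage sketch
+// only): parsing is case-insensitive, printing uses the capitalised names.
+//
+//   std::istringstream in("same");
+//   ConvolutionPaddingMode mode;
+//   in >> mode;                // mode == ConvolutionPaddingMode::Same
+//   std::cout << mode << "\n"; // prints "Same"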
+
+/** Structure holding all the input tensor graph parameters */
+struct TensorParams
+{
+ int width{ 1 };
+ int height{ 1 };
+ int fm{ 1 };
+ int batch{ 1 };
+ QuantizationInfo quant_info{ 1.0f, 0 };
+ std::string npy{};
+ uint64_t range_low{ 0 };
+ uint64_t range_high{ 16 };
+};
+
+/** Structure holding all the verification graph parameters */
+struct VerificationParams
+{
+ float absolute_tolerance{ -1.f };
+ float relative_tolerance{ -1.f };
+ float tolerance_number{ -1.f };
+};
+
+/** Structure holding all the common graph parameters */
+struct FrameworkParams
+{
+ bool help{ false };
+ int threads{ 0 };
+ arm_compute::graph::Target target{ arm_compute::graph::Target::NEON };
+};
+
+/** Structure holding all the graph Example parameters */
+struct CommonParams
+{
+ FrameworkParams common_params{};
+ TensorParams input{};
+ TensorParams weights{};
+ TensorParams bias{};
+ TensorParams output{};
+ VerificationParams verification{};
+ arm_compute::DataType data_type{ DataType::F32 };
+};
+
+/** Structure holding all the Convolution layer graph parameters */
+struct ConvolutionParams
+{
+ int depth_multiplier{ 1 };
+ /** Padding graph parameters */
+ int padding_top{ 0 };
+ int padding_bottom{ 0 };
+ int padding_left{ 0 };
+ int padding_right{ 0 };
+ int padding_stride_x{ 0 };
+ int padding_stride_y{ 0 };
+ ConvolutionPaddingMode padding_mode{ ConvolutionPaddingMode::Valid };
+ struct
+ {
+ struct
+ {
+ int X{ 0 };
+ int Y{ 0 };
+ } stride{};
+ ConvolutionPaddingMode mode{ ConvolutionPaddingMode::Valid };
+ } padding{};
+};
+
+/** Structure holding all the fully_connected layer graph parameters */
+struct FullyConnectedParams
+{
+ FullyConnectedLayerInfo info{};
+ int num_outputs{ 1 };
+};
+
+/** Structure holding all the graph Example parameters */
+struct ExampleParams : public CommonParams
+{
+ FullyConnectedParams fully_connected{};
+ ConvolutionParams convolution{};
+ arm_compute::graph::DepthwiseConvolutionMethod depth_convolution_method{ arm_compute::graph::DepthwiseConvolutionMethod::Default };
+ arm_compute::graph::ConvolutionMethod convolution_method{ arm_compute::graph::ConvolutionMethod::Default };
+ arm_compute::DataLayout data_layout{ DataLayout::NCHW };
+};
+
+/** Calculate stride information.
+ *
+ * Depending on the selected padding mode create the desired PadStrideInfo
+ *
+ * @param[in] params Convolution parameters supplied by the user.
+ *
+ * @return PadStrideInfo with the correct padding mode.
+ */
+inline PadStrideInfo calculate_convolution_padding(ExampleParams params)
+{
+ switch(params.convolution.padding_mode)
+ {
+ case ConvolutionPaddingMode::Manual:
+ {
+ return PadStrideInfo(params.convolution.padding_stride_x, params.convolution.padding_stride_y, params.convolution.padding_left, params.convolution.padding_right, params.convolution.padding_top,
+ params.convolution.padding_bottom, DimensionRoundingType::FLOOR);
+ }
+ case ConvolutionPaddingMode::Valid:
+ {
+ return PadStrideInfo();
+ }
+ case ConvolutionPaddingMode::Same:
+ {
+ return arm_compute::calculate_same_pad(TensorShape(params.input.width, params.input.height), TensorShape(params.weights.width, params.weights.height),
+ PadStrideInfo(params.convolution.padding_stride_x,
+ params.convolution.padding_stride_y));
+ }
+ default:
+ ARM_COMPUTE_ERROR("NOT SUPPORTED!");
+ }
+}
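+
+// Illustrative sketch: with ConvolutionPaddingMode::Manual the user supplied
+// strides and per-side paddings are forwarded directly, e.g. a 2x2 stride with
+// one pixel of padding on every side corresponds to
+//
+//   PadStrideInfo(2 /* stride_x */, 2 /* stride_y */, 1, 1, 1, 1, DimensionRoundingType::FLOOR);
+//
+// while ConvolutionPaddingMode::Same derives the padding from the input and
+// weights shapes via arm_compute::calculate_same_pad().
+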
+/** CommonGraphValidateOptions command line options used to configure the graph examples
+ *
+ * (Similar to common options)
+ * The options in this object get populated when "parse()" is called on the parser used to construct it.
+ * The expected workflow is:
+ *
+ * CommandLineParser parser;
+ * CommonOptions options( parser );
+ * parser.parse(argc, argv);
+ */
+class CommonGraphValidateOptions
+{
+public:
+ explicit CommonGraphValidateOptions(CommandLineParser &parser) noexcept
+ : help(parser.add_option<ToggleOption>("help")),
+ threads(parser.add_option<SimpleOption<int>>("threads")),
+ target(),
+ data_type(),
+ absolute_tolerance(parser.add_option<SimpleOption<float>>("abs_tolerance", -1.0f)),
+ relative_tolerance(parser.add_option<SimpleOption<float>>("rel_tolerance", -1.0f)),
+ tolerance_number(parser.add_option<SimpleOption<float>>("tolerance_num", -1.0f))
+ {
+ const std::set<arm_compute::graph::Target> supported_targets
+ {
+ arm_compute::graph::Target::NEON,
+ arm_compute::graph::Target::CL,
+ arm_compute::graph::Target::GC,
+ };
+
+ const std::set<arm_compute::DataType> supported_data_types
+ {
+ DataType::F16,
+ DataType::F32,
+ DataType::QASYMM8,
+ };
+
+ target = parser.add_option<EnumOption<arm_compute::graph::Target>>("target", supported_targets, arm_compute::graph::Target::NEON);
+ data_type = parser.add_option<EnumOption<DataType>>("type", supported_data_types, DataType::F32);
+
+ target->set_help("Target to execute on");
+ data_type->set_help("Data type to use");
+ help->set_help("Show this help message");
+ absolute_tolerance->set_help("Absolute tolerance used for verification");
+ relative_tolerance->set_help("Relative tolerance used for verification");
+ tolerance_number->set_help("Tolerance number used for verification");
+ }
+
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ CommonGraphValidateOptions(const CommonGraphValidateOptions &) = delete;
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ CommonGraphValidateOptions &operator=(const CommonGraphValidateOptions &) = delete;
+ /** Allow instances of this class to be moved */
+ CommonGraphValidateOptions(CommonGraphValidateOptions &&) noexcept(true) = default;
+ /** Allow instances of this class to be moved */
+ CommonGraphValidateOptions &operator=(CommonGraphValidateOptions &&) noexcept(true) = default;
+ /** Default destructor */
+ virtual ~CommonGraphValidateOptions() = default;
+
+ void consume_common_parameters(CommonParams &common_params)
+ {
+ common_params.common_params.help = help->is_set() ? help->value() : false;
+ common_params.common_params.threads = threads->value();
+ common_params.common_params.target = target->value();
+
+ common_params.verification.absolute_tolerance = absolute_tolerance->value();
+ common_params.verification.relative_tolerance = relative_tolerance->value();
+ common_params.verification.tolerance_number = tolerance_number->value();
+ }
+
+ /** Formatted output of the ExampleParams type
+ *
+ * @param[out] os Output stream.
+ * @param[in] common_params Example parameters to output
+ *
+ * @return None.
+ */
+ virtual void print_parameters(::std::ostream &os, const ExampleParams &common_params)
+ {
+ os << "Threads : " << common_params.common_params.threads << std::endl;
+ os << "Target : " << common_params.common_params.target << std::endl;
+ os << "Data type : " << common_params.data_type << std::endl;
+ }
+
+ ToggleOption *help; /**< show help message */
+ SimpleOption<int> *threads; /**< Number of threads option */
+ EnumOption<arm_compute::graph::Target> *target; /**< Graph execution target */
+ EnumOption<arm_compute::DataType> *data_type; /**< Graph data type */
+ SimpleOption<float> *absolute_tolerance; /**< Absolute tolerance used in verification */
+ SimpleOption<float> *relative_tolerance; /**< Relative tolerance used in verification */
+ SimpleOption<float> *tolerance_number; /**< Tolerance number used in verification */
+};
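+
+// Example invocation of a validate example built on these options (the binary
+// name is illustrative; the option names match those registered above):
+//
+//   <validate_example> --target=NEON --type=F32 --threads=4 --abs_tolerance=0.001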
+
+/** Consumes the common graph options and populates the provided parameters structure
+ *
+ * @param[in] options Options to consume
+ * @param[out] common_params Parameters structure to populate
+ *
+ * @return None.
+ */
+inline void consume_common_graph_parameters(CommonGraphValidateOptions &options, CommonParams &common_params)
+{
+ common_params.common_params.help = options.help->is_set() ? options.help->value() : false;
+ common_params.common_params.threads = options.threads->value();
+ common_params.common_params.target = options.target->value();
+
+ common_params.verification.absolute_tolerance = options.absolute_tolerance->value();
+ common_params.verification.relative_tolerance = options.relative_tolerance->value();
+ common_params.verification.tolerance_number = options.tolerance_number->value();
+}
+
+/** Generates appropriate accessor according to the specified graph parameters
+ *
+ * @param[in] tensor Tensor parameters
+ * @param[in] lower Lower random values bound
+ * @param[in] upper Upper random values bound
+ * @param[in] seed Random generator seed
+ *
+ * @return An appropriate tensor accessor
+ */
+inline std::unique_ptr<graph::ITensorAccessor> get_accessor(const TensorParams &tensor, PixelValue lower, PixelValue upper, const std::random_device::result_type seed = 0)
+{
+ if(!tensor.npy.empty())
+ {
+ return arm_compute::support::cpp14::make_unique<arm_compute::graph_utils::NumPyBinLoader>(tensor.npy);
+ }
+ else
+ {
+ return arm_compute::support::cpp14::make_unique<arm_compute::graph_utils::RandomAccessor>(lower, upper, seed);
+ }
+}
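+// Illustrative usage (as wired up in GraphValidateExample::do_setup() below):
+// when the tensor has no .npy file a RandomAccessor over [lower, upper] with
+// the given seed is returned, otherwise the .npy file is loaded.
+//
+//   const PixelValue lower(params.input.range_low, params.data_type, params.input.quant_info);
+//   const PixelValue upper(params.input.range_high, params.data_type, params.input.quant_info);
+//   auto input_accessor = get_accessor(params.input, lower, upper, 0);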
+
+/** Graph example validation accessor class */
+template <typename D>
+class VerifyAccessor : public graph::ITensorAccessor
+{
+public:
+ using TBias = typename std::conditional<std::is_same<typename std::decay<D>::type, uint8_t>::value, int32_t, D>::type;
+ /** Constructor
+ *
+ * @param[in] params Example parameters
+ */
+ explicit VerifyAccessor(ExampleParams &params)
+ : _params(std::move(params))
+ {
+ }
+ // Inherited methods overridden:
+ bool access_tensor(ITensor &tensor) override
+ {
+ if(_params.output.npy.empty())
+ {
+ arm_compute::test::SimpleTensor<D> src;
+ arm_compute::test::SimpleTensor<D> weights;
+ arm_compute::test::SimpleTensor<TBias> bias;
+
+ //Create Input tensors
+ create_tensors(src, weights, bias, tensor);
+
+ //Fill the tensors with random values
+ fill_tensor(src, 0, static_cast<D>(_params.input.range_low), static_cast<D>(_params.input.range_high));
+ fill_tensor(weights, 1, static_cast<D>(_params.weights.range_low), static_cast<D>(_params.weights.range_high));
+ fill_tensor(bias, 2, static_cast<TBias>(_params.input.range_low), static_cast<TBias>(_params.input.range_high));
+
+ arm_compute::test::SimpleTensor<D> output = reference(src, weights, bias, output_shape(tensor));
+
+ validate(tensor, output);
+ }
+ else
+ {
+ //The user provided a reference file use an npy accessor to validate
+ arm_compute::graph_utils::NumPyAccessor(_params.output.npy, tensor.info()->tensor_shape(), tensor.info()->data_type()).access_tensor(tensor);
+ }
+ return false;
+ }
+
+ /** Create the reference tensors.
+ *
+ * Creates the source, weights and bias tensors that the reference implementation is run on.
+ *
+ * @param[out] src The tensor with the source data.
+ * @param[out] weights The tensor with the weights data.
+ * @param[out] bias The tensor with the bias data.
+ * @param[in] tensor Tensor result of the actual operation passed into the Accessor.
+ *
+ * @return None.
+ */
+ virtual void create_tensors(arm_compute::test::SimpleTensor<D> &src,
+ arm_compute::test::SimpleTensor<D> &weights,
+ arm_compute::test::SimpleTensor<TBias> &bias,
+ ITensor &tensor)
+ {
+ //Create Input tensors
+ src = arm_compute::test::SimpleTensor<D> { TensorShape(_params.input.width, _params.input.height, _params.input.fm, _params.input.batch), _params.data_type, 1, _params.input.quant_info };
+ weights = arm_compute::test::SimpleTensor<D> { TensorShape(_params.weights.width, _params.weights.height, _params.weights.fm), _params.data_type, 1, _params.weights.quant_info };
+ bias = arm_compute::test::SimpleTensor<TBias> { TensorShape(_params.input.height), _params.data_type, 1, _params.input.quant_info };
+ }
+
+ /** Calculate reference output tensor shape.
+ *
+ * @param[in] tensor Tensor result of the actual operation passed into the Accessor.
+ *
+ * @return output tensor shape.
+ */
+ virtual TensorShape output_shape(ITensor &tensor)
+ {
+ return arm_compute::graph_utils::permute_shape(tensor.info()->tensor_shape(), _params.data_layout, DataLayout::NCHW);
+ }
+
+ /** Calculate the reference tensor.
+ *
+ * Runs the reference implementation of the layer on the supplied inputs.
+ *
+ * @param[in] src The tensor with the source data.
+ * @param[in] weights The tensor with the weights data.
+ * @param[in] bias The tensor with the bias data.
+ * @param[in] output_shape Shape of the output tensor.
+ *
+ * @return Tensor with the reference output.
+ */
+ virtual arm_compute::test::SimpleTensor<D> reference(arm_compute::test::SimpleTensor<D> &src,
+ arm_compute::test::SimpleTensor<D> &weights,
+ arm_compute::test::SimpleTensor<TBias> &bias,
+ const arm_compute::TensorShape &output_shape) = 0;
+
+ /** Fill a QASYMM8 tensor with random values.
+ *
+ * @param[out] tensor The tensor to fill
+ * @param[in] seed Seed for the randomization function
+ * @param[in] low Lower bound for random values
+ * @param[in] high Upper bound for random values
+ *
+ * @return None.
+ */
+ void fill_tensor(arm_compute::test::SimpleTensor<uint8_t> &tensor, std::random_device::result_type seed, uint8_t low, uint8_t high)
+ {
+ ARM_COMPUTE_ERROR_ON(tensor.data_type() != arm_compute::DataType::QASYMM8);
+
+ std::mt19937 gen(seed);
+
+ uint8_t qasymm8_low = tensor.quantization_info().quantize(low, RoundingPolicy::TO_NEAREST_UP);
+ uint8_t qasymm8_high = tensor.quantization_info().quantize(high, RoundingPolicy::TO_NEAREST_UP);
+
+ std::uniform_int_distribution<uint8_t> distribution(qasymm8_low, qasymm8_high);
+
+ for(int i = 0; i < tensor.num_elements(); ++i)
+ {
+ tensor[i] = tensor.quantization_info().quantize(distribution(gen), RoundingPolicy::TO_NEAREST_UP);
+ }
+ }
+ /** Fill an S32 tensor with random values.
+ *
+ * @param[out] tensor The tensor to fill
+ * @param[in] seed Seed for the randomization function
+ * @param[in] low Lower bound for random values
+ * @param[in] high Upper bound for random values
+ *
+ * @return None.
+ */
+ void fill_tensor(arm_compute::test::SimpleTensor<int32_t> &tensor, std::random_device::result_type seed, int32_t low, int32_t high)
+ {
+ std::mt19937 gen(seed);
+ std::uniform_int_distribution<int32_t> distribution(static_cast<int32_t>(low), static_cast<int32_t>(high));
+
+ for(int i = 0; i < tensor.num_elements(); ++i)
+ {
+ tensor[i] = distribution(gen);
+ }
+ }
+ /** Fill an F32 tensor with random values.
+ *
+ * @param[out] tensor The tensor to fill
+ * @param[in] seed Seed for the randomization function
+ * @param[in] low Lower bound for random values
+ * @param[in] high Upper bound for random values
+ *
+ * @return None.
+ */
+ void fill_tensor(arm_compute::test::SimpleTensor<float> &tensor, std::random_device::result_type seed, float low, float high)
+ {
+ ARM_COMPUTE_ERROR_ON(tensor.data_type() != arm_compute::DataType::F32);
+ std::mt19937 gen(seed);
+ std::uniform_real_distribution<float> distribution(low, high);
+
+ for(int i = 0; i < tensor.num_elements(); ++i)
+ {
+ tensor[i] = distribution(gen);
+ }
+ }
+ /** Fill an F16 tensor with random values.
+ *
+ * @param[out] tensor The tensor to fill
+ * @param[in] seed Seed for the randomization function
+ * @param[in] low Lower bound for random values
+ * @param[in] high Upper bound for random values
+ *
+ * @return None.
+ */
+ void fill_tensor(arm_compute::test::SimpleTensor<half> &tensor, std::random_device::result_type seed, half low, half high)
+ {
+ ARM_COMPUTE_ERROR_ON(tensor.data_type() != arm_compute::DataType::F16);
+ std::mt19937 gen(seed);
+ std::uniform_real_distribution<float> distribution(static_cast<half>(low), static_cast<half>(high));
+
+ for(int i = 0; i < tensor.num_elements(); ++i)
+ {
+ tensor[i] = static_cast<half>(distribution(gen));
+ }
+ }
+
+ /** Select relative tolerance.
+ *
+ * Select relative tolerance if not supplied by user.
+ *
+ * @return Appropriate relative tolerance.
+ */
+ virtual float relative_tolerance() = 0;
+
+ /** Select absolute tolerance.
+ *
+ * Select absolute tolerance if not supplied by user.
+ *
+ * @return Appropriate absolute tolerance.
+ */
+ virtual float absolute_tolerance() = 0;
+
+ /** Select tolerance number.
+ *
+ * Select tolerance number if not supplied by user.
+ *
+ * @return Appropriate tolerance number.
+ */
+ virtual float tolerance_number() = 0;
+
+ /** Validate the output versus the reference.
+ *
+ * @param[in] tensor Tensor result of the actual operation passed into the Accessor.
+ * @param[in] output Tensor result of the reference implementation.
+ *
+ * @return None.
+ */
+ void validate(ITensor &tensor, arm_compute::test::SimpleTensor<D> output)
+ {
+ float user_relative_tolerance = _params.verification.relative_tolerance;
+ float user_absolute_tolerance = _params.verification.absolute_tolerance;
+ float user_tolerance_num = _params.verification.tolerance_number;
+ /* If no user input was provided override with defaults. */
+ if(user_relative_tolerance == -1)
+ {
+ user_relative_tolerance = relative_tolerance();
+ }
+
+ if(user_absolute_tolerance == -1)
+ {
+ user_absolute_tolerance = absolute_tolerance();
+ }
+
+ if(user_tolerance_num == -1)
+ {
+ user_tolerance_num = tolerance_number();
+ }
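+ // Tolerances supplied on the command line take precedence (e.g. passing
+ // something like --rel_tolerance=0.05); the default of -1 falls back to the
+ // per-target/per-data-type values returned by the concrete accessor's
+ // relative_tolerance()/absolute_tolerance()/tolerance_number() overrides.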
+
+ const arm_compute::test::validation::RelativeTolerance<float> rel_tolerance(user_relative_tolerance); /**< Relative tolerance */
+ const arm_compute::test::validation::AbsoluteTolerance<float> abs_tolerance(user_absolute_tolerance); /**< Absolute tolerance */
+ const float tolerance_num(user_tolerance_num); /**< Tolerance number */
+
+ arm_compute::test::validation::validate(arm_compute::test::Accessor(tensor), output, rel_tolerance, tolerance_num, abs_tolerance);
+ }
+
+ ExampleParams _params;
+};
+
+/** Generates the appropriate verify accessor for the requested data type
+ *
+ * @param[in] params User supplied example parameters.
+ *
+ * @return A verify accessor of the requested template, specialized for the selected data type.
+ */
+template <template <typename D> class VerifyAccessorT>
+inline std::unique_ptr<graph::ITensorAccessor> get_verify_accessor(ExampleParams params)
+{
+ switch(params.data_type)
+ {
+ case DataType::QASYMM8:
+ {
+ return arm_compute::support::cpp14::make_unique<VerifyAccessorT<uint8_t>>(
+ params);
+ }
+ case DataType::F16:
+ {
+ return arm_compute::support::cpp14::make_unique<VerifyAccessorT<half>>(
+ params);
+ }
+ case DataType::F32:
+ {
+ return arm_compute::support::cpp14::make_unique<VerifyAccessorT<float>>(
+ params);
+ }
+ default:
+ ARM_COMPUTE_ERROR("NOT SUPPORTED!");
+ }
+}
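+
+// Illustrative usage (mirrors the OutputLayer wiring in GraphValidateExample
+// below), e.g. for the fully connected example:
+//
+//   graph << OutputLayer(get_verify_accessor<FullyConnectedVerifyAccessor>(params));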
+
+template <typename LayerT, typename OptionsT, template <typename D> class VerifyAccessorT>
+class GraphValidateExample : public ValidateExample
+{
+public:
+ GraphValidateExample(std::string name)
+ : graph(0, name)
+ {
+ }
+
+ virtual LayerT GraphFunctionLayer(ExampleParams &params) = 0;
+
+ bool do_setup(int argc, char **argv) override
+ {
+ CommandLineParser parser;
+
+ OptionsT Options(parser);
+
+ parser.parse(argc, argv);
+
+ ExampleParams params;
+
+ Options.consume_common_parameters(params);
+ Options.consume_parameters(params);
+
+ if(params.common_params.help)
+ {
+ parser.print_help(argv[0]);
+ return false;
+ }
+
+ Options.print_parameters(std::cout, params);
+ // Create input descriptor
+ const TensorShape input_shape = arm_compute::graph_utils::permute_shape(TensorShape(params.input.width, params.input.height, params.input.fm, params.input.batch),
+ DataLayout::NCHW, params.data_layout);
+ arm_compute::graph::TensorDescriptor input_descriptor = arm_compute::graph::TensorDescriptor(input_shape, params.data_type, params.input.quant_info, params.data_layout);
+
+ const PixelValue lower = PixelValue(params.input.range_low, params.data_type, params.input.quant_info);
+ const PixelValue upper = PixelValue(params.input.range_high, params.data_type, params.input.quant_info);
+
+ graph << params.common_params.target
+ << params.convolution_method
+ << params.depth_convolution_method
+ << arm_compute::graph::frontend::InputLayer(input_descriptor, get_accessor(params.input, lower, upper, 0))
+ << GraphFunctionLayer(params)
+ << arm_compute::graph::frontend::OutputLayer(get_verify_accessor<VerifyAccessorT>(params));
+
+ arm_compute::graph::GraphConfig config;
+ config.num_threads = params.common_params.threads;
+
+ graph.finalize(params.common_params.target, config);
+
+ return true;
+ }
+
+ void do_run() override
+ {
+ graph.run();
+ }
+
+ void do_teardown() override
+ {
+ }
+
+ arm_compute::graph::frontend::Stream graph;
+};
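+
+// Illustrative sketch of a concrete example (see graph_fully_connected.cpp):
+// a layer-specific example names its layer, options and verify accessor types
+// and only implements GraphFunctionLayer():
+//
+//   class GraphFullyConnectedValidateExample final
+//       : public GraphValidateExample<FullyConnectedLayer, FullyConnectedOptions, FullyConnectedVerifyAccessor>
+//   {
+//   public:
+//       GraphFullyConnectedValidateExample() : GraphValidateExample("Fully Connected Graph example") {}
+//       FullyConnectedLayer GraphFunctionLayer(ExampleParams &params) override;
+//   };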
+
+} // namespace utils
+} // namespace arm_compute
+#endif //__GRAPH_VALIDATE_UTILS_H__
diff --git a/tests/validation/reference/DepthwiseConvolutionLayer.cpp b/tests/validation/reference/DepthwiseConvolutionLayer.cpp
index f27610afb8..122dbd4d98 100644
--- a/tests/validation/reference/DepthwiseConvolutionLayer.cpp
+++ b/tests/validation/reference/DepthwiseConvolutionLayer.cpp
@@ -50,8 +50,10 @@ namespace reference
*/
template <typename T, typename TB>
SimpleTensor<T> depthwise_convolution(const SimpleTensor<T> &src, const SimpleTensor<T> &weights, const SimpleTensor<TB> &biases, const TensorShape &dst_shape, const PadStrideInfo &conv_info,
- unsigned int depth_multiplier, const Size2D &dilation)
+ unsigned int depth_multiplier, const Size2D &dilation, QuantizationInfo out_quant_info)
{
+ ARM_COMPUTE_UNUSED(out_quant_info);
+
SimpleTensor<T> dst{ dst_shape, src.data_type(), 1 };
// Compute reference
@@ -119,9 +121,14 @@ SimpleTensor<T> depthwise_convolution(const SimpleTensor<T> &src, const SimpleTe
template <>
SimpleTensor<uint8_t> depthwise_convolution(const SimpleTensor<uint8_t> &src, const SimpleTensor<uint8_t> &weights, const SimpleTensor<int32_t> &biases, const TensorShape &dst_shape,
- const PadStrideInfo &conv_info, unsigned int depth_multiplier, const Size2D &dilation)
+ const PadStrideInfo &conv_info, unsigned int depth_multiplier, const Size2D &dilation, QuantizationInfo out_quant_info)
{
- SimpleTensor<uint8_t> dst{ dst_shape, src.data_type(), 1, src.quantization_info() };
+ // If no explicit output quantization has been set, use the same quantization as src
+ if(out_quant_info == QuantizationInfo(0.0f, 0))
+ {
+ out_quant_info = src.quantization_info();
+ }
+ SimpleTensor<uint8_t> dst{ dst_shape, src.data_type(), 1, out_quant_info };
// Create reference
const int input_offset = -src.quantization_info().offset;
@@ -206,10 +213,10 @@ SimpleTensor<uint8_t> depthwise_convolution(const SimpleTensor<uint8_t> &src, co
}
template SimpleTensor<float> depthwise_convolution(const SimpleTensor<float> &src, const SimpleTensor<float> &weights, const SimpleTensor<float> &biases, const TensorShape &dst_shape,
- const PadStrideInfo &conv_info, unsigned int depth_multiplier, const Size2D &dilation);
+ const PadStrideInfo &conv_info, unsigned int depth_multiplier, const Size2D &dilation, QuantizationInfo out_quant_info);
template SimpleTensor<half> depthwise_convolution(const SimpleTensor<half> &src, const SimpleTensor<half> &weights, const SimpleTensor<half> &biases, const TensorShape &dst_shape,
- const PadStrideInfo &conv_info, unsigned int depth_multiplier, const Size2D &dilation);
+ const PadStrideInfo &conv_info, unsigned int depth_multiplier, const Size2D &dilation, QuantizationInfo out_quant_info);
} // namespace reference
} // namespace validation
} // namespace test
diff --git a/tests/validation/reference/DepthwiseConvolutionLayer.h b/tests/validation/reference/DepthwiseConvolutionLayer.h
index 2146611d13..ac70de02ca 100644
--- a/tests/validation/reference/DepthwiseConvolutionLayer.h
+++ b/tests/validation/reference/DepthwiseConvolutionLayer.h
@@ -37,7 +37,7 @@ namespace reference
{
template <typename T, typename TB>
SimpleTensor<T> depthwise_convolution(const SimpleTensor<T> &src, const SimpleTensor<T> &weights, const SimpleTensor<TB> &biases, const TensorShape &dst_shape, const PadStrideInfo &conv_info,
- unsigned int depth_multiplier, const Size2D &dilation = Size2D(1U, 1U));
+ unsigned int depth_multiplier, const Size2D &dilation = Size2D(1U, 1U), QuantizationInfo out_quant_info = QuantizationInfo(0.0f, 0));
} // namespace reference
} // namespace validation
} // namespace test