author:    Georgios Pinitas <georgios.pinitas@arm.com>    2018-07-03 12:06:23 +0100
committer: Anthony Barbier <anthony.barbier@arm.com>      2018-11-02 16:54:10 +0000
commit:    12be7ab4876f77fecfab903df70791623219b3da
tree:      1cfa6852e60948bee9db0831a9f3abc97a2031c8 /examples
parent:    e39334c15c7fd141bb8173d5017ea5ca157fca2c
COMPMID-1310: Create graph validation executables.
Change-Id: I9e0b57b1b83fe5a95777cdaeddba6ecef650bafc
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/138697
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
Tested-by: Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'examples')
-rw-r--r--  examples/SConscript                        |    1
-rw-r--r--  examples/cl_convolution.cpp                |    4
-rw-r--r--  examples/cl_events.cpp                     |    4
-rw-r--r--  examples/cl_sgemm.cpp                      |    4
-rw-r--r--  examples/gc_absdiff.cpp                    |    4
-rw-r--r--  examples/gc_dc.cpp                         |    6
-rw-r--r--  examples/graph_alexnet.cpp                 |  109
-rw-r--r--  examples/graph_googlenet.cpp               |  106
-rw-r--r--  examples/graph_inception_v3.cpp            |  104
-rw-r--r--  examples/graph_inception_v4.cpp            |  114
-rw-r--r--  examples/graph_lenet.cpp                   |   86
-rw-r--r--  examples/graph_mobilenet.cpp               |  336
-rw-r--r--  examples/graph_mobilenet_qasymm8.cpp       |  239
-rw-r--r--  examples/graph_resnet50.cpp                |  106
-rw-r--r--  examples/graph_resnext50.cpp               |   97
-rw-r--r--  examples/graph_squeezenet.cpp              |  107
-rw-r--r--  examples/graph_squeezenet_v1_1.cpp         |  106
-rw-r--r--  examples/graph_vgg16.cpp                   |  105
-rw-r--r--  examples/graph_vgg19.cpp                   |  106
-rw-r--r--  examples/neon_cartoon_effect.cpp           |    4
-rw-r--r--  examples/neon_cnn.cpp                      |    6
-rw-r--r--  examples/neon_convolution.cpp              |    4
-rw-r--r--  examples/neon_copy_objects.cpp             |    6
-rw-r--r--  examples/neon_scale.cpp                    |    4
-rw-r--r--  examples/neoncl_scale_median_gaussian.cpp  |    4
25 files changed, 717 insertions, 1055 deletions
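
For orientation, every graph example in this patch is migrated to the same setup pattern: do_setup() now returns bool, argument handling moves to CommandLineParser/CommonGraphOptions, and the target, data type, fast-math hint, thread count and tuner flag are read from the consumed CommonGraphParams. The sketch below condenses that pattern from the hunks that follow; the class name, network name, input shape and layer body are illustrative placeholders rather than part of the patch, and main() simply reuses the examples' existing run_example() helper from utils/Utils.h (not shown in this diff).

    #include "arm_compute/graph.h"
    #include "utils/CommonGraphOptions.h"
    #include "utils/GraphUtils.h"
    #include "utils/Utils.h"

    using namespace arm_compute::utils;
    using namespace arm_compute::graph::frontend;
    using namespace arm_compute::graph_utils;

    // Illustrative skeleton of the per-example changes in this patch
    // (class name and network body are placeholders).
    class GraphExampleSkeleton : public Example
    {
    public:
        GraphExampleSkeleton()
            : cmd_parser(), common_opts(cmd_parser), common_params(), graph(0, "ExampleNet")
        {
        }
        bool do_setup(int argc, char **argv) override
        {
            // Parse and consume the shared command-line options
            cmd_parser.parse(argc, argv);
            common_params = consume_common_graph_parameters(common_opts);

            // Return false (skip do_run) when the help menu is requested
            if(common_params.help)
            {
                cmd_parser.print_help(argv[0]);
                return false;
            }

            // Build the network from the consumed parameters
            graph << common_params.target
                  << common_params.fast_math_hint
                  << InputLayer(TensorDescriptor(TensorShape(224U, 224U, 3U, 1U), common_params.data_type),
                                get_input_accessor(common_params))
                  /* ... network layers ... */
                  << OutputLayer(get_output_accessor(common_params, 5));

            // Finalize with the shared thread-count and tuner settings
            GraphConfig config;
            config.num_threads = common_params.threads;
            config.use_tuner   = common_params.enable_tuner;
            graph.finalize(common_params.target, config);

            return true;
        }
        void do_run() override
        {
            graph.run();
        }

    private:
        CommandLineParser  cmd_parser;
        CommonGraphOptions common_opts;
        CommonGraphParams  common_params;
        Stream             graph;
    };

    int main(int argc, char **argv)
    {
        return arm_compute::utils::run_example<GraphExampleSkeleton>(argc, argv);
    }
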
diff --git a/examples/SConscript b/examples/SConscript
index c3576fb1a0..bada734659 100644
--- a/examples/SConscript
+++ b/examples/SConscript
@@ -47,6 +47,7 @@ else:
# Build graph examples
graph_utils = examples_env.Object("../utils/GraphUtils.cpp")
+graph_utils += examples_env.Object("../utils/CommonGraphOptions.cpp")
examples_libs = examples_env.get("LIBS",[])
for file in Glob("./graph_*.cpp"):
example = os.path.basename(os.path.splitext(str(file))[0])
diff --git a/examples/cl_convolution.cpp b/examples/cl_convolution.cpp
index 8f3d7e3a30..b15bbb6cb4 100644
--- a/examples/cl_convolution.cpp
+++ b/examples/cl_convolution.cpp
@@ -57,7 +57,7 @@ const int16_t gaussian5x5[] =
class CLConvolutionExample : public Example
{
public:
- void do_setup(int argc, char **argv) override
+ bool do_setup(int argc, char **argv) override
{
PPMLoader ppm;
@@ -95,6 +95,8 @@ public:
ppm.fill_image(src);
output_filename = std::string(argv[1]) + "_out.ppm";
}
+
+ return true;
}
void do_run() override
{
diff --git a/examples/cl_events.cpp b/examples/cl_events.cpp
index 4a46df961c..a9c508ac58 100644
--- a/examples/cl_events.cpp
+++ b/examples/cl_events.cpp
@@ -37,7 +37,7 @@ using namespace utils;
class CLEventsExample : public Example
{
public:
- void do_setup(int argc, char **argv) override
+ bool do_setup(int argc, char **argv) override
{
/** [OpenCL events] **/
PPMLoader ppm;
@@ -84,6 +84,8 @@ public:
output_filename = std::string(argv[1]) + "_out.ppm";
}
/** [OpenCL events] **/
+
+ return true;
}
void do_run() override
{
diff --git a/examples/cl_sgemm.cpp b/examples/cl_sgemm.cpp
index fa57885450..805aec1cf3 100644
--- a/examples/cl_sgemm.cpp
+++ b/examples/cl_sgemm.cpp
@@ -39,7 +39,7 @@ using namespace utils;
class CLSGEMMExample : public Example
{
public:
- void do_setup(int argc, char **argv) override
+ bool do_setup(int argc, char **argv) override
{
NPYLoader npy0, npy1, npy2;
alpha = 1.0f;
@@ -164,6 +164,8 @@ public:
// Dummy run for CLTuner
sgemm.run();
+
+ return true;
}
void do_run() override
{
diff --git a/examples/gc_absdiff.cpp b/examples/gc_absdiff.cpp
index 1024dace26..f53459231f 100644
--- a/examples/gc_absdiff.cpp
+++ b/examples/gc_absdiff.cpp
@@ -38,7 +38,7 @@ using namespace utils;
class GCAbsDiffExample : public Example
{
public:
- void do_setup(int argc, char **argv) override
+ bool do_setup(int argc, char **argv) override
{
PPMLoader ppm1, ppm2;
@@ -90,6 +90,8 @@ public:
{
ppm2.fill_image(src2);
}
+
+ return true;
}
void do_run() override
{
diff --git a/examples/gc_dc.cpp b/examples/gc_dc.cpp
index 8b6f4414e0..f3f194252e 100644
--- a/examples/gc_dc.cpp
+++ b/examples/gc_dc.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, 2018 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -36,7 +36,7 @@ using namespace utils;
class GCDCExample : public Example
{
public:
- void do_setup(int argc, char **argv) override
+ bool do_setup(int argc, char **argv) override
{
ARM_COMPUTE_UNUSED(argc);
ARM_COMPUTE_UNUSED(argv);
@@ -86,6 +86,8 @@ public:
*reinterpret_cast<half_float::half *>(it.ptr()) = half_float::half(1.f);
});
src.unmap();
+
+ return true;
}
void do_run() override
{
diff --git a/examples/graph_alexnet.cpp b/examples/graph_alexnet.cpp
index 5328662b6d..95d36342f9 100644
--- a/examples/graph_alexnet.cpp
+++ b/examples/graph_alexnet.cpp
@@ -23,13 +23,10 @@
*/
#include "arm_compute/graph.h"
#include "support/ToolchainSupport.h"
+#include "utils/CommonGraphOptions.h"
#include "utils/GraphUtils.h"
#include "utils/Utils.h"
-#include <cstdlib>
-#include <iostream>
-#include <memory>
-
using namespace arm_compute::utils;
using namespace arm_compute::graph::frontend;
using namespace arm_compute::graph_utils;
@@ -37,71 +34,47 @@ using namespace arm_compute::graph_utils;
/** Example demonstrating how to implement AlexNet's network using the Compute Library's graph API
*
* @param[in] argc Number of arguments
- * @param[in] argv Arguments ( [optional] Target (0 = NEON, 1 = OpenCL, 2 = OpenCL with Tuner), [optional] Path to the weights folder, [optional] image, [optional] labels, [optional] Fast math for convolution layer (0 = DISABLED, 1 = ENABLED) )
+ * @param[in] argv Arguments
*/
class GraphAlexnetExample : public Example
{
public:
- void do_setup(int argc, char **argv) override
+ GraphAlexnetExample()
+ : cmd_parser(), common_opts(cmd_parser), common_params(), graph(0, "AlexNet")
{
- std::string data_path; /* Path to the trainable data */
- std::string image; /* Image data */
- std::string label; /* Label data */
-
- // Create a preprocessor object
- const std::array<float, 3> mean_rgb{ { 122.68f, 116.67f, 104.01f } };
- std::unique_ptr<IPreprocessor> preprocessor = arm_compute::support::cpp14::make_unique<CaffePreproccessor>(mean_rgb);
+ }
+ bool do_setup(int argc, char **argv) override
+ {
+ // Parse arguments
+ cmd_parser.parse(argc, argv);
- // Set target. 0 (NEON), 1 (OpenCL), 2 (OpenCL with Tuner). By default it is NEON
- const int target = argc > 1 ? std::strtol(argv[1], nullptr, 10) : 0;
- Target target_hint = set_target_hint(target);
- FastMathHint fast_math_hint = FastMathHint::DISABLED;
+ // Consume common parameters
+ common_params = consume_common_graph_parameters(common_opts);
- // Parse arguments
- if(argc < 2)
- {
- // Print help
- std::cout << "Usage: " << argv[0] << " [target] [path_to_data] [image] [labels] [fast_math_hint]\n\n";
- std::cout << "No data folder provided: using random values\n\n";
- }
- else if(argc == 2)
- {
- std::cout << "Usage: " << argv[0] << " " << argv[1] << " [path_to_data] [image] [labels] [fast_math_hint]\n\n";
- std::cout << "No data folder provided: using random values\n\n";
- }
- else if(argc == 3)
- {
- data_path = argv[2];
- std::cout << "Usage: " << argv[0] << " " << argv[1] << " " << argv[2] << " [image] [labels] [fast_math_hint]\n\n";
- std::cout << "No image provided: using random values\n\n";
- }
- else if(argc == 4)
- {
- data_path = argv[2];
- image = argv[3];
- std::cout << "Usage: " << argv[0] << " " << argv[1] << " " << argv[2] << " " << argv[3] << " [labels] [fast_math_hint]\n\n";
- std::cout << "No text file with labels provided: skipping output accessor\n\n";
- }
- else if(argc == 5)
+ // Return when help menu is requested
+ if(common_params.help)
{
- data_path = argv[2];
- image = argv[3];
- label = argv[4];
- std::cout << "Usage: " << argv[0] << " " << argv[1] << " " << argv[2] << " " << argv[3] << " " << argv[4] << " [fast_math_hint]\n\n";
- std::cout << "No fast math info provided: disabling fast math\n\n";
- }
- else
- {
- data_path = argv[2];
- image = argv[3];
- label = argv[4];
- fast_math_hint = (std::strtol(argv[5], nullptr, 1) == 0) ? FastMathHint::DISABLED : FastMathHint::ENABLED;
+ cmd_parser.print_help(argv[0]);
+ return false;
}
- graph << target_hint
- << fast_math_hint
- << InputLayer(TensorDescriptor(TensorShape(227U, 227U, 3U, 1U), DataType::F32),
- get_input_accessor(image, std::move(preprocessor)))
+ // Checks
+ ARM_COMPUTE_ERROR_ON_MSG(arm_compute::is_data_type_quantized_asymmetric(common_params.data_type), "Unsupported data type!");
+
+ // Print parameter values
+ std::cout << common_params << std::endl;
+
+ // Get trainable parameters data path
+ std::string data_path = common_params.data_path;
+
+ // Create a preprocessor object
+ const std::array<float, 3> mean_rgb{ { 122.68f, 116.67f, 104.01f } };
+ std::unique_ptr<IPreprocessor> preprocessor = arm_compute::support::cpp14::make_unique<CaffePreproccessor>(mean_rgb);
+
+ graph << common_params.target
+ << common_params.fast_math_hint
+ << InputLayer(TensorDescriptor(TensorShape(227U, 227U, 3U, 1U), common_params.data_type),
+ get_input_accessor(common_params, std::move(preprocessor)))
// Layer 1
<< ConvolutionLayer(
11U, 11U, 96U,
@@ -169,12 +142,15 @@ public:
.set_name("fc8")
// Softmax
<< SoftmaxLayer().set_name("prob")
- << OutputLayer(get_output_accessor(label, 5));
+ << OutputLayer(get_output_accessor(common_params, 5));
// Finalize graph
GraphConfig config;
- config.use_tuner = (target == 2);
- graph.finalize(target_hint, config);
+ config.num_threads = common_params.threads;
+ config.use_tuner = common_params.enable_tuner;
+ graph.finalize(common_params.target, config);
+
+ return true;
}
void do_run() override
{
@@ -183,13 +159,18 @@ public:
}
private:
- Stream graph{ 0, "AlexNet" };
+ CommandLineParser cmd_parser;
+ CommonGraphOptions common_opts;
+ CommonGraphParams common_params;
+ Stream graph;
};
/** Main program for AlexNet
*
* @param[in] argc Number of arguments
- * @param[in] argv Arguments ( [optional] Target (0 = NEON, 1 = OpenCL, 2 = OpenCL with Tuner), [optional] Path to the weights folder, [optional] image, [optional] labels, [optional] Fast math for convolution layer (0 = DISABLED, 1 = ENABLED) )
+ * @param[in] argv Arguments
+ *
+ * @return Return code
*/
int main(int argc, char **argv)
{
diff --git a/examples/graph_googlenet.cpp b/examples/graph_googlenet.cpp
index 2dba67f5eb..e23107f081 100644
--- a/examples/graph_googlenet.cpp
+++ b/examples/graph_googlenet.cpp
@@ -23,12 +23,10 @@
*/
#include "arm_compute/graph.h"
#include "support/ToolchainSupport.h"
+#include "utils/CommonGraphOptions.h"
#include "utils/GraphUtils.h"
#include "utils/Utils.h"
-#include <cstdlib>
-#include <tuple>
-
using namespace arm_compute::utils;
using namespace arm_compute::graph::frontend;
using namespace arm_compute::graph_utils;
@@ -36,71 +34,47 @@ using namespace arm_compute::graph_utils;
/** Example demonstrating how to implement Googlenet's network using the Compute Library's graph API
*
* @param[in] argc Number of arguments
- * @param[in] argv Arguments ( [optional] Target (0 = NEON, 1 = OpenCL, 2 = OpenCL with Tuner), [optional] Path to the weights folder, [optional] image, [optional] labels, [optional] Fast math for convolution layer (0 = DISABLED, 1 = ENABLED) )
+ * @param[in] argv Arguments
*/
class GraphGooglenetExample : public Example
{
public:
- void do_setup(int argc, char **argv) override
+ GraphGooglenetExample()
+ : cmd_parser(), common_opts(cmd_parser), common_params(), graph(0, "GoogleNet")
{
- std::string data_path; /* Path to the trainable data */
- std::string image; /* Image data */
- std::string label; /* Label data */
-
- // Create a preprocessor object
- const std::array<float, 3> mean_rgb{ { 122.68f, 116.67f, 104.01f } };
- std::unique_ptr<IPreprocessor> preprocessor = arm_compute::support::cpp14::make_unique<CaffePreproccessor>(mean_rgb);
+ }
+ bool do_setup(int argc, char **argv) override
+ {
+ // Parse arguments
+ cmd_parser.parse(argc, argv);
- // Set target. 0 (NEON), 1 (OpenCL), 2 (OpenCL with Tuner). By default it is NEON
- const int target = argc > 1 ? std::strtol(argv[1], nullptr, 10) : 0;
- Target target_hint = set_target_hint(target);
- FastMathHint fast_math_hint = FastMathHint::DISABLED;
+ // Consume common parameters
+ common_params = consume_common_graph_parameters(common_opts);
- // Parse arguments
- if(argc < 2)
- {
- // Print help
- std::cout << "Usage: " << argv[0] << " [target] [path_to_data] [image] [labels] [fast_math_hint]\n\n";
- std::cout << "No data folder provided: using random values\n\n";
- }
- else if(argc == 2)
+ // Return when help menu is requested
+ if(common_params.help)
{
- std::cout << "Usage: " << argv[0] << " " << argv[1] << " [path_to_data] [image] [labels] [fast_math_hint]\n\n";
- std::cout << "No data folder provided: using random values\n\n";
- }
- else if(argc == 3)
- {
- data_path = argv[2];
- std::cout << "Usage: " << argv[0] << " " << argv[1] << " " << argv[2] << " [image] [labels] [fast_math_hint]\n\n";
- std::cout << "No image provided: using random values\n\n";
- }
- else if(argc == 4)
- {
- data_path = argv[2];
- image = argv[3];
- std::cout << "Usage: " << argv[0] << " " << argv[1] << " " << argv[2] << " " << argv[3] << " [labels] [fast_math_hint]\n\n";
- std::cout << "No text file with labels provided: skipping output accessor\n\n";
- }
- else if(argc == 5)
- {
- data_path = argv[2];
- image = argv[3];
- label = argv[4];
- std::cout << "Usage: " << argv[0] << " " << argv[1] << " " << argv[2] << " " << argv[3] << " " << argv[4] << " [fast_math_hint]\n\n";
- std::cout << "No fast math info provided: disabling fast math\n\n";
- }
- else
- {
- data_path = argv[2];
- image = argv[3];
- label = argv[4];
- fast_math_hint = (std::strtol(argv[5], nullptr, 1) == 0) ? FastMathHint::DISABLED : FastMathHint::ENABLED;
+ cmd_parser.print_help(argv[0]);
+ return false;
}
- graph << target_hint
- << fast_math_hint
- << InputLayer(TensorDescriptor(TensorShape(224U, 224U, 3U, 1U), DataType::F32),
- get_input_accessor(image, std::move(preprocessor)))
+ // Checks
+ ARM_COMPUTE_ERROR_ON_MSG(arm_compute::is_data_type_quantized_asymmetric(common_params.data_type), "Unsupported data type!");
+
+ // Print parameter values
+ std::cout << common_params << std::endl;
+
+ // Get trainable parameters data path
+ std::string data_path = common_params.data_path;
+
+ // Create a preprocessor object
+ const std::array<float, 3> mean_rgb{ { 122.68f, 116.67f, 104.01f } };
+ std::unique_ptr<IPreprocessor> preprocessor = arm_compute::support::cpp14::make_unique<CaffePreproccessor>(mean_rgb);
+
+ graph << common_params.target
+ << common_params.fast_math_hint
+ << InputLayer(TensorDescriptor(TensorShape(224U, 224U, 3U, 1U), common_params.data_type),
+ get_input_accessor(common_params, std::move(preprocessor)))
<< ConvolutionLayer(
7U, 7U, 64U,
get_weights_accessor(data_path, "/cnn_data/googlenet_model/conv1/conv1_7x7_s2_w.npy"),
@@ -140,12 +114,15 @@ public:
get_weights_accessor(data_path, "/cnn_data/googlenet_model/loss3/loss3_classifier_w.npy"),
get_weights_accessor(data_path, "/cnn_data/googlenet_model/loss3/loss3_classifier_b.npy"))
<< SoftmaxLayer()
- << OutputLayer(get_output_accessor(label, 5));
+ << OutputLayer(get_output_accessor(common_params, 5));
// Finalize graph
GraphConfig config;
- config.use_tuner = (target == 2);
- graph.finalize(target_hint, config);
+ config.num_threads = common_params.threads;
+ config.use_tuner = common_params.enable_tuner;
+ graph.finalize(common_params.target, config);
+
+ return true;
}
void do_run() override
{
@@ -154,7 +131,10 @@ public:
}
private:
- Stream graph{ 0, "GoogleNet" };
+ CommandLineParser cmd_parser;
+ CommonGraphOptions common_opts;
+ CommonGraphParams common_params;
+ Stream graph;
BranchLayer get_inception_node(const std::string &data_path, std::string &&param_path,
unsigned int a_filt,
@@ -215,7 +195,7 @@ private:
/** Main program for Googlenet
*
* @param[in] argc Number of arguments
- * @param[in] argv Arguments ( [optional] Target (0 = NEON, 1 = OpenCL, 2 = OpenCL with Tuner), [optional] Path to the weights folder, [optional] image, [optional] labels, [optional] Fast math for convolution layer (0 = DISABLED, 1 = ENABLED) )
+ * @param[in] argv Arguments
*/
int main(int argc, char **argv)
{
diff --git a/examples/graph_inception_v3.cpp b/examples/graph_inception_v3.cpp
index d1d6ab4e05..30b1b7d7ce 100644
--- a/examples/graph_inception_v3.cpp
+++ b/examples/graph_inception_v3.cpp
@@ -23,12 +23,10 @@
*/
#include "arm_compute/graph.h"
#include "support/ToolchainSupport.h"
+#include "utils/CommonGraphOptions.h"
#include "utils/GraphUtils.h"
#include "utils/Utils.h"
-#include <cstdlib>
-#include <tuple>
-
using namespace arm_compute::utils;
using namespace arm_compute::graph::frontend;
using namespace arm_compute::graph_utils;
@@ -36,70 +34,46 @@ using namespace arm_compute::graph_utils;
/** Example demonstrating how to implement InceptionV3's network using the Compute Library's graph API
*
* @param[in] argc Number of arguments
- * @param[in] argv Arguments ( [optional] Target (0 = NEON, 1 = OpenCL, 2 = OpenCL with Tuner), [optional] Path to the weights folder, [optional] image, [optional] labels, [optional] Fast math for convolution layer (0 = DISABLED, 1 = ENABLED) )
+ * @param[in] argv Arguments
*/
class InceptionV3Example : public Example
{
public:
- void do_setup(int argc, char **argv) override
+ InceptionV3Example()
+ : cmd_parser(), common_opts(cmd_parser), common_params(), graph(0, "InceptionV3")
{
- std::string data_path; /* Path to the trainable data */
- std::string image; /* Image data */
- std::string label; /* Label data */
-
- // Create a preprocessor object
- std::unique_ptr<IPreprocessor> preprocessor = arm_compute::support::cpp14::make_unique<TFPreproccessor>();
+ }
+ bool do_setup(int argc, char **argv) override
+ {
+ // Parse arguments
+ cmd_parser.parse(argc, argv);
- // Set target. 0 (NEON), 1 (OpenCL), 2 (OpenCL with Tuner). By default it is NEON
- const int target = argc > 1 ? std::strtol(argv[1], nullptr, 10) : 0;
- Target target_hint = set_target_hint(target);
- FastMathHint fast_math_hint = FastMathHint::DISABLED;
+ // Consume common parameters
+ common_params = consume_common_graph_parameters(common_opts);
- // Parse arguments
- if(argc < 2)
- {
- // Print help
- std::cout << "Usage: " << argv[0] << " [target] [path_to_data] [image] [labels] [fast_math_hint]\n\n";
- std::cout << "No data folder provided: using random values\n\n";
- }
- else if(argc == 2)
+ // Return when help menu is requested
+ if(common_params.help)
{
- std::cout << "Usage: " << argv[0] << " " << argv[1] << " [path_to_data] [image] [labels] [fast_math_hint]\n\n";
- std::cout << "No data folder provided: using random values\n\n";
- }
- else if(argc == 3)
- {
- data_path = argv[2];
- std::cout << "Usage: " << argv[0] << " " << argv[1] << " " << argv[2] << " [image] [labels] [fast_math_hint]\n\n";
- std::cout << "No image provided: using random values\n\n";
- }
- else if(argc == 4)
- {
- data_path = argv[2];
- image = argv[3];
- std::cout << "Usage: " << argv[0] << " " << argv[1] << " " << argv[2] << " " << argv[3] << " [labels] [fast_math_hint]\n\n";
- std::cout << "No text file with labels provided: skipping output accessor\n\n";
- }
- else if(argc == 5)
- {
- data_path = argv[2];
- image = argv[3];
- label = argv[4];
- std::cout << "Usage: " << argv[0] << " " << argv[1] << " " << argv[2] << " " << argv[3] << " " << argv[4] << " [fast_math_hint]\n\n";
- std::cout << "No fast math info provided: disabling fast math\n\n";
- }
- else
- {
- data_path = argv[2];
- image = argv[3];
- label = argv[4];
- fast_math_hint = (std::strtol(argv[5], nullptr, 1) == 0) ? FastMathHint::DISABLED : FastMathHint::ENABLED;
+ cmd_parser.print_help(argv[0]);
+ return false;
}
- graph << target_hint
- << fast_math_hint
- << InputLayer(TensorDescriptor(TensorShape(299U, 299U, 3U, 1U), DataType::F32),
- get_input_accessor(image, std::move(preprocessor), false))
+ // Checks
+ ARM_COMPUTE_ERROR_ON_MSG(arm_compute::is_data_type_quantized_asymmetric(common_params.data_type), "Unsupported data type!");
+
+ // Print parameter values
+ std::cout << common_params << std::endl;
+
+ // Get trainable parameters data path
+ std::string data_path = common_params.data_path;
+
+ // Create a preprocessor object
+ std::unique_ptr<IPreprocessor> preprocessor = arm_compute::support::cpp14::make_unique<TFPreproccessor>();
+
+ graph << common_params.target
+ << common_params.fast_math_hint
+ << InputLayer(TensorDescriptor(TensorShape(299U, 299U, 3U, 1U), common_params.data_type),
+ get_input_accessor(common_params, std::move(preprocessor), false))
<< ConvolutionLayer(3U, 3U, 32U,
get_weights_accessor(data_path, "/cnn_data/inceptionv3_model/Conv2d_1a_3x3_weights.npy"),
std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr), PadStrideInfo(2, 2, 0, 0))
@@ -218,12 +192,15 @@ public:
.set_name("Logits/Conv2d_1c_1x1/convolution")
<< ReshapeLayer(TensorShape(1001U)).set_name("Predictions/Reshape")
<< SoftmaxLayer().set_name("Predictions/Softmax")
- << OutputLayer(get_output_accessor(label, 5));
+ << OutputLayer(get_output_accessor(common_params, 5));
// Finalize graph
GraphConfig config;
- config.use_tuner = (target == 2);
- graph.finalize(target_hint, config);
+ config.num_threads = common_params.threads;
+ config.use_tuner = common_params.enable_tuner;
+ graph.finalize(common_params.target, config);
+
+ return true;
}
void do_run() override
@@ -232,7 +209,10 @@ public:
}
private:
- Stream graph{ 0, "InceptionV3" };
+ CommandLineParser cmd_parser;
+ CommonGraphOptions common_opts;
+ CommonGraphParams common_params;
+ Stream graph;
private:
BranchLayer get_inception_node_A(const std::string &data_path, std::string &&param_path,
@@ -863,7 +843,7 @@ private:
/** Main program for Inception V3
*
* @param[in] argc Number of arguments
- * @param[in] argv Arguments ( [optional] Target (0 = NEON, 1 = OpenCL, 2 = OpenCL with Tuner), [optional] Path to the weights folder, [optional] image, [optional] labels, [optional] Fast math for convolution layer (0 = DISABLED, 1 = ENABLED) )
+ * @param[in] argv Arguments
*/
int main(int argc, char **argv)
{
diff --git a/examples/graph_inception_v4.cpp b/examples/graph_inception_v4.cpp
index ed95baa99e..e7c1bc69e2 100644
--- a/examples/graph_inception_v4.cpp
+++ b/examples/graph_inception_v4.cpp
@@ -23,12 +23,10 @@
*/
#include "arm_compute/graph.h"
#include "support/ToolchainSupport.h"
+#include "utils/CommonGraphOptions.h"
#include "utils/GraphUtils.h"
#include "utils/Utils.h"
-#include <cstdlib>
-#include <tuple>
-
using namespace arm_compute::utils;
using namespace arm_compute::graph::frontend;
using namespace arm_compute::graph_utils;
@@ -36,73 +34,46 @@ using namespace arm_compute::graph_utils;
/** Example demonstrating how to implement InceptionV4's network using the Compute Library's graph API
*
* @param[in] argc Number of arguments
- * @param[in] argv Arguments ( [optional] Target (0 = NEON, 1 = OpenCL, 2 = OpenCL with Tuner), [optional] Path to the weights folder, [optional] image, [optional] labels, [optional] Fast math for convolution layer (0 = DISABLED, 1 = ENABLED) )
+ * @param[in] argv Arguments
*/
class InceptionV4Example final : public Example
{
public:
- void do_setup(int argc, char **argv) override
+ InceptionV4Example()
+ : cmd_parser(), common_opts(cmd_parser), common_params(), graph(0, "InceptionV4")
{
- // Disabled the test for now because the process gets killed on Linux Firefly 32 bit even when using ConvolutionMethodHint::DIRECT.
- // Needs to review/rework to run the code below.
-#if __aarch64__
- std::string data_path; /* Path to the trainable data */
- std::string image; /* Image data */
- std::string label; /* Label data */
-
- // Create a preprocessor object
- std::unique_ptr<IPreprocessor> preprocessor = arm_compute::support::cpp14::make_unique<TFPreproccessor>();
+ }
+ bool do_setup(int argc, char **argv) override
+ {
+ // Parse arguments
+ cmd_parser.parse(argc, argv);
- // Set target. 0 (NEON), 1 (OpenCL). By default it is NEON
- const int target = argc > 1 ? std::strtol(argv[1], nullptr, 10) : 0;
- Target target_hint = set_target_hint(target);
- FastMathHint fast_math_hint = FastMathHint::DISABLED;
+ // Consume common parameters
+ common_params = consume_common_graph_parameters(common_opts);
- // Parse arguments
- if(argc < 2)
- {
- // Print help
- std::cout << "Usage: " << argv[0] << " [target] [path_to_data] [image] [labels] [fast_math_hint]\n\n";
- std::cout << "No data folder provided: using random values\n\n";
- }
- else if(argc == 2)
+ // Return when help menu is requested
+ if(common_params.help)
{
- std::cout << "Usage: " << argv[0] << " " << argv[1] << " [path_to_data] [image] [labels] [fast_math_hint]\n\n";
- std::cout << "No data folder provided: using random values\n\n";
- }
- else if(argc == 3)
- {
- data_path = argv[2];
- std::cout << "Usage: " << argv[0] << " " << argv[1] << " " << argv[2] << " [image] [labels] [fast_math_hint]\n\n";
- std::cout << "No image provided: using random values\n\n";
- }
- else if(argc == 4)
- {
- data_path = argv[2];
- image = argv[3];
- std::cout << "Usage: " << argv[0] << " " << argv[1] << " " << argv[2] << " " << argv[3] << " [labels] [fast_math_hint]\n\n";
- std::cout << "No text file with labels provided: skipping output accessor\n\n";
- }
- else if(argc == 5)
- {
- data_path = argv[2];
- image = argv[3];
- label = argv[4];
- std::cout << "Usage: " << argv[0] << " " << argv[1] << " " << argv[2] << " " << argv[3] << " " << argv[4] << " [fast_math_hint]\n\n";
- std::cout << "No fast math info provided: disabling fast math\n\n";
- }
- else
- {
- data_path = argv[2];
- image = argv[3];
- label = argv[4];
- fast_math_hint = (std::strtol(argv[5], nullptr, 1) == 0) ? FastMathHint::DISABLED : FastMathHint::ENABLED;
+ cmd_parser.print_help(argv[0]);
+ return false;
}
- graph << target_hint
- << fast_math_hint
- << InputLayer(TensorDescriptor(TensorShape(299U, 299U, 3U, 1U), DataType::F32),
- get_input_accessor(image, std::move(preprocessor), false))
+ // Checks
+ ARM_COMPUTE_ERROR_ON_MSG(arm_compute::is_data_type_quantized_asymmetric(common_params.data_type), "Unsupported data type!");
+
+ // Print parameter values
+ std::cout << common_params << std::endl;
+
+ // Get trainable parameters data path
+ std::string data_path = common_params.data_path;
+
+ // Create a preprocessor object
+ std::unique_ptr<IPreprocessor> preprocessor = arm_compute::support::cpp14::make_unique<TFPreproccessor>();
+
+ graph << common_params.target
+ << common_params.fast_math_hint
+ << InputLayer(TensorDescriptor(TensorShape(299U, 299U, 3U, 1U), common_params.data_type),
+ get_input_accessor(common_params, std::move(preprocessor), false))
// Conv2d_1a_3x3
<< ConvolutionLayer(3U, 3U, 32U,
get_weights_accessor(data_path, "/cnn_data/inceptionv4_model/Conv2d_1a_3x3_weights.npy"),
@@ -165,28 +136,27 @@ public:
get_weights_accessor(data_path, "/cnn_data/inceptionv4_model/Logits_Logits_weights.npy"),
get_weights_accessor(data_path, "/cnn_data/inceptionv4_model/Logits_Logits_biases.npy"))
<< SoftmaxLayer()
- << OutputLayer(get_output_accessor(label, 5));
+ << OutputLayer(get_output_accessor(common_params, 5));
// Finalize graph
GraphConfig config;
- config.use_tuner = (target == 2);
- graph.finalize(target_hint, config);
-#else /* __aarch64__ */
- using namespace arm_compute;
- ARM_COMPUTE_UNUSED(argc);
- ARM_COMPUTE_UNUSED(argv);
-#endif /* __aarch64__ */
+ config.num_threads = common_params.threads;
+ config.use_tuner = common_params.enable_tuner;
+ graph.finalize(common_params.target, config);
+
+ return true;
}
void do_run() override
{
-#if __aarch64__
graph.run();
-#endif /* __aarch64__ */
}
private:
- Stream graph{ 0, "InceptionV4" };
+ CommandLineParser cmd_parser;
+ CommonGraphOptions common_opts;
+ CommonGraphParams common_params;
+ Stream graph;
private:
BranchLayer get_mixed_3a(const std::string &data_path)
@@ -747,7 +717,7 @@ private:
/** Main program for Inception V4
*
* @param[in] argc Number of arguments
- * @param[in] argv Arguments ( [optional] Target (0 = NEON, 1 = OpenCL, 2 = OpenCL with Tuner), [optional] Path to the weights folder, [optional] image, [optional] labels, [optional] Fast math for convolution layer (0 = DISABLED, 1 = ENABLED) )
+ * @param[in] argv Arguments
*/
int main(int argc, char **argv)
{
diff --git a/examples/graph_lenet.cpp b/examples/graph_lenet.cpp
index 32c75827d3..f90892aeee 100644
--- a/examples/graph_lenet.cpp
+++ b/examples/graph_lenet.cpp
@@ -22,13 +22,11 @@
* SOFTWARE.
*/
#include "arm_compute/graph.h"
-
#include "support/ToolchainSupport.h"
+#include "utils/CommonGraphOptions.h"
#include "utils/GraphUtils.h"
#include "utils/Utils.h"
-#include <cstdlib>
-
using namespace arm_compute::utils;
using namespace arm_compute::graph::frontend;
using namespace arm_compute::graph_utils;
@@ -41,55 +39,39 @@ using namespace arm_compute::graph_utils;
class GraphLenetExample : public Example
{
public:
- void do_setup(int argc, char **argv) override
+ GraphLenetExample()
+ : cmd_parser(), common_opts(cmd_parser), common_params(), graph(0, "LeNet")
{
- std::string data_path; /** Path to the trainable data */
- unsigned int batches = 4; /** Number of batches */
-
- // Set target. 0 (NEON), 1 (OpenCL), 2 (OpenCL with Tuner). By default it is NEON
- const int target = argc > 1 ? std::strtol(argv[1], nullptr, 10) : 0;
- Target target_hint = set_target_hint(target);
+ }
+ bool do_setup(int argc, char **argv) override
+ {
+ // Parse arguments
+ cmd_parser.parse(argc, argv);
- FastMathHint fast_math_hint = FastMathHint::DISABLED;
+ // Consume common parameters
+ common_params = consume_common_graph_parameters(common_opts);
- // Parse arguments
- if(argc < 2)
+ // Return when help menu is requested
+ if(common_params.help)
{
- // Print help
- std::cout << "Usage: " << argv[0] << " [target] [path_to_data] [batches] [fast_math_hint]\n\n";
- std::cout << "No data folder provided: using random values\n\n";
- }
- else if(argc == 2)
- {
- std::cout << "Usage: " << argv[0] << " " << argv[1] << " [path_to_data] [batches] [fast_math_hint]\n\n";
- std::cout << "No data folder provided: using random values\n\n";
- }
- else if(argc == 3)
- {
- //Do something with argv[1]
- data_path = argv[2];
- std::cout << "Usage: " << argv[0] << " [path_to_data] [batches] [fast_math_hint]\n\n";
- std::cout << "No number of batches where specified, thus will use the default : " << batches << "\n\n";
- }
- else if(argc == 4)
- {
- data_path = argv[2];
- batches = std::strtol(argv[3], nullptr, 0);
- std::cout << "Usage: " << argv[0] << " " << argv[1] << " " << argv[2] << " " << argv[3] << " [fast_math_hint]\n\n";
- std::cout << "No fast math info provided: disabling fast math\n\n";
- }
- else
- {
- //Do something with argv[1] and argv[2]
- data_path = argv[2];
- batches = std::strtol(argv[3], nullptr, 0);
- fast_math_hint = (std::strtol(argv[4], nullptr, 1) == 0) ? FastMathHint::DISABLED : FastMathHint::ENABLED;
+ cmd_parser.print_help(argv[0]);
+ return false;
}
+ // Checks
+ ARM_COMPUTE_ERROR_ON_MSG(arm_compute::is_data_type_quantized_asymmetric(common_params.data_type), "Unsupported data type!");
+
+ // Print parameter values
+ std::cout << common_params << std::endl;
+
+ // Get trainable parameters data path
+ std::string data_path = common_params.data_path;
+ unsigned int batches = 4; /** Number of batches */
+
//conv1 << pool1 << conv2 << pool2 << fc1 << act1 << fc2 << smx
- graph << target_hint
- << fast_math_hint
- << InputLayer(TensorDescriptor(TensorShape(28U, 28U, 1U, batches), DataType::F32), get_input_accessor(""))
+ graph << common_params.target
+ << common_params.fast_math_hint
+ << InputLayer(TensorDescriptor(TensorShape(28U, 28U, 1U, batches), common_params.data_type), get_input_accessor(common_params))
<< ConvolutionLayer(
5U, 5U, 20U,
get_weights_accessor(data_path, "/cnn_data/lenet_model/conv1_w.npy"),
@@ -116,12 +98,15 @@ public:
get_weights_accessor(data_path, "/cnn_data/lenet_model/ip2_b.npy"))
.set_name("ip2")
<< SoftmaxLayer().set_name("prob")
- << OutputLayer(get_output_accessor(""));
+ << OutputLayer(get_output_accessor(common_params));
// Finalize graph
GraphConfig config;
- config.use_tuner = (target == 2);
- graph.finalize(target_hint, config);
+ config.num_threads = common_params.threads;
+ config.use_tuner = common_params.enable_tuner;
+ graph.finalize(common_params.target, config);
+
+ return true;
}
void do_run() override
{
@@ -130,7 +115,10 @@ public:
}
private:
- Stream graph{ 0, "LeNet" };
+ CommandLineParser cmd_parser;
+ CommonGraphOptions common_opts;
+ CommonGraphParams common_params;
+ Stream graph;
};
/** Main program for LeNet
diff --git a/examples/graph_mobilenet.cpp b/examples/graph_mobilenet.cpp
index 40243bb111..9304b2b380 100644
--- a/examples/graph_mobilenet.cpp
+++ b/examples/graph_mobilenet.cpp
@@ -23,11 +23,11 @@
*/
#include "arm_compute/graph.h"
#include "support/ToolchainSupport.h"
+#include "utils/CommonGraphOptions.h"
#include "utils/GraphUtils.h"
#include "utils/Utils.h"
-#include <cstdlib>
-
+using namespace arm_compute;
using namespace arm_compute::utils;
using namespace arm_compute::graph::frontend;
using namespace arm_compute::graph_utils;
@@ -35,107 +35,112 @@ using namespace arm_compute::graph_utils;
/** Example demonstrating how to implement MobileNet's network using the Compute Library's graph API
*
* @param[in] argc Number of arguments
- * @param[in] argv Arguments ( [optional] Target (0 = NEON, 1 = OpenCL, 2 = OpenCL with Tuner), [optional] Model ID (0 = MobileNetV1_1.0_224, 1 = MobileNetV1_0.75_160), [optional] Path to the weights folder, [optional] image, [optional] labels, [optional] data layout, [optional] Fast math for convolution layer (0 = DISABLED, 1 = ENABLED) )
+ * @param[in] argv Arguments
*/
-class GraphMobilenetExample : public Example
+class GraphMobilenetExample : public Example // NOLINT
{
public:
- void do_setup(int argc, char **argv) override
+ GraphMobilenetExample()
+ : cmd_parser(), common_opts(cmd_parser), common_params(), graph(0, "MobileNetV1")
{
- std::string data_path; /* Path to the trainable data */
- std::string image; /* Image data */
- std::string label; /* Label data */
-
- // Create a preprocessor object
- std::unique_ptr<IPreprocessor> preprocessor = arm_compute::support::cpp14::make_unique<TFPreproccessor>();
+ // Add model id option
+ model_id_opt = cmd_parser.add_option<SimpleOption<int>>("model-id", 0);
+ model_id_opt->set_help("Mobilenet model id (0: 1.0_224, else: 0.75_160");
+ }
+ /** Prevent instances of this class from being copy constructed */
+ GraphMobilenetExample(const GraphMobilenetExample &) = delete;
+ /** Prevent instances of this class from being copied */
+ GraphMobilenetExample &operator=(const GraphMobilenetExample &) = delete;
+ bool do_setup(int argc, char **argv) override
+ {
+ // Parse arguments
+ cmd_parser.parse(argc, argv);
- // Set target. 0 (NEON), 1 (OpenCL), 2 (OpenCL with Tuner). By default it is NEON
- const int target = argc > 1 ? std::strtol(argv[1], nullptr, 10) : 0;
- Target target_hint = set_target_hint(target);
- DepthwiseConvolutionMethod depthwise_convolution_hint = DepthwiseConvolutionMethod::OPTIMIZED_3x3;
- FastMathHint fast_math_hint = FastMathHint::DISABLED;
-
- // Set model to execute. 0 (MobileNetV1_1.0_224), 1 (MobileNetV1_0.75_160)
- int model_id = (argc > 2) ? std::strtol(argv[2], nullptr, 10) : 0;
- ARM_COMPUTE_ERROR_ON_MSG(model_id > 1, "Invalid model ID. Model must be 0 (MobileNetV1_1.0_224) or 1 (MobileNetV1_0.75_160)");
- int layout_id = (argc > 3) ? std::strtol(argv[3], nullptr, 10) : 0;
- ARM_COMPUTE_ERROR_ON_MSG(layout_id > 1, "Invalid layout ID. Layout must be 0 (NCHW) or 1 (NHWC)");
-
- float depth_scale = (model_id == 0) ? 1.f : 0.75;
- unsigned int spatial_size = (model_id == 0) ? 224 : 160;
- std::string model_path = (model_id == 0) ? "/cnn_data/mobilenet_v1_1_224_model/" : "/cnn_data/mobilenet_v1_075_160_model/";
- TensorDescriptor input_descriptor_nchw = TensorDescriptor(TensorShape(spatial_size, spatial_size, 3U, 1U), DataType::F32);
- TensorDescriptor input_descriptor_nhwc = TensorDescriptor(TensorShape(3U, spatial_size, spatial_size, 1U), DataType::F32).set_layout(DataLayout::NHWC);
- TensorDescriptor input_descriptor = (layout_id == 0) ? input_descriptor_nchw : input_descriptor_nhwc;
+ // Consume common parameters
+ common_params = consume_common_graph_parameters(common_opts);
- // Parse arguments
- if(argc < 2)
- {
- // Print help
- std::cout << "Usage: " << argv[0] << " [target] [model] [layout] [path_to_data] [image] [labels] [fast_math_hint]\n\n";
- std::cout << "No model ID provided: using MobileNetV1_1.0_224\n\n";
- std::cout << "No data layout provided: using NCHW\n\n";
- std::cout << "No data folder provided: using random values\n\n";
- }
- else if(argc == 2)
+ // Return when help menu is requested
+ if(common_params.help)
{
- std::cout << "Usage: " << argv[0] << " " << argv[1] << " [model] [layout] [path_to_data] [image] [labels] [fast_math_hint]\n\n";
- std::cout << "No model ID provided: using MobileNetV1_1.0_224\n\n";
- std::cout << "No data layout provided: using NCHW\n\n";
- std::cout << "No data folder provided: using random values\n\n";
+ cmd_parser.print_help(argv[0]);
+ return false;
}
- else if(argc == 3)
- {
- std::cout << "Usage: " << argv[0] << " " << argv[1] << " " << argv[2] << " [layout] [path_to_data] [image] [labels] [fast_math_hint]\n\n";
- std::cout << "No data layout provided: using NCHW\n\n";
- std::cout << "No data folder provided: using random values\n\n";
- }
- else if(argc == 4)
- {
- std::cout << "Usage: " << argv[0] << " " << argv[1] << " " << argv[2] << " " << argv[3] << " [path_to_data] [image] [labels] [fast_math_hint]\n\n";
- std::cout << "No data folder provided: using random values\n\n";
- }
- else if(argc == 5)
- {
- data_path = argv[4];
- std::cout << "Usage: " << argv[0] << " " << argv[1] << " " << argv[2] << " " << argv[3] << " " << argv[4] << " [image] [labels] [fast_math_hint]\n\n";
- std::cout << "No image provided: using random values\n\n";
- std::cout << "No text file with labels provided: skipping output accessor\n\n";
- }
- else if(argc == 6)
+
+ // Print parameter values
+ std::cout << common_params << std::endl;
+
+ // Get model parameters
+ int model_id = model_id_opt->value();
+
+ // Create input descriptor
+ unsigned int spatial_size = (model_id == 0 || common_params.data_type == DataType::QASYMM8) ? 224 : 160;
+ TensorShape tensor_shape = TensorShape(spatial_size, spatial_size, 3U, 1U);
+ if(common_params.data_layout == DataLayout::NHWC)
{
- data_path = argv[4];
- image = argv[5];
- std::cout << "Usage: " << argv[0] << " " << argv[1] << " " << argv[2] << " " << argv[3] << " [labels] [fast_math_hint]\n\n";
- std::cout << "No text file with labels provided: skipping output accessor\n\n";
+ arm_compute::permute(tensor_shape, arm_compute::PermutationVector(2U, 0U, 1U));
}
- else if(argc == 7)
+ TensorDescriptor input_descriptor = TensorDescriptor(tensor_shape, common_params.data_type).set_layout(common_params.data_layout);
+
+ // Set graph hints
+ graph << common_params.target
+ << DepthwiseConvolutionMethod::OPTIMIZED_3x3 // FIXME(COMPMID-1073): Add heuristics to automatically call the optimized 3x3 method
+ << common_params.fast_math_hint;
+
+ // Create core graph
+ if(arm_compute::is_data_type_float(common_params.data_type))
{
- data_path = argv[4];
- image = argv[5];
- label = argv[6];
- std::cout << "Usage: " << argv[0] << " " << argv[1] << " " << argv[2] << " " << argv[3] << " " << argv[4] << " [fast_math_hint]\n\n";
- std::cout << "No fast math info provided: disabling fast math\n\n";
+ create_graph_float(input_descriptor, model_id);
}
else
{
- data_path = argv[4];
- image = argv[5];
- label = argv[6];
- fast_math_hint = (std::strtol(argv[7], nullptr, 1) == 0) ? FastMathHint::DISABLED : FastMathHint::ENABLED;
+ create_graph_qasymm(input_descriptor);
}
+ // Create common tail
+ graph << ReshapeLayer(TensorShape(1001U)).set_name("Reshape")
+ << SoftmaxLayer().set_name("Softmax")
+ << OutputLayer(get_output_accessor(common_params, 5));
+
+ // Finalize graph
+ GraphConfig config;
+ config.num_threads = common_params.threads;
+ config.use_tuner = common_params.enable_tuner;
+ graph.finalize(common_params.target, config);
+
+ return true;
+ }
+ void do_run() override
+ {
+ // Run graph
+ graph.run();
+ }
+
+private:
+ CommandLineParser cmd_parser;
+ CommonGraphOptions common_opts;
+ SimpleOption<int> *model_id_opt{ nullptr };
+ CommonGraphParams common_params;
+ Stream graph;
+
+ void create_graph_float(TensorDescriptor &input_descriptor, int model_id)
+ {
+ float depth_scale = (model_id == 0) ? 1.f : 0.75;
+ std::string model_path = (model_id == 0) ? "/cnn_data/mobilenet_v1_1_224_model/" : "/cnn_data/mobilenet_v1_075_160_model/";
+
+ // Create a preprocessor object
+ std::unique_ptr<IPreprocessor> preprocessor = arm_compute::support::cpp14::make_unique<TFPreproccessor>();
+
+ // Get trainable parameters data path
+ std::string data_path = common_params.data_path;
+
// Add model path to data path
if(!data_path.empty())
{
data_path += model_path;
}
- graph << target_hint
- << depthwise_convolution_hint
- << fast_math_hint
- << InputLayer(input_descriptor,
- get_input_accessor(image, std::move(preprocessor), false))
+ graph << InputLayer(input_descriptor,
+ get_input_accessor(common_params, std::move(preprocessor), false))
<< ConvolutionLayer(
3U, 3U, 32U * depth_scale,
get_weights_accessor(data_path, "Conv2d_0_weights.npy", DataLayout::NCHW),
@@ -150,47 +155,122 @@ public:
0.001f)
.set_name("Conv2d_0/BatchNorm")
<< ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f)).set_name("Conv2d_0/Relu6");
- graph << get_dwsc_node(data_path, "Conv2d_1", 64 * depth_scale, PadStrideInfo(1, 1, 1, 1), PadStrideInfo(1, 1, 0, 0));
- graph << get_dwsc_node(data_path, "Conv2d_2", 128 * depth_scale, PadStrideInfo(2, 2, 0, 1, 0, 1, DimensionRoundingType::CEIL), PadStrideInfo(1, 1, 0, 0));
- graph << get_dwsc_node(data_path, "Conv2d_3", 128 * depth_scale, PadStrideInfo(1, 1, 1, 1, 1, 1, DimensionRoundingType::CEIL), PadStrideInfo(1, 1, 0, 0));
- graph << get_dwsc_node(data_path, "Conv2d_4", 256 * depth_scale, PadStrideInfo(2, 2, 0, 1, 0, 1, DimensionRoundingType::CEIL), PadStrideInfo(1, 1, 0, 0));
- graph << get_dwsc_node(data_path, "Conv2d_5", 256 * depth_scale, PadStrideInfo(1, 1, 1, 1, 1, 1, DimensionRoundingType::CEIL), PadStrideInfo(1, 1, 0, 0));
- graph << get_dwsc_node(data_path, "Conv2d_6", 512 * depth_scale, PadStrideInfo(2, 2, 0, 1, 0, 1, DimensionRoundingType::CEIL), PadStrideInfo(1, 1, 0, 0));
- graph << get_dwsc_node(data_path, "Conv2d_7", 512 * depth_scale, PadStrideInfo(1, 1, 1, 1, 1, 1, DimensionRoundingType::CEIL), PadStrideInfo(1, 1, 0, 0));
- graph << get_dwsc_node(data_path, "Conv2d_8", 512 * depth_scale, PadStrideInfo(1, 1, 1, 1, 1, 1, DimensionRoundingType::CEIL), PadStrideInfo(1, 1, 0, 0));
- graph << get_dwsc_node(data_path, "Conv2d_9", 512 * depth_scale, PadStrideInfo(1, 1, 1, 1, 1, 1, DimensionRoundingType::CEIL), PadStrideInfo(1, 1, 0, 0));
- graph << get_dwsc_node(data_path, "Conv2d_10", 512 * depth_scale, PadStrideInfo(1, 1, 1, 1, 1, 1, DimensionRoundingType::CEIL), PadStrideInfo(1, 1, 0, 0));
- graph << get_dwsc_node(data_path, "Conv2d_11", 512 * depth_scale, PadStrideInfo(1, 1, 1, 1, 1, 1, DimensionRoundingType::CEIL), PadStrideInfo(1, 1, 0, 0));
- graph << get_dwsc_node(data_path, "Conv2d_12", 1024 * depth_scale, PadStrideInfo(2, 2, 0, 1, 0, 1, DimensionRoundingType::CEIL), PadStrideInfo(1, 1, 0, 0));
- graph << get_dwsc_node(data_path, "Conv2d_13", 1024 * depth_scale, PadStrideInfo(1, 1, 1, 1, 1, 1, DimensionRoundingType::CEIL), PadStrideInfo(1, 1, 0, 0));
+ graph << get_dwsc_node_float(data_path, "Conv2d_1", 64 * depth_scale, PadStrideInfo(1, 1, 1, 1), PadStrideInfo(1, 1, 0, 0));
+ graph << get_dwsc_node_float(data_path, "Conv2d_2", 128 * depth_scale, PadStrideInfo(2, 2, 0, 1, 0, 1, DimensionRoundingType::CEIL), PadStrideInfo(1, 1, 0, 0));
+ graph << get_dwsc_node_float(data_path, "Conv2d_3", 128 * depth_scale, PadStrideInfo(1, 1, 1, 1, 1, 1, DimensionRoundingType::CEIL), PadStrideInfo(1, 1, 0, 0));
+ graph << get_dwsc_node_float(data_path, "Conv2d_4", 256 * depth_scale, PadStrideInfo(2, 2, 0, 1, 0, 1, DimensionRoundingType::CEIL), PadStrideInfo(1, 1, 0, 0));
+ graph << get_dwsc_node_float(data_path, "Conv2d_5", 256 * depth_scale, PadStrideInfo(1, 1, 1, 1, 1, 1, DimensionRoundingType::CEIL), PadStrideInfo(1, 1, 0, 0));
+ graph << get_dwsc_node_float(data_path, "Conv2d_6", 512 * depth_scale, PadStrideInfo(2, 2, 0, 1, 0, 1, DimensionRoundingType::CEIL), PadStrideInfo(1, 1, 0, 0));
+ graph << get_dwsc_node_float(data_path, "Conv2d_7", 512 * depth_scale, PadStrideInfo(1, 1, 1, 1, 1, 1, DimensionRoundingType::CEIL), PadStrideInfo(1, 1, 0, 0));
+ graph << get_dwsc_node_float(data_path, "Conv2d_8", 512 * depth_scale, PadStrideInfo(1, 1, 1, 1, 1, 1, DimensionRoundingType::CEIL), PadStrideInfo(1, 1, 0, 0));
+ graph << get_dwsc_node_float(data_path, "Conv2d_9", 512 * depth_scale, PadStrideInfo(1, 1, 1, 1, 1, 1, DimensionRoundingType::CEIL), PadStrideInfo(1, 1, 0, 0));
+ graph << get_dwsc_node_float(data_path, "Conv2d_10", 512 * depth_scale, PadStrideInfo(1, 1, 1, 1, 1, 1, DimensionRoundingType::CEIL), PadStrideInfo(1, 1, 0, 0));
+ graph << get_dwsc_node_float(data_path, "Conv2d_11", 512 * depth_scale, PadStrideInfo(1, 1, 1, 1, 1, 1, DimensionRoundingType::CEIL), PadStrideInfo(1, 1, 0, 0));
+ graph << get_dwsc_node_float(data_path, "Conv2d_12", 1024 * depth_scale, PadStrideInfo(2, 2, 0, 1, 0, 1, DimensionRoundingType::CEIL), PadStrideInfo(1, 1, 0, 0));
+ graph << get_dwsc_node_float(data_path, "Conv2d_13", 1024 * depth_scale, PadStrideInfo(1, 1, 1, 1, 1, 1, DimensionRoundingType::CEIL), PadStrideInfo(1, 1, 0, 0));
graph << PoolingLayer(PoolingLayerInfo(PoolingType::AVG)).set_name("Logits/AvgPool_1a")
<< ConvolutionLayer(
1U, 1U, 1001U,
get_weights_accessor(data_path, "Logits_Conv2d_1c_1x1_weights.npy", DataLayout::NCHW),
get_weights_accessor(data_path, "Logits_Conv2d_1c_1x1_biases.npy"),
PadStrideInfo(1, 1, 0, 0))
- .set_name("Logits/Conv2d_1c_1x1")
- << ReshapeLayer(TensorShape(1001U)).set_name("Reshape")
- << SoftmaxLayer().set_name("Softmax")
- << OutputLayer(get_output_accessor(label, 5));
-
- // Finalize graph
- GraphConfig config;
- config.use_tuner = (target == 2);
- graph.finalize(target_hint, config);
+ .set_name("Logits/Conv2d_1c_1x1");
}
- void do_run() override
+
+ void create_graph_qasymm(TensorDescriptor &input_descriptor)
{
- // Run graph
- graph.run();
- }
+ // Get trainable parameters data path
+ std::string data_path = common_params.data_path;
-private:
- Stream graph{ 0, "MobileNetV1" };
+ // Quantization info taken from the AndroidNN QASYMM8 MobileNet example
+ const QuantizationInfo in_quant_info = QuantizationInfo(0.0078125f, 128);
+ const QuantizationInfo mid_quant_info = QuantizationInfo(0.0784313753247f, 128);
+
+ const std::vector<QuantizationInfo> conv_weights_quant_info =
+ {
+ QuantizationInfo(0.031778190285f, 156), // conv0
+ QuantizationInfo(0.00604454148561f, 66) // conv14
+ };
+
+ const std::vector<QuantizationInfo> depth_weights_quant_info =
+ {
+ QuantizationInfo(0.254282623529f, 129), // dwsc1
+ QuantizationInfo(0.12828284502f, 172), // dwsc2
+ QuantizationInfo(0.265911251307f, 83), // dwsc3
+ QuantizationInfo(0.0985597148538f, 30), // dwsc4
+ QuantizationInfo(0.0631204470992f, 54), // dwsc5
+ QuantizationInfo(0.0137207424268f, 141), // dwsc6
+ QuantizationInfo(0.0817828401923f, 125), // dwsc7
+ QuantizationInfo(0.0393880493939f, 164), // dwsc8
+ QuantizationInfo(0.211694166064f, 129), // dwsc9
+ QuantizationInfo(0.158015936613f, 103), // dwsc10
+ QuantizationInfo(0.0182712618262f, 137), // dwsc11
+ QuantizationInfo(0.0127998134121f, 134), // dwsc12
+ QuantizationInfo(0.299285322428f, 161) // dwsc13
+ };
- BranchLayer get_dwsc_node(const std::string &data_path, std::string &&param_path,
- unsigned int conv_filt,
- PadStrideInfo dwc_pad_stride_info, PadStrideInfo conv_pad_stride_info)
+ const std::vector<QuantizationInfo> point_weights_quant_info =
+ {
+ QuantizationInfo(0.0425766184926f, 129), // dwsc1
+ QuantizationInfo(0.0250773020089f, 94), // dwsc2
+ QuantizationInfo(0.015851572156f, 93), // dwsc3
+ QuantizationInfo(0.0167811904103f, 98), // dwsc4
+ QuantizationInfo(0.00951790809631f, 135), // dwsc5
+ QuantizationInfo(0.00999817531556f, 128), // dwsc6
+ QuantizationInfo(0.00590536883101f, 126), // dwsc7
+ QuantizationInfo(0.00576109671965f, 133), // dwsc8
+ QuantizationInfo(0.00830461271107f, 142), // dwsc9
+ QuantizationInfo(0.0152327232063f, 72), // dwsc10
+ QuantizationInfo(0.00741417845711f, 125), // dwsc11
+ QuantizationInfo(0.0135628981516f, 142), // dwsc12
+ QuantizationInfo(0.0338749065995f, 140) // dwsc13
+ };
+
+ graph << InputLayer(input_descriptor.set_quantization_info(in_quant_info),
+ get_weights_accessor(data_path, "/cnn_data/mobilenet_qasymm8_model/" + common_params.image))
+ << ConvolutionLayer(
+ 3U, 3U, 32U,
+ get_weights_accessor(data_path, "/cnn_data/mobilenet_qasymm8_model/Conv2d_0_weights.npy"),
+ get_weights_accessor(data_path, "/cnn_data/mobilenet_qasymm8_model/Conv2d_0_bias.npy"),
+ PadStrideInfo(2U, 2U, 0U, 1U, 0U, 1U, DimensionRoundingType::FLOOR),
+ 1, conv_weights_quant_info.at(0), mid_quant_info)
+ << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.f));
+ graph << get_dwsc_node_qasymm(data_path, "Conv2d_1", 64U, PadStrideInfo(1U, 1U, 1U, 1U), PadStrideInfo(1U, 1U, 0U, 0U), depth_weights_quant_info.at(0), point_weights_quant_info.at(0));
+ graph << get_dwsc_node_qasymm(data_path, "Conv2d_2", 128U, PadStrideInfo(2U, 2U, 0U, 1U, 0U, 1U, DimensionRoundingType::FLOOR), PadStrideInfo(1U, 1U, 0U, 0U), depth_weights_quant_info.at(1),
+ point_weights_quant_info.at(1));
+ graph << get_dwsc_node_qasymm(data_path, "Conv2d_3", 128U, PadStrideInfo(1U, 1U, 1U, 1U, 1U, 1U, DimensionRoundingType::FLOOR), PadStrideInfo(1U, 1U, 0U, 0U), depth_weights_quant_info.at(2),
+ point_weights_quant_info.at(2));
+ graph << get_dwsc_node_qasymm(data_path, "Conv2d_4", 256U, PadStrideInfo(2U, 2U, 0U, 1U, 0U, 1U, DimensionRoundingType::FLOOR), PadStrideInfo(1U, 1U, 0U, 0U), depth_weights_quant_info.at(3),
+ point_weights_quant_info.at(3));
+ graph << get_dwsc_node_qasymm(data_path, "Conv2d_5", 256U, PadStrideInfo(1U, 1U, 1U, 1U, 1U, 1U, DimensionRoundingType::FLOOR), PadStrideInfo(1U, 1U, 0U, 0U), depth_weights_quant_info.at(4),
+ point_weights_quant_info.at(4));
+ graph << get_dwsc_node_qasymm(data_path, "Conv2d_6", 512U, PadStrideInfo(2U, 2U, 0U, 1U, 0U, 1U, DimensionRoundingType::FLOOR), PadStrideInfo(1U, 1U, 0U, 0U), depth_weights_quant_info.at(5),
+ point_weights_quant_info.at(5));
+ graph << get_dwsc_node_qasymm(data_path, "Conv2d_7", 512U, PadStrideInfo(1U, 1U, 1U, 1U, 1U, 1U, DimensionRoundingType::FLOOR), PadStrideInfo(1U, 1U, 0U, 0U), depth_weights_quant_info.at(6),
+ point_weights_quant_info.at(6));
+ graph << get_dwsc_node_qasymm(data_path, "Conv2d_8", 512U, PadStrideInfo(1U, 1U, 1U, 1U, 1U, 1U, DimensionRoundingType::FLOOR), PadStrideInfo(1U, 1U, 0U, 0U), depth_weights_quant_info.at(7),
+ point_weights_quant_info.at(7));
+ graph << get_dwsc_node_qasymm(data_path, "Conv2d_9", 512U, PadStrideInfo(1U, 1U, 1U, 1U, 1U, 1U, DimensionRoundingType::FLOOR), PadStrideInfo(1U, 1U, 0U, 0U), depth_weights_quant_info.at(8),
+ point_weights_quant_info.at(8));
+ graph << get_dwsc_node_qasymm(data_path, "Conv2d_10", 512U, PadStrideInfo(1U, 1U, 1U, 1U, 1U, 1U, DimensionRoundingType::FLOOR), PadStrideInfo(1U, 1U, 0U, 0U), depth_weights_quant_info.at(9),
+ point_weights_quant_info.at(9));
+ graph << get_dwsc_node_qasymm(data_path, "Conv2d_11", 512U, PadStrideInfo(1U, 1U, 1U, 1U, 1U, 1U, DimensionRoundingType::FLOOR), PadStrideInfo(1U, 1U, 0U, 0U), depth_weights_quant_info.at(10),
+ point_weights_quant_info.at(10));
+ graph << get_dwsc_node_qasymm(data_path, "Conv2d_12", 1024U, PadStrideInfo(2U, 2U, 0U, 1U, 0U, 1U, DimensionRoundingType::FLOOR), PadStrideInfo(1U, 1U, 0U, 0U), depth_weights_quant_info.at(11),
+ point_weights_quant_info.at(11));
+ graph << get_dwsc_node_qasymm(data_path, "Conv2d_13", 1024U, PadStrideInfo(1U, 1U, 1U, 1U, 1U, 1U, DimensionRoundingType::FLOOR), PadStrideInfo(1U, 1U, 0U, 0U), depth_weights_quant_info.at(12),
+ point_weights_quant_info.at(12))
+ << PoolingLayer(PoolingLayerInfo(PoolingType::AVG))
+ << ConvolutionLayer(
+ 1U, 1U, 1001U,
+ get_weights_accessor(data_path, "/cnn_data/mobilenet_qasymm8_model/Logits_Conv2d_1c_1x1_weights.npy"),
+ get_weights_accessor(data_path, "/cnn_data/mobilenet_qasymm8_model/Logits_Conv2d_1c_1x1_bias.npy"),
+ PadStrideInfo(1U, 1U, 0U, 0U), 1, conv_weights_quant_info.at(1));
+ }
+
+ BranchLayer get_dwsc_node_float(const std::string &data_path, std::string &&param_path,
+ unsigned int conv_filt,
+ PadStrideInfo dwc_pad_stride_info, PadStrideInfo conv_pad_stride_info)
{
std::string total_path = param_path + "_";
SubStream sg(graph);
@@ -225,18 +305,36 @@ private:
return BranchLayer(std::move(sg));
}
+
+ BranchLayer get_dwsc_node_qasymm(const std::string &data_path, std::string &&param_path,
+ const unsigned int conv_filt,
+ PadStrideInfo dwc_pad_stride_info, PadStrideInfo conv_pad_stride_info,
+ QuantizationInfo depth_weights_quant_info, QuantizationInfo point_weights_quant_info)
+ {
+ std::string total_path = "/cnn_data/mobilenet_qasymm8_model/" + param_path + "_";
+ SubStream sg(graph);
+
+ sg << DepthwiseConvolutionLayer(
+ 3U, 3U,
+ get_weights_accessor(data_path, total_path + "depthwise_weights.npy"),
+ get_weights_accessor(data_path, total_path + "depthwise_bias.npy"),
+ dwc_pad_stride_info, depth_weights_quant_info)
+ << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.f))
+ << ConvolutionLayer(
+ 1U, 1U, conv_filt,
+ get_weights_accessor(data_path, total_path + "pointwise_weights.npy"),
+ get_weights_accessor(data_path, total_path + "pointwise_bias.npy"),
+ conv_pad_stride_info, 1, point_weights_quant_info)
+ << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.f));
+
+ return BranchLayer(std::move(sg));
+ }
};
/** Main program for MobileNetV1
*
* @param[in] argc Number of arguments
- * @param[in] argv Arguments ( [optional] Target (0 = NEON, 1 = OpenCL, 2 = OpenCL with Tuner),
- * [optional] Model ID (0 = MobileNetV1_1.0_224, 1 = MobileNetV1_0.75_160),
- * [optional] Path to the weights folder,
- * [optional] image,
- * [optional] labels,
- * [optional] data layout,
- * [optional] Fast math for convolution layer (0 = DISABLED, 1 = ENABLED) )
+ * @param[in] argv Arguments
*/
int main(int argc, char **argv)
{
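
Note that the hunks above also fold the standalone QASYMM8 MobileNet example (deleted below) into this file: an example-specific model-id option is registered on top of the common options, and the data type taken from CommonGraphParams selects which graph body is built. A rough condensation of that dispatch, with names as they appear in the patch:

    // In the constructor: register a per-example option next to the common ones
    model_id_opt = cmd_parser.add_option<SimpleOption<int>>("model-id", 0);
    model_id_opt->set_help("Mobilenet model id (0: 1.0_224, else: 0.75_160)");

    // In do_setup(): the common data-type parameter picks the graph body
    if(arm_compute::is_data_type_float(common_params.data_type))
    {
        create_graph_float(input_descriptor, model_id_opt->value()); // FP MobileNetV1 1.0_224 or 0.75_160
    }
    else
    {
        create_graph_qasymm(input_descriptor);                       // QASYMM8 MobileNetV1 with hard-coded quantization info
    }
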
diff --git a/examples/graph_mobilenet_qasymm8.cpp b/examples/graph_mobilenet_qasymm8.cpp
deleted file mode 100644
index 2801209985..0000000000
--- a/examples/graph_mobilenet_qasymm8.cpp
+++ /dev/null
@@ -1,239 +0,0 @@
-/*
- * Copyright (c) 2017-2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "arm_compute/graph.h"
-#include "support/ToolchainSupport.h"
-#include "utils/GraphUtils.h"
-#include "utils/Utils.h"
-
-#include <cstdlib>
-
-using namespace arm_compute;
-using namespace arm_compute::utils;
-using namespace arm_compute::graph::frontend;
-using namespace arm_compute::graph_utils;
-
-/** Example demonstrating how to implement QASYMM8 MobileNet's network using the Compute Library's graph API
- *
- * @param[in] argc Number of arguments
- * @param[in] argv Arguments ( [optional] Target (0 = NEON, 1 = OpenCL, 2 = OpenCL with Tuner), [optional] Path to the weights folder, [optional] npy_input, [optional] labels, [optional] Fast math for convolution layer (0 = DISABLED, 1 = ENABLED) )
- */
-class GraphMobileNetQASYMM8Example : public Example
-{
-public:
- void do_setup(int argc, char **argv) override
- {
- std::string data_path; /* Path to the trainable data */
- std::string input; /* Image data */
- std::string label; /* Label data */
-
- // Quantization info taken from the AndroidNN QASYMM8 MobileNet example
- const QuantizationInfo in_quant_info = QuantizationInfo(0.0078125f, 128);
- const QuantizationInfo mid_quant_info = QuantizationInfo(0.0784313753247f, 128);
-
- const std::vector<QuantizationInfo> conv_weights_quant_info =
- {
- QuantizationInfo(0.031778190285f, 156), // conv0
- QuantizationInfo(0.00604454148561f, 66) // conv14
- };
-
- const std::vector<QuantizationInfo> depth_weights_quant_info =
- {
- QuantizationInfo(0.254282623529f, 129), // dwsc1
- QuantizationInfo(0.12828284502f, 172), // dwsc2
- QuantizationInfo(0.265911251307f, 83), // dwsc3
- QuantizationInfo(0.0985597148538f, 30), // dwsc4
- QuantizationInfo(0.0631204470992f, 54), // dwsc5
- QuantizationInfo(0.0137207424268f, 141), // dwsc6
- QuantizationInfo(0.0817828401923f, 125), // dwsc7
- QuantizationInfo(0.0393880493939f, 164), // dwsc8
- QuantizationInfo(0.211694166064f, 129), // dwsc9
- QuantizationInfo(0.158015936613f, 103), // dwsc10
- QuantizationInfo(0.0182712618262f, 137), // dwsc11
- QuantizationInfo(0.0127998134121f, 134), // dwsc12
- QuantizationInfo(0.299285322428f, 161) // dwsc13
- };
-
- const std::vector<QuantizationInfo> point_weights_quant_info =
- {
- QuantizationInfo(0.0425766184926f, 129), // dwsc1
- QuantizationInfo(0.0250773020089f, 94), // dwsc2
- QuantizationInfo(0.015851572156f, 93), // dwsc3
- QuantizationInfo(0.0167811904103f, 98), // dwsc4
- QuantizationInfo(0.00951790809631f, 135), // dwsc5
- QuantizationInfo(0.00999817531556f, 128), // dwsc6
- QuantizationInfo(0.00590536883101f, 126), // dwsc7
- QuantizationInfo(0.00576109671965f, 133), // dwsc8
- QuantizationInfo(0.00830461271107f, 142), // dwsc9
- QuantizationInfo(0.0152327232063f, 72), // dwsc10
- QuantizationInfo(0.00741417845711f, 125), // dwsc11
- QuantizationInfo(0.0135628981516f, 142), // dwsc12
- QuantizationInfo(0.0338749065995f, 140) // dwsc13
- };
-
- // Set target. 0 (NEON), 1 (OpenCL), 2 (OpenCL with Tuner). By default it is NEON
- const int target = argc > 1 ? std::strtol(argv[1], nullptr, 10) : 0;
- Target target_hint = set_target_hint(target);
- FastMathHint fast_math_hint = FastMathHint::DISABLED;
-
- // Parse arguments
- if(argc < 2)
- {
- // Print help
- std::cout << "Usage: " << argv[0] << " [target] [path_to_data] [npy_input] [labels] [fast_math_hint]\n\n";
- std::cout << "No data folder provided: using random values\n\n";
- }
- else if(argc == 2)
- {
- std::cout << "Usage: " << argv[0] << " " << argv[1] << " [path_to_data] [npy_input] [labels] [fast_math_hint]\n\n";
- std::cout << "No input provided: using random values\n\n";
- }
- else if(argc == 4)
- {
- data_path = argv[2];
- input = argv[3];
- std::cout << "Usage: " << argv[0] << " " << argv[1] << " " << argv[2] << " " << argv[3] << " [labels] [fast_math_hint]\n\n";
- std::cout << "No text file with labels provided: skipping output accessor\n\n";
- }
- else if(argc == 5)
- {
- data_path = argv[2];
- input = argv[3];
- label = argv[4];
- std::cout << "Usage: " << argv[0] << " " << argv[1] << " " << argv[2] << " " << argv[3] << " " << argv[4] << " [fast_math_hint]\n\n";
- std::cout << "No fast math info provided: disabling fast math\n\n";
- }
- else
- {
- data_path = argv[2];
- input = argv[3];
- label = argv[4];
- fast_math_hint = (std::strtol(argv[5], nullptr, 1) == 0) ? FastMathHint::DISABLED : FastMathHint::ENABLED;
- }
-
- graph << target_hint
- << DepthwiseConvolutionMethod::OPTIMIZED_3x3 // FIXME(COMPMID-1073): Add heuristics to automatically call the optimized 3x3 method
- << fast_math_hint
- << InputLayer(TensorDescriptor(TensorShape(224U, 224U, 3U, 1U), DataType::QASYMM8, in_quant_info),
- get_weights_accessor(data_path, "/cnn_data/mobilenet_qasymm8_model/" + input))
- << ConvolutionLayer(
- 3U, 3U, 32U,
- get_weights_accessor(data_path, "/cnn_data/mobilenet_qasymm8_model/Conv2d_0_weights.npy"),
- get_weights_accessor(data_path, "/cnn_data/mobilenet_qasymm8_model/Conv2d_0_bias.npy"),
- PadStrideInfo(2U, 2U, 0U, 1U, 0U, 1U, DimensionRoundingType::FLOOR),
- 1, conv_weights_quant_info.at(0), mid_quant_info)
- << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.f));
- graph << get_dwsc_node(data_path, "Conv2d_1", 64U, PadStrideInfo(1U, 1U, 1U, 1U), PadStrideInfo(1U, 1U, 0U, 0U), depth_weights_quant_info.at(0), point_weights_quant_info.at(0));
- graph << get_dwsc_node(data_path, "Conv2d_2", 128U, PadStrideInfo(2U, 2U, 0U, 1U, 0U, 1U, DimensionRoundingType::FLOOR), PadStrideInfo(1U, 1U, 0U, 0U), depth_weights_quant_info.at(1),
- point_weights_quant_info.at(1));
- graph << get_dwsc_node(data_path, "Conv2d_3", 128U, PadStrideInfo(1U, 1U, 1U, 1U, 1U, 1U, DimensionRoundingType::FLOOR), PadStrideInfo(1U, 1U, 0U, 0U), depth_weights_quant_info.at(2),
- point_weights_quant_info.at(2));
- graph << get_dwsc_node(data_path, "Conv2d_4", 256U, PadStrideInfo(2U, 2U, 0U, 1U, 0U, 1U, DimensionRoundingType::FLOOR), PadStrideInfo(1U, 1U, 0U, 0U), depth_weights_quant_info.at(3),
- point_weights_quant_info.at(3));
- graph << get_dwsc_node(data_path, "Conv2d_5", 256U, PadStrideInfo(1U, 1U, 1U, 1U, 1U, 1U, DimensionRoundingType::FLOOR), PadStrideInfo(1U, 1U, 0U, 0U), depth_weights_quant_info.at(4),
- point_weights_quant_info.at(4));
- graph << get_dwsc_node(data_path, "Conv2d_6", 512U, PadStrideInfo(2U, 2U, 0U, 1U, 0U, 1U, DimensionRoundingType::FLOOR), PadStrideInfo(1U, 1U, 0U, 0U), depth_weights_quant_info.at(5),
- point_weights_quant_info.at(5));
- graph << get_dwsc_node(data_path, "Conv2d_7", 512U, PadStrideInfo(1U, 1U, 1U, 1U, 1U, 1U, DimensionRoundingType::FLOOR), PadStrideInfo(1U, 1U, 0U, 0U), depth_weights_quant_info.at(6),
- point_weights_quant_info.at(6));
- graph << get_dwsc_node(data_path, "Conv2d_8", 512U, PadStrideInfo(1U, 1U, 1U, 1U, 1U, 1U, DimensionRoundingType::FLOOR), PadStrideInfo(1U, 1U, 0U, 0U), depth_weights_quant_info.at(7),
- point_weights_quant_info.at(7));
- graph << get_dwsc_node(data_path, "Conv2d_9", 512U, PadStrideInfo(1U, 1U, 1U, 1U, 1U, 1U, DimensionRoundingType::FLOOR), PadStrideInfo(1U, 1U, 0U, 0U), depth_weights_quant_info.at(8),
- point_weights_quant_info.at(8));
- graph << get_dwsc_node(data_path, "Conv2d_10", 512U, PadStrideInfo(1U, 1U, 1U, 1U, 1U, 1U, DimensionRoundingType::FLOOR), PadStrideInfo(1U, 1U, 0U, 0U), depth_weights_quant_info.at(9),
- point_weights_quant_info.at(9));
- graph << get_dwsc_node(data_path, "Conv2d_11", 512U, PadStrideInfo(1U, 1U, 1U, 1U, 1U, 1U, DimensionRoundingType::FLOOR), PadStrideInfo(1U, 1U, 0U, 0U), depth_weights_quant_info.at(10),
- point_weights_quant_info.at(10));
- graph << get_dwsc_node(data_path, "Conv2d_12", 1024U, PadStrideInfo(2U, 2U, 0U, 1U, 0U, 1U, DimensionRoundingType::FLOOR), PadStrideInfo(1U, 1U, 0U, 0U), depth_weights_quant_info.at(11),
- point_weights_quant_info.at(11));
- graph << get_dwsc_node(data_path, "Conv2d_13", 1024U, PadStrideInfo(1U, 1U, 1U, 1U, 1U, 1U, DimensionRoundingType::FLOOR), PadStrideInfo(1U, 1U, 0U, 0U), depth_weights_quant_info.at(12),
- point_weights_quant_info.at(12))
- << PoolingLayer(PoolingLayerInfo(PoolingType::AVG))
- << ConvolutionLayer(
- 1U, 1U, 1001U,
- get_weights_accessor(data_path, "/cnn_data/mobilenet_qasymm8_model/Logits_Conv2d_1c_1x1_weights.npy"),
- get_weights_accessor(data_path, "/cnn_data/mobilenet_qasymm8_model/Logits_Conv2d_1c_1x1_bias.npy"),
- PadStrideInfo(1U, 1U, 0U, 0U), 1, conv_weights_quant_info.at(1))
- << ReshapeLayer(TensorShape(1001U))
- << SoftmaxLayer()
- << OutputLayer(get_output_accessor(label, 5));
-
- // Finalize graph
- GraphConfig config;
- config.use_tuner = (target == 2);
- graph.finalize(target_hint, config);
- }
- void do_run() override
- {
- // Run graph
- graph.run();
- }
-
-private:
- Stream graph{ 0, "MobileNetV1_QASYMM8" };
-
- /** This function produces a depthwise separable convolution node (i.e. depthwise + pointwise layers) with ReLU6 activation after each layer.
- *
- * @param[in] data_path Path to trainable data folder
- * @param[in] param_path Prefix of specific set of weights/biases data
- * @param[in] conv_filt Filters depths for pointwise convolution
- * @param[in] dwc_pad_stride_info PadStrideInfo for depthwise convolution
- * @param[in] conv_pad_stride_info PadStrideInfo for pointwise convolution
- * @param[in] depth_weights_quant_info QuantizationInfo for depthwise convolution's weights
- * @param[in] point_weights_quant_info QuantizationInfo for pointwise convolution's weights
- *
- * @return The complete dwsc node
- */
- BranchLayer get_dwsc_node(const std::string &data_path, std::string &&param_path,
- const unsigned int conv_filt,
- PadStrideInfo dwc_pad_stride_info, PadStrideInfo conv_pad_stride_info,
- QuantizationInfo depth_weights_quant_info, QuantizationInfo point_weights_quant_info)
- {
- std::string total_path = "/cnn_data/mobilenet_qasymm8_model/" + param_path + "_";
- SubStream sg(graph);
-
- sg << DepthwiseConvolutionLayer(
- 3U, 3U,
- get_weights_accessor(data_path, total_path + "depthwise_weights.npy"),
- get_weights_accessor(data_path, total_path + "depthwise_bias.npy"),
- dwc_pad_stride_info, depth_weights_quant_info)
- << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.f))
- << ConvolutionLayer(
- 1U, 1U, conv_filt,
- get_weights_accessor(data_path, total_path + "pointwise_weights.npy"),
- get_weights_accessor(data_path, total_path + "pointwise_bias.npy"),
- conv_pad_stride_info, 1, point_weights_quant_info)
- << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.f));
-
- return BranchLayer(std::move(sg));
- }
-};
-/** Main program for MobileNetQASYMM8
- *
- * @param[in] argc Number of arguments
- * @param[in] argv Arguments ( [optional] Path to the weights folder, [optional] npy_input, [optional] labels, [optional] Fast math for convolution layer (0 = DISABLED, 1 = ENABLED) )
- */
-int main(int argc, char **argv)
-{
- return arm_compute::utils::run_example<GraphMobileNetQASYMM8Example>(argc, argv);
-}
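The standalone QASYMM8 MobileNet example removed above carried its quantization metadata in code; the QASYMM8 path is now handled inside the MobileNet example via get_dwsc_node_qasymm(). For reference, a quantized input is declared by attaching a QuantizationInfo(scale, offset) to the tensor descriptor. A minimal sketch using the in_quant_info values quoted in the removed file, assuming a Stream graph and populated common_params as in the converted examples:

const QuantizationInfo in_quant_info = QuantizationInfo(0.0078125f, 128); // real = scale * (quantized - offset)
graph << InputLayer(TensorDescriptor(TensorShape(224U, 224U, 3U, 1U), DataType::QASYMM8, in_quant_info),
                    get_input_accessor(common_params));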
diff --git a/examples/graph_resnet50.cpp b/examples/graph_resnet50.cpp
index bafa9a5852..66fc6e869d 100644
--- a/examples/graph_resnet50.cpp
+++ b/examples/graph_resnet50.cpp
@@ -23,11 +23,10 @@
*/
#include "arm_compute/graph.h"
#include "support/ToolchainSupport.h"
+#include "utils/CommonGraphOptions.h"
#include "utils/GraphUtils.h"
#include "utils/Utils.h"
-#include <cstdlib>
-
using namespace arm_compute::utils;
using namespace arm_compute::graph::frontend;
using namespace arm_compute::graph_utils;
@@ -35,72 +34,47 @@ using namespace arm_compute::graph_utils;
/** Example demonstrating how to implement ResNet50 network using the Compute Library's graph API
*
* @param[in] argc Number of arguments
- * @param[in] argv Arguments ( [optional] Target (0 = NEON, 1 = OpenCL, 2 = OpenCL with Tuner), [optional] Path to the weights folder, [optional] image, [optional] labels, [optional] Fast math for convolution layer (0 = DISABLED, 1 = ENABLED) )
+ * @param[in] argv Arguments
*/
class GraphResNet50Example : public Example
{
public:
- void do_setup(int argc, char **argv) override
+ GraphResNet50Example()
+ : cmd_parser(), common_opts(cmd_parser), common_params(), graph(0, "ResNet50")
{
- std::string data_path; /* Path to the trainable data */
- std::string image; /* Image data */
- std::string label; /* Label data */
-
- // Create a preprocessor object
- const std::array<float, 3> mean_rgb{ { 122.68f, 116.67f, 104.01f } };
- std::unique_ptr<IPreprocessor> preprocessor = arm_compute::support::cpp14::make_unique<CaffePreproccessor>(mean_rgb,
- false /* Do not convert to BGR */);
+ }
+ bool do_setup(int argc, char **argv) override
+ {
+ // Parse arguments
+ cmd_parser.parse(argc, argv);
- // Set target. 0 (NEON), 1 (OpenCL), 2 (OpenCL with Tuner). By default it is NEON
- const int target = argc > 1 ? std::strtol(argv[1], nullptr, 10) : 0;
- Target target_hint = set_target_hint(target);
- FastMathHint fast_math_hint = FastMathHint::DISABLED;
+ // Consume common parameters
+ common_params = consume_common_graph_parameters(common_opts);
- // Parse arguments
- if(argc < 2)
- {
- // Print help
- std::cout << "Usage: " << argv[0] << " [target] [path_to_data] [image] [labels] [fast_math_hint]\n\n";
- std::cout << "No data folder provided: using random values\n\n";
- }
- else if(argc == 2)
- {
- std::cout << "Usage: " << argv[0] << " " << argv[1] << " [path_to_data] [image] [labels] [fast_math_hint]\n\n";
- std::cout << "No data folder provided: using random values\n\n";
- }
- else if(argc == 3)
+ // Return when help menu is requested
+ if(common_params.help)
{
- data_path = argv[2];
- std::cout << "Usage: " << argv[0] << " " << argv[1] << " " << argv[2] << " [image] [labels] [fast_math_hint]\n\n";
- std::cout << "No image provided: using random values\n\n";
- }
- else if(argc == 4)
- {
- data_path = argv[2];
- image = argv[3];
- std::cout << "Usage: " << argv[0] << " " << argv[1] << " " << argv[2] << " " << argv[3] << " [labels] [fast_math_hint]\n\n";
- std::cout << "No text file with labels provided: skipping output accessor\n\n";
- }
- else if(argc == 5)
- {
- data_path = argv[2];
- image = argv[3];
- label = argv[4];
- std::cout << "Usage: " << argv[0] << " " << argv[1] << " " << argv[2] << " " << argv[3] << " " << argv[4] << " [fast_math_hint]\n\n";
- std::cout << "No fast math info provided: disabling fast math\n\n";
- }
- else
- {
- data_path = argv[2];
- image = argv[3];
- label = argv[4];
- fast_math_hint = (std::strtol(argv[5], nullptr, 1) == 0) ? FastMathHint::DISABLED : FastMathHint::ENABLED;
+ cmd_parser.print_help(argv[0]);
+ return false;
}
- graph << target_hint
- << fast_math_hint
- << InputLayer(TensorDescriptor(TensorShape(224U, 224U, 3U, 1U), DataType::F32),
- get_input_accessor(image, std::move(preprocessor), false /* Do not convert to BGR */))
+ // Checks
+ ARM_COMPUTE_ERROR_ON_MSG(arm_compute::is_data_type_quantized_asymmetric(common_params.data_type), "Unsupported data type!");
+
+ // Print parameter values
+ std::cout << common_params << std::endl;
+
+ // Get trainable parameters data path
+ std::string data_path = common_params.data_path;
+
+ // Create a preprocessor object
+ const std::array<float, 3> mean_rgb{ { 122.68f, 116.67f, 104.01f } };
+ std::unique_ptr<IPreprocessor> preprocessor = arm_compute::support::cpp14::make_unique<CaffePreproccessor>(mean_rgb,
+ false /* Do not convert to BGR */);
+ graph << common_params.target
+ << common_params.fast_math_hint
+ << InputLayer(TensorDescriptor(TensorShape(224U, 224U, 3U, 1U), common_params.data_type),
+ get_input_accessor(common_params, std::move(preprocessor), false /* Do not convert to BGR */))
<< ConvolutionLayer(
7U, 7U, 64U,
get_weights_accessor(data_path, "/cnn_data/resnet50_model/conv1_weights.npy"),
@@ -131,12 +105,15 @@ public:
.set_name("logits/convolution")
<< FlattenLayer().set_name("predictions/Reshape")
<< SoftmaxLayer().set_name("predictions/Softmax")
- << OutputLayer(get_output_accessor(label, 5));
+ << OutputLayer(get_output_accessor(common_params, 5));
// Finalize graph
GraphConfig config;
- config.use_tuner = (target == 2);
- graph.finalize(target_hint, config);
+ config.num_threads = common_params.threads;
+ config.use_tuner = common_params.enable_tuner;
+ graph.finalize(common_params.target, config);
+
+ return true;
}
void do_run() override
@@ -146,7 +123,10 @@ public:
}
private:
- Stream graph{ 0, "ResNet50" };
+ CommandLineParser cmd_parser;
+ CommonGraphOptions common_opts;
+ CommonGraphParams common_params;
+ Stream graph;
void add_residual_block(const std::string &data_path, const std::string &name, unsigned int base_depth, unsigned int num_units, unsigned int stride)
{
@@ -252,7 +232,7 @@ private:
/** Main program for ResNet50
*
* @param[in] argc Number of arguments
- * @param[in] argv Arguments ( [optional] Target (0 = NEON, 1 = OpenCL, 2 = OpenCL with Tuner), [optional] Path to the weights folder, [optional] image, [optional] labels, [optional] Fast math for convolution layer (0 = DISABLED, 1 = ENABLED) )
+ * @param[in] argv Arguments
*/
int main(int argc, char **argv)
{
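All of the converted graph examples now share the same command-line handling: a CommandLineParser plus CommonGraphOptions feed a CommonGraphParams struct, do_setup() returns false when help is requested (presumably so the framework skips running the graph), and the finalize step picks up the thread count and tuner flag from the common parameters. A minimal skeleton of that pattern, assembled only from the helpers visible in the diffs above; the class name and the network body are placeholders:

#include "arm_compute/graph.h"
#include "utils/CommonGraphOptions.h"
#include "utils/GraphUtils.h"
#include "utils/Utils.h"

using namespace arm_compute::utils;
using namespace arm_compute::graph::frontend;
using namespace arm_compute::graph_utils;

class GraphExampleSkeleton : public Example
{
public:
    GraphExampleSkeleton()
        : cmd_parser(), common_opts(cmd_parser), common_params(), graph(0, "Skeleton")
    {
    }
    bool do_setup(int argc, char **argv) override
    {
        // Parse and consume the common options (target, data type, paths, threads, ...)
        cmd_parser.parse(argc, argv);
        common_params = consume_common_graph_parameters(common_opts);

        // Print help and bail out before building the graph
        if(common_params.help)
        {
            cmd_parser.print_help(argv[0]);
            return false;
        }

        graph << common_params.target
              << common_params.fast_math_hint
              << InputLayer(TensorDescriptor(TensorShape(224U, 224U, 3U, 1U), common_params.data_type),
                            get_input_accessor(common_params))
              // ... network layers go here ...
              << OutputLayer(get_output_accessor(common_params, 5));

        // Finalize with the thread count and tuner flag from the common parameters
        GraphConfig config;
        config.num_threads = common_params.threads;
        config.use_tuner   = common_params.enable_tuner;
        graph.finalize(common_params.target, config);

        return true;
    }
    void do_run() override
    {
        graph.run();
    }

private:
    CommandLineParser  cmd_parser;
    CommonGraphOptions common_opts;
    CommonGraphParams  common_params;
    Stream             graph;
};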
diff --git a/examples/graph_resnext50.cpp b/examples/graph_resnext50.cpp
index f96a02e6d6..c0a2308a1f 100644
--- a/examples/graph_resnext50.cpp
+++ b/examples/graph_resnext50.cpp
@@ -23,11 +23,10 @@
*/
#include "arm_compute/graph.h"
#include "support/ToolchainSupport.h"
+#include "utils/CommonGraphOptions.h"
#include "utils/GraphUtils.h"
#include "utils/Utils.h"
-#include <cstdlib>
-
using namespace arm_compute::utils;
using namespace arm_compute::graph::frontend;
using namespace arm_compute::graph_utils;
@@ -35,67 +34,43 @@ using namespace arm_compute::graph_utils;
/** Example demonstrating how to implement ResNeXt50 network using the Compute Library's graph API
*
* @param[in] argc Number of arguments
- * @param[in] argv Arguments ( [optional] Target (0 = NEON, 1 = OpenCL, 2 = OpenCL with Tuner), [optional] Path to the weights folder, [optional] npy_in, [optional] npy_out, [optional] Fast math for convolution layer (0 = DISABLED, 1 = ENABLED) )
+ * @param[in] argv Arguments
*/
class GraphResNeXt50Example : public Example
{
public:
- void do_setup(int argc, char **argv) override
+ GraphResNeXt50Example()
+ : cmd_parser(), common_opts(cmd_parser), common_params(), graph(0, "ResNeXt50")
+ {
+ }
+ bool do_setup(int argc, char **argv) override
{
- std::string data_path; /* Path to the trainable data */
- std::string npy_in; /* Input npy data */
- std::string npy_out; /* Output npy data */
+ // Parse arguments
+ cmd_parser.parse(argc, argv);
- // Set target. 0 (NEON), 1 (OpenCL), 2 (OpenCL with Tuner). By default it is NEON
- const int target = argc > 1 ? std::strtol(argv[1], nullptr, 10) : 0;
- Target target_hint = set_target_hint(target);
- FastMathHint fast_math_hint = FastMathHint::DISABLED;
+ // Consume common parameters
+ common_params = consume_common_graph_parameters(common_opts);
- // Parse arguments
- if(argc < 2)
+ // Return when help menu is requested
+ if(common_params.help)
{
- // Print help
- std::cout << "Usage: " << argv[0] << " [target] [path_to_data] [npy_in] [npy_out] [fast_math_hint]\n\n";
- std::cout << "No data folder provided: using random values\n\n";
- }
- else if(argc == 2)
- {
- std::cout << "Usage: " << argv[0] << " " << argv[1] << " [path_to_data] [npy_in] [npy_out] [fast_math_hint]\n\n";
- std::cout << "No data folder provided: using random values\n\n";
- }
- else if(argc == 3)
- {
- data_path = argv[2];
- std::cout << "Usage: " << argv[0] << " " << argv[1] << " " << argv[2] << " [npy_in] [npy_out] [fast_math_hint]\n\n";
- std::cout << "No input npy file provided: using random values\n\n";
- }
- else if(argc == 4)
- {
- data_path = argv[2];
- npy_in = argv[3];
- std::cout << "Usage: " << argv[0] << " " << argv[1] << " " << argv[2] << " " << argv[3] << " [npy_out] [fast_math_hint]\n\n";
- std::cout << "No output npy file provided: skipping output accessor\n\n";
- }
- else if(argc == 5)
- {
- data_path = argv[2];
- npy_in = argv[3];
- npy_out = argv[4];
- std::cout << "Usage: " << argv[0] << " " << argv[1] << " " << argv[2] << " " << argv[3] << " " << argv[4] << " [fast_math_hint]\n\n";
- std::cout << "No fast math info provided: disabling fast math\n\n";
- }
- else
- {
- data_path = argv[2];
- npy_in = argv[3];
- npy_out = argv[4];
- fast_math_hint = (std::strtol(argv[5], nullptr, 1) == 0) ? FastMathHint::DISABLED : FastMathHint::ENABLED;
+ cmd_parser.print_help(argv[0]);
+ return false;
}
- graph << target_hint
- << fast_math_hint
- << InputLayer(TensorDescriptor(TensorShape(224U, 224U, 3U, 1U), DataType::F32),
- get_input_accessor(npy_in))
+ // Checks
+ ARM_COMPUTE_ERROR_ON_MSG(arm_compute::is_data_type_quantized_asymmetric(common_params.data_type), "Unsupported data type!");
+
+ // Print parameter values
+ std::cout << common_params << std::endl;
+
+ // Get trainable parameters data path
+ std::string data_path = common_params.data_path;
+
+ graph << common_params.target
+ << common_params.fast_math_hint
+ << InputLayer(TensorDescriptor(TensorShape(224U, 224U, 3U, 1U), common_params.data_type),
+ get_input_accessor(common_params))
<< ScaleLayer(get_weights_accessor(data_path, "/cnn_data/resnext50_model/bn_data_mul.npy"),
get_weights_accessor(data_path, "/cnn_data/resnext50_model/bn_data_add.npy"))
.set_name("bn_data/Scale")
@@ -115,12 +90,15 @@ public:
graph << PoolingLayer(PoolingLayerInfo(PoolingType::AVG)).set_name("pool1")
<< FlattenLayer().set_name("predictions/Reshape")
- << OutputLayer(get_npy_output_accessor(npy_out, TensorShape(2048U), DataType::F32));
+ << OutputLayer(get_npy_output_accessor(common_params.labels, TensorShape(2048U), DataType::F32));
// Finalize graph
GraphConfig config;
- config.use_tuner = (target == 2);
- graph.finalize(target_hint, config);
+ config.num_threads = common_params.threads;
+ config.use_tuner = common_params.enable_tuner;
+ graph.finalize(common_params.target, config);
+
+ return true;
}
void do_run() override
@@ -130,7 +108,10 @@ public:
}
private:
- Stream graph{ 0, "ResNeXt50" };
+ CommandLineParser cmd_parser;
+ CommonGraphOptions common_opts;
+ CommonGraphParams common_params;
+ Stream graph;
void add_residual_block(const std::string &data_path, unsigned int base_depth, unsigned int stage, unsigned int num_units, unsigned int stride_conv_unit1)
{
@@ -200,7 +181,7 @@ private:
/** Main program for ResNeXt50
*
* @param[in] argc Number of arguments
- * @param[in] argv Arguments ( [[optional] Target (0 = NEON, 1 = OpenCL, 2 = OpenCL with Tuner), [optional] Path to the weights folder, [optional] npy_in, [optional] npy_out )
+ * @param[in] argv Arguments
*/
int main(int argc, char **argv)
{
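Unlike the classification examples, ResNeXt50 does not end in a softmax: it dumps the raw 2048-element pool1 output to an npy file, and after the conversion the destination path is taken from the labels field of the common parameters. A one-line sketch of that output stage, assuming common_params is populated as above:

graph << OutputLayer(get_npy_output_accessor(common_params.labels, TensorShape(2048U), DataType::F32));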
diff --git a/examples/graph_squeezenet.cpp b/examples/graph_squeezenet.cpp
index b632688839..a290b91148 100644
--- a/examples/graph_squeezenet.cpp
+++ b/examples/graph_squeezenet.cpp
@@ -23,85 +23,58 @@
*/
#include "arm_compute/graph.h"
#include "support/ToolchainSupport.h"
+#include "utils/CommonGraphOptions.h"
#include "utils/GraphUtils.h"
#include "utils/Utils.h"
-#include <cstdlib>
-#include <tuple>
-
using namespace arm_compute::utils;
using namespace arm_compute::graph::frontend;
using namespace arm_compute::graph_utils;
-using namespace arm_compute::logging;
/** Example demonstrating how to implement Squeezenet's network using the Compute Library's graph API
*
* @param[in] argc Number of arguments
- * @param[in] argv Arguments ( [optional] Target (0 = NEON, 1 = OpenCL, 2 = OpenCL with Tuner), [optional] Path to the weights folder, [optional] image, [optional] labels, [optional] Fast math for convolution layer (0 = DISABLED, 1 = ENABLED) )
+ * @param[in] argv Arguments
*/
class GraphSqueezenetExample : public Example
{
public:
- void do_setup(int argc, char **argv) override
+ GraphSqueezenetExample()
+ : cmd_parser(), common_opts(cmd_parser), common_params(), graph(0, "SqueezeNetV1")
{
- std::string data_path; /* Path to the trainable data */
- std::string image; /* Image data */
- std::string label; /* Label data */
-
- // Create a preprocessor object
- const std::array<float, 3> mean_rgb{ { 122.68f, 116.67f, 104.01f } };
- std::unique_ptr<IPreprocessor> preprocessor = arm_compute::support::cpp14::make_unique<CaffePreproccessor>(mean_rgb);
+ }
+ bool do_setup(int argc, char **argv) override
+ {
+ // Parse arguments
+ cmd_parser.parse(argc, argv);
- // Set target. 0 (NEON), 1 (OpenCL), 2 (OpenCL with Tuner). By default it is NEON
- const int target = argc > 1 ? std::strtol(argv[1], nullptr, 10) : 0;
- Target target_hint = set_target_hint(target);
- FastMathHint fast_math_hint = FastMathHint::DISABLED;
+ // Consume common parameters
+ common_params = consume_common_graph_parameters(common_opts);
- // Parse arguments
- if(argc < 2)
- {
- // Print help
- std::cout << "Usage: " << argv[0] << " [target] [path_to_data] [image] [labels] [fast_math_hint]\n\n";
- std::cout << "No data folder provided: using random values\n\n";
- }
- else if(argc == 2)
+ // Return when help menu is requested
+ if(common_params.help)
{
- std::cout << "Usage: " << argv[0] << " " << argv[1] << " [path_to_data] [image] [labels] [fast_math_hint]\n\n";
- std::cout << "No data folder provided: using random values\n\n";
- }
- else if(argc == 3)
- {
- data_path = argv[2];
- std::cout << "Usage: " << argv[0] << " " << argv[1] << " " << argv[2] << " [image] [labels] [fast_math_hint]\n\n";
- std::cout << "No image provided: using random values\n\n";
- }
- else if(argc == 4)
- {
- data_path = argv[2];
- image = argv[3];
- std::cout << "Usage: " << argv[0] << " " << argv[1] << " " << argv[2] << " " << argv[3] << " [labels] [fast_math_hint]\n\n";
- std::cout << "No text file with labels provided: skipping output accessor\n\n";
- }
- else if(argc == 5)
- {
- data_path = argv[2];
- image = argv[3];
- label = argv[4];
- std::cout << "Usage: " << argv[0] << " " << argv[1] << " " << argv[2] << " " << argv[3] << " " << argv[4] << " [fast_math_hint]\n\n";
- std::cout << "No fast math info provided: disabling fast math\n\n";
- }
- else
- {
- data_path = argv[2];
- image = argv[3];
- label = argv[4];
- fast_math_hint = (std::strtol(argv[5], nullptr, 1) == 0) ? FastMathHint::DISABLED : FastMathHint::ENABLED;
+ cmd_parser.print_help(argv[0]);
+ return false;
}
- graph << target_hint
- << fast_math_hint
- << InputLayer(TensorDescriptor(TensorShape(224U, 224U, 3U, 1U), DataType::F32),
- get_input_accessor(image, std::move(preprocessor)))
+ // Checks
+ ARM_COMPUTE_ERROR_ON_MSG(arm_compute::is_data_type_quantized_asymmetric(common_params.data_type), "Unsupported data type!");
+
+ // Print parameter values
+ std::cout << common_params << std::endl;
+
+ // Get trainable parameters data path
+ std::string data_path = common_params.data_path;
+
+ // Create a preprocessor object
+ const std::array<float, 3> mean_rgb{ { 122.68f, 116.67f, 104.01f } };
+ std::unique_ptr<IPreprocessor> preprocessor = arm_compute::support::cpp14::make_unique<CaffePreproccessor>(mean_rgb);
+
+ graph << common_params.target
+ << common_params.fast_math_hint
+ << InputLayer(TensorDescriptor(TensorShape(224U, 224U, 3U, 1U), common_params.data_type),
+ get_input_accessor(common_params, std::move(preprocessor)))
<< ConvolutionLayer(
7U, 7U, 96U,
get_weights_accessor(data_path, "/cnn_data/squeezenet_v1.0_model/conv1_w.npy"),
@@ -176,12 +149,15 @@ public:
<< PoolingLayer(PoolingLayerInfo(PoolingType::AVG))
<< FlattenLayer()
<< SoftmaxLayer()
- << OutputLayer(get_output_accessor(label, 5));
+ << OutputLayer(get_output_accessor(common_params, 5));
// Finalize graph
GraphConfig config;
- config.use_tuner = (target == 2);
- graph.finalize(target_hint, config);
+ config.num_threads = common_params.threads;
+ config.use_tuner = common_params.enable_tuner;
+ graph.finalize(common_params.target, config);
+
+ return true;
}
void do_run() override
{
@@ -190,7 +166,10 @@ public:
}
private:
- Stream graph{ 0, "SqueezeNetV1" };
+ CommandLineParser cmd_parser;
+ CommonGraphOptions common_opts;
+ CommonGraphParams common_params;
+ Stream graph;
BranchLayer get_expand_fire_node(const std::string &data_path, std::string &&param_path, unsigned int expand1_filt, unsigned int expand3_filt)
{
@@ -218,7 +197,7 @@ private:
/** Main program for Squeezenet v1.0
*
* @param[in] argc Number of arguments
- * @param[in] argv Arguments ( [optional] Target (0 = NEON, 1 = OpenCL, 2 = OpenCL with Tuner), [optional] Path to the weights folder, [optional] image, [optional] labels, [optional] Fast math for convolution layer (0 = DISABLED, 1 = ENABLED) )
+ * @param[in] argv Arguments
*/
int main(int argc, char **argv)
{
diff --git a/examples/graph_squeezenet_v1_1.cpp b/examples/graph_squeezenet_v1_1.cpp
index b2c5a442cd..8ce928c5b1 100644
--- a/examples/graph_squeezenet_v1_1.cpp
+++ b/examples/graph_squeezenet_v1_1.cpp
@@ -23,12 +23,10 @@
*/
#include "arm_compute/graph.h"
#include "support/ToolchainSupport.h"
+#include "utils/CommonGraphOptions.h"
#include "utils/GraphUtils.h"
#include "utils/Utils.h"
-#include <cstdlib>
-#include <tuple>
-
using namespace arm_compute::utils;
using namespace arm_compute::graph::frontend;
using namespace arm_compute::graph_utils;
@@ -36,71 +34,47 @@ using namespace arm_compute::graph_utils;
/** Example demonstrating how to implement Squeezenet's v1.1 network using the Compute Library's graph API
*
* @param[in] argc Number of arguments
- * @param[in] argv Arguments ( [optional] Target (0 = NEON, 1 = OpenCL, 2 = OpenCL with Tuner), [optional] Path to the weights folder, [optional] image, [optional] labels, [optional] Fast math for convolution layer (0 = DISABLED, 1 = ENABLED) )
+ * @param[in] argv Arguments
*/
class GraphSqueezenet_v1_1Example : public Example
{
public:
- void do_setup(int argc, char **argv) override
+ GraphSqueezenet_v1_1Example()
+ : cmd_parser(), common_opts(cmd_parser), common_params(), graph(0, "SqueezeNetV1.1")
{
- std::string data_path; /* Path to the trainable data */
- std::string image; /* Image data */
- std::string label; /* Label data */
-
- // Create a preprocessor object
- const std::array<float, 3> mean_rgb{ { 122.68f, 116.67f, 104.01f } };
- std::unique_ptr<IPreprocessor> preprocessor = arm_compute::support::cpp14::make_unique<CaffePreproccessor>(mean_rgb);
+ }
+ bool do_setup(int argc, char **argv) override
+ {
+ // Parse arguments
+ cmd_parser.parse(argc, argv);
- // Set target. 0 (NEON), 1 (OpenCL), 2 (OpenCL with Tuner). By default it is NEON
- const int target = argc > 1 ? std::strtol(argv[1], nullptr, 10) : 0;
- Target target_hint = set_target_hint(target);
- FastMathHint fast_math_hint = FastMathHint::DISABLED;
+ // Consume common parameters
+ common_params = consume_common_graph_parameters(common_opts);
- // Parse arguments
- if(argc < 2)
- {
- // Print help
- std::cout << "Usage: " << argv[0] << " [target] [path_to_data] [image] [labels] [fast_math_hint]\n\n";
- std::cout << "No data folder provided: using random values\n\n";
- }
- else if(argc == 2)
+ // Return when help menu is requested
+ if(common_params.help)
{
- std::cout << "Usage: " << argv[0] << " " << argv[1] << " [path_to_data] [image] [labels] [fast_math_hint]\n\n";
- std::cout << "No data folder provided: using random values\n\n";
- }
- else if(argc == 3)
- {
- data_path = argv[2];
- std::cout << "Usage: " << argv[0] << " " << argv[1] << " " << argv[2] << " [image] [labels] [fast_math_hint]\n\n";
- std::cout << "No image provided: using random values\n\n";
- }
- else if(argc == 4)
- {
- data_path = argv[2];
- image = argv[3];
- std::cout << "Usage: " << argv[0] << " " << argv[1] << " " << argv[2] << " " << argv[3] << " [labels] [fast_math_hint]\n\n";
- std::cout << "No text file with labels provided: skipping output accessor\n\n";
- }
- else if(argc == 5)
- {
- data_path = argv[2];
- image = argv[3];
- label = argv[4];
- std::cout << "Usage: " << argv[0] << " " << argv[1] << " " << argv[2] << " " << argv[3] << " " << argv[4] << " [fast_math_hint]\n\n";
- std::cout << "No fast math info provided: disabling fast math\n\n";
- }
- else
- {
- data_path = argv[2];
- image = argv[3];
- label = argv[4];
- fast_math_hint = (std::strtol(argv[5], nullptr, 1) == 0) ? FastMathHint::DISABLED : FastMathHint::ENABLED;
+ cmd_parser.print_help(argv[0]);
+ return false;
}
- graph << target_hint
- << fast_math_hint
- << InputLayer(TensorDescriptor(TensorShape(227U, 227U, 3U, 1U), DataType::F32),
- get_input_accessor(image, std::move(preprocessor)))
+ // Checks
+ ARM_COMPUTE_ERROR_ON_MSG(arm_compute::is_data_type_quantized_asymmetric(common_params.data_type), "Unsupported data type!");
+
+ // Print parameter values
+ std::cout << common_params << std::endl;
+
+ // Get trainable parameters data path
+ std::string data_path = common_params.data_path;
+
+ // Create a preprocessor object
+ const std::array<float, 3> mean_rgb{ { 122.68f, 116.67f, 104.01f } };
+ std::unique_ptr<IPreprocessor> preprocessor = arm_compute::support::cpp14::make_unique<CaffePreproccessor>(mean_rgb);
+
+ graph << common_params.target
+ << common_params.fast_math_hint
+ << InputLayer(TensorDescriptor(TensorShape(227U, 227U, 3U, 1U), common_params.data_type),
+ get_input_accessor(common_params, std::move(preprocessor)))
<< ConvolutionMethod::DIRECT
<< ConvolutionLayer(
3U, 3U, 64U,
@@ -177,12 +151,15 @@ public:
<< PoolingLayer(PoolingLayerInfo(PoolingType::AVG))
<< FlattenLayer()
<< SoftmaxLayer()
- << OutputLayer(get_output_accessor(label, 5));
+ << OutputLayer(get_output_accessor(common_params, 5));
// Finalize graph
GraphConfig config;
- config.use_tuner = (target == 2);
- graph.finalize(target_hint, config);
+ config.num_threads = common_params.threads;
+ config.use_tuner = common_params.enable_tuner;
+ graph.finalize(common_params.target, config);
+
+ return true;
}
void do_run() override
{
@@ -191,7 +168,10 @@ public:
}
private:
- Stream graph{ 0, "SqueezeNetV1.1" };
+ CommandLineParser cmd_parser;
+ CommonGraphOptions common_opts;
+ CommonGraphParams common_params;
+ Stream graph;
BranchLayer get_expand_fire_node(const std::string &data_path, std::string &&param_path, unsigned int expand1_filt, unsigned int expand3_filt)
{
@@ -219,7 +199,7 @@ private:
/** Main program for Squeezenet v1.1
*
* @param[in] argc Number of arguments
- * @param[in] argv Arguments ( [optional] Target (0 = NEON, 1 = OpenCL, 2 = OpenCL with Tuner), [optional] Path to the weights folder, [optional] image, [optional] labels, [optional] Fast math for convolution layer (0 = DISABLED, 1 = ENABLED) )
+ * @param[in] argv Arguments
*/
int main(int argc, char **argv)
{
diff --git a/examples/graph_vgg16.cpp b/examples/graph_vgg16.cpp
index d70c56eadd..5ff306507f 100644
--- a/examples/graph_vgg16.cpp
+++ b/examples/graph_vgg16.cpp
@@ -23,11 +23,10 @@
*/
#include "arm_compute/graph.h"
#include "support/ToolchainSupport.h"
+#include "utils/CommonGraphOptions.h"
#include "utils/GraphUtils.h"
#include "utils/Utils.h"
-#include <cstdlib>
-
using namespace arm_compute::utils;
using namespace arm_compute::graph::frontend;
using namespace arm_compute::graph_utils;
@@ -35,71 +34,47 @@ using namespace arm_compute::graph_utils;
/** Example demonstrating how to implement VGG16's network using the Compute Library's graph API
*
* @param[in] argc Number of arguments
- * @param[in] argv Arguments ( [optional] Target (0 = NEON, 1 = OpenCL, 2 = OpenCL with Tuner), [optional] Path to the weights folder, [optional] image, [optional] labels, [optional] Fast math for convolution layer (0 = DISABLED, 1 = ENABLED) )
+ * @param[in] argv Arguments
*/
class GraphVGG16Example : public Example
{
public:
- void do_setup(int argc, char **argv) override
+ GraphVGG16Example()
+ : cmd_parser(), common_opts(cmd_parser), common_params(), graph(0, "VGG16")
{
- std::string data_path; /* Path to the trainable data */
- std::string image; /* Image data */
- std::string label; /* Label data */
-
- // Create a preprocessor object
- const std::array<float, 3> mean_rgb{ { 123.68f, 116.779f, 103.939f } };
- std::unique_ptr<IPreprocessor> preprocessor = arm_compute::support::cpp14::make_unique<CaffePreproccessor>(mean_rgb);
+ }
+ bool do_setup(int argc, char **argv) override
+ {
+ // Parse arguments
+ cmd_parser.parse(argc, argv);
- // Set target. 0 (NEON), 1 (OpenCL), 2 (OpenCL with Tuner). By default it is NEON
- const int target = argc > 1 ? std::strtol(argv[1], nullptr, 10) : 0;
- Target target_hint = set_target_hint(target);
- FastMathHint fast_math_hint = FastMathHint::DISABLED;
+ // Consume common parameters
+ common_params = consume_common_graph_parameters(common_opts);
- // Parse arguments
- if(argc < 2)
- {
- // Print help
- std::cout << "Usage: " << argv[0] << " [target] [path_to_data] [image] [labels] [fast_math_hint]\n\n";
- std::cout << "No data folder provided: using random values\n\n";
- }
- else if(argc == 2)
+ // Return when help menu is requested
+ if(common_params.help)
{
- std::cout << "Usage: " << argv[0] << " " << argv[1] << " [path_to_data] [image] [labels] [fast_math_hint]\n\n";
- std::cout << "No data folder provided: using random values\n\n";
- }
- else if(argc == 3)
- {
- data_path = argv[2];
- std::cout << "Usage: " << argv[0] << " " << argv[1] << " " << argv[2] << " [image] [labels] [fast_math_hint]\n\n";
- std::cout << "No image provided: using random values\n\n";
- }
- else if(argc == 4)
- {
- data_path = argv[2];
- image = argv[3];
- std::cout << "Usage: " << argv[0] << " " << argv[1] << " " << argv[2] << " " << argv[3] << " [labels] [fast_math_hint]\n\n";
- std::cout << "No text file with labels provided: skipping output accessor\n\n";
- }
- else if(argc == 5)
- {
- data_path = argv[2];
- image = argv[3];
- label = argv[4];
- std::cout << "Usage: " << argv[0] << " " << argv[1] << " " << argv[2] << " " << argv[3] << " " << argv[4] << " [fast_math_hint]\n\n";
- std::cout << "No fast math info provided: disabling fast math\n\n";
- }
- else
- {
- data_path = argv[2];
- image = argv[3];
- label = argv[4];
- fast_math_hint = (std::strtol(argv[5], nullptr, 1) == 0) ? FastMathHint::DISABLED : FastMathHint::ENABLED;
+ cmd_parser.print_help(argv[0]);
+ return false;
}
- graph << target_hint
- << fast_math_hint
- << InputLayer(TensorDescriptor(TensorShape(224U, 224U, 3U, 1U), DataType::F32),
- get_input_accessor(image, std::move(preprocessor)))
+ // Checks
+ ARM_COMPUTE_ERROR_ON_MSG(arm_compute::is_data_type_quantized_asymmetric(common_params.data_type), "Unsupported data type!");
+
+ // Print parameter values
+ std::cout << common_params << std::endl;
+
+ // Get trainable parameters data path
+ std::string data_path = common_params.data_path;
+
+ // Create a preprocessor object
+ const std::array<float, 3> mean_rgb{ { 123.68f, 116.779f, 103.939f } };
+ std::unique_ptr<IPreprocessor> preprocessor = arm_compute::support::cpp14::make_unique<CaffePreproccessor>(mean_rgb);
+
+ graph << common_params.target
+ << common_params.fast_math_hint
+ << InputLayer(TensorDescriptor(TensorShape(224U, 224U, 3U, 1U), common_params.data_type),
+ get_input_accessor(common_params, std::move(preprocessor)))
// Layer 1
<< ConvolutionLayer(
3U, 3U, 64U,
@@ -231,12 +206,15 @@ public:
.set_name("fc8")
// Softmax
<< SoftmaxLayer().set_name("prob")
- << OutputLayer(get_output_accessor(label, 5));
+ << OutputLayer(get_output_accessor(common_params, 5));
// Finalize graph
GraphConfig config;
- config.use_tuner = (target == 2);
- graph.finalize(target_hint, config);
+ config.num_threads = common_params.threads;
+ config.use_tuner = common_params.enable_tuner;
+ graph.finalize(common_params.target, config);
+
+ return true;
}
void do_run() override
{
@@ -245,13 +223,16 @@ public:
}
private:
- Stream graph{ 0, "VGG16" };
+ CommandLineParser cmd_parser;
+ CommonGraphOptions common_opts;
+ CommonGraphParams common_params;
+ Stream graph;
};
/** Main program for VGG16
*
* @param[in] argc Number of arguments
- * @param[in] argv Arguments ( [optional] Target (0 = NEON, 1 = OpenCL, 2 = OpenCL with Tuner), [optional] Path to the weights folder, [optional] image, [optional] labels, [optional] Fast math for convolution layer (0 = DISABLED, 1 = ENABLED) )
+ * @param[in] argv Arguments
*/
int main(int argc, char **argv)
{
diff --git a/examples/graph_vgg19.cpp b/examples/graph_vgg19.cpp
index 8a0ec6fdbd..8bf88b96ed 100644
--- a/examples/graph_vgg19.cpp
+++ b/examples/graph_vgg19.cpp
@@ -23,83 +23,57 @@
*/
#include "arm_compute/graph.h"
#include "support/ToolchainSupport.h"
+#include "utils/CommonGraphOptions.h"
#include "utils/GraphUtils.h"
#include "utils/Utils.h"
-#include <cstdlib>
-
using namespace arm_compute::utils;
using namespace arm_compute::graph::frontend;
using namespace arm_compute::graph_utils;
-
/** Example demonstrating how to implement VGG19's network using the Compute Library's graph API
*
* @param[in] argc Number of arguments
- * @param[in] argv Arguments ( [optional] Target (0 = NEON, 1 = OpenCL, 2 = OpenCL with Tuner), [optional] Path to the weights folder, [optional] image, [optional] labels, [optional] Fast math for convolution layer (0 = DISABLED, 1 = ENABLED) )
+ * @param[in] argv Arguments
*/
class GraphVGG19Example : public Example
{
public:
- void do_setup(int argc, char **argv) override
+ GraphVGG19Example()
+ : cmd_parser(), common_opts(cmd_parser), common_params(), graph(0, "VGG19")
{
- std::string data_path; /* Path to the trainable data */
- std::string image; /* Image data */
- std::string label; /* Label data */
-
- // Create a preprocessor object
- const std::array<float, 3> mean_rgb{ { 123.68f, 116.779f, 103.939f } };
- std::unique_ptr<IPreprocessor> preprocessor = arm_compute::support::cpp14::make_unique<CaffePreproccessor>(mean_rgb);
+ }
+ bool do_setup(int argc, char **argv) override
+ {
+ // Parse arguments
+ cmd_parser.parse(argc, argv);
- // Set target. 0 (NEON), 1 (OpenCL), 2 (OpenCL with Tuner). By default it is NEON
- const int target = argc > 1 ? std::strtol(argv[1], nullptr, 10) : 0;
- Target target_hint = set_target_hint(target);
- FastMathHint fast_math_hint = FastMathHint::DISABLED;
+ // Consume common parameters
+ common_params = consume_common_graph_parameters(common_opts);
- // Parse arguments
- if(argc < 2)
- {
- // Print help
- std::cout << "Usage: " << argv[0] << " [target] [path_to_data] [image] [labels] [fast_math_hint]\n\n";
- std::cout << "No data folder provided: using random values\n\n";
- }
- else if(argc == 2)
- {
- std::cout << "Usage: " << argv[0] << " " << argv[1] << " [path_to_data] [image] [labels] [fast_math_hint]\n\n";
- std::cout << "No data folder provided: using random values\n\n";
- }
- else if(argc == 3)
+ // Return when help menu is requested
+ if(common_params.help)
{
- data_path = argv[2];
- std::cout << "Usage: " << argv[0] << " " << argv[1] << " " << argv[2] << " [image] [labels] [fast_math_hint]\n\n";
- std::cout << "No image provided: using random values\n\n";
- }
- else if(argc == 4)
- {
- data_path = argv[2];
- image = argv[3];
- std::cout << "Usage: " << argv[0] << " " << argv[1] << " " << argv[2] << " " << argv[3] << " [labels] [fast_math_hint]\n\n";
- std::cout << "No text file with labels provided: skipping output accessor\n\n";
- }
- else if(argc == 5)
- {
- data_path = argv[2];
- image = argv[3];
- label = argv[4];
- std::cout << "Usage: " << argv[0] << " " << argv[1] << " " << argv[2] << " " << argv[3] << " " << argv[4] << " [fast_math_hint]\n\n";
- std::cout << "No fast math info provided: disabling fast math\n\n";
- }
- else
- {
- data_path = argv[2];
- image = argv[3];
- label = argv[4];
- fast_math_hint = (std::strtol(argv[5], nullptr, 1) == 0) ? FastMathHint::DISABLED : FastMathHint::ENABLED;
+ cmd_parser.print_help(argv[0]);
+ return false;
}
- graph << target_hint
- << fast_math_hint
- << InputLayer(TensorDescriptor(TensorShape(224U, 224U, 3U, 1U), DataType::F32),
- get_input_accessor(image, std::move(preprocessor)))
+ // Checks
+ ARM_COMPUTE_ERROR_ON_MSG(arm_compute::is_data_type_quantized_asymmetric(common_params.data_type), "Unsupported data type!");
+
+ // Print parameter values
+ std::cout << common_params << std::endl;
+
+ // Get trainable parameters data path
+ std::string data_path = common_params.data_path;
+
+ // Create a preprocessor object
+ const std::array<float, 3> mean_rgb{ { 123.68f, 116.779f, 103.939f } };
+ std::unique_ptr<IPreprocessor> preprocessor = arm_compute::support::cpp14::make_unique<CaffePreproccessor>(mean_rgb);
+
+ graph << common_params.target
+ << common_params.fast_math_hint
+ << InputLayer(TensorDescriptor(TensorShape(224U, 224U, 3U, 1U), common_params.data_type),
+ get_input_accessor(common_params, std::move(preprocessor)))
// Layer 1
<< ConvolutionLayer(
3U, 3U, 64U,
@@ -244,12 +218,15 @@ public:
.set_name("fc8")
// Softmax
<< SoftmaxLayer().set_name("prob")
- << OutputLayer(get_output_accessor(label, 5));
+ << OutputLayer(get_output_accessor(common_params, 5));
// Finalize graph
GraphConfig config;
- config.use_tuner = (target == 2);
- graph.finalize(target_hint, config);
+ config.num_threads = common_params.threads;
+ config.use_tuner = common_params.enable_tuner;
+ graph.finalize(common_params.target, config);
+
+ return true;
}
void do_run() override
{
@@ -258,13 +235,16 @@ public:
}
private:
- Stream graph{ 0, "VGG19" };
+ CommandLineParser cmd_parser;
+ CommonGraphOptions common_opts;
+ CommonGraphParams common_params;
+ Stream graph;
};
/** Main program for VGG19
*
* @param[in] argc Number of arguments
- * @param[in] argv Arguments ( [optional] Target (0 = NEON, 1 = OpenCL, 2 = OpenCL with Tuner), [optional] Path to the weights folder, [optional] image, [optional] labels, [optional] Fast math for convolution layer (0 = DISABLED, 1 = ENABLED) )
+ * @param[in] argv Arguments
*/
int main(int argc, char **argv)
{
diff --git a/examples/neon_cartoon_effect.cpp b/examples/neon_cartoon_effect.cpp
index e6e0f34154..4285aa41e3 100644
--- a/examples/neon_cartoon_effect.cpp
+++ b/examples/neon_cartoon_effect.cpp
@@ -34,7 +34,7 @@ using namespace utils;
class NEONCartoonEffectExample : public Example
{
public:
- void do_setup(int argc, char **argv) override
+ bool do_setup(int argc, char **argv) override
{
// Open PPM file
PPMLoader ppm;
@@ -75,6 +75,8 @@ public:
ppm.fill_image(src_img);
output_filename = std::string(argv[1]) + "_out.ppm";
}
+
+ return true;
}
void do_run() override
diff --git a/examples/neon_cnn.cpp b/examples/neon_cnn.cpp
index 05b6c832bc..1df81256b9 100644
--- a/examples/neon_cnn.cpp
+++ b/examples/neon_cnn.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2018 ARM Limited.
+ * Copyright (c) 2016-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -36,7 +36,7 @@ using namespace utils;
class NEONCNNExample : public Example
{
public:
- void do_setup(int argc, char **argv) override
+ bool do_setup(int argc, char **argv) override
{
ARM_COMPUTE_UNUSED(argc);
ARM_COMPUTE_UNUSED(argv);
@@ -227,6 +227,8 @@ public:
// Finalize the manager. (Validity checks, memory allocations etc)
mm_transitions->finalize();
+
+ return true;
}
void do_run() override
{
diff --git a/examples/neon_convolution.cpp b/examples/neon_convolution.cpp
index 8efb932081..1a7e865908 100644
--- a/examples/neon_convolution.cpp
+++ b/examples/neon_convolution.cpp
@@ -53,7 +53,7 @@ const int16_t gaussian5x5[] =
class NEONConvolutionExample : public Example
{
public:
- void do_setup(int argc, char **argv) override
+ bool do_setup(int argc, char **argv) override
{
/** [Accurate padding] **/
PPMLoader ppm;
@@ -94,6 +94,8 @@ public:
output_filename = std::string(argv[1]) + "_out.ppm";
}
/** [Accurate padding] **/
+
+ return true;
}
void do_run() override
{
diff --git a/examples/neon_copy_objects.cpp b/examples/neon_copy_objects.cpp
index 9409cf366c..84a2abd379 100644
--- a/examples/neon_copy_objects.cpp
+++ b/examples/neon_copy_objects.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2018 ARM Limited.
+ * Copyright (c) 2016-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -36,7 +36,7 @@ using namespace utils;
class NEONCopyObjectsExample : public Example
{
public:
- void do_setup(int argc, char **argv) override
+ bool do_setup(int argc, char **argv) override
{
ARM_COMPUTE_UNUSED(argc);
ARM_COMPUTE_UNUSED(argv);
@@ -135,6 +135,8 @@ public:
output_it);
/** [Copy objects example] */
+
+ return true;
}
void do_run() override
{
diff --git a/examples/neon_scale.cpp b/examples/neon_scale.cpp
index 252bfc9ae7..b04d916aaf 100644
--- a/examples/neon_scale.cpp
+++ b/examples/neon_scale.cpp
@@ -33,7 +33,7 @@ using namespace utils;
class NEONScaleExample : public Example
{
public:
- void do_setup(int argc, char **argv) override
+ bool do_setup(int argc, char **argv) override
{
PPMLoader ppm;
@@ -72,6 +72,8 @@ public:
ppm.fill_image(src);
output_filename = std::string(argv[1]) + "_out.ppm";
}
+
+ return true;
}
void do_run() override
{
diff --git a/examples/neoncl_scale_median_gaussian.cpp b/examples/neoncl_scale_median_gaussian.cpp
index 173575c061..1b26517d9f 100644
--- a/examples/neoncl_scale_median_gaussian.cpp
+++ b/examples/neoncl_scale_median_gaussian.cpp
@@ -43,7 +43,7 @@ using namespace utils;
class NEONCLScaleMedianGaussianExample : public Example
{
public:
- void do_setup(int argc, char **argv) override
+ bool do_setup(int argc, char **argv) override
{
/** [NEON / OpenCL Interop] */
PPMLoader ppm;
@@ -88,6 +88,8 @@ public:
const std::string output_filename = std::string(argv[1]) + "_out.ppm";
}
/** [NEON / OpenCL Interop] */
+
+ return true;
}
void do_run() override
{
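The plain NEON/OpenCL examples pick up the same interface change: do_setup() now returns a bool instead of void, and each example here returns true at the end of its setup. A minimal sketch of the new signature; the body is a placeholder, and returning false is presumably how an example would abort before do_run():

class MinimalExample : public Example
{
public:
    bool do_setup(int argc, char **argv) override
    {
        ARM_COMPUTE_UNUSED(argc);
        ARM_COMPUTE_UNUSED(argv);
        // ... configure tensors and functions here ...
        return true;
    }
    void do_run() override
    {
        // ... run the configured functions ...
    }
};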