aboutsummaryrefslogtreecommitdiff
path: root/examples/graph_mobilenet.cpp
diff options
context:
space:
mode:
authorGeorgios Pinitas <georgios.pinitas@arm.com>2018-07-03 12:06:23 +0100
committerAnthony Barbier <anthony.barbier@arm.com>2018-11-02 16:54:10 +0000
commit12be7ab4876f77fecfab903df70791623219b3da (patch)
tree1cfa6852e60948bee9db0831a9f3abc97a2031c8 /examples/graph_mobilenet.cpp
parente39334c15c7fd141bb8173d5017ea5ca157fca2c (diff)
downloadComputeLibrary-12be7ab4876f77fecfab903df70791623219b3da.tar.gz
COMPMID-1310: Create graph validation executables.
Change-Id: I9e0b57b1b83fe5a95777cdaeddba6ecef650bafc Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/138697 Reviewed-by: Anthony Barbier <anthony.barbier@arm.com> Tested-by: Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'examples/graph_mobilenet.cpp')
-rw-r--r--examples/graph_mobilenet.cpp336
1 files changed, 217 insertions, 119 deletions
diff --git a/examples/graph_mobilenet.cpp b/examples/graph_mobilenet.cpp
index 40243bb111..9304b2b380 100644
--- a/examples/graph_mobilenet.cpp
+++ b/examples/graph_mobilenet.cpp
@@ -23,11 +23,11 @@
*/
#include "arm_compute/graph.h"
#include "support/ToolchainSupport.h"
+#include "utils/CommonGraphOptions.h"
#include "utils/GraphUtils.h"
#include "utils/Utils.h"
-#include <cstdlib>
-
+using namespace arm_compute;
using namespace arm_compute::utils;
using namespace arm_compute::graph::frontend;
using namespace arm_compute::graph_utils;
@@ -35,107 +35,112 @@ using namespace arm_compute::graph_utils;
/** Example demonstrating how to implement MobileNet's network using the Compute Library's graph API
*
* @param[in] argc Number of arguments
- * @param[in] argv Arguments ( [optional] Target (0 = NEON, 1 = OpenCL, 2 = OpenCL with Tuner), [optional] Model ID (0 = MobileNetV1_1.0_224, 1 = MobileNetV1_0.75_160), [optional] Path to the weights folder, [optional] image, [optional] labels, [optional] data layout, [optional] Fast math for convolution layer (0 = DISABLED, 1 = ENABLED) )
+ * @param[in] argv Arguments
*/
-class GraphMobilenetExample : public Example
+class GraphMobilenetExample : public Example // NOLINT
{
public:
- void do_setup(int argc, char **argv) override
+ GraphMobilenetExample()
+ : cmd_parser(), common_opts(cmd_parser), common_params(), graph(0, "MobileNetV1")
{
- std::string data_path; /* Path to the trainable data */
- std::string image; /* Image data */
- std::string label; /* Label data */
-
- // Create a preprocessor object
- std::unique_ptr<IPreprocessor> preprocessor = arm_compute::support::cpp14::make_unique<TFPreproccessor>();
+ // Add model id option
+ model_id_opt = cmd_parser.add_option<SimpleOption<int>>("model-id", 0);
+ model_id_opt->set_help("Mobilenet model id (0: 1.0_224, else: 0.75_160)");
+ }
+ /** Prevent instances of this class from being copy constructed */
+ GraphMobilenetExample(const GraphMobilenetExample &) = delete;
+ /** Prevent instances of this class from being copied */
+ GraphMobilenetExample &operator=(const GraphMobilenetExample &) = delete;
+ bool do_setup(int argc, char **argv) override
+ {
+ // Parse arguments
+ cmd_parser.parse(argc, argv);
- // Set target. 0 (NEON), 1 (OpenCL), 2 (OpenCL with Tuner). By default it is NEON
- const int target = argc > 1 ? std::strtol(argv[1], nullptr, 10) : 0;
- Target target_hint = set_target_hint(target);
- DepthwiseConvolutionMethod depthwise_convolution_hint = DepthwiseConvolutionMethod::OPTIMIZED_3x3;
- FastMathHint fast_math_hint = FastMathHint::DISABLED;
-
- // Set model to execute. 0 (MobileNetV1_1.0_224), 1 (MobileNetV1_0.75_160)
- int model_id = (argc > 2) ? std::strtol(argv[2], nullptr, 10) : 0;
- ARM_COMPUTE_ERROR_ON_MSG(model_id > 1, "Invalid model ID. Model must be 0 (MobileNetV1_1.0_224) or 1 (MobileNetV1_0.75_160)");
- int layout_id = (argc > 3) ? std::strtol(argv[3], nullptr, 10) : 0;
- ARM_COMPUTE_ERROR_ON_MSG(layout_id > 1, "Invalid layout ID. Layout must be 0 (NCHW) or 1 (NHWC)");
-
- float depth_scale = (model_id == 0) ? 1.f : 0.75;
- unsigned int spatial_size = (model_id == 0) ? 224 : 160;
- std::string model_path = (model_id == 0) ? "/cnn_data/mobilenet_v1_1_224_model/" : "/cnn_data/mobilenet_v1_075_160_model/";
- TensorDescriptor input_descriptor_nchw = TensorDescriptor(TensorShape(spatial_size, spatial_size, 3U, 1U), DataType::F32);
- TensorDescriptor input_descriptor_nhwc = TensorDescriptor(TensorShape(3U, spatial_size, spatial_size, 1U), DataType::F32).set_layout(DataLayout::NHWC);
- TensorDescriptor input_descriptor = (layout_id == 0) ? input_descriptor_nchw : input_descriptor_nhwc;
+ // Consume common parameters
+ common_params = consume_common_graph_parameters(common_opts);
- // Parse arguments
- if(argc < 2)
- {
- // Print help
- std::cout << "Usage: " << argv[0] << " [target] [model] [layout] [path_to_data] [image] [labels] [fast_math_hint]\n\n";
- std::cout << "No model ID provided: using MobileNetV1_1.0_224\n\n";
- std::cout << "No data layout provided: using NCHW\n\n";
- std::cout << "No data folder provided: using random values\n\n";
- }
- else if(argc == 2)
+ // Return when help menu is requested
+ if(common_params.help)
{
- std::cout << "Usage: " << argv[0] << " " << argv[1] << " [model] [layout] [path_to_data] [image] [labels] [fast_math_hint]\n\n";
- std::cout << "No model ID provided: using MobileNetV1_1.0_224\n\n";
- std::cout << "No data layout provided: using NCHW\n\n";
- std::cout << "No data folder provided: using random values\n\n";
+ cmd_parser.print_help(argv[0]);
+ return false;
}
- else if(argc == 3)
- {
- std::cout << "Usage: " << argv[0] << " " << argv[1] << " " << argv[2] << " [layout] [path_to_data] [image] [labels] [fast_math_hint]\n\n";
- std::cout << "No data layout provided: using NCHW\n\n";
- std::cout << "No data folder provided: using random values\n\n";
- }
- else if(argc == 4)
- {
- std::cout << "Usage: " << argv[0] << " " << argv[1] << " " << argv[2] << " " << argv[3] << " [path_to_data] [image] [labels] [fast_math_hint]\n\n";
- std::cout << "No data folder provided: using random values\n\n";
- }
- else if(argc == 5)
- {
- data_path = argv[4];
- std::cout << "Usage: " << argv[0] << " " << argv[1] << " " << argv[2] << " " << argv[3] << " " << argv[4] << " [image] [labels] [fast_math_hint]\n\n";
- std::cout << "No image provided: using random values\n\n";
- std::cout << "No text file with labels provided: skipping output accessor\n\n";
- }
- else if(argc == 6)
+
+ // Print parameter values
+ std::cout << common_params << std::endl;
+
+ // Get model parameters
+ int model_id = model_id_opt->value();
+
+ // Create input descriptor
+ unsigned int spatial_size = (model_id == 0 || common_params.data_type == DataType::QASYMM8) ? 224 : 160;
+ TensorShape tensor_shape = TensorShape(spatial_size, spatial_size, 3U, 1U);
+ if(common_params.data_layout == DataLayout::NHWC)
{
- data_path = argv[4];
- image = argv[5];
- std::cout << "Usage: " << argv[0] << " " << argv[1] << " " << argv[2] << " " << argv[3] << " [labels] [fast_math_hint]\n\n";
- std::cout << "No text file with labels provided: skipping output accessor\n\n";
+ arm_compute::permute(tensor_shape, arm_compute::PermutationVector(2U, 0U, 1U));
}
- else if(argc == 7)
+ TensorDescriptor input_descriptor = TensorDescriptor(tensor_shape, common_params.data_type).set_layout(common_params.data_layout);
+
+ // Set graph hints
+ graph << common_params.target
+ << DepthwiseConvolutionMethod::OPTIMIZED_3x3 // FIXME(COMPMID-1073): Add heuristics to automatically call the optimized 3x3 method
+ << common_params.fast_math_hint;
+
+ // Create core graph
+ if(arm_compute::is_data_type_float(common_params.data_type))
{
- data_path = argv[4];
- image = argv[5];
- label = argv[6];
- std::cout << "Usage: " << argv[0] << " " << argv[1] << " " << argv[2] << " " << argv[3] << " " << argv[4] << " [fast_math_hint]\n\n";
- std::cout << "No fast math info provided: disabling fast math\n\n";
+ create_graph_float(input_descriptor, model_id);
}
else
{
- data_path = argv[4];
- image = argv[5];
- label = argv[6];
- fast_math_hint = (std::strtol(argv[7], nullptr, 1) == 0) ? FastMathHint::DISABLED : FastMathHint::ENABLED;
+ create_graph_qasymm(input_descriptor);
}
+ // Create common tail
+ graph << ReshapeLayer(TensorShape(1001U)).set_name("Reshape")
+ << SoftmaxLayer().set_name("Softmax")
+ << OutputLayer(get_output_accessor(common_params, 5));
+
+ // Finalize graph
+ GraphConfig config;
+ config.num_threads = common_params.threads;
+ config.use_tuner = common_params.enable_tuner;
+ graph.finalize(common_params.target, config);
+
+ return true;
+ }
+ void do_run() override
+ {
+ // Run graph
+ graph.run();
+ }
+
+private:
+ CommandLineParser cmd_parser;
+ CommonGraphOptions common_opts;
+ SimpleOption<int> *model_id_opt{ nullptr };
+ CommonGraphParams common_params;
+ Stream graph;
+
+ void create_graph_float(TensorDescriptor &input_descriptor, int model_id)
+ {
+ float depth_scale = (model_id == 0) ? 1.f : 0.75;
+ std::string model_path = (model_id == 0) ? "/cnn_data/mobilenet_v1_1_224_model/" : "/cnn_data/mobilenet_v1_075_160_model/";
+
+ // Create a preprocessor object
+ std::unique_ptr<IPreprocessor> preprocessor = arm_compute::support::cpp14::make_unique<TFPreproccessor>();
+
+ // Get trainable parameters data path
+ std::string data_path = common_params.data_path;
+
// Add model path to data path
if(!data_path.empty())
{
data_path += model_path;
}
- graph << target_hint
- << depthwise_convolution_hint
- << fast_math_hint
- << InputLayer(input_descriptor,
- get_input_accessor(image, std::move(preprocessor), false))
+ graph << InputLayer(input_descriptor,
+ get_input_accessor(common_params, std::move(preprocessor), false))
<< ConvolutionLayer(
3U, 3U, 32U * depth_scale,
get_weights_accessor(data_path, "Conv2d_0_weights.npy", DataLayout::NCHW),
@@ -150,47 +155,122 @@ public:
0.001f)
.set_name("Conv2d_0/BatchNorm")
<< ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f)).set_name("Conv2d_0/Relu6");
- graph << get_dwsc_node(data_path, "Conv2d_1", 64 * depth_scale, PadStrideInfo(1, 1, 1, 1), PadStrideInfo(1, 1, 0, 0));
- graph << get_dwsc_node(data_path, "Conv2d_2", 128 * depth_scale, PadStrideInfo(2, 2, 0, 1, 0, 1, DimensionRoundingType::CEIL), PadStrideInfo(1, 1, 0, 0));
- graph << get_dwsc_node(data_path, "Conv2d_3", 128 * depth_scale, PadStrideInfo(1, 1, 1, 1, 1, 1, DimensionRoundingType::CEIL), PadStrideInfo(1, 1, 0, 0));
- graph << get_dwsc_node(data_path, "Conv2d_4", 256 * depth_scale, PadStrideInfo(2, 2, 0, 1, 0, 1, DimensionRoundingType::CEIL), PadStrideInfo(1, 1, 0, 0));
- graph << get_dwsc_node(data_path, "Conv2d_5", 256 * depth_scale, PadStrideInfo(1, 1, 1, 1, 1, 1, DimensionRoundingType::CEIL), PadStrideInfo(1, 1, 0, 0));
- graph << get_dwsc_node(data_path, "Conv2d_6", 512 * depth_scale, PadStrideInfo(2, 2, 0, 1, 0, 1, DimensionRoundingType::CEIL), PadStrideInfo(1, 1, 0, 0));
- graph << get_dwsc_node(data_path, "Conv2d_7", 512 * depth_scale, PadStrideInfo(1, 1, 1, 1, 1, 1, DimensionRoundingType::CEIL), PadStrideInfo(1, 1, 0, 0));
- graph << get_dwsc_node(data_path, "Conv2d_8", 512 * depth_scale, PadStrideInfo(1, 1, 1, 1, 1, 1, DimensionRoundingType::CEIL), PadStrideInfo(1, 1, 0, 0));
- graph << get_dwsc_node(data_path, "Conv2d_9", 512 * depth_scale, PadStrideInfo(1, 1, 1, 1, 1, 1, DimensionRoundingType::CEIL), PadStrideInfo(1, 1, 0, 0));
- graph << get_dwsc_node(data_path, "Conv2d_10", 512 * depth_scale, PadStrideInfo(1, 1, 1, 1, 1, 1, DimensionRoundingType::CEIL), PadStrideInfo(1, 1, 0, 0));
- graph << get_dwsc_node(data_path, "Conv2d_11", 512 * depth_scale, PadStrideInfo(1, 1, 1, 1, 1, 1, DimensionRoundingType::CEIL), PadStrideInfo(1, 1, 0, 0));
- graph << get_dwsc_node(data_path, "Conv2d_12", 1024 * depth_scale, PadStrideInfo(2, 2, 0, 1, 0, 1, DimensionRoundingType::CEIL), PadStrideInfo(1, 1, 0, 0));
- graph << get_dwsc_node(data_path, "Conv2d_13", 1024 * depth_scale, PadStrideInfo(1, 1, 1, 1, 1, 1, DimensionRoundingType::CEIL), PadStrideInfo(1, 1, 0, 0));
+ graph << get_dwsc_node_float(data_path, "Conv2d_1", 64 * depth_scale, PadStrideInfo(1, 1, 1, 1), PadStrideInfo(1, 1, 0, 0));
+ graph << get_dwsc_node_float(data_path, "Conv2d_2", 128 * depth_scale, PadStrideInfo(2, 2, 0, 1, 0, 1, DimensionRoundingType::CEIL), PadStrideInfo(1, 1, 0, 0));
+ graph << get_dwsc_node_float(data_path, "Conv2d_3", 128 * depth_scale, PadStrideInfo(1, 1, 1, 1, 1, 1, DimensionRoundingType::CEIL), PadStrideInfo(1, 1, 0, 0));
+ graph << get_dwsc_node_float(data_path, "Conv2d_4", 256 * depth_scale, PadStrideInfo(2, 2, 0, 1, 0, 1, DimensionRoundingType::CEIL), PadStrideInfo(1, 1, 0, 0));
+ graph << get_dwsc_node_float(data_path, "Conv2d_5", 256 * depth_scale, PadStrideInfo(1, 1, 1, 1, 1, 1, DimensionRoundingType::CEIL), PadStrideInfo(1, 1, 0, 0));
+ graph << get_dwsc_node_float(data_path, "Conv2d_6", 512 * depth_scale, PadStrideInfo(2, 2, 0, 1, 0, 1, DimensionRoundingType::CEIL), PadStrideInfo(1, 1, 0, 0));
+ graph << get_dwsc_node_float(data_path, "Conv2d_7", 512 * depth_scale, PadStrideInfo(1, 1, 1, 1, 1, 1, DimensionRoundingType::CEIL), PadStrideInfo(1, 1, 0, 0));
+ graph << get_dwsc_node_float(data_path, "Conv2d_8", 512 * depth_scale, PadStrideInfo(1, 1, 1, 1, 1, 1, DimensionRoundingType::CEIL), PadStrideInfo(1, 1, 0, 0));
+ graph << get_dwsc_node_float(data_path, "Conv2d_9", 512 * depth_scale, PadStrideInfo(1, 1, 1, 1, 1, 1, DimensionRoundingType::CEIL), PadStrideInfo(1, 1, 0, 0));
+ graph << get_dwsc_node_float(data_path, "Conv2d_10", 512 * depth_scale, PadStrideInfo(1, 1, 1, 1, 1, 1, DimensionRoundingType::CEIL), PadStrideInfo(1, 1, 0, 0));
+ graph << get_dwsc_node_float(data_path, "Conv2d_11", 512 * depth_scale, PadStrideInfo(1, 1, 1, 1, 1, 1, DimensionRoundingType::CEIL), PadStrideInfo(1, 1, 0, 0));
+ graph << get_dwsc_node_float(data_path, "Conv2d_12", 1024 * depth_scale, PadStrideInfo(2, 2, 0, 1, 0, 1, DimensionRoundingType::CEIL), PadStrideInfo(1, 1, 0, 0));
+ graph << get_dwsc_node_float(data_path, "Conv2d_13", 1024 * depth_scale, PadStrideInfo(1, 1, 1, 1, 1, 1, DimensionRoundingType::CEIL), PadStrideInfo(1, 1, 0, 0));
graph << PoolingLayer(PoolingLayerInfo(PoolingType::AVG)).set_name("Logits/AvgPool_1a")
<< ConvolutionLayer(
1U, 1U, 1001U,
get_weights_accessor(data_path, "Logits_Conv2d_1c_1x1_weights.npy", DataLayout::NCHW),
get_weights_accessor(data_path, "Logits_Conv2d_1c_1x1_biases.npy"),
PadStrideInfo(1, 1, 0, 0))
- .set_name("Logits/Conv2d_1c_1x1")
- << ReshapeLayer(TensorShape(1001U)).set_name("Reshape")
- << SoftmaxLayer().set_name("Softmax")
- << OutputLayer(get_output_accessor(label, 5));
-
- // Finalize graph
- GraphConfig config;
- config.use_tuner = (target == 2);
- graph.finalize(target_hint, config);
+ .set_name("Logits/Conv2d_1c_1x1");
}
- void do_run() override
+
+ void create_graph_qasymm(TensorDescriptor &input_descriptor)
{
- // Run graph
- graph.run();
- }
+ // Get trainable parameters data path
+ std::string data_path = common_params.data_path;
-private:
- Stream graph{ 0, "MobileNetV1" };
+ // Quantization info taken from the AndroidNN QASYMM8 MobileNet example
+ const QuantizationInfo in_quant_info = QuantizationInfo(0.0078125f, 128);
+ const QuantizationInfo mid_quant_info = QuantizationInfo(0.0784313753247f, 128);
+
+ const std::vector<QuantizationInfo> conv_weights_quant_info =
+ {
+ QuantizationInfo(0.031778190285f, 156), // conv0
+ QuantizationInfo(0.00604454148561f, 66) // conv14
+ };
+
+ const std::vector<QuantizationInfo> depth_weights_quant_info =
+ {
+ QuantizationInfo(0.254282623529f, 129), // dwsc1
+ QuantizationInfo(0.12828284502f, 172), // dwsc2
+ QuantizationInfo(0.265911251307f, 83), // dwsc3
+ QuantizationInfo(0.0985597148538f, 30), // dwsc4
+ QuantizationInfo(0.0631204470992f, 54), // dwsc5
+ QuantizationInfo(0.0137207424268f, 141), // dwsc6
+ QuantizationInfo(0.0817828401923f, 125), // dwsc7
+ QuantizationInfo(0.0393880493939f, 164), // dwsc8
+ QuantizationInfo(0.211694166064f, 129), // dwsc9
+ QuantizationInfo(0.158015936613f, 103), // dwsc10
+ QuantizationInfo(0.0182712618262f, 137), // dwsc11
+ QuantizationInfo(0.0127998134121f, 134), // dwsc12
+ QuantizationInfo(0.299285322428f, 161) // dwsc13
+ };
- BranchLayer get_dwsc_node(const std::string &data_path, std::string &&param_path,
- unsigned int conv_filt,
- PadStrideInfo dwc_pad_stride_info, PadStrideInfo conv_pad_stride_info)
+ const std::vector<QuantizationInfo> point_weights_quant_info =
+ {
+ QuantizationInfo(0.0425766184926f, 129), // dwsc1
+ QuantizationInfo(0.0250773020089f, 94), // dwsc2
+ QuantizationInfo(0.015851572156f, 93), // dwsc3
+ QuantizationInfo(0.0167811904103f, 98), // dwsc4
+ QuantizationInfo(0.00951790809631f, 135), // dwsc5
+ QuantizationInfo(0.00999817531556f, 128), // dwsc6
+ QuantizationInfo(0.00590536883101f, 126), // dwsc7
+ QuantizationInfo(0.00576109671965f, 133), // dwsc8
+ QuantizationInfo(0.00830461271107f, 142), // dwsc9
+ QuantizationInfo(0.0152327232063f, 72), // dwsc10
+ QuantizationInfo(0.00741417845711f, 125), // dwsc11
+ QuantizationInfo(0.0135628981516f, 142), // dwsc12
+ QuantizationInfo(0.0338749065995f, 140) // dwsc13
+ };
+
+ graph << InputLayer(input_descriptor.set_quantization_info(in_quant_info),
+ get_weights_accessor(data_path, "/cnn_data/mobilenet_qasymm8_model/" + common_params.image))
+ << ConvolutionLayer(
+ 3U, 3U, 32U,
+ get_weights_accessor(data_path, "/cnn_data/mobilenet_qasymm8_model/Conv2d_0_weights.npy"),
+ get_weights_accessor(data_path, "/cnn_data/mobilenet_qasymm8_model/Conv2d_0_bias.npy"),
+ PadStrideInfo(2U, 2U, 0U, 1U, 0U, 1U, DimensionRoundingType::FLOOR),
+ 1, conv_weights_quant_info.at(0), mid_quant_info)
+ << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.f));
+ graph << get_dwsc_node_qasymm(data_path, "Conv2d_1", 64U, PadStrideInfo(1U, 1U, 1U, 1U), PadStrideInfo(1U, 1U, 0U, 0U), depth_weights_quant_info.at(0), point_weights_quant_info.at(0));
+ graph << get_dwsc_node_qasymm(data_path, "Conv2d_2", 128U, PadStrideInfo(2U, 2U, 0U, 1U, 0U, 1U, DimensionRoundingType::FLOOR), PadStrideInfo(1U, 1U, 0U, 0U), depth_weights_quant_info.at(1),
+ point_weights_quant_info.at(1));
+ graph << get_dwsc_node_qasymm(data_path, "Conv2d_3", 128U, PadStrideInfo(1U, 1U, 1U, 1U, 1U, 1U, DimensionRoundingType::FLOOR), PadStrideInfo(1U, 1U, 0U, 0U), depth_weights_quant_info.at(2),
+ point_weights_quant_info.at(2));
+ graph << get_dwsc_node_qasymm(data_path, "Conv2d_4", 256U, PadStrideInfo(2U, 2U, 0U, 1U, 0U, 1U, DimensionRoundingType::FLOOR), PadStrideInfo(1U, 1U, 0U, 0U), depth_weights_quant_info.at(3),
+ point_weights_quant_info.at(3));
+ graph << get_dwsc_node_qasymm(data_path, "Conv2d_5", 256U, PadStrideInfo(1U, 1U, 1U, 1U, 1U, 1U, DimensionRoundingType::FLOOR), PadStrideInfo(1U, 1U, 0U, 0U), depth_weights_quant_info.at(4),
+ point_weights_quant_info.at(4));
+ graph << get_dwsc_node_qasymm(data_path, "Conv2d_6", 512U, PadStrideInfo(2U, 2U, 0U, 1U, 0U, 1U, DimensionRoundingType::FLOOR), PadStrideInfo(1U, 1U, 0U, 0U), depth_weights_quant_info.at(5),
+ point_weights_quant_info.at(5));
+ graph << get_dwsc_node_qasymm(data_path, "Conv2d_7", 512U, PadStrideInfo(1U, 1U, 1U, 1U, 1U, 1U, DimensionRoundingType::FLOOR), PadStrideInfo(1U, 1U, 0U, 0U), depth_weights_quant_info.at(6),
+ point_weights_quant_info.at(6));
+ graph << get_dwsc_node_qasymm(data_path, "Conv2d_8", 512U, PadStrideInfo(1U, 1U, 1U, 1U, 1U, 1U, DimensionRoundingType::FLOOR), PadStrideInfo(1U, 1U, 0U, 0U), depth_weights_quant_info.at(7),
+ point_weights_quant_info.at(7));
+ graph << get_dwsc_node_qasymm(data_path, "Conv2d_9", 512U, PadStrideInfo(1U, 1U, 1U, 1U, 1U, 1U, DimensionRoundingType::FLOOR), PadStrideInfo(1U, 1U, 0U, 0U), depth_weights_quant_info.at(8),
+ point_weights_quant_info.at(8));
+ graph << get_dwsc_node_qasymm(data_path, "Conv2d_10", 512U, PadStrideInfo(1U, 1U, 1U, 1U, 1U, 1U, DimensionRoundingType::FLOOR), PadStrideInfo(1U, 1U, 0U, 0U), depth_weights_quant_info.at(9),
+ point_weights_quant_info.at(9));
+ graph << get_dwsc_node_qasymm(data_path, "Conv2d_11", 512U, PadStrideInfo(1U, 1U, 1U, 1U, 1U, 1U, DimensionRoundingType::FLOOR), PadStrideInfo(1U, 1U, 0U, 0U), depth_weights_quant_info.at(10),
+ point_weights_quant_info.at(10));
+ graph << get_dwsc_node_qasymm(data_path, "Conv2d_12", 1024U, PadStrideInfo(2U, 2U, 0U, 1U, 0U, 1U, DimensionRoundingType::FLOOR), PadStrideInfo(1U, 1U, 0U, 0U), depth_weights_quant_info.at(11),
+ point_weights_quant_info.at(11));
+ graph << get_dwsc_node_qasymm(data_path, "Conv2d_13", 1024U, PadStrideInfo(1U, 1U, 1U, 1U, 1U, 1U, DimensionRoundingType::FLOOR), PadStrideInfo(1U, 1U, 0U, 0U), depth_weights_quant_info.at(12),
+ point_weights_quant_info.at(12))
+ << PoolingLayer(PoolingLayerInfo(PoolingType::AVG))
+ << ConvolutionLayer(
+ 1U, 1U, 1001U,
+ get_weights_accessor(data_path, "/cnn_data/mobilenet_qasymm8_model/Logits_Conv2d_1c_1x1_weights.npy"),
+ get_weights_accessor(data_path, "/cnn_data/mobilenet_qasymm8_model/Logits_Conv2d_1c_1x1_bias.npy"),
+ PadStrideInfo(1U, 1U, 0U, 0U), 1, conv_weights_quant_info.at(1));
+ }
+
+ BranchLayer get_dwsc_node_float(const std::string &data_path, std::string &&param_path,
+ unsigned int conv_filt,
+ PadStrideInfo dwc_pad_stride_info, PadStrideInfo conv_pad_stride_info)
{
std::string total_path = param_path + "_";
SubStream sg(graph);
@@ -225,18 +305,36 @@ private:
return BranchLayer(std::move(sg));
}
+
+ BranchLayer get_dwsc_node_qasymm(const std::string &data_path, std::string &&param_path,
+ const unsigned int conv_filt,
+ PadStrideInfo dwc_pad_stride_info, PadStrideInfo conv_pad_stride_info,
+ QuantizationInfo depth_weights_quant_info, QuantizationInfo point_weights_quant_info)
+ {
+ std::string total_path = "/cnn_data/mobilenet_qasymm8_model/" + param_path + "_";
+ SubStream sg(graph);
+
+ sg << DepthwiseConvolutionLayer(
+ 3U, 3U,
+ get_weights_accessor(data_path, total_path + "depthwise_weights.npy"),
+ get_weights_accessor(data_path, total_path + "depthwise_bias.npy"),
+ dwc_pad_stride_info, depth_weights_quant_info)
+ << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.f))
+ << ConvolutionLayer(
+ 1U, 1U, conv_filt,
+ get_weights_accessor(data_path, total_path + "pointwise_weights.npy"),
+ get_weights_accessor(data_path, total_path + "pointwise_bias.npy"),
+ conv_pad_stride_info, 1, point_weights_quant_info)
+ << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.f));
+
+ return BranchLayer(std::move(sg));
+ }
};
/** Main program for MobileNetV1
*
* @param[in] argc Number of arguments
- * @param[in] argv Arguments ( [optional] Target (0 = NEON, 1 = OpenCL, 2 = OpenCL with Tuner),
- * [optional] Model ID (0 = MobileNetV1_1.0_224, 1 = MobileNetV1_0.75_160),
- * [optional] Path to the weights folder,
- * [optional] image,
- * [optional] labels,
- * [optional] data layout,
- * [optional] Fast math for convolution layer (0 = DISABLED, 1 = ENABLED) )
+ * @param[in] argv Arguments
*/
int main(int argc, char **argv)
{