From afd38f0c617d6f89b2b4532c6c44f116617e2b6f Mon Sep 17 00:00:00 2001
From: Felix Thomasmathibalan
Date: Wed, 27 Sep 2023 17:46:17 +0100
Subject: Apply clang-format on repository

Code is formatted as per a revised clang format configuration
file (not part of this delivery). Version 14.0.6 is used.

Exclusion List:
- files with .cl extension
- files that are not strictly C/C++ (e.g. Android.bp, Sconscript ...)

And the following directories
- compute_kernel_writer/validation/
- tests/
- include/
- src/core/NEON/kernels/convolution/
- src/core/NEON/kernels/arm_gemm/
- src/core/NEON/kernels/arm_conv/
- data/

There will be a follow up for formatting of .cl files and the files
under tests/ and compute_kernel_writer/validation/.

Signed-off-by: Felix Thomasmathibalan
Change-Id: Ib7eb1fcf4e7537b9feaefcfc15098a804a3fde0a
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/10391
Benchmark: Arm Jenkins
Tested-by: Arm Jenkins
Reviewed-by: Gunes Bayir
---
 examples/graph_shufflenet.cpp | 161 +++++++++++++++++++++---------------------
 1 file changed, 80 insertions(+), 81 deletions(-)

(limited to 'examples/graph_shufflenet.cpp')

diff --git a/examples/graph_shufflenet.cpp b/examples/graph_shufflenet.cpp
index 6e13c5eeb4..513d95884e 100644
--- a/examples/graph_shufflenet.cpp
+++ b/examples/graph_shufflenet.cpp
@@ -22,6 +22,7 @@
  * SOFTWARE.
  */
 #include "arm_compute/graph.h"
+
 #include "support/ToolchainSupport.h"
 #include "utils/CommonGraphOptions.h"
 #include "utils/GraphUtils.h"
@@ -35,8 +36,7 @@ using namespace arm_compute::graph_utils;
 class ShuffleNetExample : public Example
 {
 public:
-    ShuffleNetExample()
-        : cmd_parser(), common_opts(cmd_parser), common_params(), graph(0, "ShuffleNet")
+    ShuffleNetExample() : cmd_parser(), common_opts(cmd_parser), common_params(), graph(0, "ShuffleNet")
     {
     }
     bool do_setup(int argc, char **argv) override
@@ -49,20 +49,21 @@ public:
         common_params = consume_common_graph_parameters(common_opts);
 
         // Return when help menu is requested
-        if(common_params.help)
+        if (common_params.help)
         {
             cmd_parser.print_help(argv[0]);
             return false;
         }
 
         // Set default layout if needed (Single kernel grouped convolution not yet supported int NHWC)
-        if(!common_opts.data_layout->is_set())
+        if (!common_opts.data_layout->is_set())
         {
             common_params.data_layout = DataLayout::NHWC;
         }
 
         // Checks
-        ARM_COMPUTE_EXIT_ON_MSG(arm_compute::is_data_type_quantized_asymmetric(common_params.data_type), "QASYMM8 not supported for this graph");
+        ARM_COMPUTE_EXIT_ON_MSG(arm_compute::is_data_type_quantized_asymmetric(common_params.data_type),
+                                "QASYMM8 not supported for this graph");
 
         // Print parameter values
         std::cout << common_params << std::endl;
@@ -75,15 +76,17 @@ public:
         std::string data_path = common_params.data_path;
 
         // Add model path to data path
-        if(!data_path.empty())
+        if (!data_path.empty())
         {
             data_path += model_path;
         }
 
         // Create input descriptor
         const auto operation_layout = common_params.data_layout;
-        const TensorShape tensor_shape = permute_shape(TensorShape(224U, 224U, 3U, common_params.batches), DataLayout::NCHW, operation_layout);
-        TensorDescriptor input_descriptor = TensorDescriptor(tensor_shape, common_params.data_type).set_layout(operation_layout);
+        const TensorShape tensor_shape =
+            permute_shape(TensorShape(224U, 224U, 3U, common_params.batches), DataLayout::NCHW, operation_layout);
+        TensorDescriptor input_descriptor =
+            TensorDescriptor(tensor_shape, common_params.data_type).set_layout(operation_layout);
 
         // Set weights trained layout
         const DataLayout weights_layout = DataLayout::NCHW;
@@ -91,24 +94,22 @@ public:
         // Create preprocessor
         std::unique_ptr preprocessor = std::make_unique(0);
 
-        graph << common_params.target
-              << common_params.fast_math_hint
-              << InputLayer(input_descriptor, get_input_accessor(common_params, std::move(preprocessor), false /* Do not convert to BGR */))
-              << ConvolutionLayer(
-                  3U, 3U, 24U,
-                  get_weights_accessor(data_path, "conv3_0_w_0.npy", weights_layout),
-                  get_weights_accessor(data_path, "conv3_0_b_0.npy", weights_layout),
-                  PadStrideInfo(2, 2, 1, 1))
-              .set_name("Conv1/convolution")
-              << BatchNormalizationLayer(
-                  get_weights_accessor(data_path, "conv3_0_bn_rm_0.npy"),
-                  get_weights_accessor(data_path, "conv3_0_bn_riv_0.npy"),
-                  get_weights_accessor(data_path, "conv3_0_bn_s_0.npy"),
-                  get_weights_accessor(data_path, "conv3_0_bn_b_0.npy"),
-                  1e-5f)
-              .set_name("Conv1/BatchNorm")
-              << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("Conv1/Relu")
-              << PoolingLayer(PoolingLayerInfo(PoolingType::MAX, 3, operation_layout, PadStrideInfo(2, 2, 1, 1))).set_name("pool1/MaxPool");
+        graph << common_params.target << common_params.fast_math_hint
+              << InputLayer(input_descriptor, get_input_accessor(common_params, std::move(preprocessor),
+                                                                 false /* Do not convert to BGR */))
+              << ConvolutionLayer(3U, 3U, 24U, get_weights_accessor(data_path, "conv3_0_w_0.npy", weights_layout),
+                                  get_weights_accessor(data_path, "conv3_0_b_0.npy", weights_layout),
+                                  PadStrideInfo(2, 2, 1, 1))
+                     .set_name("Conv1/convolution")
+              << BatchNormalizationLayer(get_weights_accessor(data_path, "conv3_0_bn_rm_0.npy"),
+                                         get_weights_accessor(data_path, "conv3_0_bn_riv_0.npy"),
+                                         get_weights_accessor(data_path, "conv3_0_bn_s_0.npy"),
+                                         get_weights_accessor(data_path, "conv3_0_bn_b_0.npy"), 1e-5f)
+                     .set_name("Conv1/BatchNorm")
+              << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
+                     .set_name("Conv1/Relu")
+              << PoolingLayer(PoolingLayerInfo(PoolingType::MAX, 3, operation_layout, PadStrideInfo(2, 2, 1, 1)))
+                     .set_name("pool1/MaxPool");
 
         // Stage 2
         add_residual_block(data_path, DataLayout::NCHW, 0U /* unit */, 112U /* depth */, 2U /* stride */);
@@ -134,13 +135,10 @@
 
         graph << PoolingLayer(PoolingLayerInfo(PoolingType::AVG, operation_layout)).set_name("predictions/AvgPool")
               << FlattenLayer().set_name("predictions/Reshape")
-              << FullyConnectedLayer(
-                  1000U,
-                  get_weights_accessor(data_path, "pred_w_0.npy", weights_layout),
-                  get_weights_accessor(data_path, "pred_b_0.npy"))
-              .set_name("predictions/FC")
-              << SoftmaxLayer().set_name("predictions/Softmax")
-              << OutputLayer(get_output_accessor(common_params, 5));
+              << FullyConnectedLayer(1000U, get_weights_accessor(data_path, "pred_w_0.npy", weights_layout),
+                                     get_weights_accessor(data_path, "pred_b_0.npy"))
+                     .set_name("predictions/FC")
+              << SoftmaxLayer().set_name("predictions/Softmax") << OutputLayer(get_output_accessor(common_params, 5));
 
         // Finalize graph
         GraphConfig config;
@@ -167,8 +165,11 @@
     CommonGraphParams common_params;
     Stream graph;
 
-    void add_residual_block(const std::string &data_path, DataLayout weights_layout,
-                            unsigned int unit, unsigned int depth, unsigned int stride)
+    void add_residual_block(const std::string &data_path,
+                            DataLayout weights_layout,
+                            unsigned int unit,
+                            unsigned int depth,
+                            unsigned int stride)
     {
         PadStrideInfo dwc_info = PadStrideInfo(1, 1, 1, 1);
         const unsigned int gconv_id = unit * 2;
@@ -181,63 +182,61 @@
         SubStream left_ss(graph);
         SubStream right_ss(graph);
 
-        if(stride == 2)
+        if (stride == 2)
         {
-            right_ss << PoolingLayer(PoolingLayerInfo(PoolingType::AVG, 3, common_params.data_layout, PadStrideInfo(2, 2, 1, 1))).set_name(unit_name + "/pool_1/AveragePool");
+            right_ss << PoolingLayer(
+                            PoolingLayerInfo(PoolingType::AVG, 3, common_params.data_layout, PadStrideInfo(2, 2, 1, 1)))
+                            .set_name(unit_name + "/pool_1/AveragePool");
             dwc_info = PadStrideInfo(2, 2, 1, 1);
         }
 
-        left_ss << ConvolutionLayer(
-                    1U, 1U, depth,
-                    get_weights_accessor(data_path, "gconv1_" + gconv_id_name + "_w_0.npy", weights_layout),
-                    std::unique_ptr(nullptr),
-                    PadStrideInfo(1, 1, 0, 0), num_groups)
-                .set_name(unit_name + "/gconv1_" + gconv_id_name + "/convolution")
-                << BatchNormalizationLayer(
-                    get_weights_accessor(data_path, "gconv1_" + gconv_id_name + "_bn_rm_0.npy"),
-                    get_weights_accessor(data_path, "gconv1_" + gconv_id_name + "_bn_riv_0.npy"),
-                    get_weights_accessor(data_path, "gconv1_" + gconv_id_name + "_bn_s_0.npy"),
-                    get_weights_accessor(data_path, "gconv1_" + gconv_id_name + "_bn_b_0.npy"),
-                    1e-5f)
-                .set_name(unit_name + "/gconv1_" + gconv_id_name + "/BatchNorm")
-                << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name(unit_name + "/gconv1_" + gconv_id_name + "/Relu")
-                << ChannelShuffleLayer(num_groups).set_name(unit_name + "/shuffle_0/ChannelShufle")
-                << DepthwiseConvolutionLayer(
-                    3U, 3U,
-                    get_weights_accessor(data_path, "gconv3_" + unit_id_name + "_w_0.npy", weights_layout),
-                    std::unique_ptr(nullptr),
-                    dwc_info)
-                .set_name(unit_name + "/gconv3_" + unit_id_name + "/depthwise")
-                << BatchNormalizationLayer(
-                    get_weights_accessor(data_path, "gconv3_" + unit_id_name + "_bn_rm_0.npy"),
-                    get_weights_accessor(data_path, "gconv3_" + unit_id_name + "_bn_riv_0.npy"),
-                    get_weights_accessor(data_path, "gconv3_" + unit_id_name + "_bn_s_0.npy"),
-                    get_weights_accessor(data_path, "gconv3_" + unit_id_name + "_bn_b_0.npy"),
-                    1e-5f)
-                .set_name(unit_name + "/gconv3_" + unit_id_name + "/BatchNorm")
-                << ConvolutionLayer(
-                    1U, 1U, depth,
-                    get_weights_accessor(data_path, "gconv1_" + gconv_id_1_name + "_w_0.npy", weights_layout),
-                    std::unique_ptr(nullptr),
-                    PadStrideInfo(1, 1, 0, 0), num_groups)
-                .set_name(unit_name + "/gconv1_" + gconv_id_1_name + "/convolution")
-                << BatchNormalizationLayer(
-                    get_weights_accessor(data_path, "gconv1_" + gconv_id_1_name + "_bn_rm_0.npy"),
-                    get_weights_accessor(data_path, "gconv1_" + gconv_id_1_name + "_bn_riv_0.npy"),
-                    get_weights_accessor(data_path, "gconv1_" + gconv_id_1_name + "_bn_s_0.npy"),
-                    get_weights_accessor(data_path, "gconv1_" + gconv_id_1_name + "_bn_b_0.npy"),
-                    1e-5f)
-                .set_name(unit_name + "/gconv1_" + gconv_id_1_name + "/BatchNorm");
+        left_ss
+            << ConvolutionLayer(1U, 1U, depth,
+                                get_weights_accessor(data_path, "gconv1_" + gconv_id_name + "_w_0.npy", weights_layout),
+                                std::unique_ptr(nullptr),
+                                PadStrideInfo(1, 1, 0, 0), num_groups)
+                   .set_name(unit_name + "/gconv1_" + gconv_id_name + "/convolution")
+            << BatchNormalizationLayer(get_weights_accessor(data_path, "gconv1_" + gconv_id_name + "_bn_rm_0.npy"),
+                                       get_weights_accessor(data_path, "gconv1_" + gconv_id_name + "_bn_riv_0.npy"),
+                                       get_weights_accessor(data_path, "gconv1_" + gconv_id_name + "_bn_s_0.npy"),
+                                       get_weights_accessor(data_path, "gconv1_" + gconv_id_name + "_bn_b_0.npy"),
+                                       1e-5f)
+                   .set_name(unit_name + "/gconv1_" + gconv_id_name + "/BatchNorm")
+            << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
+                   .set_name(unit_name + "/gconv1_" + gconv_id_name + "/Relu")
+            << ChannelShuffleLayer(num_groups).set_name(unit_name + "/shuffle_0/ChannelShufle")
+            << DepthwiseConvolutionLayer(
+                   3U, 3U, get_weights_accessor(data_path, "gconv3_" + unit_id_name + "_w_0.npy", weights_layout),
+                   std::unique_ptr(nullptr), dwc_info)
+                   .set_name(unit_name + "/gconv3_" + unit_id_name + "/depthwise")
+            << BatchNormalizationLayer(get_weights_accessor(data_path, "gconv3_" + unit_id_name + "_bn_rm_0.npy"),
+                                       get_weights_accessor(data_path, "gconv3_" + unit_id_name + "_bn_riv_0.npy"),
+                                       get_weights_accessor(data_path, "gconv3_" + unit_id_name + "_bn_s_0.npy"),
+                                       get_weights_accessor(data_path, "gconv3_" + unit_id_name + "_bn_b_0.npy"), 1e-5f)
+                   .set_name(unit_name + "/gconv3_" + unit_id_name + "/BatchNorm")
+            << ConvolutionLayer(
+                   1U, 1U, depth,
+                   get_weights_accessor(data_path, "gconv1_" + gconv_id_1_name + "_w_0.npy", weights_layout),
+                   std::unique_ptr(nullptr), PadStrideInfo(1, 1, 0, 0), num_groups)
+                   .set_name(unit_name + "/gconv1_" + gconv_id_1_name + "/convolution")
+            << BatchNormalizationLayer(get_weights_accessor(data_path, "gconv1_" + gconv_id_1_name + "_bn_rm_0.npy"),
+                                       get_weights_accessor(data_path, "gconv1_" + gconv_id_1_name + "_bn_riv_0.npy"),
+                                       get_weights_accessor(data_path, "gconv1_" + gconv_id_1_name + "_bn_s_0.npy"),
+                                       get_weights_accessor(data_path, "gconv1_" + gconv_id_1_name + "_bn_b_0.npy"),
+                                       1e-5f)
+                   .set_name(unit_name + "/gconv1_" + gconv_id_1_name + "/BatchNorm");
 
-        if(stride == 2)
+        if (stride == 2)
         {
             graph << ConcatLayer(std::move(left_ss), std::move(right_ss)).set_name(unit_name + "/Concat");
         }
         else
         {
-            graph << EltwiseLayer(std::move(left_ss), std::move(right_ss), EltwiseOperation::Add).set_name(unit_name + "/Add");
+            graph << EltwiseLayer(std::move(left_ss), std::move(right_ss), EltwiseOperation::Add)
+                         .set_name(unit_name + "/Add");
        }
 
-        graph << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name(unit_name + "/Relu");
+        graph << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
+                     .set_name(unit_name + "/Relu");
     }
 };
--
cgit v1.2.1
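
Note on the formatting configuration: the commit message states that the revised
.clang-format file is not part of this delivery. The options below are therefore a
hypothetical sketch, inferred only from the style visible in this patch (a wider
column limit, a space after control-statement keywords, constructor initializers
kept on one line, braces on their own line, and continuation arguments aligned
under the opening parenthesis). They are illustrative assumptions, not the actual
Arm Compute Library configuration:

    # Hypothetical .clang-format sketch for clang-format 14 -- inferred from this
    # patch, NOT the actual configuration file used for the repository.
    Language: Cpp
    BasedOnStyle: LLVM
    IndentWidth: 4
    ColumnLimit: 120                           # wrapped lines in this patch break near 120 columns
    BreakBeforeBraces: Allman                  # opening braces stay on their own line
    SpaceBeforeParens: ControlStatements       # yields "if (stride == 2)" rather than "if(stride == 2)"
    PackConstructorInitializers: CurrentLine   # keeps "ShuffleNetExample() : cmd_parser(), ..." on one line
    AlignAfterOpenBracket: Align               # arguments align under the opening parenthesis

Assuming such a file is placed at the repository root, formatting of the kind shown
in this patch can be applied in place with, for example:

    clang-format -i --style=file examples/graph_shufflenet.cpp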