From afd38f0c617d6f89b2b4532c6c44f116617e2b6f Mon Sep 17 00:00:00 2001
From: Felix Thomasmathibalan
Date: Wed, 27 Sep 2023 17:46:17 +0100
Subject: Apply clang-format on repository

Code is formatted as per a revised clang-format configuration file (not
part of this delivery). Version 14.0.6 is used.

Exclusion List:
- files with .cl extension
- files that are not strictly C/C++ (e.g. Android.bp, Sconscript ...)

And the following directories
- compute_kernel_writer/validation/
- tests/
- include/
- src/core/NEON/kernels/convolution/
- src/core/NEON/kernels/arm_gemm/
- src/core/NEON/kernels/arm_conv/
- data/

There will be a follow-up for formatting of .cl files and the files
under tests/ and compute_kernel_writer/validation/.

Signed-off-by: Felix Thomasmathibalan
Change-Id: Ib7eb1fcf4e7537b9feaefcfc15098a804a3fde0a
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/10391
Benchmark: Arm Jenkins
Tested-by: Arm Jenkins
Reviewed-by: Gunes Bayir
---
 examples/graph_resnet_v2_50.cpp | 181 +++++++++++++++++++++-------------
 1 file changed, 93 insertions(+), 88 deletions(-)

(limited to 'examples/graph_resnet_v2_50.cpp')

diff --git a/examples/graph_resnet_v2_50.cpp b/examples/graph_resnet_v2_50.cpp
index 6d5abb4f4b..48cf9b0b3c 100644
--- a/examples/graph_resnet_v2_50.cpp
+++ b/examples/graph_resnet_v2_50.cpp
@@ -22,6 +22,7 @@
  * SOFTWARE.
  */
 #include "arm_compute/graph.h"
+
 #include "support/ToolchainSupport.h"
 #include "utils/CommonGraphOptions.h"
 #include "utils/GraphUtils.h"
@@ -35,8 +36,7 @@ using namespace arm_compute::graph_utils;
 class GraphResNetV2_50Example : public Example
 {
 public:
-    GraphResNetV2_50Example()
-        : cmd_parser(), common_opts(cmd_parser), common_params(), graph(0, "ResNetV2_50")
+    GraphResNetV2_50Example() : cmd_parser(), common_opts(cmd_parser), common_params(), graph(0, "ResNetV2_50")
     {
     }
     bool do_setup(int argc, char **argv) override
@@ -49,7 +49,7 @@ public:
         common_params = consume_common_graph_parameters(common_opts);
 
         // Return when help menu is requested
-        if(common_params.help)
+        if (common_params.help)
         {
             cmd_parser.print_help(argv[0]);
             return false;
@@ -61,7 +61,7 @@ public:
         // Get trainable parameters data path
         std::string data_path  = common_params.data_path;
         std::string model_path = "/cnn_data/resnet_v2_50_model/";
-        if(!data_path.empty())
+        if (!data_path.empty())
         {
             data_path += model_path;
         }
@@ -71,45 +71,42 @@ public:
 
         // Create input descriptor
         const auto operation_layout = common_params.data_layout;
-        const TensorShape tensor_shape = permute_shape(TensorShape(224U, 224U, 3U, common_params.batches), DataLayout::NCHW, operation_layout);
-        TensorDescriptor input_descriptor = TensorDescriptor(tensor_shape, common_params.data_type).set_layout(operation_layout);
+        const TensorShape tensor_shape =
+            permute_shape(TensorShape(224U, 224U, 3U, common_params.batches), DataLayout::NCHW, operation_layout);
+        TensorDescriptor input_descriptor =
+            TensorDescriptor(tensor_shape, common_params.data_type).set_layout(operation_layout);
 
         // Set weights trained layout
         const DataLayout weights_layout = DataLayout::NCHW;
 
-        graph << common_params.target
-              << common_params.fast_math_hint
-              << InputLayer(input_descriptor, get_input_accessor(common_params, std::move(preprocessor), false /* Do not convert to BGR */))
-              << ConvolutionLayer(
-                  7U, 7U, 64U,
-                  get_weights_accessor(data_path, "conv1_weights.npy", weights_layout),
-                  get_weights_accessor(data_path, "conv1_biases.npy", weights_layout),
-                  PadStrideInfo(2, 2, 3, 3))
-              .set_name("conv1/convolution")
-              << PoolingLayer(PoolingLayerInfo(PoolingType::MAX, 3, operation_layout, PadStrideInfo(2, 2, 0, 1, 0, 1, DimensionRoundingType::FLOOR))).set_name("pool1/MaxPool");
+        graph << common_params.target << common_params.fast_math_hint
+              << InputLayer(input_descriptor, get_input_accessor(common_params, std::move(preprocessor),
+                                                                 false /* Do not convert to BGR */))
+              << ConvolutionLayer(7U, 7U, 64U, get_weights_accessor(data_path, "conv1_weights.npy", weights_layout),
+                                  get_weights_accessor(data_path, "conv1_biases.npy", weights_layout),
+                                  PadStrideInfo(2, 2, 3, 3))
+                     .set_name("conv1/convolution")
+              << PoolingLayer(PoolingLayerInfo(PoolingType::MAX, 3, operation_layout,
+                                               PadStrideInfo(2, 2, 0, 1, 0, 1, DimensionRoundingType::FLOOR)))
+                     .set_name("pool1/MaxPool");
 
         add_residual_block(data_path, "block1", weights_layout, 64, 3, 2);
         add_residual_block(data_path, "block2", weights_layout, 128, 4, 2);
         add_residual_block(data_path, "block3", weights_layout, 256, 6, 2);
         add_residual_block(data_path, "block4", weights_layout, 512, 3, 1);
 
-        graph << BatchNormalizationLayer(
-                  get_weights_accessor(data_path, "postnorm_moving_mean.npy"),
-                  get_weights_accessor(data_path, "postnorm_moving_variance.npy"),
-                  get_weights_accessor(data_path, "postnorm_gamma.npy"),
-                  get_weights_accessor(data_path, "postnorm_beta.npy"),
-                  0.000009999999747378752f)
-              .set_name("postnorm/BatchNorm")
-              << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("postnorm/Relu")
+        graph << BatchNormalizationLayer(get_weights_accessor(data_path, "postnorm_moving_mean.npy"),
+                                         get_weights_accessor(data_path, "postnorm_moving_variance.npy"),
+                                         get_weights_accessor(data_path, "postnorm_gamma.npy"),
+                                         get_weights_accessor(data_path, "postnorm_beta.npy"), 0.000009999999747378752f)
+                     .set_name("postnorm/BatchNorm")
+              << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
+                     .set_name("postnorm/Relu")
               << PoolingLayer(PoolingLayerInfo(PoolingType::AVG, operation_layout)).set_name("pool5")
-              << ConvolutionLayer(
-                  1U, 1U, 1001U,
-                  get_weights_accessor(data_path, "logits_weights.npy", weights_layout),
-                  get_weights_accessor(data_path, "logits_biases.npy"),
-                  PadStrideInfo(1, 1, 0, 0))
-              .set_name("logits/convolution")
-              << FlattenLayer().set_name("predictions/Reshape")
-              << SoftmaxLayer().set_name("predictions/Softmax")
+              << ConvolutionLayer(1U, 1U, 1001U, get_weights_accessor(data_path, "logits_weights.npy", weights_layout),
+                                  get_weights_accessor(data_path, "logits_biases.npy"), PadStrideInfo(1, 1, 0, 0))
+                     .set_name("logits/convolution")
+              << FlattenLayer().set_name("predictions/Reshape") << SoftmaxLayer().set_name("predictions/Softmax")
               << OutputLayer(get_output_accessor(common_params, 5));
 
         // Finalize graph
@@ -139,10 +136,14 @@ private:
     CommonGraphParams  common_params;
     Stream             graph;
 
-    void add_residual_block(const std::string &data_path, const std::string &name, DataLayout weights_layout,
-                            unsigned int base_depth, unsigned int num_units, unsigned int stride)
+    void add_residual_block(const std::string &data_path,
+                            const std::string &name,
+                            DataLayout         weights_layout,
+                            unsigned int       base_depth,
+                            unsigned int       num_units,
+                            unsigned int       stride)
     {
-        for(unsigned int i = 0; i < num_units; ++i)
+        for (unsigned int i = 0; i < num_units; ++i)
         {
             // Generate unit names
             std::stringstream unit_path_ss;
@@ -154,7 +155,8 @@ private:
             std::string unit_name = unit_name_ss.str();
 
             const TensorShape last_shape = graph.graph().node(graph.tail_node())->output(0)->desc().shape;
-            unsigned int depth_in = last_shape[arm_compute::get_data_layout_dimension_index(common_params.data_layout, DataLayoutDimension::CHANNEL)];
+            unsigned int depth_in = last_shape[arm_compute::get_data_layout_dimension_index(
+                common_params.data_layout, DataLayoutDimension::CHANNEL)];
             unsigned int depth_out = base_depth * 4;
 
             // All units have stride 1 apart from last one
@@ -162,73 +164,76 @@ private:
 
             // Preact
             SubStream preact(graph);
-            preact << BatchNormalizationLayer(
-                       get_weights_accessor(data_path, unit_path + "preact_moving_mean.npy"),
-                       get_weights_accessor(data_path, unit_path + "preact_moving_variance.npy"),
-                       get_weights_accessor(data_path, unit_path + "preact_gamma.npy"),
-                       get_weights_accessor(data_path, unit_path + "preact_beta.npy"),
-                       0.000009999999747378752f)
-                   .set_name(unit_name + "preact/BatchNorm")
-                   << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name(unit_name + "preact/Relu");
+            preact << BatchNormalizationLayer(get_weights_accessor(data_path, unit_path + "preact_moving_mean.npy"),
+                                              get_weights_accessor(data_path, unit_path + "preact_moving_variance.npy"),
+                                              get_weights_accessor(data_path, unit_path + "preact_gamma.npy"),
+                                              get_weights_accessor(data_path, unit_path + "preact_beta.npy"),
+                                              0.000009999999747378752f)
+                          .set_name(unit_name + "preact/BatchNorm")
+                   << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
+                          .set_name(unit_name + "preact/Relu");
 
             // Create bottleneck path
             SubStream shortcut(graph);
-            if(depth_in == depth_out)
+            if (depth_in == depth_out)
             {
-                if(middle_stride != 1)
+                if (middle_stride != 1)
                 {
-                    shortcut << PoolingLayer(PoolingLayerInfo(PoolingType::MAX, 1, common_params.data_layout, PadStrideInfo(middle_stride, middle_stride, 0, 0), true)).set_name(unit_name + "shortcut/MaxPool");
+                    shortcut << PoolingLayer(PoolingLayerInfo(PoolingType::MAX, 1, common_params.data_layout,
+                                                              PadStrideInfo(middle_stride, middle_stride, 0, 0), true))
+                                    .set_name(unit_name + "shortcut/MaxPool");
                 }
             }
             else
             {
                 shortcut.forward_tail(preact.tail_node());
                 shortcut << ConvolutionLayer(
-                             1U, 1U, depth_out,
-                             get_weights_accessor(data_path, unit_path + "shortcut_weights.npy", weights_layout),
-                             get_weights_accessor(data_path, unit_path + "shortcut_biases.npy", weights_layout),
-                             PadStrideInfo(1, 1, 0, 0))
-                         .set_name(unit_name + "shortcut/convolution");
+                                1U, 1U, depth_out,
+                                get_weights_accessor(data_path, unit_path + "shortcut_weights.npy", weights_layout),
+                                get_weights_accessor(data_path, unit_path + "shortcut_biases.npy", weights_layout),
+                                PadStrideInfo(1, 1, 0, 0))
+                                .set_name(unit_name + "shortcut/convolution");
             }
 
             // Create residual path
             SubStream residual(preact);
-            residual << ConvolutionLayer(
-                         1U, 1U, base_depth,
-                         get_weights_accessor(data_path, unit_path + "conv1_weights.npy", weights_layout),
-                         std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr),
-                         PadStrideInfo(1, 1, 0, 0))
-                     .set_name(unit_name + "conv1/convolution")
-                     << BatchNormalizationLayer(
-                         get_weights_accessor(data_path, unit_path + "conv1_BatchNorm_moving_mean.npy"),
-                         get_weights_accessor(data_path, unit_path + "conv1_BatchNorm_moving_variance.npy"),
-                         get_weights_accessor(data_path, unit_path + "conv1_BatchNorm_gamma.npy"),
-                         get_weights_accessor(data_path, unit_path + "conv1_BatchNorm_beta.npy"),
-                         0.000009999999747378752f)
-                     .set_name(unit_name + "conv1/BatchNorm")
-                     << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name(unit_name + "conv1/Relu")
-                     << ConvolutionLayer(
-                         3U, 3U, base_depth,
get_weights_accessor(data_path, unit_path + "conv2_weights.npy", weights_layout), - std::unique_ptr(nullptr), - PadStrideInfo(middle_stride, middle_stride, 1, 1)) - .set_name(unit_name + "conv2/convolution") - << BatchNormalizationLayer( - get_weights_accessor(data_path, unit_path + "conv2_BatchNorm_moving_mean.npy"), - get_weights_accessor(data_path, unit_path + "conv2_BatchNorm_moving_variance.npy"), - get_weights_accessor(data_path, unit_path + "conv2_BatchNorm_gamma.npy"), - get_weights_accessor(data_path, unit_path + "conv2_BatchNorm_beta.npy"), - 0.000009999999747378752f) - .set_name(unit_name + "conv2/BatchNorm") - << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name(unit_name + "conv1/Relu") - << ConvolutionLayer( - 1U, 1U, depth_out, - get_weights_accessor(data_path, unit_path + "conv3_weights.npy", weights_layout), - get_weights_accessor(data_path, unit_path + "conv3_biases.npy", weights_layout), - PadStrideInfo(1, 1, 0, 0)) - .set_name(unit_name + "conv3/convolution"); - - graph << EltwiseLayer(std::move(shortcut), std::move(residual), EltwiseOperation::Add).set_name(unit_name + "add"); + residual + << ConvolutionLayer(1U, 1U, base_depth, + get_weights_accessor(data_path, unit_path + "conv1_weights.npy", weights_layout), + std::unique_ptr(nullptr), + PadStrideInfo(1, 1, 0, 0)) + .set_name(unit_name + "conv1/convolution") + << BatchNormalizationLayer( + get_weights_accessor(data_path, unit_path + "conv1_BatchNorm_moving_mean.npy"), + get_weights_accessor(data_path, unit_path + "conv1_BatchNorm_moving_variance.npy"), + get_weights_accessor(data_path, unit_path + "conv1_BatchNorm_gamma.npy"), + get_weights_accessor(data_path, unit_path + "conv1_BatchNorm_beta.npy"), + 0.000009999999747378752f) + .set_name(unit_name + "conv1/BatchNorm") + << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)) + .set_name(unit_name + "conv1/Relu") + << ConvolutionLayer(3U, 3U, base_depth, + get_weights_accessor(data_path, unit_path + "conv2_weights.npy", weights_layout), + std::unique_ptr(nullptr), + PadStrideInfo(middle_stride, middle_stride, 1, 1)) + .set_name(unit_name + "conv2/convolution") + << BatchNormalizationLayer( + get_weights_accessor(data_path, unit_path + "conv2_BatchNorm_moving_mean.npy"), + get_weights_accessor(data_path, unit_path + "conv2_BatchNorm_moving_variance.npy"), + get_weights_accessor(data_path, unit_path + "conv2_BatchNorm_gamma.npy"), + get_weights_accessor(data_path, unit_path + "conv2_BatchNorm_beta.npy"), + 0.000009999999747378752f) + .set_name(unit_name + "conv2/BatchNorm") + << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)) + .set_name(unit_name + "conv1/Relu") + << ConvolutionLayer(1U, 1U, depth_out, + get_weights_accessor(data_path, unit_path + "conv3_weights.npy", weights_layout), + get_weights_accessor(data_path, unit_path + "conv3_biases.npy", weights_layout), + PadStrideInfo(1, 1, 0, 0)) + .set_name(unit_name + "conv3/convolution"); + + graph << EltwiseLayer(std::move(shortcut), std::move(residual), EltwiseOperation::Add) + .set_name(unit_name + "add"); } } }; -- cgit v1.2.1