author    | Felix Thomasmathibalan <felixjohnny.thomasmathibalan@arm.com> | 2023-09-27 17:46:17 +0100
committer | felixjohnny.thomasmathibalan <felixjohnny.thomasmathibalan@arm.com> | 2023-09-28 12:08:05 +0000
commit    | afd38f0c617d6f89b2b4532c6c44f116617e2b6f (patch)
tree      | 03bc7d5a762099989b16a656fa8d397b490ed70e /examples/graph_resnet12.cpp
parent    | bdcb4c148ee2fdeaaddf4cf1e57bbb0de02bb894 (diff)
download  | ComputeLibrary-afd38f0c617d6f89b2b4532c6c44f116617e2b6f.tar.gz
Apply clang-format on repository
Code is formatted as per a revised clang-format configuration
file (not part of this delivery). Version 14.0.6 is used.
Exclusion List:
- files with .cl extension
- files that are not strictly C/C++ (e.g. Android.bp, Sconscript ...)
And the following directories
- compute_kernel_writer/validation/
- tests/
- include/
- src/core/NEON/kernels/convolution/
- src/core/NEON/kernels/arm_gemm/
- src/core/NEON/kernels/arm_conv/
- data/
There will be a follow-up for formatting of the .cl files and the
files under tests/ and compute_kernel_writer/validation/.
Signed-off-by: Felix Thomasmathibalan <felixjohnny.thomasmathibalan@arm.com>
Change-Id: Ib7eb1fcf4e7537b9feaefcfc15098a804a3fde0a
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/10391
Benchmark: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Gunes Bayir <gunes.bayir@arm.com>
Diffstat (limited to 'examples/graph_resnet12.cpp')
-rw-r--r-- | examples/graph_resnet12.cpp | 136
1 file changed, 69 insertions, 67 deletions
diff --git a/examples/graph_resnet12.cpp b/examples/graph_resnet12.cpp
index 48708ce29a..80db826be5 100644
--- a/examples/graph_resnet12.cpp
+++ b/examples/graph_resnet12.cpp
@@ -22,6 +22,7 @@
  * SOFTWARE.
  */
 #include "arm_compute/graph.h"
+
 #include "support/ToolchainSupport.h"
 #include "utils/CommonGraphOptions.h"
 #include "utils/GraphUtils.h"
@@ -36,7 +37,12 @@ class GraphResNet12Example : public Example
 {
 public:
     GraphResNet12Example()
-        : cmd_parser(), common_opts(cmd_parser), model_input_width(nullptr), model_input_height(nullptr), common_params(), graph(0, "ResNet12")
+        : cmd_parser(),
+          common_opts(cmd_parser),
+          model_input_width(nullptr),
+          model_input_height(nullptr),
+          common_params(),
+          graph(0, "ResNet12")
     {
         model_input_width  = cmd_parser.add_option<SimpleOption<unsigned int>>("image-width", 192);
         model_input_height = cmd_parser.add_option<SimpleOption<unsigned int>>("image-height", 128);
@@ -45,7 +51,7 @@ public:
         model_input_width->set_help("Input image width.");
         model_input_height->set_help("Input image height.");
     }
-    GraphResNet12Example(const GraphResNet12Example &) = delete;
+    GraphResNet12Example(const GraphResNet12Example &)            = delete;
     GraphResNet12Example &operator=(const GraphResNet12Example &) = delete;
     ~GraphResNet12Example() override = default;
     bool do_setup(int argc, char **argv) override
@@ -58,7 +64,7 @@ public:
         common_params = consume_common_graph_parameters(common_opts);
 
         // Return when help menu is requested
-        if(common_params.help)
+        if (common_params.help)
         {
             cmd_parser.print_help(argv[0]);
             return false;
@@ -69,7 +75,8 @@ public:
         const unsigned int image_height = model_input_height->value();
 
         // Checks
-        ARM_COMPUTE_EXIT_ON_MSG(arm_compute::is_data_type_quantized_asymmetric(common_params.data_type), "QASYMM8 not supported for this graph");
+        ARM_COMPUTE_EXIT_ON_MSG(arm_compute::is_data_type_quantized_asymmetric(common_params.data_type),
+                                "QASYMM8 not supported for this graph");
 
         // Print parameter values
         std::cout << common_params << std::endl;
@@ -84,50 +91,47 @@ public:
         std::unique_ptr<IPreprocessor> preprocessor = std::make_unique<TFPreproccessor>();
 
         // Create input descriptor
-        const TensorShape tensor_shape     = permute_shape(TensorShape(image_width, image_height, 3U, common_params.batches), DataLayout::NCHW, common_params.data_layout);
-        TensorDescriptor  input_descriptor = TensorDescriptor(tensor_shape, common_params.data_type).set_layout(common_params.data_layout);
+        const TensorShape tensor_shape =
+            permute_shape(TensorShape(image_width, image_height, 3U, common_params.batches), DataLayout::NCHW,
+                          common_params.data_layout);
+        TensorDescriptor input_descriptor =
+            TensorDescriptor(tensor_shape, common_params.data_type).set_layout(common_params.data_layout);
 
         // Set weights trained layout
         const DataLayout weights_layout = DataLayout::NCHW;
 
-        graph << common_params.target
-              << common_params.fast_math_hint
-              << InputLayer(input_descriptor, get_input_accessor(common_params, std::move(preprocessor), false /* Do not convert to BGR */))
-              << ConvolutionLayer(
-                     9U, 9U, 64U,
-                     get_weights_accessor(data_path, "conv1_weights.npy", weights_layout),
-                     get_weights_accessor(data_path, "conv1_biases.npy", weights_layout),
-                     PadStrideInfo(1, 1, 4, 4))
-                 .set_name("conv1/convolution")
-              << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("conv1/Relu");
+        graph << common_params.target << common_params.fast_math_hint
+              << InputLayer(input_descriptor, get_input_accessor(common_params, std::move(preprocessor),
+                                                                 false /* Do not convert to BGR */))
+              << ConvolutionLayer(9U, 9U, 64U, get_weights_accessor(data_path, "conv1_weights.npy", weights_layout),
+                                  get_weights_accessor(data_path, "conv1_biases.npy", weights_layout),
+                                  PadStrideInfo(1, 1, 4, 4))
+                     .set_name("conv1/convolution")
+              << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
+                     .set_name("conv1/Relu");
 
         add_residual_block(data_path, "block1", weights_layout);
         add_residual_block(data_path, "block2", weights_layout);
         add_residual_block(data_path, "block3", weights_layout);
         add_residual_block(data_path, "block4", weights_layout);
 
-        graph << ConvolutionLayer(
-                  3U, 3U, 64U,
-                  get_weights_accessor(data_path, "conv10_weights.npy", weights_layout),
-                  get_weights_accessor(data_path, "conv10_biases.npy"),
-                  PadStrideInfo(1, 1, 1, 1))
-              .set_name("conv10/convolution")
-              << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("conv10/Relu")
-              << ConvolutionLayer(
-                  3U, 3U, 64U,
-                  get_weights_accessor(data_path, "conv11_weights.npy", weights_layout),
-                  get_weights_accessor(data_path, "conv11_biases.npy"),
-                  PadStrideInfo(1, 1, 1, 1))
-              .set_name("conv11/convolution")
-              << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("conv11/Relu")
-              << ConvolutionLayer(
-                  9U, 9U, 3U,
-                  get_weights_accessor(data_path, "conv12_weights.npy", weights_layout),
-                  get_weights_accessor(data_path, "conv12_biases.npy"),
-                  PadStrideInfo(1, 1, 4, 4))
-              .set_name("conv12/convolution")
-              << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::TANH)).set_name("conv12/Tanh")
-              << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LINEAR, 0.58f, 0.5f)).set_name("conv12/Linear")
+        graph << ConvolutionLayer(3U, 3U, 64U, get_weights_accessor(data_path, "conv10_weights.npy", weights_layout),
+                                  get_weights_accessor(data_path, "conv10_biases.npy"), PadStrideInfo(1, 1, 1, 1))
+                     .set_name("conv10/convolution")
+              << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
+                     .set_name("conv10/Relu")
+              << ConvolutionLayer(3U, 3U, 64U, get_weights_accessor(data_path, "conv11_weights.npy", weights_layout),
+                                  get_weights_accessor(data_path, "conv11_biases.npy"), PadStrideInfo(1, 1, 1, 1))
+                     .set_name("conv11/convolution")
+              << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
+                     .set_name("conv11/Relu")
+              << ConvolutionLayer(9U, 9U, 3U, get_weights_accessor(data_path, "conv12_weights.npy", weights_layout),
+                                  get_weights_accessor(data_path, "conv12_biases.npy"), PadStrideInfo(1, 1, 4, 4))
+                     .set_name("conv12/convolution")
+              << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::TANH))
+                     .set_name("conv12/Tanh")
+              << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LINEAR, 0.58f, 0.5f))
+                     .set_name("conv12/Linear")
               << OutputLayer(std::make_unique<DummyAccessor>(0));
 
         // Finalize graph
@@ -152,8 +156,8 @@ public:
 private:
     CommandLineParser  cmd_parser;
     CommonGraphOptions common_opts;
-    SimpleOption<unsigned int> *model_input_width{ nullptr };
-    SimpleOption<unsigned int> *model_input_height{ nullptr };
+    SimpleOption<unsigned int> *model_input_width{nullptr};
+    SimpleOption<unsigned int> *model_input_height{nullptr};
     CommonGraphParams common_params;
     Stream            graph;
 
@@ -170,35 +174,33 @@ private:
         SubStream left(graph);
         SubStream right(graph);
 
-        right << ConvolutionLayer(
-                  3U, 3U, 64U,
-                  get_weights_accessor(data_path, unit_path + "conv1_weights.npy", weights_layout),
-                  get_weights_accessor(data_path, unit_path + "conv1_biases.npy", weights_layout),
-                  PadStrideInfo(1, 1, 1, 1))
-              .set_name(unit_name + "conv1/convolution")
+        right << ConvolutionLayer(3U, 3U, 64U,
+                                  get_weights_accessor(data_path, unit_path + "conv1_weights.npy", weights_layout),
+                                  get_weights_accessor(data_path, unit_path + "conv1_biases.npy", weights_layout),
+                                  PadStrideInfo(1, 1, 1, 1))
+                     .set_name(unit_name + "conv1/convolution")
               << BatchNormalizationLayer(
-                  get_weights_accessor(data_path, unit_path + "conv1_BatchNorm_moving_mean.npy"),
-                  get_weights_accessor(data_path, unit_path + "conv1_BatchNorm_moving_variance.npy"),
-                  get_weights_accessor(data_path, unit_path + "conv1_BatchNorm_gamma.npy"),
-                  get_weights_accessor(data_path, unit_path + "conv1_BatchNorm_beta.npy"),
-                  0.0000100099996416f)
-              .set_name(unit_name + "conv1/BatchNorm")
-              << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name(unit_name + "conv1/Relu")
-
-              << ConvolutionLayer(
-                  3U, 3U, 64U,
-                  get_weights_accessor(data_path, unit_path + "conv2_weights.npy", weights_layout),
-                  get_weights_accessor(data_path, unit_path + "conv2_biases.npy", weights_layout),
-                  PadStrideInfo(1, 1, 1, 1))
-              .set_name(unit_name + "conv2/convolution")
+                     get_weights_accessor(data_path, unit_path + "conv1_BatchNorm_moving_mean.npy"),
+                     get_weights_accessor(data_path, unit_path + "conv1_BatchNorm_moving_variance.npy"),
+                     get_weights_accessor(data_path, unit_path + "conv1_BatchNorm_gamma.npy"),
+                     get_weights_accessor(data_path, unit_path + "conv1_BatchNorm_beta.npy"), 0.0000100099996416f)
+                     .set_name(unit_name + "conv1/BatchNorm")
+              << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
+                     .set_name(unit_name + "conv1/Relu")
+
+              << ConvolutionLayer(3U, 3U, 64U,
+                                  get_weights_accessor(data_path, unit_path + "conv2_weights.npy", weights_layout),
+                                  get_weights_accessor(data_path, unit_path + "conv2_biases.npy", weights_layout),
+                                  PadStrideInfo(1, 1, 1, 1))
+                     .set_name(unit_name + "conv2/convolution")
              << BatchNormalizationLayer(
-                  get_weights_accessor(data_path, unit_path + "conv2_BatchNorm_moving_mean.npy"),
-                  get_weights_accessor(data_path, unit_path + "conv2_BatchNorm_moving_variance.npy"),
-                  get_weights_accessor(data_path, unit_path + "conv2_BatchNorm_gamma.npy"),
-                  get_weights_accessor(data_path, unit_path + "conv2_BatchNorm_beta.npy"),
-                  0.0000100099996416f)
-              .set_name(unit_name + "conv2/BatchNorm")
-              << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name(unit_name + "conv2/Relu");
+                     get_weights_accessor(data_path, unit_path + "conv2_BatchNorm_moving_mean.npy"),
+                     get_weights_accessor(data_path, unit_path + "conv2_BatchNorm_moving_variance.npy"),
+                     get_weights_accessor(data_path, unit_path + "conv2_BatchNorm_gamma.npy"),
+                     get_weights_accessor(data_path, unit_path + "conv2_BatchNorm_beta.npy"), 0.0000100099996416f)
+                     .set_name(unit_name + "conv2/BatchNorm")
+              << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
+                     .set_name(unit_name + "conv2/Relu");
 
         graph << EltwiseLayer(std::move(left), std::move(right), EltwiseOperation::Add).set_name(unit_name + "add");
     }