author    Felix Thomasmathibalan <felixjohnny.thomasmathibalan@arm.com>  2023-09-27 17:46:17 +0100
committer felixjohnny.thomasmathibalan <felixjohnny.thomasmathibalan@arm.com>  2023-09-28 12:08:05 +0000
commit    afd38f0c617d6f89b2b4532c6c44f116617e2b6f (patch)
tree      03bc7d5a762099989b16a656fa8d397b490ed70e /examples/graph_resnext50.cpp
parent    bdcb4c148ee2fdeaaddf4cf1e57bbb0de02bb894 (diff)
download  ComputeLibrary-afd38f0c617d6f89b2b4532c6c44f116617e2b6f.tar.gz
Apply clang-format on repository
Code is formatted as per a revised clang-format configuration file (not part of this delivery). Version 14.0.6 is used.

Exclusion List:
- files with .cl extension
- files that are not strictly C/C++ (e.g. Android.bp, Sconscript ...)

And the following directories:
- compute_kernel_writer/validation/
- tests/
- include/
- src/core/NEON/kernels/convolution/
- src/core/NEON/kernels/arm_gemm/
- src/core/NEON/kernels/arm_conv/
- data/

There will be a follow-up for formatting of .cl files and the files under tests/ and compute_kernel_writer/validation/.

Signed-off-by: Felix Thomasmathibalan <felixjohnny.thomasmathibalan@arm.com>
Change-Id: Ib7eb1fcf4e7537b9feaefcfc15098a804a3fde0a
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/10391
Benchmark: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Gunes Bayir <gunes.bayir@arm.com>
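Since the revised .clang-format configuration is not part of this delivery, the exact invocation is not recorded in the commit. Purely as an illustrative sketch, a small driver like the one below could apply clang-format 14 over the tree while honouring the exclusion list above; the binary name (clang-format-14), the suffix set, and running from the repository root are assumptions, not part of this change.

```python
#!/usr/bin/env python3
# Illustrative sketch only: re-applies clang-format using the exclusion list
# from the commit message. The binary name (clang-format-14), the C/C++
# suffix set, and the repo root are assumptions, not part of this delivery.
import subprocess
from pathlib import Path

REPO_ROOT = Path(".")  # assumed: run from the ComputeLibrary checkout
EXCLUDED_DIRS = {
    "compute_kernel_writer/validation",
    "tests",
    "include",
    "src/core/NEON/kernels/convolution",
    "src/core/NEON/kernels/arm_gemm",
    "src/core/NEON/kernels/arm_conv",
    "data",
}
CPP_SUFFIXES = {".c", ".h", ".cpp", ".hpp", ".cc", ".inl"}  # .cl is excluded

def is_excluded(path: Path) -> bool:
    """True if the file sits under one of the excluded directories."""
    rel = path.relative_to(REPO_ROOT).as_posix()
    return any(rel == d or rel.startswith(d + "/") for d in EXCLUDED_DIRS)

for f in REPO_ROOT.rglob("*"):
    if f.is_file() and f.suffix in CPP_SUFFIXES and not is_excluded(f):
        # --style=file picks up the (revised) .clang-format at the repo root
        subprocess.run(["clang-format-14", "-i", "--style=file", str(f)],
                       check=True)
```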
Diffstat (limited to 'examples/graph_resnext50.cpp')
-rw-r--r--  examples/graph_resnext50.cpp | 119
1 file changed, 65 insertions(+), 54 deletions(-)
diff --git a/examples/graph_resnext50.cpp b/examples/graph_resnext50.cpp
index 6378f6c741..12a1507c4c 100644
--- a/examples/graph_resnext50.cpp
+++ b/examples/graph_resnext50.cpp
@@ -22,6 +22,7 @@
* SOFTWARE.
*/
#include "arm_compute/graph.h"
+
#include "support/ToolchainSupport.h"
#include "utils/CommonGraphOptions.h"
#include "utils/GraphUtils.h"
@@ -35,8 +36,7 @@ using namespace arm_compute::graph_utils;
class GraphResNeXt50Example : public Example
{
public:
- GraphResNeXt50Example()
- : cmd_parser(), common_opts(cmd_parser), common_params(), graph(0, "ResNeXt50")
+ GraphResNeXt50Example() : cmd_parser(), common_opts(cmd_parser), common_params(), graph(0, "ResNeXt50")
{
}
bool do_setup(int argc, char **argv) override
@@ -49,14 +49,15 @@ public:
common_params = consume_common_graph_parameters(common_opts);
// Return when help menu is requested
- if(common_params.help)
+ if (common_params.help)
{
cmd_parser.print_help(argv[0]);
return false;
}
// Checks
- ARM_COMPUTE_EXIT_ON_MSG(arm_compute::is_data_type_quantized_asymmetric(common_params.data_type), "QASYMM8 not supported for this graph");
+ ARM_COMPUTE_EXIT_ON_MSG(arm_compute::is_data_type_quantized_asymmetric(common_params.data_type),
+ "QASYMM8 not supported for this graph");
// Print parameter values
std::cout << common_params << std::endl;
@@ -66,28 +67,33 @@ public:
// Create input descriptor
const auto operation_layout = common_params.data_layout;
- const TensorShape tensor_shape = permute_shape(TensorShape(224U, 224U, 3U, common_params.batches), DataLayout::NCHW, operation_layout);
- TensorDescriptor input_descriptor = TensorDescriptor(tensor_shape, common_params.data_type).set_layout(operation_layout);
+ const TensorShape tensor_shape =
+ permute_shape(TensorShape(224U, 224U, 3U, common_params.batches), DataLayout::NCHW, operation_layout);
+ TensorDescriptor input_descriptor =
+ TensorDescriptor(tensor_shape, common_params.data_type).set_layout(operation_layout);
// Set weights trained layout
const DataLayout weights_layout = DataLayout::NCHW;
- graph << common_params.target
- << common_params.fast_math_hint
+ graph << common_params.target << common_params.fast_math_hint
<< InputLayer(input_descriptor, get_input_accessor(common_params))
<< ScaleLayer(get_weights_accessor(data_path, "/cnn_data/resnext50_model/bn_data_mul.npy"),
get_weights_accessor(data_path, "/cnn_data/resnext50_model/bn_data_add.npy"))
- .set_name("bn_data/Scale")
+ .set_name("bn_data/Scale")
<< ConvolutionLayer(
- 7U, 7U, 64U,
- get_weights_accessor(data_path, "/cnn_data/resnext50_model/conv0_weights.npy", weights_layout),
- get_weights_accessor(data_path, "/cnn_data/resnext50_model/conv0_biases.npy"),
- PadStrideInfo(2, 2, 2, 3, 2, 3, DimensionRoundingType::FLOOR))
- .set_name("conv0/Convolution")
- << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("conv0/Relu")
- << PoolingLayer(PoolingLayerInfo(PoolingType::MAX, 3, operation_layout, PadStrideInfo(2, 2, 0, 1, 0, 1, DimensionRoundingType::FLOOR))).set_name("pool0");
-
- add_residual_block(data_path, weights_layout, /*ofm*/ 256, /*stage*/ 1, /*num_unit*/ 3, /*stride_conv_unit1*/ 1);
+ 7U, 7U, 64U,
+ get_weights_accessor(data_path, "/cnn_data/resnext50_model/conv0_weights.npy", weights_layout),
+ get_weights_accessor(data_path, "/cnn_data/resnext50_model/conv0_biases.npy"),
+ PadStrideInfo(2, 2, 2, 3, 2, 3, DimensionRoundingType::FLOOR))
+ .set_name("conv0/Convolution")
+ << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
+ .set_name("conv0/Relu")
+ << PoolingLayer(PoolingLayerInfo(PoolingType::MAX, 3, operation_layout,
+ PadStrideInfo(2, 2, 0, 1, 0, 1, DimensionRoundingType::FLOOR)))
+ .set_name("pool0");
+
+ add_residual_block(data_path, weights_layout, /*ofm*/ 256, /*stage*/ 1, /*num_unit*/ 3,
+ /*stride_conv_unit1*/ 1);
add_residual_block(data_path, weights_layout, 512, 2, 4, 2);
add_residual_block(data_path, weights_layout, 1024, 3, 6, 2);
add_residual_block(data_path, weights_layout, 2048, 4, 3, 2);
@@ -121,10 +127,14 @@ private:
CommonGraphParams common_params;
Stream graph;
- void add_residual_block(const std::string &data_path, DataLayout weights_layout,
- unsigned int base_depth, unsigned int stage, unsigned int num_units, unsigned int stride_conv_unit1)
+ void add_residual_block(const std::string &data_path,
+ DataLayout weights_layout,
+ unsigned int base_depth,
+ unsigned int stage,
+ unsigned int num_units,
+ unsigned int stride_conv_unit1)
{
- for(unsigned int i = 0; i < num_units; ++i)
+ for (unsigned int i = 0; i < num_units; ++i)
{
std::stringstream unit_path_ss;
unit_path_ss << "/cnn_data/resnext50_model/stage" << stage << "_unit" << (i + 1) << "_";
@@ -135,54 +145,55 @@ private:
std::string unit_name = unit_name_ss.str();
PadStrideInfo pad_grouped_conv(1, 1, 1, 1);
- if(i == 0)
+ if (i == 0)
{
- pad_grouped_conv = (stage == 1) ? PadStrideInfo(stride_conv_unit1, stride_conv_unit1, 1, 1) : PadStrideInfo(stride_conv_unit1, stride_conv_unit1, 0, 1, 0, 1, DimensionRoundingType::FLOOR);
+ pad_grouped_conv = (stage == 1) ? PadStrideInfo(stride_conv_unit1, stride_conv_unit1, 1, 1)
+ : PadStrideInfo(stride_conv_unit1, stride_conv_unit1, 0, 1, 0, 1,
+ DimensionRoundingType::FLOOR);
}
SubStream right(graph);
- right << ConvolutionLayer(
- 1U, 1U, base_depth / 2,
- get_weights_accessor(data_path, unit_path + "conv1_weights.npy", weights_layout),
- get_weights_accessor(data_path, unit_path + "conv1_biases.npy"),
- PadStrideInfo(1, 1, 0, 0))
- .set_name(unit_name + "conv1/convolution")
- << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name(unit_name + "conv1/Relu")
-
- << ConvolutionLayer(
- 3U, 3U, base_depth / 2,
- get_weights_accessor(data_path, unit_path + "conv2_weights.npy", weights_layout),
- std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr),
- pad_grouped_conv, 32)
- .set_name(unit_name + "conv2/convolution")
+ right << ConvolutionLayer(1U, 1U, base_depth / 2,
+ get_weights_accessor(data_path, unit_path + "conv1_weights.npy", weights_layout),
+ get_weights_accessor(data_path, unit_path + "conv1_biases.npy"),
+ PadStrideInfo(1, 1, 0, 0))
+ .set_name(unit_name + "conv1/convolution")
+ << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
+ .set_name(unit_name + "conv1/Relu")
+
+ << ConvolutionLayer(3U, 3U, base_depth / 2,
+ get_weights_accessor(data_path, unit_path + "conv2_weights.npy", weights_layout),
+ std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr), pad_grouped_conv,
+ 32)
+ .set_name(unit_name + "conv2/convolution")
<< ScaleLayer(get_weights_accessor(data_path, unit_path + "bn2_mul.npy"),
get_weights_accessor(data_path, unit_path + "bn2_add.npy"))
- .set_name(unit_name + "conv1/Scale")
- << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name(unit_name + "conv2/Relu")
+ .set_name(unit_name + "conv1/Scale")
+ << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
+ .set_name(unit_name + "conv2/Relu")
- << ConvolutionLayer(
- 1U, 1U, base_depth,
- get_weights_accessor(data_path, unit_path + "conv3_weights.npy", weights_layout),
- get_weights_accessor(data_path, unit_path + "conv3_biases.npy"),
- PadStrideInfo(1, 1, 0, 0))
- .set_name(unit_name + "conv3/convolution");
+ << ConvolutionLayer(1U, 1U, base_depth,
+ get_weights_accessor(data_path, unit_path + "conv3_weights.npy", weights_layout),
+ get_weights_accessor(data_path, unit_path + "conv3_biases.npy"),
+ PadStrideInfo(1, 1, 0, 0))
+ .set_name(unit_name + "conv3/convolution");
SubStream left(graph);
- if(i == 0)
+ if (i == 0)
{
- left << ConvolutionLayer(
- 1U, 1U, base_depth,
- get_weights_accessor(data_path, unit_path + "sc_weights.npy", weights_layout),
- std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr),
- PadStrideInfo(stride_conv_unit1, stride_conv_unit1, 0, 0))
- .set_name(unit_name + "sc/convolution")
+ left << ConvolutionLayer(1U, 1U, base_depth,
+ get_weights_accessor(data_path, unit_path + "sc_weights.npy", weights_layout),
+ std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr),
+ PadStrideInfo(stride_conv_unit1, stride_conv_unit1, 0, 0))
+ .set_name(unit_name + "sc/convolution")
<< ScaleLayer(get_weights_accessor(data_path, unit_path + "sc_bn_mul.npy"),
get_weights_accessor(data_path, unit_path + "sc_bn_add.npy"))
- .set_name(unit_name + "sc/scale");
+ .set_name(unit_name + "sc/scale");
}
graph << EltwiseLayer(std::move(left), std::move(right), EltwiseOperation::Add).set_name(unit_name + "add");
- graph << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name(unit_name + "Relu");
+ graph << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
+ .set_name(unit_name + "Relu");
}
}
};