From afd38f0c617d6f89b2b4532c6c44f116617e2b6f Mon Sep 17 00:00:00 2001
From: Felix Thomasmathibalan
Date: Wed, 27 Sep 2023 17:46:17 +0100
Subject: Apply clang-format on repository

Code is formatted as per a revised clang-format configuration file (not part
of this delivery). Version 14.0.6 is used.

Exclusion List:
- files with .cl extension
- files that are not strictly C/C++ (e.g. Android.bp, Sconscript ...)
And the following directories
- compute_kernel_writer/validation/
- tests/
- include/
- src/core/NEON/kernels/convolution/
- src/core/NEON/kernels/arm_gemm/
- src/core/NEON/kernels/arm_conv/
- data/

There will be a follow-up for formatting of .cl files and the files under
tests/ and compute_kernel_writer/validation/.

Signed-off-by: Felix Thomasmathibalan
Change-Id: Ib7eb1fcf4e7537b9feaefcfc15098a804a3fde0a
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/10391
Benchmark: Arm Jenkins
Tested-by: Arm Jenkins
Reviewed-by: Gunes Bayir
---
 src/graph/detail/ExecutionHelpers.cpp | 87 ++++++++++++++++++-----------------
 1 file changed, 45 insertions(+), 42 deletions(-)

(limited to 'src/graph/detail/ExecutionHelpers.cpp')

diff --git a/src/graph/detail/ExecutionHelpers.cpp b/src/graph/detail/ExecutionHelpers.cpp
index ac800df76c..870d24a6c7 100644
--- a/src/graph/detail/ExecutionHelpers.cpp
+++ b/src/graph/detail/ExecutionHelpers.cpp
@@ -23,12 +23,12 @@
  */
 #include "arm_compute/graph/detail/ExecutionHelpers.h"
 
+#include "arm_compute/graph/backends/BackendRegistry.h"
 #include "arm_compute/graph/Graph.h"
 #include "arm_compute/graph/GraphContext.h"
 #include "arm_compute/graph/GraphManager.h"
 #include "arm_compute/graph/Tensor.h"
 #include "arm_compute/graph/Utils.h"
-#include "arm_compute/graph/backends/BackendRegistry.h"
 
 namespace arm_compute
 {
@@ -41,9 +41,9 @@ void validate_all_nodes(Graph &g)
     auto &nodes = g.nodes();
 
     // Create tasks
-    for(auto &node : nodes)
+    for (auto &node : nodes)
     {
-        if(node != nullptr)
+        if (node != nullptr)
         {
             Target                    assigned_target = node->assigned_target();
             backends::IDeviceBackend &backend         = backends::BackendRegistry::get().get_backend(assigned_target);
@@ -57,9 +57,9 @@ void configure_all_tensors(Graph &g)
 {
     auto &tensors = g.tensors();
 
-    for(auto &tensor : tensors)
+    for (auto &tensor : tensors)
     {
-        if(tensor && tensor->handle() == nullptr)
+        if (tensor && tensor->handle() == nullptr)
         {
             Target                    target  = tensor->desc().target;
             backends::IDeviceBackend &backend = backends::BackendRegistry::get().get_backend(target);
@@ -72,10 +72,10 @@ void configure_all_tensors(Graph &g)
 
 void allocate_all_input_tensors(INode &node)
 {
-    for(unsigned int i = 0; i < node.num_inputs(); ++i)
+    for (unsigned int i = 0; i < node.num_inputs(); ++i)
     {
         Tensor *tensor = node.input(i);
-        if(tensor != nullptr && !tensor->bound_edges().empty())
+        if (tensor != nullptr && !tensor->bound_edges().empty())
         {
             ARM_COMPUTE_ERROR_ON_MSG(!tensor->handle(), "Tensor handle is not configured!");
             tensor->handle()->allocate();
@@ -85,10 +85,10 @@
 
 void allocate_all_output_tensors(INode &node)
 {
-    for(unsigned int i = 0; i < node.num_outputs(); ++i)
+    for (unsigned int i = 0; i < node.num_outputs(); ++i)
     {
         Tensor *tensor = node.output(i);
-        if(tensor != nullptr && !tensor->bound_edges().empty())
+        if (tensor != nullptr && !tensor->bound_edges().empty())
         {
             ARM_COMPUTE_ERROR_ON_MSG(!tensor->handle(), "Tensor handle is not configured!");
             tensor->handle()->allocate();
@@ -98,11 +98,11 @@
 
 void allocate_const_tensors(Graph &g)
 {
-    for(auto &node : g.nodes())
+    for (auto &node : g.nodes())
     {
-        if(node != nullptr)
+        if (node != nullptr)
         {
-            switch(node->type())
+            switch (node->type())
             {
                 case NodeType::Const:
                 case NodeType::Input:
@@ -121,9 +121,10 @@ void allocate_all_tensors(Graph &g)
 {
     auto &tensors = g.tensors();
 
-    for(auto &tensor : tensors)
+    for (auto &tensor : tensors)
     {
-        if(tensor && !tensor->bound_edges().empty() && tensor->handle() != nullptr && tensor->handle()->tensor().info()->is_resizable() && tensor->handle()->tensor().is_used())
+        if (tensor && !tensor->bound_edges().empty() && tensor->handle() != nullptr &&
+            tensor->handle()->tensor().info()->is_resizable() && tensor->handle()->tensor().is_used())
         {
             tensor->handle()->allocate();
         }
@@ -140,15 +141,15 @@ ExecutionWorkload configure_all_nodes(Graph &g, GraphContext &ctx, const std::vector<NodeID> &node_order)
     workload.tasks.reserve(node_order.size());
 
     // Create tasks
-    for(auto &node_id : node_order)
+    for (auto &node_id : node_order)
     {
         auto node = g.node(node_id);
-        if(node != nullptr)
+        if (node != nullptr)
         {
             Target                     assigned_target = node->assigned_target();
-            backends::IDeviceBackend &backend          = backends::BackendRegistry::get().get_backend(assigned_target);
+            backends::IDeviceBackend  &backend         = backends::BackendRegistry::get().get_backend(assigned_target);
             std::unique_ptr<IFunction> func            = backend.configure_node(*node, ctx);
-            if(func != nullptr || is_utility_node(node))
+            if (func != nullptr || is_utility_node(node))
             {
                 workload.tasks.emplace_back(ExecutionTask(std::move(func), node));
             }
@@ -156,14 +157,14 @@ ExecutionWorkload configure_all_nodes(Graph &g, GraphContext &ctx, const std::vector<NodeID> &node_order)
     }
 
     // Add inputs and outputs
-    for(auto &node : g.nodes())
+    for (auto &node : g.nodes())
     {
-        if(node != nullptr && node->type() == NodeType::Input)
+        if (node != nullptr && node->type() == NodeType::Input)
         {
             workload.inputs.push_back(node->output(0));
         }
 
-        if(node != nullptr && node->type() == NodeType::Output)
+        if (node != nullptr && node->type() == NodeType::Output)
         {
             workload.outputs.push_back(node->input(0));
             continue;
@@ -175,9 +176,9 @@
 
 void release_unused_tensors(Graph &g)
 {
-    for(auto &tensor : g.tensors())
+    for (auto &tensor : g.tensors())
     {
-        if(tensor != nullptr && tensor->handle() != nullptr)
+        if (tensor != nullptr && tensor->handle() != nullptr)
         {
             tensor->handle()->release_if_unused();
         }
@@ -194,11 +195,11 @@ void call_all_const_node_accessors(Graph &g)
 {
     auto &nodes = g.nodes();
 
-    for(auto &node : nodes)
+    for (auto &node : nodes)
     {
-        if(node != nullptr && node->type() == NodeType::Const && node->num_outputs())
+        if (node != nullptr && node->type() == NodeType::Const && node->num_outputs())
         {
-            if(!node->output(0)->bound_edges().empty())
+            if (!node->output(0)->bound_edges().empty())
             {
                 call_tensor_accessor(node->output(0));
             }
@@ -209,18 +210,19 @@
 bool call_all_input_node_accessors(ExecutionWorkload &workload)
 {
     bool is_valid = true;
-    std::for_each(std::begin(workload.inputs), std::end(workload.inputs), [&](Tensor * input_tensor)
-    {
-        bool valid_input = (input_tensor != nullptr) && input_tensor->call_accessor();
-        is_valid = is_valid && valid_input;
-    });
+    std::for_each(std::begin(workload.inputs), std::end(workload.inputs),
+                  [&](Tensor *input_tensor)
+                  {
+                      bool valid_input = (input_tensor != nullptr) && input_tensor->call_accessor();
+                      is_valid = is_valid && valid_input;
+                  });
     return is_valid;
 }
 
 void prepare_all_tasks(ExecutionWorkload &workload)
 {
     ARM_COMPUTE_ERROR_ON(workload.graph == nullptr);
-    for(auto &task : workload.tasks)
+    for (auto &task : workload.tasks)
     {
         task.prepare();
         release_unused_tensors(*workload.graph);
@@ -232,24 +234,24 @@ void call_all_tasks(ExecutionWorkload &workload)
     ARM_COMPUTE_ERROR_ON(workload.ctx == nullptr);
 
     // Acquire memory for the transition buffers
-    for(auto &mm_ctx : workload.ctx->memory_managers())
+    for (auto &mm_ctx : workload.ctx->memory_managers())
     {
-        if(mm_ctx.second.cross_group != nullptr)
+        if (mm_ctx.second.cross_group != nullptr)
         {
             mm_ctx.second.cross_group->acquire();
         }
     }
 
     // Execute tasks
-    for(auto &task : workload.tasks)
+    for (auto &task : workload.tasks)
     {
         task();
     }
 
     // Release memory for the transition buffers
-    for(auto &mm_ctx : workload.ctx->memory_managers())
+    for (auto &mm_ctx : workload.ctx->memory_managers())
     {
-        if(mm_ctx.second.cross_group != nullptr)
+        if (mm_ctx.second.cross_group != nullptr)
         {
             mm_ctx.second.cross_group->release();
@@ -259,11 +261,12 @@
 bool call_all_output_node_accessors(ExecutionWorkload &workload)
 {
     bool is_valid = true;
-    std::for_each(std::begin(workload.outputs), std::end(workload.outputs), [&](Tensor * output_tensor)
-    {
-        bool valid_output = (output_tensor != nullptr) && output_tensor->call_accessor();
-        is_valid = is_valid && valid_output;
-    });
+    std::for_each(std::begin(workload.outputs), std::end(workload.outputs),
+                  [&](Tensor *output_tensor)
+                  {
+                      bool valid_output = (output_tensor != nullptr) && output_tensor->call_accessor();
+                      is_valid = is_valid && valid_output;
+                  });
 
     sync_backends();
 
-- 
cgit v1.2.1
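
Note: the revised .clang-format file itself is, as the commit message states, not part of this delivery. The fragment below is an illustrative sketch only, inferred from the changes visible in the hunks above (space after control-statement keywords, long conditions and std::for_each lambdas wrapped at roughly 120 columns, Allman braces, right-aligned pointers and references, aligned consecutive declarations); every value in it is an assumption, not the delivered configuration.

    # Hypothetical .clang-format fragment (clang-format 14) -- inferred from this diff,
    # NOT the actual revised configuration used for the commit.
    Language: Cpp
    BreakBeforeBraces: Allman              # braces on their own line, as throughout the file
    ColumnLimit: 120                       # assumed; consistent with where long lines wrap
    IndentWidth: 4
    SpaceBeforeParens: ControlStatements   # yields the `for (`, `if (`, `switch (` changes
    PointerAlignment: Right                # `Tensor *input_tensor` in the reformatted lambdas
    AlignConsecutiveAssignments: Consecutive
    AlignConsecutiveDeclarations: Consecutive
    LambdaBodyIndentation: Signature       # lambda bodies indented relative to the signature

Running `clang-format --style=file` (version 14.0.6, per the commit message) over the repository with such a configuration, while skipping the listed exclusions, would reproduce whitespace-only diffs of the kind shown here.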