Diffstat (limited to 'src/graph/detail/ExecutionHelpers.cpp')
-rw-r--r--  src/graph/detail/ExecutionHelpers.cpp  91
1 file changed, 48 insertions, 43 deletions
diff --git a/src/graph/detail/ExecutionHelpers.cpp b/src/graph/detail/ExecutionHelpers.cpp
index 5be3706cfe..870d24a6c7 100644
--- a/src/graph/detail/ExecutionHelpers.cpp
+++ b/src/graph/detail/ExecutionHelpers.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2020 Arm Limited.
+ * Copyright (c) 2018-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -23,12 +23,12 @@
*/
#include "arm_compute/graph/detail/ExecutionHelpers.h"
+#include "arm_compute/graph/backends/BackendRegistry.h"
#include "arm_compute/graph/Graph.h"
#include "arm_compute/graph/GraphContext.h"
#include "arm_compute/graph/GraphManager.h"
#include "arm_compute/graph/Tensor.h"
#include "arm_compute/graph/Utils.h"
-#include "arm_compute/graph/backends/BackendRegistry.h"
namespace arm_compute
{
@@ -41,9 +41,9 @@ void validate_all_nodes(Graph &g)
auto &nodes = g.nodes();
// Create tasks
- for(auto &node : nodes)
+ for (auto &node : nodes)
{
- if(node != nullptr)
+ if (node != nullptr)
{
Target assigned_target = node->assigned_target();
backends::IDeviceBackend &backend = backends::BackendRegistry::get().get_backend(assigned_target);
@@ -57,9 +57,9 @@ void configure_all_tensors(Graph &g)
{
auto &tensors = g.tensors();
- for(auto &tensor : tensors)
+ for (auto &tensor : tensors)
{
- if(tensor && tensor->handle() == nullptr)
+ if (tensor && tensor->handle() == nullptr)
{
Target target = tensor->desc().target;
backends::IDeviceBackend &backend = backends::BackendRegistry::get().get_backend(target);
@@ -72,10 +72,10 @@ void configure_all_tensors(Graph &g)
void allocate_all_input_tensors(INode &node)
{
- for(unsigned int i = 0; i < node.num_inputs(); ++i)
+ for (unsigned int i = 0; i < node.num_inputs(); ++i)
{
Tensor *tensor = node.input(i);
- if(tensor != nullptr && !tensor->bound_edges().empty())
+ if (tensor != nullptr && !tensor->bound_edges().empty())
{
ARM_COMPUTE_ERROR_ON_MSG(!tensor->handle(), "Tensor handle is not configured!");
tensor->handle()->allocate();
@@ -85,10 +85,10 @@ void allocate_all_input_tensors(INode &node)
void allocate_all_output_tensors(INode &node)
{
- for(unsigned int i = 0; i < node.num_outputs(); ++i)
+ for (unsigned int i = 0; i < node.num_outputs(); ++i)
{
Tensor *tensor = node.output(i);
- if(tensor != nullptr && !tensor->bound_edges().empty())
+ if (tensor != nullptr && !tensor->bound_edges().empty())
{
ARM_COMPUTE_ERROR_ON_MSG(!tensor->handle(), "Tensor handle is not configured!");
tensor->handle()->allocate();
@@ -98,11 +98,11 @@ void allocate_all_output_tensors(INode &node)
void allocate_const_tensors(Graph &g)
{
- for(auto &node : g.nodes())
+ for (auto &node : g.nodes())
{
- if(node != nullptr)
+ if (node != nullptr)
{
- switch(node->type())
+ switch (node->type())
{
case NodeType::Const:
case NodeType::Input:
@@ -121,9 +121,10 @@ void allocate_all_tensors(Graph &g)
{
auto &tensors = g.tensors();
- for(auto &tensor : tensors)
+ for (auto &tensor : tensors)
{
- if(tensor && !tensor->bound_edges().empty() && tensor->handle() != nullptr && tensor->handle()->tensor().info()->is_resizable() && tensor->handle()->tensor().is_used())
+ if (tensor && !tensor->bound_edges().empty() && tensor->handle() != nullptr &&
+ tensor->handle()->tensor().info()->is_resizable() && tensor->handle()->tensor().is_used())
{
tensor->handle()->allocate();
}
@@ -140,15 +141,15 @@ ExecutionWorkload configure_all_nodes(Graph &g, GraphContext &ctx, const std::ve
workload.tasks.reserve(node_order.size());
// Create tasks
- for(auto &node_id : node_order)
+ for (auto &node_id : node_order)
{
auto node = g.node(node_id);
- if(node != nullptr)
+ if (node != nullptr)
{
Target assigned_target = node->assigned_target();
- backends::IDeviceBackend &backend = backends::BackendRegistry::get().get_backend(assigned_target);
+ backends::IDeviceBackend &backend = backends::BackendRegistry::get().get_backend(assigned_target);
std::unique_ptr<IFunction> func = backend.configure_node(*node, ctx);
- if(func != nullptr || is_utility_node(node))
+ if (func != nullptr || is_utility_node(node))
{
workload.tasks.emplace_back(ExecutionTask(std::move(func), node));
}
@@ -156,14 +157,14 @@ ExecutionWorkload configure_all_nodes(Graph &g, GraphContext &ctx, const std::ve
}
// Add inputs and outputs
- for(auto &node : g.nodes())
+ for (auto &node : g.nodes())
{
- if(node != nullptr && node->type() == NodeType::Input)
+ if (node != nullptr && node->type() == NodeType::Input)
{
workload.inputs.push_back(node->output(0));
}
- if(node != nullptr && node->type() == NodeType::Output)
+ if (node != nullptr && node->type() == NodeType::Output)
{
workload.outputs.push_back(node->input(0));
continue;
@@ -175,9 +176,9 @@ ExecutionWorkload configure_all_nodes(Graph &g, GraphContext &ctx, const std::ve
void release_unused_tensors(Graph &g)
{
- for(auto &tensor : g.tensors())
+ for (auto &tensor : g.tensors())
{
- if(tensor != nullptr && tensor->handle() != nullptr)
+ if (tensor != nullptr && tensor->handle() != nullptr)
{
tensor->handle()->release_if_unused();
}
@@ -194,11 +195,11 @@ void call_all_const_node_accessors(Graph &g)
{
auto &nodes = g.nodes();
- for(auto &node : nodes)
+ for (auto &node : nodes)
{
- if(node != nullptr && node->type() == NodeType::Const && node->num_outputs())
+ if (node != nullptr && node->type() == NodeType::Const && node->num_outputs())
{
- if(!node->output(0)->bound_edges().empty())
+ if (!node->output(0)->bound_edges().empty())
{
call_tensor_accessor(node->output(0));
}
@@ -209,18 +210,19 @@ void call_all_const_node_accessors(Graph &g)
bool call_all_input_node_accessors(ExecutionWorkload &workload)
{
bool is_valid = true;
- std::for_each(std::begin(workload.inputs), std::end(workload.inputs), [&](Tensor * input_tensor)
- {
- bool valid_input = (input_tensor != nullptr) && input_tensor->call_accessor();
- is_valid = is_valid && valid_input;
- });
+ std::for_each(std::begin(workload.inputs), std::end(workload.inputs),
+ [&](Tensor *input_tensor)
+ {
+ bool valid_input = (input_tensor != nullptr) && input_tensor->call_accessor();
+ is_valid = is_valid && valid_input;
+ });
return is_valid;
}
void prepare_all_tasks(ExecutionWorkload &workload)
{
ARM_COMPUTE_ERROR_ON(workload.graph == nullptr);
- for(auto &task : workload.tasks)
+ for (auto &task : workload.tasks)
{
task.prepare();
release_unused_tensors(*workload.graph);
@@ -232,24 +234,24 @@ void call_all_tasks(ExecutionWorkload &workload)
ARM_COMPUTE_ERROR_ON(workload.ctx == nullptr);
// Acquire memory for the transition buffers
- for(auto &mm_ctx : workload.ctx->memory_managers())
+ for (auto &mm_ctx : workload.ctx->memory_managers())
{
- if(mm_ctx.second.cross_group != nullptr)
+ if (mm_ctx.second.cross_group != nullptr)
{
mm_ctx.second.cross_group->acquire();
}
}
// Execute tasks
- for(auto &task : workload.tasks)
+ for (auto &task : workload.tasks)
{
task();
}
// Release memory for the transition buffers
- for(auto &mm_ctx : workload.ctx->memory_managers())
+ for (auto &mm_ctx : workload.ctx->memory_managers())
{
- if(mm_ctx.second.cross_group != nullptr)
+ if (mm_ctx.second.cross_group != nullptr)
{
mm_ctx.second.cross_group->release();
}
@@ -259,11 +261,14 @@ void call_all_tasks(ExecutionWorkload &workload)
bool call_all_output_node_accessors(ExecutionWorkload &workload)
{
bool is_valid = true;
- std::for_each(std::begin(workload.outputs), std::end(workload.outputs), [&](Tensor * output_tensor)
- {
- bool valid_output = (output_tensor != nullptr) && output_tensor->call_accessor();
- is_valid = is_valid && valid_output;
- });
+ std::for_each(std::begin(workload.outputs), std::end(workload.outputs),
+ [&](Tensor *output_tensor)
+ {
+ bool valid_output = (output_tensor != nullptr) && output_tensor->call_accessor();
+ is_valid = is_valid && valid_output;
+ });
+
+ sync_backends();
return is_valid;
}
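
The only functional change in this patch (the rest is brace-spacing and line-wrapping restyling) is the `sync_backends()` call added at the end of `call_all_output_node_accessors()`: once the output accessors have run, each registered backend is asked to finish any work it may have queued asynchronously, so outputs are complete before the function returns. As an illustrative sketch only, assuming `BackendRegistry::backends()` returns the registered backends and `IDeviceBackend` exposes a blocking `sync()` hook, such a helper could look like this:

    // Illustrative sketch; the real sync_backends() is defined elsewhere in
    // the graph library. It assumes IDeviceBackend::sync() blocks until the
    // backend's outstanding work has completed.
    void sync_backends()
    {
        // Iterate every backend registered with the singleton registry
        // and wait for its queued work to finish.
        for (auto &backend_pair : backends::BackendRegistry::get().backends())
        {
            backend_pair.second->sync();
        }
    }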