diff options
Diffstat (limited to 'arm_compute/graph/Utils.h')
-rw-r--r-- | arm_compute/graph/Utils.h | 15 |
1 file changed, 12 insertions, 3 deletions
diff --git a/arm_compute/graph/Utils.h b/arm_compute/graph/Utils.h index cb421fc55a..9813ff05c7 100644 --- a/arm_compute/graph/Utils.h +++ b/arm_compute/graph/Utils.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2020 ARM Limited. + * Copyright (c) 2018-2021 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -36,7 +36,7 @@ class GraphContext; inline bool is_utility_node(INode *node) { - std::set<NodeType> utility_node_types = { NodeType::PrintLayer }; + std::set<NodeType> utility_node_types = {NodeType::PrintLayer}; return utility_node_types.find(node->type()) != utility_node_types.end(); } @@ -76,7 +76,7 @@ bool is_target_supported(Target target); /** Returns default target for execution * * @note If an OpenCL backend exists then OpenCL is returned, - * else if the NEON backend exists returns NEON as target. + * else if the CPU backend exists returns @ref Target::NEON as target. * If no backends are registered an error is raised. * * @return Default target @@ -107,6 +107,8 @@ void setup_requested_backend_context(GraphContext &ctx, Target target); * @param[in,out] ctx Graph Context */ void release_default_graph_context(GraphContext &ctx); +/** Synchronize kernels execution on the backends. On GPU, this results in a blocking call waiting for all kernels to be completed. */ +void sync_backends(); /** Get size of a tensor's given dimension depending on its layout * * @param[in] descriptor Descriptor @@ -130,6 +132,13 @@ size_t get_dimension_idx(DataLayout data_layout, const DataLayoutDimension data_ * @return A list with the driving node of a given node */ std::vector<NodeIdxPair> get_driving_nodes(const INode &node); +/** Get the list of driver nodes of a given node + * + * @param[in] node Node to find the driver node of + * + * @return A list with the driver node of a given node + */ +std::vector<NodeIdxPair> get_driver_nodes(const INode &node); /** Configures tensor * * @param[in, out] tensor Tensor to configure |