author     Viet-Hoa Do <viet-hoa.do@arm.com>   2022-12-14 14:49:56 +0000
committer  Viet-Hoa Do <viet-hoa.do@arm.com>   2022-12-23 14:11:34 +0000
commit     04f4620cf999846a44089c81720aa920edec6993 (patch)
tree       1c0080ac59d5b2aa500cd2b2ceffe0575e22a4b6 /src/dynamic_fusion/sketch/gpu/GpuOperatorGroup.cpp
parent     81fdaddaf36cb4c7ff0d2c52a370dd977a13dc72 (diff)
Add multiple output support for dynamic fusion
* The dependency graph can now schedule any acyclic graph into a sequential list of operators. This is needed because the output operators now form branches in the graph.
* Fix the definition of input, output and intermediate tensors in GpuKernelComponentGroup to support a non-linear but sequential list of operators.
* Add a constraint on GpuOperatorGroup to enforce a strictly linear fusion style, while allowing output operators as the only form of branching.

Resolves: COMPMID-5771
Signed-off-by: Viet-Hoa Do <viet-hoa.do@arm.com>
Change-Id: I68de3a31a2456145081f0a397e4e61dd66327682
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/8823
Reviewed-by: Gunes Bayir <gunes.bayir@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
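The fusion-shape rule described above (a strictly linear chain of operators, with output operators as the only permitted branches) can be pictured with a small standalone sketch. The code below is illustrative only: the OpDesc struct and the is_linear_with_output_branches() helper are hypothetical and not part of ComputeLibrary; the real check lives in the dependency graph's try_add_operator_as_linear(), exercised by the diff further down.

// Hypothetical, simplified illustration of the constraint (not ComputeLibrary code):
// non-output operators must extend a single linear chain, while output operators
// may branch off any tensor the chain has already produced.
#include <algorithm>
#include <set>
#include <vector>

struct OpDesc
{
    int              id{};
    std::vector<int> srcs{};      // source tensor ids
    std::vector<int> dsts{};      // destination tensor ids
    bool             is_output{}; // only materialises a user-visible output
};

bool is_linear_with_output_branches(const std::vector<OpDesc> &ops)
{
    std::set<int> produced{}; // tensors written by earlier operators in the group
    int           tail = -1;  // destination of the last non-output operator
    for(const auto &op : ops)
    {
        if(op.is_output)
        {
            // Output operators are the only allowed branches: they may read any
            // tensor that has already been produced inside the group.
            for(int s : op.srcs)
            {
                if(produced.count(s) == 0)
                {
                    return false;
                }
            }
        }
        else
        {
            // Every non-output operator (except the first) must consume the tail of
            // the chain, and must produce exactly one tensor to keep the chain linear.
            if(tail != -1 && std::find(op.srcs.begin(), op.srcs.end(), tail) == op.srcs.end())
            {
                return false;
            }
            if(op.dsts.size() != 1)
            {
                return false;
            }
            tail = op.dsts[0];
        }
        produced.insert(op.dsts.begin(), op.dsts.end());
    }
    return true;
}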
Diffstat (limited to 'src/dynamic_fusion/sketch/gpu/GpuOperatorGroup.cpp')
-rw-r--r--  src/dynamic_fusion/sketch/gpu/GpuOperatorGroup.cpp  10
1 file changed, 5 insertions, 5 deletions
diff --git a/src/dynamic_fusion/sketch/gpu/GpuOperatorGroup.cpp b/src/dynamic_fusion/sketch/gpu/GpuOperatorGroup.cpp
index e8ef835405..7bb14c8698 100644
--- a/src/dynamic_fusion/sketch/gpu/GpuOperatorGroup.cpp
+++ b/src/dynamic_fusion/sketch/gpu/GpuOperatorGroup.cpp
@@ -68,12 +68,12 @@ ArgumentPack<ITensorInfo> Operator::tensors() const
return _tensors;
}
-bool GpuOperatorGroup::try_add_operator(const Operator &op) const
+bool GpuOperatorGroup::try_add_operator(const Operator &op, bool is_output) const
{
const auto src_tensor_ids = get_tensor_ids(op.tensors().get_const_src_tensors());
const auto dst_tensor_ids = get_tensor_ids(op.tensors().get_const_dst_tensors());
// Constraint 1
- if(!_graph.try_add_operator_as_linear(op.id(), src_tensor_ids, dst_tensor_ids))
+ if(!_graph.try_add_operator_as_linear(op.id(), src_tensor_ids, dst_tensor_ids, is_output))
{
return false;
}
@@ -143,12 +143,12 @@ bool GpuOperatorGroup::try_add_operator(const Operator &op) const
}
return true;
}
-void GpuOperatorGroup::add_operator(const Operator &op)
+void GpuOperatorGroup::add_operator(const Operator &op, bool is_output)
{
- ARM_COMPUTE_ERROR_ON(!try_add_operator(op));
+ ARM_COMPUTE_ERROR_ON(!try_add_operator(op, is_output));
const auto src_tensor_ids = get_tensor_ids(op.tensors().get_const_src_tensors());
const auto dst_tensor_ids = get_tensor_ids(op.tensors().get_const_dst_tensors());
- _graph.add_operator_as_linear(op.id(), src_tensor_ids, dst_tensor_ids);
+ _graph.add_operator_as_linear(op.id(), src_tensor_ids, dst_tensor_ids, is_output);
_operators[op.id()] = op;
}
Operator GpuOperatorGroup::new_operator(const GpuOperatorType &operator_type, const ArgumentPack<ITensorInfo> &tensors) const
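For orientation, a hedged caller-side sketch of the updated interface follows. It assumes an already-populated ArgumentPack<ITensorInfo> and a simplified setup; the enumerator GpuOperatorType::Complex and the surrounding scaffolding are illustrative rather than copied from the library's actual callers. Only the signatures visible in the diff above (new_operator, try_add_operator, add_operator) are taken from the source.

// Hypothetical usage sketch (not taken from ComputeLibrary's sources): the group is
// queried with try_add_operator() before the operator is committed with add_operator();
// an operator that only writes a user-visible output passes is_output = true, so it may
// form the only kind of branch the group accepts.
ArgumentPack<ITensorInfo> tensors{}; // assumed to be filled with the operator's src/dst tensor infos
GpuOperatorGroup          group{};
const Operator op = group.new_operator(GpuOperatorType::Complex, tensors); // enumerator choice is illustrative
if(group.try_add_operator(op, /* is_output = */ true))
{
    group.add_operator(op, /* is_output = */ true);
}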