Diffstat (limited to 'src/dynamic_fusion/sketch/utils/DependencyGraph.h')
-rw-r--r--    src/dynamic_fusion/sketch/utils/DependencyGraph.h    182
1 file changed, 90 insertions(+), 92 deletions(-)
diff --git a/src/dynamic_fusion/sketch/utils/DependencyGraph.h b/src/dynamic_fusion/sketch/utils/DependencyGraph.h
index c891e76d8b..c157c2b21c 100644
--- a/src/dynamic_fusion/sketch/utils/DependencyGraph.h
+++ b/src/dynamic_fusion/sketch/utils/DependencyGraph.h
@@ -25,6 +25,7 @@
#define SRC_DYNAMIC_FUSION_SKETCH_UTILS_DEPENDENCYGRAPH
#include "arm_compute/core/Error.h"
+
#include <cstdint>
#include <map>
#include <set>
@@ -68,12 +69,10 @@ public:
OperatorId op{};
std::vector<TensorId> inputs{};
std::vector<TensorId> outputs{};
- friend bool operator==(const OpPack &opp0, const OpPack &opp1)
+ friend bool operator==(const OpPack &opp0, const OpPack &opp1)
{
- return std::make_tuple(
- opp0.op, opp0.inputs, opp0.outputs)
- == std::make_tuple(
- opp1.op, opp1.inputs, opp1.outputs);
+ return std::make_tuple(opp0.op, opp0.inputs, opp0.outputs) ==
+ std::make_tuple(opp1.op, opp1.inputs, opp1.outputs);
}
};
@@ -95,10 +94,13 @@ public:
* @return true If the operator can be added while keeping the graph as a linear sequence
* @return false Otherwise
*/
- bool try_add_operator_as_linear(OperatorId op, const std::vector<TensorId> &inputs, const std::vector<TensorId> &outputs, bool is_output = false) const
+ bool try_add_operator_as_linear(OperatorId op,
+ const std::vector<TensorId> &inputs,
+ const std::vector<TensorId> &outputs,
+ bool is_output = false) const
{
ARM_COMPUTE_UNUSED(op, is_output);
- if(all_ops().empty())
+ if (all_ops().empty())
{
return true;
}
@@ -106,25 +108,25 @@ public:
// If the new operator is not the first operator, at least one input tensor must be
// the output tensor of the last non-output operator. All other input tensors must be
// the global input of the graph (i.e. not the output of any operator).
- if(_last_op_available)
+ if (_last_op_available)
{
auto use_input_from_last_op = false;
- for(auto src_tensor : inputs)
+ for (auto src_tensor : inputs)
{
const auto src_ops = _adj_src_ops.find(src_tensor);
- if(src_ops != _adj_src_ops.end())
+ if (src_ops != _adj_src_ops.end())
{
ARM_COMPUTE_ERROR_ON(src_ops->second.size() > 1);
- if(!src_ops->second.empty())
+ if (!src_ops->second.empty())
{
const auto src_op = src_ops->second[0];
- if(src_op == _last_op)
+ if (src_op == _last_op)
{
- if(use_input_from_last_op)
+ if (use_input_from_last_op)
{
// To be safe, we also forbid using the output tensor
// of the last operator twice.
@@ -143,7 +145,7 @@ public:
}
}
- if(!use_input_from_last_op)
+ if (!use_input_from_last_op)
{
// At least one input tensor must be the output tensor of the last non-output operator.
return false;
@@ -152,9 +154,9 @@ public:
// The output tensor of the new operator must not be the input tensor of any previously
// added operator.
- for(auto dst_tensor : outputs)
+ for (auto dst_tensor : outputs)
{
- if(_adj_dst_ops.find(dst_tensor) != _adj_dst_ops.end())
+ if (_adj_dst_ops.find(dst_tensor) != _adj_dst_ops.end())
{
return false;
}
@@ -168,7 +170,10 @@ public:
* INVARIANT: The list can only grow from head to tail
* INVARIANT: POSTCONDITION: The graph is linear
*/
- void add_operator_as_linear(OperatorId op, const std::vector<TensorId> &inputs, const std::vector<TensorId> &outputs, bool is_output = false)
+ void add_operator_as_linear(OperatorId op,
+ const std::vector<TensorId> &inputs,
+ const std::vector<TensorId> &outputs,
+ bool is_output = false)
{
const auto success = add_operator(op, inputs, outputs, is_output);
ARM_COMPUTE_UNUSED(success);
@@ -183,24 +188,27 @@ public:
* @param[in] outputs Output tensors to the operator
* @param[in] is_output Whether this is an output operator
*/
- bool add_operator(OperatorId op, const std::vector<TensorId> &inputs, const std::vector<TensorId> &outputs, bool is_output = false)
+ bool add_operator(OperatorId op,
+ const std::vector<TensorId> &inputs,
+ const std::vector<TensorId> &outputs,
+ bool is_output = false)
{
- if(operator_exists(op))
+ if (operator_exists(op))
{
return false;
}
_adj_src_tensors[op] = {};
_adj_dst_tensors[op] = {};
- for(auto in_tensor : inputs)
+ for (auto in_tensor : inputs)
{
// Linking input tensor to operator node will never create a cycle / loop because we guarantee
// each op is newly created, so every <input, op> pair / edge is new
link_input(op, in_tensor);
}
- for(auto out_tensor : outputs)
+ for (auto out_tensor : outputs)
{
// If there exists a back path from op's output tensor to op already, then linking the two will create a loop / cycle
- if(path_exists_from_tensor_to_op(out_tensor, op))
+ if (path_exists_from_tensor_to_op(out_tensor, op))
{
remove_operator(op);
return false;
@@ -211,10 +219,10 @@ public:
}
}
- if(!is_output)
+ if (!is_output)
{
_last_op_available = true;
- _last_op = op;
+ _last_op = op;
}
return true;
@@ -230,16 +238,16 @@ public:
std::vector<OpPack> build_operators_sequence() const
{
std::vector<OpPack> ops_seq;
- std::set<Id> done_ops;
- std::set<Id> done_tensors;
+ std::set<Id> done_ops;
+ std::set<Id> done_tensors;
const auto input_tensors = global_src_tensors();
- for(auto tensor : input_tensors)
+ for (auto tensor : input_tensors)
{
done_tensors.insert(tensor);
- for(auto op : _adj_dst_ops.at(tensor))
+ for (auto op : _adj_dst_ops.at(tensor))
{
build_operators_sequence_from_op(op, ops_seq, done_ops, done_tensors);
}
@@ -260,10 +268,8 @@ public:
friend bool operator==(const DependencyGraph &g0, const DependencyGraph &g1)
{
// Do not compare id allocators
- return std::make_tuple(
- g0._adj_src_tensors, g0._adj_dst_tensors, g0._adj_src_ops, g0._adj_dst_ops)
- == std::make_tuple(
- g1._adj_src_tensors, g1._adj_dst_tensors, g1._adj_src_ops, g1._adj_dst_ops);
+ return std::make_tuple(g0._adj_src_tensors, g0._adj_dst_tensors, g0._adj_src_ops, g0._adj_dst_ops) ==
+ std::make_tuple(g1._adj_src_tensors, g1._adj_dst_tensors, g1._adj_src_ops, g1._adj_dst_ops);
}
std::vector<OperatorId> src_ops_from_tensor(TensorId tensor) const
{
@@ -280,10 +286,8 @@ public:
std::vector<TensorId> all_tensors() const
{
std::vector<TensorId> tensors{};
- std::transform(std::begin(_adj_src_ops), std::end(_adj_src_ops), std::back_inserter(tensors), [](const auto & it)
- {
- return it.first;
- });
+ std::transform(std::begin(_adj_src_ops), std::end(_adj_src_ops), std::back_inserter(tensors),
+ [](const auto &it) { return it.first; });
return tensors;
}
/** Get source tensors of the whole graph
@@ -293,9 +297,9 @@ public:
std::vector<TensorId> global_src_tensors() const
{
std::vector<TensorId> tensors;
- for(auto tensor_src_ops : _adj_src_ops)
+ for (auto tensor_src_ops : _adj_src_ops)
{
- if(tensor_src_ops.second.empty())
+ if (tensor_src_ops.second.empty())
{
tensors.push_back(tensor_src_ops.first);
}
@@ -309,9 +313,9 @@ public:
std::vector<TensorId> global_dst_tensors() const
{
std::vector<TensorId> tensors;
- for(auto tensor_dst_ops : _adj_dst_ops)
+ for (auto tensor_dst_ops : _adj_dst_ops)
{
- if(tensor_dst_ops.second.empty())
+ if (tensor_dst_ops.second.empty())
{
tensors.push_back(tensor_dst_ops.first);
}
@@ -328,14 +332,14 @@ public:
// If a tensor is used to connect the input of an operator and the output of another operator,
// it is not allocated in the memory. The tensor exists as a temporary variable only.
- for(auto src_tensor : _adj_src_ops)
+ for (auto src_tensor : _adj_src_ops)
{
- if(!src_tensor.second.empty())
+ if (!src_tensor.second.empty())
{
const auto dst_tensor = _adj_dst_ops.find(src_tensor.first);
- if(dst_tensor != _adj_dst_ops.end())
+ if (dst_tensor != _adj_dst_ops.end())
{
- if(!dst_tensor->second.empty())
+ if (!dst_tensor->second.empty())
{
tensors.push_back(src_tensor.first);
}
@@ -354,9 +358,9 @@ public:
std::vector<OperatorId> ops{};
const auto op_list = all_ops();
- for(auto op : op_list)
+ for (auto op : op_list)
{
- if(src_ops(op).empty())
+ if (src_ops(op).empty())
{
ops.emplace_back(op);
}
@@ -368,7 +372,7 @@ private:
void link_input(OperatorId op, TensorId in_tensor)
{
ARM_COMPUTE_ERROR_ON(!operator_exists(op));
- if(!tensor_exists(in_tensor))
+ if (!tensor_exists(in_tensor))
{
insert_new_tensor(in_tensor);
}
@@ -379,7 +383,7 @@ private:
void link_output(OperatorId op, TensorId out_tensor)
{
ARM_COMPUTE_ERROR_ON(!operator_exists(op));
- if(!tensor_exists(out_tensor))
+ if (!tensor_exists(out_tensor))
{
insert_new_tensor(out_tensor);
}
@@ -392,7 +396,7 @@ private:
{
ARM_COMPUTE_ERROR_ON(!operator_exists(op));
std::vector<OperatorId> ops{};
- for(TensorId src_tensor : src_tensors(op))
+ for (TensorId src_tensor : src_tensors(op))
{
ops.insert(ops.end(), std::begin(_adj_src_ops.at(src_tensor)), std::end(_adj_src_ops.at(src_tensor)));
}
@@ -402,7 +406,7 @@ private:
{
ARM_COMPUTE_ERROR_ON(!operator_exists(op));
std::vector<OperatorId> ops{};
- for(TensorId dst_tensor : _adj_dst_tensors.at(op))
+ for (TensorId dst_tensor : _adj_dst_tensors.at(op))
{
ops.insert(ops.end(), std::begin(_adj_dst_ops.at(dst_tensor)), std::end(_adj_dst_ops.at(dst_tensor)));
}
@@ -436,10 +440,8 @@ private:
std::vector<OperatorId> all_ops() const
{
std::vector<OperatorId> ops{};
- std::transform(std::begin(_adj_src_tensors), std::end(_adj_src_tensors), std::back_inserter(ops), [](const auto & it)
- {
- return it.first;
- });
+ std::transform(std::begin(_adj_src_tensors), std::end(_adj_src_tensors), std::back_inserter(ops),
+ [](const auto &it) { return it.first; });
return ops;
}
/** Remove an operator from graph.
@@ -448,25 +450,21 @@ private:
*/
void remove_operator(OperatorId op)
{
- for(auto src_tensor : _adj_src_tensors.at(op))
+ for (auto src_tensor : _adj_src_tensors.at(op))
{
auto &dst_ops = _adj_dst_ops.at(src_tensor);
- dst_ops.erase(
- std::remove(std::begin(dst_ops), std::end(dst_ops), op),
- std::end(dst_ops));
+ dst_ops.erase(std::remove(std::begin(dst_ops), std::end(dst_ops), op), std::end(dst_ops));
}
- for(auto dst_tensor : _adj_dst_tensors.at(op))
+ for (auto dst_tensor : _adj_dst_tensors.at(op))
{
auto &src_ops = _adj_src_ops.at(dst_tensor);
- src_ops.erase(
- std::remove(std::begin(src_ops), std::end(src_ops), op),
- std::end(src_ops));
+ src_ops.erase(std::remove(std::begin(src_ops), std::end(src_ops), op), std::end(src_ops));
}
// Remove any isolated tensors
// An isolated tensor is one where both its _adj_src_ops and _adj_dst_ops are empty
- for(auto t : all_tensors())
+ for (auto t : all_tensors())
{
- if(_adj_src_ops.at(t).empty() && _adj_dst_ops.at(t).empty())
+ if (_adj_src_ops.at(t).empty() && _adj_dst_ops.at(t).empty())
{
_adj_src_ops.erase(t);
_adj_dst_ops.erase(t);
@@ -486,11 +484,12 @@ private:
}
bool operator_exists(OperatorId op) const
{
- return _adj_src_tensors.find(op) != _adj_src_tensors.end() && _adj_dst_tensors.find(op) != _adj_dst_tensors.end();
+ return _adj_src_tensors.find(op) != _adj_src_tensors.end() &&
+ _adj_dst_tensors.find(op) != _adj_dst_tensors.end();
}
bool is_src_tensor_of(OperatorId op, TensorId tensor) const
{
- if(!operator_exists(op) || !tensor_exists(tensor))
+ if (!operator_exists(op) || !tensor_exists(tensor))
{
return false;
}
@@ -499,7 +498,7 @@ private:
}
bool is_dst_tensor_of(OperatorId op, TensorId tensor) const
{
- if(!operator_exists(op) || !tensor_exists(tensor))
+ if (!operator_exists(op) || !tensor_exists(tensor))
{
return false;
}
@@ -525,9 +524,9 @@ private:
std::vector<OperatorId> ops{};
const auto op_list = all_ops();
- for(auto op : op_list)
+ for (auto op : op_list)
{
- if(is_dst_op(op))
+ if (is_dst_op(op))
{
ops.emplace_back(op);
}
@@ -536,13 +535,13 @@ private:
}
bool path_exists_from_tensor_to_op(TensorId src_tensor, OperatorId dst_op) const
{
- if(!tensor_exists(src_tensor) || !operator_exists(dst_op))
+ if (!tensor_exists(src_tensor) || !operator_exists(dst_op))
{
return false;
}
- for(auto child_op : dst_ops_from_tensor(src_tensor))
+ for (auto child_op : dst_ops_from_tensor(src_tensor))
{
- if(path_exists_from_op_to_op(child_op, dst_op))
+ if (path_exists_from_op_to_op(child_op, dst_op))
{
return true;
}
@@ -552,21 +551,21 @@ private:
bool path_exists_from_op_to_op(OperatorId src_op, OperatorId dst_op) const
{
- if(!operator_exists(src_op) || !operator_exists(dst_op))
+ if (!operator_exists(src_op) || !operator_exists(dst_op))
{
return false;
}
- if(src_op == dst_op)
+ if (src_op == dst_op)
{
return true;
}
- if(is_in(src_op, get_dst_ops()))
+ if (is_in(src_op, get_dst_ops()))
{
return false;
}
- for(auto child_tensor : dst_tensors(src_op))
+ for (auto child_tensor : dst_tensors(src_op))
{
- if(path_exists_from_tensor_to_op(child_tensor, dst_op))
+ if (path_exists_from_tensor_to_op(child_tensor, dst_op))
{
return true;
}
@@ -574,16 +573,15 @@ private:
return false;
}
- void build_operators_sequence_from_op(
- Id op,
- std::vector<OpPack> &ops_seq,
- std::set<Id> &done_ops,
- std::set<Id> &done_tensors) const
+ void build_operators_sequence_from_op(Id op,
+ std::vector<OpPack> &ops_seq,
+ std::set<Id> &done_ops,
+ std::set<Id> &done_tensors) const
{
- while(true)
+ while (true)
{
// If the operator has been added to the sequence, ignore it.
- if(done_ops.find(op) != done_ops.end())
+ if (done_ops.find(op) != done_ops.end())
{
return;
}
@@ -593,9 +591,9 @@ private:
// is added to the sequence.
const auto src_tensors = _adj_src_tensors.at(op);
- for(auto src : src_tensors)
+ for (auto src : src_tensors)
{
- if(done_tensors.find(src) == done_tensors.end())
+ if (done_tensors.find(src) == done_tensors.end())
{
return;
}
@@ -606,24 +604,24 @@ private:
done_ops.insert(op);
- OpPack pack{ op, src_tensors, dst_tensors };
+ OpPack pack{op, src_tensors, dst_tensors};
ops_seq.push_back(pack);
done_tensors.insert(dst_tensors.begin(), dst_tensors.end());
// Visit all the sink operators.
// Call this function recursively unless there is only one sink.
- if(dst_tensors.size() == 1 && _adj_dst_ops.at(dst_tensors[0]).size() == 1)
+ if (dst_tensors.size() == 1 && _adj_dst_ops.at(dst_tensors[0]).size() == 1)
{
op = _adj_dst_ops.at(dst_tensors[0])[0];
}
else
{
- for(auto dst_tensor : dst_tensors)
+ for (auto dst_tensor : dst_tensors)
{
const auto dst_ops = _adj_dst_ops.at(dst_tensor);
- for(auto dst_op : dst_ops)
+ for (auto dst_op : dst_ops)
{
build_operators_sequence_from_op(dst_op, ops_seq, done_ops, done_tensors);
}
@@ -640,8 +638,8 @@ private:
AdjList _adj_src_ops{};
AdjList _adj_dst_ops{};
- bool _last_op_available{ false };
- OperatorId _last_op{ 0 };
+ bool _last_op_available{false};
+ OperatorId _last_op{0};
};
} // namespace dynamic_fusion
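
For reference, a minimal usage sketch of the DependencyGraph interface touched by this diff. This is not part of the commit: the include path, the full arm_compute::experimental::dynamic_fusion namespace, integral TensorId/OperatorId values, and default-constructibility of the class are assumptions not confirmed by the hunks above.

// Usage sketch (hypothetical, see assumptions above): builds a linear
// two-operator graph, shows cycle rejection, then walks the sequence.
#include "src/dynamic_fusion/sketch/utils/DependencyGraph.h"

#include <iostream>

using namespace arm_compute::experimental::dynamic_fusion;

int main()
{
    DependencyGraph graph{};

    // Op 0 reads tensor 0 and writes tensor 1; op 1 reads tensor 1 and
    // writes tensor 2. Each op is appended only if the graph stays linear.
    if (graph.try_add_operator_as_linear(0, /* inputs */ {0}, /* outputs */ {1}))
    {
        graph.add_operator_as_linear(0, {0}, {1});
    }
    if (graph.try_add_operator_as_linear(1, {1}, {2}))
    {
        graph.add_operator_as_linear(1, {1}, {2});
    }

    // A hypothetical op 2 writing back into tensor 0 would close the cycle
    // tensor 0 -> op 0 -> tensor 1 -> op 1 -> tensor 2 -> op 2 -> tensor 0,
    // so add_operator() finds the back path via path_exists_from_tensor_to_op(),
    // undoes its links with remove_operator() and returns false.
    const bool added = graph.add_operator(2, {2}, {0});
    std::cout << "cycle rejected: " << std::boolalpha << !added << "\n";

    // build_operators_sequence() yields topologically ordered OpPacks:
    // here op 0 first, then op 1.
    for (const auto &pack : graph.build_operators_sequence())
    {
        std::cout << "op " << pack.op << ": " << pack.inputs.size()
                  << " input(s), " << pack.outputs.size() << " output(s)\n";
    }
    return 0;
}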