path: root/ethosu/vela/pass_packing.py
author     Diego Russo <diego.russo@arm.com>  2020-04-14 18:41:58 +0100
committer  Tim Hall <tim.hall@arm.com>        2020-06-18 17:53:52 +0100
commit     ea6111a36e55501bbfb9ea022aaf8305b4d80183 (patch)
tree       f6787f7d07ea56a502616a8d6f36bc9c7e6e4efe /ethosu/vela/pass_packing.py
parent     2213e90570af328418d4f4a0d54269ed21dc40bc (diff)
download   ethos-u-vela-ea6111a36e55501bbfb9ea022aaf8305b4d80183.tar.gz
Add pre-commit support for sanity checks
Use the pre-commit framework [2] to run black and flake8 before the commit. black and flake8 are managed by the pre-commit framework and they can be run manually by the user with the `pre-commit run` command.

Fix the code base with the help of black and flake8.
Fix import statements according to PEP8 guidelines [1].

Both tools have the following settings (specified in the pre-commit configuration file):
* line length: 120 characters
* directories to exclude: ethosu/vela/tflite/ and ethosu/vela/ethos_u55_regs

Updated README.md on how to install pre-commit and how to run sanity checks.

Pipenv files have been updated, including new dependencies for pre-commit.

[1]: https://www.python.org/dev/peps/pep-0008/#imports
[2]: https://github.com/pre-commit/pre-commit

Change-Id: I304d9fffdf019d390ffa396a529c8a7c2437f63d
Signed-off-by: Diego Russo <diego.russo@arm.com>
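For reference, the PEP8 import layout cited in [1] separates standard-library imports from local, relative imports, which is what the first hunk of the diff below does; a minimal sketch of that grouping (module names borrowed from the hunk, shown purely for illustration):

    # Group 1: standard-library imports.
    import collections
    import enum

    # Group 2: local application (relative) imports.
    from .nn_graph import Pass, PassPlacement
    from .tensor import TensorPurpose
    from .operation import Operation, NpuBlockType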
Diffstat (limited to 'ethosu/vela/pass_packing.py')
-rw-r--r--  ethosu/vela/pass_packing.py  29
1 file changed, 14 insertions, 15 deletions
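Beyond the import reshuffle, the hunks below apply two PEP8 comparison idioms that flake8 flags, plus the trailing commas black inserts when it splits a literal over multiple lines; a short, self-contained illustration of the same idioms (hypothetical values, not code from Vela):

    # Compare against None by identity (flake8 E711 flags "== None" / "!= None").
    primary_op = None
    if primary_op is None:
        primary_op = object()

    # Write membership tests as "x not in y" (flake8 E713 flags "not x in y").
    if "DMA" not in ("Const", "Placeholder"):
        pass

    # black adds a trailing comma when it breaks a collection across lines.
    cpu_ops = {
        "Softmax",
        "Pad",
    }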
diff --git a/ethosu/vela/pass_packing.py b/ethosu/vela/pass_packing.py
index 663520fc..bae81517 100644
--- a/ethosu/vela/pass_packing.py
+++ b/ethosu/vela/pass_packing.py
@@ -18,10 +18,12 @@
# Description:
# Packs a subgraph with Neural Network Operations into Passes. Each Pass has one or more Operations.
-from .nn_graph import Operation, Pass, PassPlacement, TensorPurpose, NpuBlockType, Tensor
-import collections
import enum
-from .data_type import BaseType, DataType
+import collections
+
+from .nn_graph import Pass, PassPlacement
+from .tensor import TensorPurpose
+from .operation import Operation, NpuBlockType
class PassFlags(enum.Flag):
@@ -104,10 +106,7 @@ elem_wise_ops = elem_wise_main_ops | activation_ops | set(("Sigmoid", "Tanh"))
quantization_ops = set(("Dequantize", "QuantizeV2", "Max", "Min"))
-cpu_ops = (
- set(("Softmax", "QuantizedSoftmax", "LRN", "Shape", "QuantizedPad", "Pad", "AddN"))
- | quantization_ops
-)
+cpu_ops = set(("Softmax", "QuantizedSoftmax", "LRN", "Shape", "QuantizedPad", "Pad", "AddN")) | quantization_ops
npu_dma_ops = set(("DMA",))
startup_init_ops = set(("Const", "VariableV2", "Placeholder", "SubgraphInput"))
@@ -183,7 +182,7 @@ test_sequence = [
# flags_to_set
PassFlags.Npu | PassFlags.Dma,
# flags_to_clear
- PassFlags.Empty
+ PassFlags.Empty,
),
(
# ops_set
@@ -203,7 +202,7 @@ test_sequence = [
# flags_to_set
PassFlags.MemoryOnly | PassFlags.Main,
# flags_to_clear
- PassFlags.Empty
+ PassFlags.Empty,
),
(
# ops_set
@@ -213,9 +212,9 @@ test_sequence = [
# flags_to_set
PassFlags.Cpu | PassFlags.Main,
# flags_to_clear
- PassFlags.Empty
+ PassFlags.Empty,
),
- ( # This last one is a fallback for unrecognised operations
+ ( # This last one is a fallback for unrecognised operations
# ops_set
None,
# incompatible_pack_flags
@@ -223,7 +222,7 @@ test_sequence = [
# flags_to_set
PassFlags.Cpu | PassFlags.Main,
# flags_to_clear
- PassFlags.Empty
+ PassFlags.Empty,
),
]
@@ -346,7 +345,7 @@ def pack_into_passes(nng, arch, verbose_packing=False):
is_element_wise = True
for op in reverse_ops_list:
- if not op.type in elem_wise_ops and not op.type in npu_dma_ops:
+ if op.type not in elem_wise_ops and op.type not in npu_dma_ops:
is_element_wise = False
break
@@ -368,9 +367,9 @@ def pack_into_passes(nng, arch, verbose_packing=False):
ops_list = list(reversed(reverse_ops_list))
intermediates = list(reversed(reverse_intermediates))
- if primary_op == None:
+ if primary_op is None:
primary_op = create_primary_op(ops_list)
- if primary_op != None:
+ if primary_op is not None:
visit_tensor_refcount[primary_op.inputs[0]] += 1
npu_block_type = primary_op.attrs["npu_block_type"]
for input_tens in primary_op.inputs: