aboutsummaryrefslogtreecommitdiff
path: root/ethosu/vela/npu_serialisation.py
diff options
context:
space:
mode:
Diffstat (limited to 'ethosu/vela/npu_serialisation.py')
-rw-r--r--  ethosu/vela/npu_serialisation.py | 12
1 file changed, 7 insertions(+), 5 deletions(-)
diff --git a/ethosu/vela/npu_serialisation.py b/ethosu/vela/npu_serialisation.py
index 4542c25b..29ede842 100644
--- a/ethosu/vela/npu_serialisation.py
+++ b/ethosu/vela/npu_serialisation.py
@@ -18,13 +18,15 @@
# Description:
# Serialises and packs an NPU subgraph into tensors.
+import struct
+
+import numpy as np
+
+from . import driver_actions
from .nn_graph import PassPlacement
from .tensor import MemArea, Tensor, TensorPurpose, TensorFormat
from .operation import Operation
from .data_type import DataType
-import numpy as np
-from . import driver_actions
-import struct
def make_memory_tensor(name, mem_area, sz, want_values, arch):
@@ -75,7 +77,7 @@ def serialise_npu_subgraph_into_tensors(nng, sg, arch, scratch_tens, flash_tens)
nng.total_size[scratch_area] = nng.total_size.get(scratch_area, 0) - scratch_size
nng.total_elements[scratch_area] = nng.total_elements.get(scratch_area, 0) - scratch_size
- if flash_tens == scratch_tens == None:
+ if flash_tens == scratch_tens is None:
# First Npu subgraph, create scratch and flash tensors
sg.scratch_tensor = make_memory_tensor(sg.name + "_scratch", scratch_area, scratch_size, False, arch)
sg.scratch_tensor.purpose = TensorPurpose.Scratch
@@ -88,7 +90,7 @@ def serialise_npu_subgraph_into_tensors(nng, sg, arch, scratch_tens, flash_tens)
for cps in sg.cascaded_passes:
for ps in cps.passes:
- if ps.placement == PassPlacement.Npu and ps.weight_tensor != None:
+ if ps.placement == PassPlacement.Npu and ps.weight_tensor is not None:
# For DMA ops, ps.weight_tensor is referring to the SRAM weight tensor and therefore the address
# is pointing at the destination address of where the weights should be placed in SRAM.
# This ensures that the Flash weight tensor is used instead and thus gets the correct address.