author    Tim Hall <tim.hall@arm.com>    2020-05-18 18:04:26 +0100
committer Tim Hall <tim.hall@arm.com>    2020-06-18 17:53:52 +0100
commit    25f605c9dbc5d51208eec66d359457677cc73673 (patch)
tree      d0cac6ad84a7589960631a590741504ddcf947fb
parent    cf7da10987cac3fc68cf180a9af665fe06d608fa (diff)
download  ethos-u-vela-25f605c9dbc5d51208eec66d359457677cc73673.tar.gz
vela: Add support for CPU only networks
- Fix various problems when no operators run on Ethos-U55

Signed-off-by: Tim Hall <tim.hall@arm.com>
Change-Id: I44a1a914fabb7ca26c921a02753da8abeecd9c7b
-rw-r--r--  ethosu/vela/compiler_driver.py  35
-rw-r--r--  ethosu/vela/tflite_writer.py     9
2 files changed, 24 insertions(+), 20 deletions(-)
diff --git a/ethosu/vela/compiler_driver.py b/ethosu/vela/compiler_driver.py
index b6a98a64..9c345dba 100644
--- a/ethosu/vela/compiler_driver.py
+++ b/ethosu/vela/compiler_driver.py
@@ -138,24 +138,23 @@ def compiler_driver(nng, arch, options, scheduler_options):
sg, permanent_storage, ignore_subgraph_input_output_tensors=True, lr_graph=lr_graph_flash
)
- assert len(nng.subgraphs) > 1, "Error: No operators can be hardware accelerated; cancelling compilation"
-
- # Allocate all Npu constant tensors to the first Npu subgraph since it is
- # processed first during serialization into tensors
- first_npu_sg = nng.subgraphs[1]
- assert first_npu_sg.placement == PassPlacement.Npu
- # Use the linear allocator for constant tensors
- tensor_allocation.allocate_tensors(
- nng,
- first_npu_sg,
- arch,
- permanent_storage,
- scheduler_options.use_ifm_ofm_overlap,
- TensorAllocator.LinearAlloc,
- options.verbose_allocation,
- options.show_minimum_possible_allocation,
- lr_graph_flash,
- )
+ if len(nng.subgraphs) > 1:
+ # Allocate all Npu constant tensors to the first Npu subgraph since it is
+ # processed first during serialization into tensors
+ first_npu_sg = nng.subgraphs[1]
+ assert first_npu_sg.placement == PassPlacement.Npu
+ # Use the linear allocator for constant tensors
+ tensor_allocation.allocate_tensors(
+ nng,
+ first_npu_sg,
+ arch,
+ permanent_storage,
+ scheduler_options.use_ifm_ofm_overlap,
+ TensorAllocator.LinearAlloc,
+ options.verbose_allocation,
+ options.show_minimum_possible_allocation,
+ lr_graph_flash,
+ )
# Allocate all non-constant tensors to the root, i.e. Cpu, subgraph. This step
# will start at the root subgraph's input and traverse from top to bottom. When
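The compiler_driver.py hunk above can be read as: only attempt the Npu constant-tensor allocation when at least one Npu subgraph exists, instead of asserting and aborting compilation for CPU-only networks. A minimal standalone sketch of that control flow follows; Subgraph, PassPlacement and allocate_npu_constants are simplified stand-ins for illustration and are not the real vela API.

    from enum import Enum, auto
    from typing import List


    class PassPlacement(Enum):
        Cpu = auto()
        Npu = auto()


    class Subgraph:
        def __init__(self, name: str, placement: PassPlacement):
            self.name = name
            self.placement = placement


    def allocate_npu_constants(first_npu_sg: Subgraph) -> None:
        # Stand-in for tensor_allocation.allocate_tensors(...) with the
        # linear allocator; here it only records that it would run.
        print(f"allocating constant tensors for {first_npu_sg.name}")


    def compile_network(subgraphs: List[Subgraph]) -> None:
        # subgraphs[0] is the root Cpu subgraph; any Npu subgraphs follow it.
        if len(subgraphs) > 1:
            first_npu_sg = subgraphs[1]
            assert first_npu_sg.placement == PassPlacement.Npu
            allocate_npu_constants(first_npu_sg)
        else:
            # CPU-only network: no constant tensors to place in permanent
            # (flash) storage, so compilation continues instead of asserting.
            print("no Npu subgraphs; skipping constant tensor allocation")


    compile_network([Subgraph("root", PassPlacement.Cpu)])
    compile_network(
        [Subgraph("root", PassPlacement.Cpu), Subgraph("npu0", PassPlacement.Npu)]
    )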
diff --git a/ethosu/vela/tflite_writer.py b/ethosu/vela/tflite_writer.py
index 99df849b..675b6985 100644
--- a/ethosu/vela/tflite_writer.py
+++ b/ethosu/vela/tflite_writer.py
@@ -134,12 +134,17 @@ class TFLiteSerialiser:
return builder.EndVector(len(v))
def assign_buffers_to_tensors(self, tensors):
+ scratch_tensors = [tens for tens in tensors if tens.purpose == TensorPurpose.Scratch]
+ if len(scratch_tensors) > 0:
+ scratch_tensor_mem_area = scratch_tensors[0].mem_area
+ else:
+ scratch_tensor_mem_area = None # all tensors are initialised to MemArea.Unknown
+
buffer_map = {}
- scratch_tensor = [tens for tens in tensors if tens.purpose == TensorPurpose.Scratch][0]
buf_idx = 1
for tens in tensors:
- if tens.mem_area == scratch_tensor.mem_area:
+ if tens.mem_area == scratch_tensor_mem_area:
buffer_map[tens] = self.scratch_buf_id
else:
buffer_map[tens] = buf_idx
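The tflite_writer.py hunk follows the same theme: a CPU-only network has no scratch tensor, so indexing the filtered list with [0] would raise an IndexError. The sketch below shows the corrected lookup with Tensor, TensorPurpose and MemArea reduced to minimal stand-ins; the scratch_buf_id default of 0 is an assumption for illustration only, not the value used by the serialiser.

    from enum import Enum, auto


    class TensorPurpose(Enum):
        Weights = auto()
        FeatureMap = auto()
        Scratch = auto()


    class MemArea(Enum):
        Unknown = auto()
        Sram = auto()
        OffChipFlash = auto()


    class Tensor:
        def __init__(self, name, purpose, mem_area):
            self.name = name
            self.purpose = purpose
            self.mem_area = mem_area


    def assign_buffers_to_tensors(tensors, scratch_buf_id=0):
        # Find the memory area used by the scratch tensor, if one exists.
        scratch_tensors = [t for t in tensors if t.purpose == TensorPurpose.Scratch]
        if scratch_tensors:
            scratch_tensor_mem_area = scratch_tensors[0].mem_area
        else:
            # CPU-only network: no scratch tensor. No tensor's mem_area compares
            # equal to None, so nothing is mapped onto the scratch buffer.
            scratch_tensor_mem_area = None

        buffer_map = {}
        buf_idx = 1
        for tens in tensors:
            if tens.mem_area == scratch_tensor_mem_area:
                buffer_map[tens] = scratch_buf_id
            else:
                buffer_map[tens] = buf_idx
                buf_idx += 1
        return buffer_map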