about summary refs log tree commit diff
path: root/ethosu/vela/compiler_driver.py
diff options
context:
space:
mode:
author: Tim Hall <tim.hall@arm.com> 2020-05-18 18:04:26 +0100
committer: Tim Hall <tim.hall@arm.com> 2020-06-18 17:53:52 +0100
commit: 25f605c9dbc5d51208eec66d359457677cc73673 (patch)
tree: d0cac6ad84a7589960631a590741504ddcf947fb /ethosu/vela/compiler_driver.py
parent: cf7da10987cac3fc68cf180a9af665fe06d608fa (diff)
download: ethos-u-vela-25f605c9dbc5d51208eec66d359457677cc73673.tar.gz
vela: Add support for CPU only networks
- Fix various problems when no operators run on Ethos-U55 Signed-off-by: Tim Hall <tim.hall@arm.com> Change-Id: I44a1a914fabb7ca26c921a02753da8abeecd9c7b
Diffstat (limited to 'ethosu/vela/compiler_driver.py')
-rw-r--r-- ethosu/vela/compiler_driver.py | 35
1 file changed, 17 insertions(+), 18 deletions(-)
diff --git a/ethosu/vela/compiler_driver.py b/ethosu/vela/compiler_driver.py
index b6a98a64..9c345dba 100644
--- a/ethosu/vela/compiler_driver.py
+++ b/ethosu/vela/compiler_driver.py
@@ -138,24 +138,23 @@ def compiler_driver(nng, arch, options, scheduler_options):
sg, permanent_storage, ignore_subgraph_input_output_tensors=True, lr_graph=lr_graph_flash
)
- assert len(nng.subgraphs) > 1, "Error: No operators can be hardware accelerated; cancelling compilation"
-
- # Allocate all Npu constant tensors to the first Npu subgraph since it is
- # processed first during serialization into tensors
- first_npu_sg = nng.subgraphs[1]
- assert first_npu_sg.placement == PassPlacement.Npu
- # Use the linear allocator for constant tensors
- tensor_allocation.allocate_tensors(
- nng,
- first_npu_sg,
- arch,
- permanent_storage,
- scheduler_options.use_ifm_ofm_overlap,
- TensorAllocator.LinearAlloc,
- options.verbose_allocation,
- options.show_minimum_possible_allocation,
- lr_graph_flash,
- )
+ if len(nng.subgraphs) > 1:
+ # Allocate all Npu constant tensors to the first Npu subgraph since it is
+ # processed first during serialization into tensors
+ first_npu_sg = nng.subgraphs[1]
+ assert first_npu_sg.placement == PassPlacement.Npu
+ # Use the linear allocator for constant tensors
+ tensor_allocation.allocate_tensors(
+ nng,
+ first_npu_sg,
+ arch,
+ permanent_storage,
+ scheduler_options.use_ifm_ofm_overlap,
+ TensorAllocator.LinearAlloc,
+ options.verbose_allocation,
+ options.show_minimum_possible_allocation,
+ lr_graph_flash,
+ )
# Allocate all non-constant tensors to the root, i.e. Cpu, subgraph. This step
# will start at the root subgraph's input and traverse from top to bottom. When