author     Tim Hall <tim.hall@arm.com>    2022-03-16 16:51:16 +0000
committer  Tim Hall <tim.hall@arm.com>    2022-03-31 10:26:56 +0100
commit     68df8a1f5469daac53b7a418d92204f7026e4228 (patch)
tree       0db9df020c89e25c20ed4a7a738dedd1b0ceb023
parent     d85750702229af97c0b0bbda6e397a23254b6144 (diff)
vela: Added debug info to external API
- Added optional name attributes to operators and tensors

Signed-off-by: Tim Hall <tim.hall@arm.com>
Change-Id: I3b5d881a7b1043a6ba4b58fff5d7532b271ba536
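For context, a minimal sketch of how a user of the external API might populate the new attributes (assuming a vela build with API version 1.3 and the no-argument constructors declared in api.py; the operation class and the name strings below are illustrative only):

    from ethosu.vela import api

    # The new fields only exist from API 1.3 onwards (see the version bump below).
    assert (api.API_VERSION_MAJOR, api.API_VERSION_MINOR) >= (1, 3)

    op = api.NpuConv2DOperation()     # assumed no-argument constructor
    op.name = "sequential/conv1"      # optional, used only for debug printing
    fm = api.NpuFeatureMap()
    fm.name = "input_tensor"          # printed by print_feature_map() when set
    op.ifm = fm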
-rw-r--r--  ethosu/vela/api.py                                 |  6
-rw-r--r--  ethosu/vela/high_level_command_to_npu_op.py        |  3
-rw-r--r--  ethosu/vela/register_command_stream_generator.py   | 11
3 files changed, 14 insertions, 6 deletions
diff --git a/ethosu/vela/api.py b/ethosu/vela/api.py
index 3382ea9..399fd46 100644
--- a/ethosu/vela/api.py
+++ b/ethosu/vela/api.py
@@ -27,7 +27,7 @@ import numpy
API_VERSION_MAJOR = 1
-API_VERSION_MINOR = 2
+API_VERSION_MINOR = 3
API_VERSION = f"{API_VERSION_MAJOR}.{API_VERSION_MINOR}"
@@ -253,6 +253,8 @@ class NpuFeatureMap:
self.layout: NpuLayout = NpuLayout.NHWC
# x/y/c strides used by the NPU when traversing the feature map, if None, vela will use default strides
self.strides: Optional[NpuShape3D] = None
+ # Used for debug
+ self.name: Optional[str] = None
class NpuKernel:
@@ -290,6 +292,8 @@ class NpuOperation:
def __init__(self, op_type: NpuOperationType):
self.op_type = op_type
+ # Used for debug
+ self.name: Optional[str] = None
class NpuDmaOperation(NpuOperation):
diff --git a/ethosu/vela/high_level_command_to_npu_op.py b/ethosu/vela/high_level_command_to_npu_op.py
index 8c5525b..e6bfc1c 100644
--- a/ethosu/vela/high_level_command_to_npu_op.py
+++ b/ethosu/vela/high_level_command_to_npu_op.py
@@ -296,6 +296,7 @@ def create_feature_map(tens: Tensor, box: Box, arch: ArchitectureFeatures, op_sh
)
strides = tens.get_strides(shape4D=op_shape4D)
fm.strides = NpuShape3D(height=int(strides[2]), width=int(strides[3]), depth=int(strides[1]))
+ fm.name = tens.name
return fm
@@ -539,6 +540,7 @@ def convert_command_to_npu_op(cmd: Command, arch: ArchitectureFeatures) -> NpuOp
npu_op: NpuOperation
if isinstance(cmd, DMA):
npu_op = create_dma_op(cmd, arch)
+ npu_op.name = cmd.out_tensor.name
elif isinstance(cmd, NpuStripe):
npu_block_type = cmd.ps.primary_op.type.npu_block_type
if npu_block_type in (NpuBlockType.ConvolutionMxN, NpuBlockType.VectorProduct):
@@ -551,6 +553,7 @@ def convert_command_to_npu_op(cmd: Command, arch: ArchitectureFeatures) -> NpuOp
npu_op = create_npu_elementwise_op(cmd, arch)
else:
assert 0, f"Unknown command type {npu_block_type}"
+ npu_op.name = cmd.ps.primary_op.name
return npu_op
diff --git a/ethosu/vela/register_command_stream_generator.py b/ethosu/vela/register_command_stream_generator.py
index be01a75..7858e70 100644
--- a/ethosu/vela/register_command_stream_generator.py
+++ b/ethosu/vela/register_command_stream_generator.py
@@ -824,19 +824,20 @@ def print_feature_map(fm: Optional[NpuFeatureMap], name: str):
t = fm.tiles
addresses = [hex(addr) for addr in t.addresses]
print(f" {stride_str}, tiles: w0={t.width_0}, h0={t.height_0}, h1={t.height_1}, base={addresses}")
+ print(f" name={fm.name}")
def print_operation(npu_op: NpuOperation, index: int = 0, cmd=None):
- pass_info = f", {cmd}" if cmd else ""
+ pass_info = f" {cmd}" if cmd else ""
if isinstance(npu_op, NpuOperation) and not isinstance(npu_op, (NpuDmaOperation, NpuBlockOperation)):
- print(f"{index} {npu_op.op_type.name}{pass_info}")
+ print(f"{index} {npu_op.op_type.name} name={npu_op.name}:{pass_info}")
return
if isinstance(npu_op, NpuDmaOperation):
- print(f"{index} DMA_START src={npu_op.src}, dest={npu_op.dest}{pass_info}")
+ print(f"{index} {npu_op.op_type.name} name={npu_op.name}, src={npu_op.src}, dest={npu_op.dest}:{pass_info}")
return
k = None if npu_op.kernel is None else to_kernel(npu_op.kernel)
if isinstance(npu_op, (NpuPoolingOperation, NpuElementWiseOperation)):
- print(f"{index} {npu_op.sub_op_type.name} {npu_op.op_type.name}:{pass_info}")
+ print(f"{index} {npu_op.sub_op_type.name} {npu_op.op_type.name} name={npu_op.name}:{pass_info}")
else:
if (
isinstance(npu_op, NpuConv2DOperation)
@@ -845,7 +846,7 @@ def print_operation(npu_op: NpuOperation, index: int = 0, cmd=None):
fc = "FullyConnected "
else:
fc = ""
- print(f"{index} {fc}{npu_op.op_type.name}{pass_info}")
+ print(f"{index} {fc}{npu_op.op_type.name} name={npu_op.name}:{pass_info}")
print_feature_map(npu_op.ifm, "IFM")
if npu_op.ifm2_scalar is not None:
quant_val = quantise(npu_op.ifm2_scalar, npu_op.ifm2.quantization)
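As a rough illustration of the new debug output (hypothetical values; the format string is taken verbatim from the updated print_operation() above):

    index, fc, op_type_name, op_name, pass_info = 0, "", "Conv2D", "sequential/conv1", ""
    print(f"{index} {fc}{op_type_name} name={op_name}:{pass_info}")
    # prints: 0 Conv2D name=sequential/conv1: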