path: root/ethosu/vela/shared_buffer_allocation.py
author     Louis Verhaard <louis.verhaard@arm.com>  2020-09-30 09:01:52 +0200
committer  Louis Verhaard <louis.verhaard@arm.com>  2020-10-08 16:29:29 +0200
commit     aee5d7537ff81ffda5ba222721b72f914ce50fb8 (patch)
tree       495b9dfff2a188c6916f8ca2e390ee88f7da8ccc /ethosu/vela/shared_buffer_allocation.py
parent     36ad73a0fb46d3f844845c97c56d92de2a7a9b3d (diff)
download   ethos-u-vela-aee5d7537ff81ffda5ba222721b72f914ce50fb8.tar.gz
MLBEDSW-3148: Refactor Operation
- op.type is now an enum instead of a string
- Removed unused operator codes
- Refactored some attributes like npu_block_type, fused_activation_function
- Refactored operator index calculation
- Refactored a number of operator sets

Change-Id: I641f65ee375794b7aec42abc0664251ae37d78e8
Signed-off-by: Louis Verhaard <louis.verhaard@arm.com>
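The central change exercised by this diff is that operator types are compared against members of an Op enum instead of raw strings. A minimal, illustrative sketch of that pattern (the Op and Operation definitions below are simplified stand-ins, not the real classes in ethosu/vela/operation.py):

    from enum import Enum, auto

    class Op(Enum):
        # Simplified: the real Op enum in operation.py carries extra metadata per operator.
        FullyConnected = auto()
        Conv2D = auto()
        ReduceSum = auto()

    class Operation:
        def __init__(self, op_type: Op):
            self.type = op_type

    primary_op = Operation(Op.FullyConnected)

    # Before this commit: string comparison, easy to mistype and invisible to static checks.
    #   if primary_op.type != "FullyConnectedAct": ...
    # After: enum comparison, as used in shared_buffer_allocation.py below.
    if primary_op.type != Op.FullyConnected:
        print("read kernel height/width from the weight tensor shape")
    else:
        print("fully connected: treat the kernel as 1x1")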
Diffstat (limited to 'ethosu/vela/shared_buffer_allocation.py')
-rw-r--r--  ethosu/vela/shared_buffer_allocation.py  9
1 file changed, 6 insertions(+), 3 deletions(-)
diff --git a/ethosu/vela/shared_buffer_allocation.py b/ethosu/vela/shared_buffer_allocation.py
index 58856a3e..aa5f4c86 100644
--- a/ethosu/vela/shared_buffer_allocation.py
+++ b/ethosu/vela/shared_buffer_allocation.py
@@ -25,6 +25,7 @@ from .architecture_features import SHRAMElements
from .errors import VelaError
from .ethos_u55_regs.ethos_u55_regs import resampling_mode
from .operation import NpuBlockType
+from .operation import Op
from .range_set import MemoryRangeSet
from .tensor import MemArea
@@ -39,7 +40,7 @@ class SharedBufferAllocation:
ifm_tensor, ifm2_tensor, weight_tensor, ofm_tensor = ps.get_primary_op_ifm_ifm2_weights_ofm()
tensors = [t for t in (ifm_tensor, ifm2_tensor, ofm_tensor) if t is not None]
scales = [t.quantization.scale_f32 for t in tensors if t.quantization is not None]
- has_scale = len(tensors) == len(scales) and not None in scales
+ has_scale = len(tensors) == len(scales) and None not in scales
strides = (1, 1, 1, 1)
dilation = (1, 1, 1, 1)
@@ -53,7 +54,7 @@ class SharedBufferAllocation:
k_h = 1
k_w = 1
if weight_tensor:
- if ps.primary_op.type != "FullyConnectedAct":
+ if ps.primary_op.type != Op.FullyConnected:
k_h = weight_tensor.shape[0]
k_w = weight_tensor.shape[1]
else:
@@ -94,7 +95,9 @@ class SharedBufferAllocation:
self.use_ifm_element == SHRAMElements.IFM16_Elementwise
)
elif self.ifm_bits == 32:
- assert self.is_elementwise or ps.npu_block_type == NpuBlockType.ReduceSum, "Unsupported 32-bit IFM operation"
+ assert (
+ self.is_elementwise or ps.npu_block_type == NpuBlockType.ReduceSum
+ ), "Unsupported 32-bit IFM operation"
self.use_ifm_element = SHRAMElements.IFM32
else:
assert self.ifm_bits == 8, "Unexpected IFM bitdepth"
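The re-wrapped assert in the final hunk is where the allocator guards 32-bit IFM support: it is only allowed for elementwise operations and ReduceSum. A self-contained sketch of that check, using a hypothetical helper and a trimmed-down NpuBlockType (the real enum lives in ethosu/vela/operation.py):

    from enum import Enum, auto

    class NpuBlockType(Enum):
        # Trimmed down for illustration; the real enum defines more block types.
        ConvolutionMxN = auto()
        ElementWise = auto()
        ReduceSum = auto()

    def check_ifm32_supported(is_elementwise: bool, npu_block_type: NpuBlockType) -> None:
        # Mirrors the assert in SharedBufferAllocation.__init__: 32-bit IFM data
        # is only accepted for elementwise operations and ReduceSum.
        assert (
            is_elementwise or npu_block_type == NpuBlockType.ReduceSum
        ), "Unsupported 32-bit IFM operation"

    check_ifm32_supported(False, NpuBlockType.ReduceSum)          # passes
    # check_ifm32_supported(False, NpuBlockType.ConvolutionMxN)   # would raise AssertionError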