diff options
author | Fredrik Svedberg <fredrik.svedberg@arm.com> | 2023-04-11 22:35:04 +0200 |
---|---|---|
committer | Fredrik Svedberg <fredrik.svedberg@arm.com> | 2023-04-17 14:16:44 +0200 |
commit | 0ac0804e76e098695ee2b8a9e24e2f0a1efc324f (patch) | |
tree | 9ccb766221987a415244079ed6c596a47d693b20 /ethosu/vela/operation.py | |
parent | c1ad80b3a581dd39b39a112d6c2026f6560207a4 (diff) | |
download | ethos-u-vela-0ac0804e76e098695ee2b8a9e24e2f0a1efc324f.tar.gz |
MLBEDSW-7196 Add LSTM support
Added int8 and int16 UNIDIRECTIONAL_SEQUENCE_LSTM support.
The implementation does not include support for:
* CIFG
* Peephole
* Projection
* Normalisation
This change also:
* Removed unused Op.BlockLSTM operation type.
* Removed the only-one-consumer limitation on putting the SplitSliceRead
on the tensor consumer(s), if all consumers fulfill the requirements
* Added Op.VariableTensorWrite as an Operation.memory_function to make
sure writes to variable tensors:
* Always use linear mode
* Are not moved to fast scratch
* Are not fused with other elementwise operation tensor ranges
Change-Id: Ief831738924ac3d1f2ba6d41f10bd6dc969911f3
Signed-off-by: Fredrik Svedberg <fredrik.svedberg@arm.com>
Diffstat (limited to 'ethosu/vela/operation.py')
-rw-r--r-- | ethosu/vela/operation.py | 8 |
1 files changed, 4 insertions, 4 deletions
diff --git a/ethosu/vela/operation.py b/ethosu/vela/operation.py index 67717104..d1670536 100644 --- a/ethosu/vela/operation.py +++ b/ethosu/vela/operation.py @@ -37,6 +37,7 @@ from .shape4d import Shape4D # Import needed for Type annotations. Only import for Type checking to avoid run-time errors due to cyclic import. if TYPE_CHECKING: + from .tensor import QuantizationParameters from .tensor import Tensor PointXY = namedtuple("PointXY", "x y") @@ -142,8 +143,6 @@ class Op(Enum): BatchToSpaceND = OperatorInfo() BidirectionalSequenceLstm = OperatorInfo(block_type=NpuBlockType.VectorProduct, indices=NNG_IFM_WEIGHTS_INDICES) BidirectionalSequenceRnn = OperatorInfo(block_type=NpuBlockType.VectorProduct, indices=NNG_IFM_WEIGHTS_INDICES) - BlockLSTM = OperatorInfo(block_type=NpuBlockType.VectorProduct, indices=NNG_BLOCK_LSTM_INDICES) - CLZ = OperatorInfo( block_type=NpuBlockType.ElementWise, indices=NNG_IFM_INDICES, is_unary=True ) # NPU specific operation @@ -297,6 +296,7 @@ class Op(Enum): Unique = OperatorInfo() Unpack = OperatorInfo(indices=NNG_IFM_INDICES) UnpackReshaped = OperatorInfo(indices=NNG_IFM_INDICES) + VariableTensorWrite = OperatorInfo() Where = OperatorInfo() While = OperatorInfo() ZerosLike = OperatorInfo() @@ -516,8 +516,8 @@ class Operation: self.memory_function: Optional[Op] = None # If not none: contains QuantizationParameters to be used as output quantization # (which overrides the ofm tensor's quantization), used in LUT - self.forced_input_quantization = None - self.forced_output_quantization = None + self.forced_input_quantization: Optional[QuantizationParameters] = None + self.forced_output_quantization: Optional[QuantizationParameters] = None self.scheduled_pass = None self.op_index = None # input network operator index self.activation_lut = None |