path: root/ethosu/vela/test/testutil.py
author    Fredrik Svedberg <fredrik.svedberg@arm.com>  2023-04-11 22:35:04 +0200
committer Fredrik Svedberg <fredrik.svedberg@arm.com>  2023-04-17 14:16:44 +0200
commit    0ac0804e76e098695ee2b8a9e24e2f0a1efc324f (patch)
tree      9ccb766221987a415244079ed6c596a47d693b20 /ethosu/vela/test/testutil.py
parent    c1ad80b3a581dd39b39a112d6c2026f6560207a4 (diff)
download  ethos-u-vela-0ac0804e76e098695ee2b8a9e24e2f0a1efc324f.tar.gz
MLBEDSW-7196 Add LSTM support
Added int8 and int16 UNIDIRECTIONAL_SEQUENCE_LSTM support.

The implementation does not include support for:
* CIFG
* Peephole
* Projection
* Normalisation

This change also:
* Removed the unused Op.BlockLSTM operation type.
* Removed the single-consumer limitation on putting the SplitSliceRead
  on the tensor consumer(s), when all consumers fulfil the requirements.
* Added Op.VariableTensorWrite as an Operation.memory_function to make
  sure that writes to variable tensors:
  * Always use linear mode
  * Are not moved to fast scratch
  * Are not fused with other elementwise operation tensor ranges

Change-Id: Ief831738924ac3d1f2ba6d41f10bd6dc969911f3
Signed-off-by: Fredrik Svedberg <fredrik.svedberg@arm.com>
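For orientation, a minimal usage sketch of the new create_lstm_op helper added by this commit (a hypothetical test, not part of the commit itself; the import paths are assumptions based on the repository layout):

    from ethosu.vela.data_type import DataType
    from ethosu.vela.test import testutil

    def test_create_lstm_op_shapes():
        # Hypothetical test: 1 batch, 8 time steps, 4 input features, 2 output units, int8
        op = testutil.create_lstm_op(1, 8, 4, 2, DataType.int8)
        assert op.outputs[0].shape == [1, 8, 2]  # [batches, times, outputs]
        assert len(op.inputs) == 24  # full TFLite UNIDIRECTIONAL_SEQUENCE_LSTM input list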
Diffstat (limited to 'ethosu/vela/test/testutil.py')
-rw-r--r--  ethosu/vela/test/testutil.py  |  62
1 file changed, 61 insertions(+), 1 deletion(-)
diff --git a/ethosu/vela/test/testutil.py b/ethosu/vela/test/testutil.py
index 88fc8747..e08bde24 100644
--- a/ethosu/vela/test/testutil.py
+++ b/ethosu/vela/test/testutil.py
@@ -103,7 +103,10 @@ def create_op_with_quant_tensors(
def create_op(op_type, inputs, output, attrs=None, set_ifm_ofm_shapes=True):
    op = Operation(op_type, output.name + "_op")
    for input in inputs:
-        op.add_input_tensor(input)
+        if input:  # Add regular tensor input
+            op.add_input_tensor(input)
+        else:  # Add optional (None) inputs for operators with sparse input positioning
+            op.inputs.append(input)
    op.set_output_tensor(output)
    if attrs is not None:
        op.attrs = attrs
@@ -112,6 +115,63 @@ def create_op(op_type, inputs, output, attrs=None, set_ifm_ofm_shapes=True):
    return op
+def create_lstm_op(batches, times, features, outputs, datatype):
+    input_shape = [batches, times, features]
+    output_shape = [batches, times, outputs]
+    weight_shape = [features, outputs]
+    state_shape = [batches, outputs]
+    bias_shape = [outputs]
+    ifm = Tensor(input_shape, datatype, "in")
+    ifm.quantization = default_quant_params()
+    ofm = Tensor(output_shape, datatype, "out")
+    ofm.quantization = default_quant_params()
+    bias_dtype = DataType.int64 if datatype == DataType.int16 else DataType.int32
+    bias = create_const_tensor("bias", bias_shape, bias_dtype, [0] * outputs)
+    weight_q = default_quant_params()
+    weight = create_const_tensor("weight", weight_shape, DataType.int8, np.ones(weight_shape), quantization=weight_q)
+    output_state = Tensor(state_shape, datatype, "output_state")
+    output_state.quantization = default_quant_params()
+    output_state.is_variable = True
+    cell_state = Tensor(state_shape, DataType.int16, "cell_state")
+    cell_state.quantization = default_quant_params()
+    cell_state.is_variable = True
+    intermediate = Tensor([], DataType.float32, "intermediate")
+    hidden_scale_intermediate = Tensor([], datatype, "effective_hidden_scale_intermediate")
+    hidden_scale_intermediate.quantization = default_quant_params()
+    peephole = None
+    projection = None
+    normalisation = None
+    inputs = [
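+        # TFLite UNIDIRECTIONAL_SEQUENCE_LSTM input order (None = unused optional input)
+        # 0: input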
+        ifm,
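+        # 1-4: input-to-gate weights, 5-8: recurrent-to-gate weights (input, forget, cell, output)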
+        weight,
+        weight,
+        weight,
+        weight,
+        weight,
+        weight,
+        weight,
+        weight,
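+        # 9-11: peephole weights (peephole not supported)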
+        peephole,
+        peephole,
+        peephole,
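+        # 12-15: gate biases (input, forget, cell, output)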
+        bias,
+        bias,
+        bias,
+        bias,
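+        # 16-17: projection weights and bias (projection not supported)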
+        projection,
+        projection,
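+        # 18: output (hidden) state, 19: cell state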
+        output_state,
+        cell_state,
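+        # 20-23: layer normalisation coefficients (normalisation not supported)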
+        normalisation,
+        normalisation,
+        normalisation,
+        normalisation,
+    ]
+    op = create_op(Op.UnidirectionalSequenceLstm, inputs, ofm)
+    op.intermediates = [intermediate, intermediate, intermediate, intermediate, hidden_scale_intermediate]
+    return op
+
+
def create_subgraph(op_list):
    # Creates subgraph using the given list of operations
    sg = Subgraph()