diff options
author | Jerry Ge <jerry.ge@arm.com> | 2023-08-11 16:43:30 +0000 |
---|---|---|
committer | Jerry Ge <jerry.ge@arm.com> | 2023-08-11 16:43:47 +0000 |
commit | 0fc278bbd87827875ef4add9cfd46aea0d787b31 (patch) | |
tree | ac1bd06e51730239befc88895cb2db0bca49bc23 /chapters/pseudocode.adoc | |
parent | 12ab5da01cbc152ed14f00fccdf94815dd1512d2 (diff) | |
download | specification-0fc278bbd87827875ef4add9cfd46aea0d787b31.tar.gz |
Add StatefulOps to TOSA specification
Signed-off-by: Jerry Ge <jerry.ge@arm.com>
Change-Id: I63a4c1202a1eddcdedb222e64cac34557647ff21
Diffstat (limited to 'chapters/pseudocode.adoc')
-rw-r--r-- | chapters/pseudocode.adoc | 39 |
1 file changed, 39 insertions, 0 deletions
diff --git a/chapters/pseudocode.adoc b/chapters/pseudocode.adoc index 422188a..146b5d7 100644 --- a/chapters/pseudocode.adoc +++ b/chapters/pseudocode.adoc @@ -125,6 +125,45 @@ void tensor_write<type>(<type> *address, dim_t shape, dim_t index, <type> value) } ---- +==== Variable Tensor Allocate + +variable_tensor_allocate allocates the mutable persistent memory block for storing variable tensors. +The shape argument contains the shape of the allocated memory block for the variable_tensor. +The uid argument is a globally unique identifier for variable tensors. + +[source,c++] +---- +tensor_t* variable_tensor_allocate<in_t>(dim_t shape, int32_t uid) { + size_t size = tensor_size(shape); + tensor_t *allocated_tensor = new tensor_t; + allocated_tensor->data = new in_t[size]; + allocated_tensor->uid = uid; + allocated_tensor->is_written = false; + allocated_tensor->shape = shape; + allocated_tensor->type = in_t; + return allocated_tensor; +} +---- + +==== Variable Tensor Lookup + +variable_tensor_lookup checks whether a variable tensor has been allocated or not. +The uid argument is a globally unique identifier for variable tensors. + +[source,c++] +---- +tensor_t variable_tensor_lookup(int32_t uid) { + // The global all_allocated_variable_tensors was instantiated at the first + // time of executing the tosa graph + for_each(tensor_t allocated_tensor in all_allocated_variable_tensors) { + if (allocated_tensor.uid == uid) { + return allocated_tensor; + } + } + return NULL; +} +---- + ==== Broadcast Helpers The following function derives the broadcast output shape from the input shapes. |