From 0fc278bbd87827875ef4add9cfd46aea0d787b31 Mon Sep 17 00:00:00 2001
From: Jerry Ge
Date: Fri, 11 Aug 2023 16:43:30 +0000
Subject: Add StatefulOps to TOSA specification

Signed-off-by: Jerry Ge
Change-Id: I63a4c1202a1eddcdedb222e64cac34557647ff21
---
 chapters/operators.adoc  |  12 ++++++
 chapters/pseudocode.adoc |  39 +++++++++++++++++
 chapters/variable.adoc   | 106 +++++++++++++++++++++++++++++++++++++++++++++++
 tools/dictionary.dic     |   1 +
 tools/tosa.py            |  17 +++++++-
 tosa.xml                 |  62 +++++++++++++++++++++++++++
 tosa.xsd                 |   2 +
 7 files changed, 237 insertions(+), 2 deletions(-)
 create mode 100644 chapters/variable.adoc

diff --git a/chapters/operators.adoc b/chapters/operators.adoc
index 3a4c831..698738f 100644
--- a/chapters/operators.adoc
+++ b/chapters/operators.adoc
@@ -62,6 +62,16 @@ implementation-defined order that must be a topological ordering of the TOSA gra
 tosa_execute_graph(tosa_context_t context, tosa_graph_t graph, tosa_list_t input_list, tosa_list_t output_list, tosa_level_t level) {
     ERROR_IF(tensor_list_shape(input_list) != tosa_input_shape(graph));
     ERROR_IF(tensor_list_shape(output_list) != tosa_output_shape(graph));
+
+    // Declare the global list for storing persistent variable tensors across multiple graphs
+    if (!variable_tensors) {
+        variable_tensors = list();
+    } else { // Clear the "seen" flag
+        for (tensor_t var_tensor in variable_tensors) {
+            var_tensor.seen = false;
+        }
+    }
+
     for_each(operator in graph order) {
         ERROR_IF(operator input tensors do not meet requirement of operator Arguments inputs)
         ERROR_IF(operator attributes do not meet requirement of operator Arguments attributes)
@@ -100,3 +110,5 @@ include::data_nodes.adoc[]
 include::custom.adoc[]
 
 include::control_flow.adoc[]
+
+include::variable.adoc[]
\ No newline at end of file
diff --git a/chapters/pseudocode.adoc b/chapters/pseudocode.adoc
index 422188a..146b5d7 100644
--- a/chapters/pseudocode.adoc
+++ b/chapters/pseudocode.adoc
@@ -125,6 +125,45 @@ void tensor_write(<type>* address, dim_t shape, dim_t index, <type> value)
 }
 ----
 
+==== Variable Tensor Allocate
+
+variable_tensor_allocate allocates the mutable persistent memory block for storing variable tensors.
+The shape argument contains the shape of the allocated memory block for the variable_tensor.
+The uid argument is a globally unique identifier for variable tensors.
+
+[source,c++]
+----
+tensor_t* variable_tensor_allocate(dim_t shape, int32_t uid) {
+    size_t size = tensor_size(shape);
+    tensor_t *allocated_tensor = new tensor_t;
+    allocated_tensor->data = new in_t[size];
+    allocated_tensor->uid = uid;
+    allocated_tensor->is_written = false;
+    allocated_tensor->shape = shape;
+    allocated_tensor->type = in_t;
+    return allocated_tensor;
+}
+----
+
+==== Variable Tensor Lookup
+
+variable_tensor_lookup checks whether a variable tensor has been allocated or not.
+The uid argument is a globally unique identifier for variable tensors.
+
+[source,c++]
+----
+tensor_t variable_tensor_lookup(int32_t uid) {
+    // The global list all_allocated_variable_tensors is instantiated the first
+    // time the TOSA graph is executed
+    for_each(tensor_t allocated_tensor in all_allocated_variable_tensors) {
+        if (allocated_tensor.uid == uid) {
+            return allocated_tensor;
+        }
+    }
+    return NULL;
+}
+----
+
 ==== Broadcast Helpers
 
 The following function derives the broadcast output shape from the input shapes.
diff --git a/chapters/variable.adoc b/chapters/variable.adoc
new file mode 100644
index 0000000..1f7da51
--- /dev/null
+++ b/chapters/variable.adoc
@@ -0,0 +1,106 @@
+//
+// This confidential and proprietary software may be used only as
+// authorised by a licensing agreement from ARM Limited
+// (C) COPYRIGHT 2023 ARM Limited
+// ALL RIGHTS RESERVED
+// The entire notice above must be reproduced on all authorised
+// copies and copies may only be made to the extent permitted
+// by a licensing agreement from ARM Limited.
+
+=== Variable Operators
+
+TOSA implements three variable operators for expressing persistent mutable values across multiple TOSA graph invocations.
+
+==== VARIABLE
+
+Defines a new TOSA variable.
+This is a persistent mutable value across multiple TOSA graph invocations.
+Modifications are expressed using read/write semantics.
+
+include::{generated}/operators/VARIABLE.adoc[]
+
+[source,c++]
+----
+
+tensor_t var_tensor = variable_tensor_lookup(uid);
+
+// Invocation for the first time
+if (var_tensor == NULL) {
+    // Allocate the persistent mutable memory for the variable tensor
+    var_tensor = variable_tensor_allocate(var_shape, uid);
+
+    if (initial_value != NULL) {
+        REQUIRE(var_t == in_t);
+        REQUIRE(var_shape == shape);
+        for_each (index in shape) {
+            // Copy data from initial_value to var_tensor
+            in_t value = tensor_read(initial_value, shape, index);
+            tensor_write(var_tensor.data, var_shape, index, value);
+        }
+        var_tensor.is_written = true;
+    }
+} else { // Variable tensor has already been declared
+    // It is invalid to declare a second variable with the same uid within a single graph execution
+    REQUIRE(!var_tensor.seen);
+}
+
+var_tensor.seen = true;
+
+----
+
+==== VARIABLE_WRITE
+
+Assigns a value to the pseudo-buffer resource holding a persistent mutable tensor.
+
+include::{generated}/operators/VARIABLE_WRITE.adoc[]
+
+[source,c++]
+----
+
+tensor_t variable_tensor = variable_tensor_lookup(uid);
+// Check this variable tensor has been declared
+REQUIRE(variable_tensor != NULL);
+// The tensor has to be seen before being written to
+// The seen flag is cleared before each graph execution and set by the declaration
+REQUIRE(variable_tensor.seen);
+// The input tensor's shape and variable_tensor's shape have to match
+REQUIRE(variable_tensor.shape == shape);
+// The input tensor's type and variable_tensor's type have to match
+REQUIRE(variable_tensor.type == in_t);
+
+for_each (index in shape) {
+    // Write data from the input to the pseudo-buffer resource
+    in_t value = tensor_read(input1, shape, index);
+    tensor_write(variable_tensor.data, variable_tensor.shape, index, value);
+}
+
+variable_tensor.is_written = true;
+
+----
+
+==== VARIABLE_READ
+
+Reads the value from a pseudo-buffer resource holding a persistent mutable tensor.
+
+include::{generated}/operators/VARIABLE_READ.adoc[]
+
+[source,c++]
+----
+
+tensor_t variable_tensor = variable_tensor_lookup(uid);
+// Check this variable tensor has been declared
+REQUIRE(variable_tensor != NULL);
+// Check this variable tensor has been written
+REQUIRE(variable_tensor.is_written);
+// The output tensor's shape and variable_tensor's shape have to match
+REQUIRE(variable_tensor.shape == shape);
+// The output tensor's type and variable_tensor's type have to match
+REQUIRE(variable_tensor.type == out_t);
+
+for_each (index in shape) {
+    // Read data from the pseudo-buffer resource to the output
+    out_t value = tensor_read(variable_tensor.data, variable_tensor.shape, index);
+    tensor_write(output1, shape, index, value);
+}
+
+----
diff --git a/tools/dictionary.dic b/tools/dictionary.dic
index 70e556a..e2e1a58 100644
--- a/tools/dictionary.dic
+++ b/tools/dictionary.dic
@@ -82,3 +82,4 @@ TPUs
 ulp
 unary
 Unary
+uid
diff --git a/tools/tosa.py b/tools/tosa.py
index 26e501f..803e478 100644
--- a/tools/tosa.py
+++ b/tools/tosa.py
@@ -4,6 +4,7 @@
 import re
 import xml.etree.ElementTree as ET
 
+
 # possible shapes: shape1, [2], [N,H,W,C]
 # returns (checkable, rank)
 # checkable is false if shape doesn't contain []
@@ -46,8 +47,18 @@ class TOSALevel:
 
 class TOSAOperatorArgument:
     def __init__(
-        self, name, description, categories, ty, elty, shape, levellimits, rank
+        self,
+        name,
+        description,
+        categories,
+        ty,
+        elty,
+        shape,
+        levellimits,
+        rank,
+        optional=False,
     ):
+        assert isinstance(optional, bool)
         self.name = name
         self.description = description
         self.categories = categories
@@ -56,6 +67,7 @@ class TOSAOperatorArgument:
         self.shape = shape
         self.levellimits = levellimits
         self.rank = rank
+        self.optional = optional
 
 
 class TOSAOperatorDataTypeSupport:
@@ -161,6 +173,7 @@ class TOSASpec:
         shape = arg.get("shape")
         levellimits = []
         rank = []
+        optional = arg.get("optional", "false") == "true"
         r = arg.find("rank")
         if r is not None:
             rank = [r.get("min"), r.get("max")]
@@ -194,7 +207,7 @@ class TOSASpec:
             argcats.append(TOSAOperatorArgumentCategory(cat[0], cat[1].split(",")))
 
         return TOSAOperatorArgument(
-            name, desc, argcats, argtype, argtelty, shape, levellimits, rank
+            name, desc, argcats, argtype, argtelty, shape, levellimits, rank, optional
         )
 
     def __load_enum(self, arg):
diff --git a/tosa.xml b/tosa.xml
index 707ea3b..4ec3775 100644
--- a/tosa.xml
+++ b/tosa.xml
@@ -2550,6 +2550,58 @@ used.
+      VARIABLE
+        Globally unique identifier for the declared variable tensor.
+        The variable tensor shape
+        Type of the tensor variable elements.
+        Initial value of the variable tensor. This argument is optional with default value NULL.
+      VARIABLE_WRITE
+        Globally unique identifier of the variable tensor that is being written to
+        Input tensor
+      VARIABLE_READ
+        Globally unique identifier of the variable tensor that is being read from
+        Output tensor
@@ -2563,4 +2615,14 @@ used.
diff --git a/tosa.xsd b/tosa.xsd
index 6aaf204..b6aa162 100644
--- a/tosa.xsd
+++ b/tosa.xsd
@@ -65,6 +65,7 @@
@@ -206,6 +207,7 @@
-- 
cgit v1.2.1
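
The note below is not part of the patch above. It is a minimal, non-normative sketch of the variable-tensor store that the new pseudocode describes, using simplified concrete types (float elements, std::vector shapes) so it can actually compile. Only uid, shape, data, seen, and is_written correspond to fields used in the specification pseudocode; the VariableTensor struct and the begin_graph_execution helper are names invented here for illustration.

[source,c++]
----
// Illustrative sketch only -- not part of the TOSA specification or this patch.
#include <cstddef>
#include <cstdint>
#include <memory>
#include <vector>

// Simplified stand-in for tensor_t, keeping only the fields the variable
// pseudocode relies on: uid, shape, data, is_written and seen.
struct VariableTensor {
    int32_t uid;
    std::vector<int32_t> shape;
    std::vector<float> data;   // stands in for the typed data block
    bool is_written = false;   // set by VARIABLE (with initial_value) or VARIABLE_WRITE
    bool seen = false;         // set by VARIABLE, cleared before each graph execution
};

// Global store, analogous to all_allocated_variable_tensors in the pseudocode.
static std::vector<std::unique_ptr<VariableTensor>> variable_tensors;

// Analogous to variable_tensor_lookup: find a variable tensor by uid,
// or return nullptr if it has never been allocated.
VariableTensor* variable_tensor_lookup(int32_t uid) {
    for (auto& t : variable_tensors) {
        if (t->uid == uid) return t.get();
    }
    return nullptr;
}

// Analogous to variable_tensor_allocate: create the persistent block once.
VariableTensor* variable_tensor_allocate(const std::vector<int32_t>& shape, int32_t uid) {
    auto t = std::make_unique<VariableTensor>();
    t->uid = uid;
    t->shape = shape;
    std::size_t size = 1;
    for (int32_t d : shape) size *= static_cast<std::size_t>(d);
    t->data.resize(size);
    variable_tensors.push_back(std::move(t));
    return variable_tensors.back().get();
}

// Mirrors the new code in tosa_execute_graph: the store persists across
// executions, but every seen flag is cleared before each execution starts.
void begin_graph_execution() {
    for (auto& t : variable_tensors) t->seen = false;
}
----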
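
Also not part of the patch: a sketch of how the three operators compose, reusing the types and helpers from the sketch above. It runs a hypothetical single-element counter graph twice (VARIABLE with uid 1 and initial value 0, then VARIABLE_READ, add one, VARIABLE_WRITE) and checks that the value persists across the two executions. run_counter_graph is an invented driver, and the assert calls stand in for the REQUIRE checks in the pseudocode.

[source,c++]
----
// Illustrative sketch only -- reuses VariableTensor, variable_tensor_lookup,
// variable_tensor_allocate and begin_graph_execution from the sketch above.
#include <cassert>

// One execution of a hypothetical graph:
//   VARIABLE(uid=1, shape=[1], initial_value=0.0) -> VARIABLE_READ -> add 1 -> VARIABLE_WRITE
float run_counter_graph() {
    begin_graph_execution();                 // seen flags cleared, data preserved

    // VARIABLE: declare uid 1; allocate and initialise it on the first execution only.
    VariableTensor* var = variable_tensor_lookup(1);
    if (var == nullptr) {
        var = variable_tensor_allocate({1}, 1);
        var->data[0] = 0.0f;                 // copy of initial_value
        var->is_written = true;
    } else {
        assert(!var->seen);                  // REQUIRE(!var_tensor.seen)
    }
    var->seen = true;

    // VARIABLE_READ: the tensor must be declared in this execution and already written.
    assert(var->seen && var->is_written);
    float value = var->data[0];

    // Graph body: increment the counter.
    value += 1.0f;

    // VARIABLE_WRITE: the tensor must be declared in this execution.
    assert(var->seen);
    var->data[0] = value;
    var->is_written = true;

    return value;
}

int main() {
    float first = run_counter_graph();   // 1.0f: initialised to 0, then incremented
    float second = run_counter_graph();  // 2.0f: the value persisted across executions
    return (first == 1.0f && second == 2.0f) ? 0 : 1;
}
----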