author     Jerry Ge <jerry.ge@arm.com>  2023-08-11 16:43:30 +0000
committer  Jerry Ge <jerry.ge@arm.com>  2023-08-11 16:43:47 +0000
commit     0fc278bbd87827875ef4add9cfd46aea0d787b31 (patch)
tree       ac1bd06e51730239befc88895cb2db0bca49bc23
parent     12ab5da01cbc152ed14f00fccdf94815dd1512d2 (diff)
download   specification-0fc278bbd87827875ef4add9cfd46aea0d787b31.tar.gz
Add StatefulOps to TOSA specification
Signed-off-by: Jerry Ge <jerry.ge@arm.com>
Change-Id: I63a4c1202a1eddcdedb222e64cac34557647ff21
-rw-r--r--  chapters/operators.adoc    12
-rw-r--r--  chapters/pseudocode.adoc   39
-rw-r--r--  chapters/variable.adoc    106
-rw-r--r--  tools/dictionary.dic        1
-rw-r--r--  tools/tosa.py              17
-rw-r--r--  tosa.xml                   62
-rw-r--r--  tosa.xsd                    2
7 files changed, 237 insertions(+), 2 deletions(-)
diff --git a/chapters/operators.adoc b/chapters/operators.adoc
index 3a4c831..698738f 100644
--- a/chapters/operators.adoc
+++ b/chapters/operators.adoc
@@ -62,6 +62,16 @@ implementation-defined order that must be a topological ordering of the TOSA gra
tosa_execute_graph(tosa_context_t context, tosa_graph_t graph, tosa_list_t input_list, tosa_list_t output_list, tosa_level_t level) {
ERROR_IF(tensor_list_shape(input_list) != tosa_input_shape(graph));
ERROR_IF(tensor_list_shape(output_list) != tosa_output_shape(graph));
+
+    // Declare the global list for storing persistent variable tensors across multiple graphs
+    if (!variable_tensors) {
+        variable_tensors = list<tensor_t>();
+    } else {
+        // Clear the "seen" flag of all previously allocated variable tensors
+        for (tensor_t var_tensor in variable_tensors) {
+            var_tensor.seen = false;
+        }
+    }
+
for_each(operator in graph order) {
ERROR_IF(operator input tensors do not meet requirement of operator Arguments inputs)
ERROR_IF(operator attributes do not meet requirement of operator Arguments attributes)
@@ -100,3 +110,5 @@ include::data_nodes.adoc[]
include::custom.adoc[]
include::control_flow.adoc[]
+
+include::variable.adoc[]
\ No newline at end of file
diff --git a/chapters/pseudocode.adoc b/chapters/pseudocode.adoc
index 422188a..146b5d7 100644
--- a/chapters/pseudocode.adoc
+++ b/chapters/pseudocode.adoc
@@ -125,6 +125,45 @@ void tensor_write<type>(<type> *address, dim_t shape, dim_t index, <type> value)
}
----
+==== Variable Tensor Allocate
+
+variable_tensor_allocate allocates the mutable persistent memory block for storing variable tensors.
+The shape argument contains the shape of the allocated memory block for the variable_tensor.
+The uid argument is a globally unique identifier for variable tensors.
+
+[source,c++]
+----
+tensor_t* variable_tensor_allocate<in_t>(dim_t shape, int32_t uid) {
+    size_t size = tensor_size(shape);
+    tensor_t *allocated_tensor = new tensor_t;
+    allocated_tensor->data = new in_t[size];
+    allocated_tensor->uid = uid;
+    allocated_tensor->is_written = false;
+    allocated_tensor->shape = shape;
+    allocated_tensor->type = in_t;
+    return allocated_tensor;
+}
+----
+
+==== Variable Tensor Lookup
+
+variable_tensor_lookup returns the variable tensor allocated with the given uid, or NULL if no such tensor has been allocated.
+The uid argument is a globally unique identifier for variable tensors.
+
+[source,c++]
+----
+tensor_t variable_tensor_lookup(int32_t uid) {
+    // The global all_allocated_variable_tensors list is instantiated
+    // the first time the TOSA graph is executed
+    for_each(tensor_t allocated_tensor in all_allocated_variable_tensors) {
+        if (allocated_tensor.uid == uid) {
+            return allocated_tensor;
+        }
+    }
+    return NULL;
+}
+----
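+
+The helpers above assume a tensor_t record that carries per-variable bookkeeping
+(data pointer, uid, shape, element type, and the is_written and seen flags) and a
+tensor_size helper returning the element count of a shape.
+A minimal sketch of these assumptions, illustrative only and not part of the specification:
+
+[source,c++]
+----
+struct tensor_t {
+    void*   data;        // Pointer to the persistent element storage
+    int32_t uid;         // Globally unique identifier of the variable tensor
+    dim_t   shape;       // Shape of the variable tensor
+    type_t  type;        // Element type of the variable tensor
+    bool    is_written;  // Set once the variable tensor has been given a value
+    bool    seen;        // Set when the variable is declared in the current graph execution
+};
+
+// Element count of a shape: the product of its dimensions
+size_t tensor_size(dim_t shape) {
+    size_t size = 1;
+    for (int32_t i = 0; i < rank(shape); i++) {
+        size *= shape[i];
+    }
+    return size;
+}
+----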
+
==== Broadcast Helpers
The following function derives the broadcast output shape from the input shapes.
diff --git a/chapters/variable.adoc b/chapters/variable.adoc
new file mode 100644
index 0000000..1f7da51
--- /dev/null
+++ b/chapters/variable.adoc
@@ -0,0 +1,106 @@
+//
+// This confidential and proprietary software may be used only as
+// authorised by a licensing agreement from ARM Limited
+// (C) COPYRIGHT 2023 ARM Limited
+// ALL RIGHTS RESERVED
+// The entire notice above must be reproduced on all authorised
+// copies and copies may only be made to the extent permitted
+// by a licensing agreement from ARM Limited.
+
+=== Variable Operators
+
+TOSA implements three variable operators for expressing persistent mutable values across multiple TOSA graph invocations.
+
+==== VARIABLE
+
+Defines a new TOSA variable.
+This is a persistent mutable value across multiple TOSA graph invocations.
+Modifications are expressed using read/write semantics.
+
+include::{generated}/operators/VARIABLE.adoc[]
+
+[source,c++]
+----
+
+tensor_t var_tensor = variable_tensor_lookup(uid);
+
+// First invocation: the variable tensor has not been allocated yet
+if (var_tensor == NULL) {
+    // Allocate the persistent mutable memory for the variable tensor
+    var_tensor = variable_tensor_allocate<var_t>(var_shape, uid);
+
+    if (initial_value != NULL) {
+        REQUIRE(var_t == in_t);
+        REQUIRE(var_shape == shape);
+        for_each (index in shape) {
+            // Copy data from initial_value to var_tensor
+            in_t value = tensor_read<in_t>(initial_value, shape, index);
+            tensor_write<in_t>(var_tensor.data, var_shape, index, value);
+        }
+        var_tensor.is_written = true;
+    }
+} else { // Variable tensor has already been declared
+    // It is invalid to declare a second variable with the same uid within a single graph execution
+    REQUIRE(!var_tensor.seen);
+}
+
+var_tensor.seen = true;
+
+----
+
+==== VARIABLE_WRITE
+
+Assigns a value to the pseudo-buffer resource holding a persistent mutable tensor.
+
+include::{generated}/operators/VARIABLE_WRITE.adoc[]
+
+[source,c++]
+----
+
+tensor_t variable_tensor = variable_tensor_lookup(uid);
+// Check this variable tensor has been declared
+REQUIRE(variable_tensor);
+// The tensor must have been seen in this graph execution before it can be written to
+// The seen flag is cleared before each graph execution and set on declaration
+REQUIRE(variable_tensor.seen);
+// Input tensor's shape and variable_tensor's shape have to match
+REQUIRE(variable_tensor.shape == shape);
+// Input tensor's type and variable_tensor's type have to match
+REQUIRE(variable_tensor.type == in_t);
+
+for_each (index in shape) {
+    // Write data from the input to the pseudo-buffer resource
+    in_t value = tensor_read<in_t>(input1, shape, index);
+    tensor_write<in_t>(variable_tensor.data, variable_tensor.shape, index, value);
+}
+
+variable_tensor.is_written = true;
+
+----
+
+==== VARIABLE_READ
+
+Reads the value from a pseudo-buffer resource holding a persistent mutable tensor.
+
+include::{generated}/operators/VARIABLE_READ.adoc[]
+
+[source,c++]
+----
+
+tensor_t variable_tensor = variable_tensor_lookup(uid);
+// Check this variable tensor has been declared
+REQUIRE(variable_tensor != NULL);
+// Check this variable tensor has been written
+REQUIRE(variable_tensor.is_written);
+// Output tensor's shape and variable_tensor's shape have to match
+REQUIRE(variable_tensor.shape == shape);
+// Output tensor's type and variable_tensor's type have to match
+REQUIRE(variable_tensor.type == out_t);
+
+for_each (index in shape) {
+    // Read data from the pseudo-buffer resource to the output
+    out_t value = tensor_read<out_t>(variable_tensor.data, variable_tensor.shape, index);
+    tensor_write<out_t>(output1, shape, index, value);
+}
+
+----
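+
+As an informative example, and not part of the specification, the following sketch shows how the
+three operators compose to keep a counter across graph invocations.
+The call syntax is shorthand for the operators defined above, and `one` stands for an assumed
+constant tensor holding the value 1.
+
+[source,c++]
+----
+// Declare the counter; allocation and initialisation happen on the first invocation only
+VARIABLE(uid=1, var_shape=[1], type=INT32, initial_value=[0]);
+// Read the value persisted by the previous invocation (or the initial value)
+counter = VARIABLE_READ(uid=1);
+// Update the value with ordinary TOSA operators (one is an assumed constant tensor)
+counter = ADD(counter, one);
+// Persist the updated value for the next invocation
+VARIABLE_WRITE(uid=1, input1=counter);
+----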
diff --git a/tools/dictionary.dic b/tools/dictionary.dic
index 70e556a..e2e1a58 100644
--- a/tools/dictionary.dic
+++ b/tools/dictionary.dic
@@ -82,3 +82,4 @@ TPUs
ulp
unary
Unary
+uid
diff --git a/tools/tosa.py b/tools/tosa.py
index 26e501f..803e478 100644
--- a/tools/tosa.py
+++ b/tools/tosa.py
@@ -4,6 +4,7 @@
import re
import xml.etree.ElementTree as ET
+
# possible shapes: shape1, [2], [N,H,W,C]
# returns (checkable, rank)
# checkable is false if shape doesn't contain []
@@ -46,8 +47,18 @@ class TOSALevel:
class TOSAOperatorArgument:
def __init__(
- self, name, description, categories, ty, elty, shape, levellimits, rank
+ self,
+ name,
+ description,
+ categories,
+ ty,
+ elty,
+ shape,
+ levellimits,
+ rank,
+ optional=False,
):
+ assert isinstance(optional, bool)
self.name = name
self.description = description
self.categories = categories
@@ -56,6 +67,7 @@ class TOSAOperatorArgument:
self.shape = shape
self.levellimits = levellimits
self.rank = rank
+ self.optional = optional
class TOSAOperatorDataTypeSupport:
@@ -161,6 +173,7 @@ class TOSASpec:
shape = arg.get("shape")
levellimits = []
rank = []
+ optional = arg.get("optional", "false") == "true"
r = arg.find("rank")
if r is not None:
rank = [r.get("min"), r.get("max")]
@@ -194,7 +207,7 @@ class TOSASpec:
argcats.append(TOSAOperatorArgumentCategory(cat[0], cat[1].split(",")))
return TOSAOperatorArgument(
- name, desc, argcats, argtype, argtelty, shape, levellimits, rank
+ name, desc, argcats, argtype, argtelty, shape, levellimits, rank, optional
)
def __load_enum(self, arg):
diff --git a/tosa.xml b/tosa.xml
index 707ea3b..4ec3775 100644
--- a/tosa.xml
+++ b/tosa.xml
@@ -2550,6 +2550,58 @@ used.</description>
</arguments>
</operator>
</operatorgroup>
+ <operatorgroup name="variable">
+ <operator>
+ <name>VARIABLE</name>
+ <arguments>
+ <argument category="attribute" name="uid" type="tensor_t" shape="-" tensor-element-type="int32_t">
+ <description>Globally unique identifier for the declared variable tensor.</description>
+ <rank min="0" max="0"/>
+ </argument>
+ <argument category="attribute" name="var_shape" type="tensor_t" shape="var_shape" tensor-element-type="index_t">
+ <description>The variable tensor shape</description>
+ <rank min="1" max="1"/>
+ </argument>
+ <argument category="attribute" name="type" type="tensor_t" shape="-" tensor-element-type="var_t">
+ <description>Type of the tensor variable elements.</description>
+ <rank min="0" max="0"/>
+ </argument>
+ <argument category="attribute" name="initial_value" type="tensor_t" shape="shape" tensor-element-type="in_t" optional="true">
+ <description>Initial value of the variable tensor. This argument is optional with default value NULL.</description>
+ <levellimit value="rank(shape)" limit="MAX_RANK"/>
+ <rank min="0" max="MAX_RANK"/>
+ </argument>
+ </arguments>
+ </operator>
+ <operator>
+ <name>VARIABLE_WRITE</name>
+ <arguments>
+ <argument category="attribute" name="uid" type="tensor_t" shape="-" tensor-element-type="int32_t">
+ <description>Globally unique identifier of the variable tensor being written to.</description>
+ <rank min="0" max="0"/>
+ </argument>
+ <argument category="input" name="input1" type="tensor_t" shape="shape" tensor-element-type="in_t">
+ <description>Input tensor</description>
+ <levellimit value="rank(shape)" limit="MAX_RANK"/>
+ <rank min="0" max="MAX_RANK"/>
+ </argument>
+ </arguments>
+ </operator>
+ <operator>
+ <name>VARIABLE_READ</name>
+ <arguments>
+ <argument category="attribute" name="uid" type="tensor_t" shape="-" tensor-element-type="int32_t">
+ <description>Globally unique identifier of the variable tensor being read from.</description>
+ <rank min="0" max="0"/>
+ </argument>
+ <argument category="output" name="output1" type="tensor_t" shape="shape" tensor-element-type="out_t">
+ <description>Output tensor</description>
+ <levellimit value="rank(shape)" limit="MAX_RANK"/>
+ <rank min="0" max="MAX_RANK"/>
+ </argument>
+ </arguments>
+ </operator>
+ </operatorgroup>
</operators>
<enum name="resize_mode_t" description="Valid resize types">
@@ -2563,4 +2615,14 @@ used.</description>
<enumval value="2" name="FP32" description="32-bit floating-point"/>
</enum>
+ <enum name="var_t" description="Variable tensor data type">
+ <enumval value="0" name="BOOLEAN" description="Boolean"/>
+ <enumval value="1" name="INT8" description="8-bit integer"/>
+ <enumval value="2" name="INT16" description="16-bit integer"/>
+ <enumval value="3" name="INT32" description="32-bit integer"/>
+ <enumval value="4" name="FP16" description="16-bit floating-point"/>
+ <enumval value="5" name="BF16" description="16-bit brain floating-point"/>
+ <enumval value="6" name="FP32" description="32-bit floating-point"/>
+ </enum>
+
</tosa>
diff --git a/tosa.xsd b/tosa.xsd
index 6aaf204..b6aa162 100644
--- a/tosa.xsd
+++ b/tosa.xsd
@@ -65,6 +65,7 @@
<xs:enumeration value="index_t"/>
<xs:enumeration value="mul_t"/>
<xs:enumeration value="TABLE_SIZE"/>
+ <xs:enumeration value="var_t"/>
</xs:restriction>
</xs:simpleType>
@@ -206,6 +207,7 @@
<xs:attribute name="type" type="argument-type" use="required"/>
<xs:attribute name="tensor-element-type" type="argument-tensor-element-type" use="required"/>
<xs:attribute name="shape" type="xs:string" use="required"/>
+ <xs:attribute name="optional" type="xs:boolean"/>
</xs:complexType>
</xs:element>