From 58098a7b1ffcf41da759f862deb753c82fe5b4b0 Mon Sep 17 00:00:00 2001 From: Eric Kunze Date: Fri, 5 Aug 2022 15:40:12 -0700 Subject: Machine parsable specification This converts portions of the asciidoc specification into an xml document and schema. For the html and pdf outputs, the xml is converted to asciidoc files that are included into the existing specification. The xml allows future automated uses of the tosa specification while maintaining rough compatibility with the existing document. No significant functional changes are included in this change. Change-Id: I7f1f95c527638e270c157d58fcdec6a3510daea5 Signed-off-by: Eric Kunze --- .gitignore | 2 + .pre-commit-config.yaml | 21 + Makefile | 36 +- README.md | 4 +- chapters/activation_funcs.adoc | 62 +- chapters/comparison.adoc | 62 +- chapters/control_flow.adoc | 23 +- chapters/data_layout.adoc | 169 +--- chapters/data_nodes.adoc | 45 +- chapters/ewise_binary.adoc | 339 +------ chapters/ewise_ternary.adoc | 24 +- chapters/ewise_unary.adoc | 206 +--- chapters/image.adoc | 37 +- chapters/reduction.adoc | 123 +-- chapters/scatter_gather.adoc | 49 +- chapters/tensor_ops.adoc | 285 +----- chapters/type_conversion.adoc | 87 +- tools/dictionary.dic | 1 + tools/genspec.py | 68 ++ tools/get_descriptions.py | 9 +- tools/tosa.py | 97 ++ tosa.xml | 2160 ++++++++++++++++++++++++++++++++++++++++ tosa.xsd | 169 ++++ 23 files changed, 2628 insertions(+), 1450 deletions(-) create mode 100644 .gitignore create mode 100644 .pre-commit-config.yaml create mode 100755 tools/genspec.py create mode 100644 tools/tosa.py create mode 100644 tosa.xml create mode 100644 tosa.xsd diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..f00d102 --- /dev/null +++ b/.gitignore @@ -0,0 +1,2 @@ +out/ +*.pyc diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 0000000..9280ab7 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,21 @@ +# Copyright (c) 2022 Arm Limited. 
+# SPDX-License-Identifier: Apache-2.0 + +# See https://pre-commit.com for more information +# See https://pre-commit.com/hooks.html for more hooks +repos: +- repo: https://github.com/asottile/reorder_python_imports + rev: v3.0.1 + hooks: + - id: reorder-python-imports + +- repo: https://github.com/psf/black + rev: 22.3.0 + hooks: + - id: black + +- repo: https://github.com/pycqa/flake8 + rev: 4.0.1 + hooks: + - id: flake8 + args: [--max-line-length=88] diff --git a/Makefile b/Makefile index fa6274d..9fbaeab 100644 --- a/Makefile +++ b/Makefile @@ -13,31 +13,42 @@ MKDIR=mkdir -p ASCIIDOC=asciidoctor ASPELL=aspell SHELL=/bin/bash -o pipefail +XMLLINT = xmllint HTMLDIR=out/html PDFDIR=out/pdf +GENDIR=out/gen -COMMON_ARGS= -a revnumber="$(TOSAREVISION)" +COMMON_ARGS= -a revnumber="$(TOSAREVISION)" -a generated="$(abspath $(GENDIR))" SPECSRC := tosa_spec.adoc -ADOCFILES = $(wildcard chapters/[A-Za-z]*.adoc) +ADOCFILES = $(wildcard chapters/[A-Za-z]*.adoc) $(wildcard $(GENDIR)/*/*.adoc) SPECFILES = $(ADOCFILES) tosa.css FIGURES = $(wildcard figures/*.svg) +SPECXML := tosa.xml +SPECSCHEMA := tosa.xsd +GENSCRIPTS := tools/tosa.py tools/genspec.py + +GEN := $(GENDIR)/gen.stamp .DELETE_ON_ERROR: -.PHONY: all html pdf clean spell copy_html_figures +.PHONY: all html pdf clean spell copy_html_figures lint all: spell html pdf -html: copy_html_figures $(HTMLDIR)/tosa_spec.html +html: lint copy_html_figures $(HTMLDIR)/tosa_spec.html -pdf: $(PDFDIR)/tosa_spec.pdf +pdf: lint $(PDFDIR)/tosa_spec.pdf clean: $(RM) $(HTMLDIR)/tosa_spec.html - rm -rf $(HTMLDIR)/figures + $(RM) -rf $(HTMLDIR)/figures $(RM) $(PDFDIR)/tosa_spec.pdf + $(RM) -r $(GENDIR) + $(RM) out/lint.txt + +lint: out/lint.txt spell: out/spell.txt @@ -57,11 +68,20 @@ out/spell.txt: $(ADOCFILES) FORCE else echo No spelling errors found ; \ fi -$(HTMLDIR)/tosa_spec.html: $(SPECSRC) $(SPECFILES) +.PRECIOUS: out/lint.txt +out/lint.txt: $(SPECXML) $(SPECSCHEMA) + echo Linting XML + $(XMLLINT) --noout --schema $(SPECSCHEMA) $(SPECXML) + +$(GEN): $(SPECXML) $(GENSCRIPTS) + tools/genspec.py --xml $(SPECXML) --outdir $(GENDIR) + @touch $@ + +$(HTMLDIR)/tosa_spec.html: $(SPECSRC) $(SPECFILES) $(GEN) $(MKDIR) $(HTMLDIR) $(ASCIIDOC) -b html5 -a stylesheet=tosa.css $(COMMON_ARGS) -o $@ $< -$(PDFDIR)/tosa_spec.pdf: $(SPECSRC) $(SPECFILES) +$(PDFDIR)/tosa_spec.pdf: $(SPECSRC) $(SPECFILES) $(GEN) $(MKDIR) $(PDFDIR) $(ASCIIDOC) -r asciidoctor-pdf -b pdf $(COMMON_ARGS) -o $@ $(SPECSRC) diff --git a/README.md b/README.md index 7b01711..b47ab51 100644 --- a/README.md +++ b/README.md @@ -12,6 +12,8 @@ using the following tools: * Asciidoctor 1.5.5 or later ([Asciidoctor](https://asciidoctor.org)) * Asciidoctor-pdf * GNU Make 4.1 or later +* xmllint +* Python 3.8 or later The default `make` build creates both an html and a pdf version of the specification in out/html and out/pdf @@ -19,4 +21,4 @@ in out/html and out/pdf If only an html build is required, `make html` will build only the html file, and asciidoctor-pdf is not needed. -If only a pdf build is required, `make pdf` will build only the pdf. \ No newline at end of file +If only a pdf build is required, `make pdf` will build only the pdf. diff --git a/chapters/activation_funcs.adoc b/chapters/activation_funcs.adoc index 27ba596..54697d2 100644 --- a/chapters/activation_funcs.adoc +++ b/chapters/activation_funcs.adoc @@ -11,20 +11,12 @@ === Activation Functions ==== CLAMP + Clamp to an arbitrary minimum and maximum value. Maximum and minimum values are specified as values in the range of the input type. 
No zero point subtraction is done to the values, thus to clamp to the zero point value, the zero point itself should be supplied as the minimum value. -*Arguments:* - -|=== -|Argument|Type|Name|Shape|Description - -|Input|in_out_t*|Input|shape|Input tensor -|Attribute|in_out_t|min_val|-|minimum clip value -|Attribute|in_out_t|max_val|-|maximum clip value -|Output|in_out_t*|Output|shape|Output tensor of same type and shape as input -|=== +include::{generated}/operators/CLAMP.adoc[] *Operation Function:* [source,c++] @@ -37,18 +29,6 @@ for_each(index in shape) { } ---- -*Supported Data Types:* - -|=== -|Profile|Mode|in_out_t - -|Any|signed 8|int8_t -|Any|signed 16|int16_t -|MI, MT|fp16|fp16_t -|MI, MT|bf16|bf16_t -|MI, MT|fp32|fp32_t -|=== - ==== SIGMOID Sigmoid function: output = 1 / (1 + exp(-input)) @@ -69,24 +49,7 @@ int16_t sigmoid_reference(int16_t x) { // input x range is -256 to + 256 inclusi generate_lookup_table(&sigmoid_table, &sigmoid_reference); ---- -*Arguments:* - -|=== -|Argument|Type|Name|Shape|Description - -|Input|in_out_t*|Input|shape|Input tensor -|Output|in_out_t*|Output|shape|Output tensor of same type and shape as input -|=== - -*Supported Data Types:* - -|=== -|Profile|Mode|in_out_t - -|MI, MT|fp16|fp16_t -|MI, MT|bf16|bf16_t -|MI, MT|fp32|fp32_t -|=== +include::{generated}/operators/SIGMOID.adoc[] ==== TANH @@ -109,21 +72,4 @@ int16_t tanh_reference(int16_t x) { // input x range is -256 to +256 inclusive generate_lookup_table(&tanh_table, &tanh_reference); ---- -*Arguments:* - -|=== -|Argument|Type|Name|Shape|Description - -|Input|in_out_t*|Input|shape|Input tensor -|Output|in_out_t*|Output|shape|Output tensor of same type and shape as input -|=== - -*Supported Data Types:* - -|=== -|Profile|Mode|in_out_t - -|MI, MT|fp16|fp16_t -|MI, MT|bf16|bf16_t -|MI, MT|fp32|fp32_t -|=== +include::{generated}/operators/TANH.adoc[] diff --git a/chapters/comparison.adoc b/chapters/comparison.adoc index 5c27071..9bc28ad 100644 --- a/chapters/comparison.adoc +++ b/chapters/comparison.adoc @@ -13,15 +13,7 @@ Elementwise comparison operation -*Arguments:* - -|=== -|Argument|Type|Name|Shape|Description - -|Input|in_t*|input1|shape1|Input tensor -|Input|in_t*|input2|shape2|Input tensor with the same rank as input1 -|Output|out_t*|output|shape|Output tensor with broadcast shape if necessary -|=== +include::{generated}/operators/EQUAL.adoc[] *Operation Function:* @@ -41,30 +33,11 @@ for_each(index in shape) { } ---- -*Supported Data Types:* - -|=== -|Profile|Mode|in_t|out_t - -|Any|signed 32|int32_t|bool_t -|MI, MT|fp16|fp16_t|bool_t -|MI, MT|bf16|bf16_t|bool_t -|MI, MT|fp32|fp32_t|bool_t -|=== - ==== GREATER Elementwise greater than comparison operation -*Arguments:* - -|=== -|Argument|Type|Name|Shape|Description - -|Input|in_t*|input1|shape1|Input tensor -|Input|in_t*|input2|shape2|Input tensor with the same rank as input1 -|Output|out_t*|output|shape|Output tensor with broadcast shape if necessary -|=== +include::{generated}/operators/GREATER.adoc[] *Operation Function:* @@ -84,29 +57,11 @@ for_each(index in shape) { } ---- -*Supported Data Types:* -|=== -|Profile|Mode|in_t|out_t - -|Any|signed 32|int32_t|bool_t -|MI, MT|fp16|fp16_t|bool_t -|MI, MT|bf16|bf16_t|bool_t -|MI, MT|fp32|fp32_t|bool_t -|=== - ==== GREATER_EQUAL Elementwise comparison operation -*Arguments:* - -|=== -|Argument|Type|Name|Shape|Description - -|Input|in_t*|input1|shape1|Input tensor -|Input|in_t*|input2|shape2|Input tensor with the same rank as input1 -|Output|out_t*|output|shape|Output tensor with broadcast 
shape if necessary -|=== +include::{generated}/operators/GREATER_EQUAL.adoc[] *Operation Function:* @@ -125,14 +80,3 @@ for_each(index in shape) { tensor_write(output, shape, index, result); } ---- - -*Supported Data Types:* - -|=== -|Profile|Mode|in_t|out_t - -|Any|signed 32|int32_t|bool_t -|MI, MT|fp16|fp16_t|bool_t -|MI, MT|bf16|bf16_t|bool_t -|MI, MT|fp32|fp32_t|bool_t -|=== diff --git a/chapters/control_flow.adoc b/chapters/control_flow.adoc index e3c7fad..de6bdda 100644 --- a/chapters/control_flow.adoc +++ b/chapters/control_flow.adoc @@ -15,17 +15,7 @@ TOSA implements two control flow operators, for conditional branching and loop b Evaluates a Boolean condition and then takes one of two distinct execution paths. This implements the semantic if-then-else structure. -*Arguments:* - -|=== -|Argument|Type|Name|Description - -|Input |tensor_list_t |input_list |List of input tensors -|Input |bool_t |condition |Input condition as rank-0 tensor -|Attribute|tosa_graph_t|then_graph |TOSA graph to execute if condition is true -|Attribute|tosa_graph_t|else_graph |TOSA graph to execute if condition is false -|Output|tensor_list_t |output_list|List of output tensors -|=== +include::{generated}/operators/COND_IF.adoc[] *Operation Function:* @@ -47,16 +37,7 @@ if (condition) { Generates and evaluates a Bool condition and either executes a loop body or exits the loop. This action is performed repeatedly after updating and re-evaluating the Boolean condition every iteration. This implements the semantic foreach or while iterative loop structure. -*Arguments:* - -|=== -|Argument|Type|Name|Description - -|Input |tensor_list_t |input_list |List of input tensors -|Attribute|tosa_graph_t|cond_graph |TOSA graph to evaluate the condition -|Attribute|tosa_graph_t|body_graph |TOSA graph to execute the loop body -|Output|tensor_list_t |output_list|List of output tensors -|=== +include::{generated}/operators/WHILE_LOOP.adoc[] *Operation Function:* diff --git a/chapters/data_layout.adoc b/chapters/data_layout.adoc index 5ba9012..65a426b 100644 --- a/chapters/data_layout.adoc +++ b/chapters/data_layout.adoc @@ -13,15 +13,7 @@ Concatenate a list of tensors along a given axis. No data conversion happens during a concat operation. -*Arguments:* - -|=== -|Argument|Type|Name|Shape|Description - -|Input|in_out_t*|input1|shapes1[]|List of input tensors. All inputs must have the same rank and data type -|Attribute|int32_t|axis|-|Axis along which concatenation is to occur, in range from 0 to rank(shape)-1 -|Output|in_out_t*|output|shape|Output tensor -|=== +include::{generated}/operators/CONCAT.adoc[] *Operation Function:* @@ -52,36 +44,13 @@ for_each(index1 in shape) { ---- -*Supported Data Types:* - -|=== -|Profile|Mode|in_out_t - -|Any|Boolean|bool_t -|Any|signed 8|int8_t -|Any|signed 16|int16_t -|Any|signed 32|int32_t -|MI, MT|fp16|fp16_t -|MI, MT|bf16|bf16_t -|MI, MT|fp32|fp32_t -|=== - ==== PAD Pads a tensor along the borders of each dimension with a supplied value. Returns a new tensor with the padding included. The pad_const value includes the zero point if the tensor uses a zero point. 
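For quantized tensors this means the caller quantizes the padding value itself.
As an illustrative sketch (not part of the specification), assuming an int8 representation where a stored element equals the real value plus the zero point, the pad_const for a real value of zero is simply the zero point; pad_const_for_real_value below is a hypothetical helper:

[source,c++]
----
#include <cstdint>
#include <iostream>

// Hypothetical helper: with an int8 representation where
// stored = real_value + zero_point, PAD applies pad_const directly,
// so padding with a real value of 0 must supply the zero point itself.
int8_t pad_const_for_real_value(int32_t real_value, int32_t zero_point) {
    int32_t q = real_value + zero_point; // quantize
    if (q < -128) q = -128;              // clamp to int8 range
    if (q > 127)  q = 127;
    return static_cast<int8_t>(q);
}

int main() {
    int32_t input_zp = -20;
    // Padding with logical zero: pass the zero point as pad_const.
    std::cout << int(pad_const_for_real_value(0, input_zp)) << "\n"; // -20
}
----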
-*Arguments:* - -|=== -|Argument|Type|Name|Shape|Description - -|Input|in_out_t*|input1|shape1|Input tensor -|Attribute|int32_t|padding|[rank(shape1),2]|Number of pad elements at the start and end of each dimension -|Attribute|in_out_t|pad_const|-|Constant value to be used as padding -|Output|in_out_t*|output|shape|Output tensor of same type as the input tensor -|=== +include::{generated}/operators/PAD.adoc[] *Operation Function:* @@ -107,33 +76,11 @@ for_each(index in shape) { } ---- -*Supported Data Types:* - -|=== -|Profile|Mode|in_out_t - -|Any|Boolean|bool_t -|Any|signed 8|int8_t -|Any|signed 16|int16_t -|Any|signed 32|int32_t -|MI, MT|fp16|fp16_t -|MI, MT|bf16|bf16_t -|MI, MT|fp32|fp32_t -|=== - ==== RESHAPE Returns a tensor with the same type/values as the input, with a new shape specified by the shape argument. Reshape may operate on tensors of any rank. No data conversion happens during a reshape operation. -*Arguments:* - -|=== -|Argument|Type|Name|Shape|Description - -|Input|in_out_t*|input1|shape1|Input tensor -|Attribute|int32_t|new_shape|[rank(shape)]|List of values, with each element giving the size of the result tensor for the given dimension. -|Output|in_out_t*|output|shape|Output tensor of same type, size as the input tensor -|=== +include::{generated}/operators/RESHAPE.adoc[] *Operation Function:* @@ -153,33 +100,11 @@ for_each(index in shape) { } ---- -*Supported Data Types:* - -|=== -|Profile|Mode|in_out_t - -|Any|Boolean|bool_t -|Any|signed 8|int8_t -|Any|signed 16|int16_t -|Any|signed 32|int32_t -|MI, MT|fp16|fp16_t -|MI, MT|bf16|bf16_t -|MI, MT|fp32|fp32_t -|=== - ==== REVERSE Returns a tensor with the same type/values as the input, with the data reversed along the given axis. No data conversion happens during a reverse operation. -*Arguments:* - -|=== -|Argument|Type|Name|Shape|Description - -|Input|in_out_t*|input|shape|Input tensor from 1 to 4 dims -|Attribute|int32_t|axis|-|Axis to reverse, in range from 0 to rank(shape)-1 -|Output|in_out_t*|output|shape|Output tensor. Same shape as input tensor. -|=== +include::{generated}/operators/REVERSE.adoc[] *Operation Function:* @@ -194,34 +119,12 @@ for_each(index in shape) { } ---- -*Supported Data Types:* - -|=== -|Profile|Mode|in_out_t - -|Any|Boolean|bool_t -|Any|signed 8|int8_t -|Any|signed 16|int16_t -|Any|signed 32|int32_t -|MI, MT|fp16|fp16_t -|MI, MT|bf16|bf16_t -|MI, MT|fp32|fp32_t -|=== - ==== SLICE Extracts a slice of the input1 on the given axis, beginning at the start coordinates, and extending for size elements in each direction. No data conversion happens during a slice operation. -*Arguments:* -|=== -|Argument|Type|Name|Shape|Description - -|Input|in_out_t*|input1|shape1|Input tensor with rank from 1 to 4 -|Attribute|int32_t|start|[rank(input1)]|List of integer coordinates, of length equal to the rank of input1. Start coordinate for slicing. -|Attribute|int32_t|size|[rank(input1)]|List of integer size values, of length equal to the rank of input1. Size of the input to be used. -|Output|in_out_t*|output|shape|Output tensor of same type as the input tensor -|=== +include::{generated}/operators/SLICE.adoc[] *Operation Function:* @@ -248,33 +151,11 @@ for_each(index in shape) { } ---- -*Supported Data Types:* - -|=== -|Profile|Mode|in_out_t - -|Any|Boolean|bool_t -|Any|signed 8|int8_t -|Any|signed 16|int16_t -|Any|signed 32|int32_t -|MI, MT|fp16|fp16_t -|MI, MT|bf16|bf16_t -|MI, MT|fp32|fp32_t -|=== - ==== TILE Replicates input1 multiples times along each dimension. 
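Each output element maps back to the input by taking the output index modulo the input size on every dimension, as in the operation function below.
A minimal one-dimensional sketch of that index rule (illustrative only; tile_1d is a hypothetical helper, not a TOSA function):

[source,c++]
----
#include <cstddef>
#include <iostream>
#include <vector>

// 1-D illustration of TILE's index rule: every output index wraps back
// into the input with index1[i] = index[i] % shape1[i].
std::vector<int> tile_1d(const std::vector<int>& input, int multiples) {
    std::vector<int> output(input.size() * multiples);
    for (std::size_t i = 0; i < output.size(); ++i) {
        output[i] = input[i % input.size()];
    }
    return output;
}

int main() {
    for (int v : tile_1d({1, 2, 3}, 2)) std::cout << v << ' '; // 1 2 3 1 2 3
    std::cout << '\n';
}
----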
-*Arguments:* - -|=== -|Argument|Type|Name|Shape|Description - -|Input|in_out_t*|input1|shape1|Input tensor with rank from 1 to 4 -|Attribute|int32_t|multiples|[rank(shape1)]|Number of times to replicate input1 in each dimension -|Output|in_out_t*|output|shape|Output tensor of same type, rank as the input tensor -|=== +include::{generated}/operators/TILE.adoc[] *Operation Function:* @@ -291,34 +172,12 @@ for_each(index in shape) { } ---- -*Supported Data Types:* - -|=== -|Profile|Mode|in_out_t - -|Any|Boolean|bool_t -|Any|signed 8|int8_t -|Any|signed 16|int16_t -|Any|signed 32|int32_t -|MI, MT|fp16|fp16_t -|MI, MT|bf16|bf16_t -|MI, MT|fp32|fp32_t -|=== - ==== TRANSPOSE Permutes the dimensions of the input tensor input1 based on the perms argument. Each value in the perms list must be a valid dimension of the input tensor and may not be repeated. -*Arguments:* - -|=== -|Argument|Type|Name|Shape|Description - -|Input|in_out_t*|input1|shape1|Input tensor with minimum rank of one. -|Attribute|int32_t|perms|[rank(input1)]|List of integers of length equal to the rank of input1. Values must be valid dimensions within shape1, and may not be repeated. -|Output|in_out_t*|output|shape|Output tensor of same type, rank as the input tensor -|=== +include::{generated}/operators/TRANSPOSE.adoc[] *Operation Function:* @@ -348,17 +207,3 @@ for_each(index in shape) { tensor_write(output, shape, index, value); } ---- - -*Supported Data Types:* - -|=== -|Profile|Mode|in_out_t - -|Any|Boolean|bool_t -|Any|signed 8|int8_t -|Any|signed 16|int16_t -|Any|signed 32|int32_t -|MI, MT|fp16|fp16_t -|MI, MT|bf16|bf16_t -|MI, MT|fp32|fp32_t -|=== diff --git a/chapters/data_nodes.adoc b/chapters/data_nodes.adoc index 5f45464..65e6b75 100644 --- a/chapters/data_nodes.adoc +++ b/chapters/data_nodes.adoc @@ -13,52 +13,11 @@ A node containing constant data for use as the input to an operation. May hold data in any of the supported data formats. -*Arguments:* - -|=== -|Argument|Type|Name|Shape|Description - -|Attribute|out_t*|values|shape|Constant values -|Output|out_t*|output|shape|Output tensor of same type, size as the input tensor -|=== - -*Supported Data Types:* - -|=== -|Profile|Mode|out_t - -|Any|Boolean|bool_t -|Any|signed 8|int8_t -|Any|signed 16|int16_t -|Any|signed 32|int32_t -|MI, MT|fp16|fp16_t -|MI, MT|bf16|bf16_t -|MI, MT|fp32|fp32_t -|=== +include::{generated}/operators/CONST.adoc[] ==== IDENTITY Returns a tensor with the same shape, type, and contents as the input. -*Arguments:* - -|=== -|Argument|Type|Name|Shape|Description - -|Input|in_out_t*|input1|shape|Input tensor -|Output|in_out_t*|output|shape|Output tensor of same type, size as the input tensor -|=== - -*Supported Data Types:* - -|=== -|Profile|Mode|in_out_t +include::{generated}/operators/IDENTITY.adoc[] -|Any|Boolean|bool_t -|Any|signed 8|int8_t -|Any|signed 16|int16_t -|Any|signed 32|int32_t -|MI, MT|fp16|fp16_t -|MI, MT|bf16|bf16_t -|MI, MT|fp32|fp32_t -|=== diff --git a/chapters/ewise_binary.adoc b/chapters/ewise_binary.adoc index 27efb44..dcd44b4 100644 --- a/chapters/ewise_binary.adoc +++ b/chapters/ewise_binary.adoc @@ -14,15 +14,7 @@ Elementwise addition of input1 and input2. Axis of size 1 will be broadcast, as necessary. Rank of input tensors must match. 
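Broadcasting here is rank-preserving: both inputs have the same rank, and an axis of size 1 stretches to the matching axis of the other operand.
A hedged sketch of how an implementation might derive the broadcast output shape (broadcast_shape is a hypothetical helper, not part of the specification):

[source,c++]
----
#include <cstddef>
#include <iostream>
#include <stdexcept>
#include <vector>

// Rank-preserving broadcast: ranks must already match; an axis of size 1
// stretches to the other operand's size, and any other mismatch is invalid.
std::vector<int> broadcast_shape(const std::vector<int>& a,
                                 const std::vector<int>& b) {
    if (a.size() != b.size()) throw std::invalid_argument("rank mismatch");
    std::vector<int> out(a.size());
    for (std::size_t i = 0; i < a.size(); ++i) {
        if (a[i] == b[i])   out[i] = a[i];
        else if (a[i] == 1) out[i] = b[i];
        else if (b[i] == 1) out[i] = a[i];
        else throw std::invalid_argument("incompatible axis sizes");
    }
    return out;
}

int main() {
    for (int d : broadcast_shape({1, 3, 4}, {2, 3, 1})) std::cout << d << ' '; // 2 3 4
    std::cout << '\n';
}
----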
-*Arguments:* - -|=== -|Argument|Type|Name|Shape|Description - -|Input|in_out_t*|input1|shape1|Input tensor -|Input|in_out_t*|input2|shape2|Input tensor with the same rank as input1 -|Output|in_out_t*|output|shape|Output tensor with broadcast shape if necessary -|=== +include::{generated}/operators/ADD.adoc[] *Operation Function:* @@ -38,32 +30,12 @@ for_each(index in shape) { } ---- -*Supported Data Types:* - -|=== -|Profile|Mode|in_out_t - -|Any|signed 32|int32_t -|MI, MT|fp16|fp16_t -|MI, MT|bf16|bf16_t -|MI, MT|fp32|fp32_t -|=== - ==== ARITHMETIC_RIGHT_SHIFT Elementwise arithmetic right shift of input1 by the amount specified in input2. Axis of size 1 will be broadcast, as necessary. Rank of input tensors must match. -*Arguments:* - -|=== -|Argument|Type|Name|Shape|Description - -|Input|in_out_t*|input1|shape1|Input tensor -|Input|in_out_t*|input2|shape2|Input tensor with the same rank as input1 -|Attribute|bool_t|round|-|If true then the shift is rounded -|Output|in_out_t*|output|shape|Output tensor with broadcast shape if necessary -|=== +include::{generated}/operators/ARITHMETIC_RIGHT_SHIFT.adoc[] *Operation Function:* @@ -89,30 +61,12 @@ for_each(index in shape) { } ---- -*Supported Data Types:* - -|=== -|Profile|Mode|in_out_t - -|Any|signed 8|int8_t -|Any|signed 16|int16_t -|Any|signed 32|int32_t -|=== - ==== BITWISE_AND Elementwise bitwise AND of input1 and input2. Axis of size 1 will be broadcast as necessary. Rank of input tensors must match. -*Arguments:* - -|=== -|Argument|Type|Name|Shape|Description - -|Input|in_out_t*|input1|shape1|Input tensor -|Input|in_out_t*|input2|shape2|Input tensor with the same rank as input1 -|Output|in_out_t*|output|shape|Output tensor of same type as the input tensors, with broadcast shape if necessary -|=== +include::{generated}/operators/BITWISE_AND.adoc[] *Operation Function:* @@ -128,30 +82,12 @@ for_each(index in shape) { } ---- -*Supported Data Types:* - -|=== -|Profile|Mode|in_out_t - -|Any|signed 8|int8_t -|Any|signed 16|int16_t -|Any|signed 32|int32_t -|=== - ==== BITWISE_OR Elementwise bitwise OR of input1 and input2. Axis of size 1 will be broadcast as necessary. Rank of input tensors must match. -*Arguments:* - -|=== -|Argument|Type|Name|Shape|Description - -|Input|in_out_t*|input1|shape1|Input tensor -|Input|in_out_t*|input2|shape2|Input tensor with the same rank as input1 -|Output|in_out_t*|output|shape|Output tensor with broadcast shape if necessary -|=== +include::{generated}/operators/BITWISE_OR.adoc[] *Operation Function:* @@ -167,30 +103,12 @@ for_each(index in shape) { } ---- -*Supported Data Types:* - -|=== -|Profile|Mode|in_out_t - -|Any|signed 8|int8_t -|Any|signed 16|int16_t -|Any|signed 32|int32_t -|=== - ==== BITWISE_XOR Elementwise bitwise XOR of input1 and input2. Axis of size 1 will be broadcast as necessary. Rank of input tensors must match. -*Arguments:* - -|=== -|Argument|Type|Name|Shape|Description - -|Input|in_out_t*|input1|shape1|Input tensor -|Input|in_out_t*|input2|shape2|Input tensor with the same rank as input1 -|Output|in_out_t*|output|shape|Output tensor with broadcast shape if necessary -|=== +include::{generated}/operators/BITWISE_XOR.adoc[] *Operation Function:* @@ -206,16 +124,6 @@ for_each(index in shape) { } ---- -*Supported Data Types:* - -|=== -|Profile|Mode|in_out_t - -|Any|signed 8|int8_t -|Any|signed 16|int16_t -|Any|signed 32|int32_t -|=== - ==== INTDIV Elementwise integer divide of input1 by input2. @@ -224,15 +132,7 @@ Expected use is for operations on non-scaled integers. 
Floating point divide should use RECIPROCAL and MUL. Quantized integer divide should use TABLE (for 1/x) and MUL. -*Arguments:* - -|=== -|Argument|Type|Name|Shape|Description - -|Input|in_out_t*|input1|shape1|Input tensor -|Input|in_out_t*|input2|shape2|Input tensor with the same rank as input1 -|Output|in_out_t*|output|shape|Output tensor with broadcast shape if necessary -|=== +include::{generated}/operators/INTDIV.adoc[] *Operation Function:* @@ -252,27 +152,12 @@ for_each(index in shape) { } ---- -*Supported Data Types:* -|=== -|Profile|Mode|in_out_t - -|Any|signed 32|int32_t -|=== - ==== LOGICAL_AND Elementwise logical AND of input1 and input2. Axis of size 1 will be broadcast, as necessary. Rank of input tensors must match. -*Arguments:* - -|=== -|Argument|Type|Name|Shape|Description - -|Input|in_out_t*|input1|shape1|Input tensor -|Input|in_out_t*|input2|shape2|Input tensor with the same rank as input1 -|Output|in_out_t*|output|shape|Output tensor with broadcast shape if necessary -|=== +include::{generated}/operators/LOGICAL_AND.adoc[] *Operation Function:* @@ -288,28 +173,12 @@ for_each(index in shape) { } ---- -*Supported Data Types:* - -|=== -|Profile|Mode|in_out_t - -|Any|Bool|bool_t -|=== - ==== LOGICAL_LEFT_SHIFT Elementwise left shift of input1 and input2. Axis of size 1 will be broadcast, as necessary. Rank of input tensors must match. -*Arguments:* - -|=== -|Argument|Type|Name|Shape|Description - -|Input|in_out_t*|input1|shape1|Input tensor -|Input|in_out_t*|input2|shape2|Input tensor with the same rank as input1 -|Output|in_out_t*|output|shape|Output tensor with broadcast shape if necessary -|=== +include::{generated}/operators/LOGICAL_LEFT_SHIFT.adoc[] *Operation Function:* @@ -326,30 +195,12 @@ for_each(index in shape) { } ---- -*Supported Data Types:* - -|=== -|Profile|Mode|in_out_t - -|Any|signed 8|int8_t -|Any|signed 16|int16_t -|Any|signed 32|int32_t -|=== - ==== LOGICAL_RIGHT_SHIFT Elementwise logical right shift of input1 by the amount specified in input2. Axis of size 1 will be broadcast, as necessary. Rank of input tensors must match. -*Arguments:* - -|=== -|Argument|Type|Name|Shape|Description - -|Input|in_out_t*|input1|shape1|Input tensor -|Input|in_out_t*|input2|shape2|Input tensor with the same rank as input1 -|Output|in_out_t*|output|shape|Output tensor with broadcast shape if necessary -|=== +include::{generated}/operators/LOGICAL_RIGHT_SHIFT.adoc[] *Operation Function:* @@ -366,30 +217,12 @@ for_each(index in shape) { } ---- -*Supported Data Types:* - -|=== -|Profile|Mode|in_out_t - -|Any|signed 8|int8_t -|Any|signed 16|int16_t -|Any|signed 32|int32_t -|=== - ==== LOGICAL_OR Elementwise logical OR of input1 and input2. Axis of size 1 will be broadcast as necessary. Rank of input tensors must match. -*Arguments:* - -|=== -|Argument|Type|Name|Shape|Description - -|Input|in_out_t*|input1|shape1|Input tensor -|Input|in_out_t*|input2|shape2|Input tensor with the same rank as input1 -|Output|in_out_t*|output|shape|Output tensor with broadcast shape if necessary -|=== +include::{generated}/operators/LOGICAL_OR.adoc[] *Operation Function:* @@ -405,28 +238,12 @@ for_each(index in shape) { } ---- -*Supported Data Types:* - -|=== -|Profile|Mode|in_out_t - -|Any|Bool|bool_t -|=== - ==== LOGICAL_XOR Elementwise logical XOR of input1 and input2. Axis of size 1 will be broadcast as necessary. Rank of input tensors must match. 
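Logical XOR is true exactly when the two inputs differ; equivalently, `(x OR y) AND NOT (x AND y)`.
A small illustrative sketch over same-shaped boolean data (broadcasting is elided for brevity; logical_xor is a hypothetical helper):

[source,c++]
----
#include <cstddef>
#include <iostream>
#include <vector>

// Elementwise logical XOR on same-shaped boolean data; on booleans,
// XOR is simply inequality.
std::vector<bool> logical_xor(const std::vector<bool>& a,
                              const std::vector<bool>& b) {
    std::vector<bool> out(a.size());
    for (std::size_t i = 0; i < a.size(); ++i) out[i] = a[i] != b[i];
    return out;
}

int main() {
    for (bool v : logical_xor({true, true, false}, {true, false, false}))
        std::cout << v << ' '; // 0 1 0
    std::cout << '\n';
}
----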
-*Arguments:* - -|=== -|Argument|Type|Name|Shape|Description - -|Input|in_out_t*|input1|shape1|Input tensor -|Input|in_out_t*|input2|shape2|Input tensor with the same rank as input1 -|Output|in_out_t*|output|shape|Output tensor of same type as the input tensors, with broadcast shape if necessary -|=== +include::{generated}/operators/LOGICAL_XOR.adoc[] *Operation Function:* @@ -442,28 +259,12 @@ for_each(index in shape) { } ---- -*Supported Data Types:* - -|=== -|Profile|Mode|in_out_t - -|Any|Bool|bool_t -|=== - ==== MAXIMUM Elementwise max of input1 and input2. Axis of size 1 will be broadcast, as necessary. Rank of input tensors must match. -*Arguments:* - -|=== -|Argument|Type|Name|Shape|Description - -|Input|in_out_t*|input1|shape1|Input tensor -|Input|in_out_t*|input2|shape2|Input tensor with the same rank as input1 -|Output|in_out_t*|output|shape|Output tensor with broadcast shape if necessary -|=== +include::{generated}/operators/MAXIMUM.adoc[] *Operation Function:* @@ -479,31 +280,12 @@ for_each(index in shape) { } ---- -*Supported Data Types:* - -|=== -|Profile|Mode|in_out_t - -|Any|signed 32|int32_t -|MI, MT|fp16|fp16_t -|MI, MT|bf16|bf16_t -|MI, MT|fp32|fp32_t -|=== - ==== MINIMUM Elementwise minimum of input1 and input2. Axis of size 1 will be broadcast, as necessary. Rank of input tensors must match. -*Arguments:* - -|=== -|Argument|Type|Name|Shape|Description - -|Input|in_out_t*|input1|shape1|Input tensor -|Input|in_out_t*|input2|shape2|Input tensor with the same rank as input1 -|Output|in_out_t*|output|shape|Output tensor with broadcast shape if necessary -|=== +include::{generated}/operators/MINIMUM.adoc[] *Operation Function:* @@ -519,32 +301,12 @@ for_each(index in shape) { } ---- -*Supported Data Types:* - -|=== -|Profile|Mode|in_out_t - -|Any|signed 32|int32_t -|MI, MT|fp16|fp16_t -|MI, MT|bf16|bf16_t -|MI, MT|fp32|fp32_t -|=== - ==== MUL Elementwise multiplication (Hadamard product) of input1 and input2. Axis of size 1 will be broadcast, as necessary. Rank of input tensors must match. -*Arguments:* - -|=== -|Argument|Type|Name|Shape|Description - -|Input|in_t*|input1|shape1|Input tensor -|Input|in_t*|input2|shape2|Input tensor with the same rank as input1 -|Input (MT profile) Attribute (BI/MI profiles)|uint6_t|shift|-|Result right shift (int32_t data type only) -|Output|out_t*|output|shape|Output tensor with broadcast shape if necessary -|=== +include::{generated}/operators/MUL.adoc[] *Operation Function:* @@ -570,32 +332,12 @@ for_each(index in shape) { } ---- -*Supported Data Types:* -|=== -|Profile|Mode|in_t|out_t - -|Any|signed 8|int8_t|int32_t -|Any|signed 16|int16_t|int32_t -|Any|signed 32|int32_t|int32_t -|MI, MT|fp16|fp16_t|fp16_t -|MI, MT|bf16|bf16_t|bf16_t -|MI, MT|fp32|fp32_t|fp32_t -|=== - ==== POW Elementwise input1 value raised to the power of input2. Axis of size 1 will be broadcast, as necessary. Rank of input tensors must match. -*Arguments:* - -|=== -|Argument|Type|Name|Shape|Description - -|Input|in_out_t*|input1|shape1|Input tensor from 1 to 4 dims -|Input|in_out_t*|input2|shape2|Input tensor with the same rank as input1 -|Output|in_out_t*|output|shape|Output tensor of same type as the input tensors, with broadcast shape if necessary -|=== +include::{generated}/operators/POW.adoc[] *Operation Function:* @@ -611,30 +353,12 @@ for_each(index in shape) { } ---- -*Supported Data Types:* - -|=== -|Profile|Mode|in_out_t - -|MI, MT|fp16|fp16_t -|MI, MT|bf16|bf16_t -|MI, MT|fp32|fp32_t -|=== - ==== SUB Elementwise subtraction of input1 and input2. 
Axis of size 1 will be broadcast as necessary. Rank of input tensors must match. -*Arguments:* - -|=== -|Argument|Type|Name|Shape|Description - -|Input|in_out_t*|input1|shape1|Input tensor -|Input|in_out_t*|input2|shape2|Input tensor with the same rank as input1 -|Output|in_out_t*|output|shape|Output tensor with broadcast shape if necessary -|=== +include::{generated}/operators/SUB.adoc[] *Operation Function:* @@ -650,17 +374,6 @@ for_each(index in shape) { } ---- -*Supported Data Types:* - -|=== -|Profile|Mode|in_out_t - -|Any|signed 32|int32_t -|MI, MT|fp16|fp16_t -|MI, MT|bf16|bf16_t -|MI, MT|fp32|fp32_t -|=== - ==== TABLE Table lookup operation. @@ -677,15 +390,7 @@ An int16_t to int16_t table lookup can be constructed in TOSA as follows: * Use the TABLE operator to produce a fixed point 16.7 interpolated result * Use RESCALE (in_t=int32_t, out_t=int16_t, scale=1<<14, shift=21) to scale the output to int16_t range (or alternate scale as required) -*Arguments:* - -|=== -|Argument|Type|Name|Shape|Description - -|Input|in_t*|Input|shape|Input tensor -|Input (MT profile) Attribute (BI/MI profiles)|table_t*|table|[TABLE_SIZE]|Lookup table tensor -|Output|out_t*|output|shape|Output tensor -|=== +include::{generated}/operators/TABLE.adoc[] *Operation Function:* @@ -704,13 +409,3 @@ for_each(index in shape) { tensor_write(output, shape, index, result); } ---- - -*Supported Data Types:* - -|=== -|Profile|Mode|in_t|table_t|TABLE_SIZE|out_t - -|Any|signed 8|int8_t|int8_t|256|int8_t -|Any|signed 16|int16_t|int16_t|513|int32_t -|=== - diff --git a/chapters/ewise_ternary.adoc b/chapters/ewise_ternary.adoc index 84fe14d..5cd1409 100644 --- a/chapters/ewise_ternary.adoc +++ b/chapters/ewise_ternary.adoc @@ -13,16 +13,7 @@ Elementwise select of the output based on a condition. -*Arguments:* - -|=== -|Argument|Type|Name|Shape|Description - -|Input|cmp_t|input1|shape1|Input selector tensor -|Input|in_out_t*|input2|shape2|Input value tensor if input1 is True -|Input|in_out_t*|input3|shape3|Input value tensor if input1 is False -|Output|in_out_t*|output|shape|Output tensor of same type as input2 and input3, with broadcast shape if necessary -|=== +include::{generated}/operators/SELECT.adoc[] *Operation Function:* @@ -44,16 +35,3 @@ for_each(index in shape) { tensor_write(output, shape, index, result); } ---- - -*Supported Data Types:* -|=== -|Profile|Mode|cmp_t|in_out_t - -|Any|Boolean|bool_t|bool_t -|Any|signed 8|bool_t|int8_t -|Any|signed 16|bool_t|int16_t -|Any|signed 32|bool_t|int32_t -|MI, MT|bool_t|fp16|fp16_t -|MI, MT|bool_t|bf16|bf16_t -|MI, MT|bool_t|fp32|fp32_t -|=== diff --git a/chapters/ewise_unary.adoc b/chapters/ewise_unary.adoc index 8c88f47..289b657 100644 --- a/chapters/ewise_unary.adoc +++ b/chapters/ewise_unary.adoc @@ -13,14 +13,7 @@ Elementwise absolute value operation -*Arguments:* - -|=== -|Argument|Type|Name|Shape|Description - -|Input|in_out_t*|input1|shape|Input tensor -|Output|in_out_t*|output|shape|Output tensor of same type, size as the input tensor -|=== +include::{generated}/operators/ABS.adoc[] *Floating-point behavior:* |=== @@ -44,27 +37,11 @@ for_each(index in shape) { } ---- -*Supported Data Types:* - -|=== -|Profile|Mode|in_out_t - -|Any|signed 32|int32_t -|MI, MT|floating-point|float_t -|=== - ==== BITWISE_NOT Elementwise bitwise NOT of input tensor. 
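For two's-complement integers, bitwise NOT obeys the identity `~x == -x - 1`, which can be handy when reasoning about the int8_t, int16_t, and int32_t cases.
A quick illustrative check (not from the specification):

[source,c++]
----
#include <cstdint>
#include <iostream>

// Two's-complement identity: ~x == -x - 1, checked over a few int8 values.
int main() {
    for (int32_t x : {-128, -1, 0, 5, 127}) {
        int8_t v = static_cast<int8_t>(x);
        std::cout << int(static_cast<int8_t>(~v)) << " == " << (-x - 1) << "\n";
    }
}
----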
-*Arguments:* - -|=== -|Argument|Type|Name|Shape|Description - -|Input|in_out_t*|input1|shape|Input tensor -|Output|in_out_t*|output|shape|Output tensor of same type, size as the input tensor -|=== +include::{generated}/operators/BITWISE_NOT.adoc[] *Operation Function:* @@ -77,28 +54,11 @@ for_each(index in shape) { } ---- -*Supported Data Types:* - -|=== -|Profile|Mode|in_out_t - -|Any|signed 8|int8_t -|Any|signed 16|int16_t -|Any|signed 32|int32_t -|=== - ==== CEIL Elementwise ceiling operation -*Arguments:* - -|=== -|Argument|Type|Name|Shape|Description - -|Input|in_out_t*|input1|shape|Input tensor -|Output|in_out_t*|output|shape|Output tensor of same type, size as the input tensor -|=== +include::{generated}/operators/CEIL.adoc[] *Floating-point behavior:* |=== @@ -118,26 +78,11 @@ for_each(index in shape) { } ---- -*Supported Data Types:* - -|=== -|Profile|Mode|in_out_t - -|MI, MT|floating-point|float_t -|=== - ==== CLZ Elementwise count leading zeros operation -*Arguments:* - -|=== -|Argument|Type|Name|Shape|Description - -|Input|in_out_t*|input1|shape|Input tensor -|Output|in_out_t*|output|shape|Output tensor of same type, size as the input tensor -|=== +include::{generated}/operators/CLZ.adoc[] *Operation Function:* @@ -150,25 +95,11 @@ for_each(index in shape) { } ---- -*Supported Data Types:* -|=== -|Profile|Mode|in_out_t - -|Any|signed 32|int32_t -|=== - ==== EXP Elementwise e to the x operation -*Arguments:* - -|=== -|Argument|Type|Name|Shape|Description - -|Input|in_out_t*|input1|shape|Input tensor -|Output|in_out_t*|output|shape|Output tensor of same type, size as the input tensor -|=== +include::{generated}/operators/EXP.adoc[] *Floating-point behavior:* |=== @@ -188,28 +119,11 @@ for_each(index in shape) { } ---- -*Supported Data Types:* - -|=== -|Profile|Mode|in_out_t - -|MI, MT|fp16|fp16_t -|MI, MT|bf16|bf16_t -|MI, MT|fp32|fp32_t -|=== - ==== FLOOR Elementwise floor operation -*Arguments:* - -|=== -|Argument|Type|Name|Shape|Description - -|Input|in_out_t*|input1|shape|Input tensor -|Output|in_out_t*|output|shape|Output tensor of same type, size as the input tensor -|=== +include::{generated}/operators/FLOOR.adoc[] *Floating-point behavior:* |=== @@ -229,28 +143,11 @@ for_each(index in shape) { } ---- -*Supported Data Types:* - -|=== -|Profile|Mode|in_out_t - -|MI, MT|fp16|fp16_t -|MI, MT|bf16|bf16_t -|MI, MT|fp32|fp32_t -|=== - ==== LOG Elementwise natural logarithm operation -*Arguments:* - -|=== -|Argument|Type|Name|Shape|Description - -|Input|in_out_t*|input1|shape|Input tensor -|Output|in_out_t*|output|shape|Output tensor of same type, size as the input tensor -|=== +include::{generated}/operators/LOG.adoc[] *Floating-point behavior:* |=== @@ -270,28 +167,11 @@ for_each(index in shape) { } ---- -*Supported Data Types:* - -|=== -|Profile|Mode|in_out_t - -|MI, MT|fp16|fp16_t -|MI, MT|bf16|bf16_t -|MI, MT|fp32|fp32_t -|=== - ==== LOGICAL_NOT Elementwise logical NOT of input. 
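Note that LOGICAL_NOT operates on bool_t values and is distinct from BITWISE_NOT on integers: logically negating any nonzero value yields false, whereas bitwise NOT of a nonzero integer is generally still nonzero.
A tiny illustration of the difference:

[source,c++]
----
#include <iostream>

// Logical vs. bitwise negation: !x collapses any nonzero value to false,
// while ~x flips bits and usually stays nonzero.
int main() {
    int i = 2;
    std::cout << !i << "\n"; // 0 (logical result)
    std::cout << ~i << "\n"; // -3 (bitwise result, not a logical value)
}
----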
-*Arguments:* - -|=== -|Argument|Type|Name|Shape|Description - -|Input|in_out_t*|input1|shape|Input tensor -|Output|in_out_t*|output|shape|Output tensor of same type, size as the input tensor -|=== +include::{generated}/operators/LOGICAL_NOT.adoc[] *Operation Function:* @@ -304,28 +184,11 @@ for_each(index in shape) { } ---- -*Supported Data Types:* - -|=== -|Profile|Mode|in_out_t - -|Any|bool|bool_t -|=== - ==== NEGATE Elementwise negation operation -*Arguments:* - -|=== -|Argument|Type|Name|Shape|Description - -|Input|in_out_t*|input1|shape|Input tensor -|Attribute|in_out_t|input1_zp|-|Input 1 zero point. Must be zero for non-int8 types. -|Attribute|in_out_t|output_zp|-|Output zero point. Must be zero for non-int8 types. -|Output|in_out_t*|output|shape|Output tensor of same type, size as the input tensor -|=== +include::{generated}/operators/NEGATE.adoc[] *Floating-point behavior:* |=== @@ -349,31 +212,11 @@ for_each(index in shape) { } ---- -*Supported Data Types:* - -|=== -|Profile|Mode|in_out_t|acc_t - -|Any|signed 8|int8_t|int32_t -|Any|signed 16|int16_t|int32_t -|Any|signed 32|int32_t|int32_t -|MI, MT|fp16|fp16_t|fp16_t -|MI, MT|bf16|bf16_t|bf16_t -|MI, MT|fp32|fp32_t|fp32_t -|=== - ==== RECIPROCAL Elementwise reciprocal operation. For integer operation, a TABLE should be used with the appropriate ranges. -*Arguments:* - -|=== -|Argument|Type|Name|Shape|Description - -|Input|in_out_t*|input1|shape|Input tensor -|Output|in_out_t*|output|shape|Output tensor of same type, size as the input tensor -|=== +include::{generated}/operators/RECIPROCAL.adoc[] *Floating-point behavior:* |=== @@ -393,28 +236,11 @@ for_each(index in shape) { } ---- -*Supported Data Types:* - -|=== -|Profile|Mode|in_out_t - -|MI, MT|fp16|fp16_t -|MI, MT|bf16|bf16_t -|MI, MT|fp32|fp32_t -|=== - ==== RSQRT Elementwise reciprocal square root operation. For integer operation, a TABLE should be used with the appropriate ranges. -*Arguments:* - -|=== -|Argument|Type|Name|Shape|Description - -|Input|in_out_t*|input1|shape|Input tensor -|Output|in_out_t*|output|shape|Output tensor of same type, size as the input tensor -|=== +include::{generated}/operators/RSQRT.adoc[] *Floating-point behavior:* |=== @@ -439,13 +265,3 @@ for_each(index in shape) { tensor_write(output, shape, index, result); } ---- - -*Supported Data Types:* - -|=== -|Profile|Mode|in_out_t - -|MI, MT|fp16|fp16_t -|MI, MT|bf16|bf16_t -|MI, MT|fp32|fp32_t -|=== diff --git a/chapters/image.adoc b/chapters/image.adoc index d6177e4..2c23e79 100644 --- a/chapters/image.adoc +++ b/chapters/image.adoc @@ -49,17 +49,14 @@ the scale as described in the pseudocode. The [border_y, border_x] values adjust the output size to allow fractional sampling beyond integer input position (IH - 1,IW - 1). 
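As a worked example of the output-size relation that the pseudocode checks, `OH = ((IH - 1) * scale_y_n - offset_y + border_y) / scale_y_d + 1` with the division required to be exact (and likewise for OW).
The sketch below (output_size is a hypothetical helper, with illustrative values) doubles a height-4 input:

[source,c++]
----
#include <iostream>
#include <stdexcept>

// Illustrative output-size helper:
// out = ((in - 1) * scale_n - offset + border) / scale_d + 1,
// where the division must be exact.
int output_size(int in, int scale_n, int scale_d, int offset, int border) {
    int num = (in - 1) * scale_n - offset + border;
    if (num % scale_d != 0) throw std::invalid_argument("inexact division");
    return num / scale_d + 1;
}

int main() {
    // Doubling a height-4 input: scale 2/1, zero offset and border.
    std::cout << output_size(4, 2, 1, 0, 0) << "\n"; // 7
}
----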
-*Arguments:* +include::{generated}/operators/RESIZE.adoc[] +*Resize Modes:* |=== -|Argument|Type|Name|Shape|Description - -|Input|in_t*|input|[N,IH,IW,C]|Input tensor -|Attribute|int16_t *|scale|[4]|[scale_y_n, scale_y_d, scale_x_n, scale_x_d] -|Attribute|int16_t *|offset|[2]|[offset_y, offset_x] -|Attribute|int32_t* |border|[2]|[border_y, border_x] -|Attribute|mode_t|mode|-|BILINEAR or NEAREST -|Output|out_t*|output|[N,OH,OW,C]|Output tensor +|Mode|Description + +|NEAREST|Nearest Neighbor +|BILINEAR|Bilinear interpoloation |=== *Operation Function* @@ -129,25 +126,3 @@ for_each(0 <= n < N, 0 <= oy < OH, 0 <= ox < OW; 0 <= c < C) { } } ---- - -*Supported Data Types:* - -|=== -|Profile|Mode|resize_t|in_t|out_t - -|Any|signed 8, bilinear|int16_t|int8_t|int32_t -|Any|signed 8, nearest |int16_t|int8_t|int8_t -|Any|signed 16, bilinear|int16_t|int16_t|int48_t -|Any|signed 16, nearest |int16_t|int16_t|int16_t -|MI,MT|fp16|fp32_t|fp16_t|fp16_t -|MI,MT|bf16|fp32_t|bf16_t|bf16_t -|MI,MT|fp32|fp32_t|fp32_t|fp32_t -|=== - -*Resize Modes:* -|=== -|Mode|Description - -|NEAREST|Nearest Neighbor -|BILINEAR|Bilinear interpoloation -|=== diff --git a/chapters/reduction.adoc b/chapters/reduction.adoc index 368d82e..cd1db7b 100644 --- a/chapters/reduction.adoc +++ b/chapters/reduction.adoc @@ -13,15 +13,7 @@ Reduce a tensor along the given axis with a logical AND operation -*Arguments:* - -|=== -|Argument|Type|Name|Shape|Description - -|Input|in_out_t*|input|shape1|Input tensor with rank from 1 to 4 -|Attribute|int32_t|axis|-|Axis to reduce, in range from 0 to rank(shape1)-1 -|Output|in_out_t*|output|shape|Output tensor. Same rank as the input tensor. -|=== +include::{generated}/operators/REDUCE_ALL.adoc[] *Operation Function:* @@ -44,27 +36,11 @@ for_each(index in shape1) { } ---- -*Supported Data Types:* - -|=== -|Profile|Mode|in_out_t - -|Any|Boolean|bool_t -|=== - ==== REDUCE_ANY Reduce a tensor along the given axis with a logical OR operation -*Arguments:* - -|=== -|Argument|Type|Name|Shape|Description - -|Input|in_out_t*|input|shape1|Input tensor with rank from 1 to 4 -|Attribute|int32_t|axis|-|Axis to reduce, in range from 0 to rank(shape1)-1 -|Output|in_out_t*|output|shape|Output tensor. Same rank as the input tensor. -|=== +include::{generated}/operators/REDUCE_ANY.adoc[] *Operation Function:* @@ -87,27 +63,11 @@ for_each(index in shape1) { } ---- -*Supported Data Types:* - -|=== -|Profile|Mode|in_out_t - -|Any|Boolean|bool_t -|=== - ==== REDUCE_MAX Reduce a tensor along the given axis with a maximum operation -*Arguments:* - -|=== -|Argument|Type|Name|Shape|Description - -|Input|in_out_t*|input|shape1|Input tensor with rank from 1 to 4 -|Attribute|int32_t|axis|-|Axis to reduce, in range from 0 to rank(shape1)-1 -|Output|in_out_t*|output|shape|Output tensor. Same rank as the input tensor. -|=== +include::{generated}/operators/REDUCE_MAX.adoc[] *Operation Function:* @@ -128,31 +88,11 @@ for_each(index in shape1) { } ---- -*Supported Data Types:* - -|=== -|Profile|Mode|in_out_t - -|Any|signed 8|int8_t -|Any|signed 16|int16_t -|Any|signed 32|int32_t -|MI, MT|fp16|fp16_t -|MI, MT|bf16|bf16_t -|MI, MT|fp32|fp32_t -|=== - ==== REDUCE_MIN Reduce a tensor along the given axis with a minimum operation -*Arguments:* -|=== -|Argument|Type|Name|Shape|Description - -|Input|in_out_t*|input|shape1|Input tensor with rank from 1 to 4 -|Attribute|int32_t|axis|-|Axis to reduce, in range from 0 to rank(shape1)-1 -|Output|in_out_t*|output|shape|Output tensor. Same rank as the input tensor. 
-|=== +include::{generated}/operators/REDUCE_MIN.adoc[] *Operation Function:* @@ -173,32 +113,11 @@ for_each(index in shape1) { } ---- -*Supported Data Types:* - -|=== -|Profile|Mode|in_out_t - -|Any|signed 8|int8_t -|Any|signed 16|int16_t -|Any|signed 32|int32_t -|MI, MT|fp16|fp16_t -|MI, MT|bf16|bf16_t -|MI, MT|fp32|fp32_t -|=== - ==== REDUCE_PRODUCT Reduce a tensor along the given axis by computing the product of the axis. -*Arguments:* - -|=== -|Argument|Type|Name|Shape|Description - -|Input|in_out_t*|input|shape1|Input tensor with rank from 1 to 4 -|Attribute|int32_t|axis|-|Axis to reduce, in range from 0 to rank(shape1)-1 -|Output|in_out_t*|output|shape|Output tensor. Same rank as the input tensor. -|=== +include::{generated}/operators/REDUCE_PRODUCT.adoc[] *Operation Function:* @@ -219,29 +138,11 @@ for_each(index in shape1) { } ---- -*Supported Data Types:* - -|=== -|Profile|Mode|in_out_t - -|MI, MT|fp16|fp16_t -|MI, MT|bf16|bf16_t -|MI, MT|fp32|fp32_t -|=== - ==== REDUCE_SUM Reduce a tensor along the given axis by computing the sum of the axis. -*Arguments:* - -|=== -|Argument|Type|Name|Shape|Description - -|Input|in_out_t*|input|shape1|Input tensor with rank from 1 to 4 -|Attribute|int32_t|axis|-|Axis to reduce, in range from 0 to rank(shape1)-1 -|Output|in_out_t*|output|shape|Output tensor. Same rank as the input tensor. -|=== +include::{generated}/operators/REDUCE_SUM.adoc[] *Operation Function:* @@ -261,15 +162,3 @@ for_each(index in shape1) { tensor_write(output, shape, out_index, state); } ---- - -*Supported Data Types:* - -|=== -|Profile|Mode|in_out_t - -|Any|signed 32|int32_t -|MI, MT|fp16|fp16_t -|MI, MT|bf16|bf16_t -|MI, MT|fp32|fp32_t -|=== - diff --git a/chapters/scatter_gather.adoc b/chapters/scatter_gather.adoc index 524bfd3..43cc047 100644 --- a/chapters/scatter_gather.adoc +++ b/chapters/scatter_gather.adoc @@ -14,15 +14,7 @@ Generate a tensor for which each element in the output is a subtensor of the values tensor based on the indices. N is the number of batches, W the number of indices in each batch, K the range of each index and C the number data channels for each index. -*Arguments:* - -|=== -|Argument|Type|Name|Shape|Description - -|Input|value_t*|values|[N,K,C]|3D value tensor -|Input|index_t*|indices|[N,W]|2D index tensor -|Output|value_t*|output|[N,W,C]|3D output tensor -|=== +include::{generated}/operators/GATHER.adoc[] *Operation Function:* @@ -36,17 +28,6 @@ for_each(0 <= n < N, 0 <= w < W, 0 <= c < C) { } ---- -*Supported Data Types:* - -|=== -|Profile|Mode|index_t|value_t - -|Any|signed 8|int32_t|int8_t -|Any|signed 16|int32_t|int16_t -|Any|signed 32|int32_t|int32_t -|MI,MT|float|int32_t|float -|=== - ==== SCATTER The values_out tensor is set to the values_in tensor with data modified as follows: data from the input tensor is inserted at the positions specified by the indices tensor. @@ -54,20 +35,7 @@ N is the number of batches, W the number of indices in each batch, K the range o It is not permitted to repeat the same output index within a single SCATTER operation and so each output index occurs at most once. In use cases that require multiple updates to the same output position, these must be decomposed into multiple SCATTER operations. 
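One way to perform such a decomposition is to partition the index list into passes that are each duplicate-free, feeding the values_out of one SCATTER (together with the matching slice of the input data) into the next.
An illustrative sketch of the partitioning step only (split_into_unique_passes is hypothetical, not part of TOSA):

[source,c++]
----
#include <cstddef>
#include <iostream>
#include <set>
#include <vector>

// Partition an index list into passes with no repeated index per pass;
// each pass can then be issued as its own SCATTER.
std::vector<std::vector<int>> split_into_unique_passes(const std::vector<int>& indices) {
    std::vector<std::vector<int>> passes;
    std::vector<std::set<int>> seen;
    for (int idx : indices) {
        std::size_t p = 0;
        while (p < passes.size() && seen[p].count(idx)) ++p; // first pass free for idx
        if (p == passes.size()) { passes.emplace_back(); seen.emplace_back(); }
        passes[p].push_back(idx);
        seen[p].insert(idx);
    }
    return passes;
}

int main() {
    for (const auto& pass : split_into_unique_passes({0, 2, 2, 1, 2})) {
        for (int i : pass) std::cout << i << ' '; // passes: "0 2 1", "2", "2"
        std::cout << '\n';
    }
}
----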
-*Arguments:* - -|=== -|Argument|Type|Name|Shape|Description - -|Input|value_t*|values_in|[N,K,C]|3D values in tensor -|Input|index_t*|indices|[N,W]|2D index tensor -|Input|value_t*|input|[N,W,C]|3D input tensor -|Output|value_t*|values_out|[N,K,C]|3D values out tensor -|=== - -*Quantization Parameters:* - -None +include::{generated}/operators/SCATTER.adoc[] *Operation Function:* @@ -96,16 +64,3 @@ for_each(0 <= n < N, 0 <= w < W, 0 <= c < C) { output_modified[n,k,c] = true; } ---- - -*Supported Data Types:* - -|=== -|Profile|Mode|index_t|value_t - -|Any|signed 8|int32_t|int8_t -|Any|signed 16|int32_t|int16_t -|Any|signed 32|int32_t|int32_t -|MI,MT|fp16|int32_t|fp16_t -|MI,MT|bf16|int32_t|bf16_t -|MI,MT|fp32|int32_t|fp32_t -|=== diff --git a/chapters/tensor_ops.adoc b/chapters/tensor_ops.adoc index fb657f7..4c9a25b 100644 --- a/chapters/tensor_ops.adoc +++ b/chapters/tensor_ops.adoc @@ -13,15 +13,7 @@ This returns the index with the largest value across the given axis of the input tensor. -*Arguments* - -|=== -|Argument|Type|Name|Shape|Description - -|Input|in_t*|input|shape1|Input tensor with rank from 1 to 4 -|Attribute|int32_t|axis|-|Axis in range from 0 to rank(shape1)-1 -|Output|out_t*|output|shape|Output tensor, with rank = rank(shape1)-1 -|=== +include::{generated}/operators/ARGMAX.adoc[] *Operation Function:* @@ -54,36 +46,13 @@ for_each(left_index in left_shape) { } ---- -*Supported Data Types:* - -|=== -|Profile|Mode|in_t|out_t - -|Any|signed 8|int8_t|int32_t -|Any|signed 16|int16_t|int32_t -|MI, MT|fp16|fp16_t|int32_t -|MI, MT|bf16|bf16_t|int32_t -|MI, MT|fp32|fp32_t|int32_t -|=== - ==== AVG_POOL2D This performs an average pooling over the given input tensor. A sliding window of size given by is passed over the input tensor, with the mean value being placed in the output tensor. When calculating the average, only the number of valid input tensor values, but not padding, are used to calculate the divisor. -*Arguments:* - -|=== -|Argument|Type|Name|Shape|Description -|Input|in_out_t*|input|[N,IH,IW,C]|Input tensor 4D -|Attribute|int32_t*|kernel|[2]|[kernel_y, kernel_x] -|Attribute|int32_t*|stride|[2]|[stride_y, stride_x] -|Attribute|int32_t*|pad|[4]|[pad_top, pad_bottom, pad_left, pad_right] -|Attribute|in_out_t|input_zp|-|Input tensor zero point. Must be zero for non-int8 types. -|Attribute|in_out_t|output_zp|-|Output tensor zero point. Must be zero for non-int8 types. -|Output|in_out_t*|output|[N,OH,OW,C]|Output tensor 4D -|=== +include::{generated}/operators/AVG_POOL2D.adoc[] *Operation Function:* @@ -130,37 +99,11 @@ for_each(0 <= n < N, 0 <= oy < OH, 0 <= ox < OW, 0 <= c < C ) { } ---- -*Supported Data Types:* -|=== -|Profile|Mode|in_out_t|acc_t - -|Any|signed 8|int8_t|int32_t -|Any|signed 16|int16_t|int32_t -|MI, MT|fp16 with fp16 accumulate|fp16_t|fp16_t -|MI, MT|fp16 with fp32 accumulate|fp16_t|fp32_t -|MI, MT|bf16 with fp32 accumulate|bf16_t|fp32_t -|MI, MT|fp32|fp32_t|fp32_t -|=== - ==== CONV2D Performs a 2D convolution over the given tensor input, using the weight tensor. -*Arguments:* - -|=== -|Argument|Type|Name|Shape|Description - -|Input|in_t*|input|[N,IH,IW,IC]|Input tensor -|Input (MT profile) Attribute (BI/MI profiles)|weight_t*|weight|[OC,KH,KW,IC]|Weight kernel size KH x KW -|Input (MT profile) Attribute (BI/MI profiles)|out_t*|bias|[OC]|Per output channel bias data. 
-|Attribute|int32_t*|pad|[4]|[pad_top, pad_bottom, pad_left, pad_right] -|Attribute|int32_t*|stride|[2]|[stride_y, stride_x] -|Attribute|int32_t*|dilation|[2]|[dilation_y, dilation_x] -|Attribute|in_t|input_zp|-|Input tensor zero point. Must be zero for non-int8 types. -|Attribute|weight_t|weight_zp|-|Weight zero point. Must be zero for non-int8 types. -|Output|out_t*|output|[N,OH,OW,OC]|Output tensor -|=== +include::{generated}/operators/CONV2D.adoc[] *Operation Function* @@ -195,39 +138,11 @@ for_each(0 <= n < N, 0 <= oy < OH, 0 <= ox < OW; 0 <= oc < OC) { } ---- -*Supported Data Types:* - -|=== -|Profile|Mode|in_t|weight_t|out_t - -|Any|signed 8x8|int8_t|int8_t|int32_t -|Any|signed 8x4|int8_t|int4_t|int32_t -|Any|signed 16x8|int16_t|int8_t|int48_t -|MI, MT|fp16 with fp16 accumulate|fp16_t|fp16_t|fp16_t -|MI, MT|fp16 with fp32 accumulate|fp16_t|fp16_t|fp32_t -|MI, MT|bf16 with fp32 accumulate|bf16_t|bf16_t|fp32_t -|MI, MT|fp32|fp32_t|fp32_t|fp32_t -|=== - ==== CONV3D Performs a 3D convolution over the given input tensor. -*Arguments:* - -|=== -|Argument|Type|Name|Shape|Description - -|Input|in_t*|input|[N,ID,IH,IW,IC]|Input tensor -|Input (MT profile) Attribute (BI/MI profiles)|weight_t*|weight|[OC,KD,KH,KW,IC]|Weight kernel size KDxKHxKW -|Input (MT profile) Attribute (BI/MI profiles)|out_t*|bias|[OC]|Per output channel bias data. -|Attribute|int32_t*|pad|[6]|[pad_d0, pad_d1, pad_top, pad_bottom, pad_left, pad_right] -|Attribute|int32_t*|stride|[3]|[stride_d, stride_y, stride_x] -|Attribute|int32_t*|dilation|[3]|[dilation_d, dilation_y, dilation_x] -|Attribute|in_t|input_zp|-|Input tensor zero point. Must be zero for non-int8 types. -|Attribute|weight_t|weight_zp|-|Weight zero point. Must be zero for non-int8 types. -|Output|out_t*|output|[N,OD,OH,OW,OC]|Output tensor -|=== +include::{generated}/operators/CONV3D.adoc[] *Operation Function* @@ -265,40 +180,11 @@ for_each(0 <= n < N, 0 <= od < OD, 0 <= oy < OH, 0 <= ox < OW; 0 <= oc < OC) { } ---- -*Supported Data Types:* - -|=== -|Profile|Mode|in_t|weight_t|out_t - -|Any|signed 8x8|int8_t|int8_t|int32_t -|Any|signed 8x4|int8_t|int4_t|int32_t -|Any|signed 16x8|int16_t|int8_t|int48_t -|MI, MT|fp16 with fp16 accumulate|fp16_t|fp16_t|fp16_t -|MI, MT|fp16 with fp32 accumulate|fp16_t|fp16_t|fp32_t -|MI, MT|bf16 with fp32 accumulate|bf16_t|bf16_t|fp32_t -|MI, MT|fp32|fp32_t|fp32_t|fp32_t -|=== - - ==== DEPTHWISE_CONV2D Performs 2D convolutions separately over each channel of the given tensor input, using the weight tensor. -*Arguments:* - -|=== -|Argument|Type|Name|Shape|Description - -|Input|in_t*|input|[N,H,W,C]|Input tensor -|Input (MT profile) Attribute (BI/MI profiles)|weight_t*|weight|[KH,KW,C,M]|Weight kernel size KH x KW -|Input (MT profile) Attribute (BI/MI profiles)|out_t*|bias|[C*M]|Per output channel bias data. -|Attribute|int32_t*|pad|[4]|[pad_top, pad_bottom, pad_left, pad_right] -|Attribute|int32_t*|stride|[2]|[stride_y, stride_x] -|Attribute|int32_t*|dilation|[2]|[dilation_y, dilation_x] -|Attribute|in_t|input_zp|-|Input tensor zero point. Must be zero for non-int8 types. -|Attribute|weight_t|weight_zp|-|Weight zero point. Must be zero for non-int8 types. 
-|Output|out_t*|output|[N,OH,OW,C*M]|Output tensor -|=== +include::{generated}/operators/DEPTHWISE_CONV2D.adoc[] *Operation Function* @@ -333,20 +219,6 @@ for_each(0 <= n < N, 0 <= oy < OH, 0 <= ox < OW; 0 <= c < C, 0 <= m < M) { } ---- -*Supported Data Types:* - -|=== -|Profile|Mode|in_t|weight_t|out_t - -|Any|signed 8x8|int8_t|int8_t|int32_t -|Any|signed 8x4|int8_t|int4_t|int32_t -|Any|signed 16x8|int16_t|int8_t|int48_t -|MI, MT|fp16 with fp16 accumulate|fp16_t|fp16_t|fp16_t -|MI, MT|fp16 with fp32 accumulate|fp16_t|fp16_t|fp32_t -|MI, MT|bf16 with fp32 accumulate|bf16_t|bf16_t|fp32_t -|MI, MT|fp32|fp32_t|fp32_t|fp32_t -|=== - ==== FFT2D Performs a batched complex 2D Fast Fourier Transform over the input. @@ -364,17 +236,7 @@ image::forward_fft2d.svg["forward FFT definition", align="center"] .Calculation for the inverse FFT2D calculation (inverse=true) image::inverse_fft2d.svg["inverse FFT definition", align="center"] -*Arguments:* - -|=== -|Argument|Type|Name|Shape|Description - -|Input|in_out_t*|input_real|[N,H,W]|Real part of the complex input. H,W must be powers of two. -|Input|in_out_t*|input_imag|[N,H,W]|Imaginary part of the complex input. H,W must be powers of two. -|Attribute|bool_t|inverse|-|false for forward FFT, true for inverse FFT -|Output|in_out_t*|output_real|[N,H,W]|Real part of the complex output -|Output|in_out_t*|output_imag|[N,H,W]|Imaginary part of the complex output. -|=== +include::{generated}/operators/FFT2D.adoc[] *Operation Function* @@ -404,30 +266,11 @@ for_each(0 <= n < N, 0 <= oy < H, 0 <= ox < W) { } ---- -*Supported Data Types:* - -|=== -|Profile|Mode|in_out_t - -|MI,MT|fp32_t|fp32_t -|=== - ==== FULLY_CONNECTED Performs a fully connected network. -*Arguments:* - -|=== -|Argument|Type|Name|Shape|Description - -|Input|in_t*|input|[N,IC]|Input tensor -|Attribute|weight_t*|weight|[OC,IC]|Weights -|Attribute|out_t*|bias|[OC]|Per output channel bias data. -|Attribute|in_t|input_zp|-|Input tensor zero point. Must be zero for non-int8 types. -|Attribute|weight_t|weight_zp|-|Weight zero point. Must be zero for non-int8 types. -|Output|out_t*|output|[N,OC]|Output tensor -|=== +include::{generated}/operators/FULLY_CONNECTED.adoc[] *Operation Function* @@ -449,34 +292,11 @@ for_each(0 <= n < N, 0 <= oc < OC) { } ---- -*Supported Data Types:* - -|=== -|Profile|Mode|in_t|weight_t|out_t - -|Any|signed 8x8|int8_t|int8_t|int32_t -|Any|signed 8x4|int8_t|int4_t|int32_t -|Any|signed 16x8 |int16_t|int8_t|int48_t -|MI, MT|fp16 with fp16 accumulate|fp16_t|fp16_t|fp16_t -|MI, MT|fp16 with fp32 accumulate|fp16_t|fp16_t|fp32_t -|MI, MT|bf16 with fp32 accumulate|bf16_t|bf16_t|fp32_t -|MI, MT|fp32|fp32_t|fp32_t|fp32_t -|=== - ==== MATMUL -Performs two dimensional matrix multiplications. This allows both inputs to be activations, rather than reserving weights as an attribute in the FULLY_CONNECTED operator. - -*Arguments:* -|=== -|Argument|Type|Name|Shape|Description +Performs two dimensional matrix multiplications. This allows both inputs to be activations, rather than reserving weights as an attribute in the FULLY_CONNECTED operator. -|Input|in_t*|A|[N,H,C]|Input tensor A, N matrices of size HxC -|Input|in_t*|B|[N,C,W]|Input tensor B, N matrices of size CxW -|Attribute|in_t|A_zp|-|Input tensor A zero point. Must be zero for non-int8 types. -|Attribute|in_t|B_zp|-|Input tensor B zero point. Must be zero for non-int8 types. 
-|Output|out_t*|output|[N,H,W]|Output tensor, N matrices of size HxW -|=== +include::{generated}/operators/MATMUL.adoc[] *Operation Function* @@ -496,33 +316,11 @@ for_each(0 <= n < N, 0 <= h < H, 0 <= w < W) { } ---- -*Supported Data Types:* - -|=== -|Profile|Mode|in_t|out_t - -|Any|signed 8x8|int8_t|int32_t -|Any|signed 16x16|int16_t|int48_t -|MI, MT|fp16 with fp16 accumulate|fp16_t|fp16_t -|MI, MT|fp16 with fp32 accumulate|fp16_t|fp32_t -|MI, MT|bf16 with fp32 accumulate|bf16_t|fp32_t -|MI, MT|fp32|fp32_t|fp32_t -|=== - ==== MAX_POOL2D -This performs a max pooling over the given input tensor. A sliding window of size given by is passed over the input tensor, with the maximum value being placed in the output tensor. - -*Arguments:* -|=== -|Argument|Type|Name|Shape|Description +This performs a max pooling over the given input tensor. A sliding window of size given by is passed over the input tensor, with the maximum value being placed in the output tensor. -|Input|in_out_t*|input|[N,IH,IW,C]|Input tensor 4D -|Attribute|int32_t*|kernel|[2]|[kernel_y, kernel_x] -|Attribute|int32_t*|stride|[2]|[stride_y, stride_x] -|Attribute|int32_t*|pad|[4]|[pad_top, pad_bottom, pad_left, pad_right] -|Output|in_out_t*|output|[N,OH,OW,C]|Output tensor 4D -|=== +include::{generated}/operators/MAX_POOL2D.adoc[] *Operation Function:* @@ -554,18 +352,6 @@ for_each(0 <= n < N, 0 <= oy < H, 0 <= ox < W, 0 <= c < C ) { } ---- -*Supported Data Types:* - -|=== -|Profile|Mode|in_out_t - -|Any|signed 8|int8_t -|Any|16-bit|int16_t -|MI, MT|fp16|fp16_t -|MI, MT|bf16|bf16_t -|MI, MT|fp32|fp32_t -|=== - ==== RFFT2D Performs a batched 2D real-valued Fast Fourier Transform over the input where the input tensor consists of real values producing complex valued output. @@ -575,15 +361,7 @@ Imaginary values with locations h=0,H/2 or w=0,W/2 are zero. image::forward_fft2d.svg["forward FFT definition", align="center"] -*Arguments:* - -|=== -|Argument|Type|Name|Shape|Description - -|Input|in_out_t*|input|[N,H,W]|Real input. H,W must be powers of two. -|Output|in_out_t*|output_real|[N,H/2 + 1,W/2 + 1]|Real part of the complex output -|Output|in_out_t*|output_imag|[N,H/2 + 1,W/2 + 1]|Imaginary part of the complex output. -|=== +include::{generated}/operators/RFFT2D.adoc[] *Operation Function* @@ -606,34 +384,11 @@ for_each(0 <= n < N, 0 <= oy < H/2 + 1, 0 <= ox < W/2 + 1) { } ---- -*Supported Data Types:* - -|=== -|Profile|Mode|in_out_t - -|MI,MT|fp32_t|fp32_t -|=== - - ==== TRANSPOSE_CONV2D Performs a 2D transposed convolution over the given tensor input, using the weights tensor. -*Arguments:* - -|=== -|Argument|Type|Name|Shape|Description - -|Input|in_t*|input|[N,IH,IW,IC]|Input tensor -|Input (MT profile) Attribute (BI/MI profiles)|weight_t*|weight|[OC,KH,KW,IC]|Weight kernel size KH x KW -|Input (MT profile) Attribute (BI/MI profiles)|out_t*|bias|[OC]|Per output channel bias data. -|Attribute|int32_t*|out_pad|[4]|[out_pad_top, out_pad_bottom, out_pad_left, out_pad_right] -|Attribute|int32_t*|stride|[2]|[stride_y, stride_x] -|Attribute|int32_t*|out_shape|[4]|[N,OH,OW,OC] -|Attribute|in_t|input_zp|-|Input tensor zero point. Must be zero for non-int8 types. -|Attribute|weight_t|weight_zp|-|Weight zero point. Must be zero for non-int8 types. 
-|Output|out_t*|output|[N,OH,OW,OC]|Output tensor
-|===
+include::{generated}/operators/TRANSPOSE_CONV2D.adoc[]
 
 *Operation Function*
 
@@ -665,17 +420,3 @@ for_each(0 <= n < N, 0 <= iy < IH, 0 <= ix < IW, 0 <= oc < OC,
 }
 }
 ----
-
-*Supported Data Types:*
-
-|===
-|Profile|Mode|in_t|weight_t|out_t
-
-|Any|signed 8x8|int8_t|int8_t|int32_t
-|Any|signed 8x4|int8_t|int4_t|int32_t
-|Any|signed 16x8|int16_t|int8_t|int48_t
-|MI, MT|fp16 with fp16 accumulate|fp16_t|fp16_t|fp16_t
-|MI, MT|fp16 with fp32 accumulate|fp16_t|fp16_t|fp32_t
-|MI, MT|bf16 with fp32 accumulate|bf16_t|bf16_t|fp32_t
-|MI, MT|fp32|fp32_t|fp32_t|fp32_t
-|===
diff --git a/chapters/type_conversion.adoc b/chapters/type_conversion.adoc
index 4a5349b..90452a3 100644
--- a/chapters/type_conversion.adoc
+++ b/chapters/type_conversion.adoc
@@ -13,14 +13,7 @@
 
 Casts a tensor from one data type to another.
 
-*Arguments:*
-
-|===
-|Argument|Type|Name|Shape|Description
-
-|Input|in_t*|input|shape|Input tensor
-|Output|out_t*|output|shape|Output tensor
-|===
+include::{generated}/operators/CAST.adoc[]
 
 *Operation Function:*
 
@@ -46,62 +39,11 @@ for_each(index in shape) {
 }
 ----
 
-*Supported Data Types:*
-
-|===
-|Profile|Mode|in_t|out_t
-
-|Any|bool to signed 8|bool_t|int8_t
-|Any|bool to signed 16|bool_t|int16_t
-|Any|bool to signed 32|bool_t|int32_t
-|Any|signed 8 to bool|int8_t|bool_t
-|Any|signed 8 to signed 16|int8_t|int16_t
-|Any|signed 8 to signed 32|int8_t|int32_t
-|MI, MT|signed 8 to fp16|int8_t|fp16_t
-|MI, MT|signed 8 to bf16|int8_t|bf16_t
-|MI, MT|signed 8 to fp32|int8_t|fp32_t
-|Any|signed 16 to bool|int16_t|bool_t
-|Any|signed 16 to signed 8|int16_t|int8_t
-|Any|signed 16 to signed 32|int16_t|int32_t
-|MI, MT|signed 16 to fp16|int16_t|fp16_t
-|MI, MT|signed 16 to bf16|int16_t|bf16_t
-|MI, MT|signed 16 to fp32|int16_t|fp32_t
-|Any|signed 32 to bool|int32_t|bool_t
-|Any|signed 32 to signed 8|int32_t|int8_t
-|Any|signed 32 to signed 16|int32_t|int16_t
-|MI, MT|signed 32 to fp16|int32_t|fp16_t
-|MI, MT|signed 32 to bf16|int32_t|bf16_t
-|MI, MT|signed 32 to fp32|int32_t|fp32_t
-|MI, MT|fp16 to signed 8|fp16_t|int8_t
-|MI, MT|fp16 to signed 16|fp16_t|int16_t
-|MI, MT|fp16 to signed 32|fp16_t|int32_t
-|MI, MT|bf16 to signed 8|bf16_t|int8_t
-|MI, MT|bf16 to signed 16|bf16_t|int16_t
-|MI, MT|bf16 to signed 32|bf16_t|int32_t
-|MI, MT|fp32 to signed 8|fp32_t|int8_t
-|MI, MT|fp32 to signed 16|fp32_t|int16_t
-|MI, MT|fp32 to signed 32|fp32_t|int32_t
-|===
-
 ==== RESCALE
 
 Rescale quantized values into a new domain. This function scales by the factor multiplier * 2^-shift^.
 
-*Arguments:*
-
-|===
-|Argument|Type|Name|Shape|Description
-
-|Input|in_t*|input|shape|Input tensor from 1 to 4 dims
-|Output|out_t*|output|shape|Output tensor with the same shape as input
-|Attribute|in_t|input_zp|-|Input tensor zero point. Must be zero for non-int8 types.
-|Attribute|out_t|output_zp|-|Output tensor zero point. Must be zero for non-int8 types.
-|Input (MT profile) Attribute (BI/MI profiles)|mul_t*|multiplier|[NC]|Scaling multiplier array -|Input (MT profile) Attribute (BI/MI profiles)|uint6_t*|shift|[NC]|Scaling shift array -|Attribute|bool_t|scale32|-|if (scale32) mul_t=int32_t else mul_t=int16_t -|Attribute|bool_t|double_round|-|Select double round mode -|Attribute|bool_t|per_channel|-|if (per_channel) NC=shape[dims-1] else NC=1 -|=== +include::{generated}/operators/RESCALE.adoc[] *Operation Function:* @@ -131,28 +73,3 @@ for_each(index in shape) { tensor_write(output, shape, index, result); } ---- - -*Supported Data Types:* - -|=== -|Profile|Mode|in_t|out_t - -|Any|signed 8 to signed 8|int8_t|int8_t -|Any|signed 8 to signed 16|int8_t|int16_t -|Any|signed 8 to signed 32|int8_t|int32_t -|Any|signed 8 to unsigned 8|int8_t|uint8_t -|Any|signed 16 to signed 8|int16_t|int8_t -|Any|signed 16 to signed 16|int16_t|int16_t -|Any|signed 16 to signed 32|int16_t|int32_t -|Any|signed 16 to unsigned 8|int16_t|uint8_t -|Any|signed 16 to unsigned 16|int16_t|uint16_t -|Any|signed 32 to signed 8|int32_t|int8_t -|Any|signed 32 to signed 16|int32_t|int16_t -|Any|signed 32 to signed 32|int32_t|int32_t -|Any|signed 48 to signed 8|int48_t|int8_t -|Any|signed 48 to signed 16|int48_t|int16_t -|Any|signed 48 to signed 32|int48_t|int32_t -|Any|unsigned 8 to signed 8|uint8_t|int8_t -|Any|unsigned 8 to signed 16|uint8_t|int16_t -|Any|unsigned 16 to signed 16|uint16_t|int16_t -|=== diff --git a/tools/dictionary.dic b/tools/dictionary.dic index b062ac2..5f53b0c 100644 --- a/tools/dictionary.dic +++ b/tools/dictionary.dic @@ -1,5 +1,6 @@ personal_ws-1.1 en 500 activations +adoc ARGMAX AsciiDoc BILINEAR diff --git a/tools/genspec.py b/tools/genspec.py new file mode 100755 index 0000000..c871b75 --- /dev/null +++ b/tools/genspec.py @@ -0,0 +1,68 @@ +#!/usr/bin/env python3 +import os + +import tosa + + +class TOSASpecAsciidocGenerator: + def __init__(self, spec): + self.spec = spec + + def generate_operator(self, op, file): + file.write("\n*Arguments:*\n") + file.write("\n|===\n") + file.write("|Argument|Type|Name|Shape|Description\n\n") + for arg in op.arguments: + cats = arg.categories + if len(cats) > 1: + cattext = "" + sep = "" + for cat in cats: + proflist = "/".join(cat.profiles) + profcaption = "profiles" if len(cat.profiles) > 1 else "profile" + cattext += sep + cat.name.title() + f" ({proflist} {profcaption})" + sep = " " + else: + cattext = cats[0].name.title() + file.write( + f"|{cattext}|{arg.type}|{arg.name}|{arg.shape}|{arg.description}\n" + ) + file.write("|===\n") + if op.typesupports: + file.write("\n*Supported Data Types:*\n\n") + file.write("|===\n") + header = "|Profile|Mode" + for ty in op.types: + header += f"|{ty}" + file.write(header) + file.write("\n\n") + for tysup in op.typesupports: + profile = ", ".join(tysup.profiles) if tysup.profiles else "Any" + entry = f"|{profile}|{tysup.mode}" + for ty in op.types: + entry += f"|{tysup.tymap[ty]}" + entry += "\n" + file.write(entry) + file.write("|===\n") + + def generate(self, outdir): + opdir = os.path.join(outdir, "operators") + os.makedirs(opdir, exist_ok=True) + for group in self.spec.operatorgroups: + for op in group.operators: + with open(os.path.join(opdir, op.name + ".adoc"), "w") as f: + self.generate_operator(op, f) + + +if __name__ == "__main__": + import argparse + + parser = argparse.ArgumentParser() + parser.add_argument("--xml", required=True, help="Path to specification XML") + parser.add_argument("--outdir", required=True, help="Output directory") + args = 
parser.parse_args() + + spec = tosa.TOSASpec(args.xml) + + generator = TOSASpecAsciidocGenerator(spec) + generator.generate(args.outdir) diff --git a/tools/get_descriptions.py b/tools/get_descriptions.py index beded87..0a39a19 100755 --- a/tools/get_descriptions.py +++ b/tools/get_descriptions.py @@ -1,5 +1,4 @@ #!/usr/bin/env python3 - # Copyright (c) 2022, ARM Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -13,10 +12,8 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - # Script to pull the descriptions out of the specification so that # they can be run through a spellcheck with less noise - import argparse import re @@ -40,17 +37,17 @@ for name in args.filenames: continue if not in_description: # Look for the start of an operator - if re.match(r'^===', text): + if re.match(r"^===", text): in_description = True print(text) else: # Stop when we get to a subsection like *Arguments* # or pseudocode in a [source] section. Spellcheck is # not useful there - if re.match(r'[\[\*]', text): + if re.match(r"[\[\*]", text): in_description = False # skip comments - elif re.match(r'\w*\/\/', text): + elif re.match(r"\w*\/\/", text): continue else: print(text) diff --git a/tools/tosa.py b/tools/tosa.py new file mode 100644 index 0000000..87b4f1a --- /dev/null +++ b/tools/tosa.py @@ -0,0 +1,97 @@ +import re +import xml.etree.ElementTree as ET + + +class TOSAOperatorArgumentCategory: + def __init__(self, name, profiles=None): + self.name = name + self.profiles = profiles + + +class TOSAOperatorArgument: + def __init__(self, name, description, categories, ty, shape): + self.name = name + self.description = description + self.categories = categories + self.type = ty + self.shape = shape + + +class TOSAOperatorDataTypeSupport: + def __init__(self, mode, tymap, profiles=None): + self.mode = mode + self.tymap = tymap + self.profiles = profiles + + +class TOSAOperator: + def __init__(self, name, arguments, types, typesupports): + self.name = name + self.arguments = arguments + self.types = types + self.typesupports = typesupports + + +class TOSAOperatorGroup: + def __init__(self, name, operators): + self.name = name + self.operators = operators + + +class TOSASpec: + def __init__(self, xmlpath): + tree = ET.parse(xmlpath) + self.xmlroot = tree.getroot() + self.operatorgroups = [] + self.__load_spec() + + def __load_spec(self): + for group in self.xmlroot.findall("./operators/operatorgroup"): + self.operatorgroups.append(self.__load_operator_group(group)) + + def __load_operator_group(self, group): + name = group.get("name") + operators = [] + for op in group.findall("operator"): + operators.append(self.__load_operator(op)) + return TOSAOperatorGroup(name, operators) + + def __load_operator(self, op): + name = op.find("name").text + args = [] + types = [] + typesupports = [] + for arg in op.findall("arguments/argument"): + args.append(self.__load_operator_argument(arg)) + + # TODO add pseudo-code to operator object? 
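+        # One hypothetical shape for that TODO, assuming a future
+        # <pseudocode> child element on each operator (not present in the
+        # current schema):
+        #     pc = op.find("pseudocode")
+        #     pseudocode = pc.text.strip() if pc is not None else None
+        # The value could then be stored on TOSAOperator so that genspec.py
+        # could also emit the *Operation Function* sections.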
+
+        for ty in op.findall("types/type"):
+            types.append(ty.get("name"))
+
+        for tysup in op.findall("typesupport"):
+            tsmode = tysup.get("mode")
+            tsmap = {}
+            profiles = tysup.findall("profile")
+            tsprofiles = []
+            for p in profiles:
+                tsprofiles.append(p.get("name"))
+            for ty in types:
+                tsmap[ty] = tysup.get(ty)
+            typesupports.append(TOSAOperatorDataTypeSupport(tsmode, tsmap, tsprofiles))
+        return TOSAOperator(name, args, types, typesupports)
+
+    def __load_operator_argument(self, arg):
+        name = arg.get("name")
+        desc = arg.find("description").text.strip()
+        argcats = []
+        argtype = arg.get("type")
+        shape = arg.get("shape")
+
+        cats = re.findall(
+            r"(input|output|attribute)\(?([A-Z,]+)?\)?", arg.get("category")
+        )
+        for cat in cats:
+            argcats.append(TOSAOperatorArgumentCategory(cat[0], cat[1].split(",")))
+
+        return TOSAOperatorArgument(name, desc, argcats, argtype, shape)
diff --git a/tosa.xml b/tosa.xml
new file mode 100644
index 0000000..8900721
--- /dev/null
+++ b/tosa.xml
@@ -0,0 +1,2160 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Machine-readable TOSA operator definitions. A profiles block declares
+  Base Inference (BI), Main Inference (MI), and Main Training (MT). Each
+  <operator> within an <operatorgroup> carries a <name>, an <arguments>
+  list giving every argument's category, type, name, shape, and
+  description, a <types> list, and <typesupport> entries naming the
+  supported data-type modes and, where restricted, the applicable
+  profiles. Operators defined: ARGMAX, AVG_POOL2D, CONV2D, CONV3D,
+  DEPTHWISE_CONV2D, FFT2D, FULLY_CONNECTED, MATMUL, MAX_POOL2D, RFFT2D,
+  TRANSPOSE_CONV2D, CLAMP, SIGMOID, TANH, ADD, ARITHMETIC_RIGHT_SHIFT,
+  BITWISE_AND, BITWISE_OR, BITWISE_XOR, INTDIV, LOGICAL_AND,
+  LOGICAL_LEFT_SHIFT, LOGICAL_RIGHT_SHIFT, LOGICAL_OR, LOGICAL_XOR,
+  MAXIMUM, MINIMUM, MUL, POW, SUB, TABLE, ABS, BITWISE_NOT, CEIL, CLZ,
+  EXP, FLOOR, LOG, LOGICAL_NOT, NEGATE, RECIPROCAL, RSQRT, SELECT, EQUAL,
+  GREATER, GREATER_EQUAL, REDUCE_ALL, REDUCE_ANY, REDUCE_MAX, REDUCE_MIN,
+  REDUCE_PRODUCT, REDUCE_SUM, CONCAT, PAD, RESHAPE, REVERSE, SLICE, TILE,
+  TRANSPOSE, GATHER, SCATTER, RESIZE, CAST, RESCALE, CONST, IDENTITY,
+  COND_IF, and WHILE_LOOP.
+-->
diff --git a/tosa.xsd b/tosa.xsd
new file mode 100644
index 0000000..8f57131
--- /dev/null
+++ b/tosa.xsd
@@ -0,0 +1,169 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- XML Schema constraining the structure of tosa.xml; the Makefile lint
+     target validates the XML against it with xmllint. -->
-- 
cgit v1.2.1
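As a quick smoke test of the new machine-readable flow, the parser added in tools/tosa.py can be exercised directly. The sketch below is hypothetical and not part of this change; it assumes it is run from the repository root, where tosa.xml and the tools directory live.

[source,python]
----
#!/usr/bin/env python3
# Hypothetical smoke test: parse tosa.xml with the new tools/tosa.py
# module and print each operator group, its operators, and their
# argument counts. Assumes execution from the repository root.
import sys

sys.path.insert(0, "tools")
import tosa

spec = tosa.TOSASpec("tosa.xml")
for group in spec.operatorgroups:
    for op in group.operators:
        print(f"{group.name}/{op.name}: {len(op.arguments)} arguments")
----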