diff options
author | Luke Hutton <luke.hutton@arm.com> | 2023-02-06 16:37:15 +0000 |
---|---|---|
committer | Luke Hutton <luke.hutton@arm.com> | 2023-02-24 11:54:07 +0000 |
commit | 580fdd133ad239e491d239fe4fdc16b442161b31 (patch) | |
tree | d293c4a6e72337b571e38cd2e4c44031ee49c5e6 | |
parent | 87bf309c606ad9bc3c51980ca656885397e6fb39 (diff) | |
download | specification-580fdd133ad239e491d239fe4fdc16b442161b31.tar.gz |
Fix rank limits for various data layout operations
This commit removes the maximum rank 4 restriction
from REVERSE, SLICE and TILE operations so that the
new maximum is now MAX_RANK.
In doing so some rank inconsistencies were also
cleaned up, including:
* Adding a minimum rank to the PAD op description
* Adding level limit checks to SLICE and TILE ops
* Adding checks for rank(shape1) == rank(shape)
for TILE and TRANSPOSE ops
* Add tensor_size(shape1) == tensor_size(shape)
check for TRANSPOSE
* Replace tensor name with shape name in pseudo
code where necessary
Change-Id: I94bb8ee0b6b720b2ba5a884ce9300dca6281245a
Signed-off-by: Luke Hutton <luke.hutton@arm.com>
-rw-r--r-- | chapters/data_layout.adoc | 21 | ||||
-rw-r--r-- | tosa.xml | 18 |
2 files changed, 23 insertions, 16 deletions
diff --git a/chapters/data_layout.adoc b/chapters/data_layout.adoc index 395cb6b..2dc7057 100644 --- a/chapters/data_layout.adoc +++ b/chapters/data_layout.adoc @@ -1,7 +1,7 @@ // // This confidential and proprietary software may be used only as // authorised by a licensing agreement from ARM Limited -// (C) COPYRIGHT 2020-2022 ARM Limited +// (C) COPYRIGHT 2020-2023 ARM Limited // ALL RIGHTS RESERVED // The entire notice above must be reproduced on all authorised // copies and copies may only be made to the extent permitted @@ -89,7 +89,7 @@ for_each(index in shape) { dim_t tmp_index = tensor_offset_to_index(shape1, offset); // Now read/write the value - in_out_t val = tensor_read<in_out_t>(input, shape1, tmp_index); + in_out_t val = tensor_read<in_out_t>(input1, shape1, tmp_index); tensor_write<in_out_t>(output, shape, index, val); } ---- @@ -120,11 +120,11 @@ include::{generated}/operators/SLICE.adoc[] [source,c++] ---- -ERROR_IF(rank(input1) != length(start) || rank(input1) != length(size)); -ERROR_IF(rank(input1) != rank(output)) +ERROR_IF(rank(shape1) != length(start) || rank(shape1) != length(size)); +ERROR_IF(rank(shape1) != rank(shape)); // Sanity check the given coordinates, ensure start and end are // within tensor bounds -for_each(index in rank(input1)) { +for_each(index in rank(shape1)) { ERROR_IF(start[index] < 0); ERROR_IF(size[index] <= 0); //Output must be positive size ERROR_IF(start[index] + size[index] > shape1[index]); @@ -136,7 +136,7 @@ for_each(index in shape) { for(i = 0; i < rank(shape); i++) { tmp_index[i] = index[i] + start[i]; } - in_out_t value = tensor_read<in_out_t>(input, shape1, tmp_index); + in_out_t value = tensor_read<in_out_t>(input1, shape1, tmp_index); tensor_write<in_out_t>(output, shape, index, value); } ---- @@ -149,13 +149,15 @@ include::{generated}/operators/TILE.adoc[] [source,c++] ---- +ERROR_IF(rank(shape1) != rank(shape)); + for_each(index in shape) { dim_t tmp_index = index; for(i = 0; i < rank(shape); i++) { 
ERROR_IF(shape1[i] * multiples[i] != shape[i]); tmp_index[i] = index[i] % shape1[i]; } - in_out_t value = tensor_read<in_out_t>(input, shape1, tmp_index); + in_out_t value = tensor_read<in_out_t>(input1, shape1, tmp_index); tensor_write<in_out_t>(output, shape, index, value); } ---- @@ -169,6 +171,9 @@ include::{generated}/operators/TRANSPOSE.adoc[] [source,c++] ---- +ERROR_IF(rank(shape1) != rank(shape)); +ERROR_IF(tensor_size(shape1) != tensor_size(shape)); + for_each(index in perms) { // Ensure each perms value is a valid value ERROR_IF(index >= rank(shape1)); @@ -189,7 +194,7 @@ for_each(index in shape) { for(i = 0; i < rank(shape); i++) { tmp_index[perms[i]] = index[i] } - in_out_t value = tensor_read<in_out_t>(input, shape1, tmp_index); + in_out_t value = tensor_read<in_out_t>(input1, shape1, tmp_index); tensor_write<in_out_t>(output, shape, index, value); } ---- @@ -1679,7 +1679,7 @@ <name>PAD</name> <arguments> <argument category="input" name="input1" type="in_out_t*" shape="shape1"> - <description>Input tensor</description> + <description>Input tensor with minimum rank of one.</description> </argument> <argument category="attribute" name="padding" type="int32_t" shape="[rank(shape1),2]"> <description>Number of pad elements at the start and end of each dimension</description> @@ -1719,7 +1719,7 @@ <description>Input tensor</description> <levellimit value="rank(shape1)" limit="MAX_RANK"/> </argument> - <argument category="attribute" name="new_shape" type="int32_t" shape="[rank(output)]"> + <argument category="attribute" name="new_shape" type="int32_t" shape="[rank(shape)]"> <description>List of values, with each element giving the size of the result tensor for the given dimension. 
At most one dimension may be given as -1 to automatically calculate the dimension size.</description> </argument> <argument category="output" name="output" type="in_out_t*" shape="shape"> @@ -1751,7 +1751,7 @@ <name>REVERSE</name> <arguments> <argument category="input" name="input" type="in_out_t*" shape="shape"> - <description>Input tensor with rank from 1 to 4</description> + <description>Input tensor with minimum rank of one.</description> <levellimit value="rank(shape)" limit="MAX_RANK"/> </argument> <argument category="attribute" name="axis" type="int32_t" shape="-"> @@ -1785,14 +1785,15 @@ <name>SLICE</name> <arguments> <argument category="input" name="input1" type="in_out_t*" shape="shape1"> - <description>Input tensor with rank from 1 to 4</description> + <description>Input tensor with minimum rank of one.</description> </argument> - <argument category="attribute" name="start" type="int32_t" shape="[rank(input1)]"> + <argument category="attribute" name="start" type="int32_t" shape="[rank(shape1)]"> <description>List of integer coordinates, of length equal to the rank of input1. Start coordinate for slicing.</description> </argument> - <argument category="attribute" name="size" type="int32_t" shape="[rank(input1)]"> + <argument category="attribute" name="size" type="int32_t" shape="[rank(shape1)]"> <description>List of integer size values, of length equal to the rank of input1. 
Size of the input to be used.</description> + <levellimit value="rank(shape)" limit="MAX_RANK"/> </argument> <argument category="output" name="output" type="in_out_t*" shape="shape"> <description>Output tensor of same type as the input tensor</description> @@ -1822,13 +1823,14 @@ used.</description> <name>TILE</name> <arguments> <argument category="input" name="input1" type="in_out_t*" shape="shape1"> - <description>Input tensor with rank from 1 to 4</description> + <description>Input tensor with minimum rank of one.</description> </argument> <argument category="attribute" name="multiplies" type="int32_t" shape="[rank(shape1)]"> <description>Number of times to replicate input1 in each dimension</description> </argument> <argument category="output" name="output" type="in_out_t*" shape="shape"> <description>Output tensor of same type, rank as the input tensor</description> + <levellimit value="rank(shape)" limit="MAX_RANK"/> </argument> </arguments> <types> @@ -1857,7 +1859,7 @@ used.</description> <argument category="input" name="input1" type="in_out_t*" shape="shape1"> <description>Input tensor with minimum rank of one.</description> </argument> - <argument category="attribute" name="perms" type="int32_t" shape="[rank(input1)]"> + <argument category="attribute" name="perms" type="int32_t" shape="[rank(shape1)]"> <description>List of integers of length equal to the rank of input1. Values must be valid dimensions within shape1, and may not be repeated.</description> </argument> <argument category="output" name="output" type="in_out_t*" shape="shape"> |