path: root/chapters/data_layout.adoc
author    Luke Hutton <luke.hutton@arm.com>    2023-02-06 16:37:15 +0000
committer Luke Hutton <luke.hutton@arm.com>    2023-02-24 11:54:07 +0000
commit    580fdd133ad239e491d239fe4fdc16b442161b31 (patch)
tree      d293c4a6e72337b571e38cd2e4c44031ee49c5e6 /chapters/data_layout.adoc
parent    87bf309c606ad9bc3c51980ca656885397e6fb39 (diff)
download  specification-580fdd133ad239e491d239fe4fdc16b442161b31.tar.gz
Fix rank limits for various data layout operations
This commit removes the maximum rank 4 restriction from the REVERSE, SLICE and TILE operations so that the new maximum is now MAX_RANK. In doing so, some rank inconsistencies were also cleaned up, including:

* Adding a minimum rank to the PAD op description
* Adding level limit checks to the SLICE and TILE ops
* Adding checks for rank(shape1) == rank(shape) to the TILE and TRANSPOSE ops
* Adding a tensor_size(shape1) == tensor_size(shape) check to TRANSPOSE
* Replacing tensor names with shape names in the pseudocode where necessary

Change-Id: I94bb8ee0b6b720b2ba5a884ce9300dca6281245a
Signed-off-by: Luke Hutton <luke.hutton@arm.com>
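As a minimal standalone sketch (not spec text) of the kind of level limit check on rank this change implies for SLICE and TILE: rank is now bounded by MAX_RANK rather than a hard-coded 4. The MAX_RANK value below is a placeholder for illustration; the real limit comes from the level definitions in the specification.

[source,c++]
----
#include <cstdint>
#include <stdexcept>
#include <vector>

// Placeholder level limit for illustration only; the actual value is set by
// the level definitions in the specification.
constexpr int64_t MAX_RANK = 6;

// LEVEL_CHECK analogue: reject tensors whose rank exceeds the level limit.
void level_check_rank(const std::vector<int64_t>& shape) {
    if (static_cast<int64_t>(shape.size()) > MAX_RANK)
        throw std::out_of_range("rank(shape) exceeds MAX_RANK for this level");
}
----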
Diffstat (limited to 'chapters/data_layout.adoc')
-rw-r--r--    chapters/data_layout.adoc    21
1 file changed, 13 insertions(+), 8 deletions(-)
diff --git a/chapters/data_layout.adoc b/chapters/data_layout.adoc
index 395cb6b..2dc7057 100644
--- a/chapters/data_layout.adoc
+++ b/chapters/data_layout.adoc
@@ -1,7 +1,7 @@
//
// This confidential and proprietary software may be used only as
// authorised by a licensing agreement from ARM Limited
-// (C) COPYRIGHT 2020-2022 ARM Limited
+// (C) COPYRIGHT 2020-2023 ARM Limited
// ALL RIGHTS RESERVED
// The entire notice above must be reproduced on all authorised
// copies and copies may only be made to the extent permitted
@@ -89,7 +89,7 @@ for_each(index in shape) {
dim_t tmp_index = tensor_offset_to_index(shape1, offset);
// Now read/write the value
- in_out_t val = tensor_read<in_out_t>(input, shape1, tmp_index);
+ in_out_t val = tensor_read<in_out_t>(input1, shape1, tmp_index);
tensor_write<in_out_t>(output, shape, index, val);
}
----
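For readers following the hunk above, here is a minimal sketch of what tensor_offset_to_index computes, assuming the spec's usual row-major flattening (last dimension contiguous); it is an illustration, not the specification's pseudocode.

[source,c++]
----
#include <cstdint>
#include <vector>

using shape_t = std::vector<int64_t>;
using dim_t   = std::vector<int64_t>;

// Convert a flat element offset into a per-dimension index for `shape`.
dim_t tensor_offset_to_index(const shape_t& shape, int64_t offset) {
    dim_t index(shape.size());
    for (size_t i = shape.size(); i-- > 0;) {
        index[i] = offset % shape[i];  // coordinate within dimension i
        offset  /= shape[i];           // peel that dimension off the offset
    }
    return index;
}
----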
@@ -120,11 +120,11 @@ include::{generated}/operators/SLICE.adoc[]
[source,c++]
----
-ERROR_IF(rank(input1) != length(start) || rank(input1) != length(size));
-ERROR_IF(rank(input1) != rank(output))
+ERROR_IF(rank(shape1) != length(start) || rank(shape1) != length(size));
+ERROR_IF(rank(shape1) != rank(shape));
// Sanity check the given coordinates, ensure start and end are
// within tensor bounds
-for_each(index in rank(input1)) {
+for_each(index in rank(shape1)) {
ERROR_IF(start[index] < 0);
ERROR_IF(size[index] <= 0); //Output must be positive size
ERROR_IF(start[index] + size[index] > shape1[index]);
@@ -136,7 +136,7 @@ for_each(index in shape) {
for(i = 0; i < rank(shape); i++) {
tmp_index[i] = index[i] + start[i];
}
- in_out_t value = tensor_read<in_out_t>(input, shape1, tmp_index);
+ in_out_t value = tensor_read<in_out_t>(input1, shape1, tmp_index);
tensor_write<in_out_t>(output, shape, index, value);
}
----
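A minimal standalone sketch of the SLICE validity checks above, expressed as an executable helper. The name slice_check is hypothetical, and the checks mirror only the ERROR_IF statements visible in this hunk.

[source,c++]
----
#include <cstdint>
#include <stdexcept>
#include <vector>

using shape_t = std::vector<int64_t>;

// Validate a SLICE in the same way as the ERROR_IF statements above.
void slice_check(const shape_t& shape1,                 // input shape
                 const shape_t& shape,                  // output shape
                 const std::vector<int64_t>& start,
                 const std::vector<int64_t>& size) {
    auto error_if = [](bool cond, const char* msg) {
        if (cond) throw std::invalid_argument(msg);
    };
    error_if(shape1.size() != start.size() || shape1.size() != size.size(),
             "rank(shape1) != length(start) or length(size)");
    error_if(shape1.size() != shape.size(), "rank(shape1) != rank(shape)");
    for (size_t i = 0; i < shape1.size(); ++i) {
        error_if(start[i] < 0, "start must be non-negative");
        error_if(size[i] <= 0, "output must be positive size");
        error_if(start[i] + size[i] > shape1[i], "slice exceeds input bounds");
    }
}
----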
@@ -149,13 +149,15 @@ include::{generated}/operators/TILE.adoc[]
[source,c++]
----
+ERROR_IF(rank(shape1) != rank(shape));
+
for_each(index in shape) {
dim_t tmp_index = index;
for(i = 0; i < rank(shape); i++) {
ERROR_IF(shape1[i] * multiples[i] != shape[i]);
tmp_index[i] = index[i] % shape1[i];
}
- in_out_t value = tensor_read<in_out_t>(input, shape1, tmp_index);
+ in_out_t value = tensor_read<in_out_t>(input1, shape1, tmp_index);
tensor_write<in_out_t>(output, shape, index, value);
}
----
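A minimal reference sketch of the TILE indexing rule in the hunk above: each output coordinate wraps back into the input by taking it modulo the input extent, after checking shape1[i] * multiples[i] == shape[i]. The flat row-major buffers and the helper name tile_ref are assumptions of this sketch.

[source,c++]
----
#include <cstdint>
#include <functional>
#include <numeric>
#include <stdexcept>
#include <vector>

using shape_t = std::vector<int64_t>;

// Reference TILE over flat row-major buffers (an assumption of this sketch).
std::vector<float> tile_ref(const std::vector<float>& input1,
                            const shape_t& shape1,
                            const std::vector<int64_t>& multiples,
                            const shape_t& shape) {
    if (shape1.size() != shape.size())
        throw std::invalid_argument("rank(shape1) != rank(shape)");
    for (size_t i = 0; i < shape.size(); ++i) {
        if (shape1[i] * multiples[i] != shape[i])
            throw std::invalid_argument("shape1[i] * multiples[i] != shape[i]");
    }
    const int64_t out_size = std::accumulate(
        shape.begin(), shape.end(), int64_t{1}, std::multiplies<int64_t>());
    std::vector<float> output(out_size);
    for (int64_t out_off = 0; out_off < out_size; ++out_off) {
        // Decompose the output offset dimension by dimension (innermost
        // first), wrap each coordinate by the input extent, and rebuild the
        // corresponding input offset with a running stride.
        int64_t rem = out_off, in_off = 0, in_stride = 1;
        for (size_t i = shape.size(); i-- > 0;) {
            const int64_t out_idx = rem % shape[i];
            rem /= shape[i];
            in_off    += (out_idx % shape1[i]) * in_stride;
            in_stride *= shape1[i];
        }
        output[out_off] = input1[in_off];
    }
    return output;
}
----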
@@ -169,6 +171,9 @@ include::{generated}/operators/TRANSPOSE.adoc[]
[source,c++]
----
+ERROR_IF(rank(shape1) != rank(shape));
+ERROR_IF(tensor_size(shape1) != tensor_size(shape));
+
for_each(index in perms) {
// Ensure each perms value is a valid value
ERROR_IF(index >= rank(shape1));
@@ -189,7 +194,7 @@ for_each(index in shape) {
for(i = 0; i < rank(shape); i++) {
tmp_index[perms[i]] = index[i]
}
- in_out_t value = tensor_read<in_out_t>(input, shape1, tmp_index);
+ in_out_t value = tensor_read<in_out_t>(input1, shape1, tmp_index);
tensor_write<in_out_t>(output, shape, index, value);
}
----
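A minimal standalone sketch of the TRANSPOSE argument checks added here: matching rank, matching element count, and perms values that each name a valid input axis. The helper name check_transpose_args is hypothetical, and the requirement that each axis appear exactly once is this sketch's reading of the full operator pseudocode rather than something shown in this hunk.

[source,c++]
----
#include <cstdint>
#include <functional>
#include <numeric>
#include <stdexcept>
#include <vector>

using shape_t = std::vector<int64_t>;

void check_transpose_args(const shape_t& shape1,              // input shape
                          const shape_t& shape,               // output shape
                          const std::vector<int32_t>& perms) {
    auto error_if = [](bool cond, const char* msg) {
        if (cond) throw std::invalid_argument(msg);
    };
    auto tensor_size = [](const shape_t& s) {
        return std::accumulate(s.begin(), s.end(), int64_t{1},
                               std::multiplies<int64_t>());
    };
    error_if(shape1.size() != shape.size(), "rank(shape1) != rank(shape)");
    error_if(tensor_size(shape1) != tensor_size(shape),
             "tensor_size(shape1) != tensor_size(shape)");
    error_if(perms.size() != shape1.size(), "perms length must equal rank");
    // Each perms value must name a valid input axis and, taken together,
    // the values must form a permutation (each axis used exactly once).
    std::vector<bool> seen(shape1.size(), false);
    for (int32_t p : perms) {
        error_if(p < 0 || static_cast<size_t>(p) >= shape1.size(),
                 "perms value out of range");
        error_if(seen[p], "perms value used more than once");
        seen[p] = true;
    }
}
----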