about summary refs log tree commit diff
path: root/chapters/data_layout.adoc
diff options
context:
space:
mode:
Diffstat (limited to 'chapters/data_layout.adoc')
-rw-r--r--chapters/data_layout.adoc129
1 files changed, 9 insertions, 120 deletions
diff --git a/chapters/data_layout.adoc b/chapters/data_layout.adoc
index 2d48eb1..1ce92be 100644
--- a/chapters/data_layout.adoc
+++ b/chapters/data_layout.adoc
@@ -1,7 +1,7 @@
//
// This confidential and proprietary software may be used only as
// authorised by a licensing agreement from ARM Limited
-// (C) COPYRIGHT 2020-2023 ARM Limited
+// (C) COPYRIGHT 2020-2024 ARM Limited
// ALL RIGHTS RESERVED
// The entire notice above must be reproduced on all authorised
// copies and copies may only be made to the extent permitted
@@ -17,30 +17,7 @@ include::{generated}/operators/CONCAT.adoc[]
[source,c]
----
-ERROR_IF(axis < 0 || axis >= max(1,rank(shapes1[0])));
-ERROR_IF(shape[axis] != sum(shape_dim(shapes1[k], axis) for all k))
-ERROR_IF(in_out_t == shape_t && rank(shape) > 1);
-// The following checks ensure all inputs are compatible for concatenation
-for_each(input_shape in shapes1) {
- ERROR_IF(rank(input_shape) != rank(shapes1[0]));
- for_each(index in input_shape) {
- ERROR_IF(index != axis && input_shape[index] != shapes1[0][index]);
- }
-}
-for_each(index1 in shape) {
- dim_t index2 = index1;
- for (tensor t = 0; t < length(input1); t++) {
- // Continue to concatenate along axis from each tensor
- // For each output location, we are looking for the
- // appropriate input tensor
- if (index2[axis] >= 0 && index2[axis] < shape_dim(shapes1[t], axis)) {
- in_out_t value = tensor_read<in_out_t>(input1[t], shapes1[t], index2);
- tensor_write<in_out_t>(output, shape, index1, value);
- }
- index2[axis] = index2[axis] - shape_dim(shapes1[t], axis);
- }
-}
-
+include::{pseudocode}/operators/CONCAT.tosac[lines=10..-1]
----
==== PAD
@@ -53,24 +30,7 @@ include::{generated}/operators/PAD.adoc[]
[source,c++]
----
-// Check output shape matches the padded input shape
-ERROR_IF(rank(shape) != rank(shape1));
-for (i = 0; i < rank(shape); i++) {
- ERROR_IF(padding[i,0] < 0 || padding[i,1] < 0);
- ERROR_IF(shape[i] != padding[i, 0] + shape1[i] + padding[i, 1]);
-}
-for_each(index in shape) {
- dim_t index1 = index;
- bool_t is_pad = false;
- for(i = 0; i < rank(shape); i++) {
- index1[i] = index1[i] - padding[i,0];
- if (index1[i] < 0 || index[i] >= length(shape[i])) {
- is_pad = true;
- }
- }
- in_out_t value = is_pad ? pad_const : tensor_read<in_out_t>(input1, shape1, index1);
- tensor_write<in_out_t>(output, shape, index, value);
-}
+include::{pseudocode}/operators/PAD.tosac[lines=10..-1]
----
==== DIM
@@ -81,8 +41,7 @@ include::{generated}/operators/DIM.adoc[]
[source,c++]
----
-ERROR_IF(axis >= rank(shape));
-tensor_write<shape_t>(output, [], [], shape_dim(shape, axis));
+include::{pseudocode}/operators/DIM.tosac[lines=10..-1]
----
==== RESHAPE
@@ -93,18 +52,7 @@ include::{generated}/operators/RESHAPE.adoc[]
[source,c++]
----
-ERROR_IF(tensor_size(shape1) != tensor_size(shape));
-
-for_each(index in shape) {
- // Calculate flattened index for the output location (index)
- size_t offset = tensor_index_to_offset(shape, index);
- // Now convert to the location in the input
- dim_t tmp_index = tensor_offset_to_index(shape1, offset);
-
- // Now read/write the value
- in_out_t val = tensor_read<in_out_t>(input1, shape1, tmp_index);
- tensor_write<in_out_t>(output, shape, index, val);
-}
+include::{pseudocode}/operators/RESHAPE.tosac[lines=10..-1]
----
==== REVERSE
@@ -115,13 +63,7 @@ include::{generated}/operators/REVERSE.adoc[]
[source,c++]
----
-ERROR_IF(axis < 0 || axis >= rank(shape));
-for_each(index in shape) {
- dim_t tmp_index = index;
- tmp_index[axis] = shape[axis] - 1 - index[axis];
- in_out_t value = tensor_read<in_out_t>(input, shape, tmp_index);
- tensor_write<in_out_t>(output, shape, index, value);
-}
+include::{pseudocode}/operators/REVERSE.tosac[lines=10..-1]
----
==== SLICE
@@ -133,25 +75,7 @@ include::{generated}/operators/SLICE.adoc[]
[source,c++]
----
-ERROR_IF(rank(shape1) != length(start) || rank(shape1) != length(size));
-ERROR_IF(rank(shape1) != rank(shape));
-// Sanity check the given coordinates, ensure start and end are
-// within tensor bounds
-for_each(index in rank(shape1)) {
- ERROR_IF(start[index] < 0);
- ERROR_IF(size[index] <= 0); //Output must be positive size
- ERROR_IF(start[index] + size[index] > shape1[index]);
- ERROR_IF(shape[index] != size[index]);
-}
-
-for_each(index in shape) {
- dim_t tmp_index = index;
- for(i = 0; i < rank(shape); i++) {
- tmp_index[i] = index[i] + start[i];
- }
- in_out_t value = tensor_read<in_out_t>(input1, shape1, tmp_index);
- tensor_write<in_out_t>(output, shape, index, value);
-}
+include::{pseudocode}/operators/SLICE.tosac[lines=10..-1]
----
==== TILE
@@ -162,17 +86,7 @@ include::{generated}/operators/TILE.adoc[]
[source,c++]
----
-ERROR_IF(rank(shape1) != rank(shape));
-
-for_each(index in shape) {
- dim_t tmp_index = index;
- for(i = 0; i < rank(shape); i++) {
- ERROR_IF(shape1[i] * multiples[i] != shape[i]);
- tmp_index[i] = index[i] % shape1[i];
- }
- in_out_t value = tensor_read<in_out_t>(input1, shape1, tmp_index);
- tensor_write<in_out_t>(output, shape, index, value);
-}
+include::{pseudocode}/operators/TILE.tosac[lines=10..-1]
----
==== TRANSPOSE
@@ -184,30 +98,5 @@ include::{generated}/operators/TRANSPOSE.adoc[]
[source,c++]
----
-ERROR_IF(rank(shape1) != rank(shape));
-ERROR_IF(tensor_size(shape1) != tensor_size(shape));
-
-for_each(index in perms) {
- // Ensure each perms value is a valid value
- ERROR_IF(index >= rank(shape1));
- ERROR_IF(index < 0);
- // Ensure ranks aren't repeated
- ERROR_IF(indexes_used[index] == true);
- indexes_used[index] = true;
-}
-
-// Ensure that the output shapes have the properly
-// permuted shapes
-for(i = 0; i < rank(shape); i++) {
- ERROR_IF(shape1[perms[i]] != shape[i])
-}
-
-for_each(index in shape) {
- dim_t tmp_index = index;
- for(i = 0; i < rank(shape); i++) {
- tmp_index[perms[i]] = index[i]
- }
- in_out_t value = tensor_read<in_out_t>(input1, shape1, tmp_index);
- tensor_write<in_out_t>(output, shape, index, value);
-}
+include::{pseudocode}/operators/TRANSPOSE.tosac[lines=10..-1]
----