//
// This confidential and proprietary software may be used only as
// authorised by a licensing agreement from ARM Limited
// (C) COPYRIGHT 2020-2022 ARM Limited
// ALL RIGHTS RESERVED
// The entire notice above must be reproduced on all authorised
// copies and copies may only be made to the extent permitted
// by a licensing agreement from ARM Limited.

=== Data Layout

==== CONCAT

Concatenate a list of tensors along a given axis.
No data conversion happens during a concat operation.

include::{generated}/operators/CONCAT.adoc[]

*Operation Function:*

[source,c++]
----
ERROR_IF(axis < 0 || axis >= rank(shapes1[0]));
ERROR_IF(shape[axis] != sum(shapes1[k][axis] for all k));
// The following checks ensure all inputs are compatible for concatenation
for_each(input_shape in shapes1) {
    ERROR_IF(rank(input_shape) != rank(shapes1[0]));
    for_each(index in input_shape) {
        ERROR_IF(input_shape[index] != shapes1[0][index] && index != axis);
    }
}

for_each(index1 in shape) {
    dim_t index2 = index1;
    for (tensor t = 0; t < length(input1); t++) {
        // Continue to concatenate along axis from each tensor
        // For each output location, we are looking for the
        // appropriate input tensor
        if (index2[axis] >= 0 && index2[axis] < shapes1[t][axis]) {
            in_out_t value = tensor_read(input1[t], shapes1[t], index2);
            tensor_write(output, shape, index1, value);
        }
        index2[axis] = index2[axis] - shapes1[t][axis];
    }
}
----

==== PAD

Pads a tensor along the borders of each dimension with a supplied value.
Returns a new tensor with the padding included.
The pad_const value includes the zero point if the tensor uses a zero point.

include::{generated}/operators/PAD.adoc[]

*Operation Function:*

[source,c++]
----
// Check output shape matches the padded input shape
ERROR_IF(rank(shape) != rank(shape1));
for (i = 0; i < rank(shape); i++) {
    ERROR_IF(padding[i,0] < 0 || padding[i,1] < 0);
    ERROR_IF(shape[i] != padding[i,0] + shape1[i] + padding[i,1]);
}

for_each(index in shape) {
    dim_t index1 = index;
    bool_t is_pad = false;
    for(i = 0; i < rank(shape); i++) {
        index1[i] = index1[i] - padding[i,0];
        // After removing the leading padding, this location is a pad
        // element if it falls outside the bounds of the input tensor
        if (index1[i] < 0 || index1[i] >= shape1[i]) {
            is_pad = true;
        }
    }
    in_out_t value = is_pad ? pad_const : tensor_read(input1, shape1, index1);
    tensor_write(output, shape, index, value);
}
----

==== RESHAPE

Returns a tensor with the same type/values as the input, with a new shape specified by the shape argument.
Reshape may operate on tensors of any rank.
No data conversion happens during a reshape operation.

include::{generated}/operators/RESHAPE.adoc[]

*Operation Function:*

[source,c++]
----
ERROR_IF(tensor_size(shape1) != tensor_size(shape));

for_each(index in shape) {
    // Calculate flattened index for the output location (index)
    size_t offset = tensor_index_to_offset(shape, index);
    // Now convert to the location in the input
    dim_t tmp_index = tensor_offset_to_index(shape1, offset);

    // Now read/write the value
    in_out_t val = tensor_read(input1, shape1, tmp_index);
    tensor_write(output, shape, index, val);
}
----
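The RESHAPE pseudocode relies on the tensor_index_to_offset and tensor_offset_to_index helpers to map between a multi-dimensional index and a flattened element offset.
As an informal illustration only, the sketch below models them in C++ under the assumption of a row-major element ordering (last dimension varies fastest), with shapes and indices held in std::vector<int>; it is not part of the specification.

[source,c++]
----
#include <cassert>
#include <cstddef>
#include <vector>

// Illustrative stand-ins for the index/offset helpers used above,
// assuming row-major ordering (last dimension varies fastest).
size_t tensor_index_to_offset(const std::vector<int>& shape,
                              const std::vector<int>& index) {
    size_t offset = 0;
    for (size_t i = 0; i < shape.size(); i++) {
        assert(index[i] >= 0 && index[i] < shape[i]);
        offset = offset * shape[i] + index[i];
    }
    return offset;
}

std::vector<int> tensor_offset_to_index(const std::vector<int>& shape,
                                        size_t offset) {
    std::vector<int> index(shape.size());
    // Peel off dimensions from the fastest-varying (last) one upwards
    for (size_t i = shape.size(); i-- > 0; ) {
        index[i] = offset % shape[i];
        offset /= shape[i];
    }
    return index;
}
----

Under this assumption, reshaping a [2,6] tensor to [3,4] maps output index [1,2] to offset 1*4 + 2 = 6, which converts back to input index [1,0] in the [2,6] shape, preserving the element order as described above.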
==== REVERSE

Returns a tensor with the same type/values as the input, with the data reversed along the given axis.
No data conversion happens during a reverse operation.

include::{generated}/operators/REVERSE.adoc[]

*Operation Function:*

[source,c++]
----
ERROR_IF(axis < 0 || axis >= rank(shape));

for_each(index in shape) {
    dim_t tmp_index = index;
    tmp_index[axis] = shape[axis] - 1 - index[axis];
    in_out_t value = tensor_read(input1, shape, tmp_index);
    tensor_write(output, shape, index, value);
}
----

==== SLICE

Extracts a slice of input1, beginning at the start coordinates and extending for size elements in each direction.
No data conversion happens during a slice operation.

include::{generated}/operators/SLICE.adoc[]

*Operation Function:*

[source,c++]
----
ERROR_IF(rank(input1) != length(start) || rank(input1) != length(size));
ERROR_IF(rank(input1) != rank(output));
// Sanity check the given coordinates, ensure start and end are
// within tensor bounds
for_each(index in rank(input1)) {
    ERROR_IF(start[index] < 0);
    ERROR_IF(size[index] <= 0); // Output must be positive size
    ERROR_IF(start[index] + size[index] > shape1[index]);
    ERROR_IF(shape[index] != size[index]);
}

for_each(index in shape) {
    dim_t tmp_index = index;
    for(i = 0; i < rank(shape); i++) {
        tmp_index[i] = index[i] + start[i];
    }
    in_out_t value = tensor_read(input1, shape1, tmp_index);
    tensor_write(output, shape, index, value);
}
----

==== TILE

Replicates input1 multiples[i] times along each dimension i.

include::{generated}/operators/TILE.adoc[]

*Operation Function:*

[source,c++]
----
for_each(index in shape) {
    dim_t tmp_index = index;
    for(i = 0; i < rank(shape); i++) {
        ERROR_IF(shape1[i] * multiples[i] != shape[i]);
        tmp_index[i] = index[i] % shape1[i];
    }
    in_out_t value = tensor_read(input1, shape1, tmp_index);
    tensor_write(output, shape, index, value);
}
----

==== TRANSPOSE

Permutes the dimensions of the input tensor input1 based on the perms argument.
Each value in the perms list must be a valid dimension of the input tensor and may not be repeated.

include::{generated}/operators/TRANSPOSE.adoc[]

*Operation Function:*

[source,c++]
----
bool_t indexes_used[rank(shape1)];
for (i = 0; i < rank(shape1); i++) {
    indexes_used[i] = false;
}

for_each(index in perms) {
    // Ensure each perms value is a valid value
    ERROR_IF(index >= rank(shape1));
    ERROR_IF(index < 0);
    // Ensure ranks aren't repeated
    ERROR_IF(indexes_used[index] == true);
    indexes_used[index] = true;
}

// Ensure that the output shapes have the properly
// permuted shapes
for(i = 0; i < rank(shape); i++) {
    ERROR_IF(shape1[perms[i]] != shape[i]);
}

for_each(index in shape) {
    dim_t tmp_index = index;
    for(i = 0; i < rank(shape); i++) {
        tmp_index[perms[i]] = index[i];
    }
    in_out_t value = tensor_read(input1, shape1, tmp_index);
    tensor_write(output, shape, index, value);
}
----
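REVERSE, SLICE, TILE and TRANSPOSE all follow the same gather pattern: compute an input index from each output index, then copy one element.
To make that concrete, the non-normative C++ sketch below applies the TRANSPOSE mapping to flat row-major buffers; the flat-buffer representation and the helper name transpose_rowmajor are assumptions made for illustration, not part of the specification.

[source,c++]
----
#include <cstddef>
#include <vector>

// Non-normative sketch: TRANSPOSE over flat row-major buffers, with
// tensor_read/tensor_write modelled as row-major offset arithmetic.
// Assumes input1.size() equals the product of shape1.
std::vector<int> transpose_rowmajor(const std::vector<int>& input1,
                                    const std::vector<int>& shape1,
                                    const std::vector<int>& perms) {
    const size_t rank = shape1.size();

    // Output shape: shape[i] = shape1[perms[i]], mirroring the ERROR_IF check above
    std::vector<int> shape(rank);
    for (size_t i = 0; i < rank; i++) shape[i] = shape1[perms[i]];

    std::vector<int> output(input1.size());
    std::vector<int> index(rank, 0); // current output index

    for (size_t n = 0; n < output.size(); n++) {
        // tmp_index[perms[i]] = index[i], as in the pseudocode
        std::vector<int> tmp_index(rank);
        for (size_t i = 0; i < rank; i++) tmp_index[perms[i]] = index[i];

        // Row-major offsets stand in for tensor_read/tensor_write
        size_t in_off = 0, out_off = 0;
        for (size_t i = 0; i < rank; i++) {
            in_off  = in_off  * shape1[i] + tmp_index[i];
            out_off = out_off * shape[i]  + index[i];
        }
        output[out_off] = input1[in_off];

        // Advance to the next output index (last dimension varies fastest)
        for (size_t i = rank; i-- > 0; ) {
            if (++index[i] < shape[i]) break;
            index[i] = 0;
        }
    }
    return output;
}
----

For example, with input1 = {1, 2, 3, 4, 5, 6} viewed as shape1 = {2, 3} and perms = {1, 0}, the result is {1, 4, 2, 5, 3, 6}, i.e. the 3x2 transpose of the original 2x3 tensor.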