From 298a029348e07903b1f78eb9994230fa205e869e Mon Sep 17 00:00:00 2001
From: Dominic Symes
Date: Thu, 22 Oct 2020 15:36:24 +0100
Subject: Make rank() consistent across files

Complete the function naming change from dimensions() to rank().
Fix MATMUL to use apply_add() for additions, consistent with the other
operations.

Signed-off-by: Dominic Symes
Change-Id: I6ba84cf8b016505e8477b04dd00f2a2bf3194492
---
 chapters/activation_funcs.adoc |  2 +-
 chapters/data_layout.adoc      | 10 +++++-----
 chapters/ewise_binary.adoc     |  2 +-
 chapters/introduction.adoc     |  1 +
 chapters/reduction.adoc        | 12 ++++++------
 chapters/tensor_ops.adoc       |  4 ++--
 6 files changed, 16 insertions(+), 15 deletions(-)

diff --git a/chapters/activation_funcs.adoc b/chapters/activation_funcs.adoc
index 65266b3..05a693b 100644
--- a/chapters/activation_funcs.adoc
+++ b/chapters/activation_funcs.adoc
@@ -26,7 +26,7 @@ Clamp to an arbitrary minimum and maximum value. Note that the maximum and minim
 *Operation Function:*
 
 ....
-assert(dimensions(shape)<=4)
+assert(rank(shape)<=4)
 for_each (index in shape) {
     value = tensor_read<in_t>(input, shape, index)
     acc = apply_clip(value, min_val, max_val)
diff --git a/chapters/data_layout.adoc b/chapters/data_layout.adoc
index 3a7c3c3..bec551b 100644
--- a/chapters/data_layout.adoc
+++ b/chapters/data_layout.adoc
@@ -77,7 +77,7 @@ Zero-pads a tensor along borders of each dimension.
 ----
 for_each (index in shape) {
     index1 = index
-    for (i=0; i<dimensions(shape); i++) {
+    for (i=0; i<rank(shape); i++) {
         index1[i] = index1[i] - padding[i,0]
     }
     value = tensor_read<in_t>(input1, shape1, index1, input1_zp, padding)
@@ -151,7 +151,7 @@ Returns a tensor with the same type/values as the input, with the data reversed
 
 [source,c]
 ----
-assert(0<=axis && axis<dimensions(shape1))
+assert(0<=axis && axis<rank(shape1))
 for_each (index in shape) {
     tmp_index = index;
     value = tensor_read<in_t>(input, shape1, tmp_index);
@@ -232,7 +232,7 @@ Replicates input 0 multiplies times along each dimension.
 ----
 for_each (index in shape) {
     tmp_index = index;
-    for (i=0; i<dimensions(shape); i++) {
+    for (i=0; i<rank(shape); i++) {
         tmp_index[i] = index[i] % shape1[i]
     }
diff --git a/chapters/ewise_binary.adoc b/chapters/ewise_binary.adoc
--- a/chapters/ewise_binary.adoc
+++ b/chapters/ewise_binary.adoc
@@ -?,? +?,? @@
-assert(dimensions(shape)<=4)
+assert(rank(shape)<=4)
 for_each (index in shape) {
     in_t value = tensor_read<in_t>(input, shape, index)
     out_t acc = apply_lookup(table, value)
diff --git a/chapters/introduction.adoc b/chapters/introduction.adoc
index 4137e8f..09a21dd 100644
--- a/chapters/introduction.adoc
+++ b/chapters/introduction.adoc
@@ -145,6 +145,7 @@ Tensors have an associated tensorinfo that contains information about the tensor
 * Data Type
 * Shape
 
+The number of dimensions in a shape is called the rank. Thus a tensor shape is an array of integers of size rank(shape), with shape[i] giving the number of elements for dimension i.
 The following pseudocode represents the operations that will happen to data elements as they are read in to be processed, or have their results written out.
 
 *Functionality of tensor read*
diff --git a/chapters/reduction.adoc b/chapters/reduction.adoc
index af44ab6..0739dfd 100644
--- a/chapters/reduction.adoc
+++ b/chapters/reduction.adoc
@@ -27,7 +27,7 @@ Reduce a tensor along the given axis with a logical AND operation
 
 [source,c]
 ----
-assert(0<=axis && axis<dimensions(shape1))
+assert(0<=axis && axis<rank(shape1))
 for_each (index in out_shape) {
     tensor_write<out_t>(output, out_shape, index, true)
@@ -68,7 +68,7 @@ Reduce a tensor along the given axis with a logical OR operation
 
 [source,c]
 ----
-assert(0<=axis && axis<dimensions(shape1))
+assert(0<=axis && axis<rank(shape1))
 for_each (index in out_shape) {
     tensor_write<out_t>(output, out_shape, index, false)
@@ -109,7 +109,7 @@ Reduce a tensor along the given axis with a maximum operation
 
 [source,c]
 ----
-assert(0<=axis && axis<dimensions(shape1))
+assert(0<=axis && axis<rank(shape1))
 for_each (index in out_shape) {
     tensor_write<out_t>(output, out_shape, index, minimum)
@@ -156,7 +156,7 @@ Quantization is ignored when doing the REDUCE_MIN operation. The input and outpu
 
 [source,c]
 ----
-assert(0<=axis && axis<dimensions(shape1))
+assert(0<=axis && axis<rank(shape1))
 for_each (index in out_shape) {
     tensor_write<out_t>(output, out_shape, index, maximum)
@@ -200,7 +200,7 @@ Reduce a tensor along the given axis by computing the product of the axis.
 
 [source,c]
 ----
-assert(0<=axis && axis<dimensions(shape1))
+assert(0<=axis && axis<rank(shape1))
 for_each (index in out_shape) {
     tensor_write<out_t>(output, out_shape, index, 1.0)
@@ -241,7 +241,7 @@ Reduce a tensor along the given axis by computing the sum of the axis.
 
 [source,c]
 ----
-assert(0<=axis && axis<dimensions(shape1))
+assert(0<=axis && axis<rank(shape1))
 for_each (index in out_shape) {
     tensor_write<out_t>(output, out_shape, index, 0)
diff --git a/chapters/tensor_ops.adoc b/chapters/tensor_ops.adoc
index 2ea4ba8..b8e053c 100644
--- a/chapters/tensor_ops.adoc
+++ b/chapters/tensor_ops.adoc
@@ -387,13 +387,13 @@ Performs a two dimensional matrix multiplication. This allows both inputs to be
 
 [source,c]
 ----
-assert(in_t==aint8_t || (A_zp==0 && B_zp==0) // Zero point only for asymmetric int8
+assert(in_t==aint8_t || (A_zp==0 && B_zp==0)) // Zero point only for asymmetric int8
 for_each (0<=m<M, 0<=n<N) {
     acc_t acc = 0
     for (0<=k<K) {
         value1 = tensor_read<in_t>(A, [M,K], [m,k], A_zp)
         value2 = tensor_read<in_t>(B, [K,N], [k,n], B_zp)
-        acc = acc + value1 * value2
+        acc = apply_add(acc, value1 * value2)
     }
     tensor_write<acc_t>(C, [M,N], [m,n], acc)
 }
--
cgit v1.2.1
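
For readers following the pseudocode changes above, the sketch below shows the two ideas this patch touches in plain C: rank() as the number of dimensions in a shape, and a MATMUL-style accumulation that goes through an apply_add()-style helper rather than a bare `+`. This is a minimal illustration under stated assumptions, not the spec's normative pseudocode: rank_of(), apply_add_i32() and the 2x3 example values are invented for this sketch, and the exact overflow semantics of apply_add() are whatever the specification defines.

[source,c]
----
/* Illustrative sketch only: rank_of(), apply_add_i32() and the example
 * matrices are hypothetical, not the TOSA specification's definitions. */
#include <stdint.h>
#include <stdio.h>

/* Rank is the number of dimensions; shape[i] is the element count of dimension i.
 * Works only on true arrays, not pointers. */
#define rank_of(shape) (sizeof(shape) / sizeof((shape)[0]))

/* Stand-in for the spec's apply_add(): do the 32-bit add in unsigned
 * arithmetic so the C behaviour is well defined, then convert back. */
static int32_t apply_add_i32(int32_t a, int32_t b) {
    return (int32_t)((uint32_t)a + (uint32_t)b);
}

int main(void) {
    int shape_a[] = {2, 3};                          /* a 2-D shape [M,K]: rank is 2 */
    printf("rank = %zu\n", rank_of(shape_a));

    /* MATMUL-style accumulation: C[m][n] = sum over k of A[m][k] * B[k][n],
     * accumulated with apply_add_i32 instead of a bare '+'. */
    int8_t A[2][3] = {{1, 2, 3}, {4, 5, 6}};
    int8_t B[3][2] = {{1, 0}, {0, 1}, {1, 1}};
    for (int m = 0; m < 2; m++) {
        for (int n = 0; n < 2; n++) {
            int32_t acc = 0;
            for (int k = 0; k < 3; k++) {
                acc = apply_add_i32(acc, (int32_t)A[m][k] * (int32_t)B[k][n]);
            }
            printf("C[%d][%d] = %d\n", m, n, acc);
        }
    }
    return 0;
}
----

Routing every addition through a single helper, as the patch does for MATMUL, keeps the accumulation behaviour defined in exactly one place, which is the consistency the commit message is after.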