aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--chapters/activation_funcs.adoc2
-rw-r--r--chapters/data_layout.adoc10
-rw-r--r--chapters/ewise_binary.adoc2
-rw-r--r--chapters/introduction.adoc1
-rw-r--r--chapters/reduction.adoc12
-rw-r--r--chapters/tensor_ops.adoc4
6 files changed, 16 insertions, 15 deletions
diff --git a/chapters/activation_funcs.adoc b/chapters/activation_funcs.adoc
index 65266b3..05a693b 100644
--- a/chapters/activation_funcs.adoc
+++ b/chapters/activation_funcs.adoc
@@ -26,7 +26,7 @@ Clamp to an arbitrary minimum and maximum value. Note that the maximum and minim
*Operation Function:*
....
-assert(dimensions(shape)<=4)
+assert(rank(shape)<=4)
for_each (index in shape) {
value = tensor_read<in_t>(input, shape, index)
acc = apply_clip(value, min_val, max_val)
diff --git a/chapters/data_layout.adoc b/chapters/data_layout.adoc
index 3a7c3c3..bec551b 100644
--- a/chapters/data_layout.adoc
+++ b/chapters/data_layout.adoc
@@ -77,7 +77,7 @@ Zero-pads a tensor along borders of each dimension.
----
for_each (index in shape) {
index1 = index
- for (i=0; i<dimensions(shape); i++) {
+ for (i=0; i<rank(shape); i++) {
index1[i] = index1[i] - padding[i,0]
}
value = tensor_read<in_t>(input1, shape1, index1, input1_zp, padding)
@@ -151,7 +151,7 @@ Returns a tensor with the same type/values as the input, with the data reversed
[source,c]
----
-assert(0<=axis && axis<dimensions(shape))
+assert(0<=axis && axis<rank(shape))
for_each (index in shape) {
tmp_index = index;
tmp_index[axis] = shape[axis]-1-index[axis];
@@ -192,7 +192,7 @@ Extracts a slice of the input tensor 0 on the given axis, beginning at the start
----
for_each (index in shape) {
tmp_index = index;
- for (i=0; i<dimensions(shape); i++) {
+ for (i=0; i<rank(shape); i++) {
tmp_index[i] = index[i] + start[i];
}
value = tensor_read<in_t>(input, shape1, tmp_index);
@@ -232,7 +232,7 @@ Replicates input 0 multiplies times along each dimension.
----
for_each (index in shape) {
tmp_index = index;
- for (i=0; i<dimensions(shape); i++) {
+ for (i=0; i<rank(shape); i++) {
assert(shape1[i] * multiplies[i] == shape[i])
tmp_index[i] = index[i] % shape1[i]
}
@@ -273,7 +273,7 @@ Permutes the dimensions based on perm.
----
for_each (index in shape) {
tmp_index = index;
- for (i=0; i<dimensions(shape); i++) {
+ for (i=0; i<rank(shape); i++) {
assert(shape1[perm[i]] == shape[i])
tmp_index[perm[i]] = index[i]
}
diff --git a/chapters/ewise_binary.adoc b/chapters/ewise_binary.adoc
index 92c4926..9c0e71c 100644
--- a/chapters/ewise_binary.adoc
+++ b/chapters/ewise_binary.adoc
@@ -593,7 +593,7 @@ None
[source,c]
----
-assert(dimensions(shape)<=4)
+assert(rank(shape)<=4)
for_each (index in shape) {
in_t value = tensor_read<in_t>(input, shape, index)
out_t acc = apply_lookup(table, value)
diff --git a/chapters/introduction.adoc b/chapters/introduction.adoc
index 4137e8f..09a21dd 100644
--- a/chapters/introduction.adoc
+++ b/chapters/introduction.adoc
@@ -145,6 +145,7 @@ Tensors have an associated tensorinfo that contains information about the tensor
* Data Type
* Shape
+The number of dimensions in a shape is called the rank. Thus a tensor shape is an array of integers of size rank(shape) with shape[i] giving the number of elements for dimension i.
The following pseudocode represents the operations that will happen to data elements as they are read in to be processed, or have their results written out.
*Functionality of tensor read*
diff --git a/chapters/reduction.adoc b/chapters/reduction.adoc
index af44ab6..0739dfd 100644
--- a/chapters/reduction.adoc
+++ b/chapters/reduction.adoc
@@ -27,7 +27,7 @@ Reduce a tensor along the given axis with a logical AND operation
[source,c]
----
-assert(0<=axis && axis<dimensions(in_shape))
+assert(0<=axis && axis<rank(in_shape))
assert(out_shape[axis]==1)
for_each (index in out_shape) {
tensor_write<in_t>(output, out_shape, index, true)
@@ -68,7 +68,7 @@ Reduce a tensor along the given axis with a logical OR operation
[source,c]
----
-assert(0<=axis && axis<dimensions(in_shape))
+assert(0<=axis && axis<rank(in_shape))
assert(out_shape[axis]==1)
for_each (index in out_shape) {
tensor_write<in_t>(output, out_shape, index, false)
@@ -109,7 +109,7 @@ Reduce a tensor along the given axis with a maximum operation
[source,c]
----
-assert(0<=axis && axis<dimensions(in_shape))
+assert(0<=axis && axis<rank(in_shape))
assert(out_shape[axis]==1)
for_each (index in out_shape) {
tensor_write<in_t>(output, out_shape, index, minimum<in_t>)
@@ -156,7 +156,7 @@ Quantization is ignored when doing the REDUCE_MIN operation. The input and outpu
[source,c]
----
-assert(0<=axis && axis<dimensions(in_shape))
+assert(0<=axis && axis<rank(in_shape))
assert(out_shape[axis]==1)
for_each (index in out_shape) {
tensor_write<in_t>(output, out_shape, index, maximum<in_t>)
@@ -200,7 +200,7 @@ Reduce a tensor along the given axis by computing the product of the axis.
[source,c]
----
-assert(0<=axis && axis<dimensions(in_shape))
+assert(0<=axis && axis<rank(in_shape))
assert(out_shape[axis]==1)
for_each (index in out_shape) {
tensor_write<in_t>(output, out_shape, index, 1.0)
@@ -241,7 +241,7 @@ Reduce a tensor along the given axis by computing the sum of the axis.
[source,c]
----
-assert(0<=axis && axis<dimensions(in_shape))
+assert(0<=axis && axis<rank(in_shape))
assert(out_shape[axis]==1)
for_each (index in out_shape) {
tensor_write<in_t>(output, out_shape, index, 0)
diff --git a/chapters/tensor_ops.adoc b/chapters/tensor_ops.adoc
index 2ea4ba8..b8e053c 100644
--- a/chapters/tensor_ops.adoc
+++ b/chapters/tensor_ops.adoc
@@ -387,13 +387,13 @@ Performs a two dimensional matrix multiplication. This allows both inputs to be
[source,c]
----
-assert(in_t==aint8_t || (A_zp==0 && B_zp==0) // Zero point only for asymmetric int8
+assert(in_t==aint8_t || (A_zp==0 && B_zp==0)) // Zero point only for asymmetric int8
for_each (0<=m<M, 0<=n<N) {
acc_t acc = 0
for_each (0<=k<K) {
value1 = tensor_read<in_t>(A, [M,K], [m,k], A_zp)
value2 = tensor_read<in_t>(B, [K,N], [k,n], B_zp)
- acc = acc + value1 * value2
+ acc = apply_add<acc_t>(acc, value1 * value2)
}
tensor_write<acc_t>(C, [M,N], [m,n], acc)
}