aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorEric Kunze <eric.kunze@arm.com>2020-11-04 14:23:31 -0800
committerEric Kunze <eric.kunze@arm.com>2020-11-06 09:10:10 -0800
commit646ef42f0357c85b2898c39d1657a85487ac751c (patch)
treee94d322939b977c9a1037b1a8a47e2a844a85687
parent9424cc4d4c5002c1da441f20052342b6b1b6ac7c (diff)
downloadspecification-646ef42f0357c85b2898c39d1657a85487ac751c.tar.gz
Operator argument consistency improvement.
Add values attribute to CONST operator. Remove stale references to input tensor 0 and 1. Remove out_t types where in and out are guaranteed to be the same type. Signed-off-by: Eric Kunze <eric.kunze@arm.com> Change-Id: I75a8eb4dee67afbee3652d9e937aa0b82318dbd0
-rw-r--r--chapters/data_layout.adoc118
-rw-r--r--chapters/data_nodes.adoc29
-rw-r--r--chapters/ewise_binary.adoc53
-rw-r--r--chapters/ewise_ternary.adoc27
-rw-r--r--chapters/ewise_unary.adoc103
5 files changed, 174 insertions, 156 deletions
diff --git a/chapters/data_layout.adoc b/chapters/data_layout.adoc
index 4e6d24d..e764be9 100644
--- a/chapters/data_layout.adoc
+++ b/chapters/data_layout.adoc
@@ -20,7 +20,7 @@ Concatenate two tensors along a given axis. No data conversion happens during a
|Input|in_t*|input1|shape1|Input tensor with rank from 1 to 4
|Input|in_t*|input2|shape2|Input tensor with rank matching input1
|Attribute|int|axis|-|Axis along which concatenation is to occur.
-|Output|out_t*|output|shape|Output tensor of same type, size as the input tensor
+|Output|in_t*|output|shape|Output tensor of same type, size as the input tensor
|===
*Operation Function:*
@@ -30,7 +30,7 @@ Concatenate two tensors along a given axis. No data conversion happens during a
for_each (index1 in shape) {
index2 = index1
index2[axis] = index1[axis] - shape1[axis]
- value = (index2[axis] < 0) ?
+ in_t value = (index2[axis] < 0) ?
tensor_read<in_t>(input1, shape1, index1) :
tensor_read<in_t>(input2, shape2, index2) ;
tensor_write<in_t>(output, shape, index1, value);
@@ -40,13 +40,13 @@ for_each (index1 in shape) {
*Supported Data Types:*
|===
-|Profile|Mode|in_t|out_t
+|Profile|Mode|in_t
-|Any|Boolean|bool|bool
-|Any|signed 8|int8/aint8|int8/aint8
-|Any|signed 16|int16|int16
-|Any|signed 32|int32|int32
-|MI, MT|float|float|float
+|Any|Boolean|bool
+|Any|signed 8|int8/aint8
+|Any|signed 16|int16
+|Any|signed 32|int32
+|MI, MT|float|float
|===
==== PAD
@@ -60,7 +60,7 @@ Zero-pads a tensor along borders of each dimension.
|Input|in_t*|input1|shape1|Input tensor
|Attribute|int|padding|[rank(input1),2]|Amount of padding to be done
-|Output|out_t*|output|shape|Output tensor of same type as the input tensor
+|Output|in_t*|output|shape|Output tensor of same type as the input tensor
|===
*Quantization Parameters:*
@@ -80,7 +80,7 @@ for_each (index in shape) {
for (i=0; i<rank(shape); i++) {
index1[i] = index1[i] - padding[i,0]
}
- value = tensor_read<in_t>(input1, shape1, index1, input1_zp, padding)
+ in_t value = tensor_read<in_t>(input1, shape1, index1, input1_zp, padding)
tensor_write<in_t>(output, shape, index, value + input1_zp);
}
----
@@ -88,13 +88,13 @@ for_each (index in shape) {
*Supported Data Types:*
|===
-|Profile|Mode|in_t|out_t
+|Profile|Mode|in_t
-|Any|Boolean|bool|bool
-|Any|signed 8|int8/aint8|int8/aint8
-|Any|signed 16|int16|int16
-|Any|signed 32|int32|int32
-|MI, MT|float|float|float
+|Any|Boolean|bool
+|Any|signed 8|int8/aint8
+|Any|signed 16|int16
+|Any|signed 32|int32
+|MI, MT|float|float
|===
==== RESHAPE
@@ -108,7 +108,7 @@ Returns a tensor with the same type/values as the input, with a new shape specif
|Input|in_t*|input1|shape1|Input tensor
|Attribute|int|new_shape|[rank(output)]|List of values, with each element giving the size of the result tensor for the given dimension. At most one dimension may be given as -1 to automatically calculate the dimension size.
-|Output|out_t*|output|shape|Output tensor of same type, size as the input tensor
+|Output|in_t*|output|shape|Output tensor of same type, size as the input tensor
|===
*Operation Function:*
@@ -124,13 +124,13 @@ for (i=0; i<tensor_size(shape); i++) {
*Supported Data Types:*
|===
-|Profile|Mode|in_t|out_t
+|Profile|Mode|in_t
-|Any|Boolean|bool|bool
-|Any|signed 8|int8/aint8|int8/aint8
-|Any|signed 16|int16|int16
-|Any|signed 32|int32|int32
-|MI, MT|float|float|float
+|Any|Boolean|bool
+|Any|signed 8|int8/aint8
+|Any|signed 16|int16
+|Any|signed 32|int32
+|MI, MT|float|float
|===
==== REVERSE
@@ -144,7 +144,7 @@ Returns a tensor with the same type/values as the input, with the data reversed
|Input|in_t*|input|shape|Input tensor from 1 to 4 dims
|Attribute|int|axis|-|Axis to reverse
-|Output|out_t*|output|shape|Output tensor. Same shape as input tensor.
+|Output|in_t*|output|shape|Output tensor. Same shape as input tensor.
|===
*Operation Function:*
@@ -155,7 +155,7 @@ assert(0<=axis && axis<rank(shape))
for_each (index in shape) {
tmp_index = index;
tmp_index[axis] = shape[axis]-1-index[axis];
- value = tensor_read<in_t>(input, shape, tmp_index);
+ in_t value = tensor_read<in_t>(input, shape, tmp_index);
tensor_write<in_t>(output, shape, index, value);
}
----
@@ -163,27 +163,27 @@ for_each (index in shape) {
*Supported Data Types:*
|===
-|Profile|Mode|in_t|out_t
+|Profile|Mode|in_t
-|Any|Boolean|bool|bool
-|Any|signed 8|int8/aint8|int8/aint8
-|Any|signed 16|int16|int16
-|Any|signed 32|int32|int32
-|MI, MT|float|float|float
+|Any|Boolean|bool
+|Any|signed 8|int8/aint8
+|Any|signed 16|int16
+|Any|signed 32|int32
+|MI, MT|float|float
|===
==== SLICE
-Extracts a slice of the input tensor 0 on the given axis, beginning at the start coordinates, and extending for size elements in each direction. No data conversion happens during a slice operation.
+Extracts a slice of the input1 on the given axis, beginning at the start coordinates, and extending for size elements in each direction. No data conversion happens during a slice operation.
*Arguments:*
|===
|Argument|Type|Name|Shape|Description
|Input|in_t*|input1|shape1|Input tensor with rank from 1 to 4
-|Attribute|int|start|[rank(input1)]|List of integer coordinates, of length equal to the rank of input 0. Start coordinate for slicing.
-|Attribute|int|size|[rank(input1)]|List of integer size values, of length equal to the rank of input 0. Size of the input to be used.
-|Output|out_t*|output|shape|Output tensor of same type as the input tensor
+|Attribute|int|start|[rank(input1)]|List of integer coordinates, of length equal to the rank of input1. Start coordinate for slicing.
+|Attribute|int|size|[rank(input1)]|List of integer size values, of length equal to the rank of input1. Size of the input to be used.
+|Output|in_t*|output|shape|Output tensor of same type as the input tensor
|===
*Operation Function:*
@@ -195,7 +195,7 @@ for_each (index in shape) {
for (i=0; i<rank(shape); i++) {
tmp_index[i] = index[i] + start[i];
}
- value = tensor_read<in_t>(input, shape1, tmp_index);
+ in_t value = tensor_read<in_t>(input, shape1, tmp_index);
tensor_write<in_t>(output, shape, index, value);
}
----
@@ -203,18 +203,18 @@ for_each (index in shape) {
*Supported Data Types:*
|===
-|Profile|Mode|in_t|out_t
+|Profile|Mode|in_t
-|Any|Boolean|bool|bool
-|Any|signed 8|int8/aint8|int8/aint8
-|Any|signed 16|int16|int16
-|Any|signed 32|int32|int32
-|MI, MT|float|float|float
+|Any|Boolean|bool
+|Any|signed 8|int8/aint8
+|Any|signed 16|int16
+|Any|signed 32|int32
+|MI, MT|float|float
|===
==== TILE
-Replicates input 0 multiplies times along each dimension.
+Replicates input1 multiplies times along each dimension.
*Arguments:*
@@ -223,7 +223,7 @@ Replicates input 0 multiplies times along each dimension.
|Input|in_t*|input1|shape1|Input tensor with rank from 1 to 4
|Attribute|int|multiplies|[rank(shape1)]|Number of times to replicate input1 in each dimension
-|Output|out_t*|output|shape|Output tensor of same type, rank as the input tensor
+|Output|in_t*|output|shape|Output tensor of same type, rank as the input tensor
|===
*Operation Function:*
@@ -236,7 +236,7 @@ for_each (index in shape) {
assert(shape1[i] * multiplies[i] == shape[i])
tmp_index[i] = index[i] % shape1[i]
}
- value = tensor_read<in_t>(input, shape1, tmp_index);
+ in_t value = tensor_read<in_t>(input, shape1, tmp_index);
tensor_write<in_t>(output, shape, index, value);
}
----
@@ -244,13 +244,13 @@ for_each (index in shape) {
*Supported Data Types:*
|===
-|Profile|Mode|in_t|out_t
+|Profile|Mode|in_t
-|Any|Boolean|bool|bool
-|Any|signed 8|int8/aint8|int8/aint8
-|Any|signed 16|int16|int16
-|Any|signed 32|int32|int32
-|MI, MT|float|float|float
+|Any|Boolean|bool
+|Any|signed 8|int8/aint8
+|Any|signed 16|int16
+|Any|signed 32|int32
+|MI, MT|float|float
|===
==== TRANSPOSE
@@ -264,7 +264,7 @@ Permutes the dimensions based on perm.
|Input|in_t*|input1|shape1|Input tensor with rank from 1 to 4
|Attribute|int|perms|[rank(input1)]|List of integers of length equal to the rank of input1.
-|Output|out_t*|output|shape|Output tensor of same type, rank as the input tensor
+|Output|in_t*|output|shape|Output tensor of same type, rank as the input tensor
|===
*Operation Function:*
@@ -277,7 +277,7 @@ for_each (index in shape) {
assert(shape1[perm[i]] == shape[i])
tmp_index[perm[i]] = index[i]
}
- value = tensor_read<in_t>(input, shape1, tmp_index);
+ in_t value = tensor_read<in_t>(input, shape1, tmp_index);
tensor_write<in_t>(output, shape, index, value);
}
----
@@ -285,11 +285,11 @@ for_each (index in shape) {
*Supported Data Types:*
|===
-|Profile|Mode|in_t|out_t
+|Profile|Mode|in_t
-|Any|Boolean|bool|bool
-|Any|signed 8|int8/aint8|int8/aint8
-|Any|signed 16|int16|int16
-|Any|signed 32|int32|int32
-|MI, MT|float|float|float
+|Any|Boolean|bool
+|Any|signed 8|int8/aint8
+|Any|signed 16|int16
+|Any|signed 32|int32
+|MI, MT|float|float
|===
diff --git a/chapters/data_nodes.adoc b/chapters/data_nodes.adoc
index ba38ef9..6164a95 100644
--- a/chapters/data_nodes.adoc
+++ b/chapters/data_nodes.adoc
@@ -18,6 +18,7 @@ A node containing constant data for use as the input to an operation. May hold d
|===
|Argument|Type|Name|Shape|Description
+|Attribute|out_t*|values|shape|Constant values
|Output|out_t*|output|shape|Output tensor of same type, size as the input tensor
|===
@@ -43,19 +44,19 @@ Returns a tensor with the same shape, type, and contents as the input.
|Argument|Type|Name|Shape|Description
|Input|in_t|input1|shape|Input tensor
-|Output|out_t*|output|shape|Output tensor of same type, size as the input tensor
+|Output|in_t*|output|shape|Output tensor of same type, size as the input tensor
|===
*Supported Data Types:*
|===
-|Profile|Mode|in_t|out_t
+|Profile|Mode|in_t
-|Any|Boolean|bool|bool
-|Any|signed 8|int8/aint8|int8/aint8
-|Any|signed 16|int16|int16
-|Any|signed 32|int32|int32
-|MI, MT|float|float|float
+|Any|Boolean|bool
+|Any|signed 8|int8/aint8
+|Any|signed 16|int16
+|Any|signed 32|int32
+|MI, MT|float|float
|===
==== IDENTITYN
@@ -68,19 +69,19 @@ Returns a list of tensors with the same shape, type, and contents as the input l
|Argument|Type|Name|Shape|Description
|Input|in_t|input1|[shape1, shape2, …]|List of input tensors
-|Output|out_t*|output|[shape1, shape2, …]|List of output tensors of same type, size as the input tensors
+|Output|in_t*|output|[shape1, shape2, …]|List of output tensors of same type, size as the input tensors
|===
*Supported Data Types:*
|===
-|Profile|Mode|in_t|out_t
+|Profile|Mode|in_t
-|Any|Boolean|bool|bool
-|Any|signed 8|int8/aint8|int8/aint8
-|Any|signed 16|int16|int16
-|Any|signed 32|int32|int32
-|MI, MT|float|float|float
+|Any|Boolean|bool
+|Any|signed 8|int8/aint8
+|Any|signed 16|int16
+|Any|signed 32|int32
+|MI, MT|float|float
|===
==== PLACEHOLDER
diff --git a/chapters/ewise_binary.adoc b/chapters/ewise_binary.adoc
index a9787b3..f3c9fbe 100644
--- a/chapters/ewise_binary.adoc
+++ b/chapters/ewise_binary.adoc
@@ -11,7 +11,8 @@
==== ADD
-Elementwise addition of input1 and input2. Axis of size 1 will be broadcast, as necessary. Rank of input tensors must match.
+Elementwise addition of input1 and input2.
+Axis of size 1 will be broadcast, as necessary. Rank of input tensors must match.
*Arguments:*
@@ -48,7 +49,8 @@ for_each (index in shape) {
==== ARITHMETIC_RIGHT_SHIFT
-Elementwise arithmetic right shift of input1 by the amount specified in input2. Axis of size 1 will be broadcast, as necessary. Rank of input tensors must match.
+Elementwise arithmetic right shift of input1 by the amount specified in input2.
+Axis of size 1 will be broadcast, as necessary. Rank of input tensors must match.
*Arguments:*
@@ -92,7 +94,8 @@ for_each (index in shape) {
==== BITWISE_AND
-Elementwise bitwise AND of input tensor 0 and input tensor 1. Axis of size 1 will be broadcast as necessary. Rank of input tensors must match.
+Elementwise bitwise AND of input1 and input2.
+Axis of size 1 will be broadcast as necessary. Rank of input tensors must match.
*Arguments:*
@@ -130,7 +133,8 @@ for_each (index in shape) {
==== BITWISE_OR
-Elementwise bitwise OR of input1 and input2. Axis of size 1 will be broadcast as necessary. Rank of input tensors must match.
+Elementwise bitwise OR of input1 and input2.
+Axis of size 1 will be broadcast as necessary. Rank of input tensors must match.
*Arguments:*
@@ -168,7 +172,8 @@ for_each (index in shape) {
==== BITWISE_XOR
-Elementwise bitwise XOR of input1 and input2. Axis of size 1 will be broadcast as necessary. Rank of input tensors must match.
+Elementwise bitwise XOR of input1 and input2.
+Axis of size 1 will be broadcast as necessary. Rank of input tensors must match.
*Arguments:*
@@ -206,7 +211,8 @@ for_each (index in shape) {
==== LOGICAL_AND
-Elementwise logical AND of input1 and input2. Axis of size 1 will be broadcast, as necessary. Rank of input tensors must match.
+Elementwise logical AND of input1 and input2.
+Axis of size 1 will be broadcast, as necessary. Rank of input tensors must match.
*Arguments:*
@@ -246,7 +252,8 @@ for_each (index in shape) {
==== LOGICAL_LEFT_SHIFT
-Elementwise left shift of input1 and input2. Axis of size 1 will be broadcast, as necessary. Rank of input tensors must match.
+Elementwise left shift of input1 and input2.
+Axis of size 1 will be broadcast, as necessary. Rank of input tensors must match.
*Arguments:*
@@ -285,7 +292,8 @@ for_each (index in shape) {
==== LOGICAL_RIGHT_SHIFT
-Elementwise logical right shift of input1 by the amount specified in input2. Axis of size 1 will be broadcast, as necessary. Rank of input tensors must match.
+Elementwise logical right shift of input1 by the amount specified in input2.
+Axis of size 1 will be broadcast, as necessary. Rank of input tensors must match.
*Arguments:*
@@ -324,7 +332,8 @@ for_each (index in shape) {
==== LOGICAL_OR
-Elementwise logical OR of input1 and input2. Axis of size 1 will be broadcast as necessary. Rank of input tensors must match.
+Elementwise logical OR of input1 and input2.
+Axis of size 1 will be broadcast as necessary. Rank of input tensors must match.
*Arguments:*
@@ -360,7 +369,8 @@ for_each (index in shape) {
==== LOGICAL_XOR
-Elementwise logical XOR of input tensor 0 and input tensor 1. Axis of size 1 will be broadcast as necessary. Rank of input tensors must match.
+Elementwise logical XOR of input1 and input2.
+Axis of size 1 will be broadcast as necessary. Rank of input tensors must match.
*Arguments:*
@@ -368,7 +378,7 @@ Elementwise logical XOR of input tensor 0 and input tensor 1. Axis of size 1 wil
|Argument|Type|Name|Shape|Description
|Input|in_t*|input1|shape1|Input tensor from 1 to 4 dims
-|Input|in_t*|input2|shape2|Input tensor with the same rank as Input 0
+|Input|in_t*|input2|shape2|Input tensor with the same rank as input1
|Output|in_t*|output|shape|Output tensor of same type as the input tensors, with broadcast shape if necessary
|===
@@ -396,7 +406,8 @@ for_each (index in shape) {
==== MAXIMUM
-Elementwise max of input1 and input2. Axis of size 1 will be broadcast, as necessary. Rank of input tensors must match.
+Elementwise max of input1 and input2.
+Axis of size 1 will be broadcast, as necessary. Rank of input tensors must match.
*Arguments:*
@@ -433,7 +444,8 @@ for_each (index in shape) {
==== MINIMUM
-Elementwise minimum of input tensor 0 and input tensor 1. Axis of size 1 will be broadcast, as necessary. Rank of input tensors must match.
+Elementwise minimum of input1 and input2.
+Axis of size 1 will be broadcast, as necessary. Rank of input tensors must match.
*Arguments:*
@@ -470,7 +482,8 @@ for_each (index in shape) {
==== MUL
-Elementwise multiplication (Hadamard product) of input tensor 0 and input tensor 1. Axis of size 1 will be broadcast, as necessary. Rank of input tensors must match.
+Elementwise multiplication (Hadamard product) of input1 and input2.
+Axis of size 1 will be broadcast, as necessary. Rank of input tensors must match.
*Arguments:*
@@ -478,7 +491,7 @@ Elementwise multiplication (Hadamard product) of input tensor 0 and input tensor
|Argument|Type|Name|Shape|Description
|Input|in_t*|input1|shape1|Input tensor
-|Input|in_t*|input2|shape2|Input tensor with the same rank as Input 0
+|Input|in_t*|input2|shape2|Input tensor with the same rank as input1
|Input (MT profile) Attribute (BI/MI profiles)|uint6_t|shift|-|Result right shift (int32 data type only)
|Output|out_t*|output|shape|Output tensor with broadcast shape if necessary
|===
@@ -514,7 +527,8 @@ for_each (index in shape) {
==== POW
-Elementwise input tensor 0 value raised to the power of input 1 tensor. Axis of size 1 will be broadcast, as necessary. Rank of input tensors must match.
+Elementwise input1 value raised to the power of input2.
+Axis of size 1 will be broadcast, as necessary. Rank of input tensors must match.
*Arguments:*
@@ -522,7 +536,7 @@ Elementwise input tensor 0 value raised to the power of input 1 tensor. Axis of
|Argument|Type|Name|Shape|Description
|Input|in_t*|input1|shape1|Input tensor from 1 to 4 dims
-|Input|in_t*|input2|shape2|Input tensor with the same rank as Input 0
+|Input|in_t*|input2|shape2|Input tensor with the same rank as input1
|Output|in_t*|output|shape|Output tensor of same type as the input tensors, with broadcast shape if necessary
|===
@@ -540,7 +554,8 @@ Only supported with floating point values.
==== SUB
-Elementwise subtraction of input tensor 0 and input tensor 1. Axis of size 1 will be broadcast as necessary. Rank of input tensors must match.
+Elementwise subtraction of input1 and input2.
+Axis of size 1 will be broadcast as necessary. Rank of input tensors must match.
*Arguments:*
@@ -548,7 +563,7 @@ Elementwise subtraction of input tensor 0 and input tensor 1. Axis of size 1 wil
|Argument|Type|Name|Shape|Description
|Input|in_t*|input1|shape1|Input tensor
-|Input|in_t*|input2|shape2|Input tensor with the same rank as Input 0
+|Input|in_t*|input2|shape2|Input tensor with the same rank as input1
|Output|in_t*|output|shape|Output tensor with broadcast shape if necessary
|===
diff --git a/chapters/ewise_ternary.adoc b/chapters/ewise_ternary.adoc
index 1724777..8dc9172 100644
--- a/chapters/ewise_ternary.adoc
+++ b/chapters/ewise_ternary.adoc
@@ -18,10 +18,10 @@ Elementwise select of the output based on a condition.
|===
|Argument|Type|Name|Shape|Description
-|Input|bool_t|input1|shape1|
-|Input|in_t*|input2|shape2|Input tensor from 1 to 4 dims
-|Input|in_t*|input3|shape3|Input tensor with the same rank as Input 0
-|Output|out_t*|output|shape|Output tensor of same type, as the input tensors, with broadcast shape if necessary
+|Input|bool_t|input1|shape1|Input selector tensor
+|Input|in_t*|input2|shape2|Input value tensor if input1 is True
+|Input|in_t*|input3|shape3|Input value tensor if input1 is False
+|Output|in_t*|output|shape|Output tensor of same type as input2 and input3, with broadcast shape if necessary
|===
*Quantization Parameters:*
@@ -39,22 +39,23 @@ for_each (index in shape) {
bool_t value1 = tensor_read<in_t>(input1, shape1, index1)
in_t value2 = tensor_read<in_t>(input2, shape2, index2)
in_t value3 = tensor_read<in_t>(input3, shape3, index3)
+ in_t acc = 0
if (value1 == True){
- in_t acc = value2
+ acc = value2
} else {
- in_t acc = value3
+ acc = value3
}
- tensor_write<out_t>(output, shape, index, acc)
+ tensor_write<in_t>(output, shape, index, acc)
}
----
*Supported Data Types:*
|===
-|Profile|Mode|bool_t|in_t|out_t
+|Profile|Mode|bool_t|in_t
-|Any|Boolean|bool|bool|bool
-|Any|signed 8|bool|aint8/int8|aint8/int8
-|Any|signed 16|bool|int16|int16
-|Any|signed 32|bool|int32|int32
-|MI, MT|float|bool|float|float
+|Any|Boolean|bool|bool
+|Any|signed 8|bool|aint8/int8
+|Any|signed 16|bool|int16
+|Any|signed 32|bool|int32
+|MI, MT|float|bool|float
|===
diff --git a/chapters/ewise_unary.adoc b/chapters/ewise_unary.adoc
index 34b64a9..7df32ba 100644
--- a/chapters/ewise_unary.adoc
+++ b/chapters/ewise_unary.adoc
@@ -19,7 +19,7 @@ Elementwise absolute value operation
|Argument|Type|Name|Shape|Description
|Input|in_t*|input1|shape|Input tensor
-|Output|out_t*|output|shape|Output tensor of same type, size as the input tensor
+|Output|in_t*|output|shape|Output tensor of same type, size as the input tensor
|===
*Operation Function:*
@@ -27,10 +27,10 @@ Elementwise absolute value operation
[source,c]
----
for_each (index in shape) {
- int32_t value1 = tensor_read<in_t>(input1, shape, index)
+ in_t value1 = tensor_read<in_t>(input1, shape, index)
if (value1 < 0)
- value1 = apply_sub<out_t>(0, value1)
- tensor_write<out_t>(output, shape, index, value1)
+ value1 = apply_sub<in_t>(0, value1)
+ tensor_write<in_t>(output, shape, index, value1)
}
----
@@ -39,8 +39,8 @@ for_each (index in shape) {
|===
|Profile|Mode|in_t|out_t
-|Any|signed 32|int32|int32
-|MI, MT|float|float|float
+|Any|signed 32|int32
+|MI, MT|float|float
|===
==== BITWISE_NOT
@@ -53,7 +53,7 @@ Elementwise bitwise NOT of input tensor.
|Argument|Type|Name|Shape|Description
|Input|in_t*|input1|shape|Input tensor
-|Output|out_t*|output|shape|Output tensor of same type, size as the input tensor
+|Output|in_t*|output|shape|Output tensor of same type, size as the input tensor
|===
*Quantization Parameters:*
@@ -65,20 +65,20 @@ None
[source,c]
----
for_each (index in shape) {
- int32_t value1 = tensor_read<in_t>(input1, shape, index)
- int32_t acc = ~value1
- tensor_write<out_t>(output, shape, index, acc)
+ in_t value1 = tensor_read<in_t>(input1, shape, index)
+ in_t acc = ~value1
+ tensor_write<in_t>(output, shape, index, acc)
}
----
*Supported Data Types:*
|===
-|Profile|Mode|in_t|out_t
+|Profile|Mode|in_t
-|Any|signed 8|aint8|aint8
-|Any|signed 16|int16|int16
-|Any|signed 32|int32|int32
+|Any|signed 8|aint8
+|Any|signed 16|int16
+|Any|signed 32|int32
|===
==== CEIL
@@ -91,15 +91,15 @@ Elementwise ceiling operation
|Argument|Type|Name|Shape|Description
|Input|in_t*|input1|shape|Input tensor
-|Output|out_t*|output|shape|Output tensor of same type, size as the input tensor
+|Output|in_t*|output|shape|Output tensor of same type, size as the input tensor
|===
*Supported Data Types:*
|===
-|Profile|Mode|in_t|out_t
+|Profile|Mode|in_t
-|MI, MT|float|float|float
+|MI, MT|float|float
|===
==== CLZ
@@ -112,7 +112,7 @@ Elementwise count leading zeros operation
|Argument|Type|Name|Shape|Description
|Input|in_t*|input1|shape|Input tensor
-|Output|out_t*|output|shape|Output tensor of same type, size as the input tensor
+|Output|in_t*|output|shape|Output tensor of same type, size as the input tensor
|===
*Operation Function:*
@@ -120,7 +120,8 @@ Elementwise count leading zeros operation
[source,c]
----
for_each (index in shape) {
- int32_t value1 = tensor_read<in_t>(input1, shape, index)
+ in_t acc = 0
+ in_t value1 = tensor_read<in_t>(input1, shape, index)
if (value1 == 0) {
acc = 32 // input1_width
}
@@ -132,15 +133,15 @@ for_each (index in shape) {
acc = acc + 1
}
}
- tensor_write<out_t>(output, shape, index, acc)
+ tensor_write<in_t>(output, shape, index, acc)
}
----
*Supported Data Types:*
|===
-|Profile|Mode|in_t|out_t
+|Profile|Mode|in_t
-|Any|signed 32|int32|int32
+|Any|signed 32|int32
|===
==== EXP
@@ -153,15 +154,15 @@ Elementwise e to the x operation
|Argument|Type|Name|Shape|Description
|Input|in_t*|input1|shape|Input tensor
-|Output|out_t*|output|shape|Output tensor of same type, size as the input tensor
+|Output|in_t*|output|shape|Output tensor of same type, size as the input tensor
|===
*Supported Data Types:*
|===
-|Profile|Mode|Input 0|Output
+|Profile|Mode|in_t
-|Any|float|float|float
+|Any|float|float
|===
==== FLOOR
@@ -174,15 +175,15 @@ Elementwise floor operation
|Argument|Type|Name|Shape|Description
|Input|in_t*|input1|shape|Input tensor
-|Output|out_t*|output|shape|Output tensor of same type, size as the input tensor
+|Output|in_t*|output|shape|Output tensor of same type, size as the input tensor
|===
*Supported Data Types:*
|===
-|Profile|Mode|in_t|out_t
+|Profile|Mode|in_t
-|MI, MT|float|float|float
+|MI, MT|float|float
|===
==== LOG
@@ -195,15 +196,15 @@ Elementwise natural logarithm operation
|Argument|Type|Name|Shape|Description
|Input|in_t*|input1|shape|Input tensor
-|Output|out_t*|output|shape|Output tensor of same type, size as the input tensor
+|Output|in_t*|output|shape|Output tensor of same type, size as the input tensor
|===
*Supported Data Types:*
|===
-|Profile|Mode|in_t|out_t
+|Profile|Mode|in_t
-|MI, MT|float|float|float
+|MI, MT|float|float
|===
==== LOGICAL_NOT
@@ -216,7 +217,7 @@ Elementwise logical NOT of input.
|Argument|Type|Name|Shape|Description
|Input|in_t*|input1|shape|Input tensor
-|Output|out_t*|output|shape|Output tensor of same type, size as the input tensor
+|Output|in_t*|output|shape|Output tensor of same type, size as the input tensor
|===
*Quantization Parameters:*
@@ -252,7 +253,7 @@ Elementwise negation operation
|Argument|Type|Name|Shape|Description
|Input|in_t*|input1|shape|Input tensor
-|Output|out_t*|output|shape|Output tensor of same type, size as the input tensor
+|Output|in_t*|output|shape|Output tensor of same type, size as the input tensor
|===
*Quantization Parameters:*
@@ -261,7 +262,7 @@ Elementwise negation operation
|Argument|Type|Name|Shape|Description
|Attribute|in_t|input1_zp|-|Input 1 zero point
-|Attribute|out_t|output_zp|-|Output zero point
+|Attribute|in_t|output_zp|-|Output zero point
|===
*Operation Function:*
@@ -269,24 +270,24 @@ Elementwise negation operation
[source,c]
----
assert(in_t == aint8_t || input_zp == 0) // Zero point only for asymmetric int8
-assert(out_t == aint8_t || output_zp == 0) // Zero point only for asymmetric int8
+assert(in_t == aint8_t || output_zp == 0) // Zero point only for asymmetric int8
for_each (index in shape) {
- int32_t value1 = tensor_read<in_t>(input1, shape, index)
- int32_t acc = appl_sub<int32_t>(0, value1 - input1_zp)
- acc = apply_clip(acc, minimum<out_t>, maximum<out_t>)
- tensor_write<out_t>(output + output_zp, shape, index, acc)
+ in_t value1 = tensor_read<in_t>(input1, shape, index)
+ in_t acc = apply_sub<in_t>(0, value1 - input1_zp)
+ acc = apply_clip(acc, minimum<in_t>, maximum<in_t>)
+ tensor_write<in_t>(output + output_zp, shape, index, acc)
}
----
*Supported Data Types:*
|===
-|Profile|Mode|Input 0|Output
+|Profile|Mode|in_t
-|Any|signed 8|aint8|aint8
-|Any|signed 16|int16|int16
-|Any|signed 32|int32|int32
-|MI, MT|float|float|float
+|Any|signed 8|aint8
+|Any|signed 16|int16
+|Any|signed 32|int32
+|MI, MT|float|float
|===
==== RECIPROCAL
@@ -299,15 +300,15 @@ Elementwise reciprocal operation. For integer operation, a TABLE should be used
|Argument|Type|Name|Shape|Description
|Input|in_t*|input1|shape|Input tensor
-|Output|out_t*|output|shape|Output tensor of same type, size as the input tensor
+|Output|in_t*|output|shape|Output tensor of same type, size as the input tensor
|===
*Supported Data Types:*
|===
-|Profile|Mode|Input 0|Output
+|Profile|Mode|in_t
-|MI, MT|float|float|float
+|MI, MT|float|float
|===
==== RSQRT
@@ -320,13 +321,13 @@ Elementwise reciprocal square root operation. For integer operation, a TABLE sho
|Argument|Type|Name|Shape|Description
|Input|in_t*|input1|shape|Input tensor
-|Output|out_t*|output|shape|Output tensor of same type, size as the input tensor
+|Output|in_t*|output|shape|Output tensor of same type, size as the input tensor
|===
*Supported Data Types:*
|===
-|Profile|Mode|Input 0|Output
+|Profile|Mode|in_t
-|MI, MT|float|float|float
-|=== \ No newline at end of file
+|MI, MT|float|float
+|===