diff options
author | Eric Kunze <eric.kunze@arm.com> | 2024-01-12 17:18:42 -0800 |
---|---|---|
committer | Eric Kunze <eric.kunze@arm.com> | 2024-01-31 06:00:03 +0000 |
commit | 526f6c7b5d20e967109ca92c8fc54c26c0438135 (patch) | |
tree | 7186a2dec336db8389cdf0cf8bacd60f9da4cf64 /pseudocode/operators/MUL.tosac | |
parent | 7e5d187c612fcc715ea3f7f0c900eb13af75a660 (diff) | |
download | specification-526f6c7b5d20e967109ca92c8fc54c26c0438135.tar.gz |
Add section of shape operators (tag: v0.90.0)
Rework of the shape operations. Shape operations are now done in shape specific
operators rather than being based on type.
shape_t is reworked to a list of size_t values.
Signed-off-by: Eric Kunze <eric.kunze@arm.com>
Change-Id: I2fca0728f9caa6a6fc34a8ce9e389bb581eea959
Diffstat (limited to 'pseudocode/operators/MUL.tosac')
-rw-r--r-- | pseudocode/operators/MUL.tosac | 44 |
1 files changed, 18 insertions, 26 deletions
diff --git a/pseudocode/operators/MUL.tosac b/pseudocode/operators/MUL.tosac index 078525e..5cc9f80 100644 --- a/pseudocode/operators/MUL.tosac +++ b/pseudocode/operators/MUL.tosac @@ -7,31 +7,23 @@ // copies and copies may only be made to the extent permitted // by a licensing agreement from ARM Limited. -if (in_out_t == shape_t) { - ERROR_IF(rank(shape) != 0 || rank(shape1) != 0 || rank(shape2) != 0); - shape_t value1 = tensor_read<shape_t>(input1, [], []); - shape_t value2 = tensor_read<shape_t>(input2, [], []); - shape_t result = value1 * value2; - tensor_write<shape_t>(output, [], [], result); -} else { - REQUIRE(0 <= shift && shift <= 63); - REQUIRE(in_t == int32_t || shift == 0); - ERROR_IF(shape != broadcast_shape(shape1, shape2)); - for_each(index in shape) { - dim_t index1 = apply_broadcast(shape, shape1, index); - dim_t index2 = apply_broadcast(shape, shape2, index); - in_t value1 = tensor_read<in_t>(input1, shape1, index1); - in_t value2 = tensor_read<in_t>(input2, shape2, index2); - out_t result; - if (in_t == i32_t && shift > 0) { - int64_t product = sign_extend<int64_t>(value1) * sign_extend<int64_t>(value2); - int64_t round = static_cast<int64_t>(1) << (shift - 1); - product = (product + round) >> shift; - REQUIRE(product >= minimum_s<i32_t> && product <= maximum_s<i32_t>) - result = product; - } else { - result = apply_mul_s(value1, value2); // low 32-bits of result for i32_t - } - tensor_write<out_t>(output, shape, index, result); +REQUIRE(0 <= shift && shift <= 63); +REQUIRE(in_t == int32_t || shift == 0); +ERROR_IF(shape != broadcast_shape(shape1, shape2)); +for_each(index in shape) { + shape_t index1 = apply_broadcast(shape, shape1, index); + shape_t index2 = apply_broadcast(shape, shape2, index); + in_t value1 = tensor_read<in_t>(input1, shape1, index1); + in_t value2 = tensor_read<in_t>(input2, shape2, index2); + out_t result; + if (in_t == i32_t && shift > 0) { + int64_t product = sign_extend<int64_t>(value1) * sign_extend<int64_t>(value2); 
+ int64_t round = static_cast<int64_t>(1) << (shift - 1); + product = (product + round) >> shift; + REQUIRE(product >= minimum_s<i32_t> && product <= maximum_s<i32_t>) + result = product; + } else { + result = apply_mul_s(value1, value2); // low 32-bits of result for i32_t } + tensor_write<out_t>(output, shape, index, result); } |