author     Jacob Bohlin <jacob.bohlin@arm.com>                    2020-08-13 10:21:34 +0200
committer  Fredrik Knutsson <fredrik.knutsson.hunnebo@gmail.com>  2020-08-13 13:34:44 +0000
commit     be733cf04bb262d4eee791d76f01cecd64ff9255 (patch)
tree       3a486deb25c12f647179a223ce97f135363b532e
parent     bf61268fae3d05ae7687067ae2ab2964067634c9 (diff)
download   ethos-u-vela-be733cf04bb262d4eee791d76f01cecd64ff9255.tar.gz
MLBEDSW-2639: Remove reverse_op_order attribute
Signed-off-by: Jacob Bohlin <jacob.bohlin@arm.com>
Change-Id: Id762ee2c03cd8f162cd0c450511ee5b2e0624586
-rw-r--r--  ethosu/vela/register_command_stream_generator.py |  4
-rw-r--r--  ethosu/vela/softmax.py                            | 10
2 files changed, 3 insertions, 11 deletions
diff --git a/ethosu/vela/register_command_stream_generator.py b/ethosu/vela/register_command_stream_generator.py
index 28b67656..09348811 100644
--- a/ethosu/vela/register_command_stream_generator.py
+++ b/ethosu/vela/register_command_stream_generator.py
@@ -483,9 +483,7 @@ def generate_register_command_stream(nng, sg, arch, verbose=False):
             shared_buffer = ps.shared_buffer

             if npu_block_type == NpuBlockType.ElementWise:
-                ifm2_broadcast = (
-                    IFM2Broadcast.ReverseOperandOrder if primary_op.attrs.get("reverse_op_order", False) else 0
-                )
+                ifm2_broadcast = 0

                 if cmd.ifm2_tensor and not ifm_ifm2_correct_order(cmd.ifm_tensor.shape, cmd.ifm2_tensor.shape):
                     # The scalar has to be the ifm2 tensor so switch the ifms
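With the attribute removed, the ElementWise case always starts from ifm2_broadcast = 0, and the reordering visible in this hunk is the scalar check in the context line above: if the first operand is the broadcast scalar, the generator swaps the IFMs so the scalar ends up in IFM2. Below is a minimal, self-contained sketch of that kind of check, assuming a simplified helper in the spirit of ifm_ifm2_correct_order(); it is illustrative only, not the actual Vela implementation.

def ifm_ifm2_order_is_correct(ifm_shape, ifm2_shape):
    # Hypothetical, simplified stand-in for ifm_ifm2_correct_order():
    # the order only needs to be swapped when the first operand is the
    # broadcast scalar (all dimensions 1) and the second one is not,
    # because the scalar operand is expected in IFM2.
    ifm_is_scalar = all(dim == 1 for dim in ifm_shape)
    ifm2_is_scalar = all(dim == 1 for dim in ifm2_shape)
    return not (ifm_is_scalar and not ifm2_is_scalar)

# Example: a scalar first operand is the case that triggers the swap.
assert ifm_ifm2_order_is_correct([1, 8, 8, 16], [1, 1, 1, 1])
assert not ifm_ifm2_order_is_correct([1, 1, 1, 1], [1, 8, 8, 16])

The swap itself, and whatever use the generator still makes of IFM2Broadcast.ReverseOperandOrder for it, happens in the code directly after this hunk and is unchanged by the patch.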
diff --git a/ethosu/vela/softmax.py b/ethosu/vela/softmax.py
index b4a3a099..0a589eb6 100644
--- a/ethosu/vela/softmax.py
+++ b/ethosu/vela/softmax.py
@@ -269,30 +269,24 @@ class SoftMax:

         # PASS 6 - Sub
         sub6_op = Operation("SubAct", self.op.name + "_sub6")
-        sub6_op.add_input_tensor(headroom_plus_one)
         sub6_op.add_input_tensor(
             create_const_tensor(
                 sub6_op.name + "_const", [1, 1, 1, 1], DataType.int32, [31], np.uint32, quantization=no_scale_quant
             ),
         )
-        # TODO: Adding this attribute to reverse the operand order is not ideal
-        # it should be handled automatically by register_command_stream_generator
-        # or added as an internal operator.
-        sub6_op.attrs["reverse_op_order"] = True
+        sub6_op.add_input_tensor(headroom_plus_one)
         reciprocal_right_shift = Tensor(reduce_sum_shape, DataType.int32, sub6_op.name + "_0")
         reciprocal_right_shift.quantization = no_scale_quant
         sub6_op.set_output_tensor(reciprocal_right_shift)

         # PASS 7 - SHL
         shl7_op = Operation("SHL", self.op.name + "_shl7")
-        shl7_op.add_input_tensor(reciprocal_right_shift)
         shl7_op.add_input_tensor(
             create_const_tensor(
                 shl7_op.name + "_const", [1, 1, 1, 1], DataType.int32, [1], np.uint32, quantization=no_scale_quant
             ),
         )
-        # TODO: See above
-        shl7_op.attrs["reverse_op_order"] = True
+        shl7_op.add_input_tensor(reciprocal_right_shift)
         constant_one = Tensor(reduce_sum_shape, DataType.int32, shl7_op.name + "0")
         constant_one.quantization = no_scale_quant
         shl7_op.set_output_tensor(constant_one)
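The softmax.py side is the other half of the cleanup: because Sub and SHL are not commutative, the pass now adds the constant as the first input instead of tagging the operation, so PASS 6 expresses 31 - headroom_plus_one and PASS 7 expresses 1 << reciprocal_right_shift directly in the graph. The following NumPy sketch shows the intended arithmetic with an arbitrary example value; the variable names mirror the tensor names in the hunk above, but the snippet is illustrative and not Vela code.

import numpy as np

# Arbitrary example value standing in for the headroom tensor computed earlier.
headroom_plus_one = np.array([[[[5]]]], dtype=np.int32)

# PASS 6 - Sub: the constant 31 is now the first input, the tensor the second,
# so the subtraction reads 31 - headroom_plus_one with no reordering needed.
reciprocal_right_shift = np.int32(31) - headroom_plus_one

# PASS 7 - SHL: the constant 1 is the first input, the shift amount the second,
# giving 1 << reciprocal_right_shift.
constant_one = np.left_shift(np.int32(1), reciprocal_right_shift)

print(reciprocal_right_shift)  # [[[[26]]]]
print(constant_one)            # [[[[67108864]]]]

This resolves the removed TODO comments: instead of an attribute that register_command_stream_generator.py has to interpret, the operand order is now encoded directly by the order in which the inputs are added.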