diff options
author | Fredrik Svedberg <fredrik.svedberg@arm.com> | 2020-08-25 11:31:47 +0200 |
---|---|---|
committer | Fredrik Svedberg <fredrik.svedberg@arm.com> | 2020-08-27 16:39:29 +0200 |
commit | 880e73543120648f08886365a45e8b2ce32d5ff1 (patch) | |
tree | 420627fb8e7e5000f23f82c52ba6569f4a696813 /ethosu/vela/supported_operators.py | |
parent | a41cd4de2af1e43b76a2a33d78eeb2d90a88b757 (diff) | |
download | ethos-u-vela-880e73543120648f08886365a45e8b2ce32d5ff1.tar.gz |
[MLBEDSW-2846] Do not use NHCWB16 for reduce_sum int32
Added checks to avoid using NHCWB16 for reduce_sum int32, which makes
int8/uint8 softmax work.
Also enabled softmax graph rewrite by default and fixed a saturation
problem.
Change-Id: Ic01bd9ece7e5c3edb2900b7915cc747efe9e5760
Signed-off-by: Fredrik Svedberg <fredrik.svedberg@arm.com>
Diffstat (limited to 'ethosu/vela/supported_operators.py')
-rw-r--r-- | ethosu/vela/supported_operators.py | 6 |
1 file changed, 1 insertion, 5 deletions
diff --git a/ethosu/vela/supported_operators.py b/ethosu/vela/supported_operators.py index 567c05ca..f57cbee2 100644 --- a/ethosu/vela/supported_operators.py +++ b/ethosu/vela/supported_operators.py @@ -22,8 +22,7 @@ from .data_type import DataType class SupportedOperators: - def __init__(self, softmax_support): - self.softmax_support = softmax_support + def __init__(self): # Categorised lists of supported operators self.npu_pre_ops = set(("QuantizedResizeBilinear", "SplitSliceRead",)) self.convolution_ops = set(("Conv2DBiasAct", "Conv2D", "QuantizedConv2D",)) @@ -393,9 +392,6 @@ class SupportedOperators: def check_activation_ops(self, op): if op.type == "Softmax": - if not self.softmax_support: - return False - ifm_tensor = op.inputs[0] ofm_tensor = op.outputs[0] |