path: root/ethosu/vela/tflite_graph_optimiser.py
author     Patrik Gustavsson <patrik.gustavsson@arm.com>  2021-09-14 14:56:48 +0200
committer  Patrik Gustavsson <patrik.gustavsson@arm.com>  2021-09-15 13:21:16 +0200
commit     f436ada9caea87ec2dd686a92e41a15c1dcdeb1d (patch)
tree       5d3c136de06e0ee54833d2a379eb48cfd12ccd75 /ethosu/vela/tflite_graph_optimiser.py
parent     0957e3ef4b94f17efb67429c88bab8ba650f78e8 (diff)
download   ethos-u-vela-f436ada9caea87ec2dd686a92e41a15c1dcdeb1d.tar.gz
TOSA: Support for TABLE operator (int8)
Added support to map the TABLE operator to LUT.

Limitations:
- Only supported for int8
- TABLE input must be constant

This also adds support for TFLite legalisation of Tanh/Sigmoid (int8/uint8).

Signed-off-by: Patrik Gustavsson <patrik.gustavsson@arm.com>
Change-Id: I1a95f61fb02fdd42c4a690494418cc0765c8b275
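For orientation, below is a minimal sketch of how a constant int8 TABLE operator could be legalised through the convert_to_lut helper that this change relocates into graph_optimiser_util. The pass name convert_table_to_lut, the tensor accessors and the "table" LUT-name suffix are illustrative assumptions, not the code added by this commit.

# Sketch only: legalise a TABLE op whose table input is a compile-time
# constant into an Add-with-zero + LUT activation via the relocated helper.
# Accessor names (inputs, dtype, values) are assumptions for this sketch.
from .data_type import DataType
from .graph_optimiser_util import convert_to_lut


def convert_table_to_lut(op, arch, nng):
    ifm, table = op.inputs[0], op.inputs[1]
    # Stated limitations: int8 only, and the TABLE input must be constant.
    if ifm.dtype != DataType.int8 or table.values is None:
        return op
    # The 256 constant table entries become the LUT contents; convert_to_lut
    # rewrites the op into a no-op Add carrying the LUT as its activation.
    op.inputs = op.inputs[:1]
    return convert_to_lut(op, list(table.values.flatten()), "table")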
Diffstat (limited to 'ethosu/vela/tflite_graph_optimiser.py')
-rw-r--r--  ethosu/vela/tflite_graph_optimiser.py  30
1 file changed, 1 insertion, 29 deletions
diff --git a/ethosu/vela/tflite_graph_optimiser.py b/ethosu/vela/tflite_graph_optimiser.py
index b48cc7af..cf211de4 100644
--- a/ethosu/vela/tflite_graph_optimiser.py
+++ b/ethosu/vela/tflite_graph_optimiser.py
@@ -22,7 +22,6 @@ import uuid
import numpy as np
from . import fp_math
-from . import lut
from . import rewrite_graph
from . import scaling
from .api import NpuRoundingMode
@@ -33,6 +32,7 @@ from .ethos_u55_regs.ethos_u55_regs import resampling_mode
from .graph_optimiser_util import bypass_memory_only_ops
from .graph_optimiser_util import calc_explicit_padding
from .graph_optimiser_util import convert_depthwise_to_conv
+from .graph_optimiser_util import convert_to_lut
from .graph_optimiser_util import fix_sg_input_output
from .graph_optimiser_util import memory_only_ops
from .graph_optimiser_util import move_splitsliceread_to_consumer
@@ -858,34 +858,6 @@ def convert_lrelu_to_mul_max(op, arch):
return op
-def convert_to_lut(op, lut_values, lut_name):
- # Rewrite the operation by Add with scalar 0 + LUT activation
- ifm = op.inputs[0]
- if ifm is None:
- return op
- assert ifm.dtype.size_in_bytes() == 1
- op.type = Op.Add
- op.name = op.name + "_lut_" + lut_name
- # Mark as no-op to enable potential fusing optimizations
- op.attrs["is_nop"] = True
- # Create an input tensor containing scalar zero
- quantization = QuantizationParameters(0.0, 255.0)
- quantization.scale_f32 = ifm.quantization.scale_f32
- quantization.zero_point = 0
- tens = create_const_tensor(op.inputs[0].name + "_scalar0", [], ifm.dtype, [0], np.uint8, quantization=quantization)
- op.add_input_tensor(tens)
- op.ifm_shapes.append(Shape4D(tens.shape))
-
- # The LUT must be applied without any preceding rescaling (the LUT itself performs the rescale),
- # so even if the OFM has a different scale than the IFM, the generated OFM scale instructions
- # should be the same as the IFM
- op.forced_output_quantization = ifm.quantization
- lut_tensor = lut.create_lut_tensor(op.name + "_values", lut_values, DataType.int8)
- op.set_activation_lut(lut_tensor)
- op.set_ifm_ofm_shapes()
- return op
-
-
def convert_to_lut8(op, fn, fn_name):
# Converts op to a no-op + int8/uint8 LUT which is generated with the given function.
# fn is a function(real) -> real
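As a point of reference, here is a minimal standalone sketch of the table-generation idea described by the comment above: every possible int8 input code is dequantized, passed through fn, and requantized into a 256-entry table. The helper name, parameters and the simple per-tensor quantization are illustrative assumptions, not Vela's implementation.

# Sketch only: build an int8 LUT from a real-valued function fn.
import math


def make_int8_lut(fn, ifm_scale, ifm_zp, ofm_scale, ofm_zp):
    # One table entry per possible int8 input code (-128..127).
    values = []
    for code in range(-128, 128):
        real = ifm_scale * (code - ifm_zp)        # dequantize the input code
        out = fn(real)                            # e.g. math.tanh
        q = int(round(out / ofm_scale)) + ofm_zp  # requantize to the OFM scale
        values.append(max(-128, min(127, q)))     # clamp to the int8 range
    return values


# Example: table for an int8 tanh with symmetric scales of 1/128.
tanh_lut = make_int8_lut(math.tanh, 1.0 / 128, 0, 1.0 / 128, 0)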