From fd62905d807b5976bea28b6d766e614c076faacf Mon Sep 17 00:00:00 2001
From: TatWai Chong
Date: Mon, 25 Jul 2022 04:01:58 +0000
Subject: Update framework test generator to support TF/TFL conv3d.

Add a new attribute `rank` to indicate the testing dimension range of the
input tensor. Also fix a minor bug in the existing conv3d simulation, and
relax the rescale operator in the reference model to support 5-D input.

Change-Id: Ib42fe513831dc83eb7f9af07e011787a6c752704
Signed-off-by: TatWai Chong
---
 verif/frameworks/tensor_gen.py | 85 ++++++++++++++++++------------------------
 1 file changed, 37 insertions(+), 48 deletions(-)
(limited to 'verif/frameworks/tensor_gen.py')

diff --git a/verif/frameworks/tensor_gen.py b/verif/frameworks/tensor_gen.py
index e57175b..3e70c87 100644
--- a/verif/frameworks/tensor_gen.py
+++ b/verif/frameworks/tensor_gen.py
@@ -92,22 +92,11 @@ class TGen:
         return tf_placeholders, tf_consts
 
     @staticmethod
-    def tgConv2d(op, ifm_shape, dtype, rng):
+    def tgConvCommon(op, ifm_shape, filter_shape, out_channels, dtype, rng):
 
         # Take the shape and generate an input and filter
         tf_placeholders = []
         tf_consts = []
-
-        # Require rank 4 shape
-        if len(ifm_shape) != 4:
-            return [], []
-
-        filter_h, filter_w = op["filter"]
-
-        # TODO: Hard-code the test by making the OFM depth 2x the IFM depth.
-        # Could randomize this in the future.
-        filter_shape = (filter_h, filter_w, ifm_shape[3], ifm_shape[3] * 2)
-
         tf_placeholders.append(("placeholder_0", TGen.getRand(ifm_shape, dtype, rng)))
         tf_consts.append(("const_0", TGen.getRand(filter_shape, dtype, rng)))
 
@@ -118,17 +107,13 @@ class TGen:
 
         if bias:
             # bias is 1D and size == output channels
-            bias_shape = (ifm_shape[3] * 2,)
+            bias_shape = (out_channels,)
             tf_consts.append(("const_1", TGen.getRand(bias_shape, dtype, rng)))
 
         return tf_placeholders, tf_consts
 
     @staticmethod
-    def tgDepthwiseConv2d(op, ifm_shape, dtype, rng):
-
-        # Take the shape and generate an input and filter
-        tf_placeholders = []
-        tf_consts = []
+    def tgConv2d(op, ifm_shape, dtype, rng):
 
         # Require rank 4 shape
         if len(ifm_shape) != 4:
@@ -136,32 +121,32 @@ class TGen:
 
         filter_h, filter_w = op["filter"]
 
-        # TODO: Hard-code the test by making the channel_multiplier=2. Could randomize
-        # this in the future.
-        filter_shape = (filter_h, filter_w, ifm_shape[3], 2)
+        # TODO: Hard-code the test by making the OFM depth 2x the IFM depth.
+        # Could randomize this in the future.
+        out_channels = ifm_shape[3] * 2
+        filter_shape = (filter_h, filter_w, ifm_shape[3], out_channels)
 
-        tf_placeholders.append(("placeholder_0", TGen.getRand(ifm_shape, dtype, rng)))
-        tf_consts.append(("const_0", TGen.getRand(filter_shape, dtype, rng)))
+        return TGen.tgConvCommon(op, ifm_shape, filter_shape, out_channels, dtype, rng)
 
-        try:
-            bias = op["bias"]
-        except KeyError:
-            bias = False
+    @staticmethod
+    def tgDepthwiseConv2d(op, ifm_shape, dtype, rng):
 
-        if bias:
-            # bias is 1D and size == output channels
-            bias_shape = (ifm_shape[3] * 2,)
-            tf_consts.append(("const_1", TGen.getRand(bias_shape, dtype, rng)))
+        # Require rank 4 shape
+        if len(ifm_shape) != 4:
+            return [], []
 
-        return tf_placeholders, tf_consts
+        filter_h, filter_w = op["filter"]
+
+        # TODO: Hard-code the test by making the channel_multiplier=2.
+        # Could randomize this in the future.
+        filter_shape = (filter_h, filter_w, ifm_shape[3], 2)
+        out_channels = ifm_shape[3] * 2
+
+        return TGen.tgConvCommon(op, ifm_shape, filter_shape, out_channels, dtype, rng)
 
     @staticmethod
     def tgTransposeConv2d(op, ifm_shape, dtype, rng):
 
-        # Take the shape and generate an input and filter
-        tf_placeholders = []
-        tf_consts = []
-
         # Require rank 4 shape
         if len(ifm_shape) != 4:
             return [], []
@@ -170,22 +155,26 @@ class TGen:
 
         # TODO: Hard-code the test by making the IFM depth 2x the OFM depth.
         # Could randomize this in the future.
-        filter_shape = (filter_h, filter_w, ifm_shape[3] * 2, ifm_shape[3])
+        out_channels = ifm_shape[3] * 2
+        filter_shape = (filter_h, filter_w, out_channels, ifm_shape[3])
 
-        tf_placeholders.append(("placeholder_0", TGen.getRand(ifm_shape, dtype, rng)))
-        tf_consts.append(("const_0", TGen.getRand(filter_shape, dtype, rng)))
+        return TGen.tgConvCommon(op, ifm_shape, filter_shape, out_channels, dtype, rng)
 
-        try:
-            bias = op["bias"]
-        except KeyError:
-            bias = False
+    @staticmethod
+    def tgConv3d(op, ifm_shape, dtype, rng):
 
-        if bias:
-            # bias is 1D and size == output channels
-            bias_shape = ifm_shape[3] * 2
-            tf_consts.append(("const_1", TGen.getRand(bias_shape, dtype, rng)))
+        # Require rank 5 shape
+        if len(ifm_shape) != 5:
+            return [], []
 
-        return tf_placeholders, tf_consts
+        filter_d, filter_h, filter_w = op["filter"]
+
+        # TODO: Hard-code the test by making the OFM depth 2x the IFM depth.
+        # Could randomize this in the future.
+        out_channels = ifm_shape[3] * 2
+        filter_shape = (filter_d, filter_h, filter_w, ifm_shape[3], out_channels)
+
+        return TGen.tgConvCommon(op, ifm_shape, filter_shape, out_channels, dtype, rng)
 
     @staticmethod
     def tgPooling(op, shapes, dtype, rng):
--
cgit v1.2.1
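A minimal sketch of the shapes the new tgConv3d path derives, following the diff
above; the op dictionary and the rank-5 input shape below are illustrative
assumptions, not values taken from the patch or the test suite:

# Illustrative values only: the op dict and ifm_shape are assumed examples.
op = {"filter": [2, 3, 3], "bias": True}    # filter_d, filter_h, filter_w
ifm_shape = [1, 8, 8, 4, 4]                 # rank-5 input tensor shape

filter_d, filter_h, filter_w = op["filter"]
out_channels = ifm_shape[3] * 2             # 8 for this example
filter_shape = (filter_d, filter_h, filter_w, ifm_shape[3], out_channels)
# filter_shape == (2, 3, 3, 4, 8). tgConvCommon then generates the input
# placeholder, the filter constant, and (because "bias" is set) a bias
# constant of shape (out_channels,) == (8,).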