author    TatWai Chong <tatwai.chong@arm.com>    2022-07-25 04:01:58 +0000
committer TatWai Chong <tatwai.chong@arm.com>    2022-08-29 10:43:53 -0700
commit    fd62905d807b5976bea28b6d766e614c076faacf (patch)
tree      09ac9ebad511a49a80888e81ec2cde5ec54696b9
parent    c1a978391b16dbbe634bc3338562066a75a6c678 (diff)
Update framework test generator to support TF/TFL conv3d.

Add a new attribute `rank` to indicate the testing rank range of the
input tensor. Also fix a minor bug in the existing conv3d simulation,
and relax the rescale operator in the reference model to support 5-D
input.

Change-Id: Ib42fe513831dc83eb7f9af07e011787a6c752704
Signed-off-by: TatWai Chong <tatwai.chong@arm.com>
-rw-r--r--  reference_model/src/ops/tensor_ops.cc                 2
-rw-r--r--  reference_model/src/ops/type_conversion.cc            2
-rw-r--r--  verif/frameworks/arg_gen.py                          89
-rw-r--r--  verif/frameworks/tensor_gen.py                       85
-rw-r--r--  verif/frameworks/test_builder.py                     41
-rwxr-xr-x  verif/frameworks/tosa_verif_framework_generator.py  73
6 files changed, 238 insertions, 54 deletions
diff --git a/reference_model/src/ops/tensor_ops.cc b/reference_model/src/ops/tensor_ops.cc
index ef6dfa7..2cd94bb 100644
--- a/reference_model/src/ops/tensor_ops.cc
+++ b/reference_model/src/ops/tensor_ops.cc
@@ -171,7 +171,7 @@ int check_conv_attribute(tosa::TosaConvAttribute* attribute,
ASSERT_MSG(conv_dimension == 2 || conv_dimension == 3, "Unsupported convolution dimension")
- int32_t offset_d = 1 ? conv_dimension == 3 : 0;
+ int32_t offset_d = conv_dimension == 3 ? 1 : 0;
int32_t ID = conv_dimension == 3 ? input_shape[1] : 1;
int32_t IH = input_shape[1 + offset_d];
int32_t IW = input_shape[2 + offset_d];
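
Note: the removed expression `1 ? conv_dimension == 3 : 0` happened to compute the same value, since `1 ? x : 0` evaluates to `x` and the bool converts to 0 or 1; the rewritten ternary simply says what it means. A minimal Python sketch (illustrative, not reference-model code) of the index mapping this helper performs:

    # Illustrative sketch: how offset_d selects tensor indices for
    # 2D (NHWC) vs. 3D (NDHWC) convolutions.
    def spatial_dims(input_shape, conv_dimension):
        assert conv_dimension in (2, 3), "Unsupported convolution dimension"
        offset_d = 1 if conv_dimension == 3 else 0
        ID = input_shape[1] if conv_dimension == 3 else 1
        IH = input_shape[1 + offset_d]
        IW = input_shape[2 + offset_d]
        return ID, IH, IW

    assert spatial_dims([1, 8, 8, 4], 2) == (1, 8, 8)     # NHWC
    assert spatial_dims([1, 4, 8, 8, 4], 3) == (4, 8, 8)  # NDHWC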
diff --git a/reference_model/src/ops/type_conversion.cc b/reference_model/src/ops/type_conversion.cc
index ac54932..52de2e4 100644
--- a/reference_model/src/ops/type_conversion.cc
+++ b/reference_model/src/ops/type_conversion.cc
@@ -29,7 +29,7 @@ OpRescale<Rank, InDtype, OutDtype>::OpRescale(SubgraphTraverser* sgt_,
: GraphNode(sgt_, Op_RESCALE, id_)
{
setRequiredOperands(1, 1);
- setRequiredRank(0, 4);
+ setRequiredRank(0, 6);
INIT_ATTRIBUTE(Rescale);
}
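
In effect, rescale now accepts inputs up to rank 6, so the 5-D NDHWC tensors produced by the quantized conv3d tests pass the rank check. A one-line sketch (illustrative only, not TOSA source):

    # Illustrative: the relaxed rank constraint (was 0..4 before this patch).
    def rescale_rank_ok(input_shape):
        return 0 <= len(input_shape) <= 6

    assert rescale_rank_ok([1, 4, 8, 8, 4])  # 5-D NDHWC input is now in range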
diff --git a/verif/frameworks/arg_gen.py b/verif/frameworks/arg_gen.py
index fa4a652..5467fa2 100644
--- a/verif/frameworks/arg_gen.py
+++ b/verif/frameworks/arg_gen.py
@@ -120,6 +120,95 @@ class ArgGen:
)
return arg_list
+ # conv3d argument generators build the TF constants
+ def agConv3d(op, shapes, rng):
+ arg_list = []
+
+    # input shape = [N, ID, IH, IW, IC]
+ # Must be rank 5
+ if len(shapes) != 5:
+ return arg_list
+
+ if len(op["filter"]) < 3:
+ return arg_list
+
+ filter_d, filter_h, filter_w = op["filter"]
+
+ # strides, padding, dilations,
+ for stride_d in [1, 2]:
+ for stride_h in [1, 2]:
+ for stride_w in [1, 2]:
+ for padding in ["SAME", "VALID"]:
+ for dilation_d in [1, 2]:
+ for dilation_h in [1, 2]:
+ for dilation_w in [1, 2]:
+
+ # Disqualify argument combinations that would cause
+ # an illegal convolution
+ # fmt: off
+ if (padding == "VALID") and (
+ (shapes[1] - (filter_d - 1) * 2 - dilation_d) <= 0
+ or (shapes[2] - (filter_h - 1) * 2 - dilation_h) <= 0
+ or (shapes[3] - (filter_w - 1) * 2 - dilation_w) <= 0
+ ):
+ continue
+
+ if (
+ (shapes[1] - 1 - (filter_d - 1) * dilation_d) % stride_d
+ != 0
+ ) or (
+ (shapes[2] - 1 - (filter_h - 1) * dilation_h) % stride_h
+ != 0
+ ) or (
+ (shapes[3] - 1 - (filter_w - 1) * dilation_w) % stride_w
+ != 0
+ ):
+ # Not an exact integer output
+ continue
+ # fmt: on
+
+                            # TODO: investigate the TensorFlow error `CPU implementation of Conv3D
+                            # currently only supports dilated rates of 1.` Until that is resolved,
+                            # only test dilations = [1, 1, 1, 1, 1].
+ if (
+ (dilation_d != 1)
+ or (dilation_h != 1)
+ or (dilation_w != 1)
+ ):
+ continue
+
+                            # TensorFlow expects strides to be a list of ints with length >= 5.
+ # Strides and dilations in the batch and depth dimensions must be 1.
+ arg_list.append(
+ [
+ "_st{}{}{}{}{}_pad{}_dilat{}{}{}{}{}".format(
+ 1,
+ stride_d,
+ stride_h,
+ stride_w,
+ 1,
+ padding,
+ 1,
+ dilation_d,
+ dilation_h,
+ dilation_w,
+ 1,
+ ),
+ [
+ [1, stride_d, stride_h, stride_w, 1],
+ padding,
+ [
+ 1,
+ dilation_d,
+ dilation_h,
+ dilation_w,
+ 1,
+ ],
+ ],
+ ]
+ )
+ return arg_list
+
# conv2d argument generators build the TF constants
def agDepthwiseConv2d(op, shapes, rng):
arg_list = []
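
The disqualification checks in agConv3d follow the usual dilated-convolution output-size arithmetic; a small illustrative helper (not generator code) showing the VALID-padding formula being guarded:

    # Illustrative: standard output size of a dilated convolution with
    # VALID padding; the kernel's effective extent must fit in the input.
    def valid_out_size(in_size, kernel, stride, dilation):
        effective_k = (kernel - 1) * dilation + 1
        assert in_size >= effective_k, "illegal convolution: kernel does not fit"
        return (in_size - effective_k) // stride + 1

    assert valid_out_size(7, 3, 2, 1) == 3  # e.g. D=7, KD=3, stride 2, dilation 1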
diff --git a/verif/frameworks/tensor_gen.py b/verif/frameworks/tensor_gen.py
index e57175b..3e70c87 100644
--- a/verif/frameworks/tensor_gen.py
+++ b/verif/frameworks/tensor_gen.py
@@ -92,22 +92,11 @@ class TGen:
return tf_placeholders, tf_consts
@staticmethod
- def tgConv2d(op, ifm_shape, dtype, rng):
+ def tgConvCommon(op, ifm_shape, filter_shape, out_channels, dtype, rng):
# Take the shape and generate an input and filter
tf_placeholders = []
tf_consts = []
-
- # Require rank 4 shape
- if len(ifm_shape) != 4:
- return [], []
-
- filter_h, filter_w = op["filter"]
-
- # TODO: Hard-code the test by making the OFM depth 2x the IFM depth.
- # Could randomize this in the future.
- filter_shape = (filter_h, filter_w, ifm_shape[3], ifm_shape[3] * 2)
-
tf_placeholders.append(("placeholder_0", TGen.getRand(ifm_shape, dtype, rng)))
tf_consts.append(("const_0", TGen.getRand(filter_shape, dtype, rng)))
@@ -118,17 +107,13 @@ class TGen:
if bias:
# bias is 1D and size == output channels
- bias_shape = (ifm_shape[3] * 2,)
+ bias_shape = (out_channels,)
tf_consts.append(("const_1", TGen.getRand(bias_shape, dtype, rng)))
return tf_placeholders, tf_consts
@staticmethod
- def tgDepthwiseConv2d(op, ifm_shape, dtype, rng):
-
- # Take the shape and generate an input and filter
- tf_placeholders = []
- tf_consts = []
+ def tgConv2d(op, ifm_shape, dtype, rng):
# Require rank 4 shape
if len(ifm_shape) != 4:
@@ -136,32 +121,32 @@ class TGen:
filter_h, filter_w = op["filter"]
- # TODO: Hard-code the test by making the channel_multiplier=2. Could randomize
- # this in the future.
- filter_shape = (filter_h, filter_w, ifm_shape[3], 2)
+ # TODO: Hard-code the test by making the OFM depth 2x the IFM depth.
+ # Could randomize this in the future.
+ out_channels = ifm_shape[3] * 2
+ filter_shape = (filter_h, filter_w, ifm_shape[3], out_channels)
- tf_placeholders.append(("placeholder_0", TGen.getRand(ifm_shape, dtype, rng)))
- tf_consts.append(("const_0", TGen.getRand(filter_shape, dtype, rng)))
+ return TGen.tgConvCommon(op, ifm_shape, filter_shape, out_channels, dtype, rng)
- try:
- bias = op["bias"]
- except KeyError:
- bias = False
+ @staticmethod
+ def tgDepthwiseConv2d(op, ifm_shape, dtype, rng):
- if bias:
- # bias is 1D and size == output channels
- bias_shape = (ifm_shape[3] * 2,)
- tf_consts.append(("const_1", TGen.getRand(bias_shape, dtype, rng)))
+ # Require rank 4 shape
+ if len(ifm_shape) != 4:
+ return [], []
- return tf_placeholders, tf_consts
+ filter_h, filter_w = op["filter"]
+
+ # TODO: Hard-code the test by making the channel_multiplier=2.
+ # Could randomize this in the future.
+ filter_shape = (filter_h, filter_w, ifm_shape[3], 2)
+ out_channels = ifm_shape[3] * 2
+
+ return TGen.tgConvCommon(op, ifm_shape, filter_shape, out_channels, dtype, rng)
@staticmethod
def tgTransposeConv2d(op, ifm_shape, dtype, rng):
- # Take the shape and generate an input and filter
- tf_placeholders = []
- tf_consts = []
-
# Require rank 4 shape
if len(ifm_shape) != 4:
return [], []
@@ -170,22 +155,26 @@ class TGen:
# TODO: Hard-code the test by making the IFM depth 2x the OFM depth.
# Could randomize this in the future.
- filter_shape = (filter_h, filter_w, ifm_shape[3] * 2, ifm_shape[3])
+ out_channels = ifm_shape[3] * 2
+ filter_shape = (filter_h, filter_w, out_channels, ifm_shape[3])
- tf_placeholders.append(("placeholder_0", TGen.getRand(ifm_shape, dtype, rng)))
- tf_consts.append(("const_0", TGen.getRand(filter_shape, dtype, rng)))
+ return TGen.tgConvCommon(op, ifm_shape, filter_shape, out_channels, dtype, rng)
- try:
- bias = op["bias"]
- except KeyError:
- bias = False
+ @staticmethod
+ def tgConv3d(op, ifm_shape, dtype, rng):
- if bias:
- # bias is 1D and size == output channels
- bias_shape = ifm_shape[3] * 2
- tf_consts.append(("const_1", TGen.getRand(bias_shape, dtype, rng)))
+ # Require rank 5 shape
+ if len(ifm_shape) != 5:
+ return [], []
- return tf_placeholders, tf_consts
+ filter_d, filter_h, filter_w = op["filter"]
+
+ # TODO: Hard-code the test by making the OFM depth 2x the IFM depth.
+ # Could randomize this in the future.
+ out_channels = ifm_shape[4] * 2
+ filter_shape = (filter_d, filter_h, filter_w, ifm_shape[4], out_channels)
+
+ return TGen.tgConvCommon(op, ifm_shape, filter_shape, out_channels, dtype, rng)
@staticmethod
def tgPooling(op, shapes, dtype, rng):
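
With the tgConvCommon refactor, each wrapper now only computes its filter shape and output channel count before delegating. For reference, the filter layouts assumed here follow TensorFlow's conventions (illustrative summary, not generator code):

    # Filter layouts handed to tgConvCommon (TensorFlow conventions).
    FILTER_LAYOUTS = {
        "conv2d":           "[KH, KW, IC, OC]",
        "depthwise_conv2d": "[KH, KW, IC, channel_multiplier]",
        "transpose_conv2d": "[KH, KW, OC, IC]",
        "conv3d":           "[KD, KH, KW, IC, OC]",
    }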
diff --git a/verif/frameworks/test_builder.py b/verif/frameworks/test_builder.py
index 84e4d46..0468518 100644
--- a/verif/frameworks/test_builder.py
+++ b/verif/frameworks/test_builder.py
@@ -479,6 +479,47 @@ class TBuilder:
)
return bias_add_op
+ class Conv3d:
+ def __init__(self, weight, strides, padding, dilations, name):
+ self.weight = weight
+ self.strides = strides
+ self.padding = padding
+ self.dilations = dilations
+ self.result_name = name
+
+ def eval(self, input):
+ return tf.nn.conv3d(
+ input,
+ self.weight,
+ self.strides,
+ self.padding,
+ data_format="NDHWC",
+ dilations=self.dilations,
+ name=self.result_name,
+ )
+
+ class Conv3dWithBias:
+ def __init__(self, weight, bias, strides, padding, dilations, name):
+ self.weight = weight
+ self.bias = bias
+ self.strides = strides
+ self.padding = padding
+ self.dilations = dilations
+ self.result_name = name
+
+ def eval(self, input):
+ conv3d_op = tf.nn.conv3d(
+ input,
+ self.weight,
+ self.strides,
+ self.padding,
+ data_format="NDHWC",
+ dilations=self.dilations,
+ name="conv3d",
+ )
+ bias_add_op = tf.nn.bias_add(conv3d_op, self.bias, name=self.result_name)
+ return bias_add_op
+
class DepthwiseConv2d:
def __init__(self, weight, strides, padding, dilations, name):
self.weight = weight
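
A minimal usage sketch of the new builder (the shapes and import path are illustrative assumptions, not from this patch):

    import tensorflow as tf
    from frameworks.test_builder import TBuilder  # assumed import path

    ifm = tf.random.uniform([1, 4, 8, 8, 2])      # NDHWC input
    weight = tf.random.uniform([2, 3, 3, 2, 4])   # [KD, KH, KW, IC, OC]
    conv = TBuilder.Conv3d(weight, [1, 1, 1, 1, 1], "SAME", [1, 1, 1, 1, 1], "result")
    ofm = conv.eval(ifm)                          # -> shape [1, 4, 8, 8, 4]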
diff --git a/verif/frameworks/tosa_verif_framework_generator.py b/verif/frameworks/tosa_verif_framework_generator.py
index 097fe1f..fb7f35a 100755
--- a/verif/frameworks/tosa_verif_framework_generator.py
+++ b/verif/frameworks/tosa_verif_framework_generator.py
@@ -60,6 +60,7 @@ TYPE_FHIB = [tf.float32, tf.float16, tf.int32, tf.bool]
# processing in createDynamicOpLists)
# 'bias': boolean indicating that there is a bias component to be generated
# 'qtypes': List of QuantType quantized types to generate for this op
+# 'rank': tuple (lowest rank, highest rank) giving the input tensor rank range to test for this op
TF_OP_LIST = {
"add": {
@@ -433,6 +434,37 @@ TF_OP_LIST = {
"bias": True,
"template": True,
},
+ "conv3d_TEMPLATE": {
+ "operands": (1, 1),
+ "build_fcn": (TBuilder.Conv3d, TGen.tgConv3d, ArgGen.agConv3d),
+ "types": {
+ "tf": [tf.float32],
+ "tflite": [
+ tf.float32,
+ QuantType.CONV_U8_U8,
+ QuantType.CONV_I8_I8,
+ # Quantization to 16x8-bit not yet supported by tflite.
+ ],
+ },
+ "template": True,
+ "rank": (1, 5),
+ },
+ "conv3d_bias_TEMPLATE": {
+ "operands": (1, 2),
+ "build_fcn": (TBuilder.Conv3dWithBias, TGen.tgConv3d, ArgGen.agConv3d),
+ "types": {
+ "tf": [tf.float32],
+ "tflite": [
+ tf.float32,
+ QuantType.CONV_U8_U8,
+ QuantType.CONV_I8_I8,
+ # Quantization to 16x8-bit not yet supported by tflite.
+ ],
+ },
+ "bias": True,
+ "template": True,
+ "rank": (1, 5),
+ },
"depthwise_conv2d_TEMPLATE": {
"operands": (1, 1),
"build_fcn": (
@@ -762,6 +794,9 @@ shape_list = [
(1, 4, 8, 19),
(1, 32, 32, 8),
(1, 7, 7, 9),
+ (2, 2, 7, 7, 2),
+ (1, 4, 8, 21, 17),
+ (3, 32, 16, 16, 5),
]
@@ -776,13 +811,13 @@ def gen_rand_shapes(args):
max_total_volume = 32 * 32 * 4
shape_list = []
- # Only iterate over ranks 2, 3, and 4
- for rank in range(2, 5):
+ # Only iterate over ranks 2, 3, 4, and 5
+ for rank in range(2, 6):
for n in range(args.random_shapes):
new_shape = rng.integers(1, 48, size=rank)
- # Set the batch dimension on 4D objects to 1
- if rank == 4:
+ # Set the batch dimension on 4D or 5D objects to 1
+ if rank == 4 or rank == 5:
new_shape[0] = 1
# Limit the total shape volume and throw out any
@@ -1190,6 +1225,16 @@ def build_const_net(
op = TF_OP_LIST[op_name]
op_fcn, tensor_gen_fcn, arg_gen_fcn = op["build_fcn"]
+ try:
+ rank_lo, rank_hi = op["rank"]
+ except KeyError:
+ # Default the testing rank range to (1, 4).
+ rank_lo = 1
+ rank_hi = 4
+
+ if len(curr_shape) not in range(rank_lo, rank_hi + 1):
+ return
+
addl_args_tuple = arg_gen_fcn(op, curr_shape, rng)
for desc, addl_args in addl_args_tuple:
# Only filter on the full test_name, not the output directory
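
The gate above, distilled (illustrative; the default mirrors the patch):

    # Illustrative distillation of the rank gate in build_const_net.
    def shape_in_rank_range(op, shape):
        rank_lo, rank_hi = op.get("rank", (1, 4))  # default when "rank" is absent
        return rank_lo <= len(shape) <= rank_hi

    assert shape_in_rank_range({"rank": (1, 5)}, (1, 4, 8, 21, 17))
    assert not shape_in_rank_range({}, (1, 4, 8, 21, 17))  # rank 5 vs. default (1, 4)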
@@ -1335,6 +1380,13 @@ def createDynamicOpLists():
[5, 5],
]
+ # dim = [D, H, W]
+ KERNELS_3D = [
+ [1, 1, 1],
+ [2, 3, 3],
+ [3, 5, 5],
+ ]
+
TEMPLATE_LIST = [
"conv2d",
"conv2d_bias",
@@ -1347,6 +1399,11 @@ def createDynamicOpLists():
"transpose_conv2d",
]
+ TEMPLATE_LIST_CONV3D = [
+ "conv3d",
+ "conv3d_bias",
+ ]
+
for t in TEMPLATE_LIST:
for k in KERNELS:
testName = "{}_{}x{}".format(t, k[0], k[1])
@@ -1354,6 +1411,14 @@ def createDynamicOpLists():
TF_OP_LIST[testName]["filter"] = k
TF_OP_LIST[testName]["template"] = False
+ # Expand the conv3d templates separately: the 2D kernel list above doesn't apply to 3-D kernels.
+ for t in TEMPLATE_LIST_CONV3D:
+ for k in KERNELS_3D:
+ testName = "{}_{}x{}x{}".format(t, k[0], k[1], k[2])
+ TF_OP_LIST[testName] = TF_OP_LIST["{}_TEMPLATE".format(t)].copy()
+ TF_OP_LIST[testName]["filter"] = k
+ TF_OP_LIST[testName]["template"] = False
+
# Delete any templates after having created any dynamic ops
# This is a two-pass operation because it's bad practice to delete
# keys from dictionaries while iterating
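
After expansion, each conv3d template yields one concrete op entry per 3-D kernel; for example (illustrative):

    # Illustrative: names produced for the conv3d templates above.
    for name in ("conv3d", "conv3d_bias"):
        for kd, kh, kw in ([1, 1, 1], [2, 3, 3], [3, 5, 5]):
            print("{}_{}x{}x{}".format(name, kd, kh, kw))
    # -> conv3d_1x1x1, conv3d_2x3x3, conv3d_3x5x5, conv3d_bias_1x1x1, ...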