From 015c3550301fdc6d37606995322e144df0940ba2 Mon Sep 17 00:00:00 2001
From: Jeremy Johnson
Date: Wed, 23 Feb 2022 12:15:03 +0000
Subject: Add framework unit test generation scripts

Also includes fixes in tosa_verif_run_tests:
* support for no-color printing
* stop double printing of error messages on verbose
* differentiate result code pass from results check

Change-Id: I26e957013a8d18f7d3d3691067dfb778008a1eea
Signed-off-by: Jeremy Johnson
---
 verif/frameworks/tensor_gen.py | 264 +++++++++++++++++++++++++++++++++++++++++
 1 file changed, 264 insertions(+)
 create mode 100644 verif/frameworks/tensor_gen.py

diff --git a/verif/frameworks/tensor_gen.py b/verif/frameworks/tensor_gen.py
new file mode 100644
index 0000000..e57175b
--- /dev/null
+++ b/verif/frameworks/tensor_gen.py
@@ -0,0 +1,264 @@
+# Copyright (c) 2020-2022, ARM Limited.
+# SPDX-License-Identifier: Apache-2.0
+import numpy as np
+import tensorflow as tf
+
+# FIXME: replace hardcoded '* 2' with random integers, where possible
+
+# The scaling factor for random numbers generated in input tensors. The
+# random numbers are calculated as:
+# (np.random.rand() - RAND_SHIFT_FACTOR) * RAND_SCALE_FACTOR
+# FIXME: improve range here
+RAND_SCALE_FACTOR = 4.0
+# Amount subtracted from random numbers to centre them about zero
+RAND_SHIFT_FACTOR = 0.5
+
+RAND_INT_MIN = -128
+RAND_INT_MAX = 128
+
+
+class TGen:
+    """A collection of functions to build tensor value arguments for an operator"""
+
+    def __init__(self):
+        pass
+
+    @staticmethod
+    def getRand(shape, dtype, rng):
+        if dtype == tf.float32:
+            return np.float32(
+                (rng.random(size=shape) - RAND_SHIFT_FACTOR) * RAND_SCALE_FACTOR
+            )
+        if dtype == tf.float16:
+            return np.float16(
+                (rng.random(size=shape) - RAND_SHIFT_FACTOR) * RAND_SCALE_FACTOR
+            )
+        if dtype == tf.int32:
+            return np.int32(
+                rng.integers(low=RAND_INT_MIN, high=RAND_INT_MAX, size=shape)
+            )
+        if dtype == tf.uint32:
+            return np.uint32(rng.integers(low=0, high=RAND_INT_MAX, size=shape))
+        if dtype == tf.bool:
+            return np.bool_(rng.choice(a=[False, True], size=shape))
+
+        raise Exception("Unsupported type: {}".format(dtype))
+
+    @staticmethod
+    def tgBasic(op, shape, dtype, rng):
+        # Build random tensor placeholder node args of a given shape
+        pl, const = op["operands"]
+
+        tf_placeholders = []
+        tf_consts = []
+
+        for i in range(pl):
+            tf_placeholders.append(
+                ("placeholder_{}".format(i), TGen.getRand(shape, dtype, rng))
+            )
+
+        for i in range(const):
+            tf_consts.append(("const_{}".format(i), TGen.getRand(shape, dtype, rng)))
+
+        return tf_placeholders, tf_consts
+
+    @staticmethod
+    def tgBFuzz(op, shape, dtype, rng):
+        # Build random tensor placeholder node args of a given shape, fuzzing
+        # one randomly chosen argument by setting one of its dimensions to 1
+        # to force broadcasting
+
+        pl, const = op["operands"]
+
+        assert const == 0
+
+        fuzz_arg = rng.integers(0, pl + const)
+        fuzz_idx = rng.integers(0, len(shape))
+
+        tf_placeholders = []
+        tf_consts = []
+        for i in range(pl):
+            if i == fuzz_arg:
+                # Insert the broadcast in one dimension index
+                s_fuzz = list(shape)
+                s_fuzz[fuzz_idx] = 1
+                s_fuzz = tuple(s_fuzz)
+                i_shape = s_fuzz
+            else:
+                i_shape = shape
+            tf_placeholders.append(
+                ("placeholder_{}".format(i), TGen.getRand(i_shape, dtype, rng))
+            )
+
+        return tf_placeholders, tf_consts
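+
+    # Example (hypothetical values, for illustration only): with
+    # op = {"operands": (2, 0)}, shape = (2, 3) and a seeded rng,
+    # tgBFuzz returns something like
+    #     [("placeholder_0", <2x3 array>), ("placeholder_1", <2x1 array>)], []
+    # where one randomly chosen placeholder has one dimension set to 1 so
+    # that broadcasting is exercised in the generated test.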
op["filter"] + + # TODO: Hard-code the test by making the OFM depth 2x the IFM depth. + # Could randomize this in the future. + filter_shape = (filter_h, filter_w, ifm_shape[3], ifm_shape[3] * 2) + + tf_placeholders.append(("placeholder_0", TGen.getRand(ifm_shape, dtype, rng))) + tf_consts.append(("const_0", TGen.getRand(filter_shape, dtype, rng))) + + try: + bias = op["bias"] + except KeyError: + bias = False + + if bias: + # bias is 1D and size == output channels + bias_shape = (ifm_shape[3] * 2,) + tf_consts.append(("const_1", TGen.getRand(bias_shape, dtype, rng))) + + return tf_placeholders, tf_consts + + @staticmethod + def tgDepthwiseConv2d(op, ifm_shape, dtype, rng): + + # Take the shape and generate an input and filter + tf_placeholders = [] + tf_consts = [] + + # Require rank 4 shape + if len(ifm_shape) != 4: + return [], [] + + filter_h, filter_w = op["filter"] + + # TODO: Hard-code the test by making the channel_multiplier=2. Could randomize + # this in the future. + filter_shape = (filter_h, filter_w, ifm_shape[3], 2) + + tf_placeholders.append(("placeholder_0", TGen.getRand(ifm_shape, dtype, rng))) + tf_consts.append(("const_0", TGen.getRand(filter_shape, dtype, rng))) + + try: + bias = op["bias"] + except KeyError: + bias = False + + if bias: + # bias is 1D and size == output channels + bias_shape = (ifm_shape[3] * 2,) + tf_consts.append(("const_1", TGen.getRand(bias_shape, dtype, rng))) + + return tf_placeholders, tf_consts + + @staticmethod + def tgTransposeConv2d(op, ifm_shape, dtype, rng): + + # Take the shape and generate an input and filter + tf_placeholders = [] + tf_consts = [] + + # Require rank 4 shape + if len(ifm_shape) != 4: + return [], [] + + filter_h, filter_w = op["filter"] + + # TODO: Hard-code the test by making the IFM depth 2x the OFM depth. + # Could randomize this in the future. 
+
+    @staticmethod
+    def tgMatmul(op, ifm_shape, dtype, rng):
+        # Take the shape and generate the lhs and rhs tensors
+        tf_placeholders = []
+        tf_consts = []
+
+        if len(ifm_shape) < 2:
+            return [], []
+
+        # For ifm_shape = [..., N, K],
+        # generate an rhs tensor with shape [..., K, 2 * N]
+        tf_placeholders.append(("placeholder_0", TGen.getRand(ifm_shape, dtype, rng)))
+
+        shape_rhs = list(ifm_shape)
+        shape_rhs[-2] = ifm_shape[-1]
+        shape_rhs[-1] = ifm_shape[-2] * 2
+        tf_placeholders.append(
+            (
+                "placeholder_1",
+                TGen.getRand(shape_rhs, dtype, rng),
+            )
+        )
+
+        return tf_placeholders, tf_consts
+
+    @staticmethod
+    def tgOneHot(op, shape, dtype, rng):
+        # Build random tensor placeholder node args of a given shape
+        pl, const = op["operands"]
+
+        assert pl == 3 and const == 1
+
+        tf_placeholders = []
+        tf_consts = []
+
+        # depth
+        depth = np.int32(rng.integers(low=1, high=32, size=None))
+        tf_consts.append(("const_0", depth))
+
+        # indices
+        indices = np.int32(rng.integers(low=0, high=depth, size=shape))
+        tf_placeholders.append(("placeholder_0", indices))
+
+        # on_value
+        tf_placeholders.append(("placeholder_1", TGen.getRand(None, dtype, rng)))
+
+        # off_value
+        tf_placeholders.append(("placeholder_2", TGen.getRand(None, dtype, rng)))
+
+        return tf_placeholders, tf_consts
+
+    @staticmethod
+    def tgSelect(op, shape, dtype, rng):
+        # Build random tensor placeholder node args of a given shape
+        pl, const = op["operands"]
+        assert pl == 3 and const == 0
+
+        tf_placeholders = []
+        tf_consts = []
+
+        # selector
+        tf_placeholders.append(("placeholder_0", TGen.getRand(None, tf.bool, rng)))
+        # inputs
+        tf_placeholders.append(("placeholder_1", TGen.getRand(shape, dtype, rng)))
+        tf_placeholders.append(("placeholder_2", TGen.getRand(shape, dtype, rng)))
+
+        return tf_placeholders, tf_consts
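+
+
+# Minimal smoke-test sketch (an illustrative addition; the op dictionary
+# below uses hypothetical values and is not part of any framework API):
+if __name__ == "__main__":
+    rng = np.random.default_rng(42)
+    op = {"operands": (2, 0), "filter": (3, 3), "bias": True}
+    # tgBasic: two same-shaped placeholder tensors, no consts
+    placeholders, consts = TGen.tgBasic(op, (1, 8, 8, 3), tf.float32, rng)
+    print([(name, t.shape) for name, t in placeholders + consts])
+    # tgConv2d: input placeholder plus filter and bias consts
+    placeholders, consts = TGen.tgConv2d(op, (1, 8, 8, 3), tf.float32, rng)
+    print([(name, t.shape) for name, t in placeholders + consts])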