path: root/verif/frameworks/tensor_gen.py
Diffstat (limited to 'verif/frameworks/tensor_gen.py')
-rw-r--r--  verif/frameworks/tensor_gen.py  264
1 file changed, 264 insertions, 0 deletions
diff --git a/verif/frameworks/tensor_gen.py b/verif/frameworks/tensor_gen.py
new file mode 100644
index 0000000..e57175b
--- /dev/null
+++ b/verif/frameworks/tensor_gen.py
@@ -0,0 +1,264 @@
+# Copyright (c) 2020-2022, ARM Limited.
+# SPDX-License-Identifier: Apache-2.0
+import numpy as np
+import tensorflow as tf
+
+# FIXME: replace hardcoded '* 2' with random integers, where possible
+
+# The scaling factor for random numbers generated in input tensors. The
+# random numbers are calculated as:
+#   (rng.random(size=shape) - RAND_SHIFT_FACTOR) * RAND_SCALE_FACTOR
+# FIXME: improve range here
+RAND_SCALE_FACTOR = 4.0
+# Amount subtracted from random numbers to center them around zero
+RAND_SHIFT_FACTOR = 0.5
+
+RAND_INT_MIN = -128
+RAND_INT_MAX = 128
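+
+# Editor's note: with the values above, float tensors land in the half-open
+# range [-2.0, 2.0), e.g. (0.75 - 0.5) * 4.0 == 1.0, while int32 tensors
+# span [-128, 128) and uint32 tensors [0, 128).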
+
+
+class TGen:
+    """A collection of functions to build tensor value arguments for an operator"""
+
+    def __init__(self):
+        pass
+
+    @staticmethod
+    def getRand(shape, dtype, rng):
+        if dtype == tf.float32:
+            return np.float32(
+                (rng.random(size=shape) - RAND_SHIFT_FACTOR) * RAND_SCALE_FACTOR
+            )
+        if dtype == tf.float16:
+            return np.float16(
+                (rng.random(size=shape) - RAND_SHIFT_FACTOR) * RAND_SCALE_FACTOR
+            )
+        if dtype == tf.int32:
+            return np.int32(
+                rng.integers(low=RAND_INT_MIN, high=RAND_INT_MAX, size=shape)
+            )
+        if dtype == tf.uint32:
+            return np.uint32(rng.integers(low=0, high=RAND_INT_MAX, size=shape))
+        if dtype == tf.bool:
+            return np.bool_(rng.choice(a=[False, True], size=shape))
+
+        raise ValueError("Unsupported type: {}".format(dtype))
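+
+    # Illustrative usage of getRand (editor's sketch, not exercised by the
+    # framework itself):
+    #   rng = np.random.default_rng(42)
+    #   TGen.getRand((2, 3), tf.float32, rng)
+    # returns a float32 ndarray of shape (2, 3) with values in [-2.0, 2.0);
+    # passing shape=None yields a scalar instead.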
+
+    @staticmethod
+    def tgBasic(op, shape, dtype, rng):
+        # Build random tensor placeholder node args of a given shape
+        pl, const = op["operands"]
+
+        tf_placeholders = []
+        tf_consts = []
+
+        for i in range(pl):
+            tf_placeholders.append(
+                ("placeholder_{}".format(i), TGen.getRand(shape, dtype, rng))
+            )
+
+        for i in range(const):
+            tf_consts.append(("const_{}".format(i), TGen.getRand(shape, dtype, rng)))
+
+        return tf_placeholders, tf_consts
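+
+    # Editor's example: an op entry with {"operands": (2, 1)} yields
+    #   ([("placeholder_0", <array>), ("placeholder_1", <array>)],
+    #    [("const_0", <array>)])
+    # where every array has the requested shape and dtype.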
+
+    @staticmethod
+    def tgBFuzz(op, shape, dtype, rng):
+        # Build random tensor placeholder node args of a given shape, optionally
+        # fuzzing the arguments with random 1's to force broadcasting
+
+        pl, const = op["operands"]
+
+        assert const == 0
+
+        fuzz_arg = rng.integers(0, pl + const)
+        fuzz_idx = rng.integers(0, len(shape))
+
+        tf_placeholders = []
+        tf_consts = []
+        for i in range(pl):
+            if i == fuzz_arg:
+                # Squeeze one randomly chosen dimension to 1 so that the
+                # operator has to broadcast along it
+                s_fuzz = list(shape)
+                s_fuzz[fuzz_idx] = 1
+                s_fuzz = tuple(s_fuzz)
+                i_shape = s_fuzz
+            else:
+                i_shape = shape
+            tf_placeholders.append(
+                ("placeholder_{}".format(i), TGen.getRand(i_shape, dtype, rng))
+            )
+
+        return tf_placeholders, tf_consts
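+
+    # Editor's example: for shape (4, 8) and two placeholder operands, one
+    # randomly chosen input may become (1, 8) or (4, 1), so the generated
+    # graph exercises TF-style broadcasting against the full-size input.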
+
+    @staticmethod
+    def tgConv2d(op, ifm_shape, dtype, rng):
+
+        # Take the shape and generate an input and filter
+        tf_placeholders = []
+        tf_consts = []
+
+        # Require rank 4 shape
+        if len(ifm_shape) != 4:
+            return [], []
+
+        filter_h, filter_w = op["filter"]
+
+        # TODO: the test is hard-coded to an OFM depth of 2x the IFM depth;
+        # this could be randomized in the future.
+        filter_shape = (filter_h, filter_w, ifm_shape[3], ifm_shape[3] * 2)
+
+        tf_placeholders.append(("placeholder_0", TGen.getRand(ifm_shape, dtype, rng)))
+        tf_consts.append(("const_0", TGen.getRand(filter_shape, dtype, rng)))
+
+        try:
+            bias = op["bias"]
+        except KeyError:
+            bias = False
+
+        if bias:
+            # bias is 1D and size == output channels
+            bias_shape = (ifm_shape[3] * 2,)
+            tf_consts.append(("const_1", TGen.getRand(bias_shape, dtype, rng)))
+
+        return tf_placeholders, tf_consts
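+
+    # Editor's example, assuming the NHWC/HWIO layouts used by tf.nn.conv2d:
+    # ifm_shape (1, 32, 32, 4) with op["filter"] == (3, 3) produces a filter
+    # constant of shape (3, 3, 4, 8) and, when op["bias"] is set, a bias
+    # constant of shape (8,).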
+
+    @staticmethod
+    def tgDepthwiseConv2d(op, ifm_shape, dtype, rng):
+
+        # Take the shape and generate an input and filter
+        tf_placeholders = []
+        tf_consts = []
+
+        # Require rank 4 shape
+        if len(ifm_shape) != 4:
+            return [], []
+
+        filter_h, filter_w = op["filter"]
+
+        # TODO: the test is hard-coded to a channel_multiplier of 2; this
+        # could be randomized in the future.
+        filter_shape = (filter_h, filter_w, ifm_shape[3], 2)
+
+        tf_placeholders.append(("placeholder_0", TGen.getRand(ifm_shape, dtype, rng)))
+        tf_consts.append(("const_0", TGen.getRand(filter_shape, dtype, rng)))
+
+        try:
+            bias = op["bias"]
+        except KeyError:
+            bias = False
+
+        if bias:
+            # bias is 1D and size == output channels
+            bias_shape = (ifm_shape[3] * 2,)
+            tf_consts.append(("const_1", TGen.getRand(bias_shape, dtype, rng)))
+
+        return tf_placeholders, tf_consts
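+
+    # Editor's note: tf.nn.depthwise_conv2d filters are laid out as
+    # [height, width, in_channels, channel_multiplier], so with the
+    # multiplier of 2 the output has ifm_shape[3] * 2 channels, which is
+    # why the bias above uses that size.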
+
+    @staticmethod
+    def tgTransposeConv2d(op, ifm_shape, dtype, rng):
+
+        # Take the shape and generate an input and filter
+        tf_placeholders = []
+        tf_consts = []
+
+        # Require rank 4 shape
+        if len(ifm_shape) != 4:
+            return [], []
+
+        filter_h, filter_w = op["filter"]
+
+        # TODO: the test is hard-coded to an OFM depth of 2x the IFM depth;
+        # this could be randomized in the future.
+        filter_shape = (filter_h, filter_w, ifm_shape[3] * 2, ifm_shape[3])
+
+        tf_placeholders.append(("placeholder_0", TGen.getRand(ifm_shape, dtype, rng)))
+        tf_consts.append(("const_0", TGen.getRand(filter_shape, dtype, rng)))
+
+        try:
+            bias = op["bias"]
+        except KeyError:
+            bias = False
+
+        if bias:
+            # bias is 1D and size == output channels
+            bias_shape = (ifm_shape[3] * 2,)
+            tf_consts.append(("const_1", TGen.getRand(bias_shape, dtype, rng)))
+
+        return tf_placeholders, tf_consts
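+
+    # Editor's note: tf.nn.conv2d_transpose filters are laid out as
+    # [height, width, output_channels, in_channels], hence the doubled
+    # depth appears in position 2 here rather than position 3 as in
+    # tgConv2d.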
+
+    @staticmethod
+    def tgPooling(op, shapes, dtype, rng):
+        # Pooling does nothing special except filter out non-rank-4 tensors
+        if len(shapes) != 4:
+            return [], []
+
+        return TGen.tgBasic(op, shapes, dtype, rng)
+
+    @staticmethod
+    def tgMatmul(op, ifm_shape, dtype, rng):
+        # Take the shape and generate an input and filter
+        tf_placeholders = []
+        tf_consts = []
+
+        if len(ifm_shape) < 2:
+            return [], []
+
+        # For ifm_shape = [..., N, K], generate an rhs tensor with
+        # shape [..., K, 2 * N]
+        tf_placeholders.append(("placeholder_0", TGen.getRand(ifm_shape, dtype, rng)))
+
+        shape_rhs = list(ifm_shape)
+        shape_rhs[-2] = ifm_shape[-1]
+        shape_rhs[-1] = ifm_shape[-2] * 2
+        tf_placeholders.append(
+            (
+                "placeholder_1",
+                TGen.getRand(shape_rhs, dtype, rng),
+            )
+        )
+
+        return tf_placeholders, tf_consts
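+
+    # Editor's example: for ifm_shape (4, 8, 16) the rhs placeholder gets
+    # shape (4, 16, 16) (K=16, 2*N=16), so tf.matmul(lhs, rhs) yields a
+    # (4, 8, 16) result; the batch dimensions are left untouched.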
+
+    @staticmethod
+    def tgOneHot(op, shape, dtype, rng):
+        # Build random tensor placeholder node args of a given shape
+        pl, const = op["operands"]
+
+        assert pl == 3 and const == 1
+
+        tf_placeholders = []
+        tf_consts = []
+
+        # depth
+        depth = np.int32(rng.integers(low=1, high=32, size=None))
+        tf_consts.append(("const_0", depth))
+
+        # indices
+        indices = np.int32(rng.integers(low=0, high=depth, size=shape))
+        tf_placeholders.append(("placeholder_0", indices))
+
+        # on_value
+        tf_placeholders.append(("placeholder_1", TGen.getRand(None, dtype, rng)))
+
+        # off_value
+        tf_placeholders.append(("placeholder_2", TGen.getRand(None, dtype, rng)))
+
+        return tf_placeholders, tf_consts
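+
+    # Editor's note: these four arguments mirror tf.one_hot(indices, depth,
+    # on_value, off_value): depth is a scalar constant in [1, 32), the
+    # indices stay within [0, depth), and the on/off values are random
+    # scalars of the requested dtype.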
+
+    @staticmethod
+    def tgSelect(op, shape, dtype, rng):
+        # Build random tensor placeholder node args of a given shape
+        pl, const = op["operands"]
+        assert pl == 3 and const == 0
+
+        tf_placeholders = []
+        tf_consts = []
+
+        # selector (a scalar bool)
+        tf_placeholders.append(("placeholder_0", TGen.getRand(None, tf.bool, rng)))
+        # inputs
+        tf_placeholders.append(("placeholder_1", TGen.getRand(shape, dtype, rng)))
+        tf_placeholders.append(("placeholder_2", TGen.getRand(shape, dtype, rng)))
+
+        return tf_placeholders, tf_consts
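+
+    # Editor's note: passing shape=None makes the selector a single random
+    # bool, which suits a select-style op that picks one whole input tensor
+    # when the condition is a scalar.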