about summary refs log tree commit diff
path: root/verif
diff options
context:
space:
mode:
author: Jeremy Johnson <jeremy.johnson@arm.com> 2023-10-26 13:53:14 +0100
committer: Eric Kunze <eric.kunze@arm.com> 2023-11-02 23:22:09 +0000
commit: a4d907e8686791dd84ed987d0d79325c4d908b73 (patch)
tree: 9748ef39183b7548a9ff50d457920eace3a6fdec /verif
parent: d1a08ce27ef8d0f6cf77e1b864610aade06edc5c (diff)
download: reference_model-a4d907e8686791dd84ed987d0d79325c4d908b73.tar.gz
Main compliance testing support for MUL
Update verify ULP mode to allow fractions (e.g. 0.5). Update pseudo generator to accept ranges. Fix up pseudo random distribution based on ranges. Change-Id: I9168c5f7d37722678c0f1f9e906953c8cec367b1 Signed-off-by: Jeremy Johnson <jeremy.johnson@arm.com>
Diffstat (limited to 'verif')
-rw-r--r-- verif/conformance/tosa_main_profile_ops_info.json | 19
-rw-r--r-- verif/generator/tosa_arg_gen.py | 162
-rw-r--r-- verif/generator/tosa_test_gen.py | 62
-rw-r--r-- verif/generator/tosa_utils.py | 1
-rw-r--r-- verif/generator/tosa_verif_build_tests.py | 12
5 files changed, 161 insertions, 95 deletions
diff --git a/verif/conformance/tosa_main_profile_ops_info.json b/verif/conformance/tosa_main_profile_ops_info.json
index a090479..4256bfb 100644
--- a/verif/conformance/tosa_main_profile_ops_info.json
+++ b/verif/conformance/tosa_main_profile_ops_info.json
@@ -1484,7 +1484,7 @@
"--target-dtype",
"bf16",
"--fp-values-range",
- "-2.0,2.0",
+ "-max,max",
"--target-shape",
"1,47,37,25",
"--target-shape",
@@ -1495,7 +1495,7 @@
"--target-dtype",
"fp32",
"--fp-values-range",
- "-2.0,2.0",
+ "-max,max",
"--target-shape",
"1,65534,4,1",
"--target-shape",
@@ -1613,7 +1613,7 @@
"--target-dtype",
"bf16",
"--fp-values-range",
- "-2.0,2.0",
+ "-max,max",
"--tensor-dim-range",
"1,65",
"--target-rank",
@@ -1627,7 +1627,7 @@
"--target-dtype",
"fp16",
"--fp-values-range",
- "-2.0,2.0",
+ "-max,max",
"--tensor-dim-range",
"1,17",
"--target-rank",
@@ -1637,7 +1637,7 @@
"--target-dtype",
"bf16",
"--fp-values-range",
- "-2.0,2.0",
+ "-max,max",
"--tensor-dim-range",
"1,16",
"--target-rank",
@@ -1647,7 +1647,7 @@
"--target-dtype",
"fp32",
"--fp-values-range",
- "-2.0,2.0",
+ "-max,max",
"--target-shape",
"1,1,65539,1"
]
@@ -2312,6 +2312,7 @@
"profile": [
"tosa-mi"
],
+ "support_for": [ "lazy_data_gen" ],
"generation": {
"standard": {
"negative_dim_range": "1,10",
@@ -2324,7 +2325,7 @@
"--target-dtype",
"bf16",
"--fp-values-range",
- "-2.0,2.0",
+ "-max,max",
"--tensor-dim-range",
"16,64",
"--target-rank",
@@ -2338,7 +2339,7 @@
"--target-dtype",
"fp16",
"--fp-values-range",
- "-2.0,2.0",
+ "-max,max",
"--tensor-dim-range",
"1,16",
"--target-rank",
@@ -2350,7 +2351,7 @@
"--target-dtype",
"bf16",
"--fp-values-range",
- "-2.0,2.0",
+ "-max,max",
"--target-shape",
"1,1,3,65534",
"--target-shape",
diff --git a/verif/generator/tosa_arg_gen.py b/verif/generator/tosa_arg_gen.py
index 32f4341..94b7172 100644
--- a/verif/generator/tosa_arg_gen.py
+++ b/verif/generator/tosa_arg_gen.py
@@ -628,6 +628,13 @@ class TosaTensorValuesGen:
return tens
+ # Default high value for random numbers
+ TVG_FLOAT_HIGH_VALUE = {
+ DType.FP32: (1 << 128) - (1 << (127 - 23)),
+ DType.FP16: (1 << 16) - (1 << (15 - 10)),
+ DType.BF16: (1 << 128) - (1 << (127 - 7)),
+ }
+
@staticmethod
def tvgLazyGenDefault(
testGen, opName, dtypeList, shapeList, argsDict, error_name=None
@@ -684,10 +691,13 @@ class TosaTensorValuesGen:
info = {}
# TODO - generate seed for this generator based on test
info["rng_seed"] = 42
- info["range"] = [
- str(v)
- for v in testGen.getDTypeRange(dtypeList[idx], high_inclusive=True)
- ]
+ if "data_range" in argsDict:
+ data_range = argsDict["data_range"]
+ else:
+ data_range = testGen.getDTypeRange(
+ dtypeList[idx], high_inclusive=True
+ )
+ info["range"] = [str(v) for v in data_range]
tens_meta["pseudo_random_info"] = info
elif dg_type == gtu.DataGenType.DOT_PRODUCT:
info = {}
@@ -950,80 +960,97 @@ class TosaTensorValuesGen:
testGen, op, dtypeList, shapeList, testArgs, error_name
)
+ # Set the data range to the square root of the largest value
+ TVG_FLOAT_HIGH_VALUE_MUL = {
+ DType.FP32: math.sqrt(TVG_FLOAT_HIGH_VALUE[DType.FP32]),
+ DType.FP16: math.sqrt(TVG_FLOAT_HIGH_VALUE[DType.FP16]),
+ DType.BF16: math.sqrt(TVG_FLOAT_HIGH_VALUE[DType.BF16]),
+ }
+
@staticmethod
- def tvgMul(testGen, op, dtypeList, shapeList, testArgs, error_name=None):
- if error_name is None:
+ def tvgMul(testGen, opName, dtypeList, shapeList, argsDict, error_name=None):
+ if error_name is not None or dtypeList[0] in (
+ DType.FP16,
+ DType.BF16,
+ DType.FP32,
+ ):
+ # ERROR_IF or floating point test
+ if dtypeList[0] in TosaTensorValuesGen.TVG_FLOAT_HIGH_VALUE_MUL:
+ data_range = testGen.getDTypeRange(dtypeList[0], high_inclusive=True)
+ high_val = TosaTensorValuesGen.TVG_FLOAT_HIGH_VALUE_MUL[dtypeList[0]]
+ # Set the values to something that won't produce infinity whilst
+ # respecting the default ranges if less than the high value
+ argsDict["data_range"] = [
+ max(-high_val, data_range[0]),
+ min(high_val, data_range[1]),
+ ]
+ return TosaTensorValuesGen.tvgLazyGenDefault(
+ testGen, opName, dtypeList, shapeList, argsDict, error_name
+ )
+ else:
+ # Integer test
+ op = testGen.TOSA_OP_LIST[opName]
pCount, cCount = op["operands"]
assert (
pCount == 2 and cCount == 0
), "Op.MUL must have 2 placeholders, 0 consts"
- tens = []
- if dtypeList[0] in (DType.FP16, DType.BF16, DType.FP32):
- tens.extend(testGen.buildPlaceholderTensors(shapeList[:], dtypeList[:]))
- else:
- placeholders = []
-
- # Make sure multiply result in int32 range
- shift = testArgs[0]
- if dtypeList[0] == DType.INT8:
- num_bits = 8
- elif dtypeList[0] == DType.INT16:
- num_bits = 16
- elif dtypeList[0] == DType.INT32:
- num_bits = 32
- elif error_name == ErrorIf.WrongInputType:
- num_bits = 8
- else:
- raise Exception("OpMul: invalid input dtype")
+ tens_ser_list = []
- for idx, shape in enumerate(shapeList[:]):
- low = -(2 ** (num_bits - 1))
- high = (2 ** (num_bits - 1)) - 1
+ # Make sure multiply result in int32 range
+ shift = argsDict["shift"]
+ if dtypeList[0] == DType.INT8:
+ num_bits = 8
+ elif dtypeList[0] == DType.INT16:
+ num_bits = 16
+ elif dtypeList[0] == DType.INT32:
+ num_bits = 32
+ elif error_name == ErrorIf.WrongInputType:
+ num_bits = 8
+ else:
+ raise Exception("OpMul: invalid input dtype")
- a_arr = np.int32(
- testGen.rng.integers(low=low, high=high, size=shapeList[0])
- )
- b_arr = np.int32(
- testGen.rng.integers(low=low, high=high, size=shapeList[1])
- )
+ for idx, shape in enumerate(shapeList[:]):
+ low = -(2 ** (num_bits - 1))
+ high = (2 ** (num_bits - 1)) - 1
- i = 0
- while True:
+ a_arr = np.int32(
+ testGen.rng.integers(low=low, high=high, size=shapeList[0])
+ )
+ b_arr = np.int32(
+ testGen.rng.integers(low=low, high=high, size=shapeList[1])
+ )
- a_arr_64 = a_arr.astype(np.int64)
- b_arr_64 = b_arr.astype(np.int64)
+ i = 0
+ while True:
- if shift > 0:
- rounding = 1 << (shift - 1)
- result_arr = ((a_arr_64 * b_arr_64) + rounding) >> shift
- else:
- result_arr = a_arr_64 * b_arr_64
+ a_arr_64 = a_arr.astype(np.int64)
+ b_arr_64 = b_arr.astype(np.int64)
- if (result_arr > -(2**31)).all() and (
- result_arr <= ((2**31) - 1)
- ).all():
- break
-
- i = i + 1
- a_arr = a_arr // 2
- b_arr = b_arr // 2
+ if shift > 0:
+ rounding = 1 << (shift - 1)
+ result_arr = ((a_arr_64 * b_arr_64) + rounding) >> shift
+ else:
+ result_arr = a_arr_64 * b_arr_64
- placeholders.append(
- testGen.ser.addPlaceholder(shapeList[0], dtypeList[0], a_arr)
- )
- placeholders.append(
- testGen.ser.addPlaceholder(shapeList[1], dtypeList[1], b_arr)
- )
+ if (result_arr > -(2**31)).all() and (
+ result_arr <= ((2**31) - 1)
+ ).all():
+ break
- tens.extend(placeholders)
+ i = i + 1
+ a_arr = a_arr // 2
+ b_arr = b_arr // 2
- return tens
- else:
- return TosaTensorValuesGen.tvgDefault(
- testGen, op, dtypeList, shapeList, testArgs, error_name
+ tens_ser_list.append(
+ testGen.ser.addPlaceholder(shapeList[0], dtypeList[0], a_arr)
+ )
+ tens_ser_list.append(
+ testGen.ser.addPlaceholder(shapeList[1], dtypeList[1], b_arr)
)
+ return TosaTensorValuesGen.TVGInfo(tens_ser_list, None)
+
@staticmethod
def tvgConcat(testGen, op, dtypeList, shapeList, testArgs, error_name=None):
count = len(shapeList) - testGen.args.num_const_inputs_concat
@@ -2076,11 +2103,18 @@ class TosaArgGen:
for p in range(testGen.args.num_rand_permutations):
shift = testGen.randInt(0, 32)
-
- arg_list.append(("perm{}_shift{}".format(p, shift), [shift]))
+ arg_list.append(("perm{}_shift{}".format(p, shift), {"shift": shift}))
else:
- arg_list.append(("perm0_shift0", [0]))
+ arg_list.append(("perm0_shift0", {"shift": 0}))
+ arg_list = TosaArgGen._add_data_generators(
+ testGen,
+ opName,
+ dtype,
+ arg_list,
+ error_name,
+ )
+ # Return list of tuples: (arg_str, args_dict)
return arg_list
@staticmethod
diff --git a/verif/generator/tosa_test_gen.py b/verif/generator/tosa_test_gen.py
index 54b624e..1995cbc 100644
--- a/verif/generator/tosa_test_gen.py
+++ b/verif/generator/tosa_test_gen.py
@@ -51,15 +51,31 @@ class TosaTestGen:
self.quantGen = TosaQuantGen()
# Force makeShape to do a specific starting shape
self.targetted_shape = None
- # Work out floating point range
- self.random_fp_low = min(args.tensor_fp_value_range)
- self.random_fp_high = max(args.tensor_fp_value_range)
# JSON schema validation
self.descSchemaValidator = TestDescSchemaValidator()
# Data generator library is sometimes needed for compliance set up
# even if we are generating the data later (lazy_data_generation)
self.dgl = GenerateLibrary(args.generate_lib_path)
+ # Work out floating point range
+ def convertFPRange(rangeFP, maxFP):
+ # Converts program arguments of max/-max to FP max
+ vals = []
+ for v in rangeFP:
+ if v == "max":
+ v = maxFP
+ elif v == "-max":
+ v = -maxFP
+ vals.append(v)
+ return tuple(sorted(vals))
+
+ self.random_float_range = {}
+ for dtype in (DType.FP32, DType.FP16, DType.BF16):
+ self.random_float_range[dtype] = convertFPRange(
+ args.tensor_fp_value_range,
+ TosaTensorValuesGen.TVG_FLOAT_HIGH_VALUE[dtype],
+ )
+
def createSerializer(self, opName, testPath):
self.testPath = os.path.join(opName, testPath)
@@ -130,9 +146,8 @@ class TosaTestGen:
# Returns dtype value range boundaries (low, high)
# The high boundary is excluded in the range
# unless high_inclusive is True
-
if dtype in (DType.FP32, DType.FP16, DType.BF16):
- return (self.random_fp_low, self.random_fp_high)
+ return self.random_float_range[dtype]
elif dtype == DType.BOOL:
rng = (0, 2)
elif dtype == DType.UINT8:
@@ -318,8 +333,6 @@ class TosaTestGen:
compliance_tens["ulp_info"] = {"ulp": op["compliance"]["ulp"]}
elif op["op"] == Op.REDUCE_PRODUCT:
mode = gtu.ComplianceMode.REDUCE_PRODUCT
- elif op["op"] in (Op.ADD, Op.MUL, Op.SUB, Op.CEIL, Op.FLOOR, Op.CAST):
- mode = gtu.ComplianceMode.ROUND
else:
mode = gtu.ComplianceMode.EXACT
compliance_tens["mode"] = gtu.ComplianceMode(mode).name
@@ -466,23 +479,29 @@ class TosaTestGen:
self.ser.addOperator(op["op"], input_list, output_list, attr)
return result_tens
- def build_mul(self, op, a, b, shift, validator_fcns=None, error_name=None):
- result_tens = OutputShaper.binaryBroadcastOp(
+ def build_mul(
+ self, op, inputs, args_dict, validator_fcns=None, error_name=None, qinfo=None
+ ):
+ assert len(inputs) == 2
+ a, b = inputs
+ shift = args_dict["shift"]
+
+ result_tensor = OutputShaper.binaryBroadcastOp(
self.ser, self.rng, a, b, error_name
)
- # Special for multiply:
- # Force the result to INT32 for INT types
+ # Special for multiply: Force the result to INT32 for INT types
if a.dtype not in (DType.FP16, DType.BF16, DType.FP32):
- result_tens.setDtype(DType.INT32)
+ result_tensor.setDtype(DType.INT32)
+
if error_name == ErrorIf.WrongOutputType:
all_dtypes = [DType.INT8, DType.INT16, DType.INT48]
outputDType = self.rng.choice(all_dtypes)
- result_tens.setDtype(outputDType)
+ result_tensor.setDtype(outputDType)
# Invalidate Input/Output list for error if checks.
input_list = [a.name, b.name]
- output_list = [result_tens.name]
+ output_list = [result_tensor.name]
pCount, cCount = op["operands"]
num_operands = pCount + cCount
input_list, output_list = TosaErrorIfArgGen.eiInvalidateInputOutputList(
@@ -497,8 +516,8 @@ class TosaTestGen:
input1=a,
input2=b,
input_dtype=a.dtype,
- output_dtype=result_tens.dtype,
- result_tensors=[result_tens],
+ output_dtype=result_tensor.dtype,
+ result_tensors=[result_tensor],
input_list=input_list,
output_list=output_list,
num_operands=num_operands,
@@ -509,7 +528,12 @@ class TosaTestGen:
attr.MulAttribute(shift)
self.ser.addOperator(op["op"], input_list, output_list, attr)
- return result_tens
+
+ compliance = self.tensorComplianceMetaData(
+ op, a.dtype, args_dict, result_tensor, error_name
+ )
+
+ return TosaTestGen.BuildInfo(result_tensor, compliance)
def build_table(self, op, a, table, validator_fcns=None, error_name=None):
result_tens = OutputShaper.tableOp(self.ser, self.rng, a, error_name)
@@ -3456,6 +3480,10 @@ class TosaTestGen:
TosaErrorValidator.evDimensionMismatch,
TosaErrorValidator.evBroadcastShapesMismatch,
),
+ "data_gen": {
+ "fp": (gtu.DataGenType.PSEUDO_RANDOM,),
+ },
+ "compliance": {"ulp": 0.5},
},
"pow": {
"op": Op.POW,
diff --git a/verif/generator/tosa_utils.py b/verif/generator/tosa_utils.py
index 7fc5b52..3b487de 100644
--- a/verif/generator/tosa_utils.py
+++ b/verif/generator/tosa_utils.py
@@ -38,7 +38,6 @@ class ComplianceMode(IntEnum):
ULP = 2
FP_SPECIAL = 3
REDUCE_PRODUCT = 4
- ROUND = 5
class DataGenType(IntEnum):
diff --git a/verif/generator/tosa_verif_build_tests.py b/verif/generator/tosa_verif_build_tests.py
index 954c6e9..d6598fb 100644
--- a/verif/generator/tosa_verif_build_tests.py
+++ b/verif/generator/tosa_verif_build_tests.py
@@ -13,14 +13,18 @@ from serializer.tosa_serializer import DTypeNames
OPTION_FP_VALUES_RANGE = "--fp-values-range"
-# Used for parsing a comma-separated list of integers in a string
-# to an actual list of integers
+# Used for parsing a comma-separated list of integers/floats in a string
+# to an actual list of integers/floats with special case max
def str_to_list(in_s, is_float=False):
- """Converts a comma-separated list of string integers to a python list of ints"""
+ """Converts a comma-separated list string to a python list of numbers."""
lst = in_s.split(",")
out_list = []
for i in lst:
- val = float(i) if is_float else int(i)
+ # Special case for allowing maximum FP numbers
+ if is_float and i in ("-max", "max"):
+ val = i
+ else:
+ val = float(i) if is_float else int(i)
out_list.append(val)
return out_list