From 7bf0cb990b55d5738c8dc4291686576654d2d8ab Mon Sep 17 00:00:00 2001
From: Jeremy Johnson
Date: Tue, 31 Oct 2023 14:37:54 +0000
Subject: Main Compliance testing support for ADD, SUB, MINIMUM and MAXIMUM

Added main inference compliance mode data generation and verification.
Improved error reporting in tosa_verif_build_tests.
Fixed pseudo random invalid range check.

Signed-off-by: Jeremy Johnson
Change-Id: Ib538b5999444d67704ebc322be3c7becbe5ad206
---
 .../src/generate/generate_pseudo_random.cc        |  5 +-
 reference_model/src/generate/generate_utils.cc    |  4 +
 verif/conformance/tosa_main_profile_ops_info.json | 28 ++++---
 verif/generator/tosa_arg_gen.py                   | 97 +++++++++++++++-------
 verif/generator/tosa_test_gen.py                  | 87 ++++++++++++-------
 verif/generator/tosa_verif_build_tests.py         | 52 +++++++-----
 6 files changed, 178 insertions(+), 95 deletions(-)

diff --git a/reference_model/src/generate/generate_pseudo_random.cc b/reference_model/src/generate/generate_pseudo_random.cc
index f234796..78013eb 100644
--- a/reference_model/src/generate/generate_pseudo_random.cc
+++ b/reference_model/src/generate/generate_pseudo_random.cc
@@ -126,9 +126,10 @@ bool generatePseudoRandom(const GenerateConfig& cfg, void* data, size_t size)
         WARNING("[Generator][PR] Unknown operator.");
         return false;
     }
-    if (cfg.pseudoRandomInfo.range.size() != 0 || cfg.pseudoRandomInfo.range.size() != 2)
+    if (cfg.pseudoRandomInfo.range.size() != 0 && cfg.pseudoRandomInfo.range.size() != 2)
     {
-        WARNING("[Generator][PR] Invalid range.");
+        WARNING("[Generator][PR] Invalid range");
+        return false;
     }
 
     switch (cfg.dataType)
diff --git a/reference_model/src/generate/generate_utils.cc b/reference_model/src/generate/generate_utils.cc
index ae6dfcb..8ae889a 100644
--- a/reference_model/src/generate/generate_utils.cc
+++ b/reference_model/src/generate/generate_utils.cc
@@ -38,11 +38,15 @@ NLOHMANN_JSON_SERIALIZE_ENUM(DType,
 NLOHMANN_JSON_SERIALIZE_ENUM(Op,
                              {
                                  { Op::Op_UNKNOWN, "UNKNOWN" },
+                                 { Op::Op_ADD, "ADD" },
                                  { Op::Op_CONV2D, "CONV2D" },
                                  { Op::Op_MATMUL, "MATMUL" },
+                                 { Op::Op_MAXIMUM, "MAXIMUM" },
                                  { Op::Op_MAX_POOL2D, "MAX_POOL2D" },
+                                 { Op::Op_MINIMUM, "MINIMUM" },
                                  { Op::Op_MUL, "MUL" },
                                  { Op::Op_PAD, "PAD" },
+                                 { Op::Op_SUB, "SUB" },
                              })
 
 }    // namespace tosa
diff --git a/verif/conformance/tosa_main_profile_ops_info.json b/verif/conformance/tosa_main_profile_ops_info.json
index 4256bfb..254f5e7 100644
--- a/verif/conformance/tosa_main_profile_ops_info.json
+++ b/verif/conformance/tosa_main_profile_ops_info.json
@@ -69,6 +69,7 @@
         "profile": [
             "tosa-mi"
         ],
+        "support_for": [ "lazy_data_gen" ],
         "generation": {
             "standard": {
                 "generator_args": [
@@ -80,7 +81,7 @@
                         "--target-dtype",
                         "bf16",
                         "--fp-values-range",
-                        "-2.0,2.0",
+                        "-max,max",
                         "--tensor-dim-range",
                         "16,64",
                         "--target-rank",
@@ -94,7 +95,7 @@
                         "--target-dtype",
                         "fp32",
                         "--fp-values-range",
-                        "-2.0,2.0",
+                        "-max,max",
                         "--tensor-dim-range",
                         "1,16",
                         "--target-rank",
@@ -106,7 +107,7 @@
                         "--target-dtype",
                         "fp16",
                         "--fp-values-range",
-                        "-2.0,2.0",
+                        "-max,max",
                         "--target-shape",
                         "2,1,1,65532",
                         "--target-shape",
@@ -2182,6 +2183,7 @@
         "profile": [
             "tosa-mi"
         ],
+        "support_for": [ "lazy_data_gen" ],
         "generation": {
             "standard": {
                 "generator_args": [
@@ -2193,7 +2195,7 @@
                         "--target-dtype",
                         "bf16",
                         "--fp-values-range",
-                        "-2.0,2.0",
+                        "-max,max",
                         "--tensor-dim-range",
                         "16,64",
                         "--target-rank",
@@ -2211,7 +2213,7 @@
                         "--target-dtype",
                         "bf16",
                         "--fp-values-range",
-                        "-2.0,2.0",
+                        "-max,max",
                         "--tensor-dim-range",
                         "1,16",
                         "--target-rank",
@@ -2223,7 +2225,7 @@
                         "--target-dtype",
                         "fp32",
                         "--fp-values-range",
- "-2.0,2.0", + "-max,max", "--target-shape", "65531,1,2,1", "--target-shape", @@ -2247,6 +2249,7 @@ "profile": [ "tosa-mi" ], + "support_for": [ "lazy_data_gen" ], "generation": { "standard": { "generator_args": [ @@ -2258,7 +2261,7 @@ "--target-dtype", "bf16", "--fp-values-range", - "-2.0,2.0", + "-max,max", "--tensor-dim-range", "16,64", "--target-rank", @@ -2276,7 +2279,7 @@ "--target-dtype", "bf16", "--fp-values-range", - "-2.0,2.0", + "-max,max", "--tensor-dim-range", "1,16", "--target-rank", @@ -2288,7 +2291,7 @@ "--target-dtype", "fp32", "--fp-values-range", - "-2.0,2.0", + "-max,max", "--target-shape", "1,65531,2,1", "--target-shape", @@ -2775,6 +2778,7 @@ "profile": [ "tosa-mi" ], + "support_for": [ "lazy_data_gen" ], "generation": { "standard": { "generator_args": [ @@ -2786,7 +2790,7 @@ "--target-dtype", "bf16", "--fp-values-range", - "-2.0,2.0", + "-max,max", "--tensor-dim-range", "16,64", "--target-rank", @@ -2800,7 +2804,7 @@ "--target-dtype", "bf16", "--fp-values-range", - "-2.0,2.0", + "-max,max", "--tensor-dim-range", "1,16", "--target-rank", @@ -2812,7 +2816,7 @@ "--target-dtype", "fp32", "--fp-values-range", - "-2.0,2.0", + "-max,max", "--target-shape", "1,1,65533,3", "--target-shape", diff --git a/verif/generator/tosa_arg_gen.py b/verif/generator/tosa_arg_gen.py index 94b7172..1be243c 100644 --- a/verif/generator/tosa_arg_gen.py +++ b/verif/generator/tosa_arg_gen.py @@ -635,6 +635,19 @@ class TosaTensorValuesGen: DType.BF16: (1 << 128) - (1 << (127 - 7)), } + @staticmethod + def _get_data_range(testGen, dtype, highValueLookup): + if dtype in highValueLookup: + data_range = testGen.getDTypeRange(dtype, high_inclusive=True) + high_val = highValueLookup[dtype] + # Set the values to something that won't produce infinity whilst + # respecting the default ranges if less than the high value + return [ + max(-high_val, data_range[0]), + min(high_val, data_range[1]), + ] + return None + @staticmethod def tvgLazyGenDefault( testGen, opName, dtypeList, shapeList, argsDict, error_name=None @@ -777,16 +790,24 @@ class TosaTensorValuesGen: testGen, op, dtypeList, shapeList, testArgs, error_name ) + # Set the data range to half the largest value + TVG_FLOAT_HIGH_VALUE_ADDSUB = { + DType.FP32: (TVG_FLOAT_HIGH_VALUE[DType.FP32] / 2), + DType.FP16: (TVG_FLOAT_HIGH_VALUE[DType.FP16] / 2), + DType.BF16: (TVG_FLOAT_HIGH_VALUE[DType.BF16] / 2), + } + @staticmethod - def tvgAddSub(testGen, op, dtypeList, shapeList, testArgs, error_name=None): + def tvgAddSub(testGen, opName, dtypeList, shapeList, argsDict, error_name=None): if dtypeList[0] == DType.INT32 and error_name is None: - # Make sure the operation does not cause value saturation - where + # Make sure the integer operation does not cause value saturation - where # the number wraps due to limited number of bits to store the answer + op = testGen.TOSA_OP_LIST[opName] pCount, cCount = op["operands"] assert ( pCount == 2 and cCount == 0 ), "Op.ADD / Op.SUB must have 2 placeholders, 0 consts" - placeholders = [] + tens_ser_list = [] add = op["op"] == Op.ADD a_arr = testGen.getRandTensor(shapeList[0], dtypeList[0]) b_arr = testGen.getRandTensor(shapeList[1], dtypeList[1]) @@ -833,17 +854,24 @@ class TosaTensorValuesGen: ), "Op.ADD / SUB dimension must be 1 or matching to be broadcastable" b_unsat_arr = np.amax(b_unsat_arr, axis=axis, keepdims=True) - placeholders.append( + tens_ser_list.append( testGen.ser.addPlaceholder(shapeList[0], dtypeList[0], a_arr) ) - placeholders.append( + tens_ser_list.append( 
                 testGen.ser.addPlaceholder(shapeList[1], dtypeList[1], b_unsat_arr)
             )
 
-            return placeholders
+            return TosaTensorValuesGen.TVGInfo(tens_ser_list, None)
         else:
-            return TosaTensorValuesGen.tvgDefault(
-                testGen, op, dtypeList, shapeList, testArgs, error_name
+            # ERROR_IF or floating point test
+            data_range = TosaTensorValuesGen._get_data_range(
+                testGen, dtypeList[0], TosaTensorValuesGen.TVG_FLOAT_HIGH_VALUE_ADDSUB
+            )
+            if data_range:
+                argsDict["data_range"] = data_range
+
+            return TosaTensorValuesGen.tvgLazyGenDefault(
+                testGen, opName, dtypeList, shapeList, argsDict, error_name
             )
 
     @staticmethod
@@ -923,14 +951,15 @@ class TosaTensorValuesGen:
         )
 
     @staticmethod
-    def tvgIntDiv(testGen, op, dtypeList, shapeList, testArgs, error_name=None):
+    def tvgIntDiv(testGen, opName, dtypeList, shapeList, argsDict, error_name=None):
         if error_name is None:
+            op = testGen.TOSA_OP_LIST[opName]
            pCount, cCount = op["operands"]
             assert (
                 pCount == 2 and cCount == 0
             ), "Op.INTDIV must have 2 placeholders, 0 consts"
 
-            placeholders = []
+            tens_ser_list = []
 
             # Two invalid cases for Op.INTDIV:
             # 1. divisor == 0
@@ -947,17 +976,17 @@ class TosaTensorValuesGen:
                     break
 
-            placeholders.append(
+            tens_ser_list.append(
                 testGen.ser.addPlaceholder(shapeList[0], dtypeList[0], dividend_arr)
             )
-            placeholders.append(
+            tens_ser_list.append(
                 testGen.ser.addPlaceholder(shapeList[1], dtypeList[1], divisor_arr)
             )
 
-            return placeholders
+            return TosaTensorValuesGen.TVGInfo(tens_ser_list, None)
         else:
-            return TosaTensorValuesGen.tvgDefault(
-                testGen, op, dtypeList, shapeList, testArgs, error_name
+            return TosaTensorValuesGen.tvgLazyGenDefault(
+                testGen, opName, dtypeList, shapeList, argsDict, error_name
             )
 
 
     # Set the data range to the square root of the largest value
@@ -975,15 +1004,12 @@ class TosaTensorValuesGen:
             DType.FP32,
         ):
             # ERROR_IF or floating point test
-            if dtypeList[0] in TosaTensorValuesGen.TVG_FLOAT_HIGH_VALUE_MUL:
-                data_range = testGen.getDTypeRange(dtypeList[0], high_inclusive=True)
-                high_val = TosaTensorValuesGen.TVG_FLOAT_HIGH_VALUE_MUL[dtypeList[0]]
-                # Set the values to something that won't produce infinity whilst
-                # respecting the default ranges if less than the high value
-                argsDict["data_range"] = [
-                    max(-high_val, data_range[0]),
-                    min(high_val, data_range[1]),
-                ]
+            data_range = TosaTensorValuesGen._get_data_range(
+                testGen, dtypeList[0], TosaTensorValuesGen.TVG_FLOAT_HIGH_VALUE_MUL
+            )
+            if data_range:
+                argsDict["data_range"] = data_range
+
             return TosaTensorValuesGen.tvgLazyGenDefault(
                 testGen, opName, dtypeList, shapeList, argsDict, error_name
             )
@@ -1075,22 +1101,25 @@ class TosaTensorValuesGen:
         return tens
 
     @staticmethod
-    def tvgLogicalShift(testGen, op, dtypeList, shapeList, testArgs, error_name=None):
+    def tvgLogicalShift(
+        testGen, opName, dtypeList, shapeList, argsDict, error_name=None
+    ):
+        op = testGen.TOSA_OP_LIST[opName]
         pCount, cCount = op["operands"]
         assert (
             pCount == 2 and cCount == 0
         ), "Op.LOGICAL_LEFT_SHIFT or Op.LOGICAL_RIGHT_SHIFT must have 2 placeholders, 0 consts"
         values_arr = testGen.getRandTensor(shapeList[0], dtypeList[0])
         shift_arr = np.int32(testGen.rng.integers(low=0, high=32, size=shapeList[1]))
 
-        placeholders = []
-        placeholders.append(
+        tens_ser_list = []
+        tens_ser_list.append(
             testGen.ser.addPlaceholder(shapeList[0], dtypeList[0], values_arr)
         )
-        placeholders.append(
+        tens_ser_list.append(
             testGen.ser.addPlaceholder(shapeList[1], dtypeList[1], shift_arr)
         )
-        return placeholders
+        return TosaTensorValuesGen.TVGInfo(tens_ser_list, None)
 
     @staticmethod
     def tvgEqual(testGen, op, dtypeList, shapeList, testArgs, error_name=None):
@@ -1218,7 +1247,15 @@ class TosaArgGen:
     def agNone(testGen, opName, shapeList, dtype, error_name=None):
         """A trivial argument generator for operators that don't take any non-tensor arguments"""
 
-        return [("", [])]
+        arg_list = TosaArgGen._add_data_generators(
+            testGen,
+            opName,
+            dtype,
+            [("", {})],
+            error_name,
+        )
+        # Return list of tuples: (arg_str, args_dict)
+        return arg_list
 
     @staticmethod
     def agAxis(testGen, opName, shapeList, dtype, error_name=None):
diff --git a/verif/generator/tosa_test_gen.py b/verif/generator/tosa_test_gen.py
index 1995cbc..556a0d8 100644
--- a/verif/generator/tosa_test_gen.py
+++ b/verif/generator/tosa_test_gen.py
@@ -403,14 +403,18 @@ class TosaTestGen:
         self.ser.addOperator(op["op"], input_list, output_list, attr)
         return result_tens
 
-    def build_binary_broadcast(self, op, a, b, validator_fcns, error_name=None):
-        result_tens = OutputShaper.binaryBroadcastOp(
+    def build_binary_broadcast(
+        self, op, inputs, args_dict, validator_fcns, error_name=None, qinfo=None
+    ):
+        assert len(inputs) == 2
+        a, b = inputs
+        result_tensor = OutputShaper.binaryBroadcastOp(
             self.ser, self.rng, a, b, error_name
         )
 
         # Invalidate Input/Output list for error if checks.
         input_list = [a.name, b.name]
-        output_list = [result_tens.name]
+        output_list = [result_tensor.name]
         pCount, cCount = op["operands"]
         num_operands = pCount + cCount
         input_list, output_list = TosaErrorIfArgGen.eiInvalidateInputOutputList(
@@ -425,8 +429,8 @@ class TosaTestGen:
             input1=a,
             input2=b,
             input_dtype=a.dtype,
-            output_dtype=result_tens.dtype,
-            result_tensors=[result_tens],
+            output_dtype=result_tensor.dtype,
+            result_tensors=[result_tensor],
             input_list=input_list,
             output_list=output_list,
             num_operands=num_operands,
@@ -434,7 +438,16 @@ class TosaTestGen:
             return None
 
         self.ser.addOperator(op["op"], input_list, output_list)
-        return result_tens
+
+        if op["op"] == Op.POW:
+            # TODO - add compliance support
+            compliance = None
+        else:
+            compliance = self.tensorComplianceMetaData(
+                op, a.dtype, args_dict, result_tensor, error_name
+            )
+
+        return TosaTestGen.BuildInfo(result_tensor, compliance)
 
     def build_binary_nonbroadcast(self, op, a, b, validator_fcns=None, error_name=None):
         result_tens = OutputShaper.binaryNonBroadcastOp(self.ser, a, b)
@@ -3208,7 +3221,7 @@ class TosaTestGen:
                 build_binary_broadcast,
                 TosaTensorGen.tgBroadcastFuzz,
                 TosaTensorValuesGen.tvgAddSub,
-                None,
+                TosaArgGen.agNone,
             ),
             "types": TYPE_FI32,
             "error_if_validators": (
@@ -3220,6 +3233,10 @@ class TosaTestGen:
                 TosaErrorValidator.evDimensionMismatch,
                 TosaErrorValidator.evBroadcastShapesMismatch,
             ),
+            "data_gen": {
+                "fp": (gtu.DataGenType.PSEUDO_RANDOM,),
+            },
+            "compliance": {"ulp": 0.5},
         },
         "arithmetic_right_shift": {
             "op": Op.ARITHMETIC_RIGHT_SHIFT,
@@ -3247,8 +3264,8 @@ class TosaTestGen:
             "build_fcn": (
                 build_binary_broadcast,
                 TosaTensorGen.tgBroadcastFuzz,
-                TosaTensorValuesGen.tvgDefault,
-                None,
+                TosaTensorValuesGen.tvgLazyGenDefault,
+                TosaArgGen.agNone,
             ),
             "types": TYPE_INT,
             "error_if_validators": (
@@ -3267,8 +3284,8 @@ class TosaTestGen:
             "build_fcn": (
                 build_binary_broadcast,
                 TosaTensorGen.tgBroadcastFuzz,
-                TosaTensorValuesGen.tvgDefault,
-                None,
+                TosaTensorValuesGen.tvgLazyGenDefault,
+                TosaArgGen.agNone,
             ),
             "types": TYPE_INT,
             "error_if_validators": (
@@ -3287,8 +3304,8 @@ class TosaTestGen:
             "build_fcn": (
                 build_binary_broadcast,
                 TosaTensorGen.tgBroadcastFuzz,
-                TosaTensorValuesGen.tvgDefault,
-                None,
+                TosaTensorValuesGen.tvgLazyGenDefault,
+                TosaArgGen.agNone,
             ),
             "types": TYPE_INT,
             "error_if_validators": (
@@ -3308,7 +3325,7 @@ class TosaTestGen:
                 build_binary_broadcast,
                 TosaTensorGen.tgBroadcastFuzz,
                 TosaTensorValuesGen.tvgIntDiv,
-                None,
+                TosaArgGen.agNone,
             ),
             "types": [DType.INT32],
             "error_if_validators": (
@@ -3327,8 +3344,8 @@ class TosaTestGen:
             "build_fcn": (
                 build_binary_broadcast,
                 TosaTensorGen.tgBroadcastFuzz,
-                TosaTensorValuesGen.tvgDefault,
-                None,
+                TosaTensorValuesGen.tvgLazyGenDefault,
+                TosaArgGen.agNone,
             ),
             "types": TYPE_BOOL,
             "error_if_validators": (
@@ -3348,7 +3365,7 @@ class TosaTestGen:
                 build_binary_broadcast,
                 TosaTensorGen.tgBroadcastFuzz,
                 TosaTensorValuesGen.tvgLogicalShift,
-                None,
+                TosaArgGen.agNone,
             ),
             "types": TYPE_INT,
             "error_if_validators": (
@@ -3368,7 +3385,7 @@ class TosaTestGen:
                 build_binary_broadcast,
                 TosaTensorGen.tgBroadcastFuzz,
                 TosaTensorValuesGen.tvgLogicalShift,
-                None,
+                TosaArgGen.agNone,
             ),
             "types": TYPE_INT,
             "error_if_validators": (
@@ -3387,8 +3404,8 @@ class TosaTestGen:
             "build_fcn": (
                 build_binary_broadcast,
                 TosaTensorGen.tgBroadcastFuzz,
-                TosaTensorValuesGen.tvgDefault,
-                None,
+                TosaTensorValuesGen.tvgLazyGenDefault,
+                TosaArgGen.agNone,
             ),
             "types": TYPE_BOOL,
             "error_if_validators": (
@@ -3407,8 +3424,8 @@ class TosaTestGen:
             "build_fcn": (
                 build_binary_broadcast,
                 TosaTensorGen.tgBroadcastFuzz,
-                TosaTensorValuesGen.tvgDefault,
-                None,
+                TosaTensorValuesGen.tvgLazyGenDefault,
+                TosaArgGen.agNone,
             ),
             "types": TYPE_BOOL,
             "error_if_validators": (
@@ -3427,8 +3444,8 @@ class TosaTestGen:
             "build_fcn": (
                 build_binary_broadcast,
                 TosaTensorGen.tgBroadcastFuzz,
-                TosaTensorValuesGen.tvgDefault,
-                None,
+                TosaTensorValuesGen.tvgLazyGenDefault,
+                TosaArgGen.agNone,
             ),
             "types": TYPE_FI32,
             "error_if_validators": (
@@ -3440,6 +3457,9 @@ class TosaTestGen:
                 TosaErrorValidator.evDimensionMismatch,
                 TosaErrorValidator.evBroadcastShapesMismatch,
             ),
+            "data_gen": {
+                "fp": (gtu.DataGenType.PSEUDO_RANDOM,),
+            },
         },
         "minimum": {
             "op": Op.MINIMUM,
@@ -3447,8 +3467,8 @@ class TosaTestGen:
             "build_fcn": (
                 build_binary_broadcast,
                 TosaTensorGen.tgBroadcastFuzz,
-                TosaTensorValuesGen.tvgDefault,
-                None,
+                TosaTensorValuesGen.tvgLazyGenDefault,
+                TosaArgGen.agNone,
             ),
             "types": TYPE_FI32,
             "error_if_validators": (
@@ -3460,6 +3480,9 @@ class TosaTestGen:
                 TosaErrorValidator.evDimensionMismatch,
                 TosaErrorValidator.evBroadcastShapesMismatch,
             ),
+            "data_gen": {
+                "fp": (gtu.DataGenType.PSEUDO_RANDOM,),
+            },
         },
         "mul": {
             "op": Op.MUL,
@@ -3491,8 +3514,8 @@ class TosaTestGen:
             "build_fcn": (
                 build_binary_broadcast,
                 TosaTensorGen.tgBroadcastFuzz,
-                TosaTensorValuesGen.tvgDefault,
-                None,
+                TosaTensorValuesGen.tvgLazyGenDefault,
+                TosaArgGen.agNone,
             ),
             "types": TYPE_FP,
             "error_if_validators": (
@@ -3512,7 +3535,7 @@ class TosaTestGen:
                 build_binary_broadcast,
                 TosaTensorGen.tgBroadcastFuzz,
                 TosaTensorValuesGen.tvgAddSub,
-                None,
+                TosaArgGen.agNone,
             ),
             "types": TYPE_FI32,
             "error_if_validators": (
@@ -3524,6 +3547,10 @@ class TosaTestGen:
                 TosaErrorValidator.evDimensionMismatch,
                 TosaErrorValidator.evBroadcastShapesMismatch,
             ),
+            "data_gen": {
+                "fp": (gtu.DataGenType.PSEUDO_RANDOM,),
+            },
+            "compliance": {"ulp": 0.5},
         },
         "table": {
             "op": Op.TABLE,
@@ -4379,7 +4406,7 @@ class TosaTestGen:
                 build_rfft2d,
                 TosaTensorGen.tgRFFT2d,
                 TosaTensorValuesGen.tvgDefault,
-                TosaArgGen.agNone,
+                None,
             ),
             "types": [DType.FP32],
             "error_if_validators": (
diff --git a/verif/generator/tosa_verif_build_tests.py b/verif/generator/tosa_verif_build_tests.py
index d6598fb..d01e8a7 100644
--- a/verif/generator/tosa_verif_build_tests.py
+++ b/verif/generator/tosa_verif_build_tests.py
@@ -301,32 +301,42 @@ def main(argv=None):
     results = []
     for test_type in testType:
         testList = []
-        for op in ttg.TOSA_OP_LIST:
-            if re.match(args.filter + ".*", op):
-                testList.extend(
-                    ttg.genOpTestList(
-                        op,
-                        shapeFilter=args.target_shapes,
-                        rankFilter=args.target_ranks,
-                        dtypeFilter=args.target_dtypes,
-                        testType=test_type,
+        try:
+            for opName in ttg.TOSA_OP_LIST:
+                if re.match(args.filter + ".*", opName):
+                    testList.extend(
+                        ttg.genOpTestList(
+                            opName,
+                            shapeFilter=args.target_shapes,
+                            rankFilter=args.target_ranks,
+                            dtypeFilter=args.target_dtypes,
+                            testType=test_type,
+                        )
                     )
-                )
+        except Exception as e:
+            print(f"INTERNAL ERROR: Failure generating test lists for {opName}")
+            raise e
 
         print("{} matching {} tests".format(len(testList), test_type))
 
         testStrings = []
-        for opName, testStr, dtype, error, shapeList, testArgs in testList:
-            # Check for and skip duplicate tests
-            if testStr in testStrings:
-                print(f"Skipping duplicate test: {testStr}")
-                continue
-            else:
-                testStrings.append(testStr)
-
-            results.append(
-                ttg.serializeTest(opName, testStr, dtype, error, shapeList, testArgs)
-            )
+        try:
+            for opName, testStr, dtype, error, shapeList, testArgs in testList:
+                # Check for and skip duplicate tests
+                if testStr in testStrings:
+                    print(f"Skipping duplicate test: {testStr}")
+                    continue
+                else:
+                    testStrings.append(testStr)
+
+                results.append(
+                    ttg.serializeTest(
+                        opName, testStr, dtype, error, shapeList, testArgs
+                    )
+                )
+        except Exception as e:
+            print(f"INTERNAL ERROR: Failure creating test output for {opName}")
+            raise e
 
     print(f"Done creating {len(results)} tests")
-- 
cgit v1.2.1
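A note on the pseudo random range check fixed in this patch: with the original "||", one of the two comparisons "size() != 0" and "size() != 2" is always true, so every range was reported as invalid, and because there was no early return the generator also carried on even when the range really was malformed. The snippet below is an illustrative sketch only, not the reference model code; rangeIsValid is a hypothetical stand-in for the check applied to cfg.pseudoRandomInfo.range.

    #include <cstdio>
    #include <vector>

    // Hypothetical helper mirroring the corrected check: a range is acceptable
    // only when it is empty (no override requested) or a full [low, high] pair.
    static bool rangeIsValid(const std::vector<float>& range)
    {
        // The buggy form "size() != 0 || size() != 2" holds for every size,
        // since no size can equal both 0 and 2; "&&" rejects only sizes 1, 3, ...
        return range.size() == 0 || range.size() == 2;
    }

    int main()
    {
        const std::vector<float> none;                  // valid: use default range
        const std::vector<float> pair{ -2.0f, 2.0f };   // valid: explicit low/high
        const std::vector<float> partial{ -2.0f };      // invalid: high value missing

        std::printf("%d %d %d\n", rangeIsValid(none), rangeIsValid(pair), rangeIsValid(partial));
        return 0;
    }

This prints "1 1 0": the empty and two-element ranges pass, while the one-element range is rejected, which in the patched generator now also triggers an early "return false" instead of falling through to data generation.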