author     Tai Ly <tai.ly@arm.com>          2024-03-01 20:59:32 +0000
committer  Eric Kunze <eric.kunze@arm.com>  2024-03-06 18:27:07 +0000
commit     6e1e2bc06bff785e87577f24064bbc846300f8fd
tree       0a96aeac6f88799fbc297e5937cc0ffc44adcfff
parent     1d5ddeda5d853642fe3b2eade7d765386727021f
[ref model] Change RescaleOp attrs to inputs
This patch implements the changes required for RescaleOp's multiplier and shift to change from attributes to inputs.

Signed-off-by: Tai Ly <tai.ly@arm.com>
Change-Id: I178919727e3220c749dad0ebce141e695868fee0
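For context, RESCALE approximates a real-valued scale factor with an integer multiplier and a right shift, i.e. scale ~= multiplier * 2^-shift; with this patch those per-channel multiplier and shift values are supplied as input tensors rather than carried in the attribute. The sketch below is only an illustrative decomposition of that relationship, not the actual TosaQuantGen.computeMultiplierAndShift implementation:

    import math

    def compute_multiplier_and_shift(scale, scale32=True):
        # Decompose scale so that value * scale ~= (value * multiplier) >> shift.
        scale_bits = 32 if scale32 else 16
        mantissa, exponent = math.frexp(scale)  # scale = mantissa * 2**exponent, 0.5 <= mantissa < 1
        multiplier = round(mantissa * (1 << (scale_bits - 1)))
        shift = (scale_bits - 1) - exponent
        if multiplier == (1 << (scale_bits - 1)):  # rounding pushed the mantissa up to 1.0
            multiplier //= 2
            shift -= 1
        return multiplier, shift

    # e.g. compute_multiplier_and_shift(0.0157) returns a multiplier in [2**30, 2**31)
    # and a shift such that 0.0157 ~= multiplier / 2**shift.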
Diffstat (limited to 'verif')
-rw-r--r--  verif/generator/tosa_arg_gen.py   63
-rw-r--r--  verif/generator/tosa_test_gen.py  47
-rw-r--r--  verif/generator/tosa_utils.py      8
3 files changed, 82 insertions(+), 36 deletions(-)
diff --git a/verif/generator/tosa_arg_gen.py b/verif/generator/tosa_arg_gen.py
index 592c491..cbfffae 100644
--- a/verif/generator/tosa_arg_gen.py
+++ b/verif/generator/tosa_arg_gen.py
@@ -744,6 +744,10 @@ class TosaTensorValuesGen:
arr = np.int64(argsDict["fixed_data"][idx])
elif dtype == DType.INT8:
arr = np.int8(argsDict["fixed_data"][idx])
+ elif dtype == DType.INT16:
+ arr = np.int16(argsDict["fixed_data"][idx])
+ elif dtype == DType.INT32:
+ arr = np.int32(argsDict["fixed_data"][idx])
else:
assert False, "Unsupported fixed_data type"
else:
@@ -1060,6 +1064,26 @@ class TosaTensorValuesGen:
)
@staticmethod
+ def tvgRescale(testGen, opName, dtypeList, shapeList, argsDict, error_name=None):
+ scale32 = argsDict["scale"]
+ multiplier_arr = argsDict["multiplier"]
+ shift_arr = argsDict["shift"]
+
+ if scale32:
+ dtypeList[1] = DType.INT32
+ else:
+ dtypeList[1] = DType.INT16
+ shapeList[1] = [len(multiplier_arr)]
+ dtypeList[2] = DType.INT8
+ shapeList[2] = [len(shift_arr)]
+ # Create a new list for the pre-generated data in argsDict["fixed_data"]
+ argsDict["fixed_data"] = [None, multiplier_arr, shift_arr]
+
+ return TosaTensorValuesGen.tvgLazyGenDefault(
+ testGen, opName, dtypeList, shapeList, argsDict, error_name
+ )
+
+ @staticmethod
def tvgPad(testGen, opName, dtypeList, shapeList, argsDict, error_name=None):
# argsDict["pad"] is 2D array, need to flatten it to get list of values
pad_values = argsDict["pad"].flatten()
@@ -2842,6 +2866,43 @@ class TosaArgGen:
# Illegal condition. ERROR_IF(!scale32 && double_round)
continue
+ if per_channel:
+ nc = shapeList[0][-1]
+ else:
+ nc = 1
+
+ in_type_width = gtu.dtypeWidth(inDtype)
+ out_type_width = gtu.dtypeWidth(outDtype)
+
+ # Calculate scale based on:
+ # scale = a *(2^output_width)/(2^input_width))
+
+ a = np.float32(testGen.rng.random(size=[nc]))
+ scale_arr = a * np.float32(
+ (1 << out_type_width) / (1 << in_type_width)
+ )
+
+ if scale32:
+ # Cap the scaling at 2^31 - 1 for scale32
+ scale_arr = np.clip(
+ scale_arr, 1.0 / (1 << 31), (1 << 31) - 1
+ )
+ else:
+ # Cap the scaling at 2^15 - 1 for scale16
+ scale_arr = np.clip(scale_arr, 1.0 / (1 << 31), 32767.0)
+
+ # print('{} {} -> {}'.format(out_type_width, in_type_width, scale_arr))
+
+ multiplier_arr = np.int32(np.zeros(shape=[nc]))
+ shift_arr = np.int32(np.zeros(shape=[nc]))
+ for i in range(nc):
+ (
+ multiplier_arr[i],
+ shift_arr[i],
+ ) = TosaQuantGen.computeMultiplierAndShift(
+ scale_arr[i], scale32
+ )
+
arg_list.append(
(
"out{}_sc{}_dr{}_pc{}".format(
@@ -2855,6 +2916,8 @@ class TosaArgGen:
"scale": scale32,
"double_round": double_round,
"per_channel": per_channel,
+ "multiplier": multiplier_arr,
+ "shift": shift_arr,
},
)
)
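To make the scale selection added to agRescale above concrete, here is a small standalone sketch, assuming an INT8 input, an INT32 output and three channels (rng stands in for testGen.rng):

    import numpy as np

    rng = np.random.default_rng(0)          # stands in for testGen.rng
    nc = 3                                  # per_channel: one scale per channel
    in_type_width, out_type_width = 8, 32   # e.g. INT8 input, INT32 output

    # scale = a * (2^output_width) / (2^input_width), so here a * 2**24
    a = np.float32(rng.random(size=[nc]))
    scale_arr = a * np.float32((1 << out_type_width) / (1 << in_type_width))

    scale32 = True
    if scale32:
        scale_arr = np.clip(scale_arr, 1.0 / (1 << 31), (1 << 31) - 1)  # cap at 2^31 - 1
    else:
        scale_arr = np.clip(scale_arr, 1.0 / (1 << 31), 32767.0)        # cap at 2^15 - 1

Each clipped scale is then decomposed per channel by TosaQuantGen.computeMultiplierAndShift into the multiplier and shift arrays that tvgRescale turns into the new INT32 (or INT16 for scale16) multiplier tensor and INT8 shift tensor.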
diff --git a/verif/generator/tosa_test_gen.py b/verif/generator/tosa_test_gen.py
index 978e735..415858c 100644
--- a/verif/generator/tosa_test_gen.py
+++ b/verif/generator/tosa_test_gen.py
@@ -317,13 +317,6 @@ class TosaTestGen:
"Unknown dtype, cannot convert to string: {}".format(dtype)
)
- def typeWidth(self, dtype):
- """Get the datatype width for data types"""
- if dtype in gtu.DTYPE_ATTRIBUTES:
- return gtu.DTYPE_ATTRIBUTES[dtype]["width"]
- else:
- raise Exception(f"Unknown dtype, cannot determine width: {dtype}")
-
def constrictBatchSize(self, shape):
# Limit the batch size unless an explicit target shape set
if self.args.max_batch_size and not self.args.target_shapes:
@@ -2130,12 +2123,15 @@ class TosaTestGen:
error_name=None,
qinfo=None,
):
- assert len(inputs) == 1
+ assert len(inputs) == 3
val = inputs[0]
+ multiplier_val = inputs[1]
+ shift_val = inputs[2]
out_dtype = args_dict["output_dtype"]
scale32 = args_dict["scale"]
double_round = args_dict["double_round"]
per_channel = args_dict["per_channel"]
+ shift_arr = args_dict["shift"]
result_tensor = OutputShaper.typeConversionOp(
self.ser, self.rng, val, out_dtype, error_name
@@ -2146,8 +2142,8 @@ class TosaTestGen:
else:
nc = 1
- in_type_width = self.typeWidth(val.dtype)
- out_type_width = self.typeWidth(out_dtype)
+ in_type_width = gtu.dtypeWidth(val.dtype)
+ out_type_width = gtu.dtypeWidth(out_dtype)
input_unsigned = False
output_unsigned = False
@@ -2198,31 +2194,10 @@ class TosaTestGen:
else:
output_zp = 0
- # Calculate scale based on:
- # scale = a *(2^output_width)/(2^input_width))
-
- a = np.float32(self.rng.random(size=[nc]))
- scale_arr = a * np.float32((1 << out_type_width) / (1 << in_type_width))
-
- if scale32:
- pass
- # Cap the scaling at 2^31 - 1 for scale32
- scale_arr = np.clip(scale_arr, 1.0 / (1 << 31), (1 << 31) - 1)
- else:
- # Cap the scaling at 2^15 - 1 for scale16
- scale_arr = np.clip(scale_arr, 1.0 / (1 << 31), 32767.0)
-
- # print('{} {} -> {}'.format(out_type_width, in_type_width, scale_arr))
-
- multiplier_arr = np.int32(np.zeros(shape=[nc]))
- shift_arr = np.int32(np.zeros(shape=[nc]))
min_shift_value_arr = np.int64(np.zeros(shape=[nc]))
max_shift_value_arr = np.int64(np.zeros(shape=[nc]))
for i in range(nc):
- multiplier_arr[i], shift_arr[i] = TosaQuantGen.computeMultiplierAndShift(
- scale_arr[i], scale32
- )
min_shift_value_arr[i] = -1 << (shift_arr[i] - 1)
max_shift_value_arr[i] = (1 << (shift_arr[i] - 1)) - 1
@@ -2256,7 +2231,7 @@ class TosaTestGen:
)
# Invalidate Input/Output list for error if checks.
- input_list = [val.name]
+ input_list = [val.name, multiplier_val.name, shift_val.name]
output_list = [result_tensor.name]
pCount, cCount = op["operands"]
num_operands = pCount + cCount
@@ -2287,8 +2262,8 @@ class TosaTestGen:
attr.RescaleAttribute(
input_zp,
output_zp,
- multiplier_arr,
- shift_arr,
+ [],
+ [],
scale32,
double_round,
per_channel,
@@ -4809,11 +4784,11 @@ class TosaTestGen:
},
"rescale": {
"op": Op.RESCALE,
- "operands": (1, 0),
+ "operands": (3, 0),
"build_fcn": (
build_rescale,
TosaTensorGen.tgBasic,
- TosaTensorValuesGen.tvgLazyGenDefault,
+ TosaTensorValuesGen.tvgRescale,
TosaArgGen.agRescale,
),
"types": [
diff --git a/verif/generator/tosa_utils.py b/verif/generator/tosa_utils.py
index 31a0ff0..384463f 100644
--- a/verif/generator/tosa_utils.py
+++ b/verif/generator/tosa_utils.py
@@ -55,6 +55,14 @@ class DataGenType(IntEnum):
FIXED_DATA = 5
+def dtypeWidth(dtype):
+ """Get the datatype width for data types"""
+ if dtype in DTYPE_ATTRIBUTES:
+ return DTYPE_ATTRIBUTES[dtype]["width"]
+ else:
+ raise Exception(f"Unknown dtype, cannot determine width: {dtype}")
+
+
def dtypeIsSupportedByCompliance(dtype):
"""Types supported by the new data generation and compliance flow."""
if isinstance(dtype, list) or isinstance(dtype, tuple):
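As a quick usage sketch of the new shared helper, the snippet below mirrors how the agRescale and build_rescale call sites use dtypeWidth; the DTYPE_ATTRIBUTES entries shown are illustrative stand-ins, not the real table in tosa_utils.py:

    # Illustrative stand-in for the real DTYPE_ATTRIBUTES table
    DTYPE_ATTRIBUTES = {
        "INT8": {"width": 8},
        "INT32": {"width": 32},
    }

    def dtypeWidth(dtype):
        """Get the datatype width for data types"""
        if dtype in DTYPE_ATTRIBUTES:
            return DTYPE_ATTRIBUTES[dtype]["width"]
        raise Exception(f"Unknown dtype, cannot determine width: {dtype}")

    in_type_width = dtypeWidth("INT8")     # 8
    out_type_width = dtypeWidth("INT32")   # 32
    scale_upper = (1 << out_type_width) / (1 << in_type_width)   # 2**24 for INT8 -> INT32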