diff options
Diffstat (limited to 'verif')
38 files changed, 5613 insertions, 0 deletions
diff --git a/verif/tosa/Attribute.py b/verif/tosa/Attribute.py new file mode 100644 index 0000000..a4d96e0 --- /dev/null +++ b/verif/tosa/Attribute.py @@ -0,0 +1,36 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# Copyright (c) 2020, ARM Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# namespace: tosa + +class Attribute(object): + NONE = 0 + Pool2dAttribute = 1 + Conv2dAttribute = 2 + TransposeConv2dAttribute = 3 + ReluNAttribute = 4 + AxisAttribute = 5 + ReshapeAttribute = 6 + SliceAttribute = 7 + TileAttribute = 8 + ResizeAttribute = 9 + ClampAttribute = 10 + RescaleAttribute = 11 + CustomAttribute = 12 + CondIfAttribute = 13 + WhileLoopAttribute = 14 + diff --git a/verif/tosa/AxisAttribute.py b/verif/tosa/AxisAttribute.py new file mode 100644 index 0000000..d47eb81 --- /dev/null +++ b/verif/tosa/AxisAttribute.py @@ -0,0 +1,45 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# Copyright (c) 2020, ARM Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + + +# namespace: tosa + +import flatbuffers + +class AxisAttribute(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAsAxisAttribute(cls, buf, offset): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = AxisAttribute() + x.Init(buf, n + offset) + return x + + # AxisAttribute + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # AxisAttribute + def Axis(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + +def AxisAttributeStart(builder): builder.StartObject(1) +def AxisAttributeAddAxis(builder, axis): builder.PrependInt32Slot(0, axis, 0) +def AxisAttributeEnd(builder): return builder.EndObject() diff --git a/verif/tosa/ClampAttribute.py b/verif/tosa/ClampAttribute.py new file mode 100644 index 0000000..ddc95cf --- /dev/null +++ b/verif/tosa/ClampAttribute.py @@ -0,0 +1,69 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# Copyright (c) 2020, ARM Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +# namespace: tosa + +import flatbuffers + +class ClampAttribute(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAsClampAttribute(cls, buf, offset): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = ClampAttribute() + x.Init(buf, n + offset) + return x + + # ClampAttribute + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # ClampAttribute + def MinInt(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + # ClampAttribute + def MaxInt(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + # ClampAttribute + def MinFp(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos) + return 0.0 + + # ClampAttribute + def MaxFp(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos) + return 0.0 + +def ClampAttributeStart(builder): builder.StartObject(4) +def ClampAttributeAddMinInt(builder, minInt): builder.PrependInt32Slot(0, minInt, 0) +def ClampAttributeAddMaxInt(builder, maxInt): builder.PrependInt32Slot(1, maxInt, 0) +def ClampAttributeAddMinFp(builder, minFp): builder.PrependFloat32Slot(2, minFp, 0.0) +def ClampAttributeAddMaxFp(builder, maxFp): builder.PrependFloat32Slot(3, maxFp, 0.0) +def ClampAttributeEnd(builder): return builder.EndObject() diff --git a/verif/tosa/CondIfAttribute.py b/verif/tosa/CondIfAttribute.py new file mode 100644 index 0000000..0bf4566 --- /dev/null +++ b/verif/tosa/CondIfAttribute.py @@ -0,0 +1,53 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# 
Copyright (c) 2020, ARM Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# namespace: tosa + +import flatbuffers + +class CondIfAttribute(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAsCondIfAttribute(cls, buf, offset): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = CondIfAttribute() + x.Init(buf, n + offset) + return x + + # CondIfAttribute + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # CondIfAttribute + def ThenBranch(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.String(o + self._tab.Pos) + return None + + # CondIfAttribute + def ElseBranch(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.String(o + self._tab.Pos) + return None + +def CondIfAttributeStart(builder): builder.StartObject(2) +def CondIfAttributeAddThenBranch(builder, thenBranch): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(thenBranch), 0) +def CondIfAttributeAddElseBranch(builder, elseBranch): builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(elseBranch), 0) +def CondIfAttributeEnd(builder): return builder.EndObject() diff --git a/verif/tosa/Conv2dAttribute.py b/verif/tosa/Conv2dAttribute.py new file mode 100644 index 0000000..c7861a5 --- /dev/null +++ b/verif/tosa/Conv2dAttribute.py @@ -0,0 +1,109 @@ +# 
automatically generated by the FlatBuffers compiler, do not modify + +# Copyright (c) 2020, ARM Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# namespace: tosa + +import flatbuffers + +class Conv2dAttribute(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAsConv2dAttribute(cls, buf, offset): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = Conv2dAttribute() + x.Init(buf, n + offset) + return x + + # Conv2dAttribute + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # Conv2dAttribute + def Padding(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4)) + return 0 + + # Conv2dAttribute + def PaddingAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o) + return 0 + + # Conv2dAttribute + def PaddingLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # Conv2dAttribute + def Stride(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get(flatbuffers.number_types.Int32Flags, a + 
flatbuffers.number_types.UOffsetTFlags.py_type(j * 4)) + return 0 + + # Conv2dAttribute + def StrideAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o) + return 0 + + # Conv2dAttribute + def StrideLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # Conv2dAttribute + def Dilation(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4)) + return 0 + + # Conv2dAttribute + def DilationAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o) + return 0 + + # Conv2dAttribute + def DilationLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + +def Conv2dAttributeStart(builder): builder.StartObject(3) +def Conv2dAttributeAddPadding(builder, padding): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(padding), 0) +def Conv2dAttributeStartPaddingVector(builder, numElems): return builder.StartVector(4, numElems, 4) +def Conv2dAttributeAddStride(builder, stride): builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(stride), 0) +def Conv2dAttributeStartStrideVector(builder, numElems): return builder.StartVector(4, numElems, 4) +def Conv2dAttributeAddDilation(builder, dilation): builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(dilation), 0) +def Conv2dAttributeStartDilationVector(builder, numElems): return builder.StartVector(4, numElems, 4) +def 
Conv2dAttributeEnd(builder): return builder.EndObject() diff --git a/verif/tosa/ConvQuantInfo.py b/verif/tosa/ConvQuantInfo.py new file mode 100644 index 0000000..a88bfa6 --- /dev/null +++ b/verif/tosa/ConvQuantInfo.py @@ -0,0 +1,53 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# Copyright (c) 2020, ARM Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# namespace: tosa + +import flatbuffers + +class ConvQuantInfo(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAsConvQuantInfo(cls, buf, offset): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = ConvQuantInfo() + x.Init(buf, n + offset) + return x + + # ConvQuantInfo + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # ConvQuantInfo + def InputZp(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + # ConvQuantInfo + def WeightZp(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + +def ConvQuantInfoStart(builder): builder.StartObject(2) +def ConvQuantInfoAddInputZp(builder, inputZp): builder.PrependInt32Slot(0, inputZp, 0) +def ConvQuantInfoAddWeightZp(builder, weightZp): builder.PrependInt32Slot(1, weightZp, 0) +def ConvQuantInfoEnd(builder): 
return builder.EndObject() diff --git a/verif/tosa/CustomAttribute.py b/verif/tosa/CustomAttribute.py new file mode 100644 index 0000000..25f6759 --- /dev/null +++ b/verif/tosa/CustomAttribute.py @@ -0,0 +1,45 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# Copyright (c) 2020, ARM Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# namespace: tosa + +import flatbuffers + +class CustomAttribute(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAsCustomAttribute(cls, buf, offset): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = CustomAttribute() + x.Init(buf, n + offset) + return x + + # CustomAttribute + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # CustomAttribute + def Identifier(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.String(o + self._tab.Pos) + return None + +def CustomAttributeStart(builder): builder.StartObject(1) +def CustomAttributeAddIdentifier(builder, identifier): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(identifier), 0) +def CustomAttributeEnd(builder): return builder.EndObject() diff --git a/verif/tosa/DType.py b/verif/tosa/DType.py new file mode 100644 index 0000000..44d9970 --- /dev/null +++ b/verif/tosa/DType.py @@ -0,0 +1,31 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# Copyright (c) 2020, ARM 
Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# namespace: tosa + +class DType(object): + UNKNOWN = 0 + BOOL = 1 + AINT8 = 2 + UINT8 = 3 + INT4 = 4 + INT8 = 5 + INT16 = 6 + INT32 = 7 + INT48 = 8 + FLOAT = 9 + diff --git a/verif/tosa/Format.py b/verif/tosa/Format.py new file mode 100644 index 0000000..5db4f27 --- /dev/null +++ b/verif/tosa/Format.py @@ -0,0 +1,27 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# Copyright (c) 2020, ARM Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# namespace: tosa + +class Format(object): + UNKNOWN = 0 + NHWC = 1 + NDHWC = 2 + OHWI = 3 + HWIM = 4 + DOHWI = 5 + diff --git a/verif/tosa/MatMulQuantInfo.py b/verif/tosa/MatMulQuantInfo.py new file mode 100644 index 0000000..b8390a9 --- /dev/null +++ b/verif/tosa/MatMulQuantInfo.py @@ -0,0 +1,53 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# Copyright (c) 2020, ARM Limited. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# namespace: tosa + +import flatbuffers + +class MatMulQuantInfo(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAsMatMulQuantInfo(cls, buf, offset): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = MatMulQuantInfo() + x.Init(buf, n + offset) + return x + + # MatMulQuantInfo + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # MatMulQuantInfo + def AZp(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + # MatMulQuantInfo + def BZp(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + +def MatMulQuantInfoStart(builder): builder.StartObject(2) +def MatMulQuantInfoAddAZp(builder, aZp): builder.PrependInt32Slot(0, aZp, 0) +def MatMulQuantInfoAddBZp(builder, bZp): builder.PrependInt32Slot(1, bZp, 0) +def MatMulQuantInfoEnd(builder): return builder.EndObject() diff --git a/verif/tosa/Op.py b/verif/tosa/Op.py new file mode 100644 index 0000000..09f1364 --- /dev/null +++ b/verif/tosa/Op.py @@ -0,0 +1,90 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# Copyright (c) 2020, ARM Limited. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# namespace: tosa + +class Op(object): + UNKNOWN = 0 + ARGMAX = 1 + AVG_POOL2D = 2 + CONV2D = 3 + CONV3D = 4 + DEPTHWISE_CONV2D = 5 + FULLY_CONNECTED = 6 + MATMUL = 7 + MAX_POOL2D = 8 + TRANSPOSE_CONV2D = 9 + CLAMP = 10 + RELUN = 11 + SIGMOID = 12 + TANH = 13 + ADD = 14 + ARITHMETIC_RIGHT_SHIFT = 15 + BITWISE_AND = 16 + BITWISE_OR = 17 + BITWISE_XOR = 18 + LOGICAL_AND = 19 + LOGICAL_LEFT_SHIFT = 20 + LOGICAL_RIGHT_SHIFT = 21 + LOGICAL_OR = 22 + LOGICAL_XOR = 23 + MAXIMUM = 24 + MINIMUM = 25 + MUL = 26 + POW = 27 + SUB = 28 + TABLE = 29 + ABS = 30 + BITWISE_NOT = 31 + CEIL = 32 + CLZ = 33 + EXP = 34 + FLOOR = 35 + LOG = 36 + LOGICAL_NOT = 37 + NEGATE = 38 + RECIPROCAL = 39 + RSQRT = 40 + SELECT = 41 + EQUAL = 42 + GREATER = 43 + GREATER_EQUAL = 44 + REDUCE_ANY = 45 + REDUCE_ALL = 46 + REDUCE_MAX = 47 + REDUCE_MIN = 48 + REDUCE_PRODUCT = 49 + REDUCE_SUM = 50 + CONCAT = 51 + PAD = 52 + RESHAPE = 53 + REVERSE = 54 + SLICE = 55 + TILE = 56 + TRANSPOSE = 57 + GATHER = 58 + RESIZE = 59 + CAST = 60 + RESCALE = 61 + CONST = 62 + PLACEHOLDER = 63 + IDENTITY = 64 + IDENTITYN = 65 + CUSTOM = 66 + COND_IF = 67 + WHILE_LOOP = 68 + diff --git a/verif/tosa/PadQuantInfo.py b/verif/tosa/PadQuantInfo.py new file mode 100644 index 0000000..df61926 --- /dev/null +++ b/verif/tosa/PadQuantInfo.py @@ -0,0 +1,45 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# Copyright (c) 2020, ARM Limited. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# namespace: tosa + +import flatbuffers + +class PadQuantInfo(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAsPadQuantInfo(cls, buf, offset): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = PadQuantInfo() + x.Init(buf, n + offset) + return x + + # PadQuantInfo + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # PadQuantInfo + def InputZp(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + +def PadQuantInfoStart(builder): builder.StartObject(1) +def PadQuantInfoAddInputZp(builder, inputZp): builder.PrependInt32Slot(0, inputZp, 0) +def PadQuantInfoEnd(builder): return builder.EndObject() diff --git a/verif/tosa/Pool2dAttribute.py b/verif/tosa/Pool2dAttribute.py new file mode 100644 index 0000000..1520de2 --- /dev/null +++ b/verif/tosa/Pool2dAttribute.py @@ -0,0 +1,109 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# Copyright (c) 2020, ARM Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# namespace: tosa + +import flatbuffers + +class Pool2dAttribute(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAsPool2dAttribute(cls, buf, offset): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = Pool2dAttribute() + x.Init(buf, n + offset) + return x + + # Pool2dAttribute + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # Pool2dAttribute + def Padding(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4)) + return 0 + + # Pool2dAttribute + def PaddingAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o) + return 0 + + # Pool2dAttribute + def PaddingLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # Pool2dAttribute + def Kernel(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4)) + return 0 + + # Pool2dAttribute + def KernelAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return 
self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o) + return 0 + + # Pool2dAttribute + def KernelLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # Pool2dAttribute + def Stride(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4)) + return 0 + + # Pool2dAttribute + def StrideAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o) + return 0 + + # Pool2dAttribute + def StrideLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + +def Pool2dAttributeStart(builder): builder.StartObject(3) +def Pool2dAttributeAddPadding(builder, padding): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(padding), 0) +def Pool2dAttributeStartPaddingVector(builder, numElems): return builder.StartVector(4, numElems, 4) +def Pool2dAttributeAddKernel(builder, kernel): builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(kernel), 0) +def Pool2dAttributeStartKernelVector(builder, numElems): return builder.StartVector(4, numElems, 4) +def Pool2dAttributeAddStride(builder, stride): builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(stride), 0) +def Pool2dAttributeStartStrideVector(builder, numElems): return builder.StartVector(4, numElems, 4) +def Pool2dAttributeEnd(builder): return builder.EndObject() diff --git a/verif/tosa/QuantInfo.py b/verif/tosa/QuantInfo.py new file mode 100644 index 0000000..0544cce --- /dev/null +++ b/verif/tosa/QuantInfo.py @@ -0,0 +1,26 @@ +# automatically 
generated by the FlatBuffers compiler, do not modify + +# Copyright (c) 2020, ARM Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# namespace: tosa + +class QuantInfo(object): + NONE = 0 + UnaryQuantInfo = 1 + ConvQuantInfo = 2 + MatMulQuantInfo = 3 + PadQuantInfo = 4 + diff --git a/verif/tosa/README.md b/verif/tosa/README.md new file mode 100644 index 0000000..de8c1f9 --- /dev/null +++ b/verif/tosa/README.md @@ -0,0 +1,14 @@ +TOSA FlatBuffers python serialization library +============================================= + +Files in this directory are automatically generated by running: + +``` bash +../build/thirdparty/flatbuffers/flatc --python ../serialization/tosa.fbs +``` + +From the ``verif/`` directory. Flatc is compiled along with the *TOSA +Reference Model*. + +*Because they are automatically generated, please do not edit the +python files in this directory by hand.* diff --git a/verif/tosa/ReluNAttribute.py b/verif/tosa/ReluNAttribute.py new file mode 100644 index 0000000..e446c03 --- /dev/null +++ b/verif/tosa/ReluNAttribute.py @@ -0,0 +1,53 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# Copyright (c) 2020, ARM Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# namespace: tosa + +import flatbuffers + +class ReluNAttribute(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAsReluNAttribute(cls, buf, offset): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = ReluNAttribute() + x.Init(buf, n + offset) + return x + + # ReluNAttribute + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # ReluNAttribute + def MaxInt(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + # ReluNAttribute + def MaxFp(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos) + return 0.0 + +def ReluNAttributeStart(builder): builder.StartObject(2) +def ReluNAttributeAddMaxInt(builder, maxInt): builder.PrependInt32Slot(0, maxInt, 0) +def ReluNAttributeAddMaxFp(builder, maxFp): builder.PrependFloat32Slot(1, maxFp, 0.0) +def ReluNAttributeEnd(builder): return builder.EndObject() diff --git a/verif/tosa/RescaleAttribute.py b/verif/tosa/RescaleAttribute.py new file mode 100644 index 0000000..0ec8c2b --- /dev/null +++ b/verif/tosa/RescaleAttribute.py @@ -0,0 +1,125 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# Copyright (c) 2020, ARM Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# namespace: tosa + +import flatbuffers + +class RescaleAttribute(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAsRescaleAttribute(cls, buf, offset): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = RescaleAttribute() + x.Init(buf, n + offset) + return x + + # RescaleAttribute + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # RescaleAttribute + def InputZp(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + # RescaleAttribute + def OutputZp(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + # RescaleAttribute + def Multiplier(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4)) + return 0 + + # RescaleAttribute + def MultiplierAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o) + return 0 + + # RescaleAttribute + def MultiplierLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # RescaleAttribute + def 
Shift(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4)) + return 0 + + # RescaleAttribute + def ShiftAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o) + return 0 + + # RescaleAttribute + def ShiftLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # RescaleAttribute + def Scale32(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12)) + if o != 0: + return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos)) + return False + + # RescaleAttribute + def DoubleRound(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14)) + if o != 0: + return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos)) + return False + + # RescaleAttribute + def PerChannel(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16)) + if o != 0: + return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos)) + return False + +def RescaleAttributeStart(builder): builder.StartObject(7) +def RescaleAttributeAddInputZp(builder, inputZp): builder.PrependInt32Slot(0, inputZp, 0) +def RescaleAttributeAddOutputZp(builder, outputZp): builder.PrependInt32Slot(1, outputZp, 0) +def RescaleAttributeAddMultiplier(builder, multiplier): builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(multiplier), 0) +def RescaleAttributeStartMultiplierVector(builder, numElems): return builder.StartVector(4, numElems, 4) +def RescaleAttributeAddShift(builder, shift): builder.PrependUOffsetTRelativeSlot(3, 
flatbuffers.number_types.UOffsetTFlags.py_type(shift), 0) +def RescaleAttributeStartShiftVector(builder, numElems): return builder.StartVector(4, numElems, 4) +def RescaleAttributeAddScale32(builder, scale32): builder.PrependBoolSlot(4, scale32, 0) +def RescaleAttributeAddDoubleRound(builder, doubleRound): builder.PrependBoolSlot(5, doubleRound, 0) +def RescaleAttributeAddPerChannel(builder, perChannel): builder.PrependBoolSlot(6, perChannel, 0) +def RescaleAttributeEnd(builder): return builder.EndObject() diff --git a/verif/tosa/ReshapeAttribute.py b/verif/tosa/ReshapeAttribute.py new file mode 100644 index 0000000..2c50cef --- /dev/null +++ b/verif/tosa/ReshapeAttribute.py @@ -0,0 +1,61 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# Copyright (c) 2020, ARM Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +# namespace: tosa + +import flatbuffers + +class ReshapeAttribute(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAsReshapeAttribute(cls, buf, offset): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = ReshapeAttribute() + x.Init(buf, n + offset) + return x + + # ReshapeAttribute + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # ReshapeAttribute + def Shape(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4)) + return 0 + + # ReshapeAttribute + def ShapeAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o) + return 0 + + # ReshapeAttribute + def ShapeLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + +def ReshapeAttributeStart(builder): builder.StartObject(1) +def ReshapeAttributeAddShape(builder, shape): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(shape), 0) +def ReshapeAttributeStartShapeVector(builder, numElems): return builder.StartVector(4, numElems, 4) +def ReshapeAttributeEnd(builder): return builder.EndObject() diff --git a/verif/tosa/ResizeAttribute.py b/verif/tosa/ResizeAttribute.py new file mode 100644 index 0000000..1e6941f --- /dev/null +++ b/verif/tosa/ResizeAttribute.py @@ -0,0 +1,125 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# Copyright (c) 2020, ARM Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# namespace: tosa + +import flatbuffers + +class ResizeAttribute(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAsResizeAttribute(cls, buf, offset): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = ResizeAttribute() + x.Init(buf, n + offset) + return x + + # ResizeAttribute + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # ResizeAttribute + def OutputSize(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4)) + return 0 + + # ResizeAttribute + def OutputSizeAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o) + return 0 + + # ResizeAttribute + def OutputSizeLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # ResizeAttribute + def Stride(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4)) + return 0 + + # ResizeAttribute + def StrideAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return 
self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o) + return 0 + + # ResizeAttribute + def StrideLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # ResizeAttribute + def Offset(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4)) + return 0 + + # ResizeAttribute + def OffsetAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o) + return 0 + + # ResizeAttribute + def OffsetLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # ResizeAttribute + def Shift(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + # ResizeAttribute + def Mode(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Uint32Flags, o + self._tab.Pos) + return 0 + +def ResizeAttributeStart(builder): builder.StartObject(5) +def ResizeAttributeAddOutputSize(builder, outputSize): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(outputSize), 0) +def ResizeAttributeStartOutputSizeVector(builder, numElems): return builder.StartVector(4, numElems, 4) +def ResizeAttributeAddStride(builder, stride): builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(stride), 0) +def ResizeAttributeStartStrideVector(builder, numElems): return builder.StartVector(4, numElems, 4) +def ResizeAttributeAddOffset(builder, 
offset): builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(offset), 0) +def ResizeAttributeStartOffsetVector(builder, numElems): return builder.StartVector(4, numElems, 4) +def ResizeAttributeAddShift(builder, shift): builder.PrependInt32Slot(3, shift, 0) +def ResizeAttributeAddMode(builder, mode): builder.PrependUint32Slot(4, mode, 0) +def ResizeAttributeEnd(builder): return builder.EndObject() diff --git a/verif/tosa/ResizeMode.py b/verif/tosa/ResizeMode.py new file mode 100644 index 0000000..02bed51 --- /dev/null +++ b/verif/tosa/ResizeMode.py @@ -0,0 +1,24 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# Copyright (c) 2020, ARM Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# namespace: tosa + +class ResizeMode(object): + UNKNOWN = 0 + NEAREST = 1 + BILINEAR = 2 + diff --git a/verif/tosa/SliceAttribute.py b/verif/tosa/SliceAttribute.py new file mode 100644 index 0000000..d156a4a --- /dev/null +++ b/verif/tosa/SliceAttribute.py @@ -0,0 +1,85 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# Copyright (c) 2020, ARM Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# namespace: tosa + +import flatbuffers + +class SliceAttribute(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAsSliceAttribute(cls, buf, offset): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = SliceAttribute() + x.Init(buf, n + offset) + return x + + # SliceAttribute + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # SliceAttribute + def Begin(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4)) + return 0 + + # SliceAttribute + def BeginAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o) + return 0 + + # SliceAttribute + def BeginLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # SliceAttribute + def Size(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4)) + return 0 + + # SliceAttribute + def SizeAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o) + return 0 + 
+ # SliceAttribute + def SizeLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + +def SliceAttributeStart(builder): builder.StartObject(2) +def SliceAttributeAddBegin(builder, begin): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(begin), 0) +def SliceAttributeStartBeginVector(builder, numElems): return builder.StartVector(4, numElems, 4) +def SliceAttributeAddSize(builder, size): builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(size), 0) +def SliceAttributeStartSizeVector(builder, numElems): return builder.StartVector(4, numElems, 4) +def SliceAttributeEnd(builder): return builder.EndObject() diff --git a/verif/tosa/TileAttribute.py b/verif/tosa/TileAttribute.py new file mode 100644 index 0000000..6385edd --- /dev/null +++ b/verif/tosa/TileAttribute.py @@ -0,0 +1,61 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# Copyright (c) 2020, ARM Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +# namespace: tosa + +import flatbuffers + +class TileAttribute(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAsTileAttribute(cls, buf, offset): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = TileAttribute() + x.Init(buf, n + offset) + return x + + # TileAttribute + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # TileAttribute + def Multiples(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4)) + return 0 + + # TileAttribute + def MultiplesAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o) + return 0 + + # TileAttribute + def MultiplesLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + +def TileAttributeStart(builder): builder.StartObject(1) +def TileAttributeAddMultiples(builder, multiples): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(multiples), 0) +def TileAttributeStartMultiplesVector(builder, numElems): return builder.StartVector(4, numElems, 4) +def TileAttributeEnd(builder): return builder.EndObject() diff --git a/verif/tosa/TosaBasicBlock.py b/verif/tosa/TosaBasicBlock.py new file mode 100644 index 0000000..42a7379 --- /dev/null +++ b/verif/tosa/TosaBasicBlock.py @@ -0,0 +1,123 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# Copyright (c) 2020, ARM Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# namespace: tosa + +import flatbuffers + +class TosaBasicBlock(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAsTosaBasicBlock(cls, buf, offset): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = TosaBasicBlock() + x.Init(buf, n + offset) + return x + + # TosaBasicBlock + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # TosaBasicBlock + def Name(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.String(o + self._tab.Pos) + return None + + # TosaBasicBlock + def Operators(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + x = self._tab.Vector(o) + x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4 + x = self._tab.Indirect(x) + from .TosaOperator import TosaOperator + obj = TosaOperator() + obj.Init(self._tab.Bytes, x) + return obj + return None + + # TosaBasicBlock + def OperatorsLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # TosaBasicBlock + def Tensors(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + x = self._tab.Vector(o) + x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4 + x = self._tab.Indirect(x) + from .TosaTensor import TosaTensor + obj = TosaTensor() + obj.Init(self._tab.Bytes, x) + return obj + return None + + # TosaBasicBlock + def TensorsLength(self): + o = 
flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # TosaBasicBlock + def Inputs(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.String(a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4)) + return "" + + # TosaBasicBlock + def InputsLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # TosaBasicBlock + def Outputs(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.String(a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4)) + return "" + + # TosaBasicBlock + def OutputsLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + +def TosaBasicBlockStart(builder): builder.StartObject(5) +def TosaBasicBlockAddName(builder, name): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(name), 0) +def TosaBasicBlockAddOperators(builder, operators): builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(operators), 0) +def TosaBasicBlockStartOperatorsVector(builder, numElems): return builder.StartVector(4, numElems, 4) +def TosaBasicBlockAddTensors(builder, tensors): builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(tensors), 0) +def TosaBasicBlockStartTensorsVector(builder, numElems): return builder.StartVector(4, numElems, 4) +def TosaBasicBlockAddInputs(builder, inputs): builder.PrependUOffsetTRelativeSlot(3, flatbuffers.number_types.UOffsetTFlags.py_type(inputs), 0) +def TosaBasicBlockStartInputsVector(builder, numElems): return builder.StartVector(4, numElems, 4) +def TosaBasicBlockAddOutputs(builder, outputs): 
builder.PrependUOffsetTRelativeSlot(4, flatbuffers.number_types.UOffsetTFlags.py_type(outputs), 0) +def TosaBasicBlockStartOutputsVector(builder, numElems): return builder.StartVector(4, numElems, 4) +def TosaBasicBlockEnd(builder): return builder.EndObject() diff --git a/verif/tosa/TosaGraph.py b/verif/tosa/TosaGraph.py new file mode 100644 index 0000000..92568b9 --- /dev/null +++ b/verif/tosa/TosaGraph.py @@ -0,0 +1,71 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# Copyright (c) 2020, ARM Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +# namespace: tosa + +import flatbuffers + +class TosaGraph(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAsTosaGraph(cls, buf, offset): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = TosaGraph() + x.Init(buf, n + offset) + return x + + # TosaGraph + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # TosaGraph + def Version(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + x = self._tab.Indirect(o + self._tab.Pos) + from .Version import Version + obj = Version() + obj.Init(self._tab.Bytes, x) + return obj + return None + + # TosaGraph + def Blocks(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + x = self._tab.Vector(o) + x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4 + x = self._tab.Indirect(x) + from .TosaBasicBlock import TosaBasicBlock + obj = TosaBasicBlock() + obj.Init(self._tab.Bytes, x) + return obj + return None + + # TosaGraph + def BlocksLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + +def TosaGraphStart(builder): builder.StartObject(2) +def TosaGraphAddVersion(builder, version): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(version), 0) +def TosaGraphAddBlocks(builder, blocks): builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(blocks), 0) +def TosaGraphStartBlocksVector(builder, numElems): return builder.StartVector(4, numElems, 4) +def TosaGraphEnd(builder): return builder.EndObject() diff --git a/verif/tosa/TosaOperator.py b/verif/tosa/TosaOperator.py new file mode 100644 index 0000000..ab4a160 --- /dev/null +++ b/verif/tosa/TosaOperator.py @@ -0,0 +1,117 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# Copyright (c) 2020, ARM Limited. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# namespace: tosa + +import flatbuffers + +class TosaOperator(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAsTosaOperator(cls, buf, offset): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = TosaOperator() + x.Init(buf, n + offset) + return x + + # TosaOperator + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # TosaOperator + def Op(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Uint32Flags, o + self._tab.Pos) + return 0 + + # TosaOperator + def AttributeType(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Uint8Flags, o + self._tab.Pos) + return 0 + + # TosaOperator + def Attribute(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + from flatbuffers.table import Table + obj = Table(bytearray(), 0) + self._tab.Union(obj, o) + return obj + return None + + # TosaOperator + def Inputs(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.String(a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4)) + return "" + + # TosaOperator + def InputsLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) 
+ if o != 0: + return self._tab.VectorLen(o) + return 0 + + # TosaOperator + def Outputs(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.String(a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4)) + return "" + + # TosaOperator + def OutputsLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # TosaOperator + def QuantInfoType(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Uint8Flags, o + self._tab.Pos) + return 0 + + # TosaOperator + def QuantInfo(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16)) + if o != 0: + from flatbuffers.table import Table + obj = Table(bytearray(), 0) + self._tab.Union(obj, o) + return obj + return None + +def TosaOperatorStart(builder): builder.StartObject(7) +def TosaOperatorAddOp(builder, op): builder.PrependUint32Slot(0, op, 0) +def TosaOperatorAddAttributeType(builder, attributeType): builder.PrependUint8Slot(1, attributeType, 0) +def TosaOperatorAddAttribute(builder, attribute): builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(attribute), 0) +def TosaOperatorAddInputs(builder, inputs): builder.PrependUOffsetTRelativeSlot(3, flatbuffers.number_types.UOffsetTFlags.py_type(inputs), 0) +def TosaOperatorStartInputsVector(builder, numElems): return builder.StartVector(4, numElems, 4) +def TosaOperatorAddOutputs(builder, outputs): builder.PrependUOffsetTRelativeSlot(4, flatbuffers.number_types.UOffsetTFlags.py_type(outputs), 0) +def TosaOperatorStartOutputsVector(builder, numElems): return builder.StartVector(4, numElems, 4) +def TosaOperatorAddQuantInfoType(builder, quantInfoType): builder.PrependUint8Slot(5, quantInfoType, 0) +def TosaOperatorAddQuantInfo(builder, quantInfo): 
builder.PrependUOffsetTRelativeSlot(6, flatbuffers.number_types.UOffsetTFlags.py_type(quantInfo), 0) +def TosaOperatorEnd(builder): return builder.EndObject() diff --git a/verif/tosa/TosaTensor.py b/verif/tosa/TosaTensor.py new file mode 100644 index 0000000..0b30266 --- /dev/null +++ b/verif/tosa/TosaTensor.py @@ -0,0 +1,133 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# Copyright (c) 2020, ARM Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# namespace: tosa + +import flatbuffers + +class TosaTensor(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAsTosaTensor(cls, buf, offset): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = TosaTensor() + x.Init(buf, n + offset) + return x + + # TosaTensor + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # TosaTensor + def Name(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.String(o + self._tab.Pos) + return None + + # TosaTensor + def Shape(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4)) + return 0 + + # TosaTensor + def ShapeAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return 
self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o) + return 0 + + # TosaTensor + def ShapeLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # TosaTensor + def Type(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Uint32Flags, o + self._tab.Pos) + return 0 + + # TosaTensor + def Usage(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get(flatbuffers.number_types.Uint32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4)) + return 0 + + # TosaTensor + def UsageAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Uint32Flags, o) + return 0 + + # TosaTensor + def UsageLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # TosaTensor + def Format(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get(flatbuffers.number_types.Uint32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4)) + return 0 + + # TosaTensor + def FormatAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Uint32Flags, o) + return 0 + + # TosaTensor + def FormatLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # TosaTensor + def NpyFilename(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14)) + if o != 0: + return self._tab.String(o + self._tab.Pos) + 
return None + +def TosaTensorStart(builder): builder.StartObject(6) +def TosaTensorAddName(builder, name): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(name), 0) +def TosaTensorAddShape(builder, shape): builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(shape), 0) +def TosaTensorStartShapeVector(builder, numElems): return builder.StartVector(4, numElems, 4) +def TosaTensorAddType(builder, type): builder.PrependUint32Slot(2, type, 0) +def TosaTensorAddUsage(builder, usage): builder.PrependUOffsetTRelativeSlot(3, flatbuffers.number_types.UOffsetTFlags.py_type(usage), 0) +def TosaTensorStartUsageVector(builder, numElems): return builder.StartVector(4, numElems, 4) +def TosaTensorAddFormat(builder, format): builder.PrependUOffsetTRelativeSlot(4, flatbuffers.number_types.UOffsetTFlags.py_type(format), 0) +def TosaTensorStartFormatVector(builder, numElems): return builder.StartVector(4, numElems, 4) +def TosaTensorAddNpyFilename(builder, npyFilename): builder.PrependUOffsetTRelativeSlot(5, flatbuffers.number_types.UOffsetTFlags.py_type(npyFilename), 0) +def TosaTensorEnd(builder): return builder.EndObject() diff --git a/verif/tosa/TransposeConv2dAttribute.py b/verif/tosa/TransposeConv2dAttribute.py new file mode 100644 index 0000000..043d8e8 --- /dev/null +++ b/verif/tosa/TransposeConv2dAttribute.py @@ -0,0 +1,133 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# Copyright (c) 2020, ARM Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and
# limitations under the License.


# namespace: tosa

import flatbuffers

# NOTE(review): flatc-generated file — comments added for review only;
# regenerating from the schema will drop them.
class TransposeConv2dAttribute(object):
    """FlatBuffers accessor for the TransposeConv2dAttribute table.

    Table slots (vtable offsets): 4=outpad, 6=stride, 8=dilation,
    10=output_shape — each an int32 vector. Accessors return 0 when the
    field is absent from the buffer.
    """
    __slots__ = ['_tab']

    @classmethod
    def GetRootAsTransposeConv2dAttribute(cls, buf, offset):
        # Dereference the root uoffset, then position the table view there.
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = TransposeConv2dAttribute()
        x.Init(buf, n + offset)
        return x

    # TransposeConv2dAttribute
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)

    # TransposeConv2dAttribute
    def Outpad(self, j):
        # j-th element of the `outpad` int32 vector (4-byte element stride).
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            a = self._tab.Vector(o)
            return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
        return 0

    # TransposeConv2dAttribute
    def OutpadAsNumpy(self):
        # Whole `outpad` vector as a numpy array; 0 (not None) when absent.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o)
        return 0

    # TransposeConv2dAttribute
    def OutpadLength(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return self._tab.VectorLen(o)
        return 0

    # TransposeConv2dAttribute
    def Stride(self, j):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        if o != 0:
            a = self._tab.Vector(o)
            return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
        return 0

    # TransposeConv2dAttribute
    def StrideAsNumpy(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        if o != 0:
            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o)
        return 0

    # TransposeConv2dAttribute
    def StrideLength(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        if o != 0:
            return self._tab.VectorLen(o)
        return 0

    # TransposeConv2dAttribute
    def Dilation(self, j):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
        if o != 0:
            a = self._tab.Vector(o)
            return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
        return 0

    # TransposeConv2dAttribute
    def DilationAsNumpy(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
        if o != 0:
            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o)
        return 0

    # TransposeConv2dAttribute
    def DilationLength(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
        if o != 0:
            return self._tab.VectorLen(o)
        return 0

    # TransposeConv2dAttribute
    def OutputShape(self, j):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
        if o != 0:
            a = self._tab.Vector(o)
            return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
        return 0

    # TransposeConv2dAttribute
    def OutputShapeAsNumpy(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
        if o != 0:
            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o)
        return 0

    # TransposeConv2dAttribute
    def OutputShapeLength(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
        if o != 0:
            return self._tab.VectorLen(o)
        return 0

# Builder helpers: 4 slots (0=outpad, 1=stride, 2=dilation, 3=output_shape),
# all int32 vectors (element size 4, alignment 4).
def TransposeConv2dAttributeStart(builder): builder.StartObject(4)
def TransposeConv2dAttributeAddOutpad(builder, outpad): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(outpad), 0)
def TransposeConv2dAttributeStartOutpadVector(builder, numElems): return builder.StartVector(4, numElems, 4)
def TransposeConv2dAttributeAddStride(builder, stride): builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(stride), 0)
def TransposeConv2dAttributeStartStrideVector(builder, numElems): return builder.StartVector(4, numElems, 4)
def TransposeConv2dAttributeAddDilation(builder, dilation): builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(dilation), 0)
def TransposeConv2dAttributeStartDilationVector(builder, numElems): return builder.StartVector(4, numElems, 4)
def TransposeConv2dAttributeAddOutputShape(builder, outputShape): builder.PrependUOffsetTRelativeSlot(3, flatbuffers.number_types.UOffsetTFlags.py_type(outputShape), 0)
def TransposeConv2dAttributeStartOutputShapeVector(builder, numElems): return builder.StartVector(4, numElems, 4)
def TransposeConv2dAttributeEnd(builder): return builder.EndObject()
# namespace: tosa

import flatbuffers

# NOTE(review): flatc-generated file — comments added for review only.
class UnaryQuantInfo(object):
    """FlatBuffers accessor for UnaryQuantInfo: int32 zero points for a
    unary op's input (slot 4) and output (slot 6); both default to 0."""
    __slots__ = ['_tab']

    @classmethod
    def GetRootAsUnaryQuantInfo(cls, buf, offset):
        # Dereference the root uoffset, then position the table view there.
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = UnaryQuantInfo()
        x.Init(buf, n + offset)
        return x

    # UnaryQuantInfo
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)

    # UnaryQuantInfo
    def InputZp(self):
        # Input zero point; 0 when the field is absent.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
        return 0

    # UnaryQuantInfo
    def OutputZp(self):
        # Output zero point; 0 when the field is absent.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
        return 0

# Builder helpers: 2 slots (0=input_zp, 1=output_zp), int32, default 0.
def UnaryQuantInfoStart(builder): builder.StartObject(2)
def UnaryQuantInfoAddInputZp(builder, inputZp): builder.PrependInt32Slot(0, inputZp, 0)
def UnaryQuantInfoAddOutputZp(builder, outputZp): builder.PrependInt32Slot(1, outputZp, 0)
def UnaryQuantInfoEnd(builder): return builder.EndObject()
# namespace: tosa

class Usage(object):
    """Tensor usage enumeration (flatc-generated constants)."""
    UNKNOWN = 0
    ACTIVATION = 1
    WEIGHT = 2
    INDEX = 3


# (the following comes from verif/tosa/Version.py in the same change)
# namespace: tosa

import flatbuffers

# NOTE(review): flatc-generated file — comments added for review only.
class Version(object):
    """FlatBuffers accessor for the serializer Version table.

    Slots: 4=_major (default 0), 6=_minor (default 20), 8=_patch
    (default 0), 10=_experimental (default False).
    """
    __slots__ = ['_tab']

    @classmethod
    def GetRootAsVersion(cls, buf, offset):
        # Dereference the root uoffset, then position the table view there.
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = Version()
        x.Init(buf, n + offset)
        return x

    # Version
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)

    # Version
    def _major(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
        return 0

    # Version
    def _minor(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
        # Schema default is 20, not 0 — a minor version of 20 is therefore
        # never physically stored in the buffer.
        return 20

    # Version
    def _patch(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
        return 0

    # Version
    def _experimental(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
        if o != 0:
            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
        return False

# Builder helpers: 4 slots; note the non-zero default (20) on _minor.
def VersionStart(builder): builder.StartObject(4)
def VersionAdd_major(builder, Major): builder.PrependInt32Slot(0, Major, 0)
def VersionAdd_minor(builder, Minor): builder.PrependInt32Slot(1, Minor, 20)
def VersionAdd_patch(builder, Patch): builder.PrependInt32Slot(2, Patch, 0)
def VersionAdd_experimental(builder, Experimental): builder.PrependBoolSlot(3, Experimental, 0)
def VersionEnd(builder): return builder.EndObject()
# namespace: tosa

import flatbuffers

# NOTE(review): flatc-generated file — comments added for review only.
class WhileLoopAttribute(object):
    """FlatBuffers accessor for WhileLoopAttribute: names of the condition
    (slot 4) and body (slot 6) basic blocks of a WHILE_LOOP op."""
    __slots__ = ['_tab']

    @classmethod
    def GetRootAsWhileLoopAttribute(cls, buf, offset):
        # Dereference the root uoffset, then position the table view there.
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = WhileLoopAttribute()
        x.Init(buf, n + offset)
        return x

    # WhileLoopAttribute
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)

    # WhileLoopAttribute
    def CondBranch(self):
        # Condition-block name; None when unset.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return self._tab.String(o + self._tab.Pos)
        return None

    # WhileLoopAttribute
    def BodyBranch(self):
        # Body-block name; None when unset.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        if o != 0:
            return self._tab.String(o + self._tab.Pos)
        return None

# Builder helpers: 2 string slots (0=cond_branch, 1=body_branch).
def WhileLoopAttributeStart(builder): builder.StartObject(2)
def WhileLoopAttributeAddCondBranch(builder, condBranch): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(condBranch), 0)
def WhileLoopAttributeAddBodyBranch(builder, bodyBranch): builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(bodyBranch), 0)
def WhileLoopAttributeEnd(builder): return builder.EndObject()
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import json
import shlex
import subprocess
from tosa_test_runner import TosaTestRunner, run_sh_command

class TosaRefRunner(TosaTestRunner):
    """Test runner that executes one test on the TOSA reference model binary."""

    def __init__(self, args, runnerArgs, testDir):
        super().__init__(args, runnerArgs, testDir)

    def runModel(self):
        """Run the reference model over this test and classify the outcome.

        Builds the reference-model command line from the argparse `args`
        (not the runnerArgs) plus the test descriptor, runs it via
        run_sh_command, and returns a TosaTestRunner.Result describing
        whether the observed pass/fail matched the descriptor's
        `expected_failure` flag.
        """
        args = self.args

        ref_cmd = [args.ref_model_path,
                   '-Csubgraph_file={}'.format(self.testDesc['tosa_file']),
                   '-Csubgraph_dir={}'.format(self.testDir),
                   '-Cinput_dir={}'.format(self.testDir),
                   '-Coutput_dir={}'.format(self.testDir),
                   '-Coutput_tensor_prefix=ref-',  # Naming agreement with TosaSerializer
                   ]

        # Pair each input placeholder name with its .npy data file.
        inputTensors = ['{}:{}'.format(name, fname)
                        for name, fname in zip(self.testDesc['ifm_placeholder'],
                                               self.testDesc['ifm_file'])]
        ref_cmd.append('-Cinput_tensor={}'.format(','.join(inputTensors)))

        if args.ref_debug:
            ref_cmd.extend(['-dALL', '-l{}'.format(args.ref_debug)])

        if args.ref_intermediates:
            ref_cmd.extend(['-Ddump_intermediates=1'])

        expectedFailure = self.testDesc['expected_failure']

        try:
            run_sh_command(self.args, ref_cmd)
            if expectedFailure:
                result = TosaTestRunner.Result.UNEXPECTED_PASS
            else:
                result = TosaTestRunner.Result.EXPECTED_PASS
        except Exception:
            if expectedFailure:
                result = TosaTestRunner.Result.EXPECTED_FAILURE
            else:
                # BUG FIX: a crashed run previously reported EXPECTED_PASS,
                # silently masking real failures. Report it as a failure.
                # NOTE(review): assumes Result defines UNEXPECTED_FAILURE as
                # the counterpart of UNEXPECTED_PASS — confirm against
                # tosa_test_runner before merging.
                result = TosaTestRunner.Result.UNEXPECTED_FAILURE

        return result
# With the way flatc generates its python types, there is no programatic way
# to get string names for the integer types. Manually maintain a string table
# here.
DTypeNames = ['UNKNOWN',
              'BOOL',
              'AINT8',
              'UINT8',
              'INT4',
              'INT8',
              'INT16',
              'INT32',
              'INT48',
              'FLOAT']

def dtype_str_to_val(name):
    """Return the DType enum value for a case-insensitive type name.

    Raises ValueError (an Exception subclass, so existing broad handlers
    still catch it) when the name is not in DTypeNames.
    """
    folded = name.casefold()  # fold the query once, not per table entry
    for i, dtype_name in enumerate(DTypeNames):
        if folded == dtype_name.casefold():
            return i
    raise ValueError('Unable to parse DType name {}'.format(name))


class TosaSerializerUnion:
    '''This class handles encapsulating and serializing union types into flatbuffers'''

    def __init__(self):
        # (startFcn, endFcn) pair for the concrete union member.
        # Set by the option constructors in the subclasses.
        self.optFcns = None

        # Integer tag from the tosa union enumeration (Attribute/QuantInfo).
        # Set by the option constructors in the subclasses.
        self.utype = None

        # Each list holds (add_function, value) pairs that serialize()
        # replays in order. Set by the option constructors in the subclasses.
        self.ints = []
        self.bools = []
        self.floats = []
        self.strings = []
        self.intvecs = []

    def serialize(self, builder):
        """Serialize this union member and return its flatbuffer offset."""
        # Strings and vectors must be created before the table is started.
        strList = []
        intVecList = []

        for fcn, val in self.strings:
            strList.append((fcn, builder.CreateString(val)))

        for fcn, val in self.intvecs:
            intVecList.append((fcn, TosaSerializer.serializeInt32Vec(builder, val)))

        startFcn, endFcn = self.optFcns

        # Then serialize the options object from the primitives and the
        # pre-serialized offsets.
        startFcn(builder)
        for fcn, val in self.ints:
            fcn(builder, val)

        for fcn, val in self.bools:
            fcn(builder, val)

        for fcn, val in self.floats:
            fcn(builder, val)

        for fcn, val in strList:
            fcn(builder, val)

        for fcn, val in intVecList:
            fcn(builder, val)

        return endFcn(builder)

class TosaSerializerAttribute(TosaSerializerUnion):
    '''This class handles encapsulating all of the enumerated types for attributes'''

    def __init__(self):
        super().__init__()

    def Pool2dAttribute(self, kernel, stride, padding):
        # Generated module imported locally, at first use.
        from tosa import Pool2dAttribute as a, Attribute

        self.utype = Attribute.Attribute().Pool2dAttribute
        # (tail of Pool2dAttribute, whose `def` line precedes this span)
        self.optFcns = (a.Pool2dAttributeStart, a.Pool2dAttributeEnd)
        self.intvecs.append((a.Pool2dAttributeAddPadding, padding))
        self.intvecs.append((a.Pool2dAttributeAddKernel, kernel))
        self.intvecs.append((a.Pool2dAttributeAddStride, stride))

    def Conv2dAttribute(self, padding, stride, dilation):
        from tosa import Conv2dAttribute as a, Attribute

        self.utype = Attribute.Attribute().Conv2dAttribute
        self.optFcns = (a.Conv2dAttributeStart, a.Conv2dAttributeEnd)

        self.intvecs.append((a.Conv2dAttributeAddPadding, padding))
        self.intvecs.append((a.Conv2dAttributeAddStride, stride))
        self.intvecs.append((a.Conv2dAttributeAddDilation, dilation))

    # NOTE(review): capital 'D' in this method name is inconsistent with the
    # generated TransposeConv2dAttribute class; kept as-is since callers
    # depend on this spelling.
    def TransposeConv2DAttribute(self, outpad, stride, dilation, output_shape):
        from tosa import TransposeConv2dAttribute as a, Attribute

        self.utype = Attribute.Attribute().TransposeConv2dAttribute
        self.optFcns = (a.TransposeConv2dAttributeStart, a.TransposeConv2dAttributeEnd)

        self.intvecs.append((a.TransposeConv2dAttributeAddOutpad, outpad))
        self.intvecs.append((a.TransposeConv2dAttributeAddStride, stride))
        self.intvecs.append((a.TransposeConv2dAttributeAddDilation, dilation))
        self.intvecs.append((a.TransposeConv2dAttributeAddOutputShape, output_shape))

    def ReluNAttribute(self, maxint, maxfp):
        from tosa import ReluNAttribute as a, Attribute

        self.utype = Attribute.Attribute().ReluNAttribute
        self.optFcns = (a.ReluNAttributeStart, a.ReluNAttributeEnd)

        self.ints.append((a.ReluNAttributeAddMaxInt, maxint))
        # NOTE(review): maxfp is queued on self.ints rather than self.floats;
        # the list choice only affects replay order in serialize(), but
        # confirm this pairing with AddMaxFp is intended.
        self.ints.append((a.ReluNAttributeAddMaxFp, maxfp))


    def AxisAttribute(self, axis):
        from tosa import AxisAttribute as a, Attribute

        self.utype = Attribute.Attribute().AxisAttribute
        self.optFcns = (a.AxisAttributeStart, a.AxisAttributeEnd)

        self.ints.append((a.AxisAttributeAddAxis, axis))

    def ReshapeAttribute(self, shape):
        from tosa import ReshapeAttribute as a, Attribute

        self.utype = Attribute.Attribute().ReshapeAttribute
        self.optFcns = (a.ReshapeAttributeStart, a.ReshapeAttributeEnd)

        self.intvecs.append((a.ReshapeAttributeAddShape, shape))

    def SliceAttribute(self, begin, size):
        from tosa import SliceAttribute as a, Attribute

        self.utype = Attribute.Attribute().SliceAttribute
        self.optFcns = (a.SliceAttributeStart, a.SliceAttributeEnd)

        self.intvecs.append((a.SliceAttributeAddBegin, begin))
        self.intvecs.append((a.SliceAttributeAddSize, size))

    def TileAttribute(self, multiples):
        from tosa import TileAttribute as a, Attribute

        self.utype = Attribute.Attribute().TileAttribute
        self.optFcns = (a.TileAttributeStart, a.TileAttributeEnd)

        self.intvecs.append((a.TileAttributeAddMultiples, multiples))

    def ResizeAttribute(self, output_size, stride, offset, shift, mode):
        from tosa import ResizeAttribute as a, Attribute

        self.utype = Attribute.Attribute().ResizeAttribute
        self.optFcns = (a.ResizeAttributeStart, a.ResizeAttributeEnd)

        self.intvecs.append((a.ResizeAttributeAddOutputSize, output_size))
        self.intvecs.append((a.ResizeAttributeAddStride, stride))
        self.intvecs.append((a.ResizeAttributeAddOffset, offset))
        self.ints.append((a.ResizeAttributeAddShift, shift))
        self.ints.append((a.ResizeAttributeAddMode, mode))

    def ClampAttribute(self, minint, maxint, minfp, maxfp):
        from tosa import ClampAttribute as a, Attribute

        self.utype = Attribute.Attribute().ClampAttribute
        self.optFcns = (a.ClampAttributeStart, a.ClampAttributeEnd)

        self.ints.append((a.ClampAttributeAddMinInt, minint))
        self.ints.append((a.ClampAttributeAddMaxInt, maxint))

        # NOTE(review): the fp bounds are also queued on self.ints (not
        # self.floats); see the same pattern in ReluNAttribute.
        self.ints.append((a.ClampAttributeAddMinFp, minfp))
        self.ints.append((a.ClampAttributeAddMaxFp, maxfp))

    def RescaleAttribute(self, input_zp, output_zp, multiplier, shift, scale32, double_round, per_channel):
        from tosa import RescaleAttribute as a, Attribute

        self.utype = Attribute.Attribute().RescaleAttribute
        self.optFcns = (a.RescaleAttributeStart, a.RescaleAttributeEnd)

        self.ints.append((a.RescaleAttributeAddInputZp, input_zp))
        self.ints.append((a.RescaleAttributeAddOutputZp, output_zp))
        self.intvecs.append((a.RescaleAttributeAddMultiplier, multiplier))
        self.intvecs.append((a.RescaleAttributeAddShift, shift))
        self.bools.append((a.RescaleAttributeAddScale32, scale32))
        self.bools.append((a.RescaleAttributeAddDoubleRound, double_round))
        self.bools.append((a.RescaleAttributeAddPerChannel, per_channel))

    def CustomAttribute(self, identifier):
        from tosa import CustomAttribute as a, Attribute

        self.utype = Attribute.Attribute().CustomAttribute
        self.optFcns = (a.CustomAttributeStart, a.CustomAttributeEnd)

        self.strings.append((a.CustomAttributeAddIdentifier, identifier))

    def CondIfAttribute(self, then_branch, else_branch):
        from tosa import CondIfAttribute as a, Attribute

        self.utype = Attribute.Attribute().CondIfAttribute
        self.optFcns = (a.CondIfAttributeStart, a.CondIfAttributeEnd)

        self.strings.append((a.CondIfAttributeAddThenBranch, then_branch))
        self.strings.append((a.CondIfAttributeAddElseBranch, else_branch))

    def WhileLoopAttribute(self, cond_branch, body_branch):
        from tosa import WhileLoopAttribute as a, Attribute

        self.utype = Attribute.Attribute().WhileLoopAttribute
        self.optFcns = (a.WhileLoopAttributeStart, a.WhileLoopAttributeEnd)

        self.strings.append((a.WhileLoopAttributeAddCondBranch, cond_branch))
        self.strings.append((a.WhileLoopAttributeAddBodyBranch, body_branch))

class TosaSerializerQuantInfo(TosaSerializerUnion):
    '''This class handles encapsulating all of the enumerated types for quantinfo types'''
    def __init__(self):
        super().__init__()

    def ConvQuantInfo(self, input_zp, weight_zp):
        from tosa import ConvQuantInfo as q, QuantInfo

        self.utype = QuantInfo.QuantInfo().ConvQuantInfo
        self.optFcns = (q.ConvQuantInfoStart, q.ConvQuantInfoEnd)
        self.ints.append((q.ConvQuantInfoAddInputZp, input_zp))
        # (tail of ConvQuantInfo, whose `def` line precedes this span)
        self.ints.append((q.ConvQuantInfoAddWeightZp, weight_zp))

    def UnaryQuantInfo(self, input_zp, output_zp):
        from tosa import UnaryQuantInfo as q, QuantInfo

        self.utype = QuantInfo.QuantInfo().UnaryQuantInfo
        self.optFcns = (q.UnaryQuantInfoStart, q.UnaryQuantInfoEnd)
        self.ints.append((q.UnaryQuantInfoAddInputZp, input_zp))
        self.ints.append((q.UnaryQuantInfoAddOutputZp, output_zp))

    def MatMulQuantInfo(self, a_zp, b_zp):
        from tosa import MatMulQuantInfo as q, QuantInfo

        self.utype = QuantInfo.QuantInfo().MatMulQuantInfo
        self.optFcns = (q.MatMulQuantInfoStart, q.MatMulQuantInfoEnd)
        self.ints.append((q.MatMulQuantInfoAddAZp, a_zp))
        self.ints.append((q.MatMulQuantInfoAddBZp, b_zp))

    def PadQuantInfo(self, input_zp):
        from tosa import PadQuantInfo as q, QuantInfo

        self.utype = QuantInfo.QuantInfo().PadQuantInfo
        self.optFcns = (q.PadQuantInfoStart, q.PadQuantInfoEnd)
        self.ints.append((q.PadQuantInfoAddInputZp, input_zp))

class TosaSerializerTensor:
    # In-memory description of one tensor, serialized to a TosaTensor table.
    def __init__(self, name, shape, dtype, usage, dformat, filename = None, placeholderFilename = None):
        self.name = name

        # Normalize numpy shapes into a plain list of python ints.
        if isinstance(shape, np.ndarray):
            shape = shape.astype(int).tolist()
        shape = list(map(int, shape))

        self.shape = shape
        self.dtype = dtype
        self.usage = TosaSerializer.toList(usage)
        self.dformat = TosaSerializer.toList(dformat)

        # Filename for const tensors. This gets written to the .tosa serialization
        self.filename = filename

        # Filename for placeholder tensors. These get generated by the test generation
        # process and are written to disk, but are considered input tensors by the network
        # so they do not appear in the TOSA serialiazation. However, if we want to form a unit
        # test around these input tensors, we can get the filename from here.
        self.placeholderFilename = placeholderFilename

    def __str__(self):
        str = 'TosaSerializerTensor name: {} shape: {} dtype: {} Usage: {} format {} filename: {}'.format(
            self.name, self.shape, DTypeNames[self.dtype], self.usage, self.dformat, self.filename)
        return str

    def addUsage(self, usage):
        self.usage.append(usage)

    def addFormat(self, format):
        self.dformat.append(format)

    def setDtype(self, dtype):
        self.dtype = dtype

    def merge(self, name, shape, dtype, usage, dformat, filename = None):
        # Merge in additional usage/formats to the list
        # NOTE(review): only `usage` and `dformat` are actually merged; the
        # other parameters are unused and the method implicitly returns None
        # (callers that assign its result get None) — confirm intent.
        found = 0
        for i in self.usage:
            if i == usage:
                found = 1
                break
        if not found:
            self.usage.append(usage)

        found = 0
        for i in self.dformat:
            if i == dformat:
                found = 1
                break
        if not found:
            self.dformat.append(dformat)

    def serialize(self, builder):
        # Strings/vectors must be created before TosaTensorStart.
        fb_name = builder.CreateString(self.name)
        if self.filename:
            fb_filename = builder.CreateString(self.filename)
        fb_shapes = TosaSerializer.serializeInt32Vec(builder, self.shape)
        fb_usage = TosaSerializer.serializeInt32Vec(builder, self.usage)
        fb_dformat = TosaSerializer.serializeInt32Vec(builder, self.dformat)

        TosaTensor.TosaTensorStart(builder)
        TosaTensor.TosaTensorAddName(builder, fb_name)
        TosaTensor.TosaTensorAddShape(builder, fb_shapes)
        TosaTensor.TosaTensorAddType(builder, self.dtype)
        TosaTensor.TosaTensorAddUsage(builder, fb_usage)
        TosaTensor.TosaTensorAddFormat(builder, fb_dformat)
        if self.filename:
            TosaTensor.TosaTensorAddNpyFilename(builder, fb_filename)

        return TosaTensor.TosaTensorEnd(builder)

class TosaSerializerOperator:
    # In-memory description of one operator, serialized to a TosaOperator table.
    def __init__(self, op, inputs, outputs, attributes = None, quantInfo = None):
        self.op = op
        self.attributes = attributes
        self.inputs = TosaSerializer.toList(inputs)
        self.outputs = TosaSerializer.toList(outputs)
        self.quantInfo = quantInfo

    def __str__(self):
        str = 'Op {}\n----\n'.format(self.op)

        for i in self.inputs:
            str = str + ' Input: {}\n'.format(i)
        for o in self.outputs:
            str = str + ' Output: {}\n'.format(o)

        return str

    def serialize(self, builder):
        fb_inputs = TosaSerializer.serializeStrVec(builder, self.inputs, TosaOperator.TosaOperatorStartInputsVector)
        fb_outputs = TosaSerializer.serializeStrVec(builder, self.outputs, TosaOperator.TosaOperatorStartOutputsVector)
        # Need to serialize quant_info and attributes enums still
        if self.attributes is not None:
            fb_attributes = self.attributes.serialize(builder)

        if self.quantInfo is not None:
            fb_qinfo = self.quantInfo.serialize(builder)

        TosaOperator.TosaOperatorStart(builder)
        TosaOperator.TosaOperatorAddOp(builder, self.op)
        TosaOperator.TosaOperatorAddInputs(builder, fb_inputs)
        TosaOperator.TosaOperatorAddOutputs(builder, fb_outputs)
        # Union fields need both the type tag and the value offset.
        if self.attributes is not None:
            TosaOperator.TosaOperatorAddAttributeType(builder, self.attributes.utype)
            TosaOperator.TosaOperatorAddAttribute(builder, fb_attributes)
        if self.quantInfo is not None:
            TosaOperator.TosaOperatorAddQuantInfoType(builder, self.quantInfo.utype)
            TosaOperator.TosaOperatorAddQuantInfo(builder, fb_qinfo)

        return TosaOperator.TosaOperatorEnd(builder)

class TosaSerializerBasicBlock:
    # One basic block: named tensors, ordered operators, and I/O name lists.
    def __init__(self, name):
        self.name = name
        self.operators = []

        # Dict assures uniqueness, but allows us to look up by name
        self.tensors = dict()

        self.inputs = []
        self.outputs = []

    def addTensor(self, name, shape, dtype, usage, dformat, filename = None, placeholderFilename = None):
        try:
            # Someone already added this tensor.
            # We may have to add more usages and formats
            # NOTE(review): merge() returns None, so `filename` is clobbered
            # with None here and then unused — confirm intent.
            tens = self.tensors[name]
            filename = tens.merge(name, shape, dtype, usage, dformat, filename)
        except KeyError:
            self.tensors[name] = TosaSerializerTensor(name, shape, dtype, usage, dformat, filename, placeholderFilename)

        return self.tensors[name]

    def addInput(self, name):
        self.inputs.append(name)

    def addOutput(self, name):
        self.outputs.append(name)

    def addOperator(self, op, inputs, outputs, attributes = None, quant_info = None):
        self.operators.append(TosaSerializerOperator(op, inputs, outputs, attributes, quant_info))

    def serialize(self, builder):
        # Offsets (strings, vectors, child tables) must exist before the
        # basic-block table itself is started.
        fb_name = builder.CreateString(self.name)
        fbv_inputs = TosaSerializer.serializeStrVec(builder, list(self.inputs), TosaBasicBlock.TosaBasicBlockStartInputsVector)
        fbv_outputs = TosaSerializer.serializeStrVec(builder, list(self.outputs), TosaBasicBlock.TosaBasicBlockStartOutputsVector)
        fbv_tensors = TosaSerializer.serializeObjVec(builder, list(self.tensors.values()), TosaBasicBlock.TosaBasicBlockStartTensorsVector)
        fbv_operators = TosaSerializer.serializeObjVec(builder, self.operators, TosaBasicBlock.TosaBasicBlockStartOperatorsVector)

        TosaBasicBlock.TosaBasicBlockStart(builder)
        TosaBasicBlock.TosaBasicBlockAddName(builder, fb_name)
        TosaBasicBlock.TosaBasicBlockAddInputs(builder, fbv_inputs)
        TosaBasicBlock.TosaBasicBlockAddOutputs(builder, fbv_outputs)
        TosaBasicBlock.TosaBasicBlockAddTensors(builder, fbv_tensors)
        TosaBasicBlock.TosaBasicBlockAddOperators(builder, fbv_operators)
        return TosaBasicBlock.TosaBasicBlockEnd(builder)

@unique
class TensorDir(IntEnum):
    # Role a tensor plays within a generated test.
    PLACEHOLDER = 0
    CONST = 1
    INTERMEDIATE = 2
    RESULT = 3

class TosaSerializer:
    def __init__(self, pathPrefix):

        # Get the global TOSA version if not already defined
        # NOTE(review): setTosaVersion and TOSA_VERSION are defined outside
        # this view.
        try:
            TOSA_VERSION
        except NameError:
            TosaSerializer.setTosaVersion()

        self.builder = flatbuffers.Builder(0)

        # Serializer always starts with an implicit 'main' block.
        self.basicBlocks = []
        self.startBasicBlock('main')
self.pathPrefix = pathPrefix + + # Indicies used for adding/naming tensors + self.currInputIdx = 0 + self.currConstIdx = 0 + self.currLayerIdx = 1 + self.currResultIdx = 0 + + # Is this an illegal test that is expected to fail? + self.expectedFailure = False + self.expectedFailureDesc = '' + + def __str__(self): + str = '' + for bb in self.basicBlocks: + str = str + bb.__str__() + return str + + def addPlaceholder(self, shape, dtype, usage, dformat, vals): + if not self.currBasicBlock: + raise Exception('addTensor called without valid basic block') + + name = 'input-{}'.format(self.currInputIdx) + filename = '{}.npy'.format(name) + self.currInputIdx = self.currInputIdx + 1 + + tens = self.currBasicBlock.addTensor(name, shape, dtype, usage, dformat, None, filename) + # This is always an input to the block + self.currBasicBlock.addInput(name) + # Add the operator now + self.currBasicBlock.addOperator(tosa.Op.Op().PLACEHOLDER, [], name) + + if vals is not None: + np.save(os.path.join(self.pathPrefix, filename), vals, False) + + return tens + + def addConst(self, shape, dtype, usage, dformat, vals): + if not self.currBasicBlock: + raise Exception('addTensor called without valid basic block') + + name = 'const-{}'.format(self.currInputIdx) + filename = '{}.npy'.format(name) + self.currInputIdx = self.currInputIdx + 1 + + tens = self.currBasicBlock.addTensor(name, shape, dtype, usage, dformat, filename) + # Add the operator now + self.currBasicBlock.addOperator(tosa.Op.Op().CONST, [], name) + + if vals is not None: + np.save(os.path.join(self.pathPrefix, filename), vals, False) + return tens + + def addIntermediate(self, shape, dtype, usage, dformat): + + if not self.currBasicBlock: + raise Exception('addTensor called without valid basic block') + + name = 'layer-{}'.format(self.currLayerIdx) + filename = None # No file, so no filename + self.currLayerIdx = self.currLayerIdx + 1 + + tens = self.currBasicBlock.addTensor(name, shape, dtype, usage, dformat, filename) + + 
return tens + + def addInputTensor(self, tensor): + self.currBasicBlock.addOperator(tosa.Op.Op().PLACEHOLDER, [], tensor.name) + self.currBasicBlock.addTensor(tensor.name, tensor.shape, tensor.dtype, tensor.usage, tensor.dformat) + self.currBasicBlock.addInput(tensor.name) + + def addOutputTensor(self, tensor): + self.currBasicBlock.addOutput(tensor.name) + + def addOutput(self, shape, dtype, usage, dformat): + if not self.currBasicBlock: + raise Exception('addTensor called without valid basic block') + + name = 'result-{}'.format(self.currResultIdx) + self.currResultIdx = self.currResultIdx + 1 + + tens = self.currBasicBlock.addTensor(name, shape, dtype, usage, dformat, None) + self.currBasicBlock.addOutput(name) + return tens + + def addOperator(self, op, inputs, outputs, attributes = None, quant_info = None): + + if op == tosa.Op.Op().PLACEHOLDER or \ + op == tosa.Op.Op().CONST: + raise Exception('Use addPlaceholderTensor() or addConstTensor() to add PLACEHOLDER and CONST ops') + + return self.currBasicBlock.addOperator(op, inputs, outputs, attributes, quant_info) + + def setExpectedFailure(self, desc='', val=True): + self.expectedFailure = val + self.expectedFailureDesc = desc + + def setExpectedFailure(self, desc='', val=True): + self.expectedFailure = val + self.expectedFailureDesc = desc + + def serialize(self): + + builder = self.builder + + Version.VersionStart(builder) + Version.VersionAdd_major(builder, TOSA_VERSION[0]) + Version.VersionAdd_minor(builder, TOSA_VERSION[1]) + Version.VersionAdd_patch(builder, TOSA_VERSION[2]) + Version.VersionAdd_experimental(builder, TOSA_VERSION[3]) + version = Version.VersionEnd(builder) + + fbv_bb = TosaSerializer.serializeObjVec(builder, self.basicBlocks, TosaGraph.TosaGraphStartBlocksVector) + + TosaGraph.TosaGraphStart(builder) + TosaGraph.TosaGraphAddVersion(builder, version) + TosaGraph.TosaGraphAddBlocks(builder, fbv_bb) + graph = TosaGraph.TosaGraphEnd(builder) + + self.builder.Finish(graph) + return 
self.builder.Output() + + def writeJson(self, tosa_filename): + '''Write a json test file so that it is fairly easy to pick up the test + and generate commands for third party tool''' + test_desc = dict() + + test_desc['tosa_file'] = tosa_filename + ifm_name = [] + ifm_shape = [] + ifm_file = [] + ofm_name = [] + ofm_file = [] + ofm_shape = [] + + for b in self.basicBlocks: + if b.name == 'main': + for i in b.inputs: + ifm_name.append(i) + ifm_shape.append(b.tensors[i].shape) + ifm_file.append(b.tensors[i].placeholderFilename) + for o in b.outputs: + ofm_name.append(o) + ofm_shape.append(b.tensors[o].shape) + # Make up an OFM filename here. One isn't generated until the reference tool is + # run, so any name is a good name + ofm_file.append('ref-{}.npy'.format(o)) + + test_desc['ifm_placeholder'] = ifm_name + test_desc['ifm_file'] = ifm_file + test_desc['ifm_shape'] = ifm_shape + test_desc['ofm_name'] = ofm_name + test_desc['ofm_shape'] = ofm_shape + test_desc['ofm_file'] = ofm_file + test_desc['expected_failure'] = self.expectedFailure + if self.expectedFailureDesc: + test_desc['expected_failure_desc'] = self.expectedFailureDesc + + return json.dumps(test_desc, indent=' ') + + def startBasicBlock(self, name): + self.currBasicBlock = TosaSerializerBasicBlock(name) + self.basicBlocks.append(self.currBasicBlock) + + @staticmethod + def serializeStrVec(builder, vec, start_fcn): + fb_strs = [builder.CreateString(i) for i in vec] + start_fcn(builder, len(fb_strs)) + for s in fb_strs[::-1]: + builder.PrependUOffsetTRelative(s) + return builder.EndVector(len(fb_strs)) + + @staticmethod + def serializeInt32Vec(builder, vec): + builder.StartVector(4, len(vec), 4) + for v in vec[::-1]: + builder.PrependInt32(v) + return builder.EndVector(len(vec)) + + @staticmethod + def serializeObjVec(builder, vec, start_fcn): + serialized_vec = [] + for v in vec[::-1]: + serialized_vec.append(v.serialize(builder)) + + start_fcn(builder, len(vec)) + for v in serialized_vec: + 
builder.PrependUOffsetTRelative(v) + return builder.EndVector(len(vec)) + + @staticmethod + def toList(val): + if isinstance(val, list): + return val + else: + return [val] + + @staticmethod + def setTosaVersion(): + # Create a dummy flatbuffers file with the default version information + # There does not appear to be a better way to get a constant from a + # flatbuffer schema file + builder = flatbuffers.Builder(0) + Version.VersionStart(builder) + ver = Version.VersionEnd(builder) + TosaGraph.TosaGraphStart(builder) + TosaGraph.TosaGraphAddVersion(builder, ver) + gr = TosaGraph.TosaGraphEnd(builder) + builder.Finish(gr) + + out = builder.Output() + + gr = TosaGraph.TosaGraph() + root = gr.GetRootAsTosaGraph(out, 0) + + # Store the version as a global variable so that it only needs to be + # generated once per process. + global TOSA_VERSION + TOSA_VERSION = [root.Version()._major(), + root.Version()._minor(), + root.Version()._patch(), + root.Version()._experimental() ] diff --git a/verif/tosa_test_gen.py b/verif/tosa_test_gen.py new file mode 100644 index 0000000..dc2d803 --- /dev/null +++ b/verif/tosa_test_gen.py @@ -0,0 +1,2301 @@ +#!/usr/bin/env python3 + +# Copyright (c) 2020, ARM Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 


import numpy as np
import argparse
import sys
import re
import os
import subprocess
import shlex
import json
import glob
import math  # duplicate 'import math' removed (was imported twice)
import queue
import threading
import traceback

from enum import IntEnum, Enum, unique

import tosa_serializer as ts
from tosa_serializer import *
import tosa

# Convenience variables to the flatc-generated types that should be enums, but aren't
DType = tosa.DType.DType()
Usage = tosa.Usage.Usage()
Format = tosa.Format.Format()
Op = tosa.Op.Op()
ResizeMode = tosa.ResizeMode.ResizeMode()


class TosaQuantGen:
    '''QuantizedInfo random generator helper functions.  Specify with 'qgen':
    in the operator definition.'''

    def __init__(self):
        pass

    @staticmethod
    def needsQinfo(op, dtype):
        '''Only the 8-bit integer types carry quantization info.'''
        if dtype == DType.AINT8 or dtype == DType.INT8:
            return True
        return False

    @staticmethod
    def qgUnary(testGen, op, dtype):
        '''Random (or zero) unary quantization info for the given dtype.'''
        qinfo = ts.TosaSerializerQuantInfo()
        if TosaQuantGen.needsQinfo(op, dtype):
            qinfo.UnaryQuantInfo(testGen.randInt(), testGen.randInt())
        else:
            qinfo.UnaryQuantInfo(0, 0)
        return qinfo

    @staticmethod
    def qgConv(testGen, op, dtype):
        '''Random (or zero) convolution quantization info.'''
        qinfo = ts.TosaSerializerQuantInfo()
        if TosaQuantGen.needsQinfo(op, dtype):
            qinfo.ConvQuantInfo(testGen.randInt(), testGen.randInt())
        else:
            qinfo.ConvQuantInfo(0, 0)
        return qinfo

    @staticmethod
    def qgMatmul(testGen, op, dtype):
        '''Random (or zero) matmul quantization info.'''
        qinfo = ts.TosaSerializerQuantInfo()
        if TosaQuantGen.needsQinfo(op, dtype):
            qinfo.MatMulQuantInfo(testGen.randInt(), testGen.randInt())
        else:
            qinfo.MatMulQuantInfo(0, 0)
        return qinfo

    @staticmethod
    def qgPad(testGen, op, dtype):
        '''Random (or zero) pad quantization info.'''
        qinfo = ts.TosaSerializerQuantInfo()
        if TosaQuantGen.needsQinfo(op, dtype):
            qinfo.PadQuantInfo(testGen.randInt())
        else:
            qinfo.PadQuantInfo(0)
        return qinfo

    @staticmethod
    def computeMultiplierAndShift(scaleFp, scale32):
        # Derived from computeMultiplierAndShiftTosaScale32
        # Provide a floating-point scaling factor and the scale32 parameter
        # to compute the multiplier and shift

        if scale32:
            scaleBits = 31
        else:
            scaleBits = 15

        m, shift = math.frexp(scaleFp)

        if scaleFp < 0.0:
            m = -m

        multiplier = round(m * (1 << scaleBits))
        assert(multiplier <= (1 << scaleBits))

        # Normalize: a full-scale multiplier is folded into the shift instead
        if multiplier == (1 << scaleBits):
            multiplier = multiplier // 2
            shift = shift + 1

        shift = (-shift) + scaleBits
        #print('scalefp {} scaleBits {} m {} mult {} shift {}'.format(scaleFp, scaleBits, m, multiplier, shift))

        assert(multiplier <= (1 << scaleBits))
        assert(shift >= 0 and shift <= 63)

        return multiplier, shift


class TosaTensorGen():
    '''Tensor generators create a shape list for the placeholder and const
    tensor data operands for the operator.  The actual random data is
    generated separately for each test.'''

    def __init__(self):
        pass

    @staticmethod
    def tgBasic(testGen, opName, rank):
        '''All operands share one random shape of the given rank.'''
        pl, const = opName['operands']
        shape = testGen.makeShape(rank)

        shape_list = []
        for i in range(pl + const):
            shape_list.append(shape.copy())

        return shape_list

    @staticmethod
    def tgNHWC(testGen, opName, rank):
        '''Rank-4 NHWC shapes, batch size optionally capped by the CLI args.'''
        pl, const = opName['operands']

        assert(rank == 4)

        shape = testGen.makeShape(rank)

        # Constrict the batch size?
        if testGen.args.max_batch_size:
            shape[0] = (shape[0] % testGen.args.max_batch_size) + 1

        shape_list = []
        for i in range(pl + const):
            shape_list.append(shape.copy())

        return shape_list

    @staticmethod
    def tgBroadcastFuzz(testGen, op, rank):
        '''Same base shape for all operands, but one randomly-chosen operand
        gets a randomly-chosen dimension squeezed to 1 to exercise
        broadcasting.'''
        shape = testGen.makeShape(rank)

        pl, const = op['operands']

        shape_list = []

        # Choose one of the inputs to broadcast
        bcast_idx = testGen.randInt(0, pl + const)
        for i in range(pl + const):
            shape_bcast = shape.copy()

            # If the chosen input, pick a random index to broadcast
            if i == bcast_idx:
                fuzz_idx = testGen.randInt(0, rank)
                shape_bcast[fuzz_idx] = 1

            shape_list.append(shape_bcast)

        return shape_list

    @staticmethod
    def tgConv2D(testGen, op, rank):
        '''Shapes for conv2d: NHWC input, OHWI filter, per-OC bias.'''
        pl, const = op['operands']

        assert(rank == 4)

        # IFM dimensions are NHWC
        ifm_shape = testGen.makeShape(rank)

        # Constrict the batch size?
        if testGen.args.max_batch_size:
            ifm_shape[0] = (ifm_shape[0] % testGen.args.max_batch_size) + 1

        # Get the filter height/width from the operator parameters
        filter_hw = op['filter']

        # Generate a random OFM depth
        ofm_depth = testGen.makeShape(1)[0]

        # The filter dimensions are OHWI
        filter_shape = np.asarray([ofm_depth, filter_hw[0], filter_hw[1], ifm_shape[3]])

        # The bias is OC
        bias_shape = np.asarray([ofm_depth])

        return [ifm_shape, filter_shape, bias_shape]

    @staticmethod
    def tgTransposeConv2D(testGen, op, rank):
        '''Shapes for transpose_conv2d: NHWC input and OHWI filter (the bias
        is created later in the build function, since its type depends on the
        accumulator type).'''
        pl, const = op['operands']

        assert(rank == 4)

        # IFM dimensions are NHWC
        ifm_shape = testGen.makeShape(rank)

        # Constrict the batch size?
        if testGen.args.max_batch_size:
            ifm_shape[0] = (ifm_shape[0] % testGen.args.max_batch_size) + 1

        # Get the filter height/width from the operator parameters
        filter_hw = op['filter']

        # Generate a random OFM depth
        ofm_depth = testGen.makeShape(1)[0]

        # The filter dimensions are OHWI
        filter_shape = np.asarray([ofm_depth, filter_hw[0], filter_hw[1], ifm_shape[3]])

        return [ifm_shape, filter_shape]

    @staticmethod
    def tgDepthwiseConv2D(testGen, op, rank):
        '''Shapes for depthwise_conv2d: NHWC input, HWCM filter, M*C bias.'''
        pl, const = op['operands']

        assert(rank == 4)
        assert(pl == 1 and const == 2)

        # IFM dimensions are NHWC
        ifm_shape = testGen.makeShape(rank)

        # Constrict the batch size?
        if testGen.args.max_batch_size:
            ifm_shape[0] = (ifm_shape[0] % testGen.args.max_batch_size) + 1

        # Get the filter height/width from the operator parameters
        # Filter is KH, HW, C, M
        filter_hw = op['filter']

        # Generate a random OFM depth, but don't let it get too big because
        # the output depth is M * C
        filter_m = (testGen.makeShape(1)[0] % (testGen.args.tensor_shape_range[1] // 4)) + 1

        # The filter dimensions are HWCM
        filter_shape = np.asarray([filter_hw[0], filter_hw[1], ifm_shape[3], filter_m])

        # The bias is M * C
        bias_shape = np.asarray([ifm_shape[3] * filter_m])

        return [ifm_shape, filter_shape, bias_shape]

    @staticmethod
    def tgFullyConnected(testGen, op, rank):
        '''Shapes for fully_connected: rank-2 input, [OC, IC] filter, OC bias.'''
        pl, const = op['operands']

        assert(rank == 2)
        assert(pl == 2 and const == 0)

        input_shape = testGen.makeShape(rank)
        filter_oc = testGen.makeShape(1)[0]
        filter_shape = np.asarray([filter_oc, input_shape[1]])

        bias_shape = np.asarray([filter_oc])

        return [input_shape, filter_shape, bias_shape]

    @staticmethod
    def tgMatmul(testGen, op, rank):
        '''Shapes for matmul: A is rank-2, B shares A's inner dimension.'''
        pl, const = op['operands']

        assert(rank == 2)
        assert(pl == 2 and const == 0)

        a_shape = testGen.makeShape(rank)
        b_oc = testGen.makeShape(1)[0]
        b_shape = np.asarray([a_shape[1], b_oc])

        return [a_shape, b_shape]


class TosaArgGen:
    '''Argument generators create exhaustive or random lists of attributes for
    operators that take attributes or other parameters.  The return value is a
    list of (descriptive_name, [arglist]) tuples where the descriptive_name is
    appended to the test name and the arglist is expanded as arguments to the
    operator build function.'''

    def __init__(self):
        pass

    @staticmethod
    def agNone(testGen, opName, shapeList, dtype):
        '''A trivial argument generator for operators that don't take any
        non-tensor arguments'''
        return [('', [])]

    @staticmethod
    def agAxis(testGen, opName, shapeList, dtype):
        '''Build the axis argument for operators that take a single axis'''
        axes = []

        shape = shapeList[0]

        for a in range(0, len(shape)):
            axes.append(('axis_{}'.format(a), [a]))
        return axes

    @staticmethod
    def agConv2D(testGen, opName, shapeList, dtype):
        '''Enumerate stride/padding/dilation combinations for conv2d.'''
        arg_list = []

        ifm_shape = shapeList[0]
        filter_shape = shapeList[1]

        # Must be rank 4
        assert(len(ifm_shape) == 4)
        assert(len(filter_shape) == 4)

        maxStride = testGen.args.max_conv_stride
        maxPadding = testGen.args.max_conv_padding + 1
        maxDilation = testGen.args.max_conv_dilation

        # Strides, padding, dilations
        for stride in range(0, maxStride ** 2):
            for padding in range(0, (maxPadding) ** 4):
                for dilation in range(0, maxDilation ** 2):

                    s = [stride // maxStride + 1,
                         stride % maxStride + 1]
                    # NOTE(review): these divisors (maxPadding * 4/2/1) only
                    # form a correct base-maxPadding digit decode when
                    # maxPadding == 2 (where 8/4/2/1 are the true place
                    # values); for larger maxPadding not all 4-tuples are
                    # enumerated.  Left as-is to preserve the generated
                    # test set — TODO confirm intent.
                    p = [(padding // (maxPadding * 4)) % maxPadding,
                         (padding // (maxPadding * 2)) % maxPadding,
                         (padding // (maxPadding * 1)) % maxPadding,
                         padding % maxPadding]
                    d = [dilation // maxDilation + 1,
                         dilation % maxDilation + 1]

                    # 4 padding parameters for regular conv2d
                    arg_list.append(('st{}{}_pad{}{}{}{}_dilat{}{}'.format(s[0], s[1],
                                                                           p[0], p[1], p[2], p[3],
                                                                           d[0], d[1]),
                                     [s, p, d]))
        return arg_list

    @staticmethod
    def agTransposeConv2D(testGen, opName, shapeList, dtype):
        '''Enumerate stride/out-padding/dilation combinations for
        transpose_conv2d, computing the resulting output shape for each.'''
        arg_list = []

        ifm_shape = shapeList[0]
        filter_shape = shapeList[1]

        # Must be rank 4
        assert(len(ifm_shape) == 4)
        assert(len(filter_shape) == 4)

        maxStride = testGen.args.max_conv_stride
        maxPadding = testGen.args.max_conv_padding + 1
        maxDilation = testGen.args.max_conv_dilation

        # Strides, padding, dilations
        for stride in range(0, maxStride ** 2):
            for out_padding in range(0, (maxPadding) ** 2):
                for dilation in range(0, maxDilation ** 2):

                    s = [stride // maxStride + 1,
                         stride % maxStride + 1]
                    p = [(out_padding // (maxPadding * 1)) % maxPadding,
                         out_padding % maxPadding]
                    d = [dilation // maxDilation + 1,
                         dilation % maxDilation + 1]

                    oh = (ifm_shape[1] - filter_shape[1] - (filter_shape[1] - 1) * (d[0] - 1) + \
                          2 * p[0]) // s[0] + 1

                    ow = (ifm_shape[2] - filter_shape[2] - (filter_shape[2] - 1) * (d[1] - 1) + \
                          2 * p[1]) // s[1] + 1

                    # Output shape
                    os = [ifm_shape[0], oh, ow, filter_shape[0]]

                    arg_list.append(('st{}{}_outpad{}{}_dilat{}{}_os{}x{}x{}x{}'.format(s[0], s[1],
                                                                                        p[0], p[1],
                                                                                        d[0], d[1],
                                                                                        os[0], os[1], os[2], os[3]),
                                     [s, p, d, os]))

        return arg_list

    @staticmethod
    def agPad(testGen, opName, shapeList, dtype):
        '''Enumerate 0/1 padding combinations for the pad operator.'''
        arg_list = []
        rank = len(shapeList[0])

        # Exhaustively test combinations of 0/1 padding on each side of each dimension
        # This process might need some revision for >1 padding, but use rank**2 as a bitmask
        # for now
        # NOTE(review): a full sweep of 2*rank pad bits would be
        # 2 ** (rank * 2); rank ** 2 only covers the low bits for rank > 2.
        # Left as-is to preserve the generated test set — TODO confirm intent.
        for v in range(rank ** 2):

            # Create a flat arraypadding4D
            paddings = np.zeros((rank * 2), dtype=np.int32)

            # Fill in the 1's
            for r in (range(rank * 2)):
                if (v >> r) & 1:
                    paddings[r] = 1

            # Reshape back to a 2D array
            paddings = paddings.reshape((rank, 2))

            arg_list.append(('pad{0:b}'.format(v), [paddings]))

        return arg_list

    @staticmethod
    def agPooling(testGen, opName, shapeList, dtype):
        '''Enumerate kernel/stride/padding combinations for pooling ops.'''
        arg_list = []

        shape = shapeList[0]
        assert(len(shape) == 4)

        maxStride = testGen.args.max_pooling_stride
        maxKernel = testGen.args.max_pooling_kernel
        maxPadding = testGen.args.max_pooling_padding + 1

        for kernel in range(0, maxKernel ** 2):
            for stride in range(0, maxStride ** 2):
                for padding in range(0, maxPadding ** 4):
                    s = [stride // maxStride + 1,
                         stride % maxStride + 1]
                    k = [(kernel // maxKernel) + 2,
                         (kernel % maxKernel) + 2]
                    # NOTE(review): same padding-decode caveat as agConv2D —
                    # only a true exhaustive decode when maxPadding == 2.
                    p = [(padding // (maxPadding * 4)) % maxPadding,
                         (padding // (maxPadding * 2)) % maxPadding,
                         (padding // (maxPadding * 1)) % maxPadding,
                         padding % maxPadding]

                    arg_list.append(('st{}{}_kern{}{}_pad{}{}{}{}'.format(s[0], s[1],
                                                                          k[0], k[1],
                                                                          p[0], p[1], p[2], p[3]),
                                     [k, s, p]))
        return arg_list

    @staticmethod
    def agCast(testGen, opName, shapeList, inDtype):
        '''Enumerate the legal output dtypes for a cast from inDtype.'''
        arg_list = []

        # Enumerate the output types here
        if inDtype == DType.INT8:
            dtypeList = [DType.BOOL, DType.INT16, DType.INT32, DType.FLOAT]
        elif inDtype == DType.INT16:
            dtypeList = [DType.BOOL, DType.INT8, DType.INT32, DType.FLOAT]
        elif inDtype == DType.INT32:
            dtypeList = [DType.BOOL, DType.INT8, DType.INT16, DType.FLOAT]
        elif inDtype == DType.BOOL:
            dtypeList = [DType.INT8, DType.INT16, DType.INT32]
        elif inDtype == DType.FLOAT:
            dtypeList = [DType.INT8, DType.INT16, DType.INT32]
        else:
            raise Exception('Unexpected input dtype: {}'.format(inDtype))

        for dtype in dtypeList:
            arg_list.append(('out{}'.format(DTypeNames[dtype]), [dtype]))

        return arg_list

    @staticmethod
    def agRescale(testGen, opName, shapeList, inDtype):
        '''Enumerate output dtype / scale32 / double-round / per-channel
        combinations for rescale, skipping illegal INT48+scale32.'''
        arg_list = []

        # Enumerate the output types here
        for dtype in [DType.AINT8, DType.INT16, DType.INT32]:
            for scale32 in [False, True]:
                for double_round in [False, True]:
                    for per_channel in [False, True]:

                        if inDtype == DType.INT48 and scale32:
                            # Illegal condition. Must be scale32=False
                            continue

                        arg_list.append(('out{}_sc{}_dr{}_pc{}'.format(DTypeNames[dtype], int(scale32), int(double_round), int(per_channel)),
                                         [dtype, scale32, double_round, per_channel]))

        return arg_list

    # Helper function for reshape. Gets some factors of a larger number.
+ @staticmethod + def getFactors(val, start=1): + factors = [] + + for i in range(start, int(np.sqrt(val))): + if (val % i) == 0: + factors.append(i) + + return factors + + @staticmethod + def agReshape(testGen, opName, shapeList, dtype): + arg_list = [] + + origShape = shapeList[0] + + totalElements = 1 + for s in origShape: + totalElements *= s + + # This code is NOT fast. Fortunately, the numbers are fairly small. + factors = TosaArgGen.getFactors(totalElements) + + for p in range(testGen.args.num_rand_permutations): + newRank = testGen.randInt(1, 6) + newShape = [] + if (len(factors) < newRank): + continue + + remainingElements = totalElements + shuffledFactors = testGen.rng.permutation(factors) + for i in range(newRank): + # pick rank-1 factors + newShape.append(shuffledFactors[0]) + remainingElements = remainingElements // shuffledFactors[0] + shuffledFactors = testGen.rng.permutation(TosaArgGen.getFactors(remainingElements)) + newShape.append(remainingElements) + + # Toss in a -1 sometimes + minusOne = testGen.randInt(0, newRank * 4) + if minusOne < newRank: + newShape[minusOne] = -1 + + arg_list.append(('perm{}_rank{}'.format(p, newRank), [newShape])) + + return arg_list + + + @staticmethod + def agTranspose(testGen, opName, shapeList, dtype): + arg_list = [] + + ifm_shape = shapeList[0] + + perms = range(len(ifm_shape)) + for p in range(testGen.args.num_rand_permutations): + perms = np.int32(testGen.rng.permutation(perms)).tolist() + + # Avoid duplicates + found = False + for name, other_perm in arg_list: + if other_perm[0] == perms: + found = True + break + + if not found: + arg_list.append(('perm{}'.format(p), [perms])) + + return arg_list + + @staticmethod + def agSlice(testGen, opName, shapeList, dtype): + arg_list = [] + + ifm_shape = shapeList[0] + rank = len(ifm_shape) + + for p in range(testGen.args.num_rand_permutations): + begin = [] + size = [] + + valid=True + + for i in range(rank): + if ifm_shape[i] > 1: + begin.append(testGen.randInt(0, 
ifm_shape[i])) + size.append(testGen.randInt(0, ifm_shape[i] - begin[i])) + + # Invalid slice size? + if size[i] == 0: + valid = False + else: + begin.append(0) + size.append(1) + + if valid: + arg_list.append(('perm{}'.format(p), [begin, size])) + return arg_list + + @staticmethod + def agTile(testGen, opName, shapeList, dtype): + arg_list = [] + + ifm_shape = shapeList[0] + rank = len(ifm_shape) + + for p in range(testGen.args.num_rand_permutations): + + # Pick a few random, but small multiple values + # because otherwise this has a tendency to generate + # enormous tensors + multiples = [] + for i in range(rank): + multiples.append(testGen.randInt(1, 4)) + + arg_list.append(('perm{}'.format(p), [multiples])) + + return arg_list + + @staticmethod + def agResize(testGen, opName, shapeList, dtype): + arg_list = [] + + ifm_shape = shapeList[0] + + for m in [ResizeMode.NEAREST, ResizeMode.BILINEAR]: + + # Exclude illegal {mode, type} configurations. Pick legal output types + if m == ResizeMode.NEAREST and dtype == DType.INT8: + outputDTypeList = [ DType.INT32 ] + elif m == ResizeMode.NEAREST and dtype == DType.INT16: + outputDTypeList = [ DType.INT16 ] + elif m == ResizeMode.BILINEAR and dtype == DType.INT8: + outputDTypeList = [ DType.INT8 ] + elif m == ResizeMode.BILINEAR and dtype == DType.INT16: + outputDTypeList = [ DType.INT48 ] + else: + continue + + for outputDType in outputDTypeList: + for perm in range(testGen.args.num_rand_permutations): + + # Randomly generate legal output dimensions and shift + # and then compute the stride and offset based on them + output_dims = [ testGen.randInt(), testGen.randInt() ] + + shift = testGen.randInt(1, 11) + + stride = [ (ifm_shape[1] << shift) // output_dims[0], + (ifm_shape[2] << shift) // output_dims[1] ] + + offset = [ testGen.randInt(-stride[0], (ifm_shape[1] << shift) - (output_dims[0] - 1) * stride[0]), + testGen.randInt(-stride[1], (ifm_shape[2] << shift) - (output_dims[1] - 1) * stride[1]) ] + + 
arg_list.append(('mode{}_shift{}_odim{}x{}_out{}_st{}x{}_off{}x{}'.format(m, shift, output_dims[0], output_dims[1], + testGen.typeStr(outputDType), stride[0], stride[1], + offset[0], offset[1]), + [m, stride, offset, shift, output_dims, outputDType])) + + return arg_list + + def agCondIf(testGen, opName, shapeList, dtype): + # CondIf generates the condition values here. + # Convert to tensors in the build function, along with the + # then and else blocks + arg_list = [] + + for c in [False, True]: + arg_list.append(('cond{}'.format(int(c)), [ c ])) + + return arg_list + + def agWhileLoop(testGen, opName, shapeList, dtype): + # While loop: 0 iterations, 1, more than 1 + arg_list = [] + + for iter in [0, 1, 4]: + arg_list.append(('iter{}'.format(iter), [ iter ])) + + return arg_list + +class TosaTestGen: + def __init__(self, args): + self.args = args + self.basePath = args.output_dir + self.random_seed = args.random_seed + self.ser = None + self.rng = np.random.default_rng(self.random_seed) + self.createDynamicOpLists() + self.initOpListDefaults() + self.quantGen = TosaQuantGen() + # Force makeShape to do a specific starting shape + self.targetted_shape = None + + def createSerializer(self, opName, testPath): + self.testPath = os.path.join(opName, testPath) + + fullPath = os.path.join(self.basePath, self.testPath) + os.makedirs(fullPath, exist_ok=True) + self.ser = ts.TosaSerializer(fullPath) + + def getSerializer(self): + return self.ser + + def serialize(self, testName): + with open(os.path.join(self.basePath, self.testPath, '{}.tosa'.format(testName)), 'wb') as fd: + fd.write(self.ser.serialize()) + + with open(os.path.join(self.basePath, self.testPath, 'desc.json'), 'w') as fd: + fd.write(self.ser.writeJson('{}.tosa'.format(testName))) + + def getRandTensor(self, shape, dtype): + RAND_SHIFT_FACTOR = 0.5 + RAND_SCALE_FACTOR = 4.0 + + if dtype == DType.BOOL: + np_dt = np.bool + return np.bool_(self.rng.choice(a=[False, True], size=shape)) + elif dtype == 
DType.AINT8: + return np.int32(self.rng.integers(low=0, high=256, size=shape)) + elif dtype == DType.INT4: + return np.int32(self.rng.integers(low=-7, high=8, size=shape)) + elif dtype == DType.INT8: + return np.int32(self.rng.integers(low=-127, high=128, size=shape)) + elif dtype == DType.INT16: + return np.int32(self.rng.integers(low=-32768, high=32768, size=shape)) + elif dtype == DType.INT32: + return np.int32(self.rng.integers(low=-(1 << 31), high=(1 << 31), size=shape)) + elif dtype == DType.INT48: + return np.int64(self.rng.integers(low=-(1 << 47), high=(1 << 47), size=shape)) + elif dtype == DType.FLOAT: + return np.float32(self.rng.random(size=shape) - RAND_SHIFT_FACTOR * RAND_SCALE_FACTOR) + else: + raise Exception('Unrecognized Dtype: {}'.format(dtype)) + + def buildPlaceholderTensors(self, shape_list, dtype): + placeholders = [] + + for shape in shape_list: + arr = self.getRandTensor(shape, dtype) + placeholders.append(self.ser.addPlaceholder(shape, dtype, Usage.ACTIVATION, [], arr)) + + return placeholders + + def buildConstTensors(self, shape_list, dtype): + consts = [] + + for shape in shape_list: + arr = self.getRandTensor(shape, dtype) + consts.append(self.ser.addConst(shape, dtype, Usage.ACTIVATION, [], arr)) + + return consts + + def makeShape(self, rank): + if self.targetted_shape: + return np.int32(self.targetted_shape) + return np.int32(self.rng.integers(low=self.args.tensor_shape_range[0], + high=self.args.tensor_shape_range[1], + size=rank)) + + def setTargetShape(self, shape): + self.targetted_shape = shape + + def randInt(self, low=0, high=256): + return np.int32(self.rng.integers(low=low, high=high, size=1))[0] + + def getRandNumberDType(self, dtype): + if dtype == DType.FLOAT: + return self.rng.random() + elif dtype == DType.BOOL: + return self.rng.choice([False, True]) + elif dtype == DType.INT4: + low, high = (-7, 8) + elif dtype == DType.AINT8: + low, high = (0, 256) + elif dtype == DType.INT8: + low, high = (-127, 128) + elif dtype 
== DType.INT16: + low, high = (-32768, 32768) + elif dtype == DType.INT32: + low, high = (-(1<<31), (1<<31)) + elif dtype == DType.INT48: + low, high = (-(1<<47), (1<<47)) + # Special size + return np.int64(self.rng.integers(low, high, size=1))[0] + else: + raise Exception('Unknown dtype: {}'.format(dtype)) + + return np.int32(self.rng.integers(low, high, size=1))[0] + + def shapeStr(self, shape): + + sStr = [] + # Convert to strings + for i in shape: + sStr.append(str(i)) + + return 'x'.join(sStr) + + def typeStr(self, t): + if t == DType.BOOL: + return 'b' + elif t == DType.AINT8: + return 'a8' + elif t == DType.INT4: + return 'i4' + elif t == DType.INT8: + return 'i8' + elif t == DType.INT16: + return 'i16' + elif t == DType.INT32: + return 'i32' + elif t == DType.INT48: + return 'i48' + elif t == DType.FLOAT: + return 'float' + else: + raise Exception('Unknown dtype, cannot convert to string: {}'.format(t)) + + def typeWidth(self, t): + ''' Get the datatype width for integer types''' + if t == DType.AINT8: + return 8 + elif t == DType.UINT8: + return 8 + elif t == DType.INT4: + return 4 + elif t == DType.INT8: + return 8 + elif t == DType.INT16: + return 16 + elif t == DType.INT32: + return 32 + elif t == DType.INT48: + return 48 + else: + raise Exception('Unknown dtype, cannot convert to string: {}'.format(t)) + + # Argument generators + # Returns a list of tuples (stringDescriptor, [build_fcn_arg_list]) + # Where the string descriptor is used to generate the test name and + # The build_fcn_arg_list is expanded and passed to the operator test + # build function + + + def build_unary(self, op, a, qinfo = None): + result_tens = OutputShaper.unaryOp(self.ser, a) + self.ser.addOperator(op, [a.name], [result_tens.name], None, qinfo) + return result_tens + + def build_binary_broadcast(self, op, a, b): + result_tens = OutputShaper.binaryBroadcastOp(self.ser, a, b) + self.ser.addOperator(op, [a.name, b.name], [result_tens.name]) + return result_tens + + def 
build_binary_nonbroadcast(self, op, a, b): + result_tens = OutputShaper.binaryNonBroadcastOp(self.ser, a, b) + self.ser.addOperator(op, [a.name, b.name], [result_tens.name]) + return result_tens + + def build_mul(self, op, a, b): + result_tens = OutputShaper.binaryBroadcastOp(self.ser, a, b) + + # Special for multiply: + # Force the result to INT32 for INT types + if a.dtype != DType.FLOAT: + result_tens.setDtype(DType.INT32) + + self.ser.addOperator(op, [a.name, b.name], [result_tens.name]) + return result_tens + + def build_table(self, op, a): + # Constant size, random values + table_arr = self.getRandTensor([513], DType.INT16) + table_tens = self.ser.addConst(table_arr.shape, DType.INT16, Usage.INDEX, [], table_arr) + + result_tens = OutputShaper.tableOp(self.ser, a, table_tens) + self.ser.addOperator(op, [a.name, table_tens.name], [result_tens.name], None) + + return result_tens + + def build_select(self, op, cond, a, b): + + # Replace the cond tensor with a boolean tensor since it probably + # has the wrong dtype + t = self.buildPlaceholderTensors([cond.shape], DType.BOOL) + cond = t[0] + + result_tens = OutputShaper.selectOp(self.ser, cond, a, b) + self.ser.addOperator(op, [cond.name, a.name, b.name], [result_tens.name]) + + return result_tens + + def build_comparison(self, op, a, b): + result_tens = OutputShaper.binaryComparisonOp(self.ser, a, b) + self.ser.addOperator(op, [a.name, b.name], [result_tens.name]) + return result_tens + + def build_argmax(self, op, a, axis): + result_tens = OutputShaper.argmaxOp(self.ser, a, axis) + + attr = ts.TosaSerializerAttribute() + attr.AxisAttribute(axis) + + self.ser.addOperator(op, [a.name], [result_tens.name], attr) + return result_tens + + def build_pool2d(self, op, input, kernel, stride, pad, qinfo = None): + result_tens = OutputShaper.pool2dOp(self.ser, input, kernel, stride, pad) + + attr = ts.TosaSerializerAttribute() + attr.Pool2dAttribute(kernel, stride, pad) + input.addFormat(Format.NHWC) + + 
self.ser.addOperator(op, [input.name], [result_tens.name], attr, qinfo) + return result_tens + + def build_conv2d(self, op, ifm, filter, bias, strides, padding, dilations, qinfo): + assert(len(padding) == 4) + result_tens = OutputShaper.conv2dOp(self.ser, ifm, filter, strides, padding, dilations) + + attr = ts.TosaSerializerAttribute() + attr.Conv2dAttribute(padding, strides, dilations) + + ifm.addFormat(Format.NHWC) + # Update the filter ordering + filter.addUsage(Usage.WEIGHT) + filter.addFormat(Format.OHWI) + + self.ser.addOperator(op, [ifm.name, filter.name, bias.name], [result_tens.name], attr, qinfo) + return result_tens + + def build_transpose_conv2d(self, op, ifm, filter, stride, outpad, dilation, output_shape, qinfo): + assert(len(outpad) == 2) + result_tens = OutputShaper.transposeConv2DOp(self.ser, ifm, output_shape) + + attr = ts.TosaSerializerAttribute() + attr.TransposeConv2DAttribute(outpad, stride, dilation, output_shape) + + ifm.addFormat(Format.NHWC) + # Update the filter ordering + filter.addUsage(Usage.WEIGHT) + filter.addFormat(Format.OHWI) + + # Create bias here since the acc_t depends on (but isn't the same as) the input dtype + # The bias is OC + if ifm.dtype == DType.AINT8 or ifm.dtype == DType.INT8: + bias_type = DType.INT32 + elif ifm.dtype == DType.INT16: + bias_type = DType.INT48 + elif ifm.dtype == DType.FLOAT: + bias_type = DType.FLOAT + else: + raise Exception('Unsupported dtype for transpose_conv2d: {}'.format(ifm.dtype)) + + bias_arr = self.getRandTensor([filter.shape[0]], bias_type) + bias_tens = self.ser.addConst([filter.shape[0]], bias_type, [], [], bias_arr) + + self.ser.addOperator(op, [ifm.name, filter.name, bias_tens.name], [result_tens.name], attr, qinfo) + return result_tens + + def build_depthwise_conv2d(self, op, ifm, filter, bias, strides, padding, dilations, qinfo): + result_tens = OutputShaper.depthwiseConv2dOp(self.ser, ifm, filter, strides, padding, dilations) + + attr = ts.TosaSerializerAttribute() + 
attr.Conv2dAttribute(padding, strides, dilations) + + ifm.addFormat(Format.NHWC) + filter.addUsage(Usage.WEIGHT) + filter.addFormat(Format.HWIM) + + self.ser.addOperator(op, [ifm.name, filter.name, bias.name], [result_tens.name], attr, qinfo) + return result_tens + + def build_fully_connected(self, op, ifm, filter, bias, qinfo): + result_tens = OutputShaper.fullyConnectedOp(self.ser, ifm, filter) + + filter.addUsage(Usage.WEIGHT) + self.ser.addOperator(op, [ifm.name, filter.name, bias.name], [result_tens.name], None, qinfo) + return result_tens + + def build_matmul(self, op, a, b, qinfo): + result_tens = OutputShaper.matmulOp(self.ser, a, b) + self.ser.addOperator(op, [a.name, b.name], [result_tens.name], None, qinfo) + return result_tens + + def build_reduce(self, op, a, axis): + result_tens = OutputShaper.reduceOp(self.ser, a, axis) + + attr = ts.TosaSerializerAttribute() + attr.AxisAttribute(axis) + + self.ser.addOperator(op, [a.name], result_tens.name, attr) + return result_tens + + def build_clamp(self, op, a): + result_tens = OutputShaper.unaryOp(self.ser, a) + + attr = ts.TosaSerializerAttribute() + + # Get two random ints + v = [self.randInt(), self.randInt()] + + if a.dtype == DType.FLOAT: + attr.ClampAttribute(0, 0, min(v), max(v)) + else: + attr.ClampAttribute(min(v), max(v), 0, 0) + + self.ser.addOperator(op, [a.name], [result_tens.name], attr) + return result_tens + + def build_leaky_relu(self, op, a): + result_tens = OutputShaper.unaryOp(self.ser, a) + attr = ts.TosaSerializerAttribute() + + attr.LeakyReluAttribute(self.getRandNumberDType(DType.FLOAT)) + + self.ser.addOperator(op, [a.name], [result_tens.name], attr) + return result_tens + + # Needs an additional type/input + def build_prelu(self, op, a): + result_tens = OutputShaper.unaryOp(self.ser, a) + + self.ser.addOperator(op, [a.name], [result_tens.name]) + return result_tens + + def build_relun(self, op, a): + result_tens = OutputShaper.unaryOp(self.ser, a) + + attr = 
ts.TosaSerializerAttribute() + + if a.dtype == DType.FLOAT: + attr.ReluNAttribute(0, self.getRandNumberDType(a.dtype)) + else: + attr.ReluNAttribute(self.getRandNumberDType(a.dtype), 0) + + self.ser.addOperator(op, [a.name], [result_tens.name], attr) + return result_tens + + def build_sigmoid(self, op, a): + result_tens = OutputShaper.unaryOp(self.ser, a) + self.ser.addOperator(op, [a.name], [result_tens.name]) + return result_tens + + def build_tanh(self, op, a): + result_tens = OutputShaper.unaryOp(self.ser, a) + self.ser.addOperator(op, [a.name], [result_tens.name]) + return result_tens + + def build_concat(self, op, a, b, axis): + result_tens = OutputShaper.concatOp(self.ser, a, b, axis) + + attr = ts.TosaSerializerAttribute() + attr.AxisAttribute(axis) + + self.ser.addOperator(op, [a.name, b.name], [result_tens.name], attr) + + def build_pad(self, op, a, padding, qinfo): + result_tens = OutputShaper.padOp(self.ser, a, padding) + + # Need to turn the padding array into a TOSA tensor here. 
+ # This is one of the few tensor operands that does not get + # randomly generated + padding_tens = self.ser.addConst(padding.shape, DType.INT32, [], [], padding) + + self.ser.addOperator(op, [a.name, padding_tens.name], [result_tens.name], None, qinfo) + + def build_reshape(self, op, a, newShape): + result_tens = OutputShaper.reshapeOp(self.ser, a, newShape) + + attr = ts.TosaSerializerAttribute() + attr.ReshapeAttribute(newShape) + + self.ser.addOperator(op, [a.name], [result_tens.name], attr) + return result_tens + + def build_reverse(self, op, a, axis): + result_tens = OutputShaper.unaryOp(self.ser, a) + + attr = ts.TosaSerializerAttribute() + attr.AxisAttribute(axis) + + self.ser.addOperator(op, [a.name], [result_tens.name], attr) + return result_tens + + def build_transpose(self, op, a, perms): + result_tens = OutputShaper.transposeOp(self.ser, a, perms) + + perms_tens = self.ser.addConst([len(perms)], DType.INT32, Usage.ACTIVATION, [], np.int32(perms)) + + self.ser.addOperator(op, [a.name, perms_tens.name], [result_tens.name]) + return result_tens + + def build_slice(self, op, a, begin, size): + result_tens = OutputShaper.sliceOp(self.ser, a, begin, size) + + attr = ts.TosaSerializerAttribute() + attr.SliceAttribute(begin, size) + + self.ser.addOperator(op, [a.name], [result_tens.name], attr) + return result_tens + + def build_tile(self, op, a, multiples): + result_tens = OutputShaper.tileOp(self.ser, a, multiples) + + attr = ts.TosaSerializerAttribute() + attr.TileAttribute(multiples) + + self.ser.addOperator(op, [a.name], [result_tens.name], attr) + return result_tens + + + def build_gather(self, op, values, axis): + + # Create a new indicies tensor + # here with data that doesn't exceed the dimensions of the values tensor + + max_val = values.shape[axis] + indicies_arr = np.int32(self.rng.integers(low=0, high=max_val, size=[self.randInt(1, max_val + 1)])) + indicies = self.ser.addConst(indicies_arr.shape, DType.INT32, Usage.INDEX, [], indicies_arr) + + 
result_tens = OutputShaper.gatherOp(self.ser, values, indicies, axis) + + attr = ts.TosaSerializerAttribute() + attr.AxisAttribute(axis) + + self.ser.addOperator(op, [values.name, indicies.name], [result_tens.name], attr) + + return result_tens + + def build_resize(self, op, input, mode, stride, offset, shift, output_dims, output_dtype): + result_tens = OutputShaper.resizeOp(self.ser, input, mode, stride, offset, shift, output_dims, output_dtype) + + attr = ts.TosaSerializerAttribute() + attr.ResizeAttribute(output_dims, stride, offset, shift, mode) + + self.ser.addOperator(op, [input.name], [result_tens.name], attr) + return result_tens + + def build_identityn(self, op, val, val2): + + result_tens = OutputShaper.unaryOp(self.ser, val) + result_tens2 = OutputShaper.unaryOp(self.ser, val2) + self.ser.addOperator(op, [val.name, val2.name], [result_tens.name, result_tens2.name]) + return result_tens + + def build_placeholder(self, op, val): + # Add an identity op to avoid warning in the reference model + return self.build_unary(Op.IDENTITY, val) + + # Type Conversion + def build_cast(self, op, val, out_dtype): + result_tens = OutputShaper.typeConversionOp(self.ser, val, out_dtype) + self.ser.addOperator(op, [val.name], [result_tens.name]) + return result_tens + + def build_rescale(self, op, val, out_dtype, scale32, double_round, per_channel): + result_tens = OutputShaper.typeConversionOp(self.ser, val, out_dtype) + + if per_channel: + nc = val.shape[-1] + else: + nc = 1 + + in_type_width = self.typeWidth(val.dtype) + out_type_width = self.typeWidth(out_dtype) + + if val.dtype == DType.AINT8: + input_zp = self.randInt() + in_type_width = in_type_width + 1 + else: + input_zp = 0 + + if out_dtype == DType.AINT8: + output_zp = self.randInt() + out_type_width = out_type_width + 1 + else: + output_zp = 0 + + # Calculate scale based on: + # scale = a *(2^output_width)/(2^input_width)) + + a = np.float32(self.rng.random(size=[nc])) + scale_arr = a * np.float32((1 << 
out_type_width) / (1 << in_type_width)) + + if scale32: + pass + # Cap the scaling at 2^15 - 1 for scale16 + scale_arr = np.clip(scale_arr, 1.0 / (1 << 31), (1 << 31) - 1) + else: + # Cap the scaling at 2^15 - 1 for scale16 + scale_arr = np.clip(scale_arr, 1.0 / (1 << 31), 32767.0) + + #print('{} {} -> {}'.format(out_type_width, in_type_width, scale_arr)) + + multiplier_arr = np.int32(np.zeros(shape=[nc])) + shift_arr = np.int32(np.zeros(shape=[nc])) + + for i in range(nc): + multiplier_arr[i], shift_arr[i] = TosaQuantGen.computeMultiplierAndShift(scale_arr[i], scale32) + + #print('multiplier {} shift {} inzp {} outzp {}'.format(multiplier_arr, shift_arr, input_zp, output_zp)) + + attr = ts.TosaSerializerAttribute() + attr.RescaleAttribute(input_zp, + output_zp, + multiplier_arr, + shift_arr, + scale32, + double_round, + + per_channel) + + self.ser.addOperator(op, [val.name], [result_tens.name], attr) + return result_tens + + def build_cond_if_const(self, op, then_tens, else_tens, cond): + # For cond_if with constants, we're supplied with then/else tensors that we ignore + # (except for the generated shap) and the condition. Build Then/Else blocks + # and fill them with const nodes for the body. 
+ + # Condition tensor + cond_tens = self.ser.addConst([], DType.BOOL, Usage.ACTIVATION, [], [cond]) + + # Make then/else tensors + out_shape = then_tens.shape + then_arr = np.int32(self.rng.integers(0, 255, size=out_shape)) + else_arr = np.int32(self.rng.integers(0, 255, size=out_shape)) + + # And the result tensor based on any of the outputs + result_tens = self.ser.addOutput(out_shape, DType.INT32, Usage.ACTIVATION, []) + + # Create the attribute with the names of the then/else blocks + then_block = 'THEN_BLOCK' + else_block = 'ELSE_BLOCK' + attr = ts.TosaSerializerAttribute() + attr.CondIfAttribute(then_block, else_block) + + # Finally, build the op and the two blocks + self.ser.addOperator(op, [cond_tens.name], [result_tens.name], attr) + + self.ser.startBasicBlock(then_block) + # Build the actual then/else tensors inside their blocks + then_tens = self.ser.addConst(out_shape, DType.INT32, Usage.ACTIVATION, [], then_arr) + self.ser.addOutputTensor(then_tens) + + self.ser.startBasicBlock(else_block) + else_tens = self.ser.addConst(out_shape, DType.INT32, Usage.ACTIVATION, [], else_arr) + self.ser.addOutputTensor(else_tens) + + return result_tens + + def build_cond_if_binary(self, op, a, b, cond): + # For cond_if with a binary op in the then/else blocks, take a and b and + # alternately add or subtract them based on the condition + + # Condition tensor + cond_tens = self.ser.addConst([], DType.BOOL, Usage.ACTIVATION, [], [cond]) + + result_tens = self.ser.addOutput(a.shape, a.dtype, Usage.ACTIVATION, []) + self.ser.currBasicBlock.addOutput(result_tens.name) + + # Create the attribute with the names of the then/else blocks + then_block = 'THEN_BLOCK' + else_block = 'ELSE_BLOCK' + attr = ts.TosaSerializerAttribute() + attr.CondIfAttribute(then_block, else_block) + + # Finally, build the op and the two blocks + self.ser.addOperator(op, [cond_tens.name, a.name, b.name], [result_tens.name], attr) + + self.ser.startBasicBlock(then_block) + self.ser.addInputTensor(a) + 
self.ser.addInputTensor(b) + then_tens = self.ser.addOutput(a.shape, a.dtype, a.usage, a.dformat) + self.ser.addOperator(Op.ADD, [a.name, b.name], [then_tens.name]) + + self.ser.startBasicBlock(else_block) + self.ser.addInputTensor(a) + self.ser.addInputTensor(b) + else_tens = self.ser.addOutput(a.shape, a.dtype, a.usage, a.dformat) + self.ser.addOperator(Op.SUB, [a.name, b.name], [else_tens.name]) + + return result_tens + + def build_while_loop(self, op, a, iter_val): + iter = self.ser.addPlaceholder([], DType.INT32, Usage.ACTIVATION, [], [np.int32(iter_val)]) + + cond_block = 'COND_BLOCK' + body_block = 'BODY_BLOCK' + + attr = ts.TosaSerializerAttribute() + attr.WhileLoopAttribute(cond_block, body_block) + + # Accumulator tensor + #acc = self.ser.addOutput(a.shape, a.dtype, a.usage, a.dformat) + acc_init_val = np.int32(np.zeros(a.shape)) + acc = self.ser.addPlaceholder(a.shape, a.dtype, a.usage, a.dformat, acc_init_val) + + # Intermediate/output tensors for everything going through the loop + iter_out = self.ser.addIntermediate(iter.shape, iter.dtype, iter.usage, iter.dformat) + a_out = self.ser.addIntermediate(a.shape, a.dtype, a.usage, a.dformat) + acc_out = self.ser.addIntermediate(acc.shape, acc.dtype, acc.usage, acc.dformat) + + # While_loop operator + self.ser.addOperator(op, + [iter.name, a.name, acc.name], + [iter_out.name, a_out.name, acc_out.name], attr) + + # COND block (input: iter, output: cond_tens ) + self.ser.startBasicBlock(cond_block) + self.ser.addInputTensor(iter) + self.ser.addInputTensor(a) + self.ser.addInputTensor(acc) + zero_tens = self.ser.addConst([], DType.INT32, [], [], [np.int32(0)]) + cond_tens = self.ser.addOutput([], DType.BOOL, [], []) + self.ser.addOperator(Op.GREATER, [iter.name, zero_tens.name], + [cond_tens.name]) + + # BODY block (input: a, acc, iter, output: a, acc, iter) + # Note that local intermediate tensors need to be declared here for the outputs + self.ser.startBasicBlock(body_block) + self.ser.addInputTensor(iter) + 
self.ser.addInputTensor(a) + self.ser.addInputTensor(acc) + one_tens = self.ser.addConst([], DType.INT32, [], [], [np.int32(1)]) + iter_body_out = self.ser.addIntermediate(iter.shape, iter.dtype, iter.usage, iter.dformat) + acc_body_out = self.ser.addIntermediate(acc.shape, acc.dtype, acc.usage, acc.dformat) + self.ser.addOperator(Op.ADD, [a.name, acc.name], [acc_body_out.name]) + self.ser.addOperator(Op.SUB, [iter.name, one_tens.name], [iter_body_out.name]) + self.ser.addOutputTensor(iter_body_out) + self.ser.addOutputTensor(a) + self.ser.addOutputTensor(acc_body_out) + + return acc_out + + + def genOpTestList(self, opName, shapeFilter=[None], rankFilter=None, dtypeFilter=None): + + try: + op = self.TOSA_OP_LIST[opName] + except KeyError as e: + raise Exception('Cannot find op with name {}'.format(opName)) + + # Initialize a new random number generator + self.rng = np.random.default_rng(self.random_seed) + + build_fcn, tgen_fcn, agen_fcn = op['build_fcn'] + + # Generate the lists of arguments + rmin, rmax = op['rank'] + + # Test list consists of a tuple of: + # (opName, testNameStr, dtype, shapeList, argumentsList) + testList = [] + + if not shapeFilter: + shapeFilter = [None] + + for r in range(rmin, rmax + 1): + + # Filter out the rank? + if rankFilter is not None and r not in rankFilter: + continue + + for t in op['types']: + + # Filter tests based on dtype? 
+ if dtypeFilter is not None: + if t not in dtypeFilter: + continue + + # Create the placeholder and const tensors + for shape in shapeFilter: + # A None shape chooses a random shape of a given rank + + # Filter out by rank + if shape is not None and len(shape) != r: + continue + + self.setTargetShape(shape) + shapeList = tgen_fcn(self, op, r) + + shapeStr = self.shapeStr(shapeList[0]) + typeStr = self.typeStr(t) + + # Argument lists consists of tuples of the (str, []) string representation and the build function argument list + argList = [] + if agen_fcn: + argList = agen_fcn(self, opName, shapeList, t) + else: + argList = [('', [])] + + for argStr, args in argList: + if argStr: + testStr = '{}_{}_{}_{}'.format(opName, shapeStr, typeStr, argStr) + else: + testStr = '{}_{}_{}'.format(opName, shapeStr, typeStr) + + testList.append((opName, testStr, t, shapeList, args)) + + return testList + + def serializeTest(self, opName, testStr, dtype, shapeList, testArgs): + try: + op = self.TOSA_OP_LIST[opName] + except KeyError as e: + raise Exception('Cannot find op with name {}'.format(opName)) + + # Create a serializer + self.createSerializer(opName, testStr) + + build_fcn, tgen_fcn, agen_fcn = op['build_fcn'] + pCount, cCount = op['operands'] + + try: + qgen = op['qgen'] + except KeyError: + qgen = None + + # Build the random tensor operands and the test + tens = [] + tens.extend(self.buildPlaceholderTensors(shapeList[0:pCount], dtype)) + tens.extend(self.buildConstTensors(shapeList[pCount:], dtype)) + + if qgen is not None: + qinfo = qgen(self, op, dtype) + else: + qinfo = None + + try: + if qinfo is not None: + resultName = build_fcn(self, op['op'], *tens, *testArgs, qinfo) + else: + resultName = build_fcn(self, op['op'], *tens, *testArgs) + except TypeError as e: + print('build_fcn: {}\nTensors: {}\nArgs: {}\n'.format(build_fcn, tens, testArgs)) + raise e + + # Save the serialized test + self.serialize('test') + + def createDynamicOpLists(self): + + # Dynamically 
create op lists for convolutions with a list of kernel sizes + KERNELS = [ [1, 1], [2, 2], [3, 3], [5, 5], [3, 1], [1, 3] ] + + for k in KERNELS: + testName = 'conv2d_{}x{}'.format(k[0], k[1]) + self.TOSA_OP_LIST[testName] = self.TOSA_OP_LIST['conv2d_TEMPLATE'].copy() + self.TOSA_OP_LIST[testName]['filter'] = k + self.TOSA_OP_LIST[testName]['template'] = False + + testName = 'depthwise_conv2d_{}x{}'.format(k[0], k[1]) + self.TOSA_OP_LIST[testName] = self.TOSA_OP_LIST['depthwise_conv2d_TEMPLATE'].copy() + self.TOSA_OP_LIST[testName]['filter'] = k + self.TOSA_OP_LIST[testName]['template'] = False + + testName = 'transpose_conv2d_{}x{}'.format(k[0], k[1]) + self.TOSA_OP_LIST[testName] = self.TOSA_OP_LIST['transpose_conv2d_TEMPLATE'].copy() + self.TOSA_OP_LIST[testName]['filter'] = k + self.TOSA_OP_LIST[testName]['template'] = False + + # Delete any templates after having created any dynamic ops + # This is a two-pass operation because it's bad practice to delete + # keys from dictionaries while iterating + keyList = [] + for k in self.TOSA_OP_LIST: + try: + if self.TOSA_OP_LIST[k]['template'] == True: + keyList.append(k) + continue + except KeyError: + pass + + for k in keyList: + del self.TOSA_OP_LIST[k] + + def initOpListDefaults(self): + '''Fill in default fields for ops if they aren't already specified. 
+ Look for missing required fields (datastructure linting).''' + for op in self.TOSA_OP_LIST: + + # Required fields + try: + pl, c = self.TOSA_OP_LIST[op]['operands'] + except (KeyError, ValueError, TypeError): + raise Exception('Op {} is missing a valid operand tuple in TOSA_OP_LIST'.format(op)) + + try: + fcn, tgen, arggen = self.TOSA_OP_LIST[op]['build_fcn'] + except (KeyError, ValueError, TypeError): + raise Exception('Op {} is missing a valid build_fcn tuple in TOSA_OP_LIST'.format(op)) + + try: + types = self.TOSA_OP_LIST[op]['types'] + except KeyError as e: + raise Exception('Op {} is missing a valid type list in TOSA_OP_LIST'.format(op)) + + try: + opcode = self.TOSA_OP_LIST[op]['op'] + except KeyError as e: + raise Exception('Op {} is missing the Op field in TOSA_OP_LIST'.format(op)) + + # Put in default rank range, if missing + try: + rank = self.TOSA_OP_LIST[op]['rank'] + except KeyError: + self.TOSA_OP_LIST[op]['rank'] = self.DEFAULT_RANK_RANGE + + # Tensor operator list + # 'op': op name + # 'operands': tuple of (placeholder, const) operands + # 'rank': optional, restricts rank to tuple inclusive of (min, max), if not specified, defaults to (1, 4) + # 'build_fcn': tuple of the function to (build_operator(), TensorGen function, ArgGen enum) + # 'types': array of datatypes to be tested + TYPE_FP = [ DType.FLOAT ] + + # Type with an aint8 + TYPE_INT = [ DType.AINT8, DType.INT16, DType.INT32 ] # Most operators support AINT8 instead of INT8, excludes INT4 + TYPE_INT_FP = [ DType.AINT8, DType.INT16, DType.INT32, DType.FLOAT ] # Most operators support AINT8 instead of INT8, excludes INT4 + + # Types with an int8 + TYPE_PURE_INT = [ DType.INT8, DType.INT16, DType.INT32 ] # Note: excludes INT4 + TYPE_PURE_INT_FP = [ DType.INT8, DType.INT16, DType.INT32, DType.FLOAT ] # Note: excludes INT4 + TYPE_BOOL = [ DType.BOOL ] + TYPE_FI32 = [ DType.FLOAT, DType.INT32 ] + TYPE_FIB = [ DType.FLOAT, DType.AINT8, DType.INT8, DType.INT16, DType.INT32, DType.BOOL ] + TYPE_FI16 
= [ DType.FLOAT, DType.INT16 ] + + TYPE_NARROW_INT_FP = [ DType.AINT8, DType.INT16, DType.FLOAT ] + + DEFAULT_RANK_RANGE = (1, 4) + + TOSA_OP_LIST = { + # Binary ops + 'add': + { 'op': Op.ADD, + 'operands': (2, 0), + 'build_fcn': (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None), + 'types': TYPE_FI32 }, + + 'arithmetic_right_shift': + { 'op': Op.ARITHMETIC_RIGHT_SHIFT, + 'operands': (2, 0), + 'build_fcn': (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None), + 'types': TYPE_PURE_INT }, + + 'bitwise_and': + { 'op': Op.BITWISE_AND, + 'operands': (2, 0), + 'build_fcn': (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None), + 'types': TYPE_INT }, + + 'bitwise_or': + { 'op': Op.BITWISE_OR, + 'operands': (2, 0), + 'build_fcn': (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None), + 'types': TYPE_INT }, + + 'bitwise_xor': + { 'op': Op.BITWISE_XOR, + 'operands': (2, 0), + 'build_fcn': (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None), + 'types': TYPE_INT }, + + 'logical_and': + { 'op': Op.LOGICAL_AND, + 'operands': (2, 0), + 'build_fcn': (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None), + 'types': TYPE_BOOL }, + + 'logical_left_shift': + { 'op': Op.LOGICAL_LEFT_SHIFT, + 'operands': (2, 0), + 'build_fcn': (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None), + 'types': TYPE_PURE_INT }, + + 'logical_right_shift': + { 'op': Op.LOGICAL_RIGHT_SHIFT, + 'operands': (2, 0), + 'build_fcn': (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None), + 'types': TYPE_PURE_INT }, + + 'logical_or': + { 'op': Op.LOGICAL_OR, + 'operands': (2, 0), + 'build_fcn': (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None), + 'types': TYPE_BOOL }, + + 'logical_xor': + { 'op': Op.LOGICAL_XOR, + 'operands': (2, 0), + 'build_fcn': (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None), + 'types': TYPE_BOOL }, + + 'max': + { 'op': Op.MAXIMUM, + 'operands': (2, 0), + 'build_fcn': (build_binary_broadcast, 
TosaTensorGen.tgBroadcastFuzz, None), + 'types': TYPE_FI32 }, + + 'min': + { 'op': Op.MINIMUM, + 'operands': (2, 0), + 'build_fcn': (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None), + 'types': TYPE_FI32 }, + + 'mul': + { 'op': Op.MUL, + 'operands': (2, 0), + 'build_fcn': (build_mul, TosaTensorGen.tgBroadcastFuzz, None), + 'types': TYPE_PURE_INT_FP }, + + 'pow': + { 'op': Op.POW, + 'operands': (2, 0), + 'build_fcn': (build_binary_broadcast, TosaTensorGen.tgBasic, None), + 'types': TYPE_FP }, + + 'sub': + { 'op': Op.SUB, + 'operands': (2, 0), + 'build_fcn': (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None), + 'types': TYPE_FI32 }, + + 'table': + { 'op': Op.TABLE, + # Use the automatic generation functions to create the input array + # but create the table tensor in the build function, as it may be + # a different type from the input + 'operands': (1, 0), + 'build_fcn': (build_table, TosaTensorGen.tgBasic, None), + 'types': [ DType.INT16 ] }, + + 'argmax': + { 'op': Op.ARGMAX, + 'operands': (1, 0), + 'build_fcn': (build_argmax, TosaTensorGen.tgBasic, TosaArgGen.agAxis), + 'types': TYPE_FP }, + + # Templated operator. Filled in by createDynamicOpLists + 'conv2d_TEMPLATE': + { 'op': Op.CONV2D, + 'operands': (1, 2), + 'rank': (4, 4), + 'build_fcn': (build_conv2d, TosaTensorGen.tgConv2D, TosaArgGen.agConv2D), + 'qgen': TosaQuantGen.qgConv, + 'types': TYPE_FP, + 'template': True }, + + # Templated operator. Filled in by createDynamicOpLists + 'depthwise_conv2d_TEMPLATE': + { 'op': Op.DEPTHWISE_CONV2D, + 'operands': (1, 2), + 'filter': [1, 1], + 'rank': (4, 4), + 'build_fcn': (build_depthwise_conv2d, TosaTensorGen.tgDepthwiseConv2D, TosaArgGen.agConv2D), + 'qgen': TosaQuantGen.qgConv, + 'types': TYPE_FP, + 'template': True }, + + # Templated operator. 
Filled in by createDynamicOpLists + 'transpose_conv2d_TEMPLATE': + { 'op': Op.TRANSPOSE_CONV2D, + 'operands': (1, 1), + 'rank': (4, 4), + 'build_fcn': (build_transpose_conv2d, TosaTensorGen.tgTransposeConv2D, TosaArgGen.agTransposeConv2D), + 'qgen': TosaQuantGen.qgConv, + 'types': TYPE_FP, + 'template': True }, + + 'fully_connected': + { 'op': Op.FULLY_CONNECTED, + 'operands': (2, 0), + 'rank': (2, 2), + 'build_fcn': (build_fully_connected, TosaTensorGen.tgFullyConnected, None), + 'qgen': TosaQuantGen.qgConv, + 'types': TYPE_FP }, + + 'matmul': + { 'op': Op.MATMUL, + 'operands': (2, 0), + 'rank': (2, 2), + 'build_fcn': (build_matmul, TosaTensorGen.tgMatmul, None), + 'qgen': TosaQuantGen.qgMatmul, + 'types': TYPE_NARROW_INT_FP }, + + # Unary operators + 'abs': + { 'op': Op.ABS, + 'operands': (1, 0), + 'build_fcn': (build_unary, TosaTensorGen.tgBasic, None), + 'types': TYPE_FI32 }, + + 'bitwise_not': + { 'op': Op.BITWISE_NOT, + 'operands': (1, 0), + 'build_fcn': (build_unary, TosaTensorGen.tgBasic, None), + 'types': TYPE_INT }, + + 'ceil': + { 'op': Op.CEIL, + 'operands': (1, 0), + 'build_fcn': (build_unary, TosaTensorGen.tgBasic, None), + 'types': TYPE_FP }, + + 'clz': + { 'op': Op.CLZ, + 'operands': (1, 0), + 'build_fcn': (build_unary, TosaTensorGen.tgBasic, None), + 'types': [ DType.INT32 ] }, + + 'exp': + { 'op': Op.EXP, + 'operands': (1, 0), + 'build_fcn': (build_unary, TosaTensorGen.tgBasic, None), + 'types': TYPE_FP }, + + 'floor': + { 'op': Op.FLOOR, + 'operands': (1, 0), + 'build_fcn': (build_unary, TosaTensorGen.tgBasic, None), + 'types': TYPE_FP }, + + 'log': + { 'op': Op.LOG, + 'operands': (1, 0), + 'build_fcn': (build_unary, TosaTensorGen.tgBasic, None), + 'types': TYPE_FP }, + + 'floor': + { 'op': Op.FLOOR, + 'operands': (1, 0), + 'build_fcn': (build_unary, TosaTensorGen.tgBasic, None), + 'types': TYPE_FP }, + + 'logical_not': + { 'op': Op.LOGICAL_NOT, + 'operands': (1, 0), + 'build_fcn': (build_unary, TosaTensorGen.tgBasic, None), + 'types': TYPE_BOOL 
}, + + 'negate': + { 'op': Op.NEGATE, + 'operands': (1, 0), + 'build_fcn': (build_unary, TosaTensorGen.tgBasic, None), + 'qgen': TosaQuantGen.qgUnary, + 'types': TYPE_INT_FP }, + + 'reciprocal': + { 'op': Op.RECIPROCAL, + 'operands': (1, 0), + 'build_fcn': (build_unary, TosaTensorGen.tgBasic, None), + 'types': TYPE_FP }, + + 'rsqrt': + { 'op': Op.RSQRT, + 'operands': (1, 0), + 'build_fcn': (build_unary, TosaTensorGen.tgBasic, None), + 'types': TYPE_FP }, + + # Ternary operators + 'select': + { 'op': Op.SELECT, + 'operands': (3, 0), + 'build_fcn': (build_select, TosaTensorGen.tgBroadcastFuzz, None), + 'types': TYPE_FIB }, + + # Comparison operators + 'equal': + { 'op': Op.EQUAL, + 'operands': (2, 0), + 'build_fcn': (build_comparison, TosaTensorGen.tgBroadcastFuzz, None), + 'types': TYPE_FI32 }, + + 'greater_equal': + { 'op': Op.GREATER_EQUAL, + 'operands': (2, 0), + 'build_fcn': (build_comparison, TosaTensorGen.tgBroadcastFuzz, None), + 'types': TYPE_FI32 }, + + 'greater': + { 'op': Op.GREATER, + 'operands': (2, 0), + 'build_fcn': (build_comparison, TosaTensorGen.tgBroadcastFuzz, None), + 'types': TYPE_FI32 }, + + # Pooling operators + 'avg_pool2d': + { 'op': Op.AVG_POOL2D, + 'operands': (1, 0), + 'rank': (4, 4), + 'build_fcn': (build_pool2d, TosaTensorGen.tgNHWC, TosaArgGen.agPooling), + 'qgen': TosaQuantGen.qgUnary, + 'types': TYPE_NARROW_INT_FP }, + + + 'max_pool2d': + { 'op': Op.MAX_POOL2D, + 'operands': (1, 0), + 'rank': (4, 4), + 'build_fcn': (build_pool2d, TosaTensorGen.tgNHWC, TosaArgGen.agPooling), + 'types': TYPE_NARROW_INT_FP }, + + # Reduce operators + 'reduce_any': + { 'op': Op.REDUCE_ANY, + 'operands': (1, 0), + 'build_fcn': (build_reduce, TosaTensorGen.tgBasic, TosaArgGen.agAxis), + 'types': TYPE_BOOL }, + + 'reduce_all': + { 'op': Op.REDUCE_ALL, + 'operands': (1, 0), + 'build_fcn': (build_reduce, TosaTensorGen.tgBasic, TosaArgGen.agAxis), + 'types': TYPE_BOOL }, + + 'reduce_max': + { 'op': Op.REDUCE_MAX, + 'operands': (1, 0), + 'build_fcn': 
(build_reduce, TosaTensorGen.tgBasic, TosaArgGen.agAxis), + 'types': TYPE_INT_FP }, + + 'reduce_min': + { 'op': Op.REDUCE_MAX, + 'operands': (1, 0), + 'build_fcn': (build_reduce, TosaTensorGen.tgBasic, TosaArgGen.agAxis), + 'types': TYPE_INT_FP }, + + 'reduce_product': + { 'op': Op.REDUCE_PRODUCT, + 'operands': (1, 0), + 'build_fcn': (build_reduce, TosaTensorGen.tgBasic, TosaArgGen.agAxis), + 'types': TYPE_FP }, + + 'reduce_sum': + { 'op': Op.REDUCE_SUM, + 'operands': (1, 0), + 'build_fcn': (build_reduce, TosaTensorGen.tgBasic, TosaArgGen.agAxis), + 'types': TYPE_FI32 }, + + # Activation functions + 'clamp': + { 'op': Op.CLAMP, + 'operands': (1, 0), + 'build_fcn': (build_clamp, TosaTensorGen.tgBasic, None), + 'types': TYPE_NARROW_INT_FP }, + + 'relun': + { 'op': Op.RELUN, + 'operands': (1, 0), + 'build_fcn': (build_relun, TosaTensorGen.tgBasic, None), + 'types': TYPE_FI32 }, + + 'sigmoid': + { 'op': Op.SIGMOID, + 'operands': (1, 0), + 'build_fcn': (build_sigmoid, TosaTensorGen.tgBasic, None), + 'types': TYPE_FP }, + + 'tanh': + { 'op': Op.TANH, + 'operands': (1, 0), + 'build_fcn': (build_tanh, TosaTensorGen.tgBasic, None), + 'types': TYPE_FP }, + + # Data layout operators + 'concat': + { 'op': Op.CONCAT, + 'operands': (2, 0), + 'build_fcn': (build_concat, TosaTensorGen.tgBasic, TosaArgGen.agAxis), + 'types': TYPE_FIB }, + + 'pad': + { 'op': Op.PAD, + 'operands': (1, 0), + 'build_fcn': (build_pad, TosaTensorGen.tgBasic, TosaArgGen.agPad), + 'qgen': TosaQuantGen.qgPad, + 'types': TYPE_FIB }, + + 'reshape': + { 'op': Op.RESHAPE, + 'operands': (1, 0), + 'build_fcn': (build_reshape, TosaTensorGen.tgBasic, TosaArgGen.agReshape), + 'types': TYPE_FIB }, + + 'reverse': + { 'op': Op.REVERSE, + 'operands': (1, 0), + 'build_fcn': (build_reverse, TosaTensorGen.tgBasic, TosaArgGen.agAxis), + 'types': TYPE_FIB }, + + 'slice': + { 'op': Op.SLICE, + 'operands': (1, 0), + 'build_fcn': (build_slice, TosaTensorGen.tgBasic, TosaArgGen.agSlice), + 'types': TYPE_FIB }, + + 'tile': + { 
'op': Op.TILE, + 'operands': (1, 0), + 'build_fcn': (build_tile, TosaTensorGen.tgBasic, TosaArgGen.agTile), + 'types': TYPE_FIB }, + + 'transpose': + { 'op': Op.TRANSPOSE, + 'operands': (1, 0), + 'rank': (2, 4), # Do not allow tranpose on rank=1 + 'build_fcn': (build_transpose, TosaTensorGen.tgBasic, TosaArgGen.agTranspose), + 'types': TYPE_FIB }, + + # Scatter/Gather + 'gather': + { 'op': Op.GATHER, + 'operands': (1, 0), + 'build_fcn': (build_gather, TosaTensorGen.tgBasic, TosaArgGen.agAxis), + 'types': TYPE_INT }, + + + # Image operations + 'resize': + { 'op': Op.RESIZE, + 'operands': (1, 0), + 'rank': (4, 4), + 'build_fcn': ( build_resize, TosaTensorGen.tgNHWC, TosaArgGen.agResize), + 'types': [ DType.INT8, DType.INT16 ] }, + + + # Data nodes + 'placeholder': + { 'op': Op.PLACEHOLDER, + 'operands': (1, 0), + 'build_fcn': ( build_placeholder, TosaTensorGen.tgBasic, None), + 'types': TYPE_FIB }, + + 'const': + { 'op': Op.CONST, + 'operands': (1, 0), + 'build_fcn': ( build_placeholder, TosaTensorGen.tgBasic, None), + 'types': TYPE_FIB }, + + + 'identity': + { 'op': Op.IDENTITY, + 'operands': (1, 0), + 'build_fcn': ( build_unary, TosaTensorGen.tgBasic, None), + 'types': TYPE_FIB }, + + + 'identityn': + { 'op': Op.IDENTITYN, + 'operands': (2, 0), + 'build_fcn': ( build_identityn, TosaTensorGen.tgBasic, None), + 'types': TYPE_FIB }, + + # Type conversion + 'cast': + { 'op': Op.CAST, + 'operands': (1, 0), + 'build_fcn': ( build_cast, TosaTensorGen.tgBasic, TosaArgGen.agCast ), + 'types': [ DType.FLOAT, DType.INT8, DType.INT16, DType.INT32, DType.BOOL ] }, + + 'rescale': + { 'op': Op.RESCALE, + 'operands': (1, 0), + 'build_fcn': ( build_rescale, TosaTensorGen.tgBasic, TosaArgGen.agRescale ), + 'types': [ DType.AINT8, DType.INT16, DType.INT32, DType.INT48 ] }, + + # Custom + # Not implemented. 
class OutputShaper:
    '''Computes the expected output shape and datatype for common classes of
    TOSA operations and registers the result as a new output tensor on the
    serializer (via ser.addOutput).

    All shape methods are static; the tensor arguments are expected to carry
    .shape (list of ints), .dtype, .usage and .dformat attributes.
    '''

    def __init__(self):
        pass

    @staticmethod
    def _accum_dtype(dtype):
        '''Accumulator/output dtype promotion shared by conv2d,
        depthwise_conv2d, transpose_conv2d and fully_connected.

        Raises Exception for dtypes with no defined accumulator type.
        '''
        if dtype == DType.AINT8 or dtype == DType.INT8:
            return DType.INT32
        elif dtype == DType.INT16:
            return DType.INT48
        elif dtype == DType.FLOAT:
            return DType.FLOAT
        else:
            raise Exception('Unsupported input dtype: {}'.format(dtype))

    # These methods return arguments that can be used for
    # creating a new output tensor
    @staticmethod
    def binaryBroadcastOp(ser, a, b):
        '''Elementwise binary op with broadcasting: a dimension of 1 in a
        broadcasts to b's dimension.'''
        assert(len(a.shape) == len(b.shape))
        assert(a.dtype == b.dtype)

        shape = []
        for i in range(len(a.shape)):
            if a.shape[i] == 1:
                shape.append(b.shape[i])
            else:
                shape.append(a.shape[i])

        return ser.addOutput(shape, a.dtype, a.usage, a.dformat)

    @staticmethod
    def binaryNonBroadcastOp(ser, a, b):
        '''Elementwise binary op where both inputs must have identical shapes.'''
        assert(len(a.shape) == len(b.shape))
        assert(a.dtype == b.dtype)

        shape = []
        for i in range(len(a.shape)):
            assert(a.shape[i] == b.shape[i])
            shape.append(a.shape[i])

        return ser.addOutput(shape, a.dtype, a.usage, a.dformat)

    @staticmethod
    def unaryOp(ser, a):
        '''Elementwise unary op: output matches the input exactly.'''
        return ser.addOutput(a.shape, a.dtype, a.usage, a.dformat)

    @staticmethod
    def selectOp(ser, cond, a, b):
        '''SELECT: output takes the max of cond/a/b along each dimension
        (per-dimension broadcast).'''
        assert(len(a.shape) == len(b.shape) and len(a.shape) == len(cond.shape))
        assert(a.dtype == b.dtype)

        shape = []
        for i in range(len(a.shape)):
            shape.append(max(cond.shape[i], a.shape[i], b.shape[i]))

        return ser.addOutput(shape, a.dtype, a.usage, a.dformat)

    @staticmethod
    def binaryComparisonOp(ser, a, b):
        '''Comparison op: broadcast like binaryBroadcastOp but the output
        dtype is forced to BOOL.'''
        assert(len(a.shape) == len(b.shape))
        assert(a.dtype == b.dtype)

        # Do broadcast
        shape = []
        for i in range(len(a.shape)):
            if a.shape[i] == 1:
                shape.append(b.shape[i])
            else:
                shape.append(a.shape[i])

        # Force the output type to bool
        return ser.addOutput(shape, DType.BOOL, a.usage, a.dformat)

    @staticmethod
    def reduceOp(ser, a, axis):
        '''Reduction: the reduced axis collapses to size 1 (keepdims).'''
        shape = a.shape.copy()

        shape[axis] = 1

        return ser.addOutput(shape, a.dtype, a.usage, a.dformat)

    @staticmethod
    def argmaxOp(ser, a, axis):
        '''ARGMAX: the reduced axis is removed and the output is INT32 indices.'''
        shape = a.shape.copy()
        del shape[axis]
        return ser.addOutput(shape, DType.INT32, a.usage, a.dformat)

    @staticmethod
    def conv2dOp(ser, ifm, filter, strides, padding, dilations):
        '''CONV2D output shape.

        IFM: NHWC, Filter: OHWI, OFM: NHWC.
        Marks the test as an expected failure when the parameter combination
        produces a non-positive spatial dimension.
        '''

        if len(padding) == 2:
            # Expand padding to 4 parameters in the case of transpose_conv2d
            # From H,W to T,B,L,R
            padding = [padding[0], padding[0], padding[1], padding[1]]

        h = (ifm.shape[1] - filter.shape[1] - (filter.shape[1] - 1) * (dilations[0] - 1) + \
             padding[0] + padding[1]) // strides[0] + 1

        w = (ifm.shape[2] - filter.shape[2] - (filter.shape[2] - 1) * (dilations[1] - 1) + \
             padding[2] + padding[3]) // strides[1] + 1

        if h <= 0 or w <= 0:
            # Invalid test parameters?
            h = 0
            w = 0
            ser.setExpectedFailure(True, 'Invalid combination of conv2d parameters')

        ofm_shape = [ifm.shape[0], h, w, filter.shape[0]]

        out_dtype = OutputShaper._accum_dtype(ifm.dtype)

        return ser.addOutput(ofm_shape, out_dtype, ifm.usage, ifm.dformat)

    @staticmethod
    def depthwiseConv2dOp(ser, ifm, filter, strides, padding, dilations):
        '''DEPTHWISE_CONV2D output shape.

        IFM: NHWC, Filter: HWCM, OFM: NHW(C*M).
        '''
        h = (ifm.shape[1] - filter.shape[0] - (filter.shape[0] - 1) * (dilations[0] - 1) + \
             padding[0] + padding[1]) // strides[0] + 1

        w = (ifm.shape[2] - filter.shape[1] - (filter.shape[1] - 1) * (dilations[1] - 1) + \
             padding[2] + padding[3]) // strides[1] + 1

        if h <= 0 or w <= 0:
            # Invalid test parameters?
            h = 0
            w = 0
            ser.setExpectedFailure(True, 'Invalid combination of conv2d parameters')

        # Output channels are input channels * channel multiplier
        ofm_shape = [ifm.shape[0], h, w, filter.shape[2] * filter.shape[3]]

        out_dtype = OutputShaper._accum_dtype(ifm.dtype)

        return ser.addOutput(ofm_shape, out_dtype, ifm.usage, ifm.dformat)

    @staticmethod
    def pool2dOp(ser, ifm, kernel, stride, pad):
        '''Pooling output shape.  Input: NHWC; dtype is unchanged.'''
        h = (ifm.shape[1] + pad[0] + pad[1] + stride[0] - kernel[0]) // stride[0]
        w = (ifm.shape[2] + pad[2] + pad[3] + stride[1] - kernel[1]) // stride[1]

        if h <= 0 or w <= 0:
            # Invalid test parameters?
            h = 0
            w = 0
            ser.setExpectedFailure(True, 'Invalid combination of pooling parameters')

        ofm_shape = [ifm.shape[0], h, w, ifm.shape[3]]
        return ser.addOutput(ofm_shape, ifm.dtype, ifm.usage, ifm.dformat)

    @staticmethod
    def fullyConnectedOp(ser, input, filter):
        '''FULLY_CONNECTED output shape.

        input: [N, IC], filter: [OC, IC], output: [N, OC].
        '''
        output_shape = [input.shape[0], filter.shape[0]]

        out_dtype = OutputShaper._accum_dtype(input.dtype)

        return ser.addOutput(output_shape, out_dtype, input.usage, input.dformat)

    @staticmethod
    def matmulOp(ser, a, b):
        '''MATMUL output shape.

        a: [M, K], b: [K, N], output: [M, N].
        NOTE: unlike the conv-style ops, matmul does not accept plain INT8
        input, so it keeps its own dtype mapping.
        '''
        output_shape = [a.shape[0], b.shape[1]]

        if a.dtype == DType.AINT8:
            out_dtype = DType.INT32
        elif a.dtype == DType.INT16:
            out_dtype = DType.INT48
        elif a.dtype == DType.FLOAT:
            out_dtype = DType.FLOAT
        else:
            raise Exception('Unsupported input dtype for matmul: {}'.format(a.dtype))

        return ser.addOutput(output_shape, out_dtype, a.usage, a.dformat)

    @staticmethod
    def concatOp(ser, a, b, axis):
        '''CONCAT: output is a's shape with the concat axis summed.'''
        output_shape = a.shape.copy()
        output_shape[axis] = a.shape[axis] + b.shape[axis]

        return ser.addOutput(output_shape, a.dtype, a.usage, a.dformat)

    @staticmethod
    def padOp(ser, a, padding):
        '''PAD: padding is a per-dimension list of [before, after] pairs.'''
        output_shape = a.shape.copy()

        for i in range(len(output_shape)):
            output_shape[i] = padding[i][0] + padding[i][1] + output_shape[i]

        return ser.addOutput(output_shape, a.dtype, a.usage, a.dformat)

    @staticmethod
    def reshapeOp(ser, a, shape):
        '''RESHAPE: a single -1 in the requested shape is inferred from the
        total element count of the input.'''
        output_shape = shape.copy()

        totalElements = 1
        for i in a.shape:
            totalElements *= i

        # If there are any -1 elements, figure out what that dimension must be
        totalOutputElements = 1
        for i in output_shape:
            if i != -1:
                totalOutputElements *= i

        # And fill it in
        for i in range(len(output_shape)):
            if output_shape[i] == -1:
                output_shape[i] = totalElements // totalOutputElements

        return ser.addOutput(output_shape, a.dtype, a.usage, a.dformat)

    @staticmethod
    def sliceOp(ser, a, begin, size):
        '''SLICE: output shape is exactly the requested size.'''
        output_shape = size.copy()
        return ser.addOutput(output_shape, a.dtype, a.usage, a.dformat)

    @staticmethod
    def tileOp(ser, a, multiples):
        '''TILE: each dimension is scaled by its multiple.'''
        output_shape = a.shape.copy()
        assert(len(multiples) == len(output_shape))

        for i in range(len(output_shape)):
            output_shape[i] = a.shape[i] * multiples[i]

        return ser.addOutput(output_shape, a.dtype, a.usage, a.dformat)

    @staticmethod
    def transposeOp(ser, a, perms):
        '''TRANSPOSE: output dimension i comes from input dimension perms[i].'''
        output_shape = a.shape.copy()
        assert(len(perms) == len(output_shape))

        for i in range(len(output_shape)):
            output_shape[i] = a.shape[perms[i]]

        return ser.addOutput(output_shape, a.dtype, a.usage, a.dformat)

    @staticmethod
    def gatherOp(ser, values, indicies, axis):
        '''GATHER: the gathered axis of values is replaced by the index
        count; indicies are the indexes used to look up values.'''
        output_shape = [*values.shape[0:axis], indicies.shape[0], *values.shape[axis+1:]]

        return ser.addOutput(output_shape, values.dtype, indicies.usage, indicies.dformat)

    @staticmethod
    def tableOp(ser, input, table):
        '''TABLE: same shape as the input; the output dtype is INT32.'''
        return ser.addOutput(input.shape, DType.INT32, input.usage, input.dformat)

    @staticmethod
    def resizeOp(ser, input, mode, stride, offset, shift, output_dims, output_dtype):
        '''RESIZE: NHWC output with the requested spatial dims; batch and
        channels are taken from the input.  Flags non-positive strides as an
        expected failure.'''
        output_dims = [input.shape[0], output_dims[0], output_dims[1], input.shape[3]]

        if stride[0] <= 0 or stride[1] <= 0:
            ser.setExpectedFailure(True, 'Negative or zero stride')

        return ser.addOutput(output_dims, output_dtype, input.usage, input.dformat)

    @staticmethod
    def typeConversionOp(ser, val, out_dtype):
        '''CAST/RESCALE: shape is preserved, only the dtype changes.'''
        return ser.addOutput(val.shape, out_dtype, val.usage, val.dformat)

    @staticmethod
    def transposeConv2DOp(ser, ifm, output_shape):
        '''TRANSPOSE_CONV2D: output shape is supplied by the caller; flags
        non-positive spatial dims as an expected failure.'''
        out_dtype = OutputShaper._accum_dtype(ifm.dtype)

        if output_shape[1] <= 0 or output_shape[2] <= 0:
            ser.setExpectedFailure(True, 'Negative output shape')

        return ser.addOutput(output_shape, out_dtype, ifm.usage, ifm.dformat)
def run_sh_command(args, full_cmd, capture_output=False):
    '''Run an external command, raising on a non-zero exit status.

    When capture_output is set, the captured (stdout, stderr) bytes are
    returned on success and echoed on failure.
    '''

    # Shell-quoted rendering of the command, used only for logging/errors.
    printable = ' '.join(shlex.quote(arg) for arg in full_cmd)

    if args.verbose:
        print('### Running {}'.format(printable))

    if not capture_output:
        proc = subprocess.run(full_cmd)
        if proc.returncode != 0:
            raise Exception('Error running command: {}'.format(printable))
        return None

    proc = subprocess.run(full_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    if proc.returncode != 0:
        print(proc.stdout.decode('utf-8'))
        print(proc.stderr.decode('utf-8'))
        raise Exception('Error running command: {}.\n{}'.format(printable, proc.stderr.decode('utf-8')))
    return (proc.stdout, proc.stderr)

class TosaTestRunner:
    '''Base class for system-under-test runner modules.

    Loads the json test descriptor from a generated test directory; concrete
    runners override runModel().
    '''

    def __init__(self, args, runnerArgs, testDir):
        self.args = args
        self.runnerArgs = runnerArgs
        self.testDir = testDir

        # Parse the descriptor written by the test generator
        descPath = os.path.join(testDir, 'desc.json')
        with open(descPath, 'r') as fd:
            self.testDesc = json.load(fd)

    def runModel(self):
        # Overridden by concrete runner implementations
        pass

    class Result(IntEnum):
        # Outcome of a single test run
        EXPECTED_PASS = 0
        EXPECTED_FAILURE = 1
        UNEXPECTED_PASS = 2
        UNEXPECTED_FAILURE = 3
        INTERNAL_ERROR = 4
# Used for parsing a comma-separated list of integers in a string
# to an actual list of integers
def str_to_list(in_s):
    '''Converts a comma-separated list of string integers to a python list of ints'''
    return [int(i) for i in in_s.split(',')]

def auto_int(x):
    '''Converts hex/dec argument values to an int'''
    return int(x, 0)

def parseArgs():
    '''Build and parse the command line for the test generator.

    Returns the parsed argparse namespace.
    '''

    parser = argparse.ArgumentParser()
    parser.add_argument('-o', dest='output_dir', type=str, default='vtest',
                        help='Test output directory')

    parser.add_argument('--seed', dest='random_seed', default=42, type=int,
                        help='Random seed for test generation')

    parser.add_argument('--filter', dest='filter', default='', type=str,
                        help='Filter operator test names by this expression')

    parser.add_argument('-v', '--verbose', dest='verbose', action='count',
                        help='Verbose operation')

    # Constraints on tests
    parser.add_argument('--tensor-dim-range', dest='tensor_shape_range', default='1,64',
                        type=str_to_list,
                        help='Min,Max range of tensor shapes')

    parser.add_argument('--max-batch-size', dest='max_batch_size', default=1, type=int,
                        help='Maximum batch size for NHWC tests')

    parser.add_argument('--max-conv-padding', dest='max_conv_padding', default=1, type=int,
                        help='Maximum padding for Conv tests')

    parser.add_argument('--max-conv-dilation', dest='max_conv_dilation', default=2, type=int,
                        help='Maximum dilation for Conv tests')

    parser.add_argument('--max-conv-stride', dest='max_conv_stride', default=2, type=int,
                        help='Maximum stride for Conv tests')

    parser.add_argument('--max-pooling-padding', dest='max_pooling_padding', default=1, type=int,
                        help='Maximum padding for pooling tests')

    parser.add_argument('--max-pooling-stride', dest='max_pooling_stride', default=2, type=int,
                        help='Maximum stride for pooling tests')

    # Fixed copy-paste error: this option limits the kernel size, not padding
    parser.add_argument('--max-pooling-kernel', dest='max_pooling_kernel', default=2, type=int,
                        help='Maximum kernel for pooling tests')

    parser.add_argument('--num-rand-permutations', dest='num_rand_permutations', default=6, type=int,
                        help='Number of random permutations for a given shape/rank for randomly-sampled parameter spaces')

    # Targeting a specific shape/rank/dtype
    parser.add_argument('--target-shape', dest='target_shapes', action='append', default=[], type=str_to_list,
                        help='Create tests with a particular input tensor shape, e.g., 1,4,4,8 (may be repeated for tests that require multiple input shapes)')

    parser.add_argument('--target-rank', dest='target_ranks', action='append', default=None, type=auto_int,
                        help='Create tests with a particular input tensor rank')

    # dtype_str_to_val comes from tosa_serializer's star import; keep the
    # lambda so the name is looked up lazily at option-parse time.
    parser.add_argument('--target-dtype', dest='target_dtypes', action='append', default=None, type=lambda x: dtype_str_to_val(x),
                        help='Create test with a particular DType (may be repeated)')

    args = parser.parse_args()

    return args
def main():
    '''Entry point: generate every TOSA test whose op name matches the filter.'''

    args = parseArgs()

    gen = TosaTestGen(args)

    # Collect the test list for every op whose name matches the filter prefix
    matched = []
    for opName in gen.TOSA_OP_LIST:
        if re.match(args.filter + '.*', opName):
            matched.extend(gen.genOpTestList(opName,
                                             shapeFilter=args.target_shapes,
                                             rankFilter=args.target_ranks,
                                             dtypeFilter=args.target_dtypes))

    print('{} matching tests'.format(len(matched)))
    for opName, testStr, dtype, shapeList, testArgs in matched:
        print(testStr)
        gen.serializeTest(opName, testStr, dtype, shapeList, testArgs)
    print('Done creating {} tests'.format(len(matched)))


if __name__ == '__main__':
    exit(main())
def parseArgs():
    '''Build and parse the command line for the reference-model test runner.

    Returns the parsed argparse namespace; a non-positive -j autodetects the
    CPU count.
    '''

    parser = argparse.ArgumentParser()
    parser.add_argument('-t', '--test', dest='test', type=str, nargs='+',
                        help='Test(s) to run')
    parser.add_argument('--seed', dest='random_seed', default=42, type=int,
                        help='Random seed for test generation')
    parser.add_argument('--ref-model-path', dest='ref_model_path',
                        default='build/reference_model/tosa_reference_model', type=str,
                        help='Path to reference model executable')
    parser.add_argument('--ref-debug', dest='ref_debug', default='', type=str,
                        help='Reference debug flag (low, med, high)')
    parser.add_argument('--ref-intermediates', dest='ref_intermediates', default=0, type=int,
                        help='Reference model dumps intermediate tensors')
    parser.add_argument('-v', '--verbose', dest='verbose', action='count',
                        help='Verbose operation')
    parser.add_argument('-j', '--jobs', dest='jobs', type=int, default=1,
                        help='Number of parallel jobs')
    parser.add_argument('--sut-module', '-s', dest='sut_module', type=str, nargs='+', default=['tosa_ref_run'],
                        help='System under test module to load (derives from TosaTestRunner).  May be repeated')
    parser.add_argument('--sut-module-args', dest='sut_module_args', type=str, nargs='+', default=[],
                        help='System under test module arguments.  Use sutmodulename:argvalue to pass an argument.  May be repeated.')
    parser.add_argument('--xunit-file', dest='xunit_file', type=str, default='result.xml',
                        help='XUnit output file')

    args = parser.parse_args()

    # Autodetect CPU count
    if args.jobs <= 0:
        args.jobs = os.cpu_count()

    return args

def workerThread(task_queue, runnerList, args, result_queue):
    '''Worker thread: drain task_queue, run every loaded runner module on
    each test, and push (test, rc, msg, elapsed) tuples to result_queue.'''
    while True:
        try:
            test = task_queue.get(block=False)
        except queue.Empty:
            break

        if test is None:
            break

        msg = ''
        start_time = datetime.now()
        try:

            for runnerModule, runnerArgs in runnerList:
                if args.verbose:
                    print('Running runner {} with test {}'.format(runnerModule.__name__, test))
                runner = runnerModule.TosaRefRunner(args, runnerArgs, test)
                try:
                    rc = runner.runModel()
                except Exception as e:
                    # Record the failure reason so it shows up in the xunit report
                    msg = str(e)
                    rc = TosaTestRunner.Result.INTERNAL_ERROR
        except Exception as e:
            print('Internal regression error: {}'.format(e))
            # format_exception's keyword names (etype/value/tb) were removed in
            # Python 3.10; pass the arguments positionally instead.
            print(''.join(traceback.format_exception(type(e), e, e.__traceback__)))
            msg = str(e)
            rc = TosaTestRunner.Result.INTERNAL_ERROR

        end_time = datetime.now()

        result_queue.put((test, rc, msg, end_time - start_time))
        task_queue.task_done()

    return True

def loadRefModules(args):
    '''Import each --sut-module and collect its module-specific arguments.

    Returns a list of (runner_module, [argument list]) tuples.
    '''
    runnerList = []
    for r in args.sut_module:
        if args.verbose:
            print('Loading module {}'.format(r))

        runner = importlib.import_module(r)

        # Look for arguments associated with this runner, passed as
        # "<modulename>:<value>" strings on --sut-module-args
        runnerArgPrefix = '{}:'.format(r)
        runnerArgList = []
        for a in args.sut_module_args:
            if a.startswith(runnerArgPrefix):
                runnerArgList.append(a[len(runnerArgPrefix):])
        runnerList.append((runner, runnerArgList))

    return runnerList

def main():
    '''Entry point: run the requested tests across a thread pool and write
    an xunit report.  Returns 0.'''
    args = parseArgs()

    runnerList = loadRefModules(args)

    threads = []
    taskQueue = queue.Queue()
    resultQueue = queue.Queue()

    for t in args.test:
        taskQueue.put(t)

    print('Running {} tests '.format(taskQueue.qsize()))

    for i in range(args.jobs):
        t = threading.Thread(target=workerThread, args=(taskQueue, runnerList, args, resultQueue))
        # setDaemon() is deprecated; assign the attribute directly
        t.daemon = True
        t.start()
        threads.append(t)

    taskQueue.join()

    resultList = []
    results = [0] * len(TosaTestRunner.Result)

    while True:
        try:
            test, rc, msg, time_delta = resultQueue.get(block=False)
        except queue.Empty:
            break

        resultList.append((test, rc, msg, time_delta))
        results[rc] += 1

    xunit_result = xunit.xunit_results('Regressions')
    xunit_suite = xunit_result.create_suite('Unit tests')

    # Sort by test name
    for test, rc, msg, time_delta in sorted(resultList, key=lambda tup: tup[0]):
        test_name = test
        xt = xunit.xunit_test(test_name, 'reference')

        xt.time = str(float(time_delta.seconds) + (float(time_delta.microseconds) * 1e-6))

        if rc == TosaTestRunner.Result.EXPECTED_PASS or rc == TosaTestRunner.Result.EXPECTED_FAILURE:
            if args.verbose:
                print('{} {}'.format(rc.name, test_name))
        else:
            xt.failed(msg)
            print('{} {}'.format(rc.name, test_name))

        xunit_suite.tests.append(xt)
        resultQueue.task_done()

    xunit_result.write_results(args.xunit_file)

    print('Totals: ', end='')
    for result in TosaTestRunner.Result:
        print('{} {}, '.format(results[result], result.name.lower()), end='')
    print()

    return 0

if __name__ == '__main__':
    exit(main())