author      Kevin Cheng <kevin.cheng@arm.com>   2021-03-03 11:21:43 -0800
committer   Kevin Cheng <kevin.cheng@arm.com>   2021-04-27 16:01:59 -0700
commit      550ccc52de231621c0bf0c05ae2a398eec37ff51 (patch)
tree        d4a5bd8d24560135784208c0fe35615b1d043249
parent      cf6224e6e8ba4fc2984de3e542538c38e27c9f57 (diff)
download    reference_model-550ccc52de231621c0bf0c05ae2a398eec37ff51.tar.gz
Replace serialization/ and verif/ with MLPlatform's serialization_lib submodule
- Remove Usage and Format
- Run black on verif/*.py scripts

Signed-off-by: Kevin Cheng <kevin.cheng@arm.com>
Change-Id: Ie81515891eb0039540f614894f4b6b0e0e78ba74
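In practical terms, dropping Usage and Format reduces the TosaReference::Tensor interface to a name, a dtype and a shape (the hasUsage/hasFormat/getIsConst queries go away with it). The snippet below is only an illustrative sketch based on the call site updated in control_flow.cc further down; the standalone wrapper function is assumed:

``` cpp
#include <string>
#include <vector>
#include "tensor.h"   // TosaReference::Tensor0 (reference_model/src/tensor.h)

using namespace tosa;

void construct_cond_output()
{
    // Old constructor (removed by this patch): name, dtype, usage list,
    // format list, shape, is_const.
    //
    // New constructor: just name, dtype and shape.
    TosaReference::Tensor0<bool> cond_output(std::string("cond_output"), DType_BOOL,
                                             std::vector<int32_t>({}));
    cond_output.allocate();   // returns non-zero on allocation failure
}
```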
-rw-r--r--  .gitmodules  6
-rw-r--r--  CMakeLists.txt  5
-rw-r--r--  README.md  7
-rw-r--r--  reference_model/CMakeLists.txt  8
-rw-r--r--  reference_model/src/main.cpp  2
-rw-r--r--  reference_model/src/ops/control_flow.cc  5
-rw-r--r--  reference_model/src/ops/tensor_ops.cc  63
-rw-r--r--  reference_model/src/subgraph_traverser.cc  223
-rw-r--r--  reference_model/src/tensor.cc  17
-rw-r--r--  reference_model/src/tensor.h  163
-rw-r--r--  serialization/CMakeLists.txt  32
-rw-r--r--  serialization/attribute.def  98
-rw-r--r--  serialization/attribute.h  181
-rw-r--r--  serialization/operator.def  124
-rw-r--r--  serialization/quant_info.def  43
-rw-r--r--  serialization/quant_info.h  164
-rw-r--r--  serialization/tosa.fbs  325
-rw-r--r--  serialization/tosa_generated.h  2683
-rw-r--r--  serialization/tosa_serialization_handler.cpp  1532
-rw-r--r--  serialization/tosa_serialization_handler.h  423
-rw-r--r--  thirdparty/CMakeLists.txt  5
m---------  thirdparty/flatbuffers  0
m---------  thirdparty/serialization_lib  0
-rw-r--r--  verif/tosa/ArithmeticRightShiftAttribute.py  45
-rw-r--r--  verif/tosa/Attribute.py  37
-rw-r--r--  verif/tosa/AxisAttribute.py  45
-rw-r--r--  verif/tosa/ClampAttribute.py  69
-rw-r--r--  verif/tosa/CondIfAttribute.py  53
-rw-r--r--  verif/tosa/Conv2dAttribute.py  109
-rw-r--r--  verif/tosa/ConvQuantInfo.py  53
-rw-r--r--  verif/tosa/CustomAttribute.py  45
-rw-r--r--  verif/tosa/DType.py  30
-rw-r--r--  verif/tosa/Format.py  27
-rw-r--r--  verif/tosa/MatMulQuantInfo.py  53
-rw-r--r--  verif/tosa/MulAttribute.py  45
-rw-r--r--  verif/tosa/Op.py  91
-rw-r--r--  verif/tosa/PadQuantInfo.py  45
-rw-r--r--  verif/tosa/Pool2dAttribute.py  109
-rw-r--r--  verif/tosa/QuantInfo.py  26
-rw-r--r--  verif/tosa/README.md  14
-rw-r--r--  verif/tosa/ReluNAttribute.py  53
-rw-r--r--  verif/tosa/RescaleAttribute.py  125
-rw-r--r--  verif/tosa/ReshapeAttribute.py  61
-rw-r--r--  verif/tosa/ResizeAttribute.py  173
-rw-r--r--  verif/tosa/ResizeMode.py  24
-rw-r--r--  verif/tosa/SliceAttribute.py  85
-rw-r--r--  verif/tosa/TileAttribute.py  61
-rw-r--r--  verif/tosa/TosaBasicBlock.py  123
-rw-r--r--  verif/tosa/TosaGraph.py  71
-rw-r--r--  verif/tosa/TosaOperator.py  117
-rw-r--r--  verif/tosa/TosaTensor.py  133
-rw-r--r--  verif/tosa/TransposeConv2dAttribute.py  133
-rw-r--r--  verif/tosa/UnaryQuantInfo.py  53
-rw-r--r--  verif/tosa/Usage.py  25
-rw-r--r--  verif/tosa/Version.py  69
-rw-r--r--  verif/tosa/WhileLoopAttribute.py  53
-rw-r--r--  verif/tosa/__init__.py  15
-rw-r--r--  verif/tosa_ref_run.py  32
-rw-r--r--  verif/tosa_serializer.py  405
-rw-r--r--  verif/tosa_test_gen.py  1915
-rw-r--r--  verif/tosa_test_runner.py  23
-rwxr-xr-x  verif/tosa_verif_build_tests.py  208
-rwxr-xr-x  verif/tosa_verif_run_ref.py  157
63 files changed, 1735 insertions, 9384 deletions
diff --git a/.gitmodules b/.gitmodules
index 9a6276e..c112866 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -1,6 +1,6 @@
[submodule "thirdparty/eigen"]
path = thirdparty/eigen
url = https://gitlab.com/libeigen/eigen.git
-[submodule "thirdparty/flatbuffers"]
- path = thirdparty/flatbuffers
- url = https://github.com/google/flatbuffers
+[submodule "thirdparty/serialization_lib"]
+ path = thirdparty/serialization_lib
+ url = https://review.mlplatform.org/tosa/serialization_lib
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 19c5824..04141aa 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -3,15 +3,10 @@ cmake_minimum_required (VERSION 3.4)
set(CMAKE_INSTALL_PREFIX ".")
project(tosa_tools LANGUAGES CXX)
-option(TOSA_TOOLS_BUILD_SERIALIZATION "Enable building of Tosa Serialization Library" ON)
option(TOSA_TOOLS_BUILD_REFERENCE_MODEL "Enable building of Tosa Reference Model" ON)
add_subdirectory(thirdparty)
-if(TOSA_TOOLS_BUILD_SERIALIZATION)
- add_subdirectory(serialization)
-endif()
-
if(TOSA_TOOLS_BUILD_REFERENCE_MODEL)
add_subdirectory(reference_model)
endif()
diff --git a/README.md b/README.md
index 47901e5..396fdb8 100644
--- a/README.md
+++ b/README.md
@@ -29,8 +29,8 @@ tools:
* GCC (tested with 7.5.0) or Clang C++ compiler (tested with clang-9)
with C++17 support
-The model includes the TOSA Serialization Library, Eigen 3.3.7, and
-FlatBuffers 1.11.0 as git submodules. The model is written using
+The model includes the TOSA Serialization Library and Eigen 3.3.7
+as git submodules. The model is written using
C++17 and has been primarily tested on Ubuntu x86_64 18.04 LTS Linux
systems.
@@ -42,8 +42,7 @@ The testing infrastructure requires:
Check out the required git submodules with:
``` bash
-$ git submodule init
-$ git submodule update
+$ git submodule update --init --recursive
```
# Compilation
diff --git a/reference_model/CMakeLists.txt b/reference_model/CMakeLists.txt
index 0ba8afb..153a5bd 100644
--- a/reference_model/CMakeLists.txt
+++ b/reference_model/CMakeLists.txt
@@ -26,8 +26,8 @@ else()
set(CMAKE_CXX_FLAGS "-Wall -Wno-ignored-attributes")
endif()
-set(FLATBUFFERS_DIR "../thirdparty/flatbuffers/")
-set(SERIALIZATION_DIR "../serialization")
+set(FLATBUFFERS_DIR "../thirdparty/serialization_lib/third_party/flatbuffers/")
+set(SERIALIZATION_DIR "../thirdparty/serialization_lib/")
set (CXX_SOURCE
src/main.cpp
@@ -64,13 +64,13 @@ target_include_directories(tosa_reference_model
${FLATBUFFERS_DIR}/include
../thirdparty/eigen/
../thirdparty/eigen/unsupported/
- ${SERIALIZATION_DIR}
+ ${SERIALIZATION_DIR}/include
)
target_link_libraries(tosa_reference_model
PRIVATE
+ tosa_serialization_lib
flatbuffers
- tosa_serialization
)
install (TARGETS tosa_reference_model DESTINATION bin)
diff --git a/reference_model/src/main.cpp b/reference_model/src/main.cpp
index ec2fdc9..240d913 100644
--- a/reference_model/src/main.cpp
+++ b/reference_model/src/main.cpp
@@ -15,8 +15,6 @@
#include <stdio.h>
-#include "flatbuffers/idl.h"
-#include "flatbuffers/util.h"
#include "model_common.h"
#include "ops/op_factory.h"
#include "subgraph_traverser.h"
diff --git a/reference_model/src/ops/control_flow.cc b/reference_model/src/ops/control_flow.cc
index 9d5db40..827e01f 100644
--- a/reference_model/src/ops/control_flow.cc
+++ b/reference_model/src/ops/control_flow.cc
@@ -292,9 +292,8 @@ int OpWhileLoop::checkTensorAttributes()
int OpWhileLoop::eval()
{
- TosaReference::Tensor0<bool> cond_output_ctensor(
- std::string("cond_output"), DType_BOOL, std::vector<Usage>({ Usage_ACTIVATION }),
- std::vector<Format>({ Format_UNKNOWN }), std::vector<int32_t>({}), false);
+ TosaReference::Tensor0<bool> cond_output_ctensor(std::string("cond_output"), DType_BOOL,
+ std::vector<int32_t>({}));
cond_output_ctensor.allocate();
std::vector<TosaReference::Tensor*> cond_block_outputs;
diff --git a/reference_model/src/ops/tensor_ops.cc b/reference_model/src/ops/tensor_ops.cc
index d6cd1cd..b8c7ade 100644
--- a/reference_model/src/ops/tensor_ops.cc
+++ b/reference_model/src/ops/tensor_ops.cc
@@ -103,12 +103,6 @@ int OpAvgPool2d<Dtype>::checkTensorAttributes()
in = dynamic_cast<TosaReference::TensorTemplate<TIn>*>(inputs[0]);
out = dynamic_cast<TosaReference::TensorTemplate<TOut>*>(outputs[0]);
- if (!in->hasFormat(Format_NHWC))
- {
- printNodeValidationError("OpAvgPool2d: unsupported tensor format");
- return 1;
- }
-
if (attribute->padding().size() != 4)
{
printNodeValidationError("OpAvgPool2d: illegal size for attribute padding");
@@ -321,28 +315,11 @@ int OpConv2d<InDtype, WeightDtype>::checkTensorAttributes()
printNodeValidationError("OpConv2d: bias tensor must be rank 1");
}
- if (inputs[1]->getIsConst() == 0)
- {
- printNodeValidationError("OpConv2d: weight tensor is not const typed");
- }
-
input = dynamic_cast<TosaReference::TensorTemplate<TIn>*>(inputs[0]);
weight = dynamic_cast<TosaReference::TensorTemplate<TWeight>*>(inputs[1]);
bias = dynamic_cast<TosaReference::TensorTemplate<TBias>*>(inputs[2]);
output = dynamic_cast<TosaReference::TensorTemplate<TAcc>*>(outputs[0]);
- if (!input->hasFormat(Format_NHWC))
- {
- printNodeValidationError("OpConv2d: unsupported input tensor format");
- return 1;
- }
-
- if (!weight->hasFormat(Format_OHWI))
- {
- printNodeValidationError("OpConv2d: unsupported weight tensor format");
- return 1;
- }
-
if (attribute->padding().size() != 4)
{
printNodeValidationError("OpConv2d: illegal size for attribute padding");
@@ -530,28 +507,11 @@ int OpDepthwiseConv2d<InDtype, WeightDtype>::checkTensorAttributes()
printNodeValidationError("OpDepthwiseConv2d: bias tensor must be rank 1");
}
- if (inputs[1]->getIsConst() == 0)
- {
- printNodeValidationError("OpDepthwiseConv2d: weight tensor is not const typed");
- }
-
input = dynamic_cast<TosaReference::TensorTemplate<TIn>*>(inputs[0]);
weight = dynamic_cast<TosaReference::TensorTemplate<TWeight>*>(inputs[1]);
bias = dynamic_cast<TosaReference::TensorTemplate<TBias>*>(inputs[2]);
output = dynamic_cast<TosaReference::TensorTemplate<TAcc>*>(outputs[0]);
- if (!input->hasFormat(Format_NHWC))
- {
- printNodeValidationError("OpDepthwiseConv2d: unsupported input tensor format");
- return 1;
- }
-
- if (!weight->hasFormat(Format_HWIM))
- {
- printNodeValidationError("OpDepthwiseConv2d: unsupported weight tensor format");
- return 1;
- }
-
if (attribute->padding().size() != 4)
{
printNodeValidationError("OpDepthwiseConv2d: illegal size for attribute padding");
@@ -881,12 +841,6 @@ int OpMaxPool2d<Dtype>::checkTensorAttributes()
in = dynamic_cast<TosaReference::TensorTemplate<TIn>*>(inputs[0]);
out = dynamic_cast<TosaReference::TensorTemplate<TOut>*>(outputs[0]);
- if (!in->hasFormat(Format_NHWC))
- {
- printNodeValidationError("OpMaxPool2d: unsupported tensor format");
- return 1;
- }
-
if (attribute->padding().size() != 4)
{
printNodeValidationError("OpMaxPool2d: illegal size for attribute padding");
@@ -1021,28 +975,11 @@ int OpTransposeConv2d<InDtype, OutDtype>::checkTensorAttributes()
return 1;
}
- if (inputs[1]->getIsConst() == 0)
- {
- printNodeValidationError("OpTransposeConv2d: weight tensor is not const typed");
- }
-
input = dynamic_cast<TosaReference::TensorTemplate<TIn>*>(inputs[0]);
weight = dynamic_cast<TosaReference::TensorTemplate<TWeight>*>(inputs[1]);
bias = dynamic_cast<TosaReference::TensorTemplate<TBias>*>(inputs[2]);
output = dynamic_cast<TosaReference::TensorTemplate<TAcc>*>(outputs[0]);
- if (!input->hasFormat(Format_NHWC))
- {
- printNodeValidationError("OpTransposeConv2d: unsupported input tensor format");
- return 1;
- }
-
- if (!weight->hasFormat(Format_OHWI))
- {
- printNodeValidationError("OpTransposeConv2d: unsupported weight tensor format");
- return 1;
- }
-
if (attribute->outpad().size() != 2)
{
printNodeValidationError("OpTransposeConv2d: illegal size for attribute outpad");
diff --git a/reference_model/src/subgraph_traverser.cc b/reference_model/src/subgraph_traverser.cc
index 082f802..5096ffa 100644
--- a/reference_model/src/subgraph_traverser.cc
+++ b/reference_model/src/subgraph_traverser.cc
@@ -103,110 +103,118 @@ int SubgraphTraverser::initializeGraph()
for (auto op : block->GetOperators())
{
// translated TosaSerializationOperator to GraphNode
- DType in_dtype = DType_UNKNOWN, out_dtype = DType_UNKNOWN, weight_dtype = DType_UNKNOWN;
- uint32_t in_rank = 0, out_rank = 0, weight_rank = 0;
- for (auto name : op->GetInputTensorNames())
- {
-
- TosaSerializationTensor* ts = block->GetTensorByName(name);
- ASSERT_MSG(ts, "SubgraphTraverser: fail to get tensor %s from TosaSerializationHandler", name.c_str());
-
- if (ts->HasUsage(Usage_WEIGHT))
- {
- weight_dtype = ts->GetDtype();
- weight_rank = ts->GetShape().size();
- }
- else if (ts->HasUsage(Usage_INDEX))
- {
- // do nothing, but this will prevent tensor's dtype/rank being wrongly used as template argument when initializing this op
- }
- else if (ts->HasUsage(Usage_ACTIVATION))
- {
- if (ts->GetShape().size() >= in_rank)
- {
- in_dtype = ts->GetDtype();
- in_rank = ts->GetShape().size();
- }
- }
- }
-
- // if dtype/rank still not initialized with above pass, we initialize without Usage check
- if (in_dtype == DType_UNKNOWN && in_rank == 0)
- {
- for (auto name : op->GetInputTensorNames())
- {
- TosaSerializationTensor* ts = block->GetTensorByName(name);
- ASSERT_MSG(ts, "SubgraphTraverser: fail to get tensor %s from TosaSerializationHandler", name.c_str());
-
- if (ts->GetShape().size() >= in_rank)
- {
- in_dtype = ts->GetDtype();
- in_rank = ts->GetShape().size();
- }
- }
- }
-
- for (auto name : op->GetOutputTensorNames())
- {
-
- TosaSerializationTensor* ts = block->GetTensorByName(name);
- ASSERT_MSG(ts, "SubgraphTraverser: fail to get tensor %s from TosaSerializationHandler", name.c_str());
-
- out_dtype = ts->GetDtype();
- out_rank = ts->GetShape().size();
- }
+ DType input_dtype = DType_UNKNOWN;
+ DType output_dtype = DType_UNKNOWN;
+ DType weight_dtype = DType_UNKNOWN;
+ uint32_t input_rank = 0;
+ uint32_t output_rank = 0;
+ uint32_t weight_rank = 0;
+ int32_t input_index = -1;
+ int32_t weight_index = -1;
+
+ switch (op->GetOp())
+ {
+ case Op_CONV2D:
+ case Op_DEPTHWISE_CONV2D:
+ case Op_TRANSPOSE_CONV2D:
+ case Op_FULLY_CONNECTED:
+ input_index = 0;
+ weight_index = 1;
+ break;
+ case Op_SELECT:
+ input_index = 1;
+ break;
+ default:
+ if (!op->GetInputTensorNames().empty())
+ input_index = 0;
+ break;
+ }
+
+ if (input_index != -1)
+ {
+ ASSERT_MSG((size_t)input_index < op->GetInputTensorNames().size(),
+ "Op=%s, input_index %d must be within [0, num_input - 1]", EnumNamesOp()[op->GetOp()],
+ input_index);
+
+ std::string input_name = op->GetInputTensorNames()[input_index];
+ TosaSerializationTensor* input_tensor = block->GetTensorByName(input_name);
+ ASSERT_MSG(input_tensor, "SubgraphTraverser: fail to get input tensor %s from TosaSerializationHandler",
+ input_name.c_str());
+ input_dtype = input_tensor->GetDtype();
+ input_rank = input_tensor->GetShape().size();
+ }
+
+ if (weight_index != -1)
+ {
+ ASSERT_MSG((size_t)weight_index < op->GetInputTensorNames().size(),
+ "Op=%s, weight_index %d must be within [0, num_input - 1]", EnumNamesOp()[op->GetOp()],
+ weight_index);
+ std::string weight_name = op->GetInputTensorNames()[weight_index];
+ TosaSerializationTensor* weight_tensor = block->GetTensorByName(weight_name);
+ ASSERT_MSG(weight_tensor, "SubgraphTraverser: fail to get weight tensor %s from TosaSerializationHandler",
+ weight_name.c_str());
+ weight_dtype = weight_tensor->GetDtype();
+ weight_rank = weight_tensor->GetShape().size();
+ }
+
+ std::string output_name = op->GetOutputTensorNames()[0];
+ TosaSerializationTensor* output_tensor = block->GetTensorByName(output_name);
+ ASSERT_MSG(output_tensor, "SubgraphTraverser: fail to get output tensor %s from TosaSerializationHandler",
+ output_name.c_str());
+ output_dtype = output_tensor->GetDtype();
+ output_rank = output_tensor->GetShape().size();
DEBUG_INFO(GT, "Creating operator id_%03u, %8s, %lu input tensors, %lu output tensors", idx,
EnumNamesOp()[op->GetOp()], op->GetInputTensorNames().size(), op->GetOutputTensorNames().size());
- GraphNode* cn = OpFactory::newOp(tsh, op->GetOp(), op->GetAttribute(), op->GetQInfo(), idx, in_dtype, in_rank,
- out_dtype, out_rank, weight_dtype, weight_rank);
- if (!cn)
+ GraphNode* node = OpFactory::newOp(tsh, op->GetOp(), op->GetAttribute(), op->GetQInfo(), idx, input_dtype,
+ input_rank, output_dtype, output_rank, weight_dtype, weight_rank);
+ if (!node)
{
- if (weight_dtype == DType_UNKNOWN && weight_rank == 0)
+ if (weight_index == -1)
{
fprintf(g_func_debug.func_debug_file,
"OpFactory could not allocate op %8s input=(%s rank %d) -> (%s rank %d)",
- EnumNamesOp()[op->GetOp()], EnumNamesDType()[in_dtype], in_rank, EnumNamesDType()[out_dtype],
- out_rank);
+ EnumNamesOp()[op->GetOp()], EnumNamesDType()[input_dtype], input_rank,
+ EnumNamesDType()[output_dtype], output_rank);
}
else
{
fprintf(g_func_debug.func_debug_file,
"OpFactory could not allocate op %8s input=(%s rank %d), weight=(%s rank %d) -> (%s rank %d)",
- EnumNamesOp()[op->GetOp()], EnumNamesDType()[in_dtype], in_rank, EnumNamesDType()[weight_dtype],
- weight_rank, EnumNamesDType()[out_dtype], out_rank);
+ EnumNamesOp()[op->GetOp()], EnumNamesDType()[input_dtype], input_rank,
+ EnumNamesDType()[weight_dtype], weight_rank, EnumNamesDType()[output_dtype], output_rank);
}
- for (auto ts : op->GetInputTensors())
+ for (auto& ts : op->GetInputTensorNames())
{
- fprintf(g_func_debug.func_debug_file, "Input: %s\n", ts->GetName().c_str());
+ fprintf(g_func_debug.func_debug_file, "Input: %s\n", ts.c_str());
}
- for (auto ts : op->GetOutputTensors())
+ for (auto& ts : op->GetOutputTensorNames())
{
- fprintf(g_func_debug.func_debug_file, "Output: %s\n", ts->GetName().c_str());
+ fprintf(g_func_debug.func_debug_file, "Output: %s\n", ts.c_str());
}
FATAL_ERROR("Unsupported operation type or rank.");
}
- for (auto name : op->GetInputTensorNames())
+ for (auto& name : op->GetInputTensorNames())
{
- cn->addInputName(name);
+ node->addInputName(name);
}
for (auto name : op->GetOutputTensorNames())
{
- cn->addOutputName(name);
+ node->addOutputName(name);
}
- addNode(cn);
+ addNode(node);
// if node doesn't have any inputs (i.e. CONST)
// it should be ready for evaluation
- if (op->GetInputTensorNames().empty() && !cn->getOnNextNodeList())
+ if (op->GetInputTensorNames().empty() && !node->getOnNextNodeList())
{
- addToNextNodeList(cn);
+ addToNextNodeList(node);
}
idx++;
@@ -215,47 +223,40 @@ int SubgraphTraverser::initializeGraph()
for (auto ts : block->GetTensors())
{
- bool is_const = false;
- if (ts->HasUsage(Usage_WEIGHT))
- {
- is_const = true;
- }
-
DEBUG_INFO(GT, "Creating tensor %s", ts->GetName().c_str());
- TosaReference::Tensor* ct =
- TensorFactory::newTensor(ts->GetName(), ts->GetDtype(), ts->GetUsage(), ts->GetFormat(), ts->GetShape(),
- is_const, ts->GetShape().size());
+ TosaReference::Tensor* tensor =
+ TensorFactory::newTensor(ts->GetName(), ts->GetDtype(), ts->GetShape(), ts->GetShape().size());
- if (ts->GetNpyFilePtr())
+ if (!ts->GetNpyFilePtr().empty())
{
- if (ct->allocate())
+ if (tensor->allocate())
{
- FATAL_ERROR("Fail to allocate Eigen tensor %s", ct->getName().c_str());
+ FATAL_ERROR("Fail to allocate Eigen tensor %s", tensor->getName().c_str());
}
bzero(tensor_fullname, sizeof(tensor_fullname));
snprintf(tensor_fullname, sizeof(tensor_fullname), "%s/%s", g_func_config.subgraph_dir,
- ts->GetNpyFilePtr()->c_str());
- if (ct->readFromNpyFile(tensor_fullname))
+ ts->GetNpyFilePtr().c_str());
+ if (tensor->readFromNpyFile(tensor_fullname))
{
- FATAL_ERROR("Cannot read input data into graph tensor %s from block %s", ct->getName().c_str(),
+ FATAL_ERROR("Cannot read input data into graph tensor %s from block %s", tensor->getName().c_str(),
block->GetName().c_str());
}
}
// update this->tensors
- addTensor(ct);
+ addTensor(tensor);
}
DEBUG_INFO(GT, "Enumerating block %s graph inputs", block->GetName().c_str());
for (auto& input_name : block->GetInputs())
{
- TosaReference::Tensor* ct = findTensorByName(input_name);
+ TosaReference::Tensor* tensor = findTensorByName(input_name);
DEBUG_INFO(GT, "input tensor name=%s", input_name.c_str());
- if (ct)
+ if (tensor)
{
- ct->setIsSubgraphInput();
- inputTensors.push_back(ct);
+ tensor->setIsSubgraphInput();
+ inputTensors.push_back(tensor);
}
else
{
@@ -266,12 +267,12 @@ int SubgraphTraverser::initializeGraph()
DEBUG_INFO(GT, "Enumerating block %s graph outputs", block->GetName().c_str());
for (auto& output_name : block->GetOutputs())
{
- TosaReference::Tensor* ct = findTensorByName(output_name);
+ TosaReference::Tensor* tensor = findTensorByName(output_name);
DEBUG_INFO(GT, "output tensor name=%s\n", output_name.c_str());
- if (ct)
+ if (tensor)
{
- ct->setIsSubgraphOutput();
- outputTensors.push_back(ct);
+ tensor->setIsSubgraphOutput();
+ outputTensors.push_back(tensor);
}
else
{
@@ -333,12 +334,12 @@ int SubgraphTraverser::evaluateNextNode()
WARNING("Node %lu has been evaluated %d times. Loop suspected.", currNode->getID(), currNode->getEvalCount());
}
- for (auto ct : currNode->getOutputs())
+ for (auto tensor : currNode->getOutputs())
{
- if (!ct->is_allocated())
- if (ct->allocate())
+ if (!tensor->is_allocated())
+ if (tensor->allocate())
{
- FATAL_ERROR("Fail to allocate Eigen tensor %s", ct->getName().c_str());
+ FATAL_ERROR("Fail to allocate Eigen tensor %s", tensor->getName().c_str());
}
}
@@ -348,26 +349,26 @@ int SubgraphTraverser::evaluateNextNode()
}
// free input tensor if all of its consumers have all of their outputs ready and it's not block's output
- for (auto ct : currNode->getInputs())
+ for (auto tensor : currNode->getInputs())
{
bool in_use = false;
- for (auto cn : ct->getConsumers())
+ for (auto node : tensor->getConsumers())
{
- if (!cn->hasAllOutputsReady())
+ if (!node->hasAllOutputsReady())
{
in_use = true;
}
}
for (auto name : block->GetOutputs())
{
- if (name == ct->getName())
+ if (name == tensor->getName())
{
in_use = true;
}
}
if (!in_use)
{
- ct->deallocate();
+ tensor->deallocate();
}
}
@@ -433,29 +434,29 @@ int SubgraphTraverser::clearAllNodeMarkings()
return false;
}
-int SubgraphTraverser::addTensor(TosaReference::Tensor* ct)
+int SubgraphTraverser::addTensor(TosaReference::Tensor* tensor)
{
// Enforce no duplicate tensors/tensor names
// O(N), but the number of tensors is small
for (TosaReference::Tensor* currTensor : tensors)
{
- if (ct == currTensor || currTensor->getName() == ct->getName())
+ if (tensor == currTensor || currTensor->getName() == tensor->getName())
{
- FATAL_ERROR("Error: Duplicate tensor or tensor name being added to graph: %s\n", ct->getName().c_str());
+ FATAL_ERROR("Error: Duplicate tensor or tensor name being added to graph: %s\n", tensor->getName().c_str());
return 1;
}
}
- tensors.push_back(ct);
+ tensors.push_back(tensor);
- if (ct->getIsSubgraphInput())
+ if (tensor->getIsSubgraphInput())
{
- inputTensors.push_back(ct);
+ inputTensors.push_back(tensor);
}
- if (ct->getIsSubgraphOutput())
+ if (tensor->getIsSubgraphOutput())
{
- outputTensors.push_back(ct);
+ outputTensors.push_back(tensor);
}
return 0;
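For readers following the new initializeGraph() logic above: instead of scanning tensor Usage flags, the operator kind now determines which operands supply the template dtype/rank. The sketch below isolates that rule as a standalone helper. It is an illustration only; the Op enum values come from the serialization library and the helper itself is hypothetical:

``` cpp
#include <cstdint>
#include <utility>
#include "tosa_generated.h"   // tosa::Op enum (from serialization_lib)

// Hypothetical helper mirroring the switch added in SubgraphTraverser::initializeGraph().
// Returns {input_index, weight_index}; -1 means "no such operand".
static std::pair<int32_t, int32_t> pickTemplateOperands(tosa::Op op, size_t num_inputs)
{
    switch (op)
    {
        case tosa::Op_CONV2D:
        case tosa::Op_DEPTHWISE_CONV2D:
        case tosa::Op_TRANSPOSE_CONV2D:
        case tosa::Op_FULLY_CONNECTED:
            return { 0, 1 };    // input is operand 0, weight is operand 1
        case tosa::Op_SELECT:
            return { 1, -1 };   // dtype/rank taken from the second operand
        default:
            return { num_inputs > 0 ? 0 : -1, -1 };   // first operand, if any; no weight
    }
}
```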
diff --git a/reference_model/src/tensor.cc b/reference_model/src/tensor.cc
index da81bcd..1efebe3 100644
--- a/reference_model/src/tensor.cc
+++ b/reference_model/src/tensor.cc
@@ -22,17 +22,11 @@ using namespace tosa;
TosaReference::Tensor::Tensor(std::string tensorName_,
DType tensorDtype_,
- const std::vector<Usage>& tensorUsage_,
- const std::vector<Format>& tensorFormat_,
- std::vector<int> shape_,
- int isConst_)
+ std::vector<int> shape_)
{
tensorName = std::string(tensorName_);
tensorDtype = tensorDtype_;
- tensorUsage = std::vector<Usage>(tensorUsage_);
- tensorFormat = std::vector<Format>(tensorFormat_);
shape = std::vector<int>(shape_);
- isConst = isConst_;
producer = nullptr;
isValid = false;
consumers.clear();
@@ -74,17 +68,16 @@ int TosaReference::Tensor::addConsumer(GraphNode* node)
int TosaReference::Tensor::dumpTensorParams(FILE* out) const
{
- fprintf(out, "Name: %s DType=%s Usage=%s isValid=%d Rank=%d Shape=%s\n", tensorName.c_str(),
- EnumNamesDType()[getDtype()], getUsageAsString().c_str(), getIsValid(), getRank(),
- getShapeAsString().c_str());
+ fprintf(out, "Name: %s DType=%s isValid=%d Rank=%d Shape=%s\n", tensorName.c_str(), EnumNamesDType()[getDtype()],
+ getIsValid(), getRank(), getShapeAsString().c_str());
return 0;
}
int TosaReference::Tensor::dumpTensorParams(std::ostream& out) const
{
- out << "Name: " << getName() << " DType=" << EnumNamesDType()[getDtype()] << " Usage=" << getUsageAsString()
- << " isValid=" << getIsValid() << " Rank=" << getRank() << " Shape=" << getShapeAsString() << "\n";
+ out << "Name: " << getName() << " DType=" << EnumNamesDType()[getDtype()] << " isValid=" << getIsValid()
+ << " Rank=" << getRank() << " Shape=" << getShapeAsString() << "\n";
return 0;
}
diff --git a/reference_model/src/tensor.h b/reference_model/src/tensor.h
index 4f77cfc..d39cc7c 100644
--- a/reference_model/src/tensor.h
+++ b/reference_model/src/tensor.h
@@ -35,10 +35,7 @@ class Tensor
public:
Tensor(std::string tensorName_,
DType tensorDtype__,
- const std::vector<Usage>& tensorUsage_,
- const std::vector<Format>& tensorFormat_,
- std::vector<int> shape_,
- int isConst_);
+ std::vector<int> shape_);
virtual ~Tensor();
@@ -75,11 +72,6 @@ public:
return isValid;
}
- int getIsConst() const
- {
- return isConst;
- }
-
GraphNode* getProducer()
{
return producer;
@@ -111,62 +103,6 @@ public:
return shape_str;
}
- const std::vector<Usage>& getUsage() const
- {
- return tensorUsage;
- }
-
- bool hasUsage(Usage usage) const
- {
- for (auto& usg : tensorUsage)
- {
- if (usg == usage)
- {
- return true;
- }
- }
- return false;
- }
-
- std::string getUsageAsString() const
- {
- std::string usage_str("[");
- for (auto& usg : tensorUsage)
- {
- usage_str += (std::string(EnumNamesUsage()[usg]) + ", ");
- }
- usage_str.append("]");
- return usage_str;
- }
-
- const std::vector<Format>& getFormat() const
- {
- return tensorFormat;
- }
-
- bool hasFormat(Format format) const
- {
- for (auto& fmt : tensorFormat)
- {
- if (fmt == format)
- {
- return true;
- }
- }
- return false;
- }
-
- std::string getFormatAsString() const
- {
- std::string format_str("[");
- for (auto& fmt : tensorFormat)
- {
- format_str += (std::string(EnumNamesFormat()[fmt]) + ", ");
- }
- format_str.append("]");
- return format_str;
- }
-
const uint32_t getElementCount() const
{
uint32_t elements = 1;
@@ -282,9 +218,6 @@ public:
protected:
std::string tensorName;
DType tensorDtype;
- std::vector<Usage> tensorUsage;
- std::vector<Format> tensorFormat;
- int isConst;
int isValid;
std::vector<int> shape;
int isSubgraphInput;
@@ -309,11 +242,8 @@ class TensorTemplate : public Tensor
public:
TensorTemplate(std::string tensorName_,
DType tensorDtype_,
- const std::vector<Usage>& tensorUsage_,
- const std::vector<Format>& tensorFormat_,
- std::vector<int> shape_,
- int isConst_)
- : Tensor(tensorName_, tensorDtype_, tensorUsage_, tensorFormat_, shape_, isConst_)
+ std::vector<int> shape_)
+ : Tensor(tensorName_, tensorDtype_, shape_)
{
tensor = nullptr;
}
@@ -678,10 +608,7 @@ class TensorFactory
public:
static Tensor* newTensor(std::string tensorName_,
DType tensorDtype_,
- const std::vector<Usage>& tensorUsage_,
- const std::vector<Format>& tensorFormat_,
std::vector<int> shape_,
- int isConst_,
const uint32_t rank)
{
switch (tensorDtype_)
@@ -690,26 +617,19 @@ public:
switch (rank)
{
case 0:
- return new Tensor0<float>(tensorName_, tensorDtype_, tensorUsage_, tensorFormat_, shape_,
- isConst_);
+ return new Tensor0<float>(tensorName_, tensorDtype_, shape_);
case 1:
- return new Tensor1<float>(tensorName_, tensorDtype_, tensorUsage_, tensorFormat_, shape_,
- isConst_);
+ return new Tensor1<float>(tensorName_, tensorDtype_, shape_);
case 2:
- return new Tensor2<float>(tensorName_, tensorDtype_, tensorUsage_, tensorFormat_, shape_,
- isConst_);
+ return new Tensor2<float>(tensorName_, tensorDtype_, shape_);
case 3:
- return new Tensor3<float>(tensorName_, tensorDtype_, tensorUsage_, tensorFormat_, shape_,
- isConst_);
+ return new Tensor3<float>(tensorName_, tensorDtype_, shape_);
case 4:
- return new Tensor4<float>(tensorName_, tensorDtype_, tensorUsage_, tensorFormat_, shape_,
- isConst_);
+ return new Tensor4<float>(tensorName_, tensorDtype_, shape_);
case 5:
- return new Tensor5<float>(tensorName_, tensorDtype_, tensorUsage_, tensorFormat_, shape_,
- isConst_);
+ return new Tensor5<float>(tensorName_, tensorDtype_, shape_);
case 6:
- return new Tensor6<float>(tensorName_, tensorDtype_, tensorUsage_, tensorFormat_, shape_,
- isConst_);
+ return new Tensor6<float>(tensorName_, tensorDtype_, shape_);
default:
goto done;
}
@@ -721,26 +641,19 @@ public:
switch (rank)
{
case 0:
- return new Tensor0<int32_t>(tensorName_, tensorDtype_, tensorUsage_, tensorFormat_, shape_,
- isConst_);
+ return new Tensor0<int32_t>(tensorName_, tensorDtype_, shape_);
case 1:
- return new Tensor1<int32_t>(tensorName_, tensorDtype_, tensorUsage_, tensorFormat_, shape_,
- isConst_);
+ return new Tensor1<int32_t>(tensorName_, tensorDtype_, shape_);
case 2:
- return new Tensor2<int32_t>(tensorName_, tensorDtype_, tensorUsage_, tensorFormat_, shape_,
- isConst_);
+ return new Tensor2<int32_t>(tensorName_, tensorDtype_, shape_);
case 3:
- return new Tensor3<int32_t>(tensorName_, tensorDtype_, tensorUsage_, tensorFormat_, shape_,
- isConst_);
+ return new Tensor3<int32_t>(tensorName_, tensorDtype_, shape_);
case 4:
- return new Tensor4<int32_t>(tensorName_, tensorDtype_, tensorUsage_, tensorFormat_, shape_,
- isConst_);
+ return new Tensor4<int32_t>(tensorName_, tensorDtype_, shape_);
case 5:
- return new Tensor5<int32_t>(tensorName_, tensorDtype_, tensorUsage_, tensorFormat_, shape_,
- isConst_);
+ return new Tensor5<int32_t>(tensorName_, tensorDtype_, shape_);
case 6:
- return new Tensor6<int32_t>(tensorName_, tensorDtype_, tensorUsage_, tensorFormat_, shape_,
- isConst_);
+ return new Tensor6<int32_t>(tensorName_, tensorDtype_, shape_);
default:
goto done;
}
@@ -748,26 +661,19 @@ public:
switch (rank)
{
case 0:
- return new Tensor0<int64_t>(tensorName_, tensorDtype_, tensorUsage_, tensorFormat_, shape_,
- isConst_);
+ return new Tensor0<int64_t>(tensorName_, tensorDtype_, shape_);
case 1:
- return new Tensor1<int64_t>(tensorName_, tensorDtype_, tensorUsage_, tensorFormat_, shape_,
- isConst_);
+ return new Tensor1<int64_t>(tensorName_, tensorDtype_, shape_);
case 2:
- return new Tensor2<int64_t>(tensorName_, tensorDtype_, tensorUsage_, tensorFormat_, shape_,
- isConst_);
+ return new Tensor2<int64_t>(tensorName_, tensorDtype_, shape_);
case 3:
- return new Tensor3<int64_t>(tensorName_, tensorDtype_, tensorUsage_, tensorFormat_, shape_,
- isConst_);
+ return new Tensor3<int64_t>(tensorName_, tensorDtype_, shape_);
case 4:
- return new Tensor4<int64_t>(tensorName_, tensorDtype_, tensorUsage_, tensorFormat_, shape_,
- isConst_);
+ return new Tensor4<int64_t>(tensorName_, tensorDtype_, shape_);
case 5:
- return new Tensor5<int64_t>(tensorName_, tensorDtype_, tensorUsage_, tensorFormat_, shape_,
- isConst_);
+ return new Tensor5<int64_t>(tensorName_, tensorDtype_, shape_);
case 6:
- return new Tensor6<int64_t>(tensorName_, tensorDtype_, tensorUsage_, tensorFormat_, shape_,
- isConst_);
+ return new Tensor6<int64_t>(tensorName_, tensorDtype_, shape_);
default:
goto done;
}
@@ -775,26 +681,19 @@ public:
switch (rank)
{
case 0:
- return new Tensor0<bool>(tensorName_, tensorDtype_, tensorUsage_, tensorFormat_, shape_,
- isConst_);
+ return new Tensor0<bool>(tensorName_, tensorDtype_, shape_);
case 1:
- return new Tensor1<bool>(tensorName_, tensorDtype_, tensorUsage_, tensorFormat_, shape_,
- isConst_);
+ return new Tensor1<bool>(tensorName_, tensorDtype_, shape_);
case 2:
- return new Tensor2<bool>(tensorName_, tensorDtype_, tensorUsage_, tensorFormat_, shape_,
- isConst_);
+ return new Tensor2<bool>(tensorName_, tensorDtype_, shape_);
case 3:
- return new Tensor3<bool>(tensorName_, tensorDtype_, tensorUsage_, tensorFormat_, shape_,
- isConst_);
+ return new Tensor3<bool>(tensorName_, tensorDtype_, shape_);
case 4:
- return new Tensor4<bool>(tensorName_, tensorDtype_, tensorUsage_, tensorFormat_, shape_,
- isConst_);
+ return new Tensor4<bool>(tensorName_, tensorDtype_, shape_);
case 5:
- return new Tensor5<bool>(tensorName_, tensorDtype_, tensorUsage_, tensorFormat_, shape_,
- isConst_);
+ return new Tensor5<bool>(tensorName_, tensorDtype_, shape_);
case 6:
- return new Tensor6<bool>(tensorName_, tensorDtype_, tensorUsage_, tensorFormat_, shape_,
- isConst_);
+ return new Tensor6<bool>(tensorName_, tensorDtype_, shape_);
default:
goto done;
}
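With Usage, Format and the const flag gone, TensorFactory::newTensor only needs a name, dtype, shape and rank. A minimal usage sketch, assuming the reduced factory signature shown above and that TensorFactory sits in the TosaReference namespace alongside Tensor; the shape values are made up for illustration:

``` cpp
#include <string>
#include <vector>
#include "tensor.h"   // TensorFactory, TosaReference::Tensor (reference_model/src)

using namespace tosa;

TosaReference::Tensor* make_example_tensor()
{
    std::vector<int> shape = { 1, 8, 8, 3 };   // illustrative rank-4 shape
    TosaReference::Tensor* t =
        TosaReference::TensorFactory::newTensor("example", DType_FLOAT, shape, shape.size());
    if (t && t->allocate())
    {
        // non-zero return from allocate() indicates failure; handling left to the caller
    }
    return t;   // nullptr for unsupported dtype/rank combinations
}
```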
diff --git a/serialization/CMakeLists.txt b/serialization/CMakeLists.txt
deleted file mode 100644
index 7bca824..0000000
--- a/serialization/CMakeLists.txt
+++ /dev/null
@@ -1,32 +0,0 @@
-cmake_minimum_required (VERSION 3.4)
-
-# Copyright (c) 2020, ARM Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-project (tosa)
-
-set (CMAKE_CXX_STANDARD 11)
-set (CMAKE_CXX_FLAGS "-g -Wall")
-set (FLATBUFFERS_SRC_DIR "../thirdparty/flatbuffers")
-
-set (SOURCE
- tosa_serialization_handler.cpp
-)
-
-add_library(tosa_serialization STATIC ${SOURCE})
-
-include_directories("./")
-
-target_link_libraries(tosa_serialization PRIVATE flatbuffers)
diff --git a/serialization/attribute.def b/serialization/attribute.def
deleted file mode 100644
index a146f47..0000000
--- a/serialization/attribute.def
+++ /dev/null
@@ -1,98 +0,0 @@
-
-// Copyright (c) 2020, ARM Limited.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/*
- Syntax:
- DEF_ATTRIBUTE(ATTRIBUTE_NAME, NUM_ARGS_IN_ATTRIBUTES, ARG0_TYPE, ARG0_SCALAR_OR_VECTOR, ARGS0_NAME, ...)
-
- Description:
- ATTRIBUTE_NAME: corresponding attribute name, must match corresponding "table XXXAttribute" in tosa.fbs
- NUM_ARGS_IN_ATTRIBUTES: number of arguments in this attribute
- ARG0_TYPE: data type of arg0 in attribute
- ARG0_SCALAR_OR_VECTOR: is arg0 a scalar(S) or a vector(V)
- ARG0_NAME: name of arg0
- ...: variadic variables for more arguments, depending on NUM_ARGS_IN_ATTRIBUTES
-*/
-
-DEF_ATTRIBUTE(Pool2d, 3,
- int32_t, V, padding,
- int32_t, V, kernel,
- int32_t, V, stride)
-
-DEF_ATTRIBUTE(Conv2d, 3,
- int32_t, V, padding,
- int32_t, V, stride,
- int32_t, V, dilation)
-
-DEF_ATTRIBUTE(TransposeConv2d, 4,
- int32_t, V, outpad,
- int32_t, V, stride,
- int32_t, V, dilation,
- int32_t, V, output_shape)
-
-DEF_ATTRIBUTE(ReluN, 2,
- int32_t, S, max_int,
- float, S, max_fp)
-
-DEF_ATTRIBUTE(Axis, 1,
- int32_t, S, axis)
-
-DEF_ATTRIBUTE(Reshape, 1,
- int32_t, V, shape)
-
-DEF_ATTRIBUTE(Slice, 2,
- int32_t, V, begin,
- int32_t, V, size)
-
-DEF_ATTRIBUTE(Tile, 1,
- int32_t, V, multiples)
-
-DEF_ATTRIBUTE(Resize, 7,
- int32_t, V, output_size,
- int32_t, V, stride,
- int32_t, V, offset,
- int32_t, S, shift,
- float, V, stride_fp,
- float, V, offset_fp,
- ResizeMode, S, mode)
-
-DEF_ATTRIBUTE(Clamp, 4,
- int32_t, S, min_int,
- int32_t, S, max_int,
- float, S, min_fp,
- float, S, max_fp)
-
-DEF_ATTRIBUTE(Rescale, 7,
- int32_t, S, input_zp,
- int32_t, S, output_zp,
- int32_t, V, multiplier,
- int32_t, V, shift,
- bool, S, scale32,
- bool, S, double_round,
- bool, S, per_channel)
-
-DEF_ATTRIBUTE(Mul, 1,
- int32_t, S, shift)
-
-DEF_ATTRIBUTE(ArithmeticRightShift, 1,
- bool, S, round)
-
-DEF_ATTRIBUTE(CondIf, 2,
- string, S, then_branch,
- string, S, else_branch)
-
-DEF_ATTRIBUTE(WhileLoop, 2,
- string, S, cond_branch,
- string, S, body_branch)
diff --git a/serialization/attribute.h b/serialization/attribute.h
deleted file mode 100644
index 2a33a8f..0000000
--- a/serialization/attribute.h
+++ /dev/null
@@ -1,181 +0,0 @@
-
-// Copyright (c) 2020, ARM Limited.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef _TOSA_SERIALIZATION_ATTRIBUTE_H
-#define _TOSA_SERIALIZATION_ATTRIBUTE_H
-#include "flatbuffers/idl.h"
-#include "flatbuffers/util.h"
-#include "tosa_generated.h"
-
-using std::string;
-
-namespace tosa
-{
-
-class TosaAttributeBase
-{
-public:
- virtual ~TosaAttributeBase()
- {}
-};
-
-class TosaNoneAttribute : public TosaAttributeBase
-{
-public:
- TosaNoneAttribute()
- {}
- TosaNoneAttribute(TosaNoneAttribute* p)
- {}
-};
-
-#define DEF_ARGS_VER0_S_STR(V) _##V = p->V()->str();
-#define DEF_ARGS_VER0_S_DEFAULT(V) _##V = p->V();
-
-#define DEF_ARGS_VER0_S_int32_t(V) DEF_ARGS_VER0_S_DEFAULT(V)
-#define DEF_ARGS_VER0_S_float(V) DEF_ARGS_VER0_S_DEFAULT(V)
-#define DEF_ARGS_VER0_S_bool(V) DEF_ARGS_VER0_S_DEFAULT(V)
-#define DEF_ARGS_VER0_S_ResizeMode(V) DEF_ARGS_VER0_S_DEFAULT(V)
-#define DEF_ARGS_VER0_S_string(V) DEF_ARGS_VER0_S_STR(V)
-
-#define DEF_ARGS_VER0_S(T, V) DEF_ARGS_VER0_S_##T(V)
-#define DEF_ARGS_VER0_V(T, V) _##V = std::vector<T>(p->V()->begin(), p->V()->end());
-
-#define DEF_ARGS_VER1_S(T, V) const T& V
-#define DEF_ARGS_VER1_V(T, V) const std::vector<T>& V
-#define DEF_ARGS_VER2_S(T, V) _##V = V;
-#define DEF_ARGS_VER2_V(T, V) _##V = V;
-#define DEF_ARGS_VER3_S(T, V) \
- T V() const \
- { \
- return _##V; \
- }
-#define DEF_ARGS_VER3_V(T, V) \
- std::vector<T> V() const \
- { \
- return _##V; \
- }
-#define DEF_ARGS_VER4_S(T, V) T _##V;
-#define DEF_ARGS_VER4_V(T, V) std::vector<T> _##V;
-
-// another level of preprocessor indirection to handle ", " as function's input argument
-#define DEF_ARGS_VER1_TRUE(T, F, V) DEF_ARGS_VER1_##F(T, V)
-#define DEF_ARGS_VER1_FALSE(T, F, V) , DEF_ARGS_VER1_##F(T, V)
-
-#define DEF_ARGS_VER0(FIRST, T, F, V) DEF_ARGS_VER0_##F(T, V)
-#define DEF_ARGS_VER1(FIRST, T, F, V) DEF_ARGS_VER1_##FIRST(T, F, V)
-#define DEF_ARGS_VER2(FIRST, T, F, V) DEF_ARGS_VER2_##F(T, V)
-#define DEF_ARGS_VER3(FIRST, T, F, V) DEF_ARGS_VER3_##F(T, V)
-#define DEF_ARGS_VER4(FIRST, T, F, V) DEF_ARGS_VER4_##F(T, V)
-
-#define DEF_ARGS_0(VER, ...)
-#define DEF_ARGS_1(VER, T0, F0, V0) DEF_ARGS_##VER(TRUE, T0, F0, V0)
-#define DEF_ARGS_2(VER, T0, F0, V0, T1, F1, V1) DEF_ARGS_##VER(TRUE, T0, F0, V0) DEF_ARGS_##VER(FALSE, T1, F1, V1)
-#define DEF_ARGS_3(VER, T0, F0, V0, T1, F1, V1, T2, F2, V2) \
- DEF_ARGS_##VER(TRUE, T0, F0, V0) DEF_ARGS_##VER(FALSE, T1, F1, V1) DEF_ARGS_##VER(FALSE, T2, F2, V2)
-#define DEF_ARGS_4(VER, T0, F0, V0, T1, F1, V1, T2, F2, V2, T3, F3, V3) \
- DEF_ARGS_##VER(TRUE, T0, F0, V0) DEF_ARGS_##VER(FALSE, T1, F1, V1) DEF_ARGS_##VER(FALSE, T2, F2, V2) \
- DEF_ARGS_##VER(FALSE, T3, F3, V3)
-#define DEF_ARGS_5(VER, T0, F0, V0, T1, F1, V1, T2, F2, V2, T3, F3, V3, T4, F4, V4) \
- DEF_ARGS_##VER(TRUE, T0, F0, V0) DEF_ARGS_##VER(FALSE, T1, F1, V1) DEF_ARGS_##VER(FALSE, T2, F2, V2) \
- DEF_ARGS_##VER(FALSE, T3, F3, V3) DEF_ARGS_##VER(FALSE, T4, F4, V4)
-
-#define DEF_ARGS_6(VER, T0, F0, V0, T1, F1, V1, T2, F2, V2, T3, F3, V3, T4, F4, V4, T5, F5, V5) \
- DEF_ARGS_##VER(TRUE, T0, F0, V0) DEF_ARGS_##VER(FALSE, T1, F1, V1) DEF_ARGS_##VER(FALSE, T2, F2, V2) \
- DEF_ARGS_##VER(FALSE, T3, F3, V3) DEF_ARGS_##VER(FALSE, T4, F4, V4) DEF_ARGS_##VER(FALSE, T5, F5, V5)
-
-#define DEF_ARGS_7(VER, T0, F0, V0, T1, F1, V1, T2, F2, V2, T3, F3, V3, T4, F4, V4, T5, F5, V5, T6, F6, V6) \
- DEF_ARGS_##VER(TRUE, T0, F0, V0) DEF_ARGS_##VER(FALSE, T1, F1, V1) DEF_ARGS_##VER(FALSE, T2, F2, V2) \
- DEF_ARGS_##VER(FALSE, T3, F3, V3) DEF_ARGS_##VER(FALSE, T4, F4, V4) DEF_ARGS_##VER(FALSE, T5, F5, V5) \
- DEF_ARGS_##VER(FALSE, T6, F6, V6)
-
-#define DEF_VER0_VAR_DECL_PTR(NAME) const NAME* p = static_cast<const NAME*>(options);
-#define DEF_VER0_VAR_0(NAME)
-#define DEF_VER0_VAR_1(NAME) DEF_VER0_VAR_DECL_PTR(NAME)
-#define DEF_VER0_VAR_2(NAME) DEF_VER0_VAR_DECL_PTR(NAME)
-#define DEF_VER0_VAR_3(NAME) DEF_VER0_VAR_DECL_PTR(NAME)
-#define DEF_VER0_VAR_4(NAME) DEF_VER0_VAR_DECL_PTR(NAME)
-#define DEF_VER0_VAR_5(NAME) DEF_VER0_VAR_DECL_PTR(NAME)
-#define DEF_VER0_VAR_6(NAME) DEF_VER0_VAR_DECL_PTR(NAME)
-#define DEF_VER0_VAR_7(NAME) DEF_VER0_VAR_DECL_PTR(NAME)
-
-#define DEF_ATTRIBUTE(NAME, NUM_ARGS, ...) \
- class Tosa##NAME##Attribute : public TosaAttributeBase \
- { \
- public: \
- Tosa##NAME##Attribute(const TosaAttributeBase* options) \
- { \
- const Tosa##NAME##Attribute* p = reinterpret_cast<const Tosa##NAME##Attribute*>(options); \
- *this = *p; \
- } \
- Tosa##NAME##Attribute(const Tosa##NAME##Attribute* p) \
- { \
- *this = *p; \
- } \
- Tosa##NAME##Attribute(const void* options){ DEF_VER0_VAR_##NUM_ARGS(NAME##Attribute) \
- DEF_ARGS_##NUM_ARGS(VER0, __VA_ARGS__) } Tosa##NAME \
- ##Attribute(DEF_ARGS_##NUM_ARGS(VER1, __VA_ARGS__)) \
- { \
- DEF_ARGS_##NUM_ARGS(VER2, __VA_ARGS__) \
- } \
- virtual ~Tosa##NAME##Attribute() \
- {} \
- DEF_ARGS_##NUM_ARGS(VER3, __VA_ARGS__) private : DEF_ARGS_##NUM_ARGS(VER4, __VA_ARGS__) \
- };
-
-#include "attribute.def"
-#undef DEF_ATTRIBUTE
-#undef DEF_ARGS_0
-#undef DEF_ARGS_1
-#undef DEF_ARGS_2
-#undef DEF_ARGS_3
-#undef DEF_ARGS_4
-#undef DEF_ARGS_5
-#undef DEF_ARGS_6
-#undef DEF_ARGS_7
-#undef DEF_ARGS_VER0
-#undef DEF_ARGS_VER1
-#undef DEF_ARGS_VER2
-#undef DEF_ARGS_VER3
-#undef DEF_ARGS_VER4
-#undef DEF_ARGS_VER0_S_int32_t
-#undef DEF_ARGS_VER0_S_float
-#undef DEF_ARGS_VER0_S_bool
-#undef DEF_ARGS_VER0_S_ResizeMode
-#undef DEF_ARGS_VER0_S_string
-#undef DEF_ARGS_VER0_S_STR
-#undef DEF_ARGS_VER0_S_DEFAULT
-#undef DEF_ARGS_VER1_TRUE
-#undef DEF_ARGS_VER1_FALSE
-#undef DEF_ARGS_VER0_S
-#undef DEF_ARGS_VER0_V
-#undef DEF_ARGS_VER1_S
-#undef DEF_ARGS_VER1_V
-#undef DEF_ARGS_VER2_S
-#undef DEF_ARGS_VER2_V
-#undef DEF_ARGS_VER3_S
-#undef DEF_ARGS_VER3_V
-#undef DEF_ARGS_VER4_S
-#undef DEF_ARGS_VER4_V
-#undef DEF_VER0_VAR_0
-#undef DEF_VER0_VAR_1
-#undef DEF_VER0_VAR_2
-#undef DEF_VER0_VAR_3
-#undef DEF_VER0_VAR_4
-#undef DEF_VER0_VAR_5
-#undef DEF_VER0_VAR_DECL_PTR
-
-} // namespace tosa
-
-#endif // _TOSA_SERIALIZATION_ATTRIBUTE_H
diff --git a/serialization/operator.def b/serialization/operator.def
deleted file mode 100644
index 9a93b70..0000000
--- a/serialization/operator.def
+++ /dev/null
@@ -1,124 +0,0 @@
-
-// Copyright (c) 2020, ARM Limited.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/*
- Syntax:
- DEF_OPERATOR(MLIR_NAME, SCHEMA_NAME, REF_IMPL_NAME, OPTIONS, QUANT_INFO)
-
- Description:
- MLIR_NAME: the symbolic string of this op, must match tosa_ops.td
- SCHEMA_NAME: corresponding operator name, must match "enum Op" in serialization/tosa.fbs
- REF_IMPL_NAME: name used internally in tosa reference implementation
- OPTIONS: compile time constant options of this op, corresponding to operator_option.def
- QUANT_INFO: quantization infomation of this op, corresponding to quant_info.def
-*/
-
-
-/* tensor operators */
-DEF_OPERATOR(argmax, ARGMAX, ArgMax, Axis, None)
-DEF_OPERATOR(avg_pool2d, AVG_POOL2D, AvgPool2d, Pool2d, Unary)
-DEF_OPERATOR(conv2d, CONV2D, Conv2d, Conv2d, Conv)
-DEF_OPERATOR(conv3d, CONV3D, Conv3d, None, None)
-DEF_OPERATOR(depthwise_conv2d, DEPTHWISE_CONV2D, DepthwiseConv2d, Conv2d, Conv)
-DEF_OPERATOR(fully_connected, FULLY_CONNECTED, FullyConnected, None, Conv)
-DEF_OPERATOR(matmul, MATMUL, MatMul, None, MatMul)
-DEF_OPERATOR(max_pool2d, MAX_POOL2D, MaxPool2d, Pool2d, None)
-DEF_OPERATOR(transpose_conv2d, TRANSPOSE_CONV2D, TransposeConv2d, TransposeConv2d, Conv)
-
-/* activation */
-DEF_OPERATOR(clamp, CLAMP, Clamp, Clamp, None)
-DEF_OPERATOR(reluN, RELUN, ReluN, ReluN, None)
-DEF_OPERATOR(sigmoid, SIGMOID, Sigmoid, None, None)
-DEF_OPERATOR(tanh, TANH, Tanh, None, None)
-
-/* elementwise - binary */
-DEF_OPERATOR(add, ADD, Add, None, None)
-DEF_OPERATOR(arithmetic_right_shift, ARITHMETIC_RIGHT_SHIFT, ArithmeticRightShift, ArithmeticRightShift, None)
-DEF_OPERATOR(bitwise_and, BITWISE_AND, BitwiseAnd, None, None)
-DEF_OPERATOR(bitwise_or, BITWISE_OR, BitwiseOr, None, None)
-DEF_OPERATOR(bitwise_xor, BITWISE_XOR, BitwiseXor, None, None)
-DEF_OPERATOR(logical_and, LOGICAL_AND, LogicalAnd, None, None)
-DEF_OPERATOR(logical_left_shift, LOGICAL_LEFT_SHIFT, LogicalLeftShift, None, None)
-DEF_OPERATOR(logical_right_shift, LOGICAL_RIGHT_SHIFT, LogicalRightShift, None, None)
-DEF_OPERATOR(logical_or, LOGICAL_OR, LogicalOr, None, None)
-DEF_OPERATOR(logical_xor, LOGICAL_XOR, LogicalXor, None, None)
-DEF_OPERATOR(maximum, MAXIMUM, Maximum, None, None)
-DEF_OPERATOR(minimum, MINIMUM, Minimum, None, None)
-DEF_OPERATOR(mul, MUL, Mul, Mul, None)
-DEF_OPERATOR(pow, POW, Pow, None, None)
-DEF_OPERATOR(sub, SUB, Sub, None, None)
-DEF_OPERATOR(table, TABLE, Table, None, None)
-
-/* elementwise - unary */
-DEF_OPERATOR(abs, ABS, Abs, None, None)
-DEF_OPERATOR(bitwise_not, BITWISE_NOT, BitwiseNot, None, None)
-DEF_OPERATOR(ceil, CEIL, Ceil, None, None)
-DEF_OPERATOR(clz, CLZ, Clz, None, None)
-DEF_OPERATOR(exp, EXP, Exp, None, None)
-DEF_OPERATOR(floor, FLOOR, Floor, None, None)
-DEF_OPERATOR(log, LOG, Log, None, None)
-DEF_OPERATOR(logical_not, LOGICAL_NOT, LogicalNot, None, None)
-DEF_OPERATOR(negate, NEGATE, Negate, None, Unary)
-DEF_OPERATOR(reciprocal, RECIPROCAL, Reciprocal, None, None)
-DEF_OPERATOR(rsqrt, RSQRT, Rsqrt, None, None)
-
-/* elementwise - ternary */
-DEF_OPERATOR(select, SELECT, Select, None, None)
-
-/* logical */
-DEF_OPERATOR(equal, EQUAL, Equal, None, None)
-DEF_OPERATOR(greater, GREATER, Greater, None, None)
-DEF_OPERATOR(greater_equal, GREATER_EQUAL, GreaterEqual, None, None)
-
-/* reduction */
-DEF_OPERATOR(reduce_any, REDUCE_ANY, ReduceAny, Reduce, None)
-DEF_OPERATOR(reduce_all, REDUCE_ALL, ReduceAll, Reduce, None)
-DEF_OPERATOR(reduce_max, REDUCE_MAX, ReduceMax, Reduce, None)
-DEF_OPERATOR(reduce_min, REDUCE_MIN, ReduceMin, Reduce, None)
-DEF_OPERATOR(reduce_prod, REDUCE_PRODUCT, ReduceProduct, Reduce, None)
-DEF_OPERATOR(reduce_sum, REDUCE_SUM, ReduceSum, Reduce, None)
-
-/* memory operation */
-DEF_OPERATOR(concat, CONCAT, Concat, Axis, None)
-DEF_OPERATOR(pad, PAD, Pad, None, Pad)
-DEF_OPERATOR(reshape, RESHAPE, Reshape, Reshape, None)
-DEF_OPERATOR(reverse, REVERSE, Reverse, Reverse, None)
-DEF_OPERATOR(slice, SLICE, Slice, Slice, None)
-DEF_OPERATOR(tile, TILE, Tile, Tile, None)
-DEF_OPERATOR(transpose, TRANSPOSE, Transpose, None, None)
-
-/* gather/scatter */
-DEF_OPERATOR(gather, GATHER, Gather, None, None)
-DEF_OPERATOR(scatter, SCATTER, Scatter, None, None)
-
-/* image */
-DEF_OPERATOR(resize, RESIZE, Resize, Resize, None)
-
-/* quantization */
-DEF_OPERATOR(cast, CAST, Cast, None, None)
-DEF_OPERATOR(rescale, RESCALE, Rescale, Rescale, None)
-
-/* data nodes */
-DEF_OPERATOR(const, CONST, Const, None, None)
-DEF_OPERATOR(placeholder, PLACEHOLDER, Placeholder, None, None)
-DEF_OPERATOR(identity, IDENTITY, Identity, None, None)
-DEF_OPERATOR(identityn, IDENTITYN, IdentityN, None, None)
-
-/* custom operations */
-DEF_OPERATOR(custom, CUSTOM, Custom, None, None)
-
-/* control flow operators */
-DEF_OPERATOR(cond_if, COND_IF, CondIf, CondIf, None)
-DEF_OPERATOR(while_loop, WHILE_LOOP, WhileLoop, WhileLoop, None)
diff --git a/serialization/quant_info.def b/serialization/quant_info.def
deleted file mode 100644
index 39dc101..0000000
--- a/serialization/quant_info.def
+++ /dev/null
@@ -1,43 +0,0 @@
-
-// Copyright (c) 2020, ARM Limited.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/*
- Syntax:
- DEF_QUANTIZATION_INFO(NAME, NUM_ARGS_IN_OPTIONS, ARG0_TYPE, ARG0_SCALAR_OR_VECTOR, ARGS0_NAME, ...)
-
- Description:
- NAME: corresponding quantization info name, must match corresponding "table XXXQuantInfo" in tosa.fbs
- NUM_ARGS_IN_QINFO: number of arguments in this quantization info
- ARG0_TYPE: data type of arg0
- ARG0_SCALAR_OR_VECTOR: is arg0 a scalar (S) or a vector (V)
- ARG0_NAME: name of arg0
- ...: variadic variables for more arguments, depending on NUM_ARGS_IN_QINFO
-*/
-
-
-DEF_QUANTIZATION_INFO(Unary, 2,
- int32_t, S, input_zp,
- int32_t, S, output_zp)
-
-DEF_QUANTIZATION_INFO(Conv, 2,
- int32_t, S, input_zp,
- int32_t, S, weight_zp)
-
-DEF_QUANTIZATION_INFO(MatMul, 2,
- int32_t, S, a_zp,
- int32_t, S, b_zp)
-
-DEF_QUANTIZATION_INFO(Pad, 1,
- int32_t, S, input_zp)
diff --git a/serialization/quant_info.h b/serialization/quant_info.h
deleted file mode 100644
index 03dcab9..0000000
--- a/serialization/quant_info.h
+++ /dev/null
@@ -1,164 +0,0 @@
-
-// Copyright (c) 2020, ARM Limited.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef _TOSA_SERIALIZATION_QUANT_INFO_H
-#define _TOSA_SERIALIZATION_QUANT_INFO_H
-#include "flatbuffers/idl.h"
-#include "flatbuffers/util.h"
-#include "tosa_generated.h"
-
-namespace tosa
-{
-
-class TosaQuantInfoBase
-{
-public:
- virtual ~TosaQuantInfoBase()
- {}
-};
-
-class TosaNoneQuantInfo : public TosaQuantInfoBase
-{
-public:
- TosaNoneQuantInfo()
- {}
- TosaNoneQuantInfo(TosaNoneQuantInfo* p)
- {}
-};
-
-#define DEF_ARGS_VER0_S(T, V) _##V = p->V();
-#define DEF_ARGS_VER0_V(T, V) _##V = std::vector<T>(p->V()->begin(), p->V()->end());
-#define DEF_ARGS_VER1_S(T, V) const T& V
-#define DEF_ARGS_VER1_V(T, V) const std::vector<T>& V
-#define DEF_ARGS_VER2_S(T, V) _##V = V;
-#define DEF_ARGS_VER2_V(T, V) _##V = V;
-#define DEF_ARGS_VER3_S(T, V) \
- T V() const \
- { \
- return _##V; \
- }
-#define DEF_ARGS_VER3_V(T, V) \
- std::vector<T> V() const \
- { \
- return _##V; \
- }
-#define DEF_ARGS_VER4_S(T, V) T _##V;
-#define DEF_ARGS_VER4_V(T, V) std::vector<T> _##V;
-
-// another level of preprocessor indirection to handle ", " as function's input argument
-#define DEF_ARGS_VER1_TRUE(T, F, V) DEF_ARGS_VER1_##F(T, V)
-#define DEF_ARGS_VER1_FALSE(T, F, V) , DEF_ARGS_VER1_##F(T, V)
-
-#define DEF_ARGS_VER0(FIRST, T, F, V) DEF_ARGS_VER0_##F(T, V)
-#define DEF_ARGS_VER1(FIRST, T, F, V) DEF_ARGS_VER1_##FIRST(T, F, V)
-#define DEF_ARGS_VER2(FIRST, T, F, V) DEF_ARGS_VER2_##F(T, V)
-#define DEF_ARGS_VER3(FIRST, T, F, V) DEF_ARGS_VER3_##F(T, V)
-#define DEF_ARGS_VER4(FIRST, T, F, V) DEF_ARGS_VER4_##F(T, V)
-
-#define DEF_ARGS_1(VER, T0, F0, V0) DEF_ARGS_##VER(TRUE, T0, F0, V0)
-#define DEF_ARGS_2(VER, T0, F0, V0, T1, F1, V1) DEF_ARGS_##VER(TRUE, T0, F0, V0) DEF_ARGS_##VER(FALSE, T1, F1, V1)
-#define DEF_ARGS_3(VER, T0, F0, V0, T1, F1, V1, T2, F2, V2) \
- DEF_ARGS_##VER(TRUE, T0, F0, V0) DEF_ARGS_##VER(FALSE, T1, F1, V1) DEF_ARGS_##VER(FALSE, T2, F2, V2)
-#define DEF_ARGS_4(VER, T0, F0, V0, T1, F1, V1, T2, F2, V2, T3, F3, V3) \
- DEF_ARGS_##VER(TRUE, T0, F0, V0) DEF_ARGS_##VER(FALSE, T1, F1, V1) DEF_ARGS_##VER(FALSE, T2, F2, V2) \
- DEF_ARGS_##VER(FALSE, T3, F3, V3)
-#define DEF_ARGS_5(VER, T0, F0, V0, T1, F1, V1, T2, F2, V2, T3, F3, V3, T4, F4, V4) \
- DEF_ARGS_##VER(TRUE, T0, F0, V0) DEF_ARGS_##VER(FALSE, T1, F1, V1) DEF_ARGS_##VER(FALSE, T2, F2, V2) \
- DEF_ARGS_##VER(FALSE, T3, F3, V3) DEF_ARGS_##VER(FALSE, T4, F4, V4)
-#define DEF_ARGS_6(VER, T0, F0, V0, T1, F1, V1, T2, F2, V2, T3, F3, V3, T4, F4, V4, T5, F5, V5) \
- DEF_ARGS_##VER(TRUE, T0, F0, V0) DEF_ARGS_##VER(FALSE, T1, F1, V1) DEF_ARGS_##VER(FALSE, T2, F2, V2) \
- DEF_ARGS_##VER(FALSE, T3, F3, V3) DEF_ARGS_##VER(FALSE, T4, F4, V4) DEF_ARGS_##VER(FALSE, T5, F5, V5)
-#define DEF_ARGS_7(VER, T0, F0, V0, T1, F1, V1, T2, F2, V2, T3, F3, V3, T4, F4, V4, T5, F5, V5, T6, F6, V6) \
- DEF_ARGS_##VER(TRUE, T0, F0, V0) DEF_ARGS_##VER(FALSE, T1, F1, V1) DEF_ARGS_##VER(FALSE, T2, F2, V2) \
- DEF_ARGS_##VER(FALSE, T3, F3, V3) DEF_ARGS_##VER(FALSE, T4, F4, V4) DEF_ARGS_##VER(FALSE, T5, F5, V5) \
- DEF_ARGS_##VER(FALSE, T6, F6, V6)
-#define DEF_ARGS_8(VER, T0, F0, V0, T1, F1, V1, T2, F2, V2, T3, F3, V3, T4, F4, V4, T5, F5, V5, T6, F6, V6, T7, F7, \
- V7) \
- DEF_ARGS_##VER(TRUE, T0, F0, V0) DEF_ARGS_##VER(FALSE, T1, F1, V1) DEF_ARGS_##VER(FALSE, T2, F2, V2) \
- DEF_ARGS_##VER(FALSE, T3, F3, V3) DEF_ARGS_##VER(FALSE, T4, F4, V4) DEF_ARGS_##VER(FALSE, T5, F5, V5) \
- DEF_ARGS_##VER(FALSE, T6, F6, V6) DEF_ARGS_##VER(FALSE, T7, F7, V7)
-#define DEF_ARGS_9(VER, T0, F0, V0, T1, F1, V1, T2, F2, V2, T3, F3, V3, T4, F4, V4, T5, F5, V5, T6, F6, V6, T7, F7, \
- V7, T8, F8, V8) \
- DEF_ARGS_##VER(TRUE, T0, F0, V0) DEF_ARGS_##VER(FALSE, T1, F1, V1) DEF_ARGS_##VER(FALSE, T2, F2, V2) \
- DEF_ARGS_##VER(FALSE, T3, F3, V3) DEF_ARGS_##VER(FALSE, T4, F4, V4) DEF_ARGS_##VER(FALSE, T5, F5, V5) \
- DEF_ARGS_##VER(FALSE, T6, F6, V6) DEF_ARGS_##VER(FALSE, T7, F7, V7) DEF_ARGS_##VER(FALSE, T8, F8, V8)
-#define DEF_ARGS_10(VER, T0, F0, V0, T1, F1, V1, T2, F2, V2, T3, F3, V3, T4, F4, V4, T5, F5, V5, T6, F6, V6, T7, F7, \
- V7, T8, F8, V8, T9, F9, V9) \
- DEF_ARGS_##VER(TRUE, T0, F0, V0) DEF_ARGS_##VER(FALSE, T1, F1, V1) DEF_ARGS_##VER(FALSE, T2, F2, V2) \
- DEF_ARGS_##VER(FALSE, T3, F3, V3) DEF_ARGS_##VER(FALSE, T4, F4, V4) DEF_ARGS_##VER(FALSE, T5, F5, V5) \
- DEF_ARGS_##VER(FALSE, T6, F6, V6) DEF_ARGS_##VER(FALSE, T7, F7, V7) DEF_ARGS_##VER(FALSE, T8, F8, V8) \
- DEF_ARGS_##VER(FALSE, T9, F9, V9)
-
-#define DEF_QUANTIZATION_INFO(NAME, NUM_ARGS, ...) \
- class Tosa##NAME##QuantInfo : public TosaQuantInfoBase \
- { \
- public: \
- Tosa##NAME##QuantInfo(const TosaQuantInfoBase* qinfo) \
- { \
- const Tosa##NAME##QuantInfo* p = dynamic_cast<const Tosa##NAME##QuantInfo*>(qinfo); \
- assert(p); \
- *this = *p; \
- } \
- Tosa##NAME##QuantInfo(const Tosa##NAME##QuantInfo* p) \
- { \
- *this = *p; \
- } \
- Tosa##NAME##QuantInfo(const void* qinfo) \
- { \
- const NAME##QuantInfo* p = static_cast<const NAME##QuantInfo*>(qinfo); \
- DEF_ARGS_##NUM_ARGS(VER0, __VA_ARGS__) \
- } \
- Tosa##NAME##QuantInfo(DEF_ARGS_##NUM_ARGS(VER1, __VA_ARGS__)) \
- { \
- DEF_ARGS_##NUM_ARGS(VER2, __VA_ARGS__) \
- } \
- virtual ~Tosa##NAME##QuantInfo() \
- {} \
- DEF_ARGS_##NUM_ARGS(VER3, __VA_ARGS__) private : DEF_ARGS_##NUM_ARGS(VER4, __VA_ARGS__) \
- };
-
-#include "quant_info.def"
-#undef DEF_QUANTIZATION_INFO
-#undef DEF_ARGS_1
-#undef DEF_ARGS_2
-#undef DEF_ARGS_3
-#undef DEF_ARGS_4
-#undef DEF_ARGS_5
-#undef DEF_ARGS_6
-#undef DEF_ARGS_7
-#undef DEF_ARGS_8
-#undef DEF_ARGS_9
-#undef DEF_ARGS_10
-#undef DEF_ARGS_VER0
-#undef DEF_ARGS_VER1
-#undef DEF_ARGS_VER2
-#undef DEF_ARGS_VER3
-#undef DEF_ARGS_VER4
-#undef DEF_ARGS_VER1_TRUE
-#undef DEF_ARGS_VER1_FALSE
-#undef DEF_ARGS_VER0_S
-#undef DEF_ARGS_VER0_V
-#undef DEF_ARGS_VER1_S
-#undef DEF_ARGS_VER1_V
-#undef DEF_ARGS_VER2_S
-#undef DEF_ARGS_VER2_V
-#undef DEF_ARGS_VER3_S
-#undef DEF_ARGS_VER3_V
-#undef DEF_ARGS_VER4_S
-#undef DEF_ARGS_VER4_V
-
-} // namespace tosa
-
-#endif
diff --git a/serialization/tosa.fbs b/serialization/tosa.fbs
deleted file mode 100644
index e30c3cf..0000000
--- a/serialization/tosa.fbs
+++ /dev/null
@@ -1,325 +0,0 @@
-
-// Copyright (c) 2020-2021, ARM Limited.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-namespace tosa;
-
-// This corresponds to the version.
-file_identifier "TOSA";
-// File extension of any written files.
-file_extension "tosa";
-
-enum DType:uint32 {
- UNKNOWN = 0,
- BOOL,
- UINT8,
- INT4,
- INT8,
- INT16,
- INT32,
- INT48,
- FLOAT,
-}
-
-enum Format:uint32 {
- UNKNOWN = 0,
- NHWC,
- NDHWC,
- OHWI,
- HWIM,
- DOHWI,
-}
-
-enum Usage:uint32 {
- UNKNOWN = 0,
- ACTIVATION,
- WEIGHT,
- INDEX,
-}
-
-enum ResizeMode:uint32 {
- UNKNOWN = 0,
- NEAREST,
- BILINEAR,
-}
-
-enum Op:uint32 {
- UNKNOWN = 0,
-
- // Tensor Operator
- ARGMAX,
- AVG_POOL2D,
- CONV2D,
- CONV3D,
- DEPTHWISE_CONV2D,
- FULLY_CONNECTED,
- MATMUL,
- MAX_POOL2D,
- TRANSPOSE_CONV2D,
-
- // Activation
- CLAMP,
- RELUN,
- SIGMOID,
- TANH,
-
- // Elementwise-Binary
- ADD,
- ARITHMETIC_RIGHT_SHIFT,
- BITWISE_AND,
- BITWISE_OR,
- BITWISE_XOR,
- LOGICAL_AND,
- LOGICAL_LEFT_SHIFT,
- LOGICAL_RIGHT_SHIFT,
- LOGICAL_OR,
- LOGICAL_XOR,
- MAXIMUM,
- MINIMUM,
- MUL,
- POW,
- SUB,
- TABLE,
-
- // Elementwise-Unary
- ABS,
- BITWISE_NOT,
- CEIL,
- CLZ,
- EXP,
- FLOOR,
- LOG,
- LOGICAL_NOT,
- NEGATE,
- RECIPROCAL,
- RSQRT,
-
- // Elementwise-Ternary
- SELECT,
-
- // Logical
- EQUAL,
- GREATER,
- GREATER_EQUAL,
-
- // Reduction
- REDUCE_ANY,
- REDUCE_ALL,
- REDUCE_MAX,
- REDUCE_MIN,
- REDUCE_PRODUCT,
- REDUCE_SUM,
-
- // Data layout operation
- CONCAT,
- PAD,
- RESHAPE,
- REVERSE,
- SLICE,
- TILE,
- TRANSPOSE,
-
- // Gather/scatter operation
- GATHER,
- SCATTER,
-
- // Image
- RESIZE,
-
- // Type conversion
- CAST,
- RESCALE,
-
- // Data Nodes
- CONST,
- PLACEHOLDER,
- IDENTITY,
- IDENTITYN,
-
- // Custom operations
- CUSTOM,
-
- // Control flow operators
- COND_IF,
- WHILE_LOOP,
-}
-
-union Attribute {
- Pool2dAttribute,
- Conv2dAttribute,
- TransposeConv2dAttribute,
- ReluNAttribute,
- AxisAttribute,
- ReshapeAttribute,
- SliceAttribute,
- TileAttribute,
- ResizeAttribute,
- ClampAttribute,
- RescaleAttribute,
- MulAttribute,
- ArithmeticRightShiftAttribute,
- CondIfAttribute,
- WhileLoopAttribute,
-}
-
-table Pool2dAttribute {
- padding: [int32];
- kernel: [int32];
- stride: [int32];
-}
-
-table Conv2dAttribute {
- padding: [int32];
- stride: [int32];
- dilation: [int32];
-}
-
-table TransposeConv2dAttribute {
- outpad: [int32];
- stride: [int32];
- dilation: [int32];
- output_shape: [int32];
-}
-
-table ReluNAttribute {
- max_int: int32;
- max_fp: float;
-}
-
-table AxisAttribute {
- axis: int32;
-}
-
-table ReshapeAttribute {
- shape: [int32];
-}
-
-table SliceAttribute {
- begin: [int32];
- size: [int32];
-}
-
-table TileAttribute {
- multiples: [int32];
-}
-
-table ResizeAttribute {
- output_size: [int32];
- stride: [int32];
- offset: [int32];
- shift: int32;
- stride_fp: [float];
- offset_fp: [float];
- mode: ResizeMode;
-}
-
-table ClampAttribute {
- min_int: int32;
- max_int: int32;
- min_fp: float;
- max_fp: float;
-}
-
-table RescaleAttribute {
- input_zp: int32;
- output_zp: int32;
- multiplier: [int32];
- shift: [int32];
- scale32: bool;
- double_round: bool;
- per_channel: bool;
-}
-
-table MulAttribute {
- shift: int32;
-}
-
-table ArithmeticRightShiftAttribute {
- round: bool;
-}
-
-table CondIfAttribute {
- then_branch: string;
- else_branch: string;
-}
-
-table WhileLoopAttribute {
- cond_branch: string;
- body_branch: string;
-}
-
-union QuantInfo {
- UnaryQuantInfo,
- ConvQuantInfo,
- MatMulQuantInfo,
- PadQuantInfo,
-}
-
-table UnaryQuantInfo {
- input_zp: int32;
- output_zp: int32;
-}
-
-table ConvQuantInfo {
- input_zp: int32;
- weight_zp: int32;
-}
-
-table MatMulQuantInfo {
- a_zp: int32;
- b_zp: int32;
-}
-
-table PadQuantInfo {
- input_zp: int32;
-}
-
-table Version {
- _major: int32 = 0;
- _minor: int32 = 21;
- _patch: int32 = 0;
- _experimental: bool = false;
-}
-
-table TosaTensor {
- name:string; // name of the tensor, used for solving dependency
- shape:[int32]; // shape of the tensor
- type:DType; // data type of the tensor
- usage:[Usage]; // vector of possible usages. for the convenience of debugging only.
- format:[Format]; // vector of possible formats. for the convenience of debugging only.
- npy_filename: string; // numpy array filename
-}
-
-table TosaOperator {
- op:Op; // operator enum
- attribute: Attribute; // union structure. operator attribute
- inputs:[string]; // list of input tensor names
- outputs:[string]; // list of output tensor names
- quant_info: QuantInfo; // op-based quantization information
-}
-
-table TosaBasicBlock {
- name:string; // basic block name
- operators:[TosaOperator]; // operators array
- tensors:[TosaTensor]; // tensors array
- inputs:[string]; // name of graph inputs
- outputs:[string]; // name of graph outputs
-}
-
-table TosaGraph {
- version: Version;
- blocks:[TosaBasicBlock]; // basic blocks array
-}
-
-root_type TosaGraph;
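[editor's note: the file deleted next, tosa_generated.h, is the C++ API that the FlatBuffers compiler emits from the schema above. A minimal sketch of serializing one of the schema's tables with that API follows; the kernel/stride/padding values are illustrative only, and a real graph would embed the resulting offset in a TosaOperator rather than finish the buffer with the attribute as root.]

#include "tosa_generated.h"
#include <vector>

int main()
{
    flatbuffers::FlatBufferBuilder fbb;

    // Pool2dAttribute fields from the schema: padding, kernel, stride.
    std::vector<int32_t> padding = {0, 0, 0, 0};
    std::vector<int32_t> kernel  = {2, 2};
    std::vector<int32_t> stride  = {2, 2};

    // CreatePool2dAttributeDirect (defined in the generated header below) copies the
    // vectors into the buffer and returns an offset to the finished table.
    auto attr = tosa::CreatePool2dAttributeDirect(fbb, &padding, &kernel, &stride);

    // Finish the buffer so fbb.GetBufferPointer()/GetSize() describe a complete FlatBuffer.
    fbb.Finish(attr);
    return 0;
}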
diff --git a/serialization/tosa_generated.h b/serialization/tosa_generated.h
deleted file mode 100644
index f2796af..0000000
--- a/serialization/tosa_generated.h
+++ /dev/null
@@ -1,2683 +0,0 @@
-
-// Copyright (c) 2020-2021, ARM Limited.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// automatically generated by the FlatBuffers compiler, do not modify
-
-
-#ifndef FLATBUFFERS_GENERATED_TOSA_TOSA_H_
-#define FLATBUFFERS_GENERATED_TOSA_TOSA_H_
-
-#include "flatbuffers/flatbuffers.h"
-
-namespace tosa {
-
-struct Pool2dAttribute;
-
-struct Conv2dAttribute;
-
-struct TransposeConv2dAttribute;
-
-struct ReluNAttribute;
-
-struct AxisAttribute;
-
-struct ReshapeAttribute;
-
-struct SliceAttribute;
-
-struct TileAttribute;
-
-struct ResizeAttribute;
-
-struct ClampAttribute;
-
-struct RescaleAttribute;
-
-struct MulAttribute;
-
-struct ArithmeticRightShiftAttribute;
-
-struct CondIfAttribute;
-
-struct WhileLoopAttribute;
-
-struct UnaryQuantInfo;
-
-struct ConvQuantInfo;
-
-struct MatMulQuantInfo;
-
-struct PadQuantInfo;
-
-struct Version;
-
-struct TosaTensor;
-
-struct TosaOperator;
-
-struct TosaBasicBlock;
-
-struct TosaGraph;
-
-enum DType {
- DType_UNKNOWN = 0,
- DType_BOOL = 1,
- DType_UINT8 = 2,
- DType_INT4 = 3,
- DType_INT8 = 4,
- DType_INT16 = 5,
- DType_INT32 = 6,
- DType_INT48 = 7,
- DType_FLOAT = 8,
- DType_MIN = DType_UNKNOWN,
- DType_MAX = DType_FLOAT
-};
-
-inline const DType (&EnumValuesDType())[9] {
- static const DType values[] = {
- DType_UNKNOWN,
- DType_BOOL,
- DType_UINT8,
- DType_INT4,
- DType_INT8,
- DType_INT16,
- DType_INT32,
- DType_INT48,
- DType_FLOAT
- };
- return values;
-}
-
-inline const char * const *EnumNamesDType() {
- static const char * const names[] = {
- "UNKNOWN",
- "BOOL",
- "UINT8",
- "INT4",
- "INT8",
- "INT16",
- "INT32",
- "INT48",
- "FLOAT",
- nullptr
- };
- return names;
-}
-
-inline const char *EnumNameDType(DType e) {
- if (e < DType_UNKNOWN || e > DType_FLOAT) return "";
- const size_t index = static_cast<size_t>(e);
- return EnumNamesDType()[index];
-}
-
-enum Format {
- Format_UNKNOWN = 0,
- Format_NHWC = 1,
- Format_NDHWC = 2,
- Format_OHWI = 3,
- Format_HWIM = 4,
- Format_DOHWI = 5,
- Format_MIN = Format_UNKNOWN,
- Format_MAX = Format_DOHWI
-};
-
-inline const Format (&EnumValuesFormat())[6] {
- static const Format values[] = {
- Format_UNKNOWN,
- Format_NHWC,
- Format_NDHWC,
- Format_OHWI,
- Format_HWIM,
- Format_DOHWI
- };
- return values;
-}
-
-inline const char * const *EnumNamesFormat() {
- static const char * const names[] = {
- "UNKNOWN",
- "NHWC",
- "NDHWC",
- "OHWI",
- "HWIM",
- "DOHWI",
- nullptr
- };
- return names;
-}
-
-inline const char *EnumNameFormat(Format e) {
- if (e < Format_UNKNOWN || e > Format_DOHWI) return "";
- const size_t index = static_cast<size_t>(e);
- return EnumNamesFormat()[index];
-}
-
-enum Usage {
- Usage_UNKNOWN = 0,
- Usage_ACTIVATION = 1,
- Usage_WEIGHT = 2,
- Usage_INDEX = 3,
- Usage_MIN = Usage_UNKNOWN,
- Usage_MAX = Usage_INDEX
-};
-
-inline const Usage (&EnumValuesUsage())[4] {
- static const Usage values[] = {
- Usage_UNKNOWN,
- Usage_ACTIVATION,
- Usage_WEIGHT,
- Usage_INDEX
- };
- return values;
-}
-
-inline const char * const *EnumNamesUsage() {
- static const char * const names[] = {
- "UNKNOWN",
- "ACTIVATION",
- "WEIGHT",
- "INDEX",
- nullptr
- };
- return names;
-}
-
-inline const char *EnumNameUsage(Usage e) {
- if (e < Usage_UNKNOWN || e > Usage_INDEX) return "";
- const size_t index = static_cast<size_t>(e);
- return EnumNamesUsage()[index];
-}
-
-enum ResizeMode {
- ResizeMode_UNKNOWN = 0,
- ResizeMode_NEAREST = 1,
- ResizeMode_BILINEAR = 2,
- ResizeMode_MIN = ResizeMode_UNKNOWN,
- ResizeMode_MAX = ResizeMode_BILINEAR
-};
-
-inline const ResizeMode (&EnumValuesResizeMode())[3] {
- static const ResizeMode values[] = {
- ResizeMode_UNKNOWN,
- ResizeMode_NEAREST,
- ResizeMode_BILINEAR
- };
- return values;
-}
-
-inline const char * const *EnumNamesResizeMode() {
- static const char * const names[] = {
- "UNKNOWN",
- "NEAREST",
- "BILINEAR",
- nullptr
- };
- return names;
-}
-
-inline const char *EnumNameResizeMode(ResizeMode e) {
- if (e < ResizeMode_UNKNOWN || e > ResizeMode_BILINEAR) return "";
- const size_t index = static_cast<size_t>(e);
- return EnumNamesResizeMode()[index];
-}
-
-enum Op {
- Op_UNKNOWN = 0,
- Op_ARGMAX = 1,
- Op_AVG_POOL2D = 2,
- Op_CONV2D = 3,
- Op_CONV3D = 4,
- Op_DEPTHWISE_CONV2D = 5,
- Op_FULLY_CONNECTED = 6,
- Op_MATMUL = 7,
- Op_MAX_POOL2D = 8,
- Op_TRANSPOSE_CONV2D = 9,
- Op_CLAMP = 10,
- Op_RELUN = 11,
- Op_SIGMOID = 12,
- Op_TANH = 13,
- Op_ADD = 14,
- Op_ARITHMETIC_RIGHT_SHIFT = 15,
- Op_BITWISE_AND = 16,
- Op_BITWISE_OR = 17,
- Op_BITWISE_XOR = 18,
- Op_LOGICAL_AND = 19,
- Op_LOGICAL_LEFT_SHIFT = 20,
- Op_LOGICAL_RIGHT_SHIFT = 21,
- Op_LOGICAL_OR = 22,
- Op_LOGICAL_XOR = 23,
- Op_MAXIMUM = 24,
- Op_MINIMUM = 25,
- Op_MUL = 26,
- Op_POW = 27,
- Op_SUB = 28,
- Op_TABLE = 29,
- Op_ABS = 30,
- Op_BITWISE_NOT = 31,
- Op_CEIL = 32,
- Op_CLZ = 33,
- Op_EXP = 34,
- Op_FLOOR = 35,
- Op_LOG = 36,
- Op_LOGICAL_NOT = 37,
- Op_NEGATE = 38,
- Op_RECIPROCAL = 39,
- Op_RSQRT = 40,
- Op_SELECT = 41,
- Op_EQUAL = 42,
- Op_GREATER = 43,
- Op_GREATER_EQUAL = 44,
- Op_REDUCE_ANY = 45,
- Op_REDUCE_ALL = 46,
- Op_REDUCE_MAX = 47,
- Op_REDUCE_MIN = 48,
- Op_REDUCE_PRODUCT = 49,
- Op_REDUCE_SUM = 50,
- Op_CONCAT = 51,
- Op_PAD = 52,
- Op_RESHAPE = 53,
- Op_REVERSE = 54,
- Op_SLICE = 55,
- Op_TILE = 56,
- Op_TRANSPOSE = 57,
- Op_GATHER = 58,
- Op_SCATTER = 59,
- Op_RESIZE = 60,
- Op_CAST = 61,
- Op_RESCALE = 62,
- Op_CONST = 63,
- Op_PLACEHOLDER = 64,
- Op_IDENTITY = 65,
- Op_IDENTITYN = 66,
- Op_CUSTOM = 67,
- Op_COND_IF = 68,
- Op_WHILE_LOOP = 69,
- Op_MIN = Op_UNKNOWN,
- Op_MAX = Op_WHILE_LOOP
-};
-
-inline const Op (&EnumValuesOp())[70] {
- static const Op values[] = {
- Op_UNKNOWN,
- Op_ARGMAX,
- Op_AVG_POOL2D,
- Op_CONV2D,
- Op_CONV3D,
- Op_DEPTHWISE_CONV2D,
- Op_FULLY_CONNECTED,
- Op_MATMUL,
- Op_MAX_POOL2D,
- Op_TRANSPOSE_CONV2D,
- Op_CLAMP,
- Op_RELUN,
- Op_SIGMOID,
- Op_TANH,
- Op_ADD,
- Op_ARITHMETIC_RIGHT_SHIFT,
- Op_BITWISE_AND,
- Op_BITWISE_OR,
- Op_BITWISE_XOR,
- Op_LOGICAL_AND,
- Op_LOGICAL_LEFT_SHIFT,
- Op_LOGICAL_RIGHT_SHIFT,
- Op_LOGICAL_OR,
- Op_LOGICAL_XOR,
- Op_MAXIMUM,
- Op_MINIMUM,
- Op_MUL,
- Op_POW,
- Op_SUB,
- Op_TABLE,
- Op_ABS,
- Op_BITWISE_NOT,
- Op_CEIL,
- Op_CLZ,
- Op_EXP,
- Op_FLOOR,
- Op_LOG,
- Op_LOGICAL_NOT,
- Op_NEGATE,
- Op_RECIPROCAL,
- Op_RSQRT,
- Op_SELECT,
- Op_EQUAL,
- Op_GREATER,
- Op_GREATER_EQUAL,
- Op_REDUCE_ANY,
- Op_REDUCE_ALL,
- Op_REDUCE_MAX,
- Op_REDUCE_MIN,
- Op_REDUCE_PRODUCT,
- Op_REDUCE_SUM,
- Op_CONCAT,
- Op_PAD,
- Op_RESHAPE,
- Op_REVERSE,
- Op_SLICE,
- Op_TILE,
- Op_TRANSPOSE,
- Op_GATHER,
- Op_SCATTER,
- Op_RESIZE,
- Op_CAST,
- Op_RESCALE,
- Op_CONST,
- Op_PLACEHOLDER,
- Op_IDENTITY,
- Op_IDENTITYN,
- Op_CUSTOM,
- Op_COND_IF,
- Op_WHILE_LOOP
- };
- return values;
-}
-
-inline const char * const *EnumNamesOp() {
- static const char * const names[] = {
- "UNKNOWN",
- "ARGMAX",
- "AVG_POOL2D",
- "CONV2D",
- "CONV3D",
- "DEPTHWISE_CONV2D",
- "FULLY_CONNECTED",
- "MATMUL",
- "MAX_POOL2D",
- "TRANSPOSE_CONV2D",
- "CLAMP",
- "RELUN",
- "SIGMOID",
- "TANH",
- "ADD",
- "ARITHMETIC_RIGHT_SHIFT",
- "BITWISE_AND",
- "BITWISE_OR",
- "BITWISE_XOR",
- "LOGICAL_AND",
- "LOGICAL_LEFT_SHIFT",
- "LOGICAL_RIGHT_SHIFT",
- "LOGICAL_OR",
- "LOGICAL_XOR",
- "MAXIMUM",
- "MINIMUM",
- "MUL",
- "POW",
- "SUB",
- "TABLE",
- "ABS",
- "BITWISE_NOT",
- "CEIL",
- "CLZ",
- "EXP",
- "FLOOR",
- "LOG",
- "LOGICAL_NOT",
- "NEGATE",
- "RECIPROCAL",
- "RSQRT",
- "SELECT",
- "EQUAL",
- "GREATER",
- "GREATER_EQUAL",
- "REDUCE_ANY",
- "REDUCE_ALL",
- "REDUCE_MAX",
- "REDUCE_MIN",
- "REDUCE_PRODUCT",
- "REDUCE_SUM",
- "CONCAT",
- "PAD",
- "RESHAPE",
- "REVERSE",
- "SLICE",
- "TILE",
- "TRANSPOSE",
- "GATHER",
- "SCATTER",
- "RESIZE",
- "CAST",
- "RESCALE",
- "CONST",
- "PLACEHOLDER",
- "IDENTITY",
- "IDENTITYN",
- "CUSTOM",
- "COND_IF",
- "WHILE_LOOP",
- nullptr
- };
- return names;
-}
-
-inline const char *EnumNameOp(Op e) {
- if (e < Op_UNKNOWN || e > Op_WHILE_LOOP) return "";
- const size_t index = static_cast<size_t>(e);
- return EnumNamesOp()[index];
-}
-
-enum Attribute {
- Attribute_NONE = 0,
- Attribute_Pool2dAttribute = 1,
- Attribute_Conv2dAttribute = 2,
- Attribute_TransposeConv2dAttribute = 3,
- Attribute_ReluNAttribute = 4,
- Attribute_AxisAttribute = 5,
- Attribute_ReshapeAttribute = 6,
- Attribute_SliceAttribute = 7,
- Attribute_TileAttribute = 8,
- Attribute_ResizeAttribute = 9,
- Attribute_ClampAttribute = 10,
- Attribute_RescaleAttribute = 11,
- Attribute_MulAttribute = 12,
- Attribute_ArithmeticRightShiftAttribute = 13,
- Attribute_CondIfAttribute = 14,
- Attribute_WhileLoopAttribute = 15,
- Attribute_MIN = Attribute_NONE,
- Attribute_MAX = Attribute_WhileLoopAttribute
-};
-
-inline const Attribute (&EnumValuesAttribute())[16] {
- static const Attribute values[] = {
- Attribute_NONE,
- Attribute_Pool2dAttribute,
- Attribute_Conv2dAttribute,
- Attribute_TransposeConv2dAttribute,
- Attribute_ReluNAttribute,
- Attribute_AxisAttribute,
- Attribute_ReshapeAttribute,
- Attribute_SliceAttribute,
- Attribute_TileAttribute,
- Attribute_ResizeAttribute,
- Attribute_ClampAttribute,
- Attribute_RescaleAttribute,
- Attribute_MulAttribute,
- Attribute_ArithmeticRightShiftAttribute,
- Attribute_CondIfAttribute,
- Attribute_WhileLoopAttribute
- };
- return values;
-}
-
-inline const char * const *EnumNamesAttribute() {
- static const char * const names[] = {
- "NONE",
- "Pool2dAttribute",
- "Conv2dAttribute",
- "TransposeConv2dAttribute",
- "ReluNAttribute",
- "AxisAttribute",
- "ReshapeAttribute",
- "SliceAttribute",
- "TileAttribute",
- "ResizeAttribute",
- "ClampAttribute",
- "RescaleAttribute",
- "MulAttribute",
- "ArithmeticRightShiftAttribute",
- "CondIfAttribute",
- "WhileLoopAttribute",
- nullptr
- };
- return names;
-}
-
-inline const char *EnumNameAttribute(Attribute e) {
- if (e < Attribute_NONE || e > Attribute_WhileLoopAttribute) return "";
- const size_t index = static_cast<size_t>(e);
- return EnumNamesAttribute()[index];
-}
-
-template<typename T> struct AttributeTraits {
- static const Attribute enum_value = Attribute_NONE;
-};
-
-template<> struct AttributeTraits<Pool2dAttribute> {
- static const Attribute enum_value = Attribute_Pool2dAttribute;
-};
-
-template<> struct AttributeTraits<Conv2dAttribute> {
- static const Attribute enum_value = Attribute_Conv2dAttribute;
-};
-
-template<> struct AttributeTraits<TransposeConv2dAttribute> {
- static const Attribute enum_value = Attribute_TransposeConv2dAttribute;
-};
-
-template<> struct AttributeTraits<ReluNAttribute> {
- static const Attribute enum_value = Attribute_ReluNAttribute;
-};
-
-template<> struct AttributeTraits<AxisAttribute> {
- static const Attribute enum_value = Attribute_AxisAttribute;
-};
-
-template<> struct AttributeTraits<ReshapeAttribute> {
- static const Attribute enum_value = Attribute_ReshapeAttribute;
-};
-
-template<> struct AttributeTraits<SliceAttribute> {
- static const Attribute enum_value = Attribute_SliceAttribute;
-};
-
-template<> struct AttributeTraits<TileAttribute> {
- static const Attribute enum_value = Attribute_TileAttribute;
-};
-
-template<> struct AttributeTraits<ResizeAttribute> {
- static const Attribute enum_value = Attribute_ResizeAttribute;
-};
-
-template<> struct AttributeTraits<ClampAttribute> {
- static const Attribute enum_value = Attribute_ClampAttribute;
-};
-
-template<> struct AttributeTraits<RescaleAttribute> {
- static const Attribute enum_value = Attribute_RescaleAttribute;
-};
-
-template<> struct AttributeTraits<MulAttribute> {
- static const Attribute enum_value = Attribute_MulAttribute;
-};
-
-template<> struct AttributeTraits<ArithmeticRightShiftAttribute> {
- static const Attribute enum_value = Attribute_ArithmeticRightShiftAttribute;
-};
-
-template<> struct AttributeTraits<CondIfAttribute> {
- static const Attribute enum_value = Attribute_CondIfAttribute;
-};
-
-template<> struct AttributeTraits<WhileLoopAttribute> {
- static const Attribute enum_value = Attribute_WhileLoopAttribute;
-};
-
-bool VerifyAttribute(flatbuffers::Verifier &verifier, const void *obj, Attribute type);
-bool VerifyAttributeVector(flatbuffers::Verifier &verifier, const flatbuffers::Vector<flatbuffers::Offset<void>> *values, const flatbuffers::Vector<uint8_t> *types);
-
-enum QuantInfo {
- QuantInfo_NONE = 0,
- QuantInfo_UnaryQuantInfo = 1,
- QuantInfo_ConvQuantInfo = 2,
- QuantInfo_MatMulQuantInfo = 3,
- QuantInfo_PadQuantInfo = 4,
- QuantInfo_MIN = QuantInfo_NONE,
- QuantInfo_MAX = QuantInfo_PadQuantInfo
-};
-
-inline const QuantInfo (&EnumValuesQuantInfo())[5] {
- static const QuantInfo values[] = {
- QuantInfo_NONE,
- QuantInfo_UnaryQuantInfo,
- QuantInfo_ConvQuantInfo,
- QuantInfo_MatMulQuantInfo,
- QuantInfo_PadQuantInfo
- };
- return values;
-}
-
-inline const char * const *EnumNamesQuantInfo() {
- static const char * const names[] = {
- "NONE",
- "UnaryQuantInfo",
- "ConvQuantInfo",
- "MatMulQuantInfo",
- "PadQuantInfo",
- nullptr
- };
- return names;
-}
-
-inline const char *EnumNameQuantInfo(QuantInfo e) {
- if (e < QuantInfo_NONE || e > QuantInfo_PadQuantInfo) return "";
- const size_t index = static_cast<size_t>(e);
- return EnumNamesQuantInfo()[index];
-}
-
-template<typename T> struct QuantInfoTraits {
- static const QuantInfo enum_value = QuantInfo_NONE;
-};
-
-template<> struct QuantInfoTraits<UnaryQuantInfo> {
- static const QuantInfo enum_value = QuantInfo_UnaryQuantInfo;
-};
-
-template<> struct QuantInfoTraits<ConvQuantInfo> {
- static const QuantInfo enum_value = QuantInfo_ConvQuantInfo;
-};
-
-template<> struct QuantInfoTraits<MatMulQuantInfo> {
- static const QuantInfo enum_value = QuantInfo_MatMulQuantInfo;
-};
-
-template<> struct QuantInfoTraits<PadQuantInfo> {
- static const QuantInfo enum_value = QuantInfo_PadQuantInfo;
-};
-
-bool VerifyQuantInfo(flatbuffers::Verifier &verifier, const void *obj, QuantInfo type);
-bool VerifyQuantInfoVector(flatbuffers::Verifier &verifier, const flatbuffers::Vector<flatbuffers::Offset<void>> *values, const flatbuffers::Vector<uint8_t> *types);
-
-struct Pool2dAttribute FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
- enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
- VT_PADDING = 4,
- VT_KERNEL = 6,
- VT_STRIDE = 8
- };
- const flatbuffers::Vector<int32_t> *padding() const {
- return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_PADDING);
- }
- const flatbuffers::Vector<int32_t> *kernel() const {
- return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_KERNEL);
- }
- const flatbuffers::Vector<int32_t> *stride() const {
- return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_STRIDE);
- }
- bool Verify(flatbuffers::Verifier &verifier) const {
- return VerifyTableStart(verifier) &&
- VerifyOffset(verifier, VT_PADDING) &&
- verifier.VerifyVector(padding()) &&
- VerifyOffset(verifier, VT_KERNEL) &&
- verifier.VerifyVector(kernel()) &&
- VerifyOffset(verifier, VT_STRIDE) &&
- verifier.VerifyVector(stride()) &&
- verifier.EndTable();
- }
-};
-
-struct Pool2dAttributeBuilder {
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_padding(flatbuffers::Offset<flatbuffers::Vector<int32_t>> padding) {
- fbb_.AddOffset(Pool2dAttribute::VT_PADDING, padding);
- }
- void add_kernel(flatbuffers::Offset<flatbuffers::Vector<int32_t>> kernel) {
- fbb_.AddOffset(Pool2dAttribute::VT_KERNEL, kernel);
- }
- void add_stride(flatbuffers::Offset<flatbuffers::Vector<int32_t>> stride) {
- fbb_.AddOffset(Pool2dAttribute::VT_STRIDE, stride);
- }
- explicit Pool2dAttributeBuilder(flatbuffers::FlatBufferBuilder &_fbb)
- : fbb_(_fbb) {
- start_ = fbb_.StartTable();
- }
- Pool2dAttributeBuilder &operator=(const Pool2dAttributeBuilder &);
- flatbuffers::Offset<Pool2dAttribute> Finish() {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<Pool2dAttribute>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<Pool2dAttribute> CreatePool2dAttribute(
- flatbuffers::FlatBufferBuilder &_fbb,
- flatbuffers::Offset<flatbuffers::Vector<int32_t>> padding = 0,
- flatbuffers::Offset<flatbuffers::Vector<int32_t>> kernel = 0,
- flatbuffers::Offset<flatbuffers::Vector<int32_t>> stride = 0) {
- Pool2dAttributeBuilder builder_(_fbb);
- builder_.add_stride(stride);
- builder_.add_kernel(kernel);
- builder_.add_padding(padding);
- return builder_.Finish();
-}
-
-inline flatbuffers::Offset<Pool2dAttribute> CreatePool2dAttributeDirect(
- flatbuffers::FlatBufferBuilder &_fbb,
- const std::vector<int32_t> *padding = nullptr,
- const std::vector<int32_t> *kernel = nullptr,
- const std::vector<int32_t> *stride = nullptr) {
- auto padding__ = padding ? _fbb.CreateVector<int32_t>(*padding) : 0;
- auto kernel__ = kernel ? _fbb.CreateVector<int32_t>(*kernel) : 0;
- auto stride__ = stride ? _fbb.CreateVector<int32_t>(*stride) : 0;
- return tosa::CreatePool2dAttribute(
- _fbb,
- padding__,
- kernel__,
- stride__);
-}
-
-struct Conv2dAttribute FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
- enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
- VT_PADDING = 4,
- VT_STRIDE = 6,
- VT_DILATION = 8
- };
- const flatbuffers::Vector<int32_t> *padding() const {
- return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_PADDING);
- }
- const flatbuffers::Vector<int32_t> *stride() const {
- return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_STRIDE);
- }
- const flatbuffers::Vector<int32_t> *dilation() const {
- return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_DILATION);
- }
- bool Verify(flatbuffers::Verifier &verifier) const {
- return VerifyTableStart(verifier) &&
- VerifyOffset(verifier, VT_PADDING) &&
- verifier.VerifyVector(padding()) &&
- VerifyOffset(verifier, VT_STRIDE) &&
- verifier.VerifyVector(stride()) &&
- VerifyOffset(verifier, VT_DILATION) &&
- verifier.VerifyVector(dilation()) &&
- verifier.EndTable();
- }
-};
-
-struct Conv2dAttributeBuilder {
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_padding(flatbuffers::Offset<flatbuffers::Vector<int32_t>> padding) {
- fbb_.AddOffset(Conv2dAttribute::VT_PADDING, padding);
- }
- void add_stride(flatbuffers::Offset<flatbuffers::Vector<int32_t>> stride) {
- fbb_.AddOffset(Conv2dAttribute::VT_STRIDE, stride);
- }
- void add_dilation(flatbuffers::Offset<flatbuffers::Vector<int32_t>> dilation) {
- fbb_.AddOffset(Conv2dAttribute::VT_DILATION, dilation);
- }
- explicit Conv2dAttributeBuilder(flatbuffers::FlatBufferBuilder &_fbb)
- : fbb_(_fbb) {
- start_ = fbb_.StartTable();
- }
- Conv2dAttributeBuilder &operator=(const Conv2dAttributeBuilder &);
- flatbuffers::Offset<Conv2dAttribute> Finish() {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<Conv2dAttribute>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<Conv2dAttribute> CreateConv2dAttribute(
- flatbuffers::FlatBufferBuilder &_fbb,
- flatbuffers::Offset<flatbuffers::Vector<int32_t>> padding = 0,
- flatbuffers::Offset<flatbuffers::Vector<int32_t>> stride = 0,
- flatbuffers::Offset<flatbuffers::Vector<int32_t>> dilation = 0) {
- Conv2dAttributeBuilder builder_(_fbb);
- builder_.add_dilation(dilation);
- builder_.add_stride(stride);
- builder_.add_padding(padding);
- return builder_.Finish();
-}
-
-inline flatbuffers::Offset<Conv2dAttribute> CreateConv2dAttributeDirect(
- flatbuffers::FlatBufferBuilder &_fbb,
- const std::vector<int32_t> *padding = nullptr,
- const std::vector<int32_t> *stride = nullptr,
- const std::vector<int32_t> *dilation = nullptr) {
- auto padding__ = padding ? _fbb.CreateVector<int32_t>(*padding) : 0;
- auto stride__ = stride ? _fbb.CreateVector<int32_t>(*stride) : 0;
- auto dilation__ = dilation ? _fbb.CreateVector<int32_t>(*dilation) : 0;
- return tosa::CreateConv2dAttribute(
- _fbb,
- padding__,
- stride__,
- dilation__);
-}
-
-struct TransposeConv2dAttribute FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
- enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
- VT_OUTPAD = 4,
- VT_STRIDE = 6,
- VT_DILATION = 8,
- VT_OUTPUT_SHAPE = 10
- };
- const flatbuffers::Vector<int32_t> *outpad() const {
- return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_OUTPAD);
- }
- const flatbuffers::Vector<int32_t> *stride() const {
- return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_STRIDE);
- }
- const flatbuffers::Vector<int32_t> *dilation() const {
- return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_DILATION);
- }
- const flatbuffers::Vector<int32_t> *output_shape() const {
- return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_OUTPUT_SHAPE);
- }
- bool Verify(flatbuffers::Verifier &verifier) const {
- return VerifyTableStart(verifier) &&
- VerifyOffset(verifier, VT_OUTPAD) &&
- verifier.VerifyVector(outpad()) &&
- VerifyOffset(verifier, VT_STRIDE) &&
- verifier.VerifyVector(stride()) &&
- VerifyOffset(verifier, VT_DILATION) &&
- verifier.VerifyVector(dilation()) &&
- VerifyOffset(verifier, VT_OUTPUT_SHAPE) &&
- verifier.VerifyVector(output_shape()) &&
- verifier.EndTable();
- }
-};
-
-struct TransposeConv2dAttributeBuilder {
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_outpad(flatbuffers::Offset<flatbuffers::Vector<int32_t>> outpad) {
- fbb_.AddOffset(TransposeConv2dAttribute::VT_OUTPAD, outpad);
- }
- void add_stride(flatbuffers::Offset<flatbuffers::Vector<int32_t>> stride) {
- fbb_.AddOffset(TransposeConv2dAttribute::VT_STRIDE, stride);
- }
- void add_dilation(flatbuffers::Offset<flatbuffers::Vector<int32_t>> dilation) {
- fbb_.AddOffset(TransposeConv2dAttribute::VT_DILATION, dilation);
- }
- void add_output_shape(flatbuffers::Offset<flatbuffers::Vector<int32_t>> output_shape) {
- fbb_.AddOffset(TransposeConv2dAttribute::VT_OUTPUT_SHAPE, output_shape);
- }
- explicit TransposeConv2dAttributeBuilder(flatbuffers::FlatBufferBuilder &_fbb)
- : fbb_(_fbb) {
- start_ = fbb_.StartTable();
- }
- TransposeConv2dAttributeBuilder &operator=(const TransposeConv2dAttributeBuilder &);
- flatbuffers::Offset<TransposeConv2dAttribute> Finish() {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<TransposeConv2dAttribute>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<TransposeConv2dAttribute> CreateTransposeConv2dAttribute(
- flatbuffers::FlatBufferBuilder &_fbb,
- flatbuffers::Offset<flatbuffers::Vector<int32_t>> outpad = 0,
- flatbuffers::Offset<flatbuffers::Vector<int32_t>> stride = 0,
- flatbuffers::Offset<flatbuffers::Vector<int32_t>> dilation = 0,
- flatbuffers::Offset<flatbuffers::Vector<int32_t>> output_shape = 0) {
- TransposeConv2dAttributeBuilder builder_(_fbb);
- builder_.add_output_shape(output_shape);
- builder_.add_dilation(dilation);
- builder_.add_stride(stride);
- builder_.add_outpad(outpad);
- return builder_.Finish();
-}
-
-inline flatbuffers::Offset<TransposeConv2dAttribute> CreateTransposeConv2dAttributeDirect(
- flatbuffers::FlatBufferBuilder &_fbb,
- const std::vector<int32_t> *outpad = nullptr,
- const std::vector<int32_t> *stride = nullptr,
- const std::vector<int32_t> *dilation = nullptr,
- const std::vector<int32_t> *output_shape = nullptr) {
- auto outpad__ = outpad ? _fbb.CreateVector<int32_t>(*outpad) : 0;
- auto stride__ = stride ? _fbb.CreateVector<int32_t>(*stride) : 0;
- auto dilation__ = dilation ? _fbb.CreateVector<int32_t>(*dilation) : 0;
- auto output_shape__ = output_shape ? _fbb.CreateVector<int32_t>(*output_shape) : 0;
- return tosa::CreateTransposeConv2dAttribute(
- _fbb,
- outpad__,
- stride__,
- dilation__,
- output_shape__);
-}
-
-struct ReluNAttribute FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
- enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
- VT_MAX_INT = 4,
- VT_MAX_FP = 6
- };
- int32_t max_int() const {
- return GetField<int32_t>(VT_MAX_INT, 0);
- }
- float max_fp() const {
- return GetField<float>(VT_MAX_FP, 0.0f);
- }
- bool Verify(flatbuffers::Verifier &verifier) const {
- return VerifyTableStart(verifier) &&
- VerifyField<int32_t>(verifier, VT_MAX_INT) &&
- VerifyField<float>(verifier, VT_MAX_FP) &&
- verifier.EndTable();
- }
-};
-
-struct ReluNAttributeBuilder {
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_max_int(int32_t max_int) {
- fbb_.AddElement<int32_t>(ReluNAttribute::VT_MAX_INT, max_int, 0);
- }
- void add_max_fp(float max_fp) {
- fbb_.AddElement<float>(ReluNAttribute::VT_MAX_FP, max_fp, 0.0f);
- }
- explicit ReluNAttributeBuilder(flatbuffers::FlatBufferBuilder &_fbb)
- : fbb_(_fbb) {
- start_ = fbb_.StartTable();
- }
- ReluNAttributeBuilder &operator=(const ReluNAttributeBuilder &);
- flatbuffers::Offset<ReluNAttribute> Finish() {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<ReluNAttribute>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<ReluNAttribute> CreateReluNAttribute(
- flatbuffers::FlatBufferBuilder &_fbb,
- int32_t max_int = 0,
- float max_fp = 0.0f) {
- ReluNAttributeBuilder builder_(_fbb);
- builder_.add_max_fp(max_fp);
- builder_.add_max_int(max_int);
- return builder_.Finish();
-}
-
-struct AxisAttribute FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
- enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
- VT_AXIS = 4
- };
- int32_t axis() const {
- return GetField<int32_t>(VT_AXIS, 0);
- }
- bool Verify(flatbuffers::Verifier &verifier) const {
- return VerifyTableStart(verifier) &&
- VerifyField<int32_t>(verifier, VT_AXIS) &&
- verifier.EndTable();
- }
-};
-
-struct AxisAttributeBuilder {
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_axis(int32_t axis) {
- fbb_.AddElement<int32_t>(AxisAttribute::VT_AXIS, axis, 0);
- }
- explicit AxisAttributeBuilder(flatbuffers::FlatBufferBuilder &_fbb)
- : fbb_(_fbb) {
- start_ = fbb_.StartTable();
- }
- AxisAttributeBuilder &operator=(const AxisAttributeBuilder &);
- flatbuffers::Offset<AxisAttribute> Finish() {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<AxisAttribute>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<AxisAttribute> CreateAxisAttribute(
- flatbuffers::FlatBufferBuilder &_fbb,
- int32_t axis = 0) {
- AxisAttributeBuilder builder_(_fbb);
- builder_.add_axis(axis);
- return builder_.Finish();
-}
-
-struct ReshapeAttribute FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
- enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
- VT_SHAPE = 4
- };
- const flatbuffers::Vector<int32_t> *shape() const {
- return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_SHAPE);
- }
- bool Verify(flatbuffers::Verifier &verifier) const {
- return VerifyTableStart(verifier) &&
- VerifyOffset(verifier, VT_SHAPE) &&
- verifier.VerifyVector(shape()) &&
- verifier.EndTable();
- }
-};
-
-struct ReshapeAttributeBuilder {
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_shape(flatbuffers::Offset<flatbuffers::Vector<int32_t>> shape) {
- fbb_.AddOffset(ReshapeAttribute::VT_SHAPE, shape);
- }
- explicit ReshapeAttributeBuilder(flatbuffers::FlatBufferBuilder &_fbb)
- : fbb_(_fbb) {
- start_ = fbb_.StartTable();
- }
- ReshapeAttributeBuilder &operator=(const ReshapeAttributeBuilder &);
- flatbuffers::Offset<ReshapeAttribute> Finish() {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<ReshapeAttribute>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<ReshapeAttribute> CreateReshapeAttribute(
- flatbuffers::FlatBufferBuilder &_fbb,
- flatbuffers::Offset<flatbuffers::Vector<int32_t>> shape = 0) {
- ReshapeAttributeBuilder builder_(_fbb);
- builder_.add_shape(shape);
- return builder_.Finish();
-}
-
-inline flatbuffers::Offset<ReshapeAttribute> CreateReshapeAttributeDirect(
- flatbuffers::FlatBufferBuilder &_fbb,
- const std::vector<int32_t> *shape = nullptr) {
- auto shape__ = shape ? _fbb.CreateVector<int32_t>(*shape) : 0;
- return tosa::CreateReshapeAttribute(
- _fbb,
- shape__);
-}
-
-struct SliceAttribute FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
- enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
- VT_BEGIN = 4,
- VT_SIZE = 6
- };
- const flatbuffers::Vector<int32_t> *begin() const {
- return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_BEGIN);
- }
- const flatbuffers::Vector<int32_t> *size() const {
- return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_SIZE);
- }
- bool Verify(flatbuffers::Verifier &verifier) const {
- return VerifyTableStart(verifier) &&
- VerifyOffset(verifier, VT_BEGIN) &&
- verifier.VerifyVector(begin()) &&
- VerifyOffset(verifier, VT_SIZE) &&
- verifier.VerifyVector(size()) &&
- verifier.EndTable();
- }
-};
-
-struct SliceAttributeBuilder {
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_begin(flatbuffers::Offset<flatbuffers::Vector<int32_t>> begin) {
- fbb_.AddOffset(SliceAttribute::VT_BEGIN, begin);
- }
- void add_size(flatbuffers::Offset<flatbuffers::Vector<int32_t>> size) {
- fbb_.AddOffset(SliceAttribute::VT_SIZE, size);
- }
- explicit SliceAttributeBuilder(flatbuffers::FlatBufferBuilder &_fbb)
- : fbb_(_fbb) {
- start_ = fbb_.StartTable();
- }
- SliceAttributeBuilder &operator=(const SliceAttributeBuilder &);
- flatbuffers::Offset<SliceAttribute> Finish() {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<SliceAttribute>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<SliceAttribute> CreateSliceAttribute(
- flatbuffers::FlatBufferBuilder &_fbb,
- flatbuffers::Offset<flatbuffers::Vector<int32_t>> begin = 0,
- flatbuffers::Offset<flatbuffers::Vector<int32_t>> size = 0) {
- SliceAttributeBuilder builder_(_fbb);
- builder_.add_size(size);
- builder_.add_begin(begin);
- return builder_.Finish();
-}
-
-inline flatbuffers::Offset<SliceAttribute> CreateSliceAttributeDirect(
- flatbuffers::FlatBufferBuilder &_fbb,
- const std::vector<int32_t> *begin = nullptr,
- const std::vector<int32_t> *size = nullptr) {
- auto begin__ = begin ? _fbb.CreateVector<int32_t>(*begin) : 0;
- auto size__ = size ? _fbb.CreateVector<int32_t>(*size) : 0;
- return tosa::CreateSliceAttribute(
- _fbb,
- begin__,
- size__);
-}
-
-struct TileAttribute FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
- enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
- VT_MULTIPLES = 4
- };
- const flatbuffers::Vector<int32_t> *multiples() const {
- return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_MULTIPLES);
- }
- bool Verify(flatbuffers::Verifier &verifier) const {
- return VerifyTableStart(verifier) &&
- VerifyOffset(verifier, VT_MULTIPLES) &&
- verifier.VerifyVector(multiples()) &&
- verifier.EndTable();
- }
-};
-
-struct TileAttributeBuilder {
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_multiples(flatbuffers::Offset<flatbuffers::Vector<int32_t>> multiples) {
- fbb_.AddOffset(TileAttribute::VT_MULTIPLES, multiples);
- }
- explicit TileAttributeBuilder(flatbuffers::FlatBufferBuilder &_fbb)
- : fbb_(_fbb) {
- start_ = fbb_.StartTable();
- }
- TileAttributeBuilder &operator=(const TileAttributeBuilder &);
- flatbuffers::Offset<TileAttribute> Finish() {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<TileAttribute>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<TileAttribute> CreateTileAttribute(
- flatbuffers::FlatBufferBuilder &_fbb,
- flatbuffers::Offset<flatbuffers::Vector<int32_t>> multiples = 0) {
- TileAttributeBuilder builder_(_fbb);
- builder_.add_multiples(multiples);
- return builder_.Finish();
-}
-
-inline flatbuffers::Offset<TileAttribute> CreateTileAttributeDirect(
- flatbuffers::FlatBufferBuilder &_fbb,
- const std::vector<int32_t> *multiples = nullptr) {
- auto multiples__ = multiples ? _fbb.CreateVector<int32_t>(*multiples) : 0;
- return tosa::CreateTileAttribute(
- _fbb,
- multiples__);
-}
-
-struct ResizeAttribute FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
- enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
- VT_OUTPUT_SIZE = 4,
- VT_STRIDE = 6,
- VT_OFFSET = 8,
- VT_SHIFT = 10,
- VT_STRIDE_FP = 12,
- VT_OFFSET_FP = 14,
- VT_MODE = 16
- };
- const flatbuffers::Vector<int32_t> *output_size() const {
- return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_OUTPUT_SIZE);
- }
- const flatbuffers::Vector<int32_t> *stride() const {
- return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_STRIDE);
- }
- const flatbuffers::Vector<int32_t> *offset() const {
- return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_OFFSET);
- }
- int32_t shift() const {
- return GetField<int32_t>(VT_SHIFT, 0);
- }
- const flatbuffers::Vector<float> *stride_fp() const {
- return GetPointer<const flatbuffers::Vector<float> *>(VT_STRIDE_FP);
- }
- const flatbuffers::Vector<float> *offset_fp() const {
- return GetPointer<const flatbuffers::Vector<float> *>(VT_OFFSET_FP);
- }
- ResizeMode mode() const {
- return static_cast<ResizeMode>(GetField<uint32_t>(VT_MODE, 0));
- }
- bool Verify(flatbuffers::Verifier &verifier) const {
- return VerifyTableStart(verifier) &&
- VerifyOffset(verifier, VT_OUTPUT_SIZE) &&
- verifier.VerifyVector(output_size()) &&
- VerifyOffset(verifier, VT_STRIDE) &&
- verifier.VerifyVector(stride()) &&
- VerifyOffset(verifier, VT_OFFSET) &&
- verifier.VerifyVector(offset()) &&
- VerifyField<int32_t>(verifier, VT_SHIFT) &&
- VerifyOffset(verifier, VT_STRIDE_FP) &&
- verifier.VerifyVector(stride_fp()) &&
- VerifyOffset(verifier, VT_OFFSET_FP) &&
- verifier.VerifyVector(offset_fp()) &&
- VerifyField<uint32_t>(verifier, VT_MODE) &&
- verifier.EndTable();
- }
-};
-
-struct ResizeAttributeBuilder {
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_output_size(flatbuffers::Offset<flatbuffers::Vector<int32_t>> output_size) {
- fbb_.AddOffset(ResizeAttribute::VT_OUTPUT_SIZE, output_size);
- }
- void add_stride(flatbuffers::Offset<flatbuffers::Vector<int32_t>> stride) {
- fbb_.AddOffset(ResizeAttribute::VT_STRIDE, stride);
- }
- void add_offset(flatbuffers::Offset<flatbuffers::Vector<int32_t>> offset) {
- fbb_.AddOffset(ResizeAttribute::VT_OFFSET, offset);
- }
- void add_shift(int32_t shift) {
- fbb_.AddElement<int32_t>(ResizeAttribute::VT_SHIFT, shift, 0);
- }
- void add_stride_fp(flatbuffers::Offset<flatbuffers::Vector<float>> stride_fp) {
- fbb_.AddOffset(ResizeAttribute::VT_STRIDE_FP, stride_fp);
- }
- void add_offset_fp(flatbuffers::Offset<flatbuffers::Vector<float>> offset_fp) {
- fbb_.AddOffset(ResizeAttribute::VT_OFFSET_FP, offset_fp);
- }
- void add_mode(ResizeMode mode) {
- fbb_.AddElement<uint32_t>(ResizeAttribute::VT_MODE, static_cast<uint32_t>(mode), 0);
- }
- explicit ResizeAttributeBuilder(flatbuffers::FlatBufferBuilder &_fbb)
- : fbb_(_fbb) {
- start_ = fbb_.StartTable();
- }
- ResizeAttributeBuilder &operator=(const ResizeAttributeBuilder &);
- flatbuffers::Offset<ResizeAttribute> Finish() {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<ResizeAttribute>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<ResizeAttribute> CreateResizeAttribute(
- flatbuffers::FlatBufferBuilder &_fbb,
- flatbuffers::Offset<flatbuffers::Vector<int32_t>> output_size = 0,
- flatbuffers::Offset<flatbuffers::Vector<int32_t>> stride = 0,
- flatbuffers::Offset<flatbuffers::Vector<int32_t>> offset = 0,
- int32_t shift = 0,
- flatbuffers::Offset<flatbuffers::Vector<float>> stride_fp = 0,
- flatbuffers::Offset<flatbuffers::Vector<float>> offset_fp = 0,
- ResizeMode mode = ResizeMode_UNKNOWN) {
- ResizeAttributeBuilder builder_(_fbb);
- builder_.add_mode(mode);
- builder_.add_offset_fp(offset_fp);
- builder_.add_stride_fp(stride_fp);
- builder_.add_shift(shift);
- builder_.add_offset(offset);
- builder_.add_stride(stride);
- builder_.add_output_size(output_size);
- return builder_.Finish();
-}
-
-inline flatbuffers::Offset<ResizeAttribute> CreateResizeAttributeDirect(
- flatbuffers::FlatBufferBuilder &_fbb,
- const std::vector<int32_t> *output_size = nullptr,
- const std::vector<int32_t> *stride = nullptr,
- const std::vector<int32_t> *offset = nullptr,
- int32_t shift = 0,
- const std::vector<float> *stride_fp = nullptr,
- const std::vector<float> *offset_fp = nullptr,
- ResizeMode mode = ResizeMode_UNKNOWN) {
- auto output_size__ = output_size ? _fbb.CreateVector<int32_t>(*output_size) : 0;
- auto stride__ = stride ? _fbb.CreateVector<int32_t>(*stride) : 0;
- auto offset__ = offset ? _fbb.CreateVector<int32_t>(*offset) : 0;
- auto stride_fp__ = stride_fp ? _fbb.CreateVector<float>(*stride_fp) : 0;
- auto offset_fp__ = offset_fp ? _fbb.CreateVector<float>(*offset_fp) : 0;
- return tosa::CreateResizeAttribute(
- _fbb,
- output_size__,
- stride__,
- offset__,
- shift,
- stride_fp__,
- offset_fp__,
- mode);
-}
-
-struct ClampAttribute FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
- enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
- VT_MIN_INT = 4,
- VT_MAX_INT = 6,
- VT_MIN_FP = 8,
- VT_MAX_FP = 10
- };
- int32_t min_int() const {
- return GetField<int32_t>(VT_MIN_INT, 0);
- }
- int32_t max_int() const {
- return GetField<int32_t>(VT_MAX_INT, 0);
- }
- float min_fp() const {
- return GetField<float>(VT_MIN_FP, 0.0f);
- }
- float max_fp() const {
- return GetField<float>(VT_MAX_FP, 0.0f);
- }
- bool Verify(flatbuffers::Verifier &verifier) const {
- return VerifyTableStart(verifier) &&
- VerifyField<int32_t>(verifier, VT_MIN_INT) &&
- VerifyField<int32_t>(verifier, VT_MAX_INT) &&
- VerifyField<float>(verifier, VT_MIN_FP) &&
- VerifyField<float>(verifier, VT_MAX_FP) &&
- verifier.EndTable();
- }
-};
-
-struct ClampAttributeBuilder {
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_min_int(int32_t min_int) {
- fbb_.AddElement<int32_t>(ClampAttribute::VT_MIN_INT, min_int, 0);
- }
- void add_max_int(int32_t max_int) {
- fbb_.AddElement<int32_t>(ClampAttribute::VT_MAX_INT, max_int, 0);
- }
- void add_min_fp(float min_fp) {
- fbb_.AddElement<float>(ClampAttribute::VT_MIN_FP, min_fp, 0.0f);
- }
- void add_max_fp(float max_fp) {
- fbb_.AddElement<float>(ClampAttribute::VT_MAX_FP, max_fp, 0.0f);
- }
- explicit ClampAttributeBuilder(flatbuffers::FlatBufferBuilder &_fbb)
- : fbb_(_fbb) {
- start_ = fbb_.StartTable();
- }
- ClampAttributeBuilder &operator=(const ClampAttributeBuilder &);
- flatbuffers::Offset<ClampAttribute> Finish() {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<ClampAttribute>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<ClampAttribute> CreateClampAttribute(
- flatbuffers::FlatBufferBuilder &_fbb,
- int32_t min_int = 0,
- int32_t max_int = 0,
- float min_fp = 0.0f,
- float max_fp = 0.0f) {
- ClampAttributeBuilder builder_(_fbb);
- builder_.add_max_fp(max_fp);
- builder_.add_min_fp(min_fp);
- builder_.add_max_int(max_int);
- builder_.add_min_int(min_int);
- return builder_.Finish();
-}
-
-struct RescaleAttribute FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
- enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
- VT_INPUT_ZP = 4,
- VT_OUTPUT_ZP = 6,
- VT_MULTIPLIER = 8,
- VT_SHIFT = 10,
- VT_SCALE32 = 12,
- VT_DOUBLE_ROUND = 14,
- VT_PER_CHANNEL = 16
- };
- int32_t input_zp() const {
- return GetField<int32_t>(VT_INPUT_ZP, 0);
- }
- int32_t output_zp() const {
- return GetField<int32_t>(VT_OUTPUT_ZP, 0);
- }
- const flatbuffers::Vector<int32_t> *multiplier() const {
- return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_MULTIPLIER);
- }
- const flatbuffers::Vector<int32_t> *shift() const {
- return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_SHIFT);
- }
- bool scale32() const {
- return GetField<uint8_t>(VT_SCALE32, 0) != 0;
- }
- bool double_round() const {
- return GetField<uint8_t>(VT_DOUBLE_ROUND, 0) != 0;
- }
- bool per_channel() const {
- return GetField<uint8_t>(VT_PER_CHANNEL, 0) != 0;
- }
- bool Verify(flatbuffers::Verifier &verifier) const {
- return VerifyTableStart(verifier) &&
- VerifyField<int32_t>(verifier, VT_INPUT_ZP) &&
- VerifyField<int32_t>(verifier, VT_OUTPUT_ZP) &&
- VerifyOffset(verifier, VT_MULTIPLIER) &&
- verifier.VerifyVector(multiplier()) &&
- VerifyOffset(verifier, VT_SHIFT) &&
- verifier.VerifyVector(shift()) &&
- VerifyField<uint8_t>(verifier, VT_SCALE32) &&
- VerifyField<uint8_t>(verifier, VT_DOUBLE_ROUND) &&
- VerifyField<uint8_t>(verifier, VT_PER_CHANNEL) &&
- verifier.EndTable();
- }
-};
-
-struct RescaleAttributeBuilder {
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_input_zp(int32_t input_zp) {
- fbb_.AddElement<int32_t>(RescaleAttribute::VT_INPUT_ZP, input_zp, 0);
- }
- void add_output_zp(int32_t output_zp) {
- fbb_.AddElement<int32_t>(RescaleAttribute::VT_OUTPUT_ZP, output_zp, 0);
- }
- void add_multiplier(flatbuffers::Offset<flatbuffers::Vector<int32_t>> multiplier) {
- fbb_.AddOffset(RescaleAttribute::VT_MULTIPLIER, multiplier);
- }
- void add_shift(flatbuffers::Offset<flatbuffers::Vector<int32_t>> shift) {
- fbb_.AddOffset(RescaleAttribute::VT_SHIFT, shift);
- }
- void add_scale32(bool scale32) {
- fbb_.AddElement<uint8_t>(RescaleAttribute::VT_SCALE32, static_cast<uint8_t>(scale32), 0);
- }
- void add_double_round(bool double_round) {
- fbb_.AddElement<uint8_t>(RescaleAttribute::VT_DOUBLE_ROUND, static_cast<uint8_t>(double_round), 0);
- }
- void add_per_channel(bool per_channel) {
- fbb_.AddElement<uint8_t>(RescaleAttribute::VT_PER_CHANNEL, static_cast<uint8_t>(per_channel), 0);
- }
- explicit RescaleAttributeBuilder(flatbuffers::FlatBufferBuilder &_fbb)
- : fbb_(_fbb) {
- start_ = fbb_.StartTable();
- }
- RescaleAttributeBuilder &operator=(const RescaleAttributeBuilder &);
- flatbuffers::Offset<RescaleAttribute> Finish() {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<RescaleAttribute>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<RescaleAttribute> CreateRescaleAttribute(
- flatbuffers::FlatBufferBuilder &_fbb,
- int32_t input_zp = 0,
- int32_t output_zp = 0,
- flatbuffers::Offset<flatbuffers::Vector<int32_t>> multiplier = 0,
- flatbuffers::Offset<flatbuffers::Vector<int32_t>> shift = 0,
- bool scale32 = false,
- bool double_round = false,
- bool per_channel = false) {
- RescaleAttributeBuilder builder_(_fbb);
- builder_.add_shift(shift);
- builder_.add_multiplier(multiplier);
- builder_.add_output_zp(output_zp);
- builder_.add_input_zp(input_zp);
- builder_.add_per_channel(per_channel);
- builder_.add_double_round(double_round);
- builder_.add_scale32(scale32);
- return builder_.Finish();
-}
-
-inline flatbuffers::Offset<RescaleAttribute> CreateRescaleAttributeDirect(
- flatbuffers::FlatBufferBuilder &_fbb,
- int32_t input_zp = 0,
- int32_t output_zp = 0,
- const std::vector<int32_t> *multiplier = nullptr,
- const std::vector<int32_t> *shift = nullptr,
- bool scale32 = false,
- bool double_round = false,
- bool per_channel = false) {
- auto multiplier__ = multiplier ? _fbb.CreateVector<int32_t>(*multiplier) : 0;
- auto shift__ = shift ? _fbb.CreateVector<int32_t>(*shift) : 0;
- return tosa::CreateRescaleAttribute(
- _fbb,
- input_zp,
- output_zp,
- multiplier__,
- shift__,
- scale32,
- double_round,
- per_channel);
-}
-
-struct MulAttribute FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
- enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
- VT_SHIFT = 4
- };
- int32_t shift() const {
- return GetField<int32_t>(VT_SHIFT, 0);
- }
- bool Verify(flatbuffers::Verifier &verifier) const {
- return VerifyTableStart(verifier) &&
- VerifyField<int32_t>(verifier, VT_SHIFT) &&
- verifier.EndTable();
- }
-};
-
-struct MulAttributeBuilder {
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_shift(int32_t shift) {
- fbb_.AddElement<int32_t>(MulAttribute::VT_SHIFT, shift, 0);
- }
- explicit MulAttributeBuilder(flatbuffers::FlatBufferBuilder &_fbb)
- : fbb_(_fbb) {
- start_ = fbb_.StartTable();
- }
- MulAttributeBuilder &operator=(const MulAttributeBuilder &);
- flatbuffers::Offset<MulAttribute> Finish() {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<MulAttribute>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<MulAttribute> CreateMulAttribute(
- flatbuffers::FlatBufferBuilder &_fbb,
- int32_t shift = 0) {
- MulAttributeBuilder builder_(_fbb);
- builder_.add_shift(shift);
- return builder_.Finish();
-}
-
-struct ArithmeticRightShiftAttribute FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
- enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
- VT_ROUND = 4
- };
- bool round() const {
- return GetField<uint8_t>(VT_ROUND, 0) != 0;
- }
- bool Verify(flatbuffers::Verifier &verifier) const {
- return VerifyTableStart(verifier) &&
- VerifyField<uint8_t>(verifier, VT_ROUND) &&
- verifier.EndTable();
- }
-};
-
-struct ArithmeticRightShiftAttributeBuilder {
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_round(bool round) {
- fbb_.AddElement<uint8_t>(ArithmeticRightShiftAttribute::VT_ROUND, static_cast<uint8_t>(round), 0);
- }
- explicit ArithmeticRightShiftAttributeBuilder(flatbuffers::FlatBufferBuilder &_fbb)
- : fbb_(_fbb) {
- start_ = fbb_.StartTable();
- }
- ArithmeticRightShiftAttributeBuilder &operator=(const ArithmeticRightShiftAttributeBuilder &);
- flatbuffers::Offset<ArithmeticRightShiftAttribute> Finish() {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<ArithmeticRightShiftAttribute>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<ArithmeticRightShiftAttribute> CreateArithmeticRightShiftAttribute(
- flatbuffers::FlatBufferBuilder &_fbb,
- bool round = false) {
- ArithmeticRightShiftAttributeBuilder builder_(_fbb);
- builder_.add_round(round);
- return builder_.Finish();
-}
-
-struct CondIfAttribute FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
- enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
- VT_THEN_BRANCH = 4,
- VT_ELSE_BRANCH = 6
- };
- const flatbuffers::String *then_branch() const {
- return GetPointer<const flatbuffers::String *>(VT_THEN_BRANCH);
- }
- const flatbuffers::String *else_branch() const {
- return GetPointer<const flatbuffers::String *>(VT_ELSE_BRANCH);
- }
- bool Verify(flatbuffers::Verifier &verifier) const {
- return VerifyTableStart(verifier) &&
- VerifyOffset(verifier, VT_THEN_BRANCH) &&
- verifier.VerifyString(then_branch()) &&
- VerifyOffset(verifier, VT_ELSE_BRANCH) &&
- verifier.VerifyString(else_branch()) &&
- verifier.EndTable();
- }
-};
-
-struct CondIfAttributeBuilder {
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_then_branch(flatbuffers::Offset<flatbuffers::String> then_branch) {
- fbb_.AddOffset(CondIfAttribute::VT_THEN_BRANCH, then_branch);
- }
- void add_else_branch(flatbuffers::Offset<flatbuffers::String> else_branch) {
- fbb_.AddOffset(CondIfAttribute::VT_ELSE_BRANCH, else_branch);
- }
- explicit CondIfAttributeBuilder(flatbuffers::FlatBufferBuilder &_fbb)
- : fbb_(_fbb) {
- start_ = fbb_.StartTable();
- }
- CondIfAttributeBuilder &operator=(const CondIfAttributeBuilder &);
- flatbuffers::Offset<CondIfAttribute> Finish() {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<CondIfAttribute>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<CondIfAttribute> CreateCondIfAttribute(
- flatbuffers::FlatBufferBuilder &_fbb,
- flatbuffers::Offset<flatbuffers::String> then_branch = 0,
- flatbuffers::Offset<flatbuffers::String> else_branch = 0) {
- CondIfAttributeBuilder builder_(_fbb);
- builder_.add_else_branch(else_branch);
- builder_.add_then_branch(then_branch);
- return builder_.Finish();
-}
-
-inline flatbuffers::Offset<CondIfAttribute> CreateCondIfAttributeDirect(
- flatbuffers::FlatBufferBuilder &_fbb,
- const char *then_branch = nullptr,
- const char *else_branch = nullptr) {
- auto then_branch__ = then_branch ? _fbb.CreateString(then_branch) : 0;
- auto else_branch__ = else_branch ? _fbb.CreateString(else_branch) : 0;
- return tosa::CreateCondIfAttribute(
- _fbb,
- then_branch__,
- else_branch__);
-}
-
-struct WhileLoopAttribute FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
- enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
- VT_COND_BRANCH = 4,
- VT_BODY_BRANCH = 6
- };
- const flatbuffers::String *cond_branch() const {
- return GetPointer<const flatbuffers::String *>(VT_COND_BRANCH);
- }
- const flatbuffers::String *body_branch() const {
- return GetPointer<const flatbuffers::String *>(VT_BODY_BRANCH);
- }
- bool Verify(flatbuffers::Verifier &verifier) const {
- return VerifyTableStart(verifier) &&
- VerifyOffset(verifier, VT_COND_BRANCH) &&
- verifier.VerifyString(cond_branch()) &&
- VerifyOffset(verifier, VT_BODY_BRANCH) &&
- verifier.VerifyString(body_branch()) &&
- verifier.EndTable();
- }
-};
-
-struct WhileLoopAttributeBuilder {
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_cond_branch(flatbuffers::Offset<flatbuffers::String> cond_branch) {
- fbb_.AddOffset(WhileLoopAttribute::VT_COND_BRANCH, cond_branch);
- }
- void add_body_branch(flatbuffers::Offset<flatbuffers::String> body_branch) {
- fbb_.AddOffset(WhileLoopAttribute::VT_BODY_BRANCH, body_branch);
- }
- explicit WhileLoopAttributeBuilder(flatbuffers::FlatBufferBuilder &_fbb)
- : fbb_(_fbb) {
- start_ = fbb_.StartTable();
- }
- WhileLoopAttributeBuilder &operator=(const WhileLoopAttributeBuilder &);
- flatbuffers::Offset<WhileLoopAttribute> Finish() {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<WhileLoopAttribute>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<WhileLoopAttribute> CreateWhileLoopAttribute(
- flatbuffers::FlatBufferBuilder &_fbb,
- flatbuffers::Offset<flatbuffers::String> cond_branch = 0,
- flatbuffers::Offset<flatbuffers::String> body_branch = 0) {
- WhileLoopAttributeBuilder builder_(_fbb);
- builder_.add_body_branch(body_branch);
- builder_.add_cond_branch(cond_branch);
- return builder_.Finish();
-}
-
-inline flatbuffers::Offset<WhileLoopAttribute> CreateWhileLoopAttributeDirect(
- flatbuffers::FlatBufferBuilder &_fbb,
- const char *cond_branch = nullptr,
- const char *body_branch = nullptr) {
- auto cond_branch__ = cond_branch ? _fbb.CreateString(cond_branch) : 0;
- auto body_branch__ = body_branch ? _fbb.CreateString(body_branch) : 0;
- return tosa::CreateWhileLoopAttribute(
- _fbb,
- cond_branch__,
- body_branch__);
-}
-
-struct UnaryQuantInfo FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
- enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
- VT_INPUT_ZP = 4,
- VT_OUTPUT_ZP = 6
- };
- int32_t input_zp() const {
- return GetField<int32_t>(VT_INPUT_ZP, 0);
- }
- int32_t output_zp() const {
- return GetField<int32_t>(VT_OUTPUT_ZP, 0);
- }
- bool Verify(flatbuffers::Verifier &verifier) const {
- return VerifyTableStart(verifier) &&
- VerifyField<int32_t>(verifier, VT_INPUT_ZP) &&
- VerifyField<int32_t>(verifier, VT_OUTPUT_ZP) &&
- verifier.EndTable();
- }
-};
-
-struct UnaryQuantInfoBuilder {
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_input_zp(int32_t input_zp) {
- fbb_.AddElement<int32_t>(UnaryQuantInfo::VT_INPUT_ZP, input_zp, 0);
- }
- void add_output_zp(int32_t output_zp) {
- fbb_.AddElement<int32_t>(UnaryQuantInfo::VT_OUTPUT_ZP, output_zp, 0);
- }
- explicit UnaryQuantInfoBuilder(flatbuffers::FlatBufferBuilder &_fbb)
- : fbb_(_fbb) {
- start_ = fbb_.StartTable();
- }
- UnaryQuantInfoBuilder &operator=(const UnaryQuantInfoBuilder &);
- flatbuffers::Offset<UnaryQuantInfo> Finish() {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<UnaryQuantInfo>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<UnaryQuantInfo> CreateUnaryQuantInfo(
- flatbuffers::FlatBufferBuilder &_fbb,
- int32_t input_zp = 0,
- int32_t output_zp = 0) {
- UnaryQuantInfoBuilder builder_(_fbb);
- builder_.add_output_zp(output_zp);
- builder_.add_input_zp(input_zp);
- return builder_.Finish();
-}
-
-struct ConvQuantInfo FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
- enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
- VT_INPUT_ZP = 4,
- VT_WEIGHT_ZP = 6
- };
- int32_t input_zp() const {
- return GetField<int32_t>(VT_INPUT_ZP, 0);
- }
- int32_t weight_zp() const {
- return GetField<int32_t>(VT_WEIGHT_ZP, 0);
- }
- bool Verify(flatbuffers::Verifier &verifier) const {
- return VerifyTableStart(verifier) &&
- VerifyField<int32_t>(verifier, VT_INPUT_ZP) &&
- VerifyField<int32_t>(verifier, VT_WEIGHT_ZP) &&
- verifier.EndTable();
- }
-};
-
-struct ConvQuantInfoBuilder {
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_input_zp(int32_t input_zp) {
- fbb_.AddElement<int32_t>(ConvQuantInfo::VT_INPUT_ZP, input_zp, 0);
- }
- void add_weight_zp(int32_t weight_zp) {
- fbb_.AddElement<int32_t>(ConvQuantInfo::VT_WEIGHT_ZP, weight_zp, 0);
- }
- explicit ConvQuantInfoBuilder(flatbuffers::FlatBufferBuilder &_fbb)
- : fbb_(_fbb) {
- start_ = fbb_.StartTable();
- }
- ConvQuantInfoBuilder &operator=(const ConvQuantInfoBuilder &);
- flatbuffers::Offset<ConvQuantInfo> Finish() {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<ConvQuantInfo>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<ConvQuantInfo> CreateConvQuantInfo(
- flatbuffers::FlatBufferBuilder &_fbb,
- int32_t input_zp = 0,
- int32_t weight_zp = 0) {
- ConvQuantInfoBuilder builder_(_fbb);
- builder_.add_weight_zp(weight_zp);
- builder_.add_input_zp(input_zp);
- return builder_.Finish();
-}
-
-struct MatMulQuantInfo FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
- enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
- VT_A_ZP = 4,
- VT_B_ZP = 6
- };
- int32_t a_zp() const {
- return GetField<int32_t>(VT_A_ZP, 0);
- }
- int32_t b_zp() const {
- return GetField<int32_t>(VT_B_ZP, 0);
- }
- bool Verify(flatbuffers::Verifier &verifier) const {
- return VerifyTableStart(verifier) &&
- VerifyField<int32_t>(verifier, VT_A_ZP) &&
- VerifyField<int32_t>(verifier, VT_B_ZP) &&
- verifier.EndTable();
- }
-};
-
-struct MatMulQuantInfoBuilder {
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_a_zp(int32_t a_zp) {
- fbb_.AddElement<int32_t>(MatMulQuantInfo::VT_A_ZP, a_zp, 0);
- }
- void add_b_zp(int32_t b_zp) {
- fbb_.AddElement<int32_t>(MatMulQuantInfo::VT_B_ZP, b_zp, 0);
- }
- explicit MatMulQuantInfoBuilder(flatbuffers::FlatBufferBuilder &_fbb)
- : fbb_(_fbb) {
- start_ = fbb_.StartTable();
- }
- MatMulQuantInfoBuilder &operator=(const MatMulQuantInfoBuilder &);
- flatbuffers::Offset<MatMulQuantInfo> Finish() {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<MatMulQuantInfo>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<MatMulQuantInfo> CreateMatMulQuantInfo(
- flatbuffers::FlatBufferBuilder &_fbb,
- int32_t a_zp = 0,
- int32_t b_zp = 0) {
- MatMulQuantInfoBuilder builder_(_fbb);
- builder_.add_b_zp(b_zp);
- builder_.add_a_zp(a_zp);
- return builder_.Finish();
-}
-
-struct PadQuantInfo FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
- enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
- VT_INPUT_ZP = 4
- };
- int32_t input_zp() const {
- return GetField<int32_t>(VT_INPUT_ZP, 0);
- }
- bool Verify(flatbuffers::Verifier &verifier) const {
- return VerifyTableStart(verifier) &&
- VerifyField<int32_t>(verifier, VT_INPUT_ZP) &&
- verifier.EndTable();
- }
-};
-
-struct PadQuantInfoBuilder {
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_input_zp(int32_t input_zp) {
- fbb_.AddElement<int32_t>(PadQuantInfo::VT_INPUT_ZP, input_zp, 0);
- }
- explicit PadQuantInfoBuilder(flatbuffers::FlatBufferBuilder &_fbb)
- : fbb_(_fbb) {
- start_ = fbb_.StartTable();
- }
- PadQuantInfoBuilder &operator=(const PadQuantInfoBuilder &);
- flatbuffers::Offset<PadQuantInfo> Finish() {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<PadQuantInfo>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<PadQuantInfo> CreatePadQuantInfo(
- flatbuffers::FlatBufferBuilder &_fbb,
- int32_t input_zp = 0) {
- PadQuantInfoBuilder builder_(_fbb);
- builder_.add_input_zp(input_zp);
- return builder_.Finish();
-}
-
-struct Version FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
- enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
- VT__MAJOR = 4,
- VT__MINOR = 6,
- VT__PATCH = 8,
- VT__EXPERIMENTAL = 10
- };
- int32_t _major() const {
- return GetField<int32_t>(VT__MAJOR, 0);
- }
- int32_t _minor() const {
- return GetField<int32_t>(VT__MINOR, 21);
- }
- int32_t _patch() const {
- return GetField<int32_t>(VT__PATCH, 0);
- }
- bool _experimental() const {
- return GetField<uint8_t>(VT__EXPERIMENTAL, 0) != 0;
- }
- bool Verify(flatbuffers::Verifier &verifier) const {
- return VerifyTableStart(verifier) &&
- VerifyField<int32_t>(verifier, VT__MAJOR) &&
- VerifyField<int32_t>(verifier, VT__MINOR) &&
- VerifyField<int32_t>(verifier, VT__PATCH) &&
- VerifyField<uint8_t>(verifier, VT__EXPERIMENTAL) &&
- verifier.EndTable();
- }
-};
-
-struct VersionBuilder {
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add__major(int32_t _major) {
- fbb_.AddElement<int32_t>(Version::VT__MAJOR, _major, 0);
- }
- void add__minor(int32_t _minor) {
- fbb_.AddElement<int32_t>(Version::VT__MINOR, _minor, 21);
- }
- void add__patch(int32_t _patch) {
- fbb_.AddElement<int32_t>(Version::VT__PATCH, _patch, 0);
- }
- void add__experimental(bool _experimental) {
- fbb_.AddElement<uint8_t>(Version::VT__EXPERIMENTAL, static_cast<uint8_t>(_experimental), 0);
- }
- explicit VersionBuilder(flatbuffers::FlatBufferBuilder &_fbb)
- : fbb_(_fbb) {
- start_ = fbb_.StartTable();
- }
- VersionBuilder &operator=(const VersionBuilder &);
- flatbuffers::Offset<Version> Finish() {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<Version>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<Version> CreateVersion(
- flatbuffers::FlatBufferBuilder &_fbb,
- int32_t _major = 0,
- int32_t _minor = 21,
- int32_t _patch = 0,
- bool _experimental = false) {
- VersionBuilder builder_(_fbb);
- builder_.add__patch(_patch);
- builder_.add__minor(_minor);
- builder_.add__major(_major);
- builder_.add__experimental(_experimental);
- return builder_.Finish();
-}
-
-struct TosaTensor FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
- enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
- VT_NAME = 4,
- VT_SHAPE = 6,
- VT_TYPE = 8,
- VT_USAGE = 10,
- VT_FORMAT = 12,
- VT_NPY_FILENAME = 14
- };
- const flatbuffers::String *name() const {
- return GetPointer<const flatbuffers::String *>(VT_NAME);
- }
- const flatbuffers::Vector<int32_t> *shape() const {
- return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_SHAPE);
- }
- DType type() const {
- return static_cast<DType>(GetField<uint32_t>(VT_TYPE, 0));
- }
- const flatbuffers::Vector<uint32_t> *usage() const {
- return GetPointer<const flatbuffers::Vector<uint32_t> *>(VT_USAGE);
- }
- const flatbuffers::Vector<uint32_t> *format() const {
- return GetPointer<const flatbuffers::Vector<uint32_t> *>(VT_FORMAT);
- }
- const flatbuffers::String *npy_filename() const {
- return GetPointer<const flatbuffers::String *>(VT_NPY_FILENAME);
- }
- bool Verify(flatbuffers::Verifier &verifier) const {
- return VerifyTableStart(verifier) &&
- VerifyOffset(verifier, VT_NAME) &&
- verifier.VerifyString(name()) &&
- VerifyOffset(verifier, VT_SHAPE) &&
- verifier.VerifyVector(shape()) &&
- VerifyField<uint32_t>(verifier, VT_TYPE) &&
- VerifyOffset(verifier, VT_USAGE) &&
- verifier.VerifyVector(usage()) &&
- VerifyOffset(verifier, VT_FORMAT) &&
- verifier.VerifyVector(format()) &&
- VerifyOffset(verifier, VT_NPY_FILENAME) &&
- verifier.VerifyString(npy_filename()) &&
- verifier.EndTable();
- }
-};
-
-struct TosaTensorBuilder {
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_name(flatbuffers::Offset<flatbuffers::String> name) {
- fbb_.AddOffset(TosaTensor::VT_NAME, name);
- }
- void add_shape(flatbuffers::Offset<flatbuffers::Vector<int32_t>> shape) {
- fbb_.AddOffset(TosaTensor::VT_SHAPE, shape);
- }
- void add_type(DType type) {
- fbb_.AddElement<uint32_t>(TosaTensor::VT_TYPE, static_cast<uint32_t>(type), 0);
- }
- void add_usage(flatbuffers::Offset<flatbuffers::Vector<uint32_t>> usage) {
- fbb_.AddOffset(TosaTensor::VT_USAGE, usage);
- }
- void add_format(flatbuffers::Offset<flatbuffers::Vector<uint32_t>> format) {
- fbb_.AddOffset(TosaTensor::VT_FORMAT, format);
- }
- void add_npy_filename(flatbuffers::Offset<flatbuffers::String> npy_filename) {
- fbb_.AddOffset(TosaTensor::VT_NPY_FILENAME, npy_filename);
- }
- explicit TosaTensorBuilder(flatbuffers::FlatBufferBuilder &_fbb)
- : fbb_(_fbb) {
- start_ = fbb_.StartTable();
- }
- TosaTensorBuilder &operator=(const TosaTensorBuilder &);
- flatbuffers::Offset<TosaTensor> Finish() {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<TosaTensor>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<TosaTensor> CreateTosaTensor(
- flatbuffers::FlatBufferBuilder &_fbb,
- flatbuffers::Offset<flatbuffers::String> name = 0,
- flatbuffers::Offset<flatbuffers::Vector<int32_t>> shape = 0,
- DType type = DType_UNKNOWN,
- flatbuffers::Offset<flatbuffers::Vector<uint32_t>> usage = 0,
- flatbuffers::Offset<flatbuffers::Vector<uint32_t>> format = 0,
- flatbuffers::Offset<flatbuffers::String> npy_filename = 0) {
- TosaTensorBuilder builder_(_fbb);
- builder_.add_npy_filename(npy_filename);
- builder_.add_format(format);
- builder_.add_usage(usage);
- builder_.add_type(type);
- builder_.add_shape(shape);
- builder_.add_name(name);
- return builder_.Finish();
-}
-
-inline flatbuffers::Offset<TosaTensor> CreateTosaTensorDirect(
- flatbuffers::FlatBufferBuilder &_fbb,
- const char *name = nullptr,
- const std::vector<int32_t> *shape = nullptr,
- DType type = DType_UNKNOWN,
- const std::vector<uint32_t> *usage = nullptr,
- const std::vector<uint32_t> *format = nullptr,
- const char *npy_filename = nullptr) {
- auto name__ = name ? _fbb.CreateString(name) : 0;
- auto shape__ = shape ? _fbb.CreateVector<int32_t>(*shape) : 0;
- auto usage__ = usage ? _fbb.CreateVector<uint32_t>(*usage) : 0;
- auto format__ = format ? _fbb.CreateVector<uint32_t>(*format) : 0;
- auto npy_filename__ = npy_filename ? _fbb.CreateString(npy_filename) : 0;
- return tosa::CreateTosaTensor(
- _fbb,
- name__,
- shape__,
- type,
- usage__,
- format__,
- npy_filename__);
-}
-
-struct TosaOperator FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
- enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
- VT_OP = 4,
- VT_ATTRIBUTE_TYPE = 6,
- VT_ATTRIBUTE = 8,
- VT_INPUTS = 10,
- VT_OUTPUTS = 12,
- VT_QUANT_INFO_TYPE = 14,
- VT_QUANT_INFO = 16
- };
- Op op() const {
- return static_cast<Op>(GetField<uint32_t>(VT_OP, 0));
- }
- Attribute attribute_type() const {
- return static_cast<Attribute>(GetField<uint8_t>(VT_ATTRIBUTE_TYPE, 0));
- }
- const void *attribute() const {
- return GetPointer<const void *>(VT_ATTRIBUTE);
- }
- template<typename T> const T *attribute_as() const;
- const Pool2dAttribute *attribute_as_Pool2dAttribute() const {
- return attribute_type() == Attribute_Pool2dAttribute ? static_cast<const Pool2dAttribute *>(attribute()) : nullptr;
- }
- const Conv2dAttribute *attribute_as_Conv2dAttribute() const {
- return attribute_type() == Attribute_Conv2dAttribute ? static_cast<const Conv2dAttribute *>(attribute()) : nullptr;
- }
- const TransposeConv2dAttribute *attribute_as_TransposeConv2dAttribute() const {
- return attribute_type() == Attribute_TransposeConv2dAttribute ? static_cast<const TransposeConv2dAttribute *>(attribute()) : nullptr;
- }
- const ReluNAttribute *attribute_as_ReluNAttribute() const {
- return attribute_type() == Attribute_ReluNAttribute ? static_cast<const ReluNAttribute *>(attribute()) : nullptr;
- }
- const AxisAttribute *attribute_as_AxisAttribute() const {
- return attribute_type() == Attribute_AxisAttribute ? static_cast<const AxisAttribute *>(attribute()) : nullptr;
- }
- const ReshapeAttribute *attribute_as_ReshapeAttribute() const {
- return attribute_type() == Attribute_ReshapeAttribute ? static_cast<const ReshapeAttribute *>(attribute()) : nullptr;
- }
- const SliceAttribute *attribute_as_SliceAttribute() const {
- return attribute_type() == Attribute_SliceAttribute ? static_cast<const SliceAttribute *>(attribute()) : nullptr;
- }
- const TileAttribute *attribute_as_TileAttribute() const {
- return attribute_type() == Attribute_TileAttribute ? static_cast<const TileAttribute *>(attribute()) : nullptr;
- }
- const ResizeAttribute *attribute_as_ResizeAttribute() const {
- return attribute_type() == Attribute_ResizeAttribute ? static_cast<const ResizeAttribute *>(attribute()) : nullptr;
- }
- const ClampAttribute *attribute_as_ClampAttribute() const {
- return attribute_type() == Attribute_ClampAttribute ? static_cast<const ClampAttribute *>(attribute()) : nullptr;
- }
- const RescaleAttribute *attribute_as_RescaleAttribute() const {
- return attribute_type() == Attribute_RescaleAttribute ? static_cast<const RescaleAttribute *>(attribute()) : nullptr;
- }
- const MulAttribute *attribute_as_MulAttribute() const {
- return attribute_type() == Attribute_MulAttribute ? static_cast<const MulAttribute *>(attribute()) : nullptr;
- }
- const ArithmeticRightShiftAttribute *attribute_as_ArithmeticRightShiftAttribute() const {
- return attribute_type() == Attribute_ArithmeticRightShiftAttribute ? static_cast<const ArithmeticRightShiftAttribute *>(attribute()) : nullptr;
- }
- const CondIfAttribute *attribute_as_CondIfAttribute() const {
- return attribute_type() == Attribute_CondIfAttribute ? static_cast<const CondIfAttribute *>(attribute()) : nullptr;
- }
- const WhileLoopAttribute *attribute_as_WhileLoopAttribute() const {
- return attribute_type() == Attribute_WhileLoopAttribute ? static_cast<const WhileLoopAttribute *>(attribute()) : nullptr;
- }
- const flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>> *inputs() const {
- return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>> *>(VT_INPUTS);
- }
- const flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>> *outputs() const {
- return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>> *>(VT_OUTPUTS);
- }
- QuantInfo quant_info_type() const {
- return static_cast<QuantInfo>(GetField<uint8_t>(VT_QUANT_INFO_TYPE, 0));
- }
- const void *quant_info() const {
- return GetPointer<const void *>(VT_QUANT_INFO);
- }
- template<typename T> const T *quant_info_as() const;
- const UnaryQuantInfo *quant_info_as_UnaryQuantInfo() const {
- return quant_info_type() == QuantInfo_UnaryQuantInfo ? static_cast<const UnaryQuantInfo *>(quant_info()) : nullptr;
- }
- const ConvQuantInfo *quant_info_as_ConvQuantInfo() const {
- return quant_info_type() == QuantInfo_ConvQuantInfo ? static_cast<const ConvQuantInfo *>(quant_info()) : nullptr;
- }
- const MatMulQuantInfo *quant_info_as_MatMulQuantInfo() const {
- return quant_info_type() == QuantInfo_MatMulQuantInfo ? static_cast<const MatMulQuantInfo *>(quant_info()) : nullptr;
- }
- const PadQuantInfo *quant_info_as_PadQuantInfo() const {
- return quant_info_type() == QuantInfo_PadQuantInfo ? static_cast<const PadQuantInfo *>(quant_info()) : nullptr;
- }
- bool Verify(flatbuffers::Verifier &verifier) const {
- return VerifyTableStart(verifier) &&
- VerifyField<uint32_t>(verifier, VT_OP) &&
- VerifyField<uint8_t>(verifier, VT_ATTRIBUTE_TYPE) &&
- VerifyOffset(verifier, VT_ATTRIBUTE) &&
- VerifyAttribute(verifier, attribute(), attribute_type()) &&
- VerifyOffset(verifier, VT_INPUTS) &&
- verifier.VerifyVector(inputs()) &&
- verifier.VerifyVectorOfStrings(inputs()) &&
- VerifyOffset(verifier, VT_OUTPUTS) &&
- verifier.VerifyVector(outputs()) &&
- verifier.VerifyVectorOfStrings(outputs()) &&
- VerifyField<uint8_t>(verifier, VT_QUANT_INFO_TYPE) &&
- VerifyOffset(verifier, VT_QUANT_INFO) &&
- VerifyQuantInfo(verifier, quant_info(), quant_info_type()) &&
- verifier.EndTable();
- }
-};
-
-template<> inline const Pool2dAttribute *TosaOperator::attribute_as<Pool2dAttribute>() const {
- return attribute_as_Pool2dAttribute();
-}
-
-template<> inline const Conv2dAttribute *TosaOperator::attribute_as<Conv2dAttribute>() const {
- return attribute_as_Conv2dAttribute();
-}
-
-template<> inline const TransposeConv2dAttribute *TosaOperator::attribute_as<TransposeConv2dAttribute>() const {
- return attribute_as_TransposeConv2dAttribute();
-}
-
-template<> inline const ReluNAttribute *TosaOperator::attribute_as<ReluNAttribute>() const {
- return attribute_as_ReluNAttribute();
-}
-
-template<> inline const AxisAttribute *TosaOperator::attribute_as<AxisAttribute>() const {
- return attribute_as_AxisAttribute();
-}
-
-template<> inline const ReshapeAttribute *TosaOperator::attribute_as<ReshapeAttribute>() const {
- return attribute_as_ReshapeAttribute();
-}
-
-template<> inline const SliceAttribute *TosaOperator::attribute_as<SliceAttribute>() const {
- return attribute_as_SliceAttribute();
-}
-
-template<> inline const TileAttribute *TosaOperator::attribute_as<TileAttribute>() const {
- return attribute_as_TileAttribute();
-}
-
-template<> inline const ResizeAttribute *TosaOperator::attribute_as<ResizeAttribute>() const {
- return attribute_as_ResizeAttribute();
-}
-
-template<> inline const ClampAttribute *TosaOperator::attribute_as<ClampAttribute>() const {
- return attribute_as_ClampAttribute();
-}
-
-template<> inline const RescaleAttribute *TosaOperator::attribute_as<RescaleAttribute>() const {
- return attribute_as_RescaleAttribute();
-}
-
-template<> inline const MulAttribute *TosaOperator::attribute_as<MulAttribute>() const {
- return attribute_as_MulAttribute();
-}
-
-template<> inline const ArithmeticRightShiftAttribute *TosaOperator::attribute_as<ArithmeticRightShiftAttribute>() const {
- return attribute_as_ArithmeticRightShiftAttribute();
-}
-
-template<> inline const CondIfAttribute *TosaOperator::attribute_as<CondIfAttribute>() const {
- return attribute_as_CondIfAttribute();
-}
-
-template<> inline const WhileLoopAttribute *TosaOperator::attribute_as<WhileLoopAttribute>() const {
- return attribute_as_WhileLoopAttribute();
-}
-
-template<> inline const UnaryQuantInfo *TosaOperator::quant_info_as<UnaryQuantInfo>() const {
- return quant_info_as_UnaryQuantInfo();
-}
-
-template<> inline const ConvQuantInfo *TosaOperator::quant_info_as<ConvQuantInfo>() const {
- return quant_info_as_ConvQuantInfo();
-}
-
-template<> inline const MatMulQuantInfo *TosaOperator::quant_info_as<MatMulQuantInfo>() const {
- return quant_info_as_MatMulQuantInfo();
-}
-
-template<> inline const PadQuantInfo *TosaOperator::quant_info_as<PadQuantInfo>() const {
- return quant_info_as_PadQuantInfo();
-}
-
-struct TosaOperatorBuilder {
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_op(Op op) {
- fbb_.AddElement<uint32_t>(TosaOperator::VT_OP, static_cast<uint32_t>(op), 0);
- }
- void add_attribute_type(Attribute attribute_type) {
- fbb_.AddElement<uint8_t>(TosaOperator::VT_ATTRIBUTE_TYPE, static_cast<uint8_t>(attribute_type), 0);
- }
- void add_attribute(flatbuffers::Offset<void> attribute) {
- fbb_.AddOffset(TosaOperator::VT_ATTRIBUTE, attribute);
- }
- void add_inputs(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>>> inputs) {
- fbb_.AddOffset(TosaOperator::VT_INPUTS, inputs);
- }
- void add_outputs(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>>> outputs) {
- fbb_.AddOffset(TosaOperator::VT_OUTPUTS, outputs);
- }
- void add_quant_info_type(QuantInfo quant_info_type) {
- fbb_.AddElement<uint8_t>(TosaOperator::VT_QUANT_INFO_TYPE, static_cast<uint8_t>(quant_info_type), 0);
- }
- void add_quant_info(flatbuffers::Offset<void> quant_info) {
- fbb_.AddOffset(TosaOperator::VT_QUANT_INFO, quant_info);
- }
- explicit TosaOperatorBuilder(flatbuffers::FlatBufferBuilder &_fbb)
- : fbb_(_fbb) {
- start_ = fbb_.StartTable();
- }
- TosaOperatorBuilder &operator=(const TosaOperatorBuilder &);
- flatbuffers::Offset<TosaOperator> Finish() {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<TosaOperator>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<TosaOperator> CreateTosaOperator(
- flatbuffers::FlatBufferBuilder &_fbb,
- Op op = Op_UNKNOWN,
- Attribute attribute_type = Attribute_NONE,
- flatbuffers::Offset<void> attribute = 0,
- flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>>> inputs = 0,
- flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>>> outputs = 0,
- QuantInfo quant_info_type = QuantInfo_NONE,
- flatbuffers::Offset<void> quant_info = 0) {
- TosaOperatorBuilder builder_(_fbb);
- builder_.add_quant_info(quant_info);
- builder_.add_outputs(outputs);
- builder_.add_inputs(inputs);
- builder_.add_attribute(attribute);
- builder_.add_op(op);
- builder_.add_quant_info_type(quant_info_type);
- builder_.add_attribute_type(attribute_type);
- return builder_.Finish();
-}
-
-inline flatbuffers::Offset<TosaOperator> CreateTosaOperatorDirect(
- flatbuffers::FlatBufferBuilder &_fbb,
- Op op = Op_UNKNOWN,
- Attribute attribute_type = Attribute_NONE,
- flatbuffers::Offset<void> attribute = 0,
- const std::vector<flatbuffers::Offset<flatbuffers::String>> *inputs = nullptr,
- const std::vector<flatbuffers::Offset<flatbuffers::String>> *outputs = nullptr,
- QuantInfo quant_info_type = QuantInfo_NONE,
- flatbuffers::Offset<void> quant_info = 0) {
- auto inputs__ = inputs ? _fbb.CreateVector<flatbuffers::Offset<flatbuffers::String>>(*inputs) : 0;
- auto outputs__ = outputs ? _fbb.CreateVector<flatbuffers::Offset<flatbuffers::String>>(*outputs) : 0;
- return tosa::CreateTosaOperator(
- _fbb,
- op,
- attribute_type,
- attribute,
- inputs__,
- outputs__,
- quant_info_type,
- quant_info);
-}
-
-struct TosaBasicBlock FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
- enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
- VT_NAME = 4,
- VT_OPERATORS = 6,
- VT_TENSORS = 8,
- VT_INPUTS = 10,
- VT_OUTPUTS = 12
- };
- const flatbuffers::String *name() const {
- return GetPointer<const flatbuffers::String *>(VT_NAME);
- }
- const flatbuffers::Vector<flatbuffers::Offset<TosaOperator>> *operators() const {
- return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<TosaOperator>> *>(VT_OPERATORS);
- }
- const flatbuffers::Vector<flatbuffers::Offset<TosaTensor>> *tensors() const {
- return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<TosaTensor>> *>(VT_TENSORS);
- }
- const flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>> *inputs() const {
- return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>> *>(VT_INPUTS);
- }
- const flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>> *outputs() const {
- return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>> *>(VT_OUTPUTS);
- }
- bool Verify(flatbuffers::Verifier &verifier) const {
- return VerifyTableStart(verifier) &&
- VerifyOffset(verifier, VT_NAME) &&
- verifier.VerifyString(name()) &&
- VerifyOffset(verifier, VT_OPERATORS) &&
- verifier.VerifyVector(operators()) &&
- verifier.VerifyVectorOfTables(operators()) &&
- VerifyOffset(verifier, VT_TENSORS) &&
- verifier.VerifyVector(tensors()) &&
- verifier.VerifyVectorOfTables(tensors()) &&
- VerifyOffset(verifier, VT_INPUTS) &&
- verifier.VerifyVector(inputs()) &&
- verifier.VerifyVectorOfStrings(inputs()) &&
- VerifyOffset(verifier, VT_OUTPUTS) &&
- verifier.VerifyVector(outputs()) &&
- verifier.VerifyVectorOfStrings(outputs()) &&
- verifier.EndTable();
- }
-};
-
-struct TosaBasicBlockBuilder {
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_name(flatbuffers::Offset<flatbuffers::String> name) {
- fbb_.AddOffset(TosaBasicBlock::VT_NAME, name);
- }
- void add_operators(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<TosaOperator>>> operators) {
- fbb_.AddOffset(TosaBasicBlock::VT_OPERATORS, operators);
- }
- void add_tensors(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<TosaTensor>>> tensors) {
- fbb_.AddOffset(TosaBasicBlock::VT_TENSORS, tensors);
- }
- void add_inputs(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>>> inputs) {
- fbb_.AddOffset(TosaBasicBlock::VT_INPUTS, inputs);
- }
- void add_outputs(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>>> outputs) {
- fbb_.AddOffset(TosaBasicBlock::VT_OUTPUTS, outputs);
- }
- explicit TosaBasicBlockBuilder(flatbuffers::FlatBufferBuilder &_fbb)
- : fbb_(_fbb) {
- start_ = fbb_.StartTable();
- }
- TosaBasicBlockBuilder &operator=(const TosaBasicBlockBuilder &);
- flatbuffers::Offset<TosaBasicBlock> Finish() {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<TosaBasicBlock>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<TosaBasicBlock> CreateTosaBasicBlock(
- flatbuffers::FlatBufferBuilder &_fbb,
- flatbuffers::Offset<flatbuffers::String> name = 0,
- flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<TosaOperator>>> operators = 0,
- flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<TosaTensor>>> tensors = 0,
- flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>>> inputs = 0,
- flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>>> outputs = 0) {
- TosaBasicBlockBuilder builder_(_fbb);
- builder_.add_outputs(outputs);
- builder_.add_inputs(inputs);
- builder_.add_tensors(tensors);
- builder_.add_operators(operators);
- builder_.add_name(name);
- return builder_.Finish();
-}
-
-inline flatbuffers::Offset<TosaBasicBlock> CreateTosaBasicBlockDirect(
- flatbuffers::FlatBufferBuilder &_fbb,
- const char *name = nullptr,
- const std::vector<flatbuffers::Offset<TosaOperator>> *operators = nullptr,
- const std::vector<flatbuffers::Offset<TosaTensor>> *tensors = nullptr,
- const std::vector<flatbuffers::Offset<flatbuffers::String>> *inputs = nullptr,
- const std::vector<flatbuffers::Offset<flatbuffers::String>> *outputs = nullptr) {
- auto name__ = name ? _fbb.CreateString(name) : 0;
- auto operators__ = operators ? _fbb.CreateVector<flatbuffers::Offset<TosaOperator>>(*operators) : 0;
- auto tensors__ = tensors ? _fbb.CreateVector<flatbuffers::Offset<TosaTensor>>(*tensors) : 0;
- auto inputs__ = inputs ? _fbb.CreateVector<flatbuffers::Offset<flatbuffers::String>>(*inputs) : 0;
- auto outputs__ = outputs ? _fbb.CreateVector<flatbuffers::Offset<flatbuffers::String>>(*outputs) : 0;
- return tosa::CreateTosaBasicBlock(
- _fbb,
- name__,
- operators__,
- tensors__,
- inputs__,
- outputs__);
-}
-
-struct TosaGraph FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
- enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
- VT_VERSION = 4,
- VT_BLOCKS = 6
- };
- const Version *version() const {
- return GetPointer<const Version *>(VT_VERSION);
- }
- const flatbuffers::Vector<flatbuffers::Offset<TosaBasicBlock>> *blocks() const {
- return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<TosaBasicBlock>> *>(VT_BLOCKS);
- }
- bool Verify(flatbuffers::Verifier &verifier) const {
- return VerifyTableStart(verifier) &&
- VerifyOffset(verifier, VT_VERSION) &&
- verifier.VerifyTable(version()) &&
- VerifyOffset(verifier, VT_BLOCKS) &&
- verifier.VerifyVector(blocks()) &&
- verifier.VerifyVectorOfTables(blocks()) &&
- verifier.EndTable();
- }
-};
-
-struct TosaGraphBuilder {
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_version(flatbuffers::Offset<Version> version) {
- fbb_.AddOffset(TosaGraph::VT_VERSION, version);
- }
- void add_blocks(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<TosaBasicBlock>>> blocks) {
- fbb_.AddOffset(TosaGraph::VT_BLOCKS, blocks);
- }
- explicit TosaGraphBuilder(flatbuffers::FlatBufferBuilder &_fbb)
- : fbb_(_fbb) {
- start_ = fbb_.StartTable();
- }
- TosaGraphBuilder &operator=(const TosaGraphBuilder &);
- flatbuffers::Offset<TosaGraph> Finish() {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<TosaGraph>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<TosaGraph> CreateTosaGraph(
- flatbuffers::FlatBufferBuilder &_fbb,
- flatbuffers::Offset<Version> version = 0,
- flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<TosaBasicBlock>>> blocks = 0) {
- TosaGraphBuilder builder_(_fbb);
- builder_.add_blocks(blocks);
- builder_.add_version(version);
- return builder_.Finish();
-}
-
-inline flatbuffers::Offset<TosaGraph> CreateTosaGraphDirect(
- flatbuffers::FlatBufferBuilder &_fbb,
- flatbuffers::Offset<Version> version = 0,
- const std::vector<flatbuffers::Offset<TosaBasicBlock>> *blocks = nullptr) {
- auto blocks__ = blocks ? _fbb.CreateVector<flatbuffers::Offset<TosaBasicBlock>>(*blocks) : 0;
- return tosa::CreateTosaGraph(
- _fbb,
- version,
- blocks__);
-}
-
-inline bool VerifyAttribute(flatbuffers::Verifier &verifier, const void *obj, Attribute type) {
- switch (type) {
- case Attribute_NONE: {
- return true;
- }
- case Attribute_Pool2dAttribute: {
- auto ptr = reinterpret_cast<const Pool2dAttribute *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case Attribute_Conv2dAttribute: {
- auto ptr = reinterpret_cast<const Conv2dAttribute *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case Attribute_TransposeConv2dAttribute: {
- auto ptr = reinterpret_cast<const TransposeConv2dAttribute *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case Attribute_ReluNAttribute: {
- auto ptr = reinterpret_cast<const ReluNAttribute *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case Attribute_AxisAttribute: {
- auto ptr = reinterpret_cast<const AxisAttribute *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case Attribute_ReshapeAttribute: {
- auto ptr = reinterpret_cast<const ReshapeAttribute *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case Attribute_SliceAttribute: {
- auto ptr = reinterpret_cast<const SliceAttribute *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case Attribute_TileAttribute: {
- auto ptr = reinterpret_cast<const TileAttribute *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case Attribute_ResizeAttribute: {
- auto ptr = reinterpret_cast<const ResizeAttribute *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case Attribute_ClampAttribute: {
- auto ptr = reinterpret_cast<const ClampAttribute *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case Attribute_RescaleAttribute: {
- auto ptr = reinterpret_cast<const RescaleAttribute *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case Attribute_MulAttribute: {
- auto ptr = reinterpret_cast<const MulAttribute *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case Attribute_ArithmeticRightShiftAttribute: {
- auto ptr = reinterpret_cast<const ArithmeticRightShiftAttribute *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case Attribute_CondIfAttribute: {
- auto ptr = reinterpret_cast<const CondIfAttribute *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case Attribute_WhileLoopAttribute: {
- auto ptr = reinterpret_cast<const WhileLoopAttribute *>(obj);
- return verifier.VerifyTable(ptr);
- }
- default: return false;
- }
-}
-
-inline bool VerifyAttributeVector(flatbuffers::Verifier &verifier, const flatbuffers::Vector<flatbuffers::Offset<void>> *values, const flatbuffers::Vector<uint8_t> *types) {
- if (!values || !types) return !values && !types;
- if (values->size() != types->size()) return false;
- for (flatbuffers::uoffset_t i = 0; i < values->size(); ++i) {
- if (!VerifyAttribute(
- verifier, values->Get(i), types->GetEnum<Attribute>(i))) {
- return false;
- }
- }
- return true;
-}
-
-inline bool VerifyQuantInfo(flatbuffers::Verifier &verifier, const void *obj, QuantInfo type) {
- switch (type) {
- case QuantInfo_NONE: {
- return true;
- }
- case QuantInfo_UnaryQuantInfo: {
- auto ptr = reinterpret_cast<const UnaryQuantInfo *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case QuantInfo_ConvQuantInfo: {
- auto ptr = reinterpret_cast<const ConvQuantInfo *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case QuantInfo_MatMulQuantInfo: {
- auto ptr = reinterpret_cast<const MatMulQuantInfo *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case QuantInfo_PadQuantInfo: {
- auto ptr = reinterpret_cast<const PadQuantInfo *>(obj);
- return verifier.VerifyTable(ptr);
- }
- default: return false;
- }
-}
-
-inline bool VerifyQuantInfoVector(flatbuffers::Verifier &verifier, const flatbuffers::Vector<flatbuffers::Offset<void>> *values, const flatbuffers::Vector<uint8_t> *types) {
- if (!values || !types) return !values && !types;
- if (values->size() != types->size()) return false;
- for (flatbuffers::uoffset_t i = 0; i < values->size(); ++i) {
- if (!VerifyQuantInfo(
- verifier, values->Get(i), types->GetEnum<QuantInfo>(i))) {
- return false;
- }
- }
- return true;
-}
-
-inline const tosa::TosaGraph *GetTosaGraph(const void *buf) {
- return flatbuffers::GetRoot<tosa::TosaGraph>(buf);
-}
-
-inline const tosa::TosaGraph *GetSizePrefixedTosaGraph(const void *buf) {
- return flatbuffers::GetSizePrefixedRoot<tosa::TosaGraph>(buf);
-}
-
-inline const char *TosaGraphIdentifier() {
- return "TOSA";
-}
-
-inline bool TosaGraphBufferHasIdentifier(const void *buf) {
- return flatbuffers::BufferHasIdentifier(
- buf, TosaGraphIdentifier());
-}
-
-inline bool VerifyTosaGraphBuffer(
- flatbuffers::Verifier &verifier) {
- return verifier.VerifyBuffer<tosa::TosaGraph>(TosaGraphIdentifier());
-}
-
-inline bool VerifySizePrefixedTosaGraphBuffer(
- flatbuffers::Verifier &verifier) {
- return verifier.VerifySizePrefixedBuffer<tosa::TosaGraph>(TosaGraphIdentifier());
-}
-
-inline const char *TosaGraphExtension() {
- return "tosa";
-}
-
-inline void FinishTosaGraphBuffer(
- flatbuffers::FlatBufferBuilder &fbb,
- flatbuffers::Offset<tosa::TosaGraph> root) {
- fbb.Finish(root, TosaGraphIdentifier());
-}
-
-inline void FinishSizePrefixedTosaGraphBuffer(
- flatbuffers::FlatBufferBuilder &fbb,
- flatbuffers::Offset<tosa::TosaGraph> root) {
- fbb.FinishSizePrefixed(root, TosaGraphIdentifier());
-}
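-
-// A minimal usage sketch of the generated API above. The block name "main" is
-// illustrative only; the calls are the ones declared in this header and in the
-// FlatBuffers runtime.
-//
-//   flatbuffers::FlatBufferBuilder fbb;
-//   auto version = tosa::CreateVersion(fbb);                 // schema defaults
-//   auto block   = tosa::CreateTosaBasicBlockDirect(fbb, "main");
-//   std::vector<flatbuffers::Offset<tosa::TosaBasicBlock>> blocks{ block };
-//   auto graph   = tosa::CreateTosaGraphDirect(fbb, version, &blocks);
-//   tosa::FinishTosaGraphBuffer(fbb, graph);                 // adds the "TOSA" identifier
-//
-//   flatbuffers::Verifier verifier(fbb.GetBufferPointer(), fbb.GetSize());
-//   if (tosa::VerifyTosaGraphBuffer(verifier)) {
-//       const tosa::TosaGraph* g = tosa::GetTosaGraph(fbb.GetBufferPointer());
-//       int32_t minor = g->version() ? g->version()->_minor() : -1;
-//   }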
-
-} // namespace tosa
-
-#endif // FLATBUFFERS_GENERATED_TOSA_TOSA_H_
diff --git a/serialization/tosa_serialization_handler.cpp b/serialization/tosa_serialization_handler.cpp
deleted file mode 100644
index ad33d23..0000000
--- a/serialization/tosa_serialization_handler.cpp
+++ /dev/null
@@ -1,1532 +0,0 @@
-
-// Copyright (c) 2020, ARM Limited.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "tosa_serialization_handler.h"
-
-#include <iostream>
-using namespace tosa;
-
-TosaSerializationTensor::TosaSerializationTensor(const flatbuffers::String* name,
- const flatbuffers::Vector<uint32_t>& usage,
- const flatbuffers::Vector<int32_t>& shape,
- DType dtype,
- const flatbuffers::Vector<uint32_t>& format,
- const flatbuffers::String* npy_filename)
-{
- _dtype = dtype;
-
- // use reserve() rather than size-constructing the vectors so the converted
- // values are not appended after a block of default-initialized entries
- _usage = new std::vector<Usage>();
- _usage->reserve(usage.size());
- for (uint32_t us : usage)
- {
- _usage->push_back((Usage)us);
- }
- assert(_usage);
-
- _format = new std::vector<Format>();
- _format->reserve(format.size());
- for (uint32_t fm : format)
- {
- _format->push_back((Format)fm);
- }
- assert(_format);
-
- _shape = new std::vector<int32_t>(shape.begin(), shape.end());
- assert(_shape);
-
- assert(name);
- _name = new std::string(name->str());
- assert(_name);
-
- if (npy_filename)
- {
- _npy_filename = new std::string(npy_filename->str());
- assert(_npy_filename);
- }
- else
- {
- _npy_filename = nullptr;
- }
-}
-
-TosaSerializationTensor::TosaSerializationTensor(std::string name,
- const std::vector<Usage>& usage,
- const std::vector<int32_t>& shape,
- DType dtype,
- const std::vector<Format>& format,
- const std::string* npy_filename)
-{
-
- _dtype = dtype;
-
- _usage = new std::vector<Usage>(usage);
- assert(_usage);
-
- _format = new std::vector<Format>(format);
- assert(_format);
-
- _shape = new std::vector<int32_t>(shape);
- assert(_shape);
-
- _name = new std::string(name);
- assert(_name);
-
- if (npy_filename)
- {
- _npy_filename = new std::string(*npy_filename);
- assert(_npy_filename);
- }
- else
- {
- _npy_filename = nullptr;
- }
-}
-
-TosaSerializationTensor::TosaSerializationTensor()
-{
- _dtype = DType_UNKNOWN;
-
- _usage = new std::vector<Usage>();
- _format = new std::vector<Format>();
- _shape = new std::vector<int32_t>();
- _name = new std::string("UNKNOWN");
- assert(_usage && _format && _shape && _name);
-
- _npy_filename = nullptr;
-}
-
-TosaSerializationTensor::TosaSerializationTensor(const TosaSerializationTensor& rhs)
-{
- _dtype = rhs._dtype;
-
- assert(rhs._usage);
- _usage = new std::vector<Usage>(*rhs._usage);
- assert(_usage);
-
- assert(rhs._format);
- _format = new std::vector<Format>(*rhs._format);
- assert(_format);
-
- assert(rhs._shape);
- _shape = new std::vector<int32_t>(*rhs._shape);
- assert(_shape);
-
- assert(rhs._name);
- _name = new std::string(*rhs._name);
- assert(_name);
-
- if (rhs._npy_filename)
- {
- _npy_filename = new std::string(*rhs._npy_filename);
- assert(_npy_filename);
- }
- else
- {
- _npy_filename = nullptr;
- }
-}
-
-TosaSerializationTensor& TosaSerializationTensor::operator=(const TosaSerializationTensor& rhs)
-{
- _dtype = rhs._dtype;
-
- delete _usage;
- assert(rhs._usage);
- _usage = new std::vector<Usage>(*rhs._usage);
- assert(_usage);
-
- delete _format;
- assert(rhs._format);
- _format = new std::vector<Format>(*rhs._format);
- assert(_format);
-
- delete _shape;
- assert(rhs._shape);
- _shape = new std::vector<int32_t>(*rhs._shape);
- assert(_shape);
-
- delete _name;
- assert(rhs._name);
- _name = new std::string(*rhs._name);
- assert(_name);
-
- if (_npy_filename)
- delete _npy_filename;
-
- if (rhs._npy_filename)
- {
- _npy_filename = new std::string(*rhs._npy_filename);
- }
- else
- {
- _npy_filename = nullptr;
- }
- return *this;
-}
-
-TosaSerializationTensor::TosaSerializationTensor(TosaSerializationTensor&& rhs)
-{
- _dtype = rhs._dtype;
- std::swap(_format, rhs._format);
- std::swap(_usage, rhs._usage);
- std::swap(_shape, rhs._shape);
- std::swap(_name, rhs._name);
- std::swap(_npy_filename, rhs._npy_filename);
-}
-
-TosaSerializationTensor& TosaSerializationTensor::operator=(TosaSerializationTensor&& rhs)
-{
- _dtype = rhs._dtype;
- std::swap(_format, rhs._format);
- std::swap(_usage, rhs._usage);
- std::swap(_shape, rhs._shape);
- std::swap(_name, rhs._name);
- std::swap(_npy_filename, rhs._npy_filename);
- return *this;
-}
-
-TosaSerializationTensor::~TosaSerializationTensor()
-{
- delete _usage;
- delete _format;
- delete _shape;
- delete _name;
- if (_npy_filename)
- delete _npy_filename;
-}
-
-TosaSerializationOperator::TosaSerializationOperator(Op op,
- Attribute attribute_type,
- const TosaAttributeBase* attribute,
- QuantInfo qinfo_type,
- const TosaQuantInfoBase* qinfo,
- std::vector<std::string> input_tensor_names,
- std::vector<std::string> output_tensor_names)
-{
- _op = op;
- _attribute_type = attribute_type;
-
- switch (attribute_type)
- {
- case Attribute_NONE:
- _attribute = new TosaNoneAttribute();
- break;
-#define DEF_ATTRIBUTE(NAME, ...) \
- case Attribute_##NAME##Attribute: \
- _attribute = new Tosa##NAME##Attribute(attribute); \
- break;
-#include "attribute.def"
-#undef DEF_ATTRIBUTE
- default:
- printf("TosaSerializationOperator::TosaSerializationOperator(): Attribute %s not implemented yet\n",
- EnumNamesAttribute()[attribute_type]);
- assert(0);
- }
-
- _qinfo_type = qinfo_type;
- switch (qinfo_type)
- {
- case QuantInfo_NONE:
- _qinfo = new TosaNoneQuantInfo();
- break;
-#define DEF_QUANTIZATION_INFO(NAME, ...) \
- case QuantInfo_##NAME##QuantInfo: \
- _qinfo = new Tosa##NAME##QuantInfo(qinfo); \
- break;
-#include "quant_info.def"
-#undef DEF_QUANTIZATION_INFO
- default:
- printf("TosaSerializationOperator::TosaSerializationOperator(): QuantInfo %s not implemented yet\n",
- EnumNamesQuantInfo()[qinfo_type]);
- assert(0);
- }
-
- assert(_attribute && _qinfo);
-
- _input_tensor_names = new std::vector<std::string>(input_tensor_names);
- _output_tensor_names = new std::vector<std::string>(output_tensor_names);
-
- assert(_input_tensor_names && _output_tensor_names);
-
- _input_tensors = new std::vector<TosaSerializationTensor*>();
- _output_tensors = new std::vector<TosaSerializationTensor*>();
-
- assert(_input_tensors && _output_tensors);
-}
-
-TosaSerializationOperator::~TosaSerializationOperator()
-{
- delete _attribute;
- delete _qinfo;
- delete _input_tensor_names;
- delete _output_tensor_names;
- // TosaSerializationTensor objects are freed in the TosaSerializationBasicBlock destructor, not here
- delete _input_tensors;
- delete _output_tensors;
-}
-
-TosaSerializationBasicBlock::TosaSerializationBasicBlock(std::string name,
- std::vector<TosaSerializationOperator*> operators,
- std::vector<TosaSerializationTensor*> tensors,
- std::vector<std::string> inputs,
- std::vector<std::string> outputs)
-{
-
- _name = new std::string(name);
- assert(_name);
-
- _operators = new std::vector<TosaSerializationOperator*>(operators);
- assert(_operators);
-
- _tensors = new std::vector<TosaSerializationTensor*>(tensors);
- assert(_tensors);
-
- _inputs = new std::vector<std::string>(inputs);
- assert(_inputs);
-
- _outputs = new std::vector<std::string>(outputs);
- assert(_outputs);
-}
-
-TosaSerializationBasicBlock::~TosaSerializationBasicBlock()
-{
- delete _name;
-
- // deallocate all operators
- for (auto op : GetOperators())
- {
- delete op; // ~TosaSerializationOperator()
- }
- delete _operators;
-
- // deallocate all tensors
- for (auto ts : GetTensors())
- {
- delete ts; // ~TosaSerializationTensor()
- }
- delete _tensors;
-
- delete _inputs;
- delete _outputs;
-}
-
-TosaSerializationHandler::TosaSerializationHandler()
-{
- _schemaLoaded = false;
- _builder = new flatbuffers::FlatBufferBuilder();
- _parser = new flatbuffers::Parser();
- _blocks = new std::vector<TosaSerializationBasicBlock*>();
-
- assert(_builder && _parser && _blocks);
-
- SetTosaVersion();
-}
-
-TosaSerializationHandler::~TosaSerializationHandler()
-{
- if (_version)
- delete _version;
- delete _builder;
- delete _parser;
-
- Clear(); // deallocate all basic blocks
-
- delete _blocks;
-}
-
-tosa_err_t TosaSerializationHandler::SetTosaVersion()
-{
- // The TOSA version is specified in the .fbs schema, encoded as the default
- // field values of CreateVersion(). To read those values we have to serialize
- // one Version object and read it back out of the buffer.
- // TODO: this is costly; is there a better way to encode constants in the .fbs?
- auto fboffset_version = CreateVersion(*_builder);
- auto fboffset_tosa_graph = CreateTosaGraphDirect(*_builder, fboffset_version, nullptr);
- _builder->Finish(fboffset_tosa_graph);
- uint8_t* buf = _builder->GetBufferPointer();
- auto fb_tosa_graph = GetTosaGraph(buf);
- auto fb_tosa_version = fb_tosa_graph->version();
-
- _version = new TosaVersion(fb_tosa_version->_major(), fb_tosa_version->_minor(), fb_tosa_version->_patch(),
- fb_tosa_version->_experimental());
-
- assert(_version);
- return TOSA_OK;
-}
-
-tosa_err_t TosaSerializationHandler::LoadFileSchema(const char* schema_filename)
-{
- std::string schema;
- bool ok;
-
- ok = flatbuffers::LoadFile(schema_filename, false, &schema);
- if (!ok)
- {
- printf("Error loading schema file: %s\n", schema_filename);
- return TOSA_FILE_ERROR;
- }
-
- ok = _parser->Parse(schema.c_str());
- if (!ok)
- {
- printf("Error parsing ISA schema file: %s\n", schema_filename);
- return TOSA_FILE_ERROR;
- }
- _schemaLoaded = true;
-
- return TOSA_OK;
-}
-
-tosa_err_t TosaSerializationHandler::LoadFileJson(const char* filename)
-{
- std::string jsonfile;
- bool ok;
- tosa_err_t err;
-
- if (!_schemaLoaded)
- {
- return TOSA_SCHEMA_MISSING;
- }
-
- ok = flatbuffers::LoadFile(filename, false, &jsonfile);
- if (!ok)
- {
- printf("Error loading json file: %s\n", filename);
- return TOSA_FILE_ERROR;
- }
-
- ok = _parser->Parse(jsonfile.c_str());
- if (!ok)
- {
- printf("Error parsing json file: %s\n", filename);
- return TOSA_FILE_ERROR;
- }
-
- uint8_t* buf = _parser->builder_.GetBufferPointer();
-
- err = InitWithBuf(buf);
- if (err != TOSA_OK)
- {
- return err;
- }
-
- return TOSA_OK;
-}
-
-tosa_err_t TosaSerializationHandler::SaveFileJson(const char* filename)
-{
- std::string jsongen;
- tosa_err_t err;
-
- if (!_schemaLoaded)
- {
- return TOSA_SCHEMA_MISSING;
- }
-
- err = FreezeBuilder();
- if (err != TOSA_OK)
- {
- return err;
- }
-
- uint8_t* buf = _builder->GetBufferPointer();
-
- if (!GenerateText(*_parser, buf, &jsongen))
- {
- printf("Couldn't serialize parsed data to JSON!\n");
- return TOSA_FILE_ERROR;
- }
-
- FILE* file = fopen(filename, "wb");
-
- if (!file)
- {
- printf("Couldn't open output file: %s\n", filename);
- return TOSA_FILE_ERROR;
- }
-
- if (fwrite(jsongen.c_str(), sizeof(char), jsongen.size(), file) != jsongen.size())
- {
- printf("Error writing to json output file: %s\n", filename);
- fclose(file);
- return TOSA_FILE_ERROR;
- }
-
- if (file)
- fclose(file);
-
- return TOSA_OK;
-}
-
-tosa_err_t TosaSerializationHandler::LoadFileTosaFlatbuffer(const char* filename)
-{
- std::string read_buffer;
- tosa_err_t err;
- uint8_t* buf;
- bool ok;
-
- ok = flatbuffers::LoadFile(filename, false, &read_buffer);
- if (!ok)
- {
- printf("Error loading flatbuffer file: %s\n", filename);
- return TOSA_FILE_ERROR;
- }
-
- buf = (uint8_t*)read_buffer.data();
-
- err = InitWithBuf(buf);
- if (err != TOSA_OK)
- {
- return err;
- }
-
- return TOSA_OK;
-}
-
-tosa_err_t TosaSerializationHandler::SaveFileTosaFlatbuffer(const char* filename)
-{
- tosa_err_t err;
-
- err = FreezeBuilder();
- if (err != TOSA_OK)
- {
- return err;
- }
-
- uint8_t* buf = _builder->GetBufferPointer();
-
- bool ok = flatbuffers::SaveFile(filename, (const char*)buf, _builder->GetSize(), false);
- if (!ok)
- {
- printf("Error saving floatbuffer file: %s\n", filename);
- return TOSA_FILE_ERROR;
- }
-
- return TOSA_OK;
-}
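-
-// A minimal usage sketch for this handler; the file paths are illustrative
-// only. The JSON form can only be read or written once the flatbuffers schema
-// has been loaded, while the binary .tosa form does not need the schema.
-//
-//   TosaSerializationHandler handler;
-//   if (handler.LoadFileSchema("tosa.fbs") == TOSA_OK &&
-//       handler.LoadFileJson("graph.json") == TOSA_OK)
-//   {
-//       handler.SaveFileTosaFlatbuffer("graph.tosa");
-//   }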
-
-tosa_err_t TosaSerializationHandler::Clear()
-{
- // deallocate all basic blocks
- for (auto bb : GetBlocks())
- {
- delete bb;
- }
- _blocks->clear();
-
- return TOSA_OK;
-}
-
-tosa_err_t TosaSerializationHandler::CheckTosaVersion(const TosaVersion& read_version)
-{
- if ((*_version) != read_version)
- {
- printf("WARNING: read tosa version: %s != schema tosa version %s\n", read_version.to_string().c_str(),
- this->_version->to_string().c_str());
- return TOSA_VERSION_MISMATCH;
- }
-
- return TOSA_OK;
-}
-
-tosa_err_t TosaSerializationHandler::InitWithBuf(const uint8_t* buf)
-{
- auto fb_tosa_graph = GetTosaGraph(buf);
- auto fb_tosa_version = fb_tosa_graph->version();
- auto fb_tosa_blocks = fb_tosa_graph->blocks();
-
- std::vector<std::string> operator_inputs_container;
- std::vector<std::string> operator_outputs_container;
-
- std::vector<TosaSerializationOperator*> block_operators_container;
- std::vector<TosaSerializationTensor*> block_tensors_container;
- std::vector<std::string> block_inputs_container;
- std::vector<std::string> block_outputs_container;
-
- TosaAttributeBase* typed_attribute = NULL;
- TosaQuantInfoBase* typed_qinfo = NULL;
- TosaSerializationOperator* new_operator = NULL;
- TosaSerializationBasicBlock* new_block = NULL;
- TosaSerializationTensor* new_tensor = NULL;
-
- // erase container
- Clear();
-
- TosaVersion read_version(fb_tosa_version->_major(), fb_tosa_version->_minor(), fb_tosa_version->_patch(),
- fb_tosa_version->_experimental());
- tosa_err_t err = CheckTosaVersion(read_version);
-
- if (err != TOSA_OK)
- return err;
-
- for (size_t i = 0; i < fb_tosa_blocks->size(); i++)
- {
- auto curr_block = fb_tosa_blocks->Get(i);
-
- auto block_name = curr_block->name()->str();
-
- auto fb_tosa_operators = curr_block->operators();
- block_operators_container.clear();
- for (size_t j = 0; j < fb_tosa_operators->size(); j++)
- {
- auto curr_operator = fb_tosa_operators->Get(j);
-
- auto operator_op = curr_operator->op();
- auto attribute_type = curr_operator->attribute_type();
- auto attribute = curr_operator->attribute();
- auto operator_qinfo_type = curr_operator->quant_info_type();
- auto operator_qinfo = curr_operator->quant_info();
-
- // input tensors
- auto operator_inputs = curr_operator->inputs();
- operator_inputs_container.clear();
- if (operator_inputs)
- {
- for (size_t k = 0; k < operator_inputs->size(); k++)
- {
- auto curr_input = operator_inputs->Get(k);
- operator_inputs_container.push_back(curr_input->str());
- }
- }
-
- // output tensors
- auto operator_outputs = curr_operator->outputs();
- operator_outputs_container.clear();
- if (operator_outputs)
- {
- for (size_t k = 0; k < operator_outputs->size(); k++)
- {
- auto curr_output = operator_outputs->Get(k);
- operator_outputs_container.push_back(curr_output->str());
- }
- }
-
- switch (attribute_type)
- {
- case Attribute_NONE:
- typed_attribute = new TosaNoneAttribute();
- break;
-#define DEF_ATTRIBUTE(NAME, ...) \
- case Attribute_##NAME##Attribute: \
- typed_attribute = new Tosa##NAME##Attribute(attribute); \
- break;
-#include "attribute.def"
-#undef DEF_ATTRIBUTE
- default:
- printf("TosaSerializationHandler::InitWithBuf(): Attribute %s not implemented yet\n",
- EnumNamesAttribute()[attribute_type]);
- return TOSA_INTERNAL_ERROR;
- }
-
- switch (operator_qinfo_type)
- {
- case QuantInfo_NONE:
- typed_qinfo = new TosaNoneQuantInfo();
- break;
-#define DEF_QUANTIZATION_INFO(NAME, ...) \
- case QuantInfo_##NAME##QuantInfo: \
- typed_qinfo = new Tosa##NAME##QuantInfo(operator_qinfo); \
- break;
-
-#include "quant_info.def"
-#undef DEF_QUANTIZATION_INFO
- default:
- printf("TosaSerializationHandler::InitWithBuf(): QuantInfo %s not implemented yet\n",
- EnumNamesQuantInfo()[operator_qinfo_type]);
- return TOSA_INTERNAL_ERROR;
- }
-
- new_operator =
- new TosaSerializationOperator(operator_op, attribute_type, typed_attribute, operator_qinfo_type,
- typed_qinfo, operator_inputs_container, operator_outputs_container);
- if (new_operator)
- {
- block_operators_container.push_back(new_operator);
- }
- else
- {
- return TOSA_MEMORY_ERROR;
- }
-
- if (typed_attribute)
- delete typed_attribute;
- if (typed_qinfo)
- delete typed_qinfo;
- }
-
- auto fb_tosa_tensors = curr_block->tensors();
- block_tensors_container.clear();
- for (size_t j = 0; j < fb_tosa_tensors->size(); j++)
- {
- auto curr_tensor = fb_tosa_tensors->Get(j);
-
- auto tensor_name = curr_tensor->name();
- auto tensor_usage = curr_tensor->usage();
- auto tensor_shape = curr_tensor->shape();
- auto tensor_type = curr_tensor->type();
- auto tensor_format = curr_tensor->format();
- auto tensor_npy_filename = curr_tensor->npy_filename();
-
- new_tensor = new TosaSerializationTensor(tensor_name, *tensor_usage, *tensor_shape, tensor_type,
- *tensor_format, tensor_npy_filename);
- if (new_tensor)
- {
- block_tensors_container.push_back(new_tensor);
- }
- else
- {
- return TOSA_MEMORY_ERROR;
- }
- }
-
- auto block_inputs = curr_block->inputs();
- auto block_outputs = curr_block->outputs();
-
- block_inputs_container.clear();
- block_outputs_container.clear();
-
- for (size_t j = 0; j < block_inputs->size(); j++)
- {
- auto curr_block_input = block_inputs->Get(j);
- block_inputs_container.push_back(curr_block_input->str());
- }
- for (size_t j = 0; j < block_outputs->size(); j++)
- {
- auto curr_block_output = block_outputs->Get(j);
- block_outputs_container.push_back(curr_block_output->str());
- }
-
- new_block = new TosaSerializationBasicBlock(block_name, block_operators_container, block_tensors_container,
- block_inputs_container, block_outputs_container);
- if (new_block)
- {
- this->GetBlocks().push_back(new_block);
- }
- else
- {
- return TOSA_MEMORY_ERROR;
- }
- }
-
- return TOSA_OK;
-}
-
-tosa_err_t TosaSerializationHandler::FreezeBuilder()
-{
- std::vector<flatbuffers::Offset<TosaBasicBlock>> fboffset_blocks;
-
- std::vector<flatbuffers::Offset<TosaOperator>> fboffset_block_operators;
- std::vector<flatbuffers::Offset<TosaTensor>> fboffset_block_tensors;
- std::vector<flatbuffers::Offset<flatbuffers::String>> fboffset_block_inputs;
- std::vector<flatbuffers::Offset<flatbuffers::String>> fboffset_block_outputs;
-
- std::vector<flatbuffers::Offset<flatbuffers::String>> fboffset_operator_inputs;
- std::vector<flatbuffers::Offset<flatbuffers::String>> fboffset_operator_outputs;
-
- // translate each TosaSerializationBasicBlock, with its operators and tensors, into flatbuffers offsets
- for (auto block : GetBlocks())
- {
- fboffset_block_operators.clear();
- fboffset_block_tensors.clear();
- fboffset_block_inputs.clear();
- fboffset_block_outputs.clear();
-
- auto block_name = _builder->CreateString(block->GetName().c_str());
-
- for (auto tensor_str : block->GetInputs())
- {
- auto tensor_name = _builder->CreateString(tensor_str.c_str());
- fboffset_block_inputs.push_back(tensor_name);
- }
-
- for (auto tensor_str : block->GetOutputs())
- {
- auto tensor_name = _builder->CreateString(tensor_str.c_str());
- fboffset_block_outputs.push_back(tensor_name);
- }
-
- auto fb_block_inputs = _builder->CreateVector(fboffset_block_inputs);
- auto fb_block_outputs = _builder->CreateVector(fboffset_block_outputs);
-
- for (auto op : block->GetOperators())
- {
- fboffset_operator_inputs.clear();
- fboffset_operator_outputs.clear();
-
- auto operator_op = op->GetOp();
- auto attribute_type = op->GetAttributeType();
-
- for (auto tensor_str : op->GetInputTensorNames())
- {
- auto tensor_name = _builder->CreateString(tensor_str.c_str());
- fboffset_operator_inputs.push_back(tensor_name);
- }
-
- for (auto tensor_str : op->GetOutputTensorNames())
- {
- auto tensor_name = _builder->CreateString(tensor_str.c_str());
- fboffset_operator_outputs.push_back(tensor_name);
- }
-
- auto fb_operator_inputs = _builder->CreateVector(fboffset_operator_inputs);
- auto fb_operator_outputs = _builder->CreateVector(fboffset_operator_outputs);
-
- flatbuffers::Offset<void> fb_attribute;
- switch (attribute_type)
- {
- case Attribute_NONE:
- fb_attribute = 0;
- break;
-
-#define DEF_ARGS_S_STR(NAME, V) , _builder->CreateString(reinterpret_cast<Tosa##NAME*>(op->GetAttribute())->V().c_str())
-#define DEF_ARGS_S_DEFAULT(NAME, V) , reinterpret_cast<Tosa##NAME*>(op->GetAttribute())->V()
-
-#define DEF_ARGS_S_int32_t(NAME, V) DEF_ARGS_S_DEFAULT(NAME, V)
-#define DEF_ARGS_S_float(NAME, V) DEF_ARGS_S_DEFAULT(NAME, V)
-#define DEF_ARGS_S_bool(NAME, V) DEF_ARGS_S_DEFAULT(NAME, V)
-#define DEF_ARGS_S_ResizeMode(NAME, V) DEF_ARGS_S_DEFAULT(NAME, V)
-#define DEF_ARGS_S_string(NAME, V) DEF_ARGS_S_STR(NAME, V)
-
-#define DEF_ARGS_S(NAME, T, V) DEF_ARGS_S_##T(NAME, V)
-#define DEF_ARGS_V(NAME, T, V) , _builder->CreateVector<T>(reinterpret_cast<Tosa##NAME*>(op->GetAttribute())->V())
-
-#define DEF_ARGS_1(NAME, T0, F0, V0) DEF_ARGS_##F0(NAME, T0, V0)
-#define DEF_ARGS_2(NAME, T0, F0, V0, T1, F1, V1) DEF_ARGS_##F0(NAME, T0, V0) DEF_ARGS_##F1(NAME, T1, V1)
-#define DEF_ARGS_3(NAME, T0, F0, V0, T1, F1, V1, T2, F2, V2) \
- DEF_ARGS_##F0(NAME, T0, V0) DEF_ARGS_##F1(NAME, T1, V1) DEF_ARGS_##F2(NAME, T2, V2)
-#define DEF_ARGS_4(NAME, T0, F0, V0, T1, F1, V1, T2, F2, V2, T3, F3, V3) \
- DEF_ARGS_##F0(NAME, T0, V0) DEF_ARGS_##F1(NAME, T1, V1) DEF_ARGS_##F2(NAME, T2, V2) DEF_ARGS_##F3(NAME, T3, V3)
-#define DEF_ARGS_5(NAME, T0, F0, V0, T1, F1, V1, T2, F2, V2, T3, F3, V3, T4, F4, V4) \
- DEF_ARGS_##F0(NAME, T0, V0) DEF_ARGS_##F1(NAME, T1, V1) DEF_ARGS_##F2(NAME, T2, V2) DEF_ARGS_##F3(NAME, T3, V3) \
- DEF_ARGS_##F4(NAME, T4, V4)
-#define DEF_ARGS_6(NAME, T0, F0, V0, T1, F1, V1, T2, F2, V2, T3, F3, V3, T4, F4, V4, T5, F5, V5) \
- DEF_ARGS_##F0(NAME, T0, V0) DEF_ARGS_##F1(NAME, T1, V1) DEF_ARGS_##F2(NAME, T2, V2) DEF_ARGS_##F3(NAME, T3, V3) \
- DEF_ARGS_##F4(NAME, T4, V4) DEF_ARGS_##F5(NAME, T5, V5)
-#define DEF_ARGS_7(NAME, T0, F0, V0, T1, F1, V1, T2, F2, V2, T3, F3, V3, T4, F4, V4, T5, F5, V5, T6, F6, V6) \
- DEF_ARGS_##F0(NAME, T0, V0) DEF_ARGS_##F1(NAME, T1, V1) DEF_ARGS_##F2(NAME, T2, V2) DEF_ARGS_##F3(NAME, T3, V3) \
- DEF_ARGS_##F4(NAME, T4, V4) DEF_ARGS_##F5(NAME, T5, V5) DEF_ARGS_##F6(NAME, T6, V6)
-#define DEF_ATTRIBUTE(NAME, NUM_ARGS, ...) \
- case Attribute_##NAME##Attribute: \
- fb_attribute = Create##NAME##Attribute(*_builder DEF_ARGS_##NUM_ARGS(NAME##Attribute, __VA_ARGS__)).Union(); \
- break;
-
-#include "attribute.def"
-#undef DEF_ATTRIBUTE
-#undef DEF_ARGS_1
-#undef DEF_ARGS_2
-#undef DEF_ARGS_3
-#undef DEF_ARGS_4
-#undef DEF_ARGS_5
-#undef DEF_ARGS_6
-#undef DEF_ARGS_7
-#undef DEF_ARGS_S
-#undef DEF_ARGS_V
-#undef DEF_ARGS_S_int32_t
-#undef DEF_ARGS_S_float
-#undef DEF_ARGS_S_bool
-#undef DEF_ARGS_S_ResizeMode
-#undef DEF_ARGS_S_string
-#undef DEF_ARGS_S_STR
-#undef DEF_ARGS_S_DEFAULT
- default:
- printf("TosaSerializationHandler::FreezeBuilder(): Attribute %s not implemented yet\n",
- EnumNamesAttribute()[attribute_type]);
- return TOSA_INTERNAL_ERROR;
- }
-
- auto qinfo_type = op->GetQInfoType();
- flatbuffers::Offset<void> fb_operator_qinfo;
- switch (qinfo_type)
- {
- case QuantInfo_NONE:
- fb_operator_qinfo = 0;
- break;
-#define DEF_ARGS_S(NAME, T, V) , reinterpret_cast<Tosa##NAME*>(op->GetQInfo())->V()
-#define DEF_ARGS_V(NAME, T, V) , _builder->CreateVector<T>(reinterpret_cast<Tosa##NAME*>(op->GetQInfo())->V())
-
-#define DEF_ARGS_1(NAME, T0, F0, V0) DEF_ARGS_##F0(NAME, T0, V0)
-#define DEF_ARGS_2(NAME, T0, F0, V0, T1, F1, V1) DEF_ARGS_##F0(NAME, T0, V0) DEF_ARGS_##F1(NAME, T1, V1)
-#define DEF_ARGS_3(NAME, T0, F0, V0, T1, F1, V1, T2, F2, V2) \
- DEF_ARGS_##F0(NAME, T0, V0) DEF_ARGS_##F1(NAME, T1, V1) DEF_ARGS_##F2(NAME, T2, V2)
-#define DEF_ARGS_4(NAME, T0, F0, V0, T1, F1, V1, T2, F2, V2, T3, F3, V3) \
- DEF_ARGS_##F0(NAME, T0, V0) DEF_ARGS_##F1(NAME, T1, V1) DEF_ARGS_##F2(NAME, T2, V2) DEF_ARGS_##F3(NAME, T3, V3)
-#define DEF_ARGS_5(NAME, T0, F0, V0, T1, F1, V1, T2, F2, V2, T3, F3, V3, T4, F4, V4) \
- DEF_ARGS_##F0(NAME, T0, V0) DEF_ARGS_##F1(NAME, T1, V1) DEF_ARGS_##F2(NAME, T2, V2) DEF_ARGS_##F3(NAME, T3, V3) \
- DEF_ARGS_##F4(NAME, T4, V4)
-#define DEF_ARGS_6(NAME, T0, F0, V0, T1, F1, V1, T2, F2, V2, T3, F3, V3, T4, F4, V4, T5, F5, V5) \
- DEF_ARGS_##F0(NAME, T0, V0) DEF_ARGS_##F1(NAME, T1, V1) DEF_ARGS_##F2(NAME, T2, V2) DEF_ARGS_##F3(NAME, T3, V3) \
- DEF_ARGS_##F4(NAME, T4, V4) DEF_ARGS_##F5(NAME, T5, V5)
-#define DEF_ARGS_7(NAME, T0, F0, V0, T1, F1, V1, T2, F2, V2, T3, F3, V3, T4, F4, V4, T5, F5, V5, T6, F6, V6) \
- DEF_ARGS_##F0(NAME, T0, V0) DEF_ARGS_##F1(NAME, T1, V1) DEF_ARGS_##F2(NAME, T2, V2) DEF_ARGS_##F3(NAME, T3, V3) \
- DEF_ARGS_##F4(NAME, T4, V4) DEF_ARGS_##F5(NAME, T5, V5) DEF_ARGS_##F6(NAME, T6, V6)
-#define DEF_ARGS_8(NAME, T0, F0, V0, T1, F1, V1, T2, F2, V2, T3, F3, V3, T4, F4, V4, T5, F5, V5, T6, F6, V6, T7, F7, \
- V7) \
- DEF_ARGS_##F0(NAME, T0, V0) DEF_ARGS_##F1(NAME, T1, V1) DEF_ARGS_##F2(NAME, T2, V2) DEF_ARGS_##F3(NAME, T3, V3) \
- DEF_ARGS_##F4(NAME, T4, V4) DEF_ARGS_##F5(NAME, T5, V5) DEF_ARGS_##F6(NAME, T6, V6) \
- DEF_ARGS_##F7(NAME, T7, V7)
-#define DEF_ARGS_9(NAME, T0, F0, V0, T1, F1, V1, T2, F2, V2, T3, F3, V3, T4, F4, V4, T5, F5, V5, T6, F6, V6, T7, F7, \
- V7, T8, F8, V8) \
- DEF_ARGS_##F0(NAME, T0, V0) DEF_ARGS_##F1(NAME, T1, V1) DEF_ARGS_##F2(NAME, T2, V2) DEF_ARGS_##F3(NAME, T3, V3) \
- DEF_ARGS_##F4(NAME, T4, V4) DEF_ARGS_##F5(NAME, T5, V5) DEF_ARGS_##F6(NAME, T6, V6) \
- DEF_ARGS_##F7(NAME, T7, V7) DEF_ARGS_##F8(NAME, T8, V8)
-#define DEF_ARGS_10(NAME, T0, F0, V0, T1, F1, V1, T2, F2, V2, T3, F3, V3, T4, F4, V4, T5, F5, V5, T6, F6, V6, T7, F7, \
- V7, T8, F8, V8, T9, F9, V9) \
- DEF_ARGS_##F0(NAME, T0, V0) DEF_ARGS_##F1(NAME, T1, V1) DEF_ARGS_##F2(NAME, T2, V2) DEF_ARGS_##F3(NAME, T3, V3) \
- DEF_ARGS_##F4(NAME, T4, V4) DEF_ARGS_##F5(NAME, T5, V5) DEF_ARGS_##F6(NAME, T6, V6) \
- DEF_ARGS_##F7(NAME, T7, V7) DEF_ARGS_##F8(NAME, T8, V8) DEF_ARGS_##F9(NAME, T9, V9)
-#define DEF_QUANTIZATION_INFO(NAME, NUM_ARGS, ...) \
- case QuantInfo_##NAME##QuantInfo: \
- fb_operator_qinfo = \
- Create##NAME##QuantInfo(*_builder DEF_ARGS_##NUM_ARGS(NAME##QuantInfo, __VA_ARGS__)).Union(); \
- break;
-
-#include "quant_info.def"
-#undef DEF_QUANTIZATION_INFO
-#undef DEF_ARGS_1
-#undef DEF_ARGS_2
-#undef DEF_ARGS_3
-#undef DEF_ARGS_4
-#undef DEF_ARGS_5
-#undef DEF_ARGS_6
-#undef DEF_ARGS_7
-#undef DEF_ARGS_8
-#undef DEF_ARGS_9
-#undef DEF_ARGS_10
-#undef DEF_ARGS_S
-#undef DEF_ARGS_V
- default:
- printf("TosaSerializationHandler::FreezeBuilder(): QuantInfo %s not implemented yet\n",
- EnumNamesQuantInfo()[qinfo_type]);
- return TOSA_INTERNAL_ERROR;
- }
-
- auto fboffset_operator =
- CreateTosaOperator(*_builder, operator_op, attribute_type, fb_attribute, fb_operator_inputs,
- fb_operator_outputs, qinfo_type, fb_operator_qinfo);
- fboffset_block_operators.push_back(fboffset_operator);
- }
-
- auto fb_block_operators = _builder->CreateVector(fboffset_block_operators);
-
- for (auto tensor : block->GetTensors())
- {
-
- auto tensor_name = _builder->CreateString(tensor->GetName().c_str());
- auto tensor_usage =
- _builder->CreateVector(std::vector<uint32_t>(tensor->GetUsage().begin(), tensor->GetUsage().end()));
- auto tensor_shape = _builder->CreateVector(tensor->GetShape());
- auto tensor_dtype = tensor->GetDtype();
- auto tensor_format =
- _builder->CreateVector(std::vector<uint32_t>(tensor->GetFormat().begin(), tensor->GetFormat().end()));
- flatbuffers::Offset<flatbuffers::String> tensor_npy_filename = 0;
- if (tensor->GetNpyFilePtr())
- tensor_npy_filename = _builder->CreateString(tensor->GetNpyFilePtr()->c_str());
-
- auto fboffset_tensor = CreateTosaTensor(*_builder, tensor_name, tensor_shape, tensor_dtype, tensor_usage,
- tensor_format, tensor_npy_filename);
- fboffset_block_tensors.push_back(fboffset_tensor);
- }
-
- auto fb_block_tensors = _builder->CreateVector(fboffset_block_tensors);
-
- auto fboffset_block = CreateTosaBasicBlock(*_builder, block_name, fb_block_operators, fb_block_tensors,
- fb_block_inputs, fb_block_outputs);
- fboffset_blocks.push_back(fboffset_block);
- }
-
- auto fb_blocks = _builder->CreateVector(fboffset_blocks);
-
- auto fb_version = CreateVersion(*_builder, GetTosaVersion()->_major, GetTosaVersion()->_minor,
- GetTosaVersion()->_patch, GetTosaVersion()->_experimental);
-
- auto fb_graph = CreateTosaGraph(*_builder, fb_version, fb_blocks);
- _builder->Finish(fb_graph);
-
- return TOSA_OK;
-}
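For reference, the DEF_ATTRIBUTE / DEF_ARGS X-macros in the attribute switch of FreezeBuilder() above (and the parallel DEF_QUANTIZATION_INFO block) stamp out one case per entry of attribute.def and quant_info.def. A hand-expanded sketch for a hypothetical single-field entry such as DEF_ATTRIBUTE(Axis, 1, int32_t, S, axis) (the .def files themselves are not part of this hunk) would read:

    case Attribute_AxisAttribute:
        // DEF_ARGS_1 -> DEF_ARGS_S -> DEF_ARGS_S_int32_t: fetch the field from the in-memory
        // TosaAxisAttribute wrapper and hand it to the generated flatbuffers creator.
        fb_attribute = CreateAxisAttribute(
                           *_builder,
                           reinterpret_cast<TosaAxisAttribute*>(op->GetAttribute())->axis())
                           .Union();
        break;

String fields instead go through DEF_ARGS_S_STR, which wraps the value in _builder->CreateString(), and vector fields go through DEF_ARGS_V, which wraps the value in _builder->CreateVector<T>().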
-
-// Magic NUMPY header
-static const char NUMPY_HEADER_STR[] = "\x93NUMPY\x1\x0\x76\x0{";
-static const int NUMPY_HEADER_SZ = 128;
-
-NumpyUtilities::NPError NumpyUtilities::readFromNpyFile(const char* filename, const uint32_t elems, bool* databuf)
-{
- const char dtype_str[] = "'|b1'";
- FILE* infile = nullptr;
- NPError rc = NO_ERROR;
-
- assert(filename);
- assert(databuf);
-
- infile = fopen(filename, "rb");
- if (!infile)
- {
- rc = FILE_NOT_FOUND;
- goto done;
- }
-
- rc = checkNpyHeader(infile, elems, dtype_str);
- if (rc != NO_ERROR)
- {
- goto done;
- }
-
- // Read in the data from numpy byte array to native bool
- // array format
- for (uint32_t i = 0; i < elems; i++)
- {
- int val = fgetc(infile);
-
- if (val == EOF)
- {
- rc = FILE_IO_ERROR;
- goto done;
- }
-
- databuf[i] = val;
- }
-
-done:
-
- if (infile)
- fclose(infile);
-
- return rc;
-}
-
-NumpyUtilities::NPError NumpyUtilities::readFromNpyFile(const char* filename, const uint32_t elems, int32_t* databuf)
-{
- const char dtype_str[] = "'<i4'";
- FILE* infile = nullptr;
- NPError rc = NO_ERROR;
-
- assert(filename);
- assert(databuf);
-
- infile = fopen(filename, "rb");
- if (!infile)
- {
- rc = FILE_NOT_FOUND;
- goto done;
- }
-
- rc = checkNpyHeader(infile, elems, dtype_str);
- if (rc != NO_ERROR)
- {
- goto done;
- }
-
- // Now we are at the beginning of the data
- // Parse based on the datatype and number of dimensions
- if (fread(databuf, sizeof(int32_t), elems, infile) != elems)
- {
- rc = FILE_IO_ERROR;
- goto done;
- }
-
-done:
-
- if (infile)
- fclose(infile);
-
- return rc;
-}
-
-NumpyUtilities::NPError NumpyUtilities::readFromNpyFile(const char* filename, const uint32_t elems, int64_t* databuf)
-{
- const char dtype_str[] = "'<i8'";
- FILE* infile = nullptr;
- NPError rc = NO_ERROR;
-
- assert(filename);
- assert(databuf);
-
- infile = fopen(filename, "rb");
- if (!infile)
- {
- rc = FILE_NOT_FOUND;
- goto done;
- }
-
- rc = checkNpyHeader(infile, elems, dtype_str);
- if (rc != NO_ERROR)
- {
- goto done;
- }
-
- // Now we are at the beginning of the data
- // Parse based on the datatype and number of dimensions
- if (fread(databuf, sizeof(int64_t), elems, infile) != elems)
- {
- rc = FILE_IO_ERROR;
- goto done;
- }
-
-done:
-
- if (infile)
- fclose(infile);
-
- return rc;
-}
-
-NumpyUtilities::NPError NumpyUtilities::readFromNpyFile(const char* filename, const uint32_t elems, float* databuf)
-{
- const char dtype_str[] = "'<f4'";
- FILE* infile = nullptr;
- NPError rc = NO_ERROR;
-
- assert(filename);
- assert(databuf);
-
- infile = fopen(filename, "rb");
- if (!infile)
- {
- rc = FILE_NOT_FOUND;
- goto done;
- }
-
- rc = checkNpyHeader(infile, elems, dtype_str);
- if (rc != NO_ERROR)
- {
- goto done;
- }
-
- // Now we are at the beginning of the data
- // Parse based on the datatype and number of dimensions
- if (fread(databuf, sizeof(float), elems, infile) != elems)
- {
- rc = FILE_IO_ERROR;
- goto done;
- }
-
-done:
-
- if (infile)
- fclose(infile);
-
- return rc;
-}
-
-NumpyUtilities::NPError NumpyUtilities::checkNpyHeader(FILE* infile, const uint32_t elems, const char* dtype_str)
-{
- char buf[NUMPY_HEADER_SZ + 1];
- char* ptr = nullptr;
- NPError rc = NO_ERROR;
- bool foundFormat = false;
- bool foundOrder = false;
- bool foundShape = false;
- bool fortranOrder = false;
- std::vector<int> shape;
- uint32_t totalElems = 1;
- char* outer_end = NULL;
-
- assert(infile);
- assert(elems > 0);
-
- if (fread(buf, NUMPY_HEADER_SZ, 1, infile) != 1)
- {
- rc = HEADER_PARSE_ERROR;
- goto done;
- }
-
- if (memcmp(buf, NUMPY_HEADER_STR, sizeof(NUMPY_HEADER_STR) - 1))
- {
- rc = HEADER_PARSE_ERROR;
- goto done;
- }
-
- ptr = strtok_r(buf + sizeof(NUMPY_HEADER_STR) - 1, ":", &outer_end);
-
- // Read in the data type, order, and shape
- while (ptr && (!foundFormat || !foundOrder || !foundShape))
- {
-
- // End of string?
- if (!ptr)
- break;
-
- // Skip whitespace
- while (isspace(*ptr))
- ptr++;
-
- // Parse the dictionary field name
- if (!strcmp(ptr, "'descr'"))
- {
- ptr = strtok_r(NULL, ",", &outer_end);
- if (!ptr)
- break;
-
- while (isspace(*ptr))
- ptr++;
-
- if (strcmp(ptr, dtype_str))
- {
- rc = FILE_TYPE_MISMATCH;
- goto done;
- }
-
- foundFormat = true;
- }
- else if (!strcmp(ptr, "'fortran_order'"))
- {
- ptr = strtok_r(NULL, ",", &outer_end);
- if (!ptr)
- break;
-
- while (isspace(*ptr))
- ptr++;
-
- if (!strcmp(ptr, "False"))
- {
- fortranOrder = false;
- }
- else
- {
- rc = FILE_TYPE_MISMATCH;
- goto done;
- }
-
- foundOrder = true;
- }
- else if (!strcmp(ptr, "'shape'"))
- {
-
- ptr = strtok_r(NULL, "(", &outer_end);
- if (!ptr)
- break;
- ptr = strtok_r(NULL, ")", &outer_end);
- if (!ptr)
- break;
-
- while (isspace(*ptr))
- ptr++;
-
- // The shape contains N comma-separated integers. Read up to 4.
- char* end = NULL;
-
- ptr = strtok_r(ptr, ",", &end);
- for (int i = 0; i < 4; i++)
- {
- // Out of dimensions
- if (!ptr)
- break;
-
- int dim = atoi(ptr);
-
- // Dimension is 0
- if (dim == 0)
- break;
-
- shape.push_back(dim);
- totalElems *= dim;
- ptr = strtok_r(NULL, ",", &end);
- }
-
- foundShape = true;
- }
- else
- {
- rc = HEADER_PARSE_ERROR;
- goto done;
- }
-
- if (!ptr)
- break;
-
- ptr = strtok_r(NULL, ":", &outer_end);
- }
-
- if (!foundShape || !foundFormat || !foundOrder)
- {
- rc = HEADER_PARSE_ERROR;
- goto done;
- }
-
- // Validate header
- if (fortranOrder != false)
- {
- rc = FILE_TYPE_MISMATCH;
- goto done;
- }
-
- if (totalElems != elems)
- {
- rc = BUFFER_SIZE_MISMATCH;
- goto done;
- }
-
- // Go back to the beginning and read until the end of the header dictionary
- rewind(infile);
- int val;
-
- do
- {
- val = fgetc(infile);
- } while (val != EOF && val != '\n');
-
-done:
-
- return rc;
-}
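For reference, the header that checkNpyHeader() accepts is the fixed 128-byte NumPy 1.0 layout declared above: the magic bytes \x93NUMPY, version bytes \x01\x00, the little-endian header length 0x0076 (118 bytes), and then a Python dict padded with spaces up to a terminating '\n'. An illustrative header for a float32 tensor of shape (2, 3) (example values only, not taken from this patch) would be:

    \x93NUMPY\x01\x00\x76\x00{'descr': '<f4', 'fortran_order': False, 'shape': (2, 3,), }<spaces...>\n

The parser only accepts C-ordered arrays ('fortran_order': False) whose 'descr' string matches the caller's dtype_str and whose shape product equals the requested element count.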
-
-NumpyUtilities::NPError NumpyUtilities::writeToNpyFile(const char* filename, const uint32_t elems, const bool* databuf)
-{
- std::vector<int32_t> shape = { (int32_t)elems };
- return writeToNpyFile(filename, shape, databuf);
-}
-
-NumpyUtilities::NPError
- NumpyUtilities::writeToNpyFile(const char* filename, const std::vector<int32_t>& shape, const bool* databuf)
-{
- const char dtype_str[] = "'|b1'";
- FILE* outfile = nullptr;
- NPError rc = NO_ERROR;
- uint32_t totalElems = 1;
-
- assert(filename);
- assert(shape.size() >= 0);
- assert(databuf);
-
- outfile = fopen(filename, "wb");
-
- if (!outfile)
- {
- rc = FILE_NOT_FOUND;
- goto done;
- }
-
- for (uint32_t i = 0; i < shape.size(); i++)
- {
- totalElems *= shape[i];
- }
-
- rc = writeNpyHeader(outfile, shape, dtype_str);
-
- // Numpy save format stores booleans as a byte array
- // with one byte per boolean. This somewhat inefficiently
- // remaps from system bool[] to this format.
- for (uint32_t i = 0; i < totalElems; i++)
- {
- int val = databuf[i] ? 1 : 0;
- if (fputc(val, outfile) == EOF)
- {
- rc = FILE_IO_ERROR;
- goto done;
- }
- }
-
-done:
-
- if (outfile)
- fclose(outfile);
-
- return rc;
-}
-
-NumpyUtilities::NPError
- NumpyUtilities::writeToNpyFile(const char* filename, const uint32_t elems, const int32_t* databuf)
-{
- std::vector<int32_t> shape = { (int32_t)elems };
- return writeToNpyFile(filename, shape, databuf);
-}
-
-NumpyUtilities::NPError
- NumpyUtilities::writeToNpyFile(const char* filename, const std::vector<int32_t>& shape, const int32_t* databuf)
-{
- const char dtype_str[] = "'<i4'";
- FILE* outfile = nullptr;
- NPError rc = NO_ERROR;
- uint32_t totalElems = 1;
-
- assert(filename);
- assert(shape.size() >= 0);
- assert(databuf);
-
- outfile = fopen(filename, "wb");
-
- if (!outfile)
- {
- rc = FILE_NOT_FOUND;
- goto done;
- }
-
- for (uint32_t i = 0; i < shape.size(); i++)
- {
- totalElems *= shape[i];
- }
-
- rc = writeNpyHeader(outfile, shape, dtype_str);
-
- if (fwrite(databuf, sizeof(int32_t), totalElems, outfile) != totalElems)
- {
- rc = FILE_IO_ERROR;
- goto done;
- }
-
-done:
-
- if (outfile)
- fclose(outfile);
-
- return rc;
-}
-
-NumpyUtilities::NPError
- NumpyUtilities::writeToNpyFile(const char* filename, const uint32_t elems, const int64_t* databuf)
-{
- std::vector<int32_t> shape = { (int32_t)elems };
- return writeToNpyFile(filename, shape, databuf);
-}
-
-NumpyUtilities::NPError
- NumpyUtilities::writeToNpyFile(const char* filename, const std::vector<int32_t>& shape, const int64_t* databuf)
-{
- const char dtype_str[] = "'<i8'";
- FILE* outfile = nullptr;
- NPError rc = NO_ERROR;
- uint32_t totalElems = 1;
-
- assert(filename);
- assert(shape.size() >= 0);
- assert(databuf);
-
- outfile = fopen(filename, "wb");
-
- if (!outfile)
- {
- rc = FILE_NOT_FOUND;
- goto done;
- }
-
- for (uint32_t i = 0; i < shape.size(); i++)
- {
- totalElems *= shape[i];
- }
-
- rc = writeNpyHeader(outfile, shape, dtype_str);
-
- if (fwrite(databuf, sizeof(int64_t), totalElems, outfile) != totalElems)
- {
- rc = FILE_IO_ERROR;
- goto done;
- }
-
-done:
-
- if (outfile)
- fclose(outfile);
-
- return rc;
-}
-
-NumpyUtilities::NPError NumpyUtilities::writeToNpyFile(const char* filename, const uint32_t elems, const float* databuf)
-{
- std::vector<int32_t> shape = { (int32_t)elems };
- return writeToNpyFile(filename, shape, databuf);
-}
-
-NumpyUtilities::NPError
- NumpyUtilities::writeToNpyFile(const char* filename, const std::vector<int32_t>& shape, const float* databuf)
-{
- const char dtype_str[] = "'<f4'";
- FILE* outfile = nullptr;
- NPError rc = NO_ERROR;
- uint32_t totalElems = 1;
-
- assert(filename);
- assert(shape.size() >= 0);
- assert(databuf);
-
- outfile = fopen(filename, "wb");
-
- if (!outfile)
- {
- rc = FILE_NOT_FOUND;
- goto done;
- }
-
- for (uint32_t i = 0; i < shape.size(); i++)
- {
- totalElems *= shape[i];
- }
-
- rc = writeNpyHeader(outfile, shape, dtype_str);
-
- if (fwrite(databuf, sizeof(float), totalElems, outfile) != totalElems)
- {
- rc = FILE_IO_ERROR;
- goto done;
- }
-
-done:
-
- if (outfile)
- fclose(outfile);
-
- return rc;
-}
-
-NumpyUtilities::NPError
- NumpyUtilities::writeNpyHeader(FILE* outfile, const std::vector<int32_t>& shape, const char* dtype_str)
-{
- NPError rc = NO_ERROR;
- uint32_t i;
- char header[NUMPY_HEADER_SZ + 1];
- int headerPos = 0;
-
- assert(outfile);
- assert(shape.size() >= 0);
-
- // Space-fill the header and terminate it with a newline, as the numpy spec requires
- memset(header, 0x20, NUMPY_HEADER_SZ);
- header[NUMPY_HEADER_SZ - 1] = '\n';
- header[NUMPY_HEADER_SZ] = 0;
-
- // Write out the hard-coded header. We only support a 128-byte 1.0 header
- // for now, which should be sufficient for simple tensor types of any
- // reasonable rank.
- memcpy(header, NUMPY_HEADER_STR, sizeof(NUMPY_HEADER_STR) - 1);
- headerPos += sizeof(NUMPY_HEADER_STR) - 1;
-
- // Output the format dictionary using the caller-supplied dtype string
- headerPos +=
- snprintf(header + headerPos, NUMPY_HEADER_SZ - headerPos, "'descr': %s, 'fortran_order': False, 'shape': (%d,",
- dtype_str, shape.size() > 0 ? shape[0] : 1);
-
- // Remainder of shape array
- for (i = 1; i < shape.size(); i++)
- {
- headerPos += snprintf(header + headerPos, NUMPY_HEADER_SZ - headerPos, " %d,", shape[i]);
- }
-
- // Close off the dictionary
- headerPos += snprintf(header + headerPos, NUMPY_HEADER_SZ - headerPos, "), }");
-
- // snprintf writes a NUL terminator at the end; replace it with a space to keep the header space-padded
- header[headerPos] = 0x20;
-
- if (fwrite(header, NUMPY_HEADER_SZ, 1, outfile) != 1)
- {
- rc = FILE_IO_ERROR;
- goto done;
- }
-
-done:
-
- return rc;
-}
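Taken together, the readers and writers above form a small round-trip API for .npy files. A minimal usage sketch (illustrative only: the file name and data are assumptions, and error handling is abbreviated):

    #include "tosa_serialization_handler.h" // declares tosa::NumpyUtilities
    #include <cstdio>
    #include <vector>

    int main()
    {
        using NP = tosa::NumpyUtilities;

        std::vector<int32_t> shape = { 2, 3 };
        float out[6] = { 0.f, 1.f, 2.f, 3.f, 4.f, 5.f };
        float in[6] = {};

        // Write a little-endian float32 array under a 128-byte NumPy 1.0 header.
        if (NP::writeToNpyFile("example.npy", shape, out) != NP::NO_ERROR)
            return 1;

        // Read it back; the element count must equal the product of the header's shape.
        if (NP::readFromNpyFile("example.npy", 6, in) != NP::NO_ERROR)
            return 1;

        std::printf("in[5] = %f\n", in[5]);
        return 0;
    }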
diff --git a/serialization/tosa_serialization_handler.h b/serialization/tosa_serialization_handler.h
deleted file mode 100644
index 124b8e0..0000000
--- a/serialization/tosa_serialization_handler.h
+++ /dev/null
@@ -1,423 +0,0 @@
-
-// Copyright (c) 2020, ARM Limited.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef _TOSA_SERIALIZATION_HANDLER_H
-#define _TOSA_SERIALIZATION_HANDLER_H
-#include "attribute.h"
-#include "flatbuffers/idl.h"
-#include "flatbuffers/util.h"
-#include "quant_info.h"
-#include "tosa_generated.h"
-#include <cstdint>
-#include <memory>
-#include <string>
-#include <vector>
-
-namespace tosa
-{
-
-enum tosa_err_t
-{
- TOSA_OK,
- TOSA_USER_ERROR,
- TOSA_FILE_ERROR,
- TOSA_MEMORY_ERROR,
- TOSA_SCHEMA_MISSING,
- TOSA_INTERNAL_ERROR,
- TOSA_VERSION_MISMATCH,
- NUM_TOSA_ERROR
-};
-
-struct TosaVersion
-{
- int32_t _major;
- int32_t _minor;
- int32_t _patch;
- bool _experimental;
-
- TosaVersion() = delete;
- TosaVersion(int32_t major, int32_t minor, int32_t patch, bool experimental)
- {
- _major = major;
- _minor = minor;
- _patch = patch;
- _experimental = experimental;
- }
-
- std::string to_string() const
- {
- std::string str;
- str += std::to_string(_major) + ".";
- str += std::to_string(_minor) + ".";
- str += std::to_string(_patch);
- if (_experimental)
- str += "(experimental)";
- return str;
- };
-
- bool operator==(const TosaVersion& rhs)
- {
- if (rhs._major == _major && rhs._minor == _minor && rhs._patch == _patch && rhs._experimental == _experimental)
- {
- return true;
- }
- return false;
- }
-
- bool operator!=(const TosaVersion& rhs)
- {
- return !((*this) == rhs);
- }
-};
-
-class TosaSerializationHandler;
-
-class TosaSerializationTensor
-{
-public:
- // constructor and destructor
- TosaSerializationTensor(const flatbuffers::String* name,
- const flatbuffers::Vector<uint32_t>& usage,
- const flatbuffers::Vector<int32_t>& shape,
- DType dtype,
- const flatbuffers::Vector<uint32_t>& format,
- const flatbuffers::String* npy_filename);
- TosaSerializationTensor(std::string name,
- const std::vector<Usage>& usage,
- const std::vector<int32_t>& shape,
- DType dtype,
- const std::vector<Format>& format,
- const std::string* npy_filename);
- TosaSerializationTensor();
- ~TosaSerializationTensor();
-
- // copy constructor/assignment
- TosaSerializationTensor(const TosaSerializationTensor& rhs);
- TosaSerializationTensor& operator=(const TosaSerializationTensor& rhs);
-
- // move constructor/assignment
- TosaSerializationTensor(TosaSerializationTensor&& rhs);
- TosaSerializationTensor& operator=(TosaSerializationTensor&& rhs);
-
- // accessor
- std::string GetName() const
- {
- return *_name;
- }
- const std::vector<int32_t>& GetShape() const
- {
- return *_shape;
- }
- DType GetDtype()
- {
- return _dtype;
- }
- bool HasFormat(Format format)
- {
- for (Format us : *_format)
- {
- if (us == format)
- return true;
- }
- return false;
- }
- std::vector<Format>& GetFormat()
- {
- return *_format;
- }
- bool HasUsage(Usage usage)
- {
- for (Usage us : *_usage)
- {
- if (us == usage)
- return true;
- }
- return false;
- }
- std::vector<Usage>& GetUsage()
- {
- return *_usage;
- }
- std::string* GetNpyFilePtr() const
- {
- return _npy_filename;
- }
-
- // modifier
- void SetDtype(DType dtype)
- {
- _dtype = dtype;
- }
- void SetName(std::string name)
- {
- *_name = name;
- }
-
-private:
- DType _dtype; /* data type enumeration, see tosa_generated.h */
- std::vector<Format>* _format; /* list of possible tensor format */
- std::vector<Usage>* _usage; /* list of possible tensor usage */
- std::vector<int32_t>* _shape; /* shape of the tensor */
- std::string* _name; /* name of the tensor, used for solving dependency */
- std::string* _npy_filename; /* numpy array filename; a null pointer means no associated numpy file */
-};
-
-class TosaSerializationOperator
-{
-public:
- // use default copy, void constructor
- // constructor and destructor
- TosaSerializationOperator(Op op_name,
- Attribute attribute_type,
- const TosaAttributeBase* attribute,
- QuantInfo qinfo_type,
- const TosaQuantInfoBase* qinfo,
- std::vector<std::string> input_tensor_names,
- std::vector<std::string> output_tensor_names);
- ~TosaSerializationOperator();
-
- // accessor
- Op GetOp() const
- {
- return _op;
- }
- Attribute GetAttributeType() const
- {
- return _attribute_type;
- }
- TosaAttributeBase* GetAttribute() const
- {
- return _attribute;
- }
- QuantInfo GetQInfoType() const
- {
- return _qinfo_type;
- }
- TosaQuantInfoBase* GetQInfo() const
- {
- return _qinfo;
- }
- std::vector<std::string>& GetInputTensorNames() const
- {
- return *_input_tensor_names;
- }
- std::vector<std::string>& GetOutputTensorNames() const
- {
- return *_output_tensor_names;
- }
- std::vector<TosaSerializationTensor*>& GetInputTensors() const
- {
- return *_input_tensors;
- }
- std::vector<TosaSerializationTensor*>& GetOutputTensors() const
- {
- return *_output_tensors;
- }
-
-private:
- Op _op; /* operator enum, see tosa_generated.h for the enumeration table */
- Attribute _attribute_type; /* operator attribute enum, used for dynamic casting TosaAttributeBase class */
- TosaAttributeBase* _attribute; /* real attribute class goes here */
- QuantInfo _qinfo_type; /* QuantInfo enum */
- TosaQuantInfoBase* _qinfo; /* base class pointer of QuantInfo */
- std::vector<std::string>* _input_tensor_names; /* array of input tensor names */
- std::vector<std::string>* _output_tensor_names; /* array of output tensor names */
-
- std::vector<TosaSerializationTensor*>* _input_tensors; /* array of input TosaSerializationTensor */
- std::vector<TosaSerializationTensor*>* _output_tensors; /* array of output TosaSerializationTensor */
-};
-
-class TosaSerializationBasicBlock
-{
-public:
- // constructor and destructor
- TosaSerializationBasicBlock(std::string name,
- std::vector<TosaSerializationOperator*> operators,
- std::vector<TosaSerializationTensor*> tensors,
- std::vector<std::string> inputs,
- std::vector<std::string> outputs);
- ~TosaSerializationBasicBlock();
-
- // accessor
- std::string GetName() const
- {
- return *_name;
- }
- std::vector<TosaSerializationOperator*>& GetOperators()
- {
- return *_operators;
- }
- std::vector<TosaSerializationTensor*>& GetTensors()
- {
- return *_tensors;
- }
-
- TosaSerializationTensor* GetTensorByName(std::string name)
- {
- TosaSerializationTensor* result = nullptr;
- for (auto tensor : GetTensors())
- {
- if (tensor->GetName() == name)
- {
- result = tensor;
- break;
- }
- }
- return result;
- }
-
- std::vector<std::string>& GetInputs()
- {
- return *_inputs;
- }
- std::vector<std::string>& GetOutputs()
- {
- return *_outputs;
- }
-
-private:
- std::string* _name; /* name of basic block */
- std::vector<TosaSerializationOperator*>* _operators; /* TosaSerializationOperator list */
- std::vector<TosaSerializationTensor*>* _tensors; /* TosaSerializationTensor list */
- std::vector<std::string>* _inputs; /* array of string to specify block inputs */
- std::vector<std::string>* _outputs; /* array of string to specify block outputs */
-};
-
-/*
- * This is a helper class for reading and writing serialized TOSA graphs.
- * Supported formats: .tosa (flatbuffer) and .json.
- * It provides a high-level, std::vector-like interface
- * to the internal data structures.
- */
-class TosaSerializationHandler
-{
-public:
- // constructor and destructor
- TosaSerializationHandler();
- ~TosaSerializationHandler();
-
- // file io
- tosa_err_t LoadFileJson(const char* filename);
- tosa_err_t LoadFileTosaFlatbuffer(const char* filename);
- tosa_err_t SaveFileJson(const char* filename);
- tosa_err_t SaveFileTosaFlatbuffer(const char* filename);
- tosa_err_t LoadFileSchema(const char* filename);
-
- // version
- TosaVersion* GetTosaVersion() const
- {
- return _version;
- }
-
- // accessor
- std::vector<TosaSerializationBasicBlock*>& GetBlocks()
- {
- return *_blocks;
- }
-
- TosaSerializationBasicBlock* GetBlockByName(std::string name)
- {
- TosaSerializationBasicBlock* result = nullptr;
- for (auto block : GetBlocks())
- {
- if (block->GetName() == name)
- {
- result = block;
- break;
- }
- }
- return result;
- }
- TosaSerializationBasicBlock* GetMainBlock()
- {
- TosaSerializationBasicBlock* main_block = GetBlockByName(std::string("main"));
- assert(main_block);
- return main_block;
- }
-
- std::vector<std::string>& GetInputs()
- {
- return GetMainBlock()->GetInputs();
- }
- std::vector<std::string>& GetOutputs()
- {
- return GetMainBlock()->GetOutputs();
- }
-
- bool GetSchemaLoaded() const
- {
- return _schemaLoaded;
- }
-
-protected:
- tosa_err_t Clear();
- tosa_err_t InitWithBuf(const uint8_t* buf);
- tosa_err_t FreezeBuilder();
- tosa_err_t SetTosaVersion();
- tosa_err_t CheckTosaVersion(const TosaVersion& read_version);
-
-private:
- TosaVersion* _version; /* tosa version */
- flatbuffers::FlatBufferBuilder* _builder; /* flatbuffer builder */
- flatbuffers::Parser* _parser; /* flatbuffer parser, used for json parsing */
- std::vector<TosaSerializationBasicBlock*>* _blocks; /* array structure to store all TosaSerializationBasicBlock */
- bool _schemaLoaded; /* is the schema properly loaded? */
-};
-
-class NumpyUtilities
-{
-public:
- enum NPError
- {
- NO_ERROR = 0,
- FILE_NOT_FOUND,
- FILE_IO_ERROR,
- FILE_TYPE_MISMATCH,
- HEADER_PARSE_ERROR,
- BUFFER_SIZE_MISMATCH,
- };
-
- static NPError readFromNpyFile(const char* filename, const uint32_t elems, float* buf);
-
- static NPError readFromNpyFile(const char* filename, const uint32_t elems, int32_t* buf);
-
- static NPError readFromNpyFile(const char* filename, const uint32_t elems, int64_t* buf);
-
- static NPError readFromNpyFile(const char* filename, const uint32_t elems, bool* buf);
-
- static NPError writeToNpyFile(const char* filename, const std::vector<int32_t>& shape, const bool* buf);
-
- static NPError writeToNpyFile(const char* filename, const uint32_t elems, const bool* buf);
-
- static NPError writeToNpyFile(const char* filename, const std::vector<int32_t>& shape, const int32_t* buf);
-
- static NPError writeToNpyFile(const char* filename, const uint32_t elems, const int32_t* buf);
-
- static NPError writeToNpyFile(const char* filename, const std::vector<int32_t>& shape, const int64_t* buf);
-
- static NPError writeToNpyFile(const char* filename, const uint32_t elems, const int64_t* buf);
-
- static NPError writeToNpyFile(const char* filename, const std::vector<int32_t>& shape, const float* buf);
-
- static NPError writeToNpyFile(const char* filename, const uint32_t elems, const float* buf);
-
-private:
- static NPError checkNpyHeader(FILE* infile, const uint32_t elems, const char* dtype_str);
- static NPError writeNpyHeader(FILE* outfile, const std::vector<int32_t>& shape, const char* dtype_str);
-};
-
-} // namespace tosa
-
-#endif // _TOSA_SERIALIZATION_HANDLER_H
diff --git a/thirdparty/CMakeLists.txt b/thirdparty/CMakeLists.txt
index 8c7bee3..66b72b9 100644
--- a/thirdparty/CMakeLists.txt
+++ b/thirdparty/CMakeLists.txt
@@ -4,7 +4,4 @@ set(CMAKE_INSTALL_PREFIX "./thirdparty" CACHE PATH "..." FORCE)
project(thirdparty LANGUAGES CXX)
-# Flatbuffers tests are not needed
-set(FLATBUFFERS_BUILD_TESTS OFF)
-
-add_subdirectory(flatbuffers)
+add_subdirectory(serialization_lib)
diff --git a/thirdparty/flatbuffers b/thirdparty/flatbuffers
deleted file mode 160000
-Subproject bf9eb67ab9371755c6bcece13cadc7693bcbf26
diff --git a/thirdparty/serialization_lib b/thirdparty/serialization_lib
new file mode 160000
+Subproject 2364dcd7241d730021bf68e000e5a6411b9f09d
diff --git a/verif/tosa/ArithmeticRightShiftAttribute.py b/verif/tosa/ArithmeticRightShiftAttribute.py
deleted file mode 100644
index eaa52ab..0000000
--- a/verif/tosa/ArithmeticRightShiftAttribute.py
+++ /dev/null
@@ -1,45 +0,0 @@
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# Copyright (c) 2020, ARM Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-# namespace: tosa
-
-import flatbuffers
-
-class ArithmeticRightShiftAttribute(object):
- __slots__ = ['_tab']
-
- @classmethod
- def GetRootAsArithmeticRightShiftAttribute(cls, buf, offset):
- n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
- x = ArithmeticRightShiftAttribute()
- x.Init(buf, n + offset)
- return x
-
- # ArithmeticRightShiftAttribute
- def Init(self, buf, pos):
- self._tab = flatbuffers.table.Table(buf, pos)
-
- # ArithmeticRightShiftAttribute
- def Round(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
- if o != 0:
- return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
- return False
-
-def ArithmeticRightShiftAttributeStart(builder): builder.StartObject(1)
-def ArithmeticRightShiftAttributeAddRound(builder, round): builder.PrependBoolSlot(0, round, 0)
-def ArithmeticRightShiftAttributeEnd(builder): return builder.EndObject()
diff --git a/verif/tosa/Attribute.py b/verif/tosa/Attribute.py
deleted file mode 100644
index 5d79a08..0000000
--- a/verif/tosa/Attribute.py
+++ /dev/null
@@ -1,37 +0,0 @@
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# Copyright (c) 2020, ARM Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-# namespace: tosa
-
-class Attribute(object):
- NONE = 0
- Pool2dAttribute = 1
- Conv2dAttribute = 2
- TransposeConv2dAttribute = 3
- ReluNAttribute = 4
- AxisAttribute = 5
- ReshapeAttribute = 6
- SliceAttribute = 7
- TileAttribute = 8
- ResizeAttribute = 9
- ClampAttribute = 10
- RescaleAttribute = 11
- MulAttribute = 12
- ArithmeticRightShiftAttribute = 13
- CondIfAttribute = 14
- WhileLoopAttribute = 15
-
diff --git a/verif/tosa/AxisAttribute.py b/verif/tosa/AxisAttribute.py
deleted file mode 100644
index d47eb81..0000000
--- a/verif/tosa/AxisAttribute.py
+++ /dev/null
@@ -1,45 +0,0 @@
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# Copyright (c) 2020, ARM Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-# namespace: tosa
-
-import flatbuffers
-
-class AxisAttribute(object):
- __slots__ = ['_tab']
-
- @classmethod
- def GetRootAsAxisAttribute(cls, buf, offset):
- n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
- x = AxisAttribute()
- x.Init(buf, n + offset)
- return x
-
- # AxisAttribute
- def Init(self, buf, pos):
- self._tab = flatbuffers.table.Table(buf, pos)
-
- # AxisAttribute
- def Axis(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
- if o != 0:
- return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
- return 0
-
-def AxisAttributeStart(builder): builder.StartObject(1)
-def AxisAttributeAddAxis(builder, axis): builder.PrependInt32Slot(0, axis, 0)
-def AxisAttributeEnd(builder): return builder.EndObject()
diff --git a/verif/tosa/ClampAttribute.py b/verif/tosa/ClampAttribute.py
deleted file mode 100644
index ddc95cf..0000000
--- a/verif/tosa/ClampAttribute.py
+++ /dev/null
@@ -1,69 +0,0 @@
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# Copyright (c) 2020, ARM Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-# namespace: tosa
-
-import flatbuffers
-
-class ClampAttribute(object):
- __slots__ = ['_tab']
-
- @classmethod
- def GetRootAsClampAttribute(cls, buf, offset):
- n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
- x = ClampAttribute()
- x.Init(buf, n + offset)
- return x
-
- # ClampAttribute
- def Init(self, buf, pos):
- self._tab = flatbuffers.table.Table(buf, pos)
-
- # ClampAttribute
- def MinInt(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
- if o != 0:
- return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
- return 0
-
- # ClampAttribute
- def MaxInt(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
- if o != 0:
- return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
- return 0
-
- # ClampAttribute
- def MinFp(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
- if o != 0:
- return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos)
- return 0.0
-
- # ClampAttribute
- def MaxFp(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
- if o != 0:
- return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos)
- return 0.0
-
-def ClampAttributeStart(builder): builder.StartObject(4)
-def ClampAttributeAddMinInt(builder, minInt): builder.PrependInt32Slot(0, minInt, 0)
-def ClampAttributeAddMaxInt(builder, maxInt): builder.PrependInt32Slot(1, maxInt, 0)
-def ClampAttributeAddMinFp(builder, minFp): builder.PrependFloat32Slot(2, minFp, 0.0)
-def ClampAttributeAddMaxFp(builder, maxFp): builder.PrependFloat32Slot(3, maxFp, 0.0)
-def ClampAttributeEnd(builder): return builder.EndObject()
diff --git a/verif/tosa/CondIfAttribute.py b/verif/tosa/CondIfAttribute.py
deleted file mode 100644
index 0bf4566..0000000
--- a/verif/tosa/CondIfAttribute.py
+++ /dev/null
@@ -1,53 +0,0 @@
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# Copyright (c) 2020, ARM Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-# namespace: tosa
-
-import flatbuffers
-
-class CondIfAttribute(object):
- __slots__ = ['_tab']
-
- @classmethod
- def GetRootAsCondIfAttribute(cls, buf, offset):
- n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
- x = CondIfAttribute()
- x.Init(buf, n + offset)
- return x
-
- # CondIfAttribute
- def Init(self, buf, pos):
- self._tab = flatbuffers.table.Table(buf, pos)
-
- # CondIfAttribute
- def ThenBranch(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
- if o != 0:
- return self._tab.String(o + self._tab.Pos)
- return None
-
- # CondIfAttribute
- def ElseBranch(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
- if o != 0:
- return self._tab.String(o + self._tab.Pos)
- return None
-
-def CondIfAttributeStart(builder): builder.StartObject(2)
-def CondIfAttributeAddThenBranch(builder, thenBranch): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(thenBranch), 0)
-def CondIfAttributeAddElseBranch(builder, elseBranch): builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(elseBranch), 0)
-def CondIfAttributeEnd(builder): return builder.EndObject()
diff --git a/verif/tosa/Conv2dAttribute.py b/verif/tosa/Conv2dAttribute.py
deleted file mode 100644
index c7861a5..0000000
--- a/verif/tosa/Conv2dAttribute.py
+++ /dev/null
@@ -1,109 +0,0 @@
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# Copyright (c) 2020, ARM Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-# namespace: tosa
-
-import flatbuffers
-
-class Conv2dAttribute(object):
- __slots__ = ['_tab']
-
- @classmethod
- def GetRootAsConv2dAttribute(cls, buf, offset):
- n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
- x = Conv2dAttribute()
- x.Init(buf, n + offset)
- return x
-
- # Conv2dAttribute
- def Init(self, buf, pos):
- self._tab = flatbuffers.table.Table(buf, pos)
-
- # Conv2dAttribute
- def Padding(self, j):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
- if o != 0:
- a = self._tab.Vector(o)
- return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
- return 0
-
- # Conv2dAttribute
- def PaddingAsNumpy(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
- if o != 0:
- return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o)
- return 0
-
- # Conv2dAttribute
- def PaddingLength(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
- if o != 0:
- return self._tab.VectorLen(o)
- return 0
-
- # Conv2dAttribute
- def Stride(self, j):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
- if o != 0:
- a = self._tab.Vector(o)
- return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
- return 0
-
- # Conv2dAttribute
- def StrideAsNumpy(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
- if o != 0:
- return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o)
- return 0
-
- # Conv2dAttribute
- def StrideLength(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
- if o != 0:
- return self._tab.VectorLen(o)
- return 0
-
- # Conv2dAttribute
- def Dilation(self, j):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
- if o != 0:
- a = self._tab.Vector(o)
- return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
- return 0
-
- # Conv2dAttribute
- def DilationAsNumpy(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
- if o != 0:
- return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o)
- return 0
-
- # Conv2dAttribute
- def DilationLength(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
- if o != 0:
- return self._tab.VectorLen(o)
- return 0
-
-def Conv2dAttributeStart(builder): builder.StartObject(3)
-def Conv2dAttributeAddPadding(builder, padding): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(padding), 0)
-def Conv2dAttributeStartPaddingVector(builder, numElems): return builder.StartVector(4, numElems, 4)
-def Conv2dAttributeAddStride(builder, stride): builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(stride), 0)
-def Conv2dAttributeStartStrideVector(builder, numElems): return builder.StartVector(4, numElems, 4)
-def Conv2dAttributeAddDilation(builder, dilation): builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(dilation), 0)
-def Conv2dAttributeStartDilationVector(builder, numElems): return builder.StartVector(4, numElems, 4)
-def Conv2dAttributeEnd(builder): return builder.EndObject()
diff --git a/verif/tosa/ConvQuantInfo.py b/verif/tosa/ConvQuantInfo.py
deleted file mode 100644
index a88bfa6..0000000
--- a/verif/tosa/ConvQuantInfo.py
+++ /dev/null
@@ -1,53 +0,0 @@
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# Copyright (c) 2020, ARM Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-# namespace: tosa
-
-import flatbuffers
-
-class ConvQuantInfo(object):
- __slots__ = ['_tab']
-
- @classmethod
- def GetRootAsConvQuantInfo(cls, buf, offset):
- n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
- x = ConvQuantInfo()
- x.Init(buf, n + offset)
- return x
-
- # ConvQuantInfo
- def Init(self, buf, pos):
- self._tab = flatbuffers.table.Table(buf, pos)
-
- # ConvQuantInfo
- def InputZp(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
- if o != 0:
- return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
- return 0
-
- # ConvQuantInfo
- def WeightZp(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
- if o != 0:
- return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
- return 0
-
-def ConvQuantInfoStart(builder): builder.StartObject(2)
-def ConvQuantInfoAddInputZp(builder, inputZp): builder.PrependInt32Slot(0, inputZp, 0)
-def ConvQuantInfoAddWeightZp(builder, weightZp): builder.PrependInt32Slot(1, weightZp, 0)
-def ConvQuantInfoEnd(builder): return builder.EndObject()
diff --git a/verif/tosa/CustomAttribute.py b/verif/tosa/CustomAttribute.py
deleted file mode 100644
index 25f6759..0000000
--- a/verif/tosa/CustomAttribute.py
+++ /dev/null
@@ -1,45 +0,0 @@
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# Copyright (c) 2020, ARM Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-# namespace: tosa
-
-import flatbuffers
-
-class CustomAttribute(object):
- __slots__ = ['_tab']
-
- @classmethod
- def GetRootAsCustomAttribute(cls, buf, offset):
- n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
- x = CustomAttribute()
- x.Init(buf, n + offset)
- return x
-
- # CustomAttribute
- def Init(self, buf, pos):
- self._tab = flatbuffers.table.Table(buf, pos)
-
- # CustomAttribute
- def Identifier(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
- if o != 0:
- return self._tab.String(o + self._tab.Pos)
- return None
-
-def CustomAttributeStart(builder): builder.StartObject(1)
-def CustomAttributeAddIdentifier(builder, identifier): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(identifier), 0)
-def CustomAttributeEnd(builder): return builder.EndObject()
diff --git a/verif/tosa/DType.py b/verif/tosa/DType.py
deleted file mode 100644
index 2e30531..0000000
--- a/verif/tosa/DType.py
+++ /dev/null
@@ -1,30 +0,0 @@
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# Copyright (c) 2020-2021, ARM Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-# namespace: tosa
-
-class DType(object):
- UNKNOWN = 0
- BOOL = 1
- UINT8 = 2
- INT4 = 3
- INT8 = 4
- INT16 = 5
- INT32 = 6
- INT48 = 7
- FLOAT = 8
-
diff --git a/verif/tosa/Format.py b/verif/tosa/Format.py
deleted file mode 100644
index 5db4f27..0000000
--- a/verif/tosa/Format.py
+++ /dev/null
@@ -1,27 +0,0 @@
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# Copyright (c) 2020, ARM Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-# namespace: tosa
-
-class Format(object):
- UNKNOWN = 0
- NHWC = 1
- NDHWC = 2
- OHWI = 3
- HWIM = 4
- DOHWI = 5
-
diff --git a/verif/tosa/MatMulQuantInfo.py b/verif/tosa/MatMulQuantInfo.py
deleted file mode 100644
index b8390a9..0000000
--- a/verif/tosa/MatMulQuantInfo.py
+++ /dev/null
@@ -1,53 +0,0 @@
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# Copyright (c) 2020, ARM Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-# namespace: tosa
-
-import flatbuffers
-
-class MatMulQuantInfo(object):
- __slots__ = ['_tab']
-
- @classmethod
- def GetRootAsMatMulQuantInfo(cls, buf, offset):
- n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
- x = MatMulQuantInfo()
- x.Init(buf, n + offset)
- return x
-
- # MatMulQuantInfo
- def Init(self, buf, pos):
- self._tab = flatbuffers.table.Table(buf, pos)
-
- # MatMulQuantInfo
- def AZp(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
- if o != 0:
- return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
- return 0
-
- # MatMulQuantInfo
- def BZp(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
- if o != 0:
- return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
- return 0
-
-def MatMulQuantInfoStart(builder): builder.StartObject(2)
-def MatMulQuantInfoAddAZp(builder, aZp): builder.PrependInt32Slot(0, aZp, 0)
-def MatMulQuantInfoAddBZp(builder, bZp): builder.PrependInt32Slot(1, bZp, 0)
-def MatMulQuantInfoEnd(builder): return builder.EndObject()
diff --git a/verif/tosa/MulAttribute.py b/verif/tosa/MulAttribute.py
deleted file mode 100644
index f45b285..0000000
--- a/verif/tosa/MulAttribute.py
+++ /dev/null
@@ -1,45 +0,0 @@
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# Copyright (c) 2020, ARM Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-# namespace: tosa
-
-import flatbuffers
-
-class MulAttribute(object):
- __slots__ = ['_tab']
-
- @classmethod
- def GetRootAsMulAttribute(cls, buf, offset):
- n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
- x = MulAttribute()
- x.Init(buf, n + offset)
- return x
-
- # MulAttribute
- def Init(self, buf, pos):
- self._tab = flatbuffers.table.Table(buf, pos)
-
- # MulAttribute
- def Shift(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
- if o != 0:
- return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
- return 0
-
-def MulAttributeStart(builder): builder.StartObject(1)
-def MulAttributeAddShift(builder, shift): builder.PrependInt32Slot(0, shift, 0)
-def MulAttributeEnd(builder): return builder.EndObject()
diff --git a/verif/tosa/Op.py b/verif/tosa/Op.py
deleted file mode 100644
index ea9cdfe..0000000
--- a/verif/tosa/Op.py
+++ /dev/null
@@ -1,91 +0,0 @@
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# Copyright (c) 2020, ARM Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-# namespace: tosa
-
-class Op(object):
- UNKNOWN = 0
- ARGMAX = 1
- AVG_POOL2D = 2
- CONV2D = 3
- CONV3D = 4
- DEPTHWISE_CONV2D = 5
- FULLY_CONNECTED = 6
- MATMUL = 7
- MAX_POOL2D = 8
- TRANSPOSE_CONV2D = 9
- CLAMP = 10
- RELUN = 11
- SIGMOID = 12
- TANH = 13
- ADD = 14
- ARITHMETIC_RIGHT_SHIFT = 15
- BITWISE_AND = 16
- BITWISE_OR = 17
- BITWISE_XOR = 18
- LOGICAL_AND = 19
- LOGICAL_LEFT_SHIFT = 20
- LOGICAL_RIGHT_SHIFT = 21
- LOGICAL_OR = 22
- LOGICAL_XOR = 23
- MAXIMUM = 24
- MINIMUM = 25
- MUL = 26
- POW = 27
- SUB = 28
- TABLE = 29
- ABS = 30
- BITWISE_NOT = 31
- CEIL = 32
- CLZ = 33
- EXP = 34
- FLOOR = 35
- LOG = 36
- LOGICAL_NOT = 37
- NEGATE = 38
- RECIPROCAL = 39
- RSQRT = 40
- SELECT = 41
- EQUAL = 42
- GREATER = 43
- GREATER_EQUAL = 44
- REDUCE_ANY = 45
- REDUCE_ALL = 46
- REDUCE_MAX = 47
- REDUCE_MIN = 48
- REDUCE_PRODUCT = 49
- REDUCE_SUM = 50
- CONCAT = 51
- PAD = 52
- RESHAPE = 53
- REVERSE = 54
- SLICE = 55
- TILE = 56
- TRANSPOSE = 57
- GATHER = 58
- SCATTER = 59
- RESIZE = 60
- CAST = 61
- RESCALE = 62
- CONST = 63
- PLACEHOLDER = 64
- IDENTITY = 65
- IDENTITYN = 66
- CUSTOM = 67
- COND_IF = 68
- WHILE_LOOP = 69
-
diff --git a/verif/tosa/PadQuantInfo.py b/verif/tosa/PadQuantInfo.py
deleted file mode 100644
index df61926..0000000
--- a/verif/tosa/PadQuantInfo.py
+++ /dev/null
@@ -1,45 +0,0 @@
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# Copyright (c) 2020, ARM Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-# namespace: tosa
-
-import flatbuffers
-
-class PadQuantInfo(object):
- __slots__ = ['_tab']
-
- @classmethod
- def GetRootAsPadQuantInfo(cls, buf, offset):
- n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
- x = PadQuantInfo()
- x.Init(buf, n + offset)
- return x
-
- # PadQuantInfo
- def Init(self, buf, pos):
- self._tab = flatbuffers.table.Table(buf, pos)
-
- # PadQuantInfo
- def InputZp(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
- if o != 0:
- return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
- return 0
-
-def PadQuantInfoStart(builder): builder.StartObject(1)
-def PadQuantInfoAddInputZp(builder, inputZp): builder.PrependInt32Slot(0, inputZp, 0)
-def PadQuantInfoEnd(builder): return builder.EndObject()
diff --git a/verif/tosa/Pool2dAttribute.py b/verif/tosa/Pool2dAttribute.py
deleted file mode 100644
index 1520de2..0000000
--- a/verif/tosa/Pool2dAttribute.py
+++ /dev/null
@@ -1,109 +0,0 @@
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# Copyright (c) 2020, ARM Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-# namespace: tosa
-
-import flatbuffers
-
-class Pool2dAttribute(object):
- __slots__ = ['_tab']
-
- @classmethod
- def GetRootAsPool2dAttribute(cls, buf, offset):
- n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
- x = Pool2dAttribute()
- x.Init(buf, n + offset)
- return x
-
- # Pool2dAttribute
- def Init(self, buf, pos):
- self._tab = flatbuffers.table.Table(buf, pos)
-
- # Pool2dAttribute
- def Padding(self, j):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
- if o != 0:
- a = self._tab.Vector(o)
- return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
- return 0
-
- # Pool2dAttribute
- def PaddingAsNumpy(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
- if o != 0:
- return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o)
- return 0
-
- # Pool2dAttribute
- def PaddingLength(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
- if o != 0:
- return self._tab.VectorLen(o)
- return 0
-
- # Pool2dAttribute
- def Kernel(self, j):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
- if o != 0:
- a = self._tab.Vector(o)
- return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
- return 0
-
- # Pool2dAttribute
- def KernelAsNumpy(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
- if o != 0:
- return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o)
- return 0
-
- # Pool2dAttribute
- def KernelLength(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
- if o != 0:
- return self._tab.VectorLen(o)
- return 0
-
- # Pool2dAttribute
- def Stride(self, j):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
- if o != 0:
- a = self._tab.Vector(o)
- return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
- return 0
-
- # Pool2dAttribute
- def StrideAsNumpy(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
- if o != 0:
- return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o)
- return 0
-
- # Pool2dAttribute
- def StrideLength(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
- if o != 0:
- return self._tab.VectorLen(o)
- return 0
-
-def Pool2dAttributeStart(builder): builder.StartObject(3)
-def Pool2dAttributeAddPadding(builder, padding): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(padding), 0)
-def Pool2dAttributeStartPaddingVector(builder, numElems): return builder.StartVector(4, numElems, 4)
-def Pool2dAttributeAddKernel(builder, kernel): builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(kernel), 0)
-def Pool2dAttributeStartKernelVector(builder, numElems): return builder.StartVector(4, numElems, 4)
-def Pool2dAttributeAddStride(builder, stride): builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(stride), 0)
-def Pool2dAttributeStartStrideVector(builder, numElems): return builder.StartVector(4, numElems, 4)
-def Pool2dAttributeEnd(builder): return builder.EndObject()
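
The deleted module above pairs its table reader with module-level builder helpers (Start/Add*/End and Start*Vector). As a minimal, illustrative sketch only — assuming the legacy flatbuffers 1.x Python API that these generated helpers targeted — serializer code could have encoded a Pool2dAttribute like this:

``` python
import flatbuffers
from tosa import Pool2dAttribute as P

def build_pool2d_attribute(builder, padding, kernel, stride):
    # Illustrative sketch, not taken from the patch.
    # FlatBuffers vectors are written back-to-front, hence reversed().
    def int_vector(start_fn, values):
        start_fn(builder, len(values))
        for v in reversed(values):
            builder.PrependInt32(v)
        return builder.EndVector(len(values))  # flatbuffers 1.x signature

    pad_off = int_vector(P.Pool2dAttributeStartPaddingVector, padding)
    ker_off = int_vector(P.Pool2dAttributeStartKernelVector, kernel)
    str_off = int_vector(P.Pool2dAttributeStartStrideVector, stride)

    P.Pool2dAttributeStart(builder)
    P.Pool2dAttributeAddPadding(builder, pad_off)
    P.Pool2dAttributeAddKernel(builder, ker_off)
    P.Pool2dAttributeAddStride(builder, str_off)
    return P.Pool2dAttributeEnd(builder)

builder = flatbuffers.Builder(1024)
attr = build_pool2d_attribute(builder, [0, 0, 0, 0], [2, 2], [2, 2])
```
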
diff --git a/verif/tosa/QuantInfo.py b/verif/tosa/QuantInfo.py
deleted file mode 100644
index 0544cce..0000000
--- a/verif/tosa/QuantInfo.py
+++ /dev/null
@@ -1,26 +0,0 @@
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# Copyright (c) 2020, ARM Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-# namespace: tosa
-
-class QuantInfo(object):
- NONE = 0
- UnaryQuantInfo = 1
- ConvQuantInfo = 2
- MatMulQuantInfo = 3
- PadQuantInfo = 4
-
diff --git a/verif/tosa/README.md b/verif/tosa/README.md
deleted file mode 100644
index de8c1f9..0000000
--- a/verif/tosa/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
-TOSA FlatBuffers python serialization library
-=============================================
-
-Files in this directory are automatically generated by running:
-
-``` bash
-../build/thirdparty/flatbuffers/flatc --python ../serialization/tosa.fbs
-```
-
-From the ``verif/`` directory. Flatc is compiled along with the *TOSA
-Reference Model*.
-
-*Because they are automatically generated, please do not edit the
-python files in this directory by hand.*
diff --git a/verif/tosa/ReluNAttribute.py b/verif/tosa/ReluNAttribute.py
deleted file mode 100644
index e446c03..0000000
--- a/verif/tosa/ReluNAttribute.py
+++ /dev/null
@@ -1,53 +0,0 @@
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# Copyright (c) 2020, ARM Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-# namespace: tosa
-
-import flatbuffers
-
-class ReluNAttribute(object):
- __slots__ = ['_tab']
-
- @classmethod
- def GetRootAsReluNAttribute(cls, buf, offset):
- n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
- x = ReluNAttribute()
- x.Init(buf, n + offset)
- return x
-
- # ReluNAttribute
- def Init(self, buf, pos):
- self._tab = flatbuffers.table.Table(buf, pos)
-
- # ReluNAttribute
- def MaxInt(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
- if o != 0:
- return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
- return 0
-
- # ReluNAttribute
- def MaxFp(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
- if o != 0:
- return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos)
- return 0.0
-
-def ReluNAttributeStart(builder): builder.StartObject(2)
-def ReluNAttributeAddMaxInt(builder, maxInt): builder.PrependInt32Slot(0, maxInt, 0)
-def ReluNAttributeAddMaxFp(builder, maxFp): builder.PrependFloat32Slot(1, maxFp, 0.0)
-def ReluNAttributeEnd(builder): return builder.EndObject()
diff --git a/verif/tosa/RescaleAttribute.py b/verif/tosa/RescaleAttribute.py
deleted file mode 100644
index 0ec8c2b..0000000
--- a/verif/tosa/RescaleAttribute.py
+++ /dev/null
@@ -1,125 +0,0 @@
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# Copyright (c) 2020, ARM Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-# namespace: tosa
-
-import flatbuffers
-
-class RescaleAttribute(object):
- __slots__ = ['_tab']
-
- @classmethod
- def GetRootAsRescaleAttribute(cls, buf, offset):
- n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
- x = RescaleAttribute()
- x.Init(buf, n + offset)
- return x
-
- # RescaleAttribute
- def Init(self, buf, pos):
- self._tab = flatbuffers.table.Table(buf, pos)
-
- # RescaleAttribute
- def InputZp(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
- if o != 0:
- return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
- return 0
-
- # RescaleAttribute
- def OutputZp(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
- if o != 0:
- return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
- return 0
-
- # RescaleAttribute
- def Multiplier(self, j):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
- if o != 0:
- a = self._tab.Vector(o)
- return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
- return 0
-
- # RescaleAttribute
- def MultiplierAsNumpy(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
- if o != 0:
- return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o)
- return 0
-
- # RescaleAttribute
- def MultiplierLength(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
- if o != 0:
- return self._tab.VectorLen(o)
- return 0
-
- # RescaleAttribute
- def Shift(self, j):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
- if o != 0:
- a = self._tab.Vector(o)
- return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
- return 0
-
- # RescaleAttribute
- def ShiftAsNumpy(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
- if o != 0:
- return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o)
- return 0
-
- # RescaleAttribute
- def ShiftLength(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
- if o != 0:
- return self._tab.VectorLen(o)
- return 0
-
- # RescaleAttribute
- def Scale32(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
- if o != 0:
- return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
- return False
-
- # RescaleAttribute
- def DoubleRound(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
- if o != 0:
- return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
- return False
-
- # RescaleAttribute
- def PerChannel(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16))
- if o != 0:
- return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
- return False
-
-def RescaleAttributeStart(builder): builder.StartObject(7)
-def RescaleAttributeAddInputZp(builder, inputZp): builder.PrependInt32Slot(0, inputZp, 0)
-def RescaleAttributeAddOutputZp(builder, outputZp): builder.PrependInt32Slot(1, outputZp, 0)
-def RescaleAttributeAddMultiplier(builder, multiplier): builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(multiplier), 0)
-def RescaleAttributeStartMultiplierVector(builder, numElems): return builder.StartVector(4, numElems, 4)
-def RescaleAttributeAddShift(builder, shift): builder.PrependUOffsetTRelativeSlot(3, flatbuffers.number_types.UOffsetTFlags.py_type(shift), 0)
-def RescaleAttributeStartShiftVector(builder, numElems): return builder.StartVector(4, numElems, 4)
-def RescaleAttributeAddScale32(builder, scale32): builder.PrependBoolSlot(4, scale32, 0)
-def RescaleAttributeAddDoubleRound(builder, doubleRound): builder.PrependBoolSlot(5, doubleRound, 0)
-def RescaleAttributeAddPerChannel(builder, perChannel): builder.PrependBoolSlot(6, perChannel, 0)
-def RescaleAttributeEnd(builder): return builder.EndObject()
diff --git a/verif/tosa/ReshapeAttribute.py b/verif/tosa/ReshapeAttribute.py
deleted file mode 100644
index 2c50cef..0000000
--- a/verif/tosa/ReshapeAttribute.py
+++ /dev/null
@@ -1,61 +0,0 @@
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# Copyright (c) 2020, ARM Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-# namespace: tosa
-
-import flatbuffers
-
-class ReshapeAttribute(object):
- __slots__ = ['_tab']
-
- @classmethod
- def GetRootAsReshapeAttribute(cls, buf, offset):
- n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
- x = ReshapeAttribute()
- x.Init(buf, n + offset)
- return x
-
- # ReshapeAttribute
- def Init(self, buf, pos):
- self._tab = flatbuffers.table.Table(buf, pos)
-
- # ReshapeAttribute
- def Shape(self, j):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
- if o != 0:
- a = self._tab.Vector(o)
- return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
- return 0
-
- # ReshapeAttribute
- def ShapeAsNumpy(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
- if o != 0:
- return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o)
- return 0
-
- # ReshapeAttribute
- def ShapeLength(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
- if o != 0:
- return self._tab.VectorLen(o)
- return 0
-
-def ReshapeAttributeStart(builder): builder.StartObject(1)
-def ReshapeAttributeAddShape(builder, shape): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(shape), 0)
-def ReshapeAttributeStartShapeVector(builder, numElems): return builder.StartVector(4, numElems, 4)
-def ReshapeAttributeEnd(builder): return builder.EndObject()
diff --git a/verif/tosa/ResizeAttribute.py b/verif/tosa/ResizeAttribute.py
deleted file mode 100644
index 35be73a..0000000
--- a/verif/tosa/ResizeAttribute.py
+++ /dev/null
@@ -1,173 +0,0 @@
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# Copyright (c) 2020, ARM Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-# namespace: tosa
-
-import flatbuffers
-
-class ResizeAttribute(object):
- __slots__ = ['_tab']
-
- @classmethod
- def GetRootAsResizeAttribute(cls, buf, offset):
- n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
- x = ResizeAttribute()
- x.Init(buf, n + offset)
- return x
-
- # ResizeAttribute
- def Init(self, buf, pos):
- self._tab = flatbuffers.table.Table(buf, pos)
-
- # ResizeAttribute
- def OutputSize(self, j):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
- if o != 0:
- a = self._tab.Vector(o)
- return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
- return 0
-
- # ResizeAttribute
- def OutputSizeAsNumpy(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
- if o != 0:
- return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o)
- return 0
-
- # ResizeAttribute
- def OutputSizeLength(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
- if o != 0:
- return self._tab.VectorLen(o)
- return 0
-
- # ResizeAttribute
- def Stride(self, j):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
- if o != 0:
- a = self._tab.Vector(o)
- return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
- return 0
-
- # ResizeAttribute
- def StrideAsNumpy(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
- if o != 0:
- return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o)
- return 0
-
- # ResizeAttribute
- def StrideLength(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
- if o != 0:
- return self._tab.VectorLen(o)
- return 0
-
- # ResizeAttribute
- def Offset(self, j):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
- if o != 0:
- a = self._tab.Vector(o)
- return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
- return 0
-
- # ResizeAttribute
- def OffsetAsNumpy(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
- if o != 0:
- return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o)
- return 0
-
- # ResizeAttribute
- def OffsetLength(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
- if o != 0:
- return self._tab.VectorLen(o)
- return 0
-
- # ResizeAttribute
- def Shift(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
- if o != 0:
- return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
- return 0
-
- # ResizeAttribute
- def StrideFp(self, j):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
- if o != 0:
- a = self._tab.Vector(o)
- return self._tab.Get(flatbuffers.number_types.Float32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
- return 0
-
- # ResizeAttribute
- def StrideFpAsNumpy(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
- if o != 0:
- return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Float32Flags, o)
- return 0
-
- # ResizeAttribute
- def StrideFpLength(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
- if o != 0:
- return self._tab.VectorLen(o)
- return 0
-
- # ResizeAttribute
- def OffsetFp(self, j):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
- if o != 0:
- a = self._tab.Vector(o)
- return self._tab.Get(flatbuffers.number_types.Float32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
- return 0
-
- # ResizeAttribute
- def OffsetFpAsNumpy(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
- if o != 0:
- return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Float32Flags, o)
- return 0
-
- # ResizeAttribute
- def OffsetFpLength(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
- if o != 0:
- return self._tab.VectorLen(o)
- return 0
-
- # ResizeAttribute
- def Mode(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16))
- if o != 0:
- return self._tab.Get(flatbuffers.number_types.Uint32Flags, o + self._tab.Pos)
- return 0
-
-def ResizeAttributeStart(builder): builder.StartObject(7)
-def ResizeAttributeAddOutputSize(builder, outputSize): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(outputSize), 0)
-def ResizeAttributeStartOutputSizeVector(builder, numElems): return builder.StartVector(4, numElems, 4)
-def ResizeAttributeAddStride(builder, stride): builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(stride), 0)
-def ResizeAttributeStartStrideVector(builder, numElems): return builder.StartVector(4, numElems, 4)
-def ResizeAttributeAddOffset(builder, offset): builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(offset), 0)
-def ResizeAttributeStartOffsetVector(builder, numElems): return builder.StartVector(4, numElems, 4)
-def ResizeAttributeAddShift(builder, shift): builder.PrependInt32Slot(3, shift, 0)
-def ResizeAttributeAddStrideFp(builder, strideFp): builder.PrependUOffsetTRelativeSlot(4, flatbuffers.number_types.UOffsetTFlags.py_type(strideFp), 0)
-def ResizeAttributeStartStrideFpVector(builder, numElems): return builder.StartVector(4, numElems, 4)
-def ResizeAttributeAddOffsetFp(builder, offsetFp): builder.PrependUOffsetTRelativeSlot(5, flatbuffers.number_types.UOffsetTFlags.py_type(offsetFp), 0)
-def ResizeAttributeStartOffsetFpVector(builder, numElems): return builder.StartVector(4, numElems, 4)
-def ResizeAttributeAddMode(builder, mode): builder.PrependUint32Slot(6, mode, 0)
-def ResizeAttributeEnd(builder): return builder.EndObject()
diff --git a/verif/tosa/ResizeMode.py b/verif/tosa/ResizeMode.py
deleted file mode 100644
index 02bed51..0000000
--- a/verif/tosa/ResizeMode.py
+++ /dev/null
@@ -1,24 +0,0 @@
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# Copyright (c) 2020, ARM Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-# namespace: tosa
-
-class ResizeMode(object):
- UNKNOWN = 0
- NEAREST = 1
- BILINEAR = 2
-
diff --git a/verif/tosa/SliceAttribute.py b/verif/tosa/SliceAttribute.py
deleted file mode 100644
index d156a4a..0000000
--- a/verif/tosa/SliceAttribute.py
+++ /dev/null
@@ -1,85 +0,0 @@
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# Copyright (c) 2020, ARM Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-# namespace: tosa
-
-import flatbuffers
-
-class SliceAttribute(object):
- __slots__ = ['_tab']
-
- @classmethod
- def GetRootAsSliceAttribute(cls, buf, offset):
- n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
- x = SliceAttribute()
- x.Init(buf, n + offset)
- return x
-
- # SliceAttribute
- def Init(self, buf, pos):
- self._tab = flatbuffers.table.Table(buf, pos)
-
- # SliceAttribute
- def Begin(self, j):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
- if o != 0:
- a = self._tab.Vector(o)
- return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
- return 0
-
- # SliceAttribute
- def BeginAsNumpy(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
- if o != 0:
- return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o)
- return 0
-
- # SliceAttribute
- def BeginLength(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
- if o != 0:
- return self._tab.VectorLen(o)
- return 0
-
- # SliceAttribute
- def Size(self, j):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
- if o != 0:
- a = self._tab.Vector(o)
- return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
- return 0
-
- # SliceAttribute
- def SizeAsNumpy(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
- if o != 0:
- return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o)
- return 0
-
- # SliceAttribute
- def SizeLength(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
- if o != 0:
- return self._tab.VectorLen(o)
- return 0
-
-def SliceAttributeStart(builder): builder.StartObject(2)
-def SliceAttributeAddBegin(builder, begin): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(begin), 0)
-def SliceAttributeStartBeginVector(builder, numElems): return builder.StartVector(4, numElems, 4)
-def SliceAttributeAddSize(builder, size): builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(size), 0)
-def SliceAttributeStartSizeVector(builder, numElems): return builder.StartVector(4, numElems, 4)
-def SliceAttributeEnd(builder): return builder.EndObject()
diff --git a/verif/tosa/TileAttribute.py b/verif/tosa/TileAttribute.py
deleted file mode 100644
index 6385edd..0000000
--- a/verif/tosa/TileAttribute.py
+++ /dev/null
@@ -1,61 +0,0 @@
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# Copyright (c) 2020, ARM Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-# namespace: tosa
-
-import flatbuffers
-
-class TileAttribute(object):
- __slots__ = ['_tab']
-
- @classmethod
- def GetRootAsTileAttribute(cls, buf, offset):
- n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
- x = TileAttribute()
- x.Init(buf, n + offset)
- return x
-
- # TileAttribute
- def Init(self, buf, pos):
- self._tab = flatbuffers.table.Table(buf, pos)
-
- # TileAttribute
- def Multiples(self, j):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
- if o != 0:
- a = self._tab.Vector(o)
- return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
- return 0
-
- # TileAttribute
- def MultiplesAsNumpy(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
- if o != 0:
- return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o)
- return 0
-
- # TileAttribute
- def MultiplesLength(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
- if o != 0:
- return self._tab.VectorLen(o)
- return 0
-
-def TileAttributeStart(builder): builder.StartObject(1)
-def TileAttributeAddMultiples(builder, multiples): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(multiples), 0)
-def TileAttributeStartMultiplesVector(builder, numElems): return builder.StartVector(4, numElems, 4)
-def TileAttributeEnd(builder): return builder.EndObject()
diff --git a/verif/tosa/TosaBasicBlock.py b/verif/tosa/TosaBasicBlock.py
deleted file mode 100644
index 42a7379..0000000
--- a/verif/tosa/TosaBasicBlock.py
+++ /dev/null
@@ -1,123 +0,0 @@
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# Copyright (c) 2020, ARM Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-# namespace: tosa
-
-import flatbuffers
-
-class TosaBasicBlock(object):
- __slots__ = ['_tab']
-
- @classmethod
- def GetRootAsTosaBasicBlock(cls, buf, offset):
- n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
- x = TosaBasicBlock()
- x.Init(buf, n + offset)
- return x
-
- # TosaBasicBlock
- def Init(self, buf, pos):
- self._tab = flatbuffers.table.Table(buf, pos)
-
- # TosaBasicBlock
- def Name(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
- if o != 0:
- return self._tab.String(o + self._tab.Pos)
- return None
-
- # TosaBasicBlock
- def Operators(self, j):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
- if o != 0:
- x = self._tab.Vector(o)
- x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
- x = self._tab.Indirect(x)
- from .TosaOperator import TosaOperator
- obj = TosaOperator()
- obj.Init(self._tab.Bytes, x)
- return obj
- return None
-
- # TosaBasicBlock
- def OperatorsLength(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
- if o != 0:
- return self._tab.VectorLen(o)
- return 0
-
- # TosaBasicBlock
- def Tensors(self, j):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
- if o != 0:
- x = self._tab.Vector(o)
- x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
- x = self._tab.Indirect(x)
- from .TosaTensor import TosaTensor
- obj = TosaTensor()
- obj.Init(self._tab.Bytes, x)
- return obj
- return None
-
- # TosaBasicBlock
- def TensorsLength(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
- if o != 0:
- return self._tab.VectorLen(o)
- return 0
-
- # TosaBasicBlock
- def Inputs(self, j):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
- if o != 0:
- a = self._tab.Vector(o)
- return self._tab.String(a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
- return ""
-
- # TosaBasicBlock
- def InputsLength(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
- if o != 0:
- return self._tab.VectorLen(o)
- return 0
-
- # TosaBasicBlock
- def Outputs(self, j):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
- if o != 0:
- a = self._tab.Vector(o)
- return self._tab.String(a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
- return ""
-
- # TosaBasicBlock
- def OutputsLength(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
- if o != 0:
- return self._tab.VectorLen(o)
- return 0
-
-def TosaBasicBlockStart(builder): builder.StartObject(5)
-def TosaBasicBlockAddName(builder, name): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(name), 0)
-def TosaBasicBlockAddOperators(builder, operators): builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(operators), 0)
-def TosaBasicBlockStartOperatorsVector(builder, numElems): return builder.StartVector(4, numElems, 4)
-def TosaBasicBlockAddTensors(builder, tensors): builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(tensors), 0)
-def TosaBasicBlockStartTensorsVector(builder, numElems): return builder.StartVector(4, numElems, 4)
-def TosaBasicBlockAddInputs(builder, inputs): builder.PrependUOffsetTRelativeSlot(3, flatbuffers.number_types.UOffsetTFlags.py_type(inputs), 0)
-def TosaBasicBlockStartInputsVector(builder, numElems): return builder.StartVector(4, numElems, 4)
-def TosaBasicBlockAddOutputs(builder, outputs): builder.PrependUOffsetTRelativeSlot(4, flatbuffers.number_types.UOffsetTFlags.py_type(outputs), 0)
-def TosaBasicBlockStartOutputsVector(builder, numElems): return builder.StartVector(4, numElems, 4)
-def TosaBasicBlockEnd(builder): return builder.EndObject()
diff --git a/verif/tosa/TosaGraph.py b/verif/tosa/TosaGraph.py
deleted file mode 100644
index 92568b9..0000000
--- a/verif/tosa/TosaGraph.py
+++ /dev/null
@@ -1,71 +0,0 @@
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# Copyright (c) 2020, ARM Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-# namespace: tosa
-
-import flatbuffers
-
-class TosaGraph(object):
- __slots__ = ['_tab']
-
- @classmethod
- def GetRootAsTosaGraph(cls, buf, offset):
- n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
- x = TosaGraph()
- x.Init(buf, n + offset)
- return x
-
- # TosaGraph
- def Init(self, buf, pos):
- self._tab = flatbuffers.table.Table(buf, pos)
-
- # TosaGraph
- def Version(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
- if o != 0:
- x = self._tab.Indirect(o + self._tab.Pos)
- from .Version import Version
- obj = Version()
- obj.Init(self._tab.Bytes, x)
- return obj
- return None
-
- # TosaGraph
- def Blocks(self, j):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
- if o != 0:
- x = self._tab.Vector(o)
- x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
- x = self._tab.Indirect(x)
- from .TosaBasicBlock import TosaBasicBlock
- obj = TosaBasicBlock()
- obj.Init(self._tab.Bytes, x)
- return obj
- return None
-
- # TosaGraph
- def BlocksLength(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
- if o != 0:
- return self._tab.VectorLen(o)
- return 0
-
-def TosaGraphStart(builder): builder.StartObject(2)
-def TosaGraphAddVersion(builder, version): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(version), 0)
-def TosaGraphAddBlocks(builder, blocks): builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(blocks), 0)
-def TosaGraphStartBlocksVector(builder, numElems): return builder.StartVector(4, numElems, 4)
-def TosaGraphEnd(builder): return builder.EndObject()
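
TosaGraph is the root table of the schema, so the deleted readers compose: a graph holds a Version and a vector of TosaBasicBlocks, and each block exposes its operators and tensors through the indexed accessors shown earlier. A hedged sketch of walking a serialized flatbuffer with these readers (the file name is illustrative, not from the patch):

``` python
from tosa.TosaGraph import TosaGraph

with open("example.tosa", "rb") as f:   # hypothetical file name
    buf = bytearray(f.read())

graph = TosaGraph.GetRootAsTosaGraph(buf, 0)

for i in range(graph.BlocksLength()):
    block = graph.Blocks(i)
    # Name() returns raw bytes (e.g. b"main") in the flatbuffers Python API.
    print(block.Name(), "ops:", block.OperatorsLength(),
          "tensors:", block.TensorsLength())
```
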
diff --git a/verif/tosa/TosaOperator.py b/verif/tosa/TosaOperator.py
deleted file mode 100644
index ab4a160..0000000
--- a/verif/tosa/TosaOperator.py
+++ /dev/null
@@ -1,117 +0,0 @@
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# Copyright (c) 2020, ARM Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-# namespace: tosa
-
-import flatbuffers
-
-class TosaOperator(object):
- __slots__ = ['_tab']
-
- @classmethod
- def GetRootAsTosaOperator(cls, buf, offset):
- n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
- x = TosaOperator()
- x.Init(buf, n + offset)
- return x
-
- # TosaOperator
- def Init(self, buf, pos):
- self._tab = flatbuffers.table.Table(buf, pos)
-
- # TosaOperator
- def Op(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
- if o != 0:
- return self._tab.Get(flatbuffers.number_types.Uint32Flags, o + self._tab.Pos)
- return 0
-
- # TosaOperator
- def AttributeType(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
- if o != 0:
- return self._tab.Get(flatbuffers.number_types.Uint8Flags, o + self._tab.Pos)
- return 0
-
- # TosaOperator
- def Attribute(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
- if o != 0:
- from flatbuffers.table import Table
- obj = Table(bytearray(), 0)
- self._tab.Union(obj, o)
- return obj
- return None
-
- # TosaOperator
- def Inputs(self, j):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
- if o != 0:
- a = self._tab.Vector(o)
- return self._tab.String(a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
- return ""
-
- # TosaOperator
- def InputsLength(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
- if o != 0:
- return self._tab.VectorLen(o)
- return 0
-
- # TosaOperator
- def Outputs(self, j):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
- if o != 0:
- a = self._tab.Vector(o)
- return self._tab.String(a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
- return ""
-
- # TosaOperator
- def OutputsLength(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
- if o != 0:
- return self._tab.VectorLen(o)
- return 0
-
- # TosaOperator
- def QuantInfoType(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
- if o != 0:
- return self._tab.Get(flatbuffers.number_types.Uint8Flags, o + self._tab.Pos)
- return 0
-
- # TosaOperator
- def QuantInfo(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16))
- if o != 0:
- from flatbuffers.table import Table
- obj = Table(bytearray(), 0)
- self._tab.Union(obj, o)
- return obj
- return None
-
-def TosaOperatorStart(builder): builder.StartObject(7)
-def TosaOperatorAddOp(builder, op): builder.PrependUint32Slot(0, op, 0)
-def TosaOperatorAddAttributeType(builder, attributeType): builder.PrependUint8Slot(1, attributeType, 0)
-def TosaOperatorAddAttribute(builder, attribute): builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(attribute), 0)
-def TosaOperatorAddInputs(builder, inputs): builder.PrependUOffsetTRelativeSlot(3, flatbuffers.number_types.UOffsetTFlags.py_type(inputs), 0)
-def TosaOperatorStartInputsVector(builder, numElems): return builder.StartVector(4, numElems, 4)
-def TosaOperatorAddOutputs(builder, outputs): builder.PrependUOffsetTRelativeSlot(4, flatbuffers.number_types.UOffsetTFlags.py_type(outputs), 0)
-def TosaOperatorStartOutputsVector(builder, numElems): return builder.StartVector(4, numElems, 4)
-def TosaOperatorAddQuantInfoType(builder, quantInfoType): builder.PrependUint8Slot(5, quantInfoType, 0)
-def TosaOperatorAddQuantInfo(builder, quantInfo): builder.PrependUOffsetTRelativeSlot(6, flatbuffers.number_types.UOffsetTFlags.py_type(quantInfo), 0)
-def TosaOperatorEnd(builder): return builder.EndObject()
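
Attribute() and QuantInfo() above return an untyped flatbuffers Table because both fields are FlatBuffers unions; the AttributeType() / QuantInfoType() discriminants select which concrete table to initialise from it. A hedged sketch of resolving the union — the Attribute enum module belongs to the same deleted package but is not reproduced here, and the usual flatc naming of its constants is assumed:

``` python
from tosa.Attribute import Attribute            # union discriminant constants (assumed naming)
from tosa.Pool2dAttribute import Pool2dAttribute

def decode_pool2d_attribute(op):
    """op is a TosaOperator; returns a Pool2dAttribute or None."""
    if op.AttributeType() != Attribute.Pool2dAttribute:
        return None
    union_table = op.Attribute()                # untyped flatbuffers Table
    attr = Pool2dAttribute()
    attr.Init(union_table.Bytes, union_table.Pos)
    return attr
```
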
diff --git a/verif/tosa/TosaTensor.py b/verif/tosa/TosaTensor.py
deleted file mode 100644
index 0b30266..0000000
--- a/verif/tosa/TosaTensor.py
+++ /dev/null
@@ -1,133 +0,0 @@
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# Copyright (c) 2020, ARM Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-# namespace: tosa
-
-import flatbuffers
-
-class TosaTensor(object):
- __slots__ = ['_tab']
-
- @classmethod
- def GetRootAsTosaTensor(cls, buf, offset):
- n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
- x = TosaTensor()
- x.Init(buf, n + offset)
- return x
-
- # TosaTensor
- def Init(self, buf, pos):
- self._tab = flatbuffers.table.Table(buf, pos)
-
- # TosaTensor
- def Name(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
- if o != 0:
- return self._tab.String(o + self._tab.Pos)
- return None
-
- # TosaTensor
- def Shape(self, j):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
- if o != 0:
- a = self._tab.Vector(o)
- return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
- return 0
-
- # TosaTensor
- def ShapeAsNumpy(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
- if o != 0:
- return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o)
- return 0
-
- # TosaTensor
- def ShapeLength(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
- if o != 0:
- return self._tab.VectorLen(o)
- return 0
-
- # TosaTensor
- def Type(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
- if o != 0:
- return self._tab.Get(flatbuffers.number_types.Uint32Flags, o + self._tab.Pos)
- return 0
-
- # TosaTensor
- def Usage(self, j):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
- if o != 0:
- a = self._tab.Vector(o)
- return self._tab.Get(flatbuffers.number_types.Uint32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
- return 0
-
- # TosaTensor
- def UsageAsNumpy(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
- if o != 0:
- return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Uint32Flags, o)
- return 0
-
- # TosaTensor
- def UsageLength(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
- if o != 0:
- return self._tab.VectorLen(o)
- return 0
-
- # TosaTensor
- def Format(self, j):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
- if o != 0:
- a = self._tab.Vector(o)
- return self._tab.Get(flatbuffers.number_types.Uint32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
- return 0
-
- # TosaTensor
- def FormatAsNumpy(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
- if o != 0:
- return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Uint32Flags, o)
- return 0
-
- # TosaTensor
- def FormatLength(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
- if o != 0:
- return self._tab.VectorLen(o)
- return 0
-
- # TosaTensor
- def NpyFilename(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
- if o != 0:
- return self._tab.String(o + self._tab.Pos)
- return None
-
-def TosaTensorStart(builder): builder.StartObject(6)
-def TosaTensorAddName(builder, name): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(name), 0)
-def TosaTensorAddShape(builder, shape): builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(shape), 0)
-def TosaTensorStartShapeVector(builder, numElems): return builder.StartVector(4, numElems, 4)
-def TosaTensorAddType(builder, type): builder.PrependUint32Slot(2, type, 0)
-def TosaTensorAddUsage(builder, usage): builder.PrependUOffsetTRelativeSlot(3, flatbuffers.number_types.UOffsetTFlags.py_type(usage), 0)
-def TosaTensorStartUsageVector(builder, numElems): return builder.StartVector(4, numElems, 4)
-def TosaTensorAddFormat(builder, format): builder.PrependUOffsetTRelativeSlot(4, flatbuffers.number_types.UOffsetTFlags.py_type(format), 0)
-def TosaTensorStartFormatVector(builder, numElems): return builder.StartVector(4, numElems, 4)
-def TosaTensorAddNpyFilename(builder, npyFilename): builder.PrependUOffsetTRelativeSlot(5, flatbuffers.number_types.UOffsetTFlags.py_type(npyFilename), 0)
-def TosaTensorEnd(builder): return builder.EndObject()
diff --git a/verif/tosa/TransposeConv2dAttribute.py b/verif/tosa/TransposeConv2dAttribute.py
deleted file mode 100644
index 043d8e8..0000000
--- a/verif/tosa/TransposeConv2dAttribute.py
+++ /dev/null
@@ -1,133 +0,0 @@
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# Copyright (c) 2020, ARM Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-# namespace: tosa
-
-import flatbuffers
-
-class TransposeConv2dAttribute(object):
- __slots__ = ['_tab']
-
- @classmethod
- def GetRootAsTransposeConv2dAttribute(cls, buf, offset):
- n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
- x = TransposeConv2dAttribute()
- x.Init(buf, n + offset)
- return x
-
- # TransposeConv2dAttribute
- def Init(self, buf, pos):
- self._tab = flatbuffers.table.Table(buf, pos)
-
- # TransposeConv2dAttribute
- def Outpad(self, j):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
- if o != 0:
- a = self._tab.Vector(o)
- return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
- return 0
-
- # TransposeConv2dAttribute
- def OutpadAsNumpy(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
- if o != 0:
- return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o)
- return 0
-
- # TransposeConv2dAttribute
- def OutpadLength(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
- if o != 0:
- return self._tab.VectorLen(o)
- return 0
-
- # TransposeConv2dAttribute
- def Stride(self, j):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
- if o != 0:
- a = self._tab.Vector(o)
- return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
- return 0
-
- # TransposeConv2dAttribute
- def StrideAsNumpy(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
- if o != 0:
- return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o)
- return 0
-
- # TransposeConv2dAttribute
- def StrideLength(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
- if o != 0:
- return self._tab.VectorLen(o)
- return 0
-
- # TransposeConv2dAttribute
- def Dilation(self, j):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
- if o != 0:
- a = self._tab.Vector(o)
- return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
- return 0
-
- # TransposeConv2dAttribute
- def DilationAsNumpy(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
- if o != 0:
- return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o)
- return 0
-
- # TransposeConv2dAttribute
- def DilationLength(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
- if o != 0:
- return self._tab.VectorLen(o)
- return 0
-
- # TransposeConv2dAttribute
- def OutputShape(self, j):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
- if o != 0:
- a = self._tab.Vector(o)
- return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
- return 0
-
- # TransposeConv2dAttribute
- def OutputShapeAsNumpy(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
- if o != 0:
- return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o)
- return 0
-
- # TransposeConv2dAttribute
- def OutputShapeLength(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
- if o != 0:
- return self._tab.VectorLen(o)
- return 0
-
-def TransposeConv2dAttributeStart(builder): builder.StartObject(4)
-def TransposeConv2dAttributeAddOutpad(builder, outpad): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(outpad), 0)
-def TransposeConv2dAttributeStartOutpadVector(builder, numElems): return builder.StartVector(4, numElems, 4)
-def TransposeConv2dAttributeAddStride(builder, stride): builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(stride), 0)
-def TransposeConv2dAttributeStartStrideVector(builder, numElems): return builder.StartVector(4, numElems, 4)
-def TransposeConv2dAttributeAddDilation(builder, dilation): builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(dilation), 0)
-def TransposeConv2dAttributeStartDilationVector(builder, numElems): return builder.StartVector(4, numElems, 4)
-def TransposeConv2dAttributeAddOutputShape(builder, outputShape): builder.PrependUOffsetTRelativeSlot(3, flatbuffers.number_types.UOffsetTFlags.py_type(outputShape), 0)
-def TransposeConv2dAttributeStartOutputShapeVector(builder, numElems): return builder.StartVector(4, numElems, 4)
-def TransposeConv2dAttributeEnd(builder): return builder.EndObject()
diff --git a/verif/tosa/UnaryQuantInfo.py b/verif/tosa/UnaryQuantInfo.py
deleted file mode 100644
index 9ae0214..0000000
--- a/verif/tosa/UnaryQuantInfo.py
+++ /dev/null
@@ -1,53 +0,0 @@
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# Copyright (c) 2020, ARM Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-# namespace: tosa
-
-import flatbuffers
-
-class UnaryQuantInfo(object):
- __slots__ = ['_tab']
-
- @classmethod
- def GetRootAsUnaryQuantInfo(cls, buf, offset):
- n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
- x = UnaryQuantInfo()
- x.Init(buf, n + offset)
- return x
-
- # UnaryQuantInfo
- def Init(self, buf, pos):
- self._tab = flatbuffers.table.Table(buf, pos)
-
- # UnaryQuantInfo
- def InputZp(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
- if o != 0:
- return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
- return 0
-
- # UnaryQuantInfo
- def OutputZp(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
- if o != 0:
- return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
- return 0
-
-def UnaryQuantInfoStart(builder): builder.StartObject(2)
-def UnaryQuantInfoAddInputZp(builder, inputZp): builder.PrependInt32Slot(0, inputZp, 0)
-def UnaryQuantInfoAddOutputZp(builder, outputZp): builder.PrependInt32Slot(1, outputZp, 0)
-def UnaryQuantInfoEnd(builder): return builder.EndObject()
diff --git a/verif/tosa/Usage.py b/verif/tosa/Usage.py
deleted file mode 100644
index 4c42daa..0000000
--- a/verif/tosa/Usage.py
+++ /dev/null
@@ -1,25 +0,0 @@
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# Copyright (c) 2020, ARM Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-# namespace: tosa
-
-class Usage(object):
- UNKNOWN = 0
- ACTIVATION = 1
- WEIGHT = 2
- INDEX = 3
-
diff --git a/verif/tosa/Version.py b/verif/tosa/Version.py
deleted file mode 100644
index e327507..0000000
--- a/verif/tosa/Version.py
+++ /dev/null
@@ -1,69 +0,0 @@
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# Copyright (c) 2020, ARM Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-# namespace: tosa
-
-import flatbuffers
-
-class Version(object):
- __slots__ = ['_tab']
-
- @classmethod
- def GetRootAsVersion(cls, buf, offset):
- n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
- x = Version()
- x.Init(buf, n + offset)
- return x
-
- # Version
- def Init(self, buf, pos):
- self._tab = flatbuffers.table.Table(buf, pos)
-
- # Version
- def _major(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
- if o != 0:
- return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
- return 0
-
- # Version
- def _minor(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
- if o != 0:
- return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
- return 21
-
- # Version
- def _patch(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
- if o != 0:
- return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
- return 0
-
- # Version
- def _experimental(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
- if o != 0:
- return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
- return False
-
-def VersionStart(builder): builder.StartObject(4)
-def VersionAdd_major(builder, Major): builder.PrependInt32Slot(0, Major, 0)
-def VersionAdd_minor(builder, Minor): builder.PrependInt32Slot(1, Minor, 21)
-def VersionAdd_patch(builder, Patch): builder.PrependInt32Slot(2, Patch, 0)
-def VersionAdd_experimental(builder, Experimental): builder.PrependBoolSlot(3, Experimental, 0)
-def VersionEnd(builder): return builder.EndObject()
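
Note the asymmetric defaults above: _minor() falls back to 21 while the other fields fall back to 0 or False, mirroring the schema default, so a Version table written with no fields set still reads back as 0.21.0. A small hedged sketch using the deleted helpers:

``` python
import flatbuffers
from tosa import Version as V

builder = flatbuffers.Builder(64)
V.VersionStart(builder)                 # write no fields at all
ver_off = V.VersionEnd(builder)
builder.Finish(ver_off)

ver = V.Version.GetRootAsVersion(builder.Output(), 0)
# Absent fields read back as the schema defaults.
assert (ver._major(), ver._minor(), ver._patch()) == (0, 21, 0)
```
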
diff --git a/verif/tosa/WhileLoopAttribute.py b/verif/tosa/WhileLoopAttribute.py
deleted file mode 100644
index c37977f..0000000
--- a/verif/tosa/WhileLoopAttribute.py
+++ /dev/null
@@ -1,53 +0,0 @@
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# Copyright (c) 2020, ARM Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-# namespace: tosa
-
-import flatbuffers
-
-class WhileLoopAttribute(object):
- __slots__ = ['_tab']
-
- @classmethod
- def GetRootAsWhileLoopAttribute(cls, buf, offset):
- n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
- x = WhileLoopAttribute()
- x.Init(buf, n + offset)
- return x
-
- # WhileLoopAttribute
- def Init(self, buf, pos):
- self._tab = flatbuffers.table.Table(buf, pos)
-
- # WhileLoopAttribute
- def CondBranch(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
- if o != 0:
- return self._tab.String(o + self._tab.Pos)
- return None
-
- # WhileLoopAttribute
- def BodyBranch(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
- if o != 0:
- return self._tab.String(o + self._tab.Pos)
- return None
-
-def WhileLoopAttributeStart(builder): builder.StartObject(2)
-def WhileLoopAttributeAddCondBranch(builder, condBranch): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(condBranch), 0)
-def WhileLoopAttributeAddBodyBranch(builder, bodyBranch): builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(bodyBranch), 0)
-def WhileLoopAttributeEnd(builder): return builder.EndObject()
diff --git a/verif/tosa/__init__.py b/verif/tosa/__init__.py
deleted file mode 100644
index ee1ab30..0000000
--- a/verif/tosa/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-
-# Copyright (c) 2020, ARM Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
diff --git a/verif/tosa_ref_run.py b/verif/tosa_ref_run.py
index 2035147..26c64da 100644
--- a/verif/tosa_ref_run.py
+++ b/verif/tosa_ref_run.py
@@ -19,6 +19,7 @@ import shlex
import subprocess
from tosa_test_runner import TosaTestRunner, run_sh_command
+
class TosaRefRunner(TosaTestRunner):
def __init__(self, args, runnerArgs, testDir):
super().__init__(args, runnerArgs, testDir)
@@ -28,28 +29,33 @@ class TosaRefRunner(TosaTestRunner):
# Uses arguments from the argParser args, not the runnerArgs
args = self.args
- ref_cmd = [ args.ref_model_path,
- '-Csubgraph_file={}'.format(self.testDesc['tosa_file']),
- '-Csubgraph_dir={}'.format(self.testDir),
- '-Cinput_dir={}'.format(self.testDir),
- '-Coutput_dir={}'.format(self.testDir),
- '-Coutput_tensor_prefix=ref-', # Naming agreement with TosaSerializer
- ]
+ ref_cmd = [
+ args.ref_model_path,
+ "-Csubgraph_file={}".format(self.testDesc["tosa_file"]),
+ "-Csubgraph_dir={}".format(self.testDir),
+ "-Cinput_dir={}".format(self.testDir),
+ "-Coutput_dir={}".format(self.testDir),
+ "-Coutput_tensor_prefix=ref-", # Naming agreement with TosaSerializer
+ ]
# Build up input tensor_name/filename list
inputTensors = []
- for i in range(len(self.testDesc['ifm_placeholder'])):
- inputTensors.append('{}:{}'.format(self.testDesc['ifm_placeholder'][i], self.testDesc['ifm_file'][i]))
+ for i in range(len(self.testDesc["ifm_placeholder"])):
+ inputTensors.append(
+ "{}:{}".format(
+ self.testDesc["ifm_placeholder"][i], self.testDesc["ifm_file"][i]
+ )
+ )
- ref_cmd.append('-Cinput_tensor={}'.format(','.join(inputTensors)))
+ ref_cmd.append("-Cinput_tensor={}".format(",".join(inputTensors)))
if args.ref_debug:
- ref_cmd.extend(['-dALL', '-l{}'.format(args.ref_debug)])
+ ref_cmd.extend(["-dALL", "-l{}".format(args.ref_debug)])
if args.ref_intermediates:
- ref_cmd.extend(['-Ddump_intermediates=1'])
+ ref_cmd.extend(["-Ddump_intermediates=1"])
- expectedFailure = self.testDesc['expected_failure']
+ expectedFailure = self.testDesc["expected_failure"]
try:
run_sh_command(self.args, ref_cmd)
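For reference, a minimal sketch of the reference-model command line that the reformatted ref_cmd above assembles from the test descriptor; the binary name, directory and file names below are illustrative stand-ins for args.ref_model_path, self.testDir and self.testDesc:

import shlex

test_desc = {
    "tosa_file": "test.tosa",
    "ifm_placeholder": ["input-0", "input-1"],
    "ifm_file": ["input-0.npy", "input-1.npy"],
}
test_dir = "/tmp/tosa_test"               # stand-in for self.testDir
ref_model_path = "tosa_reference_model"   # stand-in for args.ref_model_path

ref_cmd = [
    ref_model_path,
    "-Csubgraph_file={}".format(test_desc["tosa_file"]),
    "-Csubgraph_dir={}".format(test_dir),
    "-Cinput_dir={}".format(test_dir),
    "-Coutput_dir={}".format(test_dir),
    "-Coutput_tensor_prefix=ref-",        # naming agreement with TosaSerializer
]
input_tensors = [
    "{}:{}".format(p, f)
    for p, f in zip(test_desc["ifm_placeholder"], test_desc["ifm_file"])
]
ref_cmd.append("-Cinput_tensor={}".format(",".join(input_tensors)))
print(" ".join(shlex.quote(c) for c in ref_cmd))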
diff --git a/verif/tosa_serializer.py b/verif/tosa_serializer.py
index 136f7aa..fa1fdcb 100644
--- a/verif/tosa_serializer.py
+++ b/verif/tosa_serializer.py
@@ -1,5 +1,3 @@
-
-
# Copyright (c) 2020-2021, ARM Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -16,37 +14,57 @@
#!/usr/bin/env python3
+import os
+import sys
+import json
import flatbuffers
import numpy as np
from enum import Enum, IntEnum, unique
-from tosa import TosaGraph, TosaBasicBlock, TosaTensor, TosaOperator, DType, Format, Usage, Op, ResizeMode, Version
+from tosa import (
+ TosaGraph,
+ TosaBasicBlock,
+ TosaTensor,
+ TosaOperator,
+ DType,
+ Op,
+ ResizeMode,
+ Version,
+)
+
+# Include the ../thirdparty/serialization_lib/python directory in PYTHONPATH
+parent_dir = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(
+ os.path.join(parent_dir, "..", "thirdparty", "serialization_lib", "python")
+)
import tosa
-import os
-import json
# With the way flatc generates its python types, there is no programmatic way
# to get string names for the integer types. Manually maintain a string table
# here.
-DTypeNames = [ 'UNKNOWN',
- 'BOOL',
- 'UINT8',
- 'INT4',
- 'INT8',
- 'INT16',
- 'INT32',
- 'INT48',
- 'FLOAT' ]
+DTypeNames = [
+ "UNKNOWN",
+ "BOOL",
+ "UINT8",
+ "INT4",
+ "INT8",
+ "INT16",
+ "INT32",
+ "INT48",
+ "FLOAT",
+]
+
def dtype_str_to_val(name):
for i in range(len(DTypeNames)):
if name.casefold() == DTypeNames[i].casefold():
return i
- raise Exception('Unable to parse DType name {}'.format(name))
+ raise Exception("Unable to parse DType name {}".format(name))
class TosaSerializerUnion:
- '''This class handles encapsulating and serializing union types into flatbuffers'''
+ """This class handles encapsulating and serializing union types into flatbuffers"""
+
def __init__(self):
# A tuple of the start and end functions. Set by the options constructors below
@@ -105,8 +123,9 @@ class TosaSerializerUnion:
return endFcn(builder)
+
class TosaSerializerAttribute(TosaSerializerUnion):
- '''This class handles encapsulating all of the enumerated types for attributes'''
+ """This class handles encapsulating all of the enumerated types for attributes"""
def __init__(self):
super().__init__()
@@ -117,12 +136,9 @@ class TosaSerializerAttribute(TosaSerializerUnion):
self.utype = Attribute.Attribute().Pool2dAttribute
self.optFcns = (a.Pool2dAttributeStart, a.Pool2dAttributeEnd)
- self.intvecs.append((a.Pool2dAttributeAddPadding,
- padding))
- self.intvecs.append((a.Pool2dAttributeAddKernel,
- kernel))
- self.intvecs.append((a.Pool2dAttributeAddStride,
- stride))
+ self.intvecs.append((a.Pool2dAttributeAddPadding, padding))
+ self.intvecs.append((a.Pool2dAttributeAddKernel, kernel))
+ self.intvecs.append((a.Pool2dAttributeAddStride, stride))
def Conv2dAttribute(self, padding, stride, dilation):
from tosa import Conv2dAttribute as a, Attribute
@@ -130,12 +146,9 @@ class TosaSerializerAttribute(TosaSerializerUnion):
self.utype = Attribute.Attribute().Conv2dAttribute
self.optFcns = (a.Conv2dAttributeStart, a.Conv2dAttributeEnd)
- self.intvecs.append((a.Conv2dAttributeAddPadding,
- padding))
- self.intvecs.append((a.Conv2dAttributeAddStride,
- stride))
- self.intvecs.append((a.Conv2dAttributeAddDilation,
- dilation))
+ self.intvecs.append((a.Conv2dAttributeAddPadding, padding))
+ self.intvecs.append((a.Conv2dAttributeAddStride, stride))
+ self.intvecs.append((a.Conv2dAttributeAddDilation, dilation))
def TransposeConv2DAttribute(self, outpad, stride, dilation, output_shape):
from tosa import TransposeConv2dAttribute as a, Attribute
@@ -143,14 +156,10 @@ class TosaSerializerAttribute(TosaSerializerUnion):
self.utype = Attribute.Attribute().TransposeConv2dAttribute
self.optFcns = (a.TransposeConv2dAttributeStart, a.TransposeConv2dAttributeEnd)
- self.intvecs.append((a.TransposeConv2dAttributeAddOutpad,
- outpad))
- self.intvecs.append((a.TransposeConv2dAttributeAddStride,
- stride))
- self.intvecs.append((a.TransposeConv2dAttributeAddDilation,
- dilation))
- self.intvecs.append((a.TransposeConv2dAttributeAddOutputShape,
- output_shape))
+ self.intvecs.append((a.TransposeConv2dAttributeAddOutpad, outpad))
+ self.intvecs.append((a.TransposeConv2dAttributeAddStride, stride))
+ self.intvecs.append((a.TransposeConv2dAttributeAddDilation, dilation))
+ self.intvecs.append((a.TransposeConv2dAttributeAddOutputShape, output_shape))
def ReluNAttribute(self, maxint, maxfp):
from tosa import ReluNAttribute as a, Attribute
@@ -161,15 +170,13 @@ class TosaSerializerAttribute(TosaSerializerUnion):
self.ints.append((a.ReluNAttributeAddMaxInt, maxint))
self.ints.append((a.ReluNAttributeAddMaxFp, maxfp))
-
def AxisAttribute(self, axis):
from tosa import AxisAttribute as a, Attribute
self.utype = Attribute.Attribute().AxisAttribute
self.optFcns = (a.AxisAttributeStart, a.AxisAttributeEnd)
- self.ints.append((a.AxisAttributeAddAxis,
- axis))
+ self.ints.append((a.AxisAttributeAddAxis, axis))
def ReshapeAttribute(self, shape):
from tosa import ReshapeAttribute as a, Attribute
@@ -177,8 +184,7 @@ class TosaSerializerAttribute(TosaSerializerUnion):
self.utype = Attribute.Attribute().ReshapeAttribute
self.optFcns = (a.ReshapeAttributeStart, a.ReshapeAttributeEnd)
- self.intvecs.append((a.ReshapeAttributeAddShape,
- shape))
+ self.intvecs.append((a.ReshapeAttributeAddShape, shape))
def SliceAttribute(self, begin, size):
from tosa import SliceAttribute as a, Attribute
@@ -186,10 +192,8 @@ class TosaSerializerAttribute(TosaSerializerUnion):
self.utype = Attribute.Attribute().SliceAttribute
self.optFcns = (a.SliceAttributeStart, a.SliceAttributeEnd)
- self.intvecs.append((a.SliceAttributeAddBegin,
- begin))
- self.intvecs.append((a.SliceAttributeAddSize,
- size))
+ self.intvecs.append((a.SliceAttributeAddBegin, begin))
+ self.intvecs.append((a.SliceAttributeAddSize, size))
def TileAttribute(self, multiples):
from tosa import TileAttribute as a, Attribute
@@ -197,29 +201,23 @@ class TosaSerializerAttribute(TosaSerializerUnion):
self.utype = Attribute.Attribute().TileAttribute
self.optFcns = (a.TileAttributeStart, a.TileAttributeEnd)
- self.intvecs.append((a.TileAttributeAddMultiples,
- multiples))
+ self.intvecs.append((a.TileAttributeAddMultiples, multiples))
- def ResizeAttribute(self, output_size, stride, offset, shift, stride_fp, offset_fp, mode):
+ def ResizeAttribute(
+ self, output_size, stride, offset, shift, stride_fp, offset_fp, mode
+ ):
from tosa import ResizeAttribute as a, Attribute
self.utype = Attribute.Attribute().ResizeAttribute
self.optFcns = (a.ResizeAttributeStart, a.ResizeAttributeEnd)
- self.intvecs.append((a.ResizeAttributeAddOutputSize,
- output_size))
- self.intvecs.append((a.ResizeAttributeAddStride,
- stride))
- self.intvecs.append((a.ResizeAttributeAddOffset,
- offset))
- self.ints.append((a.ResizeAttributeAddShift,
- shift))
- self.fpvecs.append((a.ResizeAttributeAddStrideFp,
- stride_fp))
- self.fpvecs.append((a.ResizeAttributeAddOffsetFp,
- offset_fp))
- self.ints.append((a.ResizeAttributeAddMode,
- mode))
+ self.intvecs.append((a.ResizeAttributeAddOutputSize, output_size))
+ self.intvecs.append((a.ResizeAttributeAddStride, stride))
+ self.intvecs.append((a.ResizeAttributeAddOffset, offset))
+ self.ints.append((a.ResizeAttributeAddShift, shift))
+ self.fpvecs.append((a.ResizeAttributeAddStrideFp, stride_fp))
+ self.fpvecs.append((a.ResizeAttributeAddOffsetFp, offset_fp))
+ self.ints.append((a.ResizeAttributeAddMode, mode))
def ClampAttribute(self, minint, maxint, minfp, maxfp):
from tosa import ClampAttribute as a, Attribute
@@ -227,36 +225,27 @@ class TosaSerializerAttribute(TosaSerializerUnion):
self.utype = Attribute.Attribute().ClampAttribute
self.optFcns = (a.ClampAttributeStart, a.ClampAttributeEnd)
- self.ints.append((a.ClampAttributeAddMinInt,
- minint))
- self.ints.append((a.ClampAttributeAddMaxInt,
- maxint))
+ self.ints.append((a.ClampAttributeAddMinInt, minint))
+ self.ints.append((a.ClampAttributeAddMaxInt, maxint))
- self.ints.append((a.ClampAttributeAddMinFp,
- minfp))
- self.ints.append((a.ClampAttributeAddMaxFp,
- maxfp))
+ self.ints.append((a.ClampAttributeAddMinFp, minfp))
+ self.ints.append((a.ClampAttributeAddMaxFp, maxfp))
- def RescaleAttribute(self, input_zp, output_zp, multiplier, shift, scale32, double_round, per_channel):
+ def RescaleAttribute(
+ self, input_zp, output_zp, multiplier, shift, scale32, double_round, per_channel
+ ):
from tosa import RescaleAttribute as a, Attribute
self.utype = Attribute.Attribute().RescaleAttribute
self.optFcns = (a.RescaleAttributeStart, a.RescaleAttributeEnd)
- self.ints.append((a.RescaleAttributeAddInputZp,
- input_zp))
- self.ints.append((a.RescaleAttributeAddOutputZp,
- output_zp))
- self.intvecs.append((a.RescaleAttributeAddMultiplier,
- multiplier))
- self.intvecs.append((a.RescaleAttributeAddShift,
- shift))
- self.bools.append((a.RescaleAttributeAddScale32,
- scale32))
- self.bools.append((a.RescaleAttributeAddDoubleRound,
- double_round))
- self.bools.append((a.RescaleAttributeAddPerChannel,
- per_channel))
+ self.ints.append((a.RescaleAttributeAddInputZp, input_zp))
+ self.ints.append((a.RescaleAttributeAddOutputZp, output_zp))
+ self.intvecs.append((a.RescaleAttributeAddMultiplier, multiplier))
+ self.intvecs.append((a.RescaleAttributeAddShift, shift))
+ self.bools.append((a.RescaleAttributeAddScale32, scale32))
+ self.bools.append((a.RescaleAttributeAddDoubleRound, double_round))
+ self.bools.append((a.RescaleAttributeAddPerChannel, per_channel))
def MulAttribute(self, shift):
from tosa import MulAttribute as a, Attribute
@@ -264,17 +253,18 @@ class TosaSerializerAttribute(TosaSerializerUnion):
self.utype = Attribute.Attribute().MulAttribute
self.optFcns = (a.MulAttributeStart, a.MulAttributeEnd)
- self.ints.append((a.MulAttributeAddShift,
- shift))
+ self.ints.append((a.MulAttributeAddShift, shift))
def ArithmeticRightShiftAttribute(self, round):
from tosa import ArithmeticRightShiftAttribute as a, Attribute
self.utype = Attribute.Attribute().ArithmeticRightShiftAttribute
- self.optFcns = (a.ArithmeticRightShiftAttributeStart, a.ArithmeticRightShiftAttributeEnd)
+ self.optFcns = (
+ a.ArithmeticRightShiftAttributeStart,
+ a.ArithmeticRightShiftAttributeEnd,
+ )
- self.bools.append((a.ArithmeticRightShiftAttributeAddRound,
- round))
+ self.bools.append((a.ArithmeticRightShiftAttributeAddRound, round))
def CustomAttribute(self, identifier):
from tosa import CustomAttribute as a, Attribute
@@ -282,8 +272,7 @@ class TosaSerializerAttribute(TosaSerializerUnion):
self.utype = Attribute.Attribute().CustomAttribute
self.optFcns = (a.CustomAttributeStart, a.CustomAttributeEnd)
- self.strings.append((a.CustomAttributeAddIdentifier,
- identifier))
+ self.strings.append((a.CustomAttributeAddIdentifier, identifier))
def CondIfAttribute(self, then_branch, else_branch):
from tosa import CondIfAttribute as a, Attribute
@@ -291,10 +280,8 @@ class TosaSerializerAttribute(TosaSerializerUnion):
self.utype = Attribute.Attribute().CondIfAttribute
self.optFcns = (a.CondIfAttributeStart, a.CondIfAttributeEnd)
- self.strings.append((a.CondIfAttributeAddThenBranch,
- then_branch))
- self.strings.append((a.CondIfAttributeAddElseBranch,
- else_branch))
+ self.strings.append((a.CondIfAttributeAddThenBranch, then_branch))
+ self.strings.append((a.CondIfAttributeAddElseBranch, else_branch))
def WhileLoopAttribute(self, cond_branch, body_branch):
from tosa import WhileLoopAttribute as a, Attribute
@@ -302,13 +289,13 @@ class TosaSerializerAttribute(TosaSerializerUnion):
self.utype = Attribute.Attribute().WhileLoopAttribute
self.optFcns = (a.WhileLoopAttributeStart, a.WhileLoopAttributeEnd)
- self.strings.append((a.WhileLoopAttributeAddCondBranch,
- cond_branch))
- self.strings.append((a.WhileLoopAttributeAddBodyBranch,
- body_branch))
+ self.strings.append((a.WhileLoopAttributeAddCondBranch, cond_branch))
+ self.strings.append((a.WhileLoopAttributeAddBodyBranch, body_branch))
+
class TosaSerializerQuantInfo(TosaSerializerUnion):
- '''This class handles encapsulating all of the enumerated types for quantinfo types'''
+ """This class handles encapsulating all of the enumerated types for quantinfo types"""
+
def __init__(self):
super().__init__()
@@ -343,8 +330,16 @@ class TosaSerializerQuantInfo(TosaSerializerUnion):
self.optFcns = (q.PadQuantInfoStart, q.PadQuantInfoEnd)
self.ints.append((q.PadQuantInfoAddInputZp, input_zp))
+
class TosaSerializerTensor:
- def __init__(self, name, shape, dtype, usage, dformat, filename = None, placeholderFilename = None):
+ def __init__(
+ self,
+ name,
+ shape,
+ dtype,
+ filename=None,
+ placeholderFilename=None,
+ ):
self.name = name
if isinstance(shape, np.ndarray):
@@ -353,8 +348,6 @@ class TosaSerializerTensor:
self.shape = shape
self.dtype = dtype
- self.usage = TosaSerializer.toList(usage)
- self.dformat = TosaSerializer.toList(dformat)
# Filename for const tensors. This gets written to the .tosa serialization
self.filename = filename
@@ -366,58 +359,35 @@ class TosaSerializerTensor:
self.placeholderFilename = placeholderFilename
def __str__(self):
- str = 'TosaSerializerTensor name: {} shape: {} dtype: {} Usage: {} format {} filename: {}'.format(
- self.name, self.shape, DTypeNames[self.dtype], self.usage, self.dformat, self.filename)
+ str = "TosaSerializerTensor name: {} shape: {} dtype: {} filename: {}".format(
+ self.name,
+ self.shape,
+ DTypeNames[self.dtype],
+ self.filename,
+ )
return str
- def addUsage(self, usage):
- self.usage.append(usage)
-
- def addFormat(self, format):
- self.dformat.append(format)
-
def setDtype(self, dtype):
self.dtype = dtype
- def merge(self, name, shape, dtype, usage, dformat, filename = None):
- # Merge in additional usage/formats to the list
- found = 0
- for i in self.usage:
- if i == usage:
- found = 1
- break
- if not found:
- self.usage.append(usage)
-
- found = 0
- for i in self.dformat:
- if i == dformat:
- found = 1
- break
- if not found:
- self.dformat.append(dformat)
-
def serialize(self, builder):
fb_name = builder.CreateString(self.name)
if self.filename:
fb_filename = builder.CreateString(self.filename)
fb_shapes = TosaSerializer.serializeInt32Vec(builder, self.shape)
- fb_usage = TosaSerializer.serializeInt32Vec(builder, self.usage)
- fb_dformat = TosaSerializer.serializeInt32Vec(builder, self.dformat)
TosaTensor.TosaTensorStart(builder)
TosaTensor.TosaTensorAddName(builder, fb_name)
TosaTensor.TosaTensorAddShape(builder, fb_shapes)
TosaTensor.TosaTensorAddType(builder, self.dtype)
- TosaTensor.TosaTensorAddUsage(builder, fb_usage)
- TosaTensor.TosaTensorAddFormat(builder, fb_dformat)
if self.filename:
TosaTensor.TosaTensorAddNpyFilename(builder, fb_filename)
return TosaTensor.TosaTensorEnd(builder)
+
class TosaSerializerOperator:
- def __init__(self, op, inputs, outputs, attributes = None, quantInfo = None):
+ def __init__(self, op, inputs, outputs, attributes=None, quantInfo=None):
self.op = op
self.attributes = attributes
self.inputs = TosaSerializer.toList(inputs)
@@ -425,18 +395,22 @@ class TosaSerializerOperator:
self.quantInfo = quantInfo
def __str__(self):
- str = 'Op {}\n----\n'.format(self.op)
+ str = "Op {}\n----\n".format(self.op)
for i in self.inputs:
- str = str + ' Input: {}\n'.format(i)
+ str = str + " Input: {}\n".format(i)
for o in self.outputs:
- str = str + ' Output: {}\n'.format(o)
+ str = str + " Output: {}\n".format(o)
return str
def serialize(self, builder):
- fb_inputs = TosaSerializer.serializeStrVec(builder, self.inputs, TosaOperator.TosaOperatorStartInputsVector)
- fb_outputs = TosaSerializer.serializeStrVec(builder, self.outputs, TosaOperator.TosaOperatorStartOutputsVector)
+ fb_inputs = TosaSerializer.serializeStrVec(
+ builder, self.inputs, TosaOperator.TosaOperatorStartInputsVector
+ )
+ fb_outputs = TosaSerializer.serializeStrVec(
+ builder, self.outputs, TosaOperator.TosaOperatorStartOutputsVector
+ )
# Need to serialize quant_info and attributes enums still
if self.attributes is not None:
fb_attributes = self.attributes.serialize(builder)
@@ -457,6 +431,7 @@ class TosaSerializerOperator:
return TosaOperator.TosaOperatorEnd(builder)
+
class TosaSerializerBasicBlock:
def __init__(self, name):
self.name = name
@@ -468,14 +443,21 @@ class TosaSerializerBasicBlock:
self.inputs = []
self.outputs = []
- def addTensor(self, name, shape, dtype, usage, dformat, filename = None, placeholderFilename = None):
+ def addTensor(
+ self,
+ name,
+ shape,
+ dtype,
+ filename=None,
+ placeholderFilename=None,
+ ):
try:
# Someone already added this tensor.
- # We may have to add more usages and formats
tens = self.tensors[name]
- filename = tens.merge(name, shape, dtype, usage, dformat, filename)
except KeyError:
- self.tensors[name] = TosaSerializerTensor(name, shape, dtype, usage, dformat, filename, placeholderFilename)
+ self.tensors[name] = TosaSerializerTensor(
+ name, shape, dtype, filename, placeholderFilename
+ )
return self.tensors[name]
@@ -485,15 +467,27 @@ class TosaSerializerBasicBlock:
def addOutput(self, name):
self.outputs.append(name)
- def addOperator(self, op, inputs, outputs, attributes = None, quant_info = None):
- self.operators.append(TosaSerializerOperator(op, inputs, outputs, attributes, quant_info))
+ def addOperator(self, op, inputs, outputs, attributes=None, quant_info=None):
+ self.operators.append(
+ TosaSerializerOperator(op, inputs, outputs, attributes, quant_info)
+ )
def serialize(self, builder):
fb_name = builder.CreateString(self.name)
- fbv_inputs = TosaSerializer.serializeStrVec(builder, list(self.inputs), TosaBasicBlock.TosaBasicBlockStartInputsVector)
- fbv_outputs = TosaSerializer.serializeStrVec(builder, list(self.outputs), TosaBasicBlock.TosaBasicBlockStartOutputsVector)
- fbv_tensors = TosaSerializer.serializeObjVec(builder, list(self.tensors.values()), TosaBasicBlock.TosaBasicBlockStartTensorsVector)
- fbv_operators = TosaSerializer.serializeObjVec(builder, self.operators, TosaBasicBlock.TosaBasicBlockStartOperatorsVector)
+ fbv_inputs = TosaSerializer.serializeStrVec(
+ builder, list(self.inputs), TosaBasicBlock.TosaBasicBlockStartInputsVector
+ )
+ fbv_outputs = TosaSerializer.serializeStrVec(
+ builder, list(self.outputs), TosaBasicBlock.TosaBasicBlockStartOutputsVector
+ )
+ fbv_tensors = TosaSerializer.serializeObjVec(
+ builder,
+ list(self.tensors.values()),
+ TosaBasicBlock.TosaBasicBlockStartTensorsVector,
+ )
+ fbv_operators = TosaSerializer.serializeObjVec(
+ builder, self.operators, TosaBasicBlock.TosaBasicBlockStartOperatorsVector
+ )
TosaBasicBlock.TosaBasicBlockStart(builder)
TosaBasicBlock.TosaBasicBlockAddName(builder, fb_name)
@@ -503,6 +497,7 @@ class TosaSerializerBasicBlock:
TosaBasicBlock.TosaBasicBlockAddOperators(builder, fbv_operators)
return TosaBasicBlock.TosaBasicBlockEnd(builder)
+
@unique
class TensorDir(IntEnum):
PLACEHOLDER = 0
@@ -510,6 +505,7 @@ class TensorDir(IntEnum):
INTERMEDIATE = 2
RESULT = 3
+
class TosaSerializer:
def __init__(self, pathPrefix):
@@ -522,7 +518,7 @@ class TosaSerializer:
self.builder = flatbuffers.Builder(0)
self.basicBlocks = []
- self.startBasicBlock('main')
+ self.startBasicBlock("main")
self.pathPrefix = pathPrefix
# Indices used for adding/naming tensors
@@ -533,23 +529,23 @@ class TosaSerializer:
# Is this an illegal test that is expected to fail?
self.expectedFailure = False
- self.expectedFailureDesc = ''
+ self.expectedFailureDesc = ""
def __str__(self):
- str = ''
+ str = ""
for bb in self.basicBlocks:
str = str + bb.__str__()
return str
- def addPlaceholder(self, shape, dtype, usage, dformat, vals):
+ def addPlaceholder(self, shape, dtype, vals):
if not self.currBasicBlock:
- raise Exception('addTensor called without valid basic block')
+ raise Exception("addTensor called without valid basic block")
- name = 'input-{}'.format(self.currInputIdx)
- filename = '{}.npy'.format(name)
+ name = "input-{}".format(self.currInputIdx)
+ filename = "{}.npy".format(name)
self.currInputIdx = self.currInputIdx + 1
- tens = self.currBasicBlock.addTensor(name, shape, dtype, usage, dformat, None, filename)
+ tens = self.currBasicBlock.addTensor(name, shape, dtype, None, filename)
# This is always an input to the block
self.currBasicBlock.addInput(name)
# Add the operator now
@@ -560,15 +556,15 @@ class TosaSerializer:
return tens
- def addConst(self, shape, dtype, usage, dformat, vals):
+ def addConst(self, shape, dtype, vals):
if not self.currBasicBlock:
- raise Exception('addTensor called without valid basic block')
+ raise Exception("addTensor called without valid basic block")
- name = 'const-{}'.format(self.currInputIdx)
- filename = '{}.npy'.format(name)
+ name = "const-{}".format(self.currInputIdx)
+ filename = "{}.npy".format(name)
self.currInputIdx = self.currInputIdx + 1
- tens = self.currBasicBlock.addTensor(name, shape, dtype, usage, dformat, filename)
+ tens = self.currBasicBlock.addTensor(name, shape, dtype, filename)
# Add the operator now
self.currBasicBlock.addOperator(tosa.Op.Op().CONST, [], name)
@@ -576,51 +572,54 @@ class TosaSerializer:
np.save(os.path.join(self.pathPrefix, filename), vals, False)
return tens
- def addIntermediate(self, shape, dtype, usage, dformat):
+ def addIntermediate(self, shape, dtype):
if not self.currBasicBlock:
- raise Exception('addTensor called without valid basic block')
+ raise Exception("addTensor called without valid basic block")
- name = 'layer-{}'.format(self.currLayerIdx)
- filename = None # No file, so no filename
+ name = "layer-{}".format(self.currLayerIdx)
+ filename = None # No file, so no filename
self.currLayerIdx = self.currLayerIdx + 1
- tens = self.currBasicBlock.addTensor(name, shape, dtype, usage, dformat, filename)
+ tens = self.currBasicBlock.addTensor(name, shape, dtype, filename)
return tens
def addInputTensor(self, tensor):
self.currBasicBlock.addOperator(tosa.Op.Op().PLACEHOLDER, [], tensor.name)
- self.currBasicBlock.addTensor(tensor.name, tensor.shape, tensor.dtype, tensor.usage, tensor.dformat)
+ self.currBasicBlock.addTensor(tensor.name, tensor.shape, tensor.dtype)
self.currBasicBlock.addInput(tensor.name)
def addOutputTensor(self, tensor):
self.currBasicBlock.addOutput(tensor.name)
- def addOutput(self, shape, dtype, usage, dformat):
+ def addOutput(self, shape, dtype):
if not self.currBasicBlock:
- raise Exception('addTensor called without valid basic block')
+ raise Exception("addTensor called without valid basic block")
- name = 'result-{}'.format(self.currResultIdx)
+ name = "result-{}".format(self.currResultIdx)
self.currResultIdx = self.currResultIdx + 1
- tens = self.currBasicBlock.addTensor(name, shape, dtype, usage, dformat, None)
+ tens = self.currBasicBlock.addTensor(name, shape, dtype, None)
self.currBasicBlock.addOutput(name)
return tens
- def addOperator(self, op, inputs, outputs, attributes = None, quant_info = None):
+ def addOperator(self, op, inputs, outputs, attributes=None, quant_info=None):
- if op == tosa.Op.Op().PLACEHOLDER or \
- op == tosa.Op.Op().CONST:
- raise Exception('Use addPlaceholderTensor() or addConstTensor() to add PLACEHOLDER and CONST ops')
+ if op == tosa.Op.Op().PLACEHOLDER or op == tosa.Op.Op().CONST:
+ raise Exception(
+ "Use addPlaceholderTensor() or addConstTensor() to add PLACEHOLDER and CONST ops"
+ )
- return self.currBasicBlock.addOperator(op, inputs, outputs, attributes, quant_info)
+ return self.currBasicBlock.addOperator(
+ op, inputs, outputs, attributes, quant_info
+ )
- def setExpectedFailure(self, desc='', val=True):
+ def setExpectedFailure(self, desc="", val=True):
self.expectedFailure = val
self.expectedFailureDesc = desc
- def setExpectedFailure(self, desc='', val=True):
+ def setExpectedFailure(self, desc="", val=True):
self.expectedFailure = val
self.expectedFailureDesc = desc
@@ -635,7 +634,9 @@ class TosaSerializer:
Version.VersionAdd_experimental(builder, TOSA_VERSION[3])
version = Version.VersionEnd(builder)
- fbv_bb = TosaSerializer.serializeObjVec(builder, self.basicBlocks, TosaGraph.TosaGraphStartBlocksVector)
+ fbv_bb = TosaSerializer.serializeObjVec(
+ builder, self.basicBlocks, TosaGraph.TosaGraphStartBlocksVector
+ )
TosaGraph.TosaGraphStart(builder)
TosaGraph.TosaGraphAddVersion(builder, version)
@@ -646,11 +647,11 @@ class TosaSerializer:
return self.builder.Output()
def writeJson(self, tosa_filename):
- '''Write a json test file so that it is fairly easy to pick up the test
- and generate commands for third party tool'''
+ """Write a json test file so that it is fairly easy to pick up the test
+ and generate commands for third party tool"""
test_desc = dict()
- test_desc['tosa_file'] = tosa_filename
+ test_desc["tosa_file"] = tosa_filename
ifm_name = []
ifm_shape = []
ifm_file = []
@@ -659,7 +660,7 @@ class TosaSerializer:
ofm_shape = []
for b in self.basicBlocks:
- if b.name == 'main':
+ if b.name == "main":
for i in b.inputs:
ifm_name.append(i)
ifm_shape.append(b.tensors[i].shape)
@@ -669,19 +670,19 @@ class TosaSerializer:
ofm_shape.append(b.tensors[o].shape)
# Make up an OFM filename here. One isn't generated until the reference tool is
# run, so any name is a good name
- ofm_file.append('ref-{}.npy'.format(o))
-
- test_desc['ifm_placeholder'] = ifm_name
- test_desc['ifm_file'] = ifm_file
- test_desc['ifm_shape'] = ifm_shape
- test_desc['ofm_name'] = ofm_name
- test_desc['ofm_shape'] = ofm_shape
- test_desc['ofm_file'] = ofm_file
- test_desc['expected_failure'] = self.expectedFailure
+ ofm_file.append("ref-{}.npy".format(o))
+
+ test_desc["ifm_placeholder"] = ifm_name
+ test_desc["ifm_file"] = ifm_file
+ test_desc["ifm_shape"] = ifm_shape
+ test_desc["ofm_name"] = ofm_name
+ test_desc["ofm_shape"] = ofm_shape
+ test_desc["ofm_file"] = ofm_file
+ test_desc["expected_failure"] = self.expectedFailure
if self.expectedFailureDesc:
- test_desc['expected_failure_desc'] = self.expectedFailureDesc
+ test_desc["expected_failure_desc"] = self.expectedFailureDesc
- return json.dumps(test_desc, indent=' ')
+ return json.dumps(test_desc, indent=" ")
def startBasicBlock(self, name):
self.currBasicBlock = TosaSerializerBasicBlock(name)
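For context, an illustrative example of the descriptor that writeJson() above produces for a one-input, one-output test; every value here is made up, but the keys match the test_desc fields set in the method:

import json

example_desc = {
    "tosa_file": "test.tosa",
    "ifm_placeholder": ["input-0"],
    "ifm_file": ["input-0.npy"],
    "ifm_shape": [[1, 8, 8, 3]],
    "ofm_name": ["result-0"],
    "ofm_shape": [[1, 8, 8, 3]],
    "ofm_file": ["ref-result-0.npy"],
    "expected_failure": False,
}
print(json.dumps(example_desc, indent=" "))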
@@ -748,7 +749,9 @@ class TosaSerializer:
# Store the version as a global variable so that it only needs to be
# generated once per process.
global TOSA_VERSION
- TOSA_VERSION = [root.Version()._major(),
- root.Version()._minor(),
- root.Version()._patch(),
- root.Version()._experimental() ]
+ TOSA_VERSION = [
+ root.Version()._major(),
+ root.Version()._minor(),
+ root.Version()._patch(),
+ root.Version()._experimental(),
+ ]
diff --git a/verif/tosa_test_gen.py b/verif/tosa_test_gen.py
index ae1a5c6..b059ef5 100644
--- a/verif/tosa_test_gen.py
+++ b/verif/tosa_test_gen.py
@@ -32,19 +32,24 @@ import math
from enum import IntEnum, Enum, unique
+# Include the ../thirdparty/serialization_lib/python directory in PYTHONPATH
+parent_dir = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(
+ os.path.join(parent_dir, "..", "thirdparty", "serialization_lib", "python")
+)
import tosa_serializer as ts
from tosa_serializer import *
import tosa
# Convenience variables to the flatc-generated types that should be enums, but aren't
DType = tosa.DType.DType()
-Usage = tosa.Usage.Usage()
-Format = tosa.Format.Format()
-Op = tosa.Op.Op()
+Op = tosa.Op.Op()
ResizeMode = tosa.ResizeMode.ResizeMode()
+
class TosaQuantGen:
- '''QuantizedInfo random generator helper functions. Specify with 'qgen': in the operator defintion'''
+ """QuantizedInfo random generator helper functions. Specify with 'qgen': in the operator defintion"""
+
def __init__(self):
pass
@@ -107,30 +112,31 @@ class TosaQuantGen:
m = -m
multiplier = round(m * (1 << scaleBits))
- assert(multiplier <= (1 << scaleBits))
+ assert multiplier <= (1 << scaleBits)
if multiplier == (1 << scaleBits):
multiplier = multiplier // 2
shift = shift + 1
shift = (-shift) + scaleBits
- #print('scalefp {} scaleBits {} m {} mult {} shift {}'.format(scaleFp, scaleBits, m, multiplier, shift))
+ # print('scalefp {} scaleBits {} m {} mult {} shift {}'.format(scaleFp, scaleBits, m, multiplier, shift))
- assert(multiplier <= (1 << scaleBits))
- assert(shift >= 0 and shift <= 63)
+ assert multiplier <= (1 << scaleBits)
+ assert shift >= 0 and shift <= 63
return multiplier, shift
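A hedged sketch of the scale-to-fixed-point conversion that the reformatted asserts above protect; only the rounding and normalisation steps are visible in this hunk, so the use of math.frexp and the 32-bit scale_bits default below are assumptions made purely for illustration:

import math

def compute_multiplier_and_shift(scale_fp, scale_bits=32):
    # scale_fp == m * 2**shift with 0.5 <= |m| < 1 (frexp assumed here)
    m, shift = math.frexp(scale_fp)
    if m < 0:
        m = -m
    multiplier = round(m * (1 << scale_bits))
    assert multiplier <= (1 << scale_bits)
    if multiplier == (1 << scale_bits):   # rounding pushed m up to 1.0
        multiplier = multiplier // 2
        shift = shift + 1
    shift = (-shift) + scale_bits         # so scale_fp ~= multiplier / 2**shift
    assert multiplier <= (1 << scale_bits)
    assert shift >= 0 and shift <= 63
    return multiplier, shift

# e.g. 0.25 becomes (2**31, 33): 2**31 / 2**33 == 0.25
print(compute_multiplier_and_shift(0.25))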
-class TosaTensorGen():
- ''' Tensor generators create a shape list for the placeholder and const tensor
- data operands for the operator. The actual random data is generated separately for each test.'''
+class TosaTensorGen:
+ """Tensor generators create a shape list for the placeholder and const tensor
+ data operands for the operator. The actual random data is generated separately for each test."""
+
def __init__(self):
pass
@staticmethod
def tgBasic(testGen, opName, rank):
- pl, const = opName['operands']
+ pl, const = opName["operands"]
shape = testGen.makeShape(rank)
shape_list = []
@@ -141,9 +147,9 @@ class TosaTensorGen():
@staticmethod
def tgNHWC(testGen, opName, rank):
- pl, const = opName['operands']
+ pl, const = opName["operands"]
- assert(rank == 4)
+ assert rank == 4
shape = testGen.makeShape(rank)
@@ -159,11 +165,11 @@ class TosaTensorGen():
@staticmethod
def tgScatter(testGen, opName, rank):
- pl, const = opName['operands']
+ pl, const = opName["operands"]
- assert(pl == 2)
- assert(const == 0)
- assert(rank == 3)
+ assert pl == 2
+ assert const == 0
+ assert rank == 3
values_in_shape = testGen.makeShape(rank)
@@ -171,7 +177,9 @@ class TosaTensorGen():
if testGen.args.max_batch_size:
values_in_shape[0] = (values_in_shape[0] % testGen.args.max_batch_size) + 1
- W = testGen.randInt(testGen.args.tensor_shape_range[0], testGen.args.tensor_shape_range[1])
+ W = testGen.randInt(
+ testGen.args.tensor_shape_range[0], testGen.args.tensor_shape_range[1]
+ )
input_shape = [values_in_shape[0], W, values_in_shape[2]]
shape_list = []
@@ -184,7 +192,7 @@ class TosaTensorGen():
def tgBroadcastFuzz(testGen, op, rank):
shape = testGen.makeShape(rank)
- pl, const = op['operands']
+ pl, const = op["operands"]
shape_list = []
@@ -204,9 +212,9 @@ class TosaTensorGen():
@staticmethod
def tgConv2D(testGen, op, rank):
- pl, const = op['operands']
+ pl, const = op["operands"]
- assert(rank == 4)
+ assert rank == 4
# IFM dimensions are NHWC
ifm_shape = testGen.makeShape(rank)
@@ -216,7 +224,7 @@ class TosaTensorGen():
ifm_shape[0] = (ifm_shape[0] % testGen.args.max_batch_size) + 1
# Get the filter height/width from the operator parameters
- filter_hw = op['filter']
+ filter_hw = op["filter"]
# Generate a random OFM depth
ofm_depth = testGen.makeShape(1)[0]
@@ -231,9 +239,9 @@ class TosaTensorGen():
@staticmethod
def tgTransposeConv2D(testGen, op, rank):
- pl, const = op['operands']
+ pl, const = op["operands"]
- assert(rank == 4)
+ assert rank == 4
# IFM dimensions are NHWC
ifm_shape = testGen.makeShape(rank)
@@ -243,7 +251,7 @@ class TosaTensorGen():
ifm_shape[0] = (ifm_shape[0] % testGen.args.max_batch_size) + 1
# Get the filter height/width from the operator parameters
- filter_hw = op['filter']
+ filter_hw = op["filter"]
# Generate a random OFM depth
ofm_depth = testGen.makeShape(1)[0]
@@ -255,10 +263,10 @@ class TosaTensorGen():
@staticmethod
def tgDepthwiseConv2D(testGen, op, rank):
- pl, const = op['operands']
+ pl, const = op["operands"]
- assert(rank == 4)
- assert(pl == 1 and const == 2)
+ assert rank == 4
+ assert pl == 1 and const == 2
# IFM dimensions are NHWC
ifm_shape = testGen.makeShape(rank)
@@ -269,11 +277,13 @@ class TosaTensorGen():
# Get the filter height/width from the operator parameters
# Filter is KH, HW, C, M
- filter_hw = op['filter']
+ filter_hw = op["filter"]
# Generate a random OFM depth, but don't let it get too big because
# the output depth is M * C
- filter_m = (testGen.makeShape(1)[0] % (testGen.args.tensor_shape_range[1] // 4)) + 1
+ filter_m = (
+ testGen.makeShape(1)[0] % (testGen.args.tensor_shape_range[1] // 4)
+ ) + 1
# The filter dimensions are HWCM
filter_shape = np.asarray([filter_hw[0], filter_hw[1], ifm_shape[3], filter_m])
@@ -285,10 +295,10 @@ class TosaTensorGen():
@staticmethod
def tgFullyConnected(testGen, op, rank):
- pl, const = op['operands']
+ pl, const = op["operands"]
- assert(rank == 2)
- assert(pl == 2 and const == 0)
+ assert rank == 2
+ assert pl == 2 and const == 0
input_shape = testGen.makeShape(rank)
filter_oc = testGen.makeShape(1)[0]
@@ -300,10 +310,10 @@ class TosaTensorGen():
@staticmethod
def tgMatmul(testGen, op, rank):
- pl, const = op['operands']
+ pl, const = op["operands"]
- assert(rank == 2)
- assert(pl == 2 and const == 0)
+ assert rank == 2
+ assert pl == 2 and const == 0
a_shape = testGen.makeShape(rank)
b_oc = testGen.makeShape(1)[0]
@@ -311,29 +321,31 @@ class TosaTensorGen():
return [a_shape, b_shape]
+
class TosaArgGen:
- '''Argument generators create exhaustive or random lists of attributes for operators that take
- attributes or other parameters. The return value is a list of (descriptive_name, [arglist])
- tuples where the descriptive_name is appended to the test name and the arglist is expanded
- as arguments to the operator build function.'''
+ """Argument generators create exhaustive or random lists of attributes for operators that take
+ attributes or other parameters. The return value is a list of (descriptive_name, [arglist])
+ tuples where the descriptive_name is appended to the test name and the arglist is expanded
+ as arguments to the operator build function."""
+
def __init__(self):
pass
@staticmethod
def agNone(testGen, opName, shapeList, dtype):
- '''A trivial argument generator for operators that don't take any
- non-tensor arguments'''
- return [('', [])]
+ """A trivial argument generator for operators that don't take any
+ non-tensor arguments"""
+ return [("", [])]
@staticmethod
def agAxis(testGen, opName, shapeList, dtype):
- '''Build the axis argument for operators that take a single axis'''
+ """Build the axis argument for operators that take a single axis"""
axes = []
shape = shapeList[0]
for a in range(0, len(shape)):
- axes.append(('axis_{}'.format(a), [a]))
+ axes.append(("axis_{}".format(a), [a]))
return axes
@staticmethod
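A tiny example of the (descriptive_name, [arglist]) convention described in the TosaArgGen docstring above, using the agAxis generator for an illustrative rank-3 shape:

shape = [2, 3, 4]   # illustrative shapeList[0]
axes = [("axis_{}".format(a), [a]) for a in range(len(shape))]
print(axes)         # [('axis_0', [0]), ('axis_1', [1]), ('axis_2', [2])]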
@@ -344,8 +356,8 @@ class TosaArgGen:
filter_shape = shapeList[1]
# Must be rank 4
- assert(len(ifm_shape) == 4)
- assert(len(filter_shape) == 4)
+ assert len(ifm_shape) == 4
+ assert len(filter_shape) == 4
maxStride = testGen.args.max_conv_stride
maxPadding = testGen.args.max_conv_padding + 1
@@ -356,20 +368,24 @@ class TosaArgGen:
for padding in range(0, (maxPadding) ** 4):
for dilation in range(0, maxDilation ** 2):
- s = [stride // maxStride + 1,
- stride % maxStride + 1]
- p = [(padding // (maxPadding * 4)) % maxPadding,
- (padding // (maxPadding * 2)) % maxPadding,
- (padding // (maxPadding * 1)) % maxPadding,
- padding % maxPadding]
- d = [ dilation // maxDilation + 1,
- dilation % maxDilation + 1]
+ s = [stride // maxStride + 1, stride % maxStride + 1]
+ p = [
+ (padding // (maxPadding * 4)) % maxPadding,
+ (padding // (maxPadding * 2)) % maxPadding,
+ (padding // (maxPadding * 1)) % maxPadding,
+ padding % maxPadding,
+ ]
+ d = [dilation // maxDilation + 1, dilation % maxDilation + 1]
# 4 padding parameters for regular conv2d
- arg_list.append(('st{}{}_pad{}{}{}{}_dilat{}{}'.format(s[0], s[1],
- p[0], p[1], p[2], p[3],
- d[0], d[1]),
- [ s, p, d ]))
+ arg_list.append(
+ (
+ "st{}{}_pad{}{}{}{}_dilat{}{}".format(
+ s[0], s[1], p[0], p[1], p[2], p[3], d[0], d[1]
+ ),
+ [s, p, d],
+ )
+ )
return arg_list
@staticmethod
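A worked sketch of how the nested-loop indices in agConv2D above decode into stride, padding and dilation values; the max* limits are illustrative stand-ins for the command-line arguments:

maxStride, maxPadding, maxDilation = 2, 2, 2
stride, padding, dilation = 1, 5, 3   # example indices from the nested loops

s = [stride // maxStride + 1, stride % maxStride + 1]
p = [
    (padding // (maxPadding * 4)) % maxPadding,
    (padding // (maxPadding * 2)) % maxPadding,
    (padding // (maxPadding * 1)) % maxPadding,
    padding % maxPadding,
]
d = [dilation // maxDilation + 1, dilation % maxDilation + 1]

print("st{}{}_pad{}{}{}{}_dilat{}{}".format(s[0], s[1], *p, d[0], d[1]))
# -> st12_pad0101_dilat22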
@@ -380,8 +396,8 @@ class TosaArgGen:
filter_shape = shapeList[1]
# Must be rank 4
- assert(len(ifm_shape) == 4)
- assert(len(filter_shape) == 4)
+ assert len(ifm_shape) == 4
+ assert len(filter_shape) == 4
maxStride = testGen.args.max_conv_stride
maxPadding = testGen.args.max_conv_padding + 1
@@ -392,27 +408,47 @@ class TosaArgGen:
for out_padding in range(0, (maxPadding) ** 2):
for dilation in range(0, maxDilation ** 2):
- s = [stride // maxStride + 1,
- stride % maxStride + 1]
- p = [(out_padding // (maxPadding * 1)) % maxPadding,
- out_padding % maxPadding]
- d = [ dilation // maxDilation + 1,
- dilation % maxDilation + 1]
-
- oh = (ifm_shape[1] - filter_shape[1] - (filter_shape[1] - 1) * (d[0] - 1) + \
- 2 * p[0]) // s[0] + 1
-
- ow = (ifm_shape[2] - filter_shape[2] - (filter_shape[2] - 1) * (d[1] - 1) + \
- 2 * p[1]) // s[1] + 1
+ s = [stride // maxStride + 1, stride % maxStride + 1]
+ p = [
+ (out_padding // (maxPadding * 1)) % maxPadding,
+ out_padding % maxPadding,
+ ]
+ d = [dilation // maxDilation + 1, dilation % maxDilation + 1]
+
+ oh = (
+ ifm_shape[1]
+ - filter_shape[1]
+ - (filter_shape[1] - 1) * (d[0] - 1)
+ + 2 * p[0]
+ ) // s[0] + 1
+
+ ow = (
+ ifm_shape[2]
+ - filter_shape[2]
+ - (filter_shape[2] - 1) * (d[1] - 1)
+ + 2 * p[1]
+ ) // s[1] + 1
# Output shape
- os = [ ifm_shape[0], oh, ow, filter_shape[0] ]
-
- arg_list.append(('st{}{}_outpad{}{}_dilat{}{}_os{}x{}x{}x{}'.format(s[0], s[1],
- p[0], p[1],
- d[0], d[1],
- os[0], os[1], os[2], os[3]),
- [ s, p, d, os ]))
+ os = [ifm_shape[0], oh, ow, filter_shape[0]]
+
+ arg_list.append(
+ (
+ "st{}{}_outpad{}{}_dilat{}{}_os{}x{}x{}x{}".format(
+ s[0],
+ s[1],
+ p[0],
+ p[1],
+ d[0],
+ d[1],
+ os[0],
+ os[1],
+ os[2],
+ os[3],
+ ),
+ [s, p, d, os],
+ )
+ )
return arg_list
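A numeric sketch of the transpose_conv2d output-shape computation reformatted above (all dimensions are illustrative):

ifm_shape = [1, 16, 16, 8]    # NHWC
filter_shape = [4, 3, 3, 8]   # OHWI
s = [2, 2]                    # stride
p = [1, 1]                    # out_padding
d = [1, 1]                    # dilation

oh = (
    ifm_shape[1] - filter_shape[1] - (filter_shape[1] - 1) * (d[0] - 1) + 2 * p[0]
) // s[0] + 1
ow = (
    ifm_shape[2] - filter_shape[2] - (filter_shape[2] - 1) * (d[1] - 1) + 2 * p[1]
) // s[1] + 1
os = [ifm_shape[0], oh, ow, filter_shape[0]]
print(os)   # -> [1, 8, 8, 4]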
@@ -430,14 +466,14 @@ class TosaArgGen:
paddings = np.zeros((rank * 2), dtype=np.int32)
# Fill in the 1's
- for r in (range(rank * 2)):
+ for r in range(rank * 2):
if (v >> r) & 1:
paddings[r] = 1
# Reshape back to a 2D array
paddings = paddings.reshape((rank, 2))
- arg_list.append(('pad{0:b}'.format(v), [ paddings ]))
+ arg_list.append(("pad{0:b}".format(v), [paddings]))
return arg_list
@@ -446,7 +482,7 @@ class TosaArgGen:
arg_list = []
shape = shapeList[0]
- assert(len(shape) == 4)
+ assert len(shape) == 4
maxStride = testGen.args.max_pooling_stride
maxKernel = testGen.args.max_pooling_kernel
@@ -455,19 +491,23 @@ class TosaArgGen:
for kernel in range(0, maxKernel ** 2):
for stride in range(0, maxStride ** 2):
for padding in range(0, maxPadding ** 4):
- s = [stride // maxStride + 1,
- stride % maxStride + 1]
- k = [(kernel // maxKernel) + 2,
- (kernel % maxKernel) + 2]
- p = [(padding // (maxPadding * 4)) % maxPadding,
- (padding // (maxPadding * 2)) % maxPadding,
- (padding // (maxPadding * 1)) % maxPadding,
- padding % maxPadding]
-
- arg_list.append(('st{}{}_kern{}{}_pad{}{}{}{}'.format(s[0], s[1],
- k[0], k[1],
- p[0], p[1], p[2], p[3]),
- [k, s, p]))
+ s = [stride // maxStride + 1, stride % maxStride + 1]
+ k = [(kernel // maxKernel) + 2, (kernel % maxKernel) + 2]
+ p = [
+ (padding // (maxPadding * 4)) % maxPadding,
+ (padding // (maxPadding * 2)) % maxPadding,
+ (padding // (maxPadding * 1)) % maxPadding,
+ padding % maxPadding,
+ ]
+
+ arg_list.append(
+ (
+ "st{}{}_kern{}{}_pad{}{}{}{}".format(
+ s[0], s[1], k[0], k[1], p[0], p[1], p[2], p[3]
+ ),
+ [k, s, p],
+ )
+ )
return arg_list
@staticmethod
@@ -476,20 +516,20 @@ class TosaArgGen:
# Enumerate the output types here
if inDtype == DType.INT8:
- dtypeList = [ DType.BOOL, DType.INT16, DType.INT32, DType.FLOAT ]
+ dtypeList = [DType.BOOL, DType.INT16, DType.INT32, DType.FLOAT]
elif inDtype == DType.INT16:
- dtypeList = [ DType.BOOL, DType.INT8, DType.INT32, DType.FLOAT ]
+ dtypeList = [DType.BOOL, DType.INT8, DType.INT32, DType.FLOAT]
elif inDtype == DType.INT32:
- dtypeList = [ DType.BOOL, DType.INT8, DType.INT16, DType.FLOAT ]
+ dtypeList = [DType.BOOL, DType.INT8, DType.INT16, DType.FLOAT]
elif inDtype == DType.BOOL:
- dtypeList = [ DType.INT8, DType.INT16, DType.INT32 ]
+ dtypeList = [DType.INT8, DType.INT16, DType.INT32]
elif inDtype == DType.FLOAT:
- dtypeList = [ DType.INT8, DType.INT16, DType.INT32 ]
+ dtypeList = [DType.INT8, DType.INT16, DType.INT32]
else:
- raise Exception('Unexpected input dtype: {}'.format(inDtype))
+ raise Exception("Unexpected input dtype: {}".format(inDtype))
for dtype in dtypeList:
- arg_list.append(('out{}'.format(DTypeNames[dtype]), [dtype]))
+ arg_list.append(("out{}".format(DTypeNames[dtype]), [dtype]))
return arg_list
@@ -498,17 +538,26 @@ class TosaArgGen:
arg_list = []
# Enumerate the output types here
- for dtype in [ DType.INT8, DType.INT16, DType.INT32 ]:
- for scale32 in [ False, True ]:
- for double_round in [ False, True ]:
- for per_channel in [ False, True ]:
+ for dtype in [DType.INT8, DType.INT16, DType.INT32]:
+ for scale32 in [False, True]:
+ for double_round in [False, True]:
+ for per_channel in [False, True]:
if inDtype == DType.INT48 and scale32:
# Illegal condition. Must be scale32=False
continue
- arg_list.append(('out{}_sc{}_dr{}_pc{}'.format(DTypeNames[dtype], int(scale32), int(double_round), int(per_channel)),
- [dtype, scale32, double_round, per_channel]))
+ arg_list.append(
+ (
+ "out{}_sc{}_dr{}_pc{}".format(
+ DTypeNames[dtype],
+ int(scale32),
+ int(double_round),
+ int(per_channel),
+ ),
+ [dtype, scale32, double_round, per_channel],
+ )
+ )
return arg_list
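A small sketch of the combination count produced by the agRescale enumeration above, including the INT48 + scale32 exclusion; the dtype names are plain strings here purely for illustration:

from itertools import product

in_dtype = "INT48"   # illustrative input dtype
combos = [
    (dtype, scale32, double_round, per_channel)
    for dtype, scale32, double_round, per_channel in product(
        ["INT8", "INT16", "INT32"], [False, True], [False, True], [False, True]
    )
    if not (in_dtype == "INT48" and scale32)
]
print(len(combos))   # 12 for an INT48 input, 24 otherwise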
@@ -521,9 +570,9 @@ class TosaArgGen:
shift = testGen.randInt(0, 32)
- arg_list.append(('perm{}_shift{}'.format(p, shift), [shift]))
+ arg_list.append(("perm{}_shift{}".format(p, shift), [shift]))
else:
- arg_list.append(('shift0', [0]))
+ arg_list.append(("shift0", [0]))
return arg_list
@@ -531,8 +580,8 @@ class TosaArgGen:
def agArithmeticRightShift(testGen, opName, shapeList, dtype):
arg_list = []
- arg_list.append(('roundTrue', [True]))
- arg_list.append(('roundFalse', [False]))
+ arg_list.append(("roundTrue", [True]))
+ arg_list.append(("roundFalse", [False]))
return arg_list
@@ -563,7 +612,7 @@ class TosaArgGen:
for p in range(testGen.args.num_rand_permutations):
newRank = testGen.randInt(1, 6)
newShape = []
- if (len(factors) < newRank):
+ if len(factors) < newRank:
continue
remainingElements = totalElements
@@ -572,7 +621,9 @@ class TosaArgGen:
# pick rank-1 factors
newShape.append(shuffledFactors[0])
remainingElements = remainingElements // shuffledFactors[0]
- shuffledFactors = testGen.rng.permutation(TosaArgGen.getFactors(remainingElements))
+ shuffledFactors = testGen.rng.permutation(
+ TosaArgGen.getFactors(remainingElements)
+ )
newShape.append(remainingElements)
# Toss in a -1 sometimes
@@ -580,11 +631,10 @@ class TosaArgGen:
if minusOne < newRank:
newShape[minusOne] = -1
- arg_list.append(('perm{}_rank{}'.format(p, newRank), [newShape]))
+ arg_list.append(("perm{}_rank{}".format(p, newRank), [newShape]))
return arg_list
-
@staticmethod
def agTranspose(testGen, opName, shapeList, dtype):
arg_list = []
@@ -603,7 +653,7 @@ class TosaArgGen:
break
if not found:
- arg_list.append(('perm{}'.format(p), [perms]))
+ arg_list.append(("perm{}".format(p), [perms]))
return arg_list
@@ -618,7 +668,7 @@ class TosaArgGen:
begin = []
size = []
- valid=True
+ valid = True
for i in range(rank):
if ifm_shape[i] > 1:
@@ -633,7 +683,7 @@ class TosaArgGen:
size.append(1)
if valid:
- arg_list.append(('perm{}'.format(p), [begin, size]))
+ arg_list.append(("perm{}".format(p), [begin, size]))
return arg_list
@staticmethod
@@ -652,7 +702,7 @@ class TosaArgGen:
for i in range(rank):
multiples.append(testGen.randInt(1, 4))
- arg_list.append(('perm{}'.format(p), [multiples]))
+ arg_list.append(("perm{}".format(p), [multiples]))
return arg_list
@@ -666,15 +716,15 @@ class TosaArgGen:
# Exclude illegal {mode, type} configurations. Pick legal output types
if m == ResizeMode.NEAREST and dtype == DType.INT8:
- outputDTypeList = [ DType.INT32 ]
+ outputDTypeList = [DType.INT32]
elif m == ResizeMode.NEAREST and dtype == DType.INT16:
- outputDTypeList = [ DType.INT16 ]
+ outputDTypeList = [DType.INT16]
elif m == ResizeMode.BILINEAR and dtype == DType.INT8:
- outputDTypeList = [ DType.INT8 ]
+ outputDTypeList = [DType.INT8]
elif m == ResizeMode.BILINEAR and dtype == DType.INT16:
- outputDTypeList = [ DType.INT48 ]
+ outputDTypeList = [DType.INT48]
elif dtype == DType.FLOAT:
- outputDTypeList = [ DType.FLOAT ]
+ outputDTypeList = [DType.FLOAT]
else:
continue
@@ -683,7 +733,7 @@ class TosaArgGen:
# Randomly generate legal output dimensions and shift
# and then compute the stride and offset based on them
- output_dims = [ testGen.randInt(1), testGen.randInt(1) ]
+ output_dims = [testGen.randInt(1), testGen.randInt(1)]
in_center_h = (ifm_shape[1] - 1) / 2.0
in_center_w = (ifm_shape[2] - 1) / 2.0
out_center_h = (output_dims[0] - 1) / 2.0
@@ -698,12 +748,33 @@ class TosaArgGen:
shift = 0
stride = [0, 0]
offset = [0, 0]
- stride_fp = [ fp_stride_y, fp_stride_x]
- offset_fp = [ fp_offset_y, fp_offset_x]
- arg_list.append(('mode{}_odim{}x{}_out{}_st{:.2f}x{:.2f}_off{:.2f}x{:.2f}'.format(m, output_dims[0], output_dims[1],
- testGen.typeStr(outputDType), stride_fp[0], stride_fp[1],
- offset_fp[0], offset_fp[1]),
- [m, stride, offset, shift, stride_fp, offset_fp, output_dims, dtype, outputDType]))
+ stride_fp = [fp_stride_y, fp_stride_x]
+ offset_fp = [fp_offset_y, fp_offset_x]
+ arg_list.append(
+ (
+ "mode{}_odim{}x{}_out{}_st{:.2f}x{:.2f}_off{:.2f}x{:.2f}".format(
+ m,
+ output_dims[0],
+ output_dims[1],
+ testGen.typeStr(outputDType),
+ stride_fp[0],
+ stride_fp[1],
+ offset_fp[0],
+ offset_fp[1],
+ ),
+ [
+ m,
+ stride,
+ offset,
+ shift,
+ stride_fp,
+ offset_fp,
+ output_dims,
+ dtype,
+ outputDType,
+ ],
+ )
+ )
else:
shift = 11
unit = float(1 << shift)
@@ -712,7 +783,14 @@ class TosaArgGen:
offset_y = int(round(fp_offset_y * unit))
offset_x = int(round(fp_offset_x * unit))
- while (stride_y >= 32768 or stride_x >= 32768 or offset_y >= 32768 or offset_x >= 32768 or offset_y < -32768 or offset_x < -32768):
+ while (
+ stride_y >= 32768
+ or stride_x >= 32768
+ or offset_y >= 32768
+ or offset_x >= 32768
+ or offset_y < -32768
+ or offset_x < -32768
+ ):
shift = shift - 1
unit = float(1 << shift)
stride_y = int(round(fp_stride_y * unit))
@@ -720,16 +798,38 @@ class TosaArgGen:
offset_y = int(round(fp_offset_y * unit))
offset_x = int(round(fp_offset_x * unit))
- stride = [ stride_y, stride_x]
- offset = [ offset_y, offset_x]
+ stride = [stride_y, stride_x]
+ offset = [offset_y, offset_x]
stride_fp = [0.0, 0.0]
offset_fp = [0.0, 0.0]
- arg_list.append(('mode{}_shift{}_odim{}x{}_out{}_st{}x{}_off{}x{}'.format(m, shift, output_dims[0], output_dims[1],
- testGen.typeStr(outputDType), stride[0], stride[1],
- offset[0], offset[1]),
- [m, stride, offset, shift, stride_fp, offset_fp, output_dims, dtype, outputDType]))
+ arg_list.append(
+ (
+ "mode{}_shift{}_odim{}x{}_out{}_st{}x{}_off{}x{}".format(
+ m,
+ shift,
+ output_dims[0],
+ output_dims[1],
+ testGen.typeStr(outputDType),
+ stride[0],
+ stride[1],
+ offset[0],
+ offset[1],
+ ),
+ [
+ m,
+ stride,
+ offset,
+ shift,
+ stride_fp,
+ offset_fp,
+ output_dims,
+ dtype,
+ outputDType,
+ ],
+ )
+ )
return arg_list
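A self-contained sketch of the fixed-point quantisation loop reformatted above: the floating-point stride and offset are scaled by 2**shift, and shift is lowered until every value fits a signed 16-bit range (the starting values are illustrative):

fp_stride_y, fp_stride_x = 20.0, 2.12    # illustrative resize strides
fp_offset_y, fp_offset_x = -0.25, 0.75   # illustrative resize offsets

shift = 11
unit = float(1 << shift)
stride_y = int(round(fp_stride_y * unit))
stride_x = int(round(fp_stride_x * unit))
offset_y = int(round(fp_offset_y * unit))
offset_x = int(round(fp_offset_x * unit))

while (
    stride_y >= 32768
    or stride_x >= 32768
    or offset_y >= 32768
    or offset_x >= 32768
    or offset_y < -32768
    or offset_x < -32768
):
    shift = shift - 1
    unit = float(1 << shift)
    stride_y = int(round(fp_stride_y * unit))
    stride_x = int(round(fp_stride_x * unit))
    offset_y = int(round(fp_offset_y * unit))
    offset_x = int(round(fp_offset_x * unit))

print(shift, [stride_y, stride_x], [offset_y, offset_x])
# -> 10 [20480, 2171] [-256, 768]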
@@ -740,7 +840,7 @@ class TosaArgGen:
arg_list = []
for c in [False, True]:
- arg_list.append(('cond{}'.format(int(c)), [ c ]))
+ arg_list.append(("cond{}".format(int(c)), [c]))
return arg_list
@@ -749,10 +849,11 @@ class TosaArgGen:
arg_list = []
for iter in [0, 1, 4]:
- arg_list.append(('iter{}'.format(iter), [ iter ]))
+ arg_list.append(("iter{}".format(iter), [iter]))
return arg_list
+
class TosaTestGen:
def __init__(self, args):
self.args = args
@@ -777,11 +878,13 @@ class TosaTestGen:
return self.ser
def serialize(self, testName):
- with open(os.path.join(self.basePath, self.testPath, '{}.tosa'.format(testName)), 'wb') as fd:
+ with open(
+ os.path.join(self.basePath, self.testPath, "{}.tosa".format(testName)), "wb"
+ ) as fd:
fd.write(self.ser.serialize())
- with open(os.path.join(self.basePath, self.testPath, 'desc.json'), 'w') as fd:
- fd.write(self.ser.writeJson('{}.tosa'.format(testName)))
+ with open(os.path.join(self.basePath, self.testPath, "desc.json"), "w") as fd:
+ fd.write(self.ser.writeJson("{}.tosa".format(testName)))
def getRandTensor(self, shape, dtype):
RAND_SHIFT_FACTOR = 0.5
@@ -797,20 +900,26 @@ class TosaTestGen:
elif dtype == DType.INT16:
return np.int32(self.rng.integers(low=-32768, high=32768, size=shape))
elif dtype == DType.INT32:
- return np.int32(self.rng.integers(low=-(1 << 31), high=(1 << 31), size=shape))
+ return np.int32(
+ self.rng.integers(low=-(1 << 31), high=(1 << 31), size=shape)
+ )
elif dtype == DType.INT48:
- return np.int64(self.rng.integers(low=-(1 << 47), high=(1 << 47), size=shape))
+ return np.int64(
+ self.rng.integers(low=-(1 << 47), high=(1 << 47), size=shape)
+ )
elif dtype == DType.FLOAT:
- return np.float32(self.rng.random(size=shape) - RAND_SHIFT_FACTOR * RAND_SCALE_FACTOR)
+ return np.float32(
+ self.rng.random(size=shape) - RAND_SHIFT_FACTOR * RAND_SCALE_FACTOR
+ )
else:
- raise Exception('Unrecognized Dtype: {}'.format(dtype))
+ raise Exception("Unrecognized Dtype: {}".format(dtype))
def buildPlaceholderTensors(self, shape_list, dtype):
placeholders = []
for shape in shape_list:
arr = self.getRandTensor(shape, dtype)
- placeholders.append(self.ser.addPlaceholder(shape, dtype, Usage.ACTIVATION, [], arr))
+ placeholders.append(self.ser.addPlaceholder(shape, dtype, arr))
return placeholders
@@ -819,16 +928,20 @@ class TosaTestGen:
for shape in shape_list:
arr = self.getRandTensor(shape, dtype)
- consts.append(self.ser.addConst(shape, dtype, Usage.ACTIVATION, [], arr))
+ consts.append(self.ser.addConst(shape, dtype, arr))
return consts
def makeShape(self, rank):
if self.targetted_shape:
return np.int32(self.targetted_shape)
- return np.int32(self.rng.integers(low=self.args.tensor_shape_range[0],
- high=self.args.tensor_shape_range[1],
- size=rank))
+ return np.int32(
+ self.rng.integers(
+ low=self.args.tensor_shape_range[0],
+ high=self.args.tensor_shape_range[1],
+ size=rank,
+ )
+ )
def setTargetShape(self, shape):
self.targetted_shape = shape
@@ -848,13 +961,13 @@ class TosaTestGen:
elif dtype == DType.INT16:
low, high = (-32768, 32768)
elif dtype == DType.INT32:
- low, high = (-(1<<31), (1<<31))
+ low, high = (-(1 << 31), (1 << 31))
elif dtype == DType.INT48:
- low, high = (-(1<<47), (1<<47))
+ low, high = (-(1 << 47), (1 << 47))
# Special size
return np.int64(self.rng.integers(low, high, size=1))[0]
else:
- raise Exception('Unknown dtype: {}'.format(dtype))
+ raise Exception("Unknown dtype: {}".format(dtype))
return np.int32(self.rng.integers(low, high, size=1))[0]
@@ -865,30 +978,30 @@ class TosaTestGen:
for i in shape:
sStr.append(str(i))
- return 'x'.join(sStr)
+ return "x".join(sStr)
def typeStr(self, t):
if t == DType.BOOL:
- return 'b'
+ return "b"
elif t == DType.INT4:
- return 'i4'
+ return "i4"
elif t == DType.INT8:
- return 'i8'
+ return "i8"
elif t == DType.UINT8:
- return 'u8'
+ return "u8"
elif t == DType.INT16:
- return 'i16'
+ return "i16"
elif t == DType.INT32:
- return 'i32'
+ return "i32"
elif t == DType.INT48:
- return 'i48'
+ return "i48"
elif t == DType.FLOAT:
- return 'float'
+ return "float"
else:
- raise Exception('Unknown dtype, cannot convert to string: {}'.format(t))
+ raise Exception("Unknown dtype, cannot convert to string: {}".format(t))
def typeWidth(self, t):
- ''' Get the datatype width for integer types'''
+ """ Get the datatype width for integer types"""
if t == DType.INT4:
return 4
elif t == DType.INT8:
@@ -902,7 +1015,7 @@ class TosaTestGen:
elif t == DType.INT48:
return 48
else:
- raise Exception('Unknown dtype, cannot convert to string: {}'.format(t))
+ raise Exception("Unknown dtype, cannot convert to string: {}".format(t))
# Argument generators
# Returns a list of tuples (stringDescriptor, [build_fcn_arg_list])
@@ -910,8 +1023,7 @@ class TosaTestGen:
# The build_fcn_arg_list is expanded and passed to the operator test
# build function
-
- def build_unary(self, op, a, qinfo = None):
+ def build_unary(self, op, a, qinfo=None):
result_tens = OutputShaper.unaryOp(self.ser, a)
self.ser.addOperator(op, [a.name], [result_tens.name], None, qinfo)
return result_tens
@@ -952,7 +1064,7 @@ class TosaTestGen:
def build_table(self, op, a):
# Constant size, random values
table_arr = self.getRandTensor([513], DType.INT16)
- table_tens = self.ser.addConst(table_arr.shape, DType.INT16, Usage.INDEX, [], table_arr)
+ table_tens = self.ser.addConst(table_arr.shape, DType.INT16, table_arr)
result_tens = OutputShaper.tableOp(self.ser, a, table_tens)
self.ser.addOperator(op, [a.name, table_tens.name], [result_tens.name], None)
@@ -985,43 +1097,38 @@ class TosaTestGen:
self.ser.addOperator(op, [a.name], [result_tens.name], attr)
return result_tens
- def build_pool2d(self, op, input, kernel, stride, pad, qinfo = None):
+ def build_pool2d(self, op, input, kernel, stride, pad, qinfo=None):
result_tens = OutputShaper.pool2dOp(self.ser, input, kernel, stride, pad)
attr = ts.TosaSerializerAttribute()
attr.Pool2dAttribute(kernel, stride, pad)
- input.addFormat(Format.NHWC)
self.ser.addOperator(op, [input.name], [result_tens.name], attr, qinfo)
return result_tens
def build_conv2d(self, op, ifm, filter, bias, strides, padding, dilations, qinfo):
- assert(len(padding) == 4)
- result_tens = OutputShaper.conv2dOp(self.ser, ifm, filter, strides, padding, dilations)
+ assert len(padding) == 4
+ result_tens = OutputShaper.conv2dOp(
+ self.ser, ifm, filter, strides, padding, dilations
+ )
attr = ts.TosaSerializerAttribute()
attr.Conv2dAttribute(padding, strides, dilations)
- ifm.addFormat(Format.NHWC)
- # Update the filter ordering
- filter.addUsage(Usage.WEIGHT)
- filter.addFormat(Format.OHWI)
-
- self.ser.addOperator(op, [ifm.name, filter.name, bias.name], [result_tens.name], attr, qinfo)
+ self.ser.addOperator(
+ op, [ifm.name, filter.name, bias.name], [result_tens.name], attr, qinfo
+ )
return result_tens
- def build_transpose_conv2d(self, op, ifm, filter, stride, outpad, dilation, output_shape, qinfo):
- assert(len(outpad) == 2)
+ def build_transpose_conv2d(
+ self, op, ifm, filter, stride, outpad, dilation, output_shape, qinfo
+ ):
+ assert len(outpad) == 2
result_tens = OutputShaper.transposeConv2DOp(self.ser, ifm, output_shape)
attr = ts.TosaSerializerAttribute()
attr.TransposeConv2DAttribute(outpad, stride, dilation, output_shape)
- ifm.addFormat(Format.NHWC)
- # Update the filter ordering
- filter.addUsage(Usage.WEIGHT)
- filter.addFormat(Format.OHWI)
-
# Create bias here since the acc_t depends on (but isn't the same as) the input dtype
# The bias is OC
if ifm.dtype == DType.INT8:
@@ -1031,32 +1138,39 @@ class TosaTestGen:
elif ifm.dtype == DType.FLOAT:
bias_type = DType.FLOAT
else:
- raise Exception('Unsupported dtype for transpose_conv2d: {}'.format(ifm.dtype))
+ raise Exception(
+ "Unsupported dtype for transpose_conv2d: {}".format(ifm.dtype)
+ )
bias_arr = self.getRandTensor([filter.shape[0]], bias_type)
- bias_tens = self.ser.addConst([filter.shape[0]], bias_type, [], [], bias_arr)
+ bias_tens = self.ser.addConst([filter.shape[0]], bias_type, bias_arr)
- self.ser.addOperator(op, [ifm.name, filter.name, bias_tens.name], [result_tens.name], attr, qinfo)
+ self.ser.addOperator(
+ op, [ifm.name, filter.name, bias_tens.name], [result_tens.name], attr, qinfo
+ )
return result_tens
- def build_depthwise_conv2d(self, op, ifm, filter, bias, strides, padding, dilations, qinfo):
- result_tens = OutputShaper.depthwiseConv2dOp(self.ser, ifm, filter, strides, padding, dilations)
+ def build_depthwise_conv2d(
+ self, op, ifm, filter, bias, strides, padding, dilations, qinfo
+ ):
+ result_tens = OutputShaper.depthwiseConv2dOp(
+ self.ser, ifm, filter, strides, padding, dilations
+ )
attr = ts.TosaSerializerAttribute()
attr.Conv2dAttribute(padding, strides, dilations)
- ifm.addFormat(Format.NHWC)
- filter.addUsage(Usage.WEIGHT)
- filter.addFormat(Format.HWIM)
-
- self.ser.addOperator(op, [ifm.name, filter.name, bias.name], [result_tens.name], attr, qinfo)
+ self.ser.addOperator(
+ op, [ifm.name, filter.name, bias.name], [result_tens.name], attr, qinfo
+ )
return result_tens
def build_fully_connected(self, op, ifm, filter, bias, qinfo):
result_tens = OutputShaper.fullyConnectedOp(self.ser, ifm, filter)
- filter.addUsage(Usage.WEIGHT)
- self.ser.addOperator(op, [ifm.name, filter.name, bias.name], [result_tens.name], None, qinfo)
+ self.ser.addOperator(
+ op, [ifm.name, filter.name, bias.name], [result_tens.name], None, qinfo
+ )
return result_tens
def build_matmul(self, op, a, b, qinfo):
@@ -1142,9 +1256,11 @@ class TosaTestGen:
# Need to turn the padding array into a TOSA tensor here.
# This is one of the few tensor operands that does not get
# randomly generated
- padding_tens = self.ser.addConst(padding.shape, DType.INT32, [], [], padding)
+ padding_tens = self.ser.addConst(padding.shape, DType.INT32, padding)
- self.ser.addOperator(op, [a.name, padding_tens.name], [result_tens.name], None, qinfo)
+ self.ser.addOperator(
+ op, [a.name, padding_tens.name], [result_tens.name], None, qinfo
+ )
def build_reshape(self, op, a, newShape):
result_tens = OutputShaper.reshapeOp(self.ser, a, newShape)
@@ -1167,7 +1283,7 @@ class TosaTestGen:
def build_transpose(self, op, a, perms):
result_tens = OutputShaper.transposeOp(self.ser, a, perms)
- perms_tens = self.ser.addConst([len(perms)], DType.INT32, Usage.ACTIVATION, [], np.int32(perms))
+ perms_tens = self.ser.addConst([len(perms)], DType.INT32, np.int32(perms))
self.ser.addOperator(op, [a.name, perms_tens.name], [result_tens.name])
return result_tens
@@ -1190,16 +1306,19 @@ class TosaTestGen:
self.ser.addOperator(op, [a.name], [result_tens.name], attr)
return result_tens
-
def build_gather(self, op, values):
# Create a new indices tensor
# here with data that doesn't exceed the dimensions of the values tensor
- K = values.shape[1] # K
- W = self.randInt(self.args.tensor_shape_range[0], self.args.tensor_shape_range[1]) # W
- indicies_arr = np.int32(self.rng.integers(low=0, high=K, size=[values.shape[0], W])) # (N, W)
- indicies = self.ser.addConst(indicies_arr.shape, DType.INT32, Usage.INDEX, [], indicies_arr)
+ K = values.shape[1] # K
+ W = self.randInt(
+ self.args.tensor_shape_range[0], self.args.tensor_shape_range[1]
+ ) # W
+ indicies_arr = np.int32(
+ self.rng.integers(low=0, high=K, size=[values.shape[0], W])
+ ) # (N, W)
+ indicies = self.ser.addConst(indicies_arr.shape, DType.INT32, indicies_arr)
result_tens = OutputShaper.gatherOp(self.ser, values, indicies)
@@ -1212,32 +1331,65 @@ class TosaTestGen:
# Create a new indices tensor
# here with data that doesn't exceed the dimensions of the values_in tensor
- K = values_in.shape[1] # K
- W = input.shape[1] # W
- indicies_arr = np.int32(self.rng.integers(low=0, high=K, size=[values_in.shape[0], W])) # (N, W)
- indicies = self.ser.addConst(indicies_arr.shape, DType.INT32, Usage.INDEX, [], indicies_arr)
+ K = values_in.shape[1] # K
+ W = input.shape[1] # W
+ indicies_arr = np.int32(
+ self.rng.integers(low=0, high=K, size=[values_in.shape[0], W])
+ ) # (N, W)
+ indicies = self.ser.addConst(indicies_arr.shape, DType.INT32, indicies_arr)
result_tens = OutputShaper.scatterOp(self.ser, values_in, indicies, input)
- self.ser.addOperator(op, [values_in.name, indicies.name, input.name], [result_tens.name])
+ self.ser.addOperator(
+ op, [values_in.name, indicies.name, input.name], [result_tens.name]
+ )
return result_tens
- def build_resize(self, op, input, mode, stride, offset, shift, stride_fp, offset_fp, output_dims, input_dtype, output_dtype):
- result_tens = OutputShaper.resizeOp(self.ser, input, mode, stride, offset, shift, stride_fp, offset_fp, output_dims, input_dtype, output_dtype)
+ def build_resize(
+ self,
+ op,
+ input,
+ mode,
+ stride,
+ offset,
+ shift,
+ stride_fp,
+ offset_fp,
+ output_dims,
+ input_dtype,
+ output_dtype,
+ ):
+ result_tens = OutputShaper.resizeOp(
+ self.ser,
+ input,
+ mode,
+ stride,
+ offset,
+ shift,
+ stride_fp,
+ offset_fp,
+ output_dims,
+ input_dtype,
+ output_dtype,
+ )
attr = ts.TosaSerializerAttribute()
- attr.ResizeAttribute(output_dims, stride, offset, shift, stride_fp, offset_fp, mode)
+ attr.ResizeAttribute(
+ output_dims, stride, offset, shift, stride_fp, offset_fp, mode
+ )
self.ser.addOperator(op, [input.name], [result_tens.name], attr)
return result_tens
def build_identityn(self, op, val, val2):
- result_tens = OutputShaper.unaryOp(self.ser, val)
+ result_tens = OutputShaper.unaryOp(self.ser, val)
result_tens2 = OutputShaper.unaryOp(self.ser, val2)
- self.ser.addOperator(op, [val.name, val2.name], [result_tens.name, result_tens2.name])
+ self.ser.addOperator(
+ op, [val.name, val2.name], [result_tens.name, result_tens2.name]
+ )
return result_tens
def build_placeholder(self, op, val):
@@ -1287,27 +1439,30 @@ class TosaTestGen:
# Cap the scaling at 2^15 - 1 for scale16
scale_arr = np.clip(scale_arr, 1.0 / (1 << 31), 32767.0)
- #print('{} {} -> {}'.format(out_type_width, in_type_width, scale_arr))
+ # print('{} {} -> {}'.format(out_type_width, in_type_width, scale_arr))
multiplier_arr = np.int32(np.zeros(shape=[nc]))
shift_arr = np.int32(np.zeros(shape=[nc]))
for i in range(nc):
- multiplier_arr[i], shift_arr[i] = TosaQuantGen.computeMultiplierAndShift(scale_arr[i], scale32)
+ multiplier_arr[i], shift_arr[i] = TosaQuantGen.computeMultiplierAndShift(
+ scale_arr[i], scale32
+ )
if shift_arr[i] < 2 or shift_arr[i] > 62:
- self.ser.setExpectedFailure(True, 'OpRescale: invalid shift value')
+ self.ser.setExpectedFailure(True, "OpRescale: invalid shift value")
- #print('multiplier {} shift {} inzp {} outzp {}'.format(multiplier_arr, shift_arr, input_zp, output_zp))
+ # print('multiplier {} shift {} inzp {} outzp {}'.format(multiplier_arr, shift_arr, input_zp, output_zp))
attr = ts.TosaSerializerAttribute()
- attr.RescaleAttribute(input_zp,
- output_zp,
- multiplier_arr,
- shift_arr,
- scale32,
- double_round,
-
- per_channel)
+ attr.RescaleAttribute(
+ input_zp,
+ output_zp,
+ multiplier_arr,
+ shift_arr,
+ scale32,
+ double_round,
+ per_channel,
+ )
self.ser.addOperator(op, [val.name], [result_tens.name], attr)
return result_tens
@@ -1318,7 +1473,7 @@ class TosaTestGen:
# and fill them with const nodes for the body.
# Condition tensor
- cond_tens = self.ser.addConst([], DType.BOOL, Usage.ACTIVATION, [], [cond])
+ cond_tens = self.ser.addConst([], DType.BOOL, [cond])
# Make then/else tensors
out_shape = then_tens.shape
@@ -1326,11 +1481,11 @@ class TosaTestGen:
else_arr = np.int32(self.rng.integers(0, 255, size=out_shape))
# And the result tensor based on any of the outputs
- result_tens = self.ser.addOutput(out_shape, DType.INT32, Usage.ACTIVATION, [])
+ result_tens = self.ser.addOutput(out_shape, DType.INT32)
# Create the attribute with the names of the then/else blocks
- then_block = 'THEN_BLOCK'
- else_block = 'ELSE_BLOCK'
+ then_block = "THEN_BLOCK"
+ else_block = "ELSE_BLOCK"
attr = ts.TosaSerializerAttribute()
attr.CondIfAttribute(then_block, else_block)
@@ -1339,11 +1494,11 @@ class TosaTestGen:
self.ser.startBasicBlock(then_block)
# Build the actual then/else tensors inside their blocks
- then_tens = self.ser.addConst(out_shape, DType.INT32, Usage.ACTIVATION, [], then_arr)
+ then_tens = self.ser.addConst(out_shape, DType.INT32, then_arr)
self.ser.addOutputTensor(then_tens)
self.ser.startBasicBlock(else_block)
- else_tens = self.ser.addConst(out_shape, DType.INT32, Usage.ACTIVATION, [], else_arr)
+ else_tens = self.ser.addConst(out_shape, DType.INT32, else_arr)
self.ser.addOutputTensor(else_tens)
return result_tens
@@ -1353,67 +1508,71 @@ class TosaTestGen:
# alternately add or subtract them based on the condition
# Condition tensor
- cond_tens = self.ser.addConst([], DType.BOOL, Usage.ACTIVATION, [], [cond])
+ cond_tens = self.ser.addConst([], DType.BOOL, [cond])
- result_tens = self.ser.addOutput(a.shape, a.dtype, Usage.ACTIVATION, [])
+ result_tens = self.ser.addOutput(a.shape, a.dtype)
self.ser.currBasicBlock.addOutput(result_tens.name)
# Create the attribute with the names of the then/else blocks
- then_block = 'THEN_BLOCK'
- else_block = 'ELSE_BLOCK'
+ then_block = "THEN_BLOCK"
+ else_block = "ELSE_BLOCK"
attr = ts.TosaSerializerAttribute()
attr.CondIfAttribute(then_block, else_block)
# Finally, build the op and the two blocks
- self.ser.addOperator(op, [cond_tens.name, a.name, b.name], [result_tens.name], attr)
+ self.ser.addOperator(
+ op, [cond_tens.name, a.name, b.name], [result_tens.name], attr
+ )
self.ser.startBasicBlock(then_block)
self.ser.addInputTensor(a)
self.ser.addInputTensor(b)
- then_tens = self.ser.addOutput(a.shape, a.dtype, a.usage, a.dformat)
+ then_tens = self.ser.addOutput(a.shape, a.dtype)
self.ser.addOperator(Op.ADD, [a.name, b.name], [then_tens.name])
self.ser.startBasicBlock(else_block)
self.ser.addInputTensor(a)
self.ser.addInputTensor(b)
- else_tens = self.ser.addOutput(a.shape, a.dtype, a.usage, a.dformat)
+ else_tens = self.ser.addOutput(a.shape, a.dtype)
self.ser.addOperator(Op.SUB, [a.name, b.name], [else_tens.name])
return result_tens
def build_while_loop(self, op, a, iter_val):
- iter = self.ser.addPlaceholder([], DType.INT32, Usage.ACTIVATION, [], [np.int32(iter_val)])
+ iter = self.ser.addPlaceholder([], DType.INT32, [np.int32(iter_val)])
- cond_block = 'COND_BLOCK'
- body_block = 'BODY_BLOCK'
+ cond_block = "COND_BLOCK"
+ body_block = "BODY_BLOCK"
attr = ts.TosaSerializerAttribute()
attr.WhileLoopAttribute(cond_block, body_block)
# Accumulator tensor
- #acc = self.ser.addOutput(a.shape, a.dtype, a.usage, a.dformat)
+ # acc = self.ser.addOutput(a.shape, a.dtype)
acc_init_val = np.int32(np.zeros(a.shape))
- acc = self.ser.addPlaceholder(a.shape, a.dtype, a.usage, a.dformat, acc_init_val)
+ acc = self.ser.addPlaceholder(a.shape, a.dtype, acc_init_val)
# Intermediate/output tensors for everything going through the loop
- iter_out = self.ser.addIntermediate(iter.shape, iter.dtype, iter.usage, iter.dformat)
- a_out = self.ser.addIntermediate(a.shape, a.dtype, a.usage, a.dformat)
- acc_out = self.ser.addIntermediate(acc.shape, acc.dtype, acc.usage, acc.dformat)
+ iter_out = self.ser.addIntermediate(iter.shape, iter.dtype)
+ a_out = self.ser.addIntermediate(a.shape, a.dtype)
+ acc_out = self.ser.addIntermediate(acc.shape, acc.dtype)
# While_loop operator
- self.ser.addOperator(op,
- [iter.name, a.name, acc.name],
- [iter_out.name, a_out.name, acc_out.name], attr)
+ self.ser.addOperator(
+ op,
+ [iter.name, a.name, acc.name],
+ [iter_out.name, a_out.name, acc_out.name],
+ attr,
+ )
# COND block (input: iter, output: cond_tens )
self.ser.startBasicBlock(cond_block)
self.ser.addInputTensor(iter)
self.ser.addInputTensor(a)
self.ser.addInputTensor(acc)
- zero_tens = self.ser.addConst([], DType.INT32, [], [], [np.int32(0)])
- cond_tens = self.ser.addOutput([], DType.BOOL, [], [])
- self.ser.addOperator(Op.GREATER, [iter.name, zero_tens.name],
- [cond_tens.name])
+ zero_tens = self.ser.addConst([], DType.INT32, [np.int32(0)])
+ cond_tens = self.ser.addOutput([], DType.BOOL)
+ self.ser.addOperator(Op.GREATER, [iter.name, zero_tens.name], [cond_tens.name])
# BODY block (input: a, acc, iter, output: a, acc, iter)
# Note that local intermediate tensors need to be declared here for the outputs
@@ -1421,9 +1580,9 @@ class TosaTestGen:
self.ser.addInputTensor(iter)
self.ser.addInputTensor(a)
self.ser.addInputTensor(acc)
- one_tens = self.ser.addConst([], DType.INT32, [], [], [np.int32(1)])
- iter_body_out = self.ser.addIntermediate(iter.shape, iter.dtype, iter.usage, iter.dformat)
- acc_body_out = self.ser.addIntermediate(acc.shape, acc.dtype, acc.usage, acc.dformat)
+ one_tens = self.ser.addConst([], DType.INT32, [np.int32(1)])
+ iter_body_out = self.ser.addIntermediate(iter.shape, iter.dtype)
+ acc_body_out = self.ser.addIntermediate(acc.shape, acc.dtype)
self.ser.addOperator(Op.ADD, [a.name, acc.name], [acc_body_out.name])
self.ser.addOperator(Op.SUB, [iter.name, one_tens.name], [iter_body_out.name])
self.ser.addOutputTensor(iter_body_out)
@@ -1432,21 +1591,22 @@ class TosaTestGen:
return acc_out
-
- def genOpTestList(self, opName, shapeFilter=[None], rankFilter=None, dtypeFilter=None):
+ def genOpTestList(
+ self, opName, shapeFilter=[None], rankFilter=None, dtypeFilter=None
+ ):
try:
op = self.TOSA_OP_LIST[opName]
except KeyError as e:
- raise Exception('Cannot find op with name {}'.format(opName))
+ raise Exception("Cannot find op with name {}".format(opName))
# Initialize a new random number generator
self.rng = np.random.default_rng(self.random_seed)
- build_fcn, tgen_fcn, agen_fcn = op['build_fcn']
+ build_fcn, tgen_fcn, agen_fcn = op["build_fcn"]
# Generate the lists of arguments
- rmin, rmax = op['rank']
+ rmin, rmax = op["rank"]
# Test list consists of a tuple of:
# (opName, testNameStr, dtype, shapeList, argumentsList)
@@ -1461,7 +1621,7 @@ class TosaTestGen:
if rankFilter is not None and r not in rankFilter:
continue
- for t in op['types']:
+ for t in op["types"]:
# Filter tests based on dtype?
if dtypeFilter is not None:
@@ -1487,13 +1647,15 @@ class TosaTestGen:
if agen_fcn:
argList = agen_fcn(self, opName, shapeList, t)
else:
- argList = [('', [])]
+ argList = [("", [])]
for argStr, args in argList:
if argStr:
- testStr = '{}_{}_{}_{}'.format(opName, shapeStr, typeStr, argStr)
+ testStr = "{}_{}_{}_{}".format(
+ opName, shapeStr, typeStr, argStr
+ )
else:
- testStr = '{}_{}_{}'.format(opName, shapeStr, typeStr)
+ testStr = "{}_{}_{}".format(opName, shapeStr, typeStr)
testList.append((opName, testStr, t, shapeList, args))
@@ -1503,16 +1665,16 @@ class TosaTestGen:
try:
op = self.TOSA_OP_LIST[opName]
except KeyError as e:
- raise Exception('Cannot find op with name {}'.format(opName))
+ raise Exception("Cannot find op with name {}".format(opName))
# Create a serializer
self.createSerializer(opName, testStr)
- build_fcn, tgen_fcn, agen_fcn = op['build_fcn']
- pCount, cCount = op['operands']
+ build_fcn, tgen_fcn, agen_fcn = op["build_fcn"]
+ pCount, cCount = op["operands"]
try:
- qgen = op['qgen']
+ qgen = op["qgen"]
except KeyError:
qgen = None
@@ -1520,8 +1682,10 @@ class TosaTestGen:
tens = []
# If test is ArithmeticRightShift, force value of operand[1] to be within [0, num_bits]
- if op['op'] == Op.ARITHMETIC_RIGHT_SHIFT:
- assert pCount == 2 and cCount == 0, 'Op.ArithmeticRightShift must have 2 placeholders, 0 consts'
+ if op["op"] == Op.ARITHMETIC_RIGHT_SHIFT:
+ assert (
+ pCount == 2 and cCount == 0
+ ), "Op.ArithmeticRightShift must have 2 placeholders, 0 consts"
placeholders = []
for idx, shape in enumerate(shapeList[:]):
@@ -1533,10 +1697,10 @@ class TosaTestGen:
elif dtype == DType.INT32:
arr = np.int32(self.rng.integers(low=0, high=32, size=shape))
else:
- raise Exception('OpArithmeticRightShift: invalid input dtype')
+ raise Exception("OpArithmeticRightShift: invalid input dtype")
else:
arr = self.getRandTensor(shapeList[0], dtype)
- placeholders.append(self.ser.addPlaceholder(shape, dtype, Usage.ACTIVATION, [], arr))
+ placeholders.append(self.ser.addPlaceholder(shape, dtype, arr))
tens.extend(placeholders)
else:
@@ -1550,36 +1714,44 @@ class TosaTestGen:
try:
if qinfo is not None:
- resultName = build_fcn(self, op['op'], *tens, *testArgs, qinfo)
+ resultName = build_fcn(self, op["op"], *tens, *testArgs, qinfo)
else:
- resultName = build_fcn(self, op['op'], *tens, *testArgs)
+ resultName = build_fcn(self, op["op"], *tens, *testArgs)
except TypeError as e:
- print('build_fcn: {}\nTensors: {}\nArgs: {}\n'.format(build_fcn, tens, testArgs))
+ print(
+ "build_fcn: {}\nTensors: {}\nArgs: {}\n".format(
+ build_fcn, tens, testArgs
+ )
+ )
raise e
# Save the serialized test
- self.serialize('test')
+ self.serialize("test")
def createDynamicOpLists(self):
# Dynamically create op lists for convolutions with a list of kernel sizes
- KERNELS = [ [1, 1], [2, 2], [3, 3], [5, 5], [3, 1], [1, 3] ]
+ KERNELS = [[1, 1], [2, 2], [3, 3], [5, 5], [3, 1], [1, 3]]
for k in KERNELS:
- testName = 'conv2d_{}x{}'.format(k[0], k[1])
- self.TOSA_OP_LIST[testName] = self.TOSA_OP_LIST['conv2d_TEMPLATE'].copy()
- self.TOSA_OP_LIST[testName]['filter'] = k
- self.TOSA_OP_LIST[testName]['template'] = False
-
- testName = 'depthwise_conv2d_{}x{}'.format(k[0], k[1])
- self.TOSA_OP_LIST[testName] = self.TOSA_OP_LIST['depthwise_conv2d_TEMPLATE'].copy()
- self.TOSA_OP_LIST[testName]['filter'] = k
- self.TOSA_OP_LIST[testName]['template'] = False
-
- testName = 'transpose_conv2d_{}x{}'.format(k[0], k[1])
- self.TOSA_OP_LIST[testName] = self.TOSA_OP_LIST['transpose_conv2d_TEMPLATE'].copy()
- self.TOSA_OP_LIST[testName]['filter'] = k
- self.TOSA_OP_LIST[testName]['template'] = False
+ testName = "conv2d_{}x{}".format(k[0], k[1])
+ self.TOSA_OP_LIST[testName] = self.TOSA_OP_LIST["conv2d_TEMPLATE"].copy()
+ self.TOSA_OP_LIST[testName]["filter"] = k
+ self.TOSA_OP_LIST[testName]["template"] = False
+
+ testName = "depthwise_conv2d_{}x{}".format(k[0], k[1])
+ self.TOSA_OP_LIST[testName] = self.TOSA_OP_LIST[
+ "depthwise_conv2d_TEMPLATE"
+ ].copy()
+ self.TOSA_OP_LIST[testName]["filter"] = k
+ self.TOSA_OP_LIST[testName]["template"] = False
+
+ testName = "transpose_conv2d_{}x{}".format(k[0], k[1])
+ self.TOSA_OP_LIST[testName] = self.TOSA_OP_LIST[
+ "transpose_conv2d_TEMPLATE"
+ ].copy()
+ self.TOSA_OP_LIST[testName]["filter"] = k
+ self.TOSA_OP_LIST[testName]["template"] = False
# Delete any templates after having created any dynamic ops
# This is a two-pass operation because it's bad practice to delete
@@ -1587,7 +1759,7 @@ class TosaTestGen:
keyList = []
for k in self.TOSA_OP_LIST:
try:
- if self.TOSA_OP_LIST[k]['template'] == True:
+ if self.TOSA_OP_LIST[k]["template"] == True:
keyList.append(k)
continue
except KeyError:
@@ -1597,36 +1769,46 @@ class TosaTestGen:
del self.TOSA_OP_LIST[k]
def initOpListDefaults(self):
- '''Fill in default fields for ops if they aren't already specified.
- Look for missing required fields (datastructure linting).'''
+ """Fill in default fields for ops if they aren't already specified.
+ Look for missing required fields (datastructure linting)."""
for op in self.TOSA_OP_LIST:
# Required fields
try:
- pl, c = self.TOSA_OP_LIST[op]['operands']
+ pl, c = self.TOSA_OP_LIST[op]["operands"]
except (KeyError, ValueError, TypeError):
- raise Exception('Op {} is missing a valid operand tuple in TOSA_OP_LIST'.format(op))
+ raise Exception(
+ "Op {} is missing a valid operand tuple in TOSA_OP_LIST".format(op)
+ )
try:
- fcn, tgen, arggen = self.TOSA_OP_LIST[op]['build_fcn']
+ fcn, tgen, arggen = self.TOSA_OP_LIST[op]["build_fcn"]
except (KeyError, ValueError, TypeError):
- raise Exception('Op {} is missing a valid build_fcn tuple in TOSA_OP_LIST'.format(op))
+ raise Exception(
+ "Op {} is missing a valid build_fcn tuple in TOSA_OP_LIST".format(
+ op
+ )
+ )
try:
- types = self.TOSA_OP_LIST[op]['types']
+ types = self.TOSA_OP_LIST[op]["types"]
except KeyError as e:
- raise Exception('Op {} is missing a valid type list in TOSA_OP_LIST'.format(op))
+ raise Exception(
+ "Op {} is missing a valid type list in TOSA_OP_LIST".format(op)
+ )
try:
- opcode = self.TOSA_OP_LIST[op]['op']
+ opcode = self.TOSA_OP_LIST[op]["op"]
except KeyError as e:
- raise Exception('Op {} is missing the Op field in TOSA_OP_LIST'.format(op))
+ raise Exception(
+ "Op {} is missing the Op field in TOSA_OP_LIST".format(op)
+ )
# Put in default rank range, if missing
try:
- rank = self.TOSA_OP_LIST[op]['rank']
+ rank = self.TOSA_OP_LIST[op]["rank"]
except KeyError:
- self.TOSA_OP_LIST[op]['rank'] = self.DEFAULT_RANK_RANGE
+ self.TOSA_OP_LIST[op]["rank"] = self.DEFAULT_RANK_RANGE
# Tensor operator list
# 'op': op name
@@ -1635,495 +1817,517 @@ class TosaTestGen:
# if not specified, defaults to (1, 4)
# 'build_fcn': tuple of the function to (build_operator(), TensorGen function, ArgGen enum)
# 'types': array of datatypes to be tested
- TYPE_FP = [ DType.FLOAT ]
+ TYPE_FP = [DType.FLOAT]
- TYPE_INT = [ DType.INT8, DType.INT16, DType.INT32 ] # Excludes INT4
- TYPE_INT_FP = [ DType.INT8, DType.INT16, DType.INT32, DType.FLOAT ] # Excludes INT4
+ TYPE_INT = [DType.INT8, DType.INT16, DType.INT32] # Excludes INT4
+ TYPE_INT_FP = [DType.INT8, DType.INT16, DType.INT32, DType.FLOAT] # Excludes INT4
- TYPE_BOOL = [ DType.BOOL ]
- TYPE_FI32 = [ DType.FLOAT, DType.INT32 ]
- TYPE_FIB = [ DType.FLOAT, DType.INT8, DType.INT16, DType.INT32, DType.BOOL ]
- TYPE_FI16 = [ DType.FLOAT, DType.INT16 ]
+ TYPE_BOOL = [DType.BOOL]
+ TYPE_FI32 = [DType.FLOAT, DType.INT32]
+ TYPE_FIB = [DType.FLOAT, DType.INT8, DType.INT16, DType.INT32, DType.BOOL]
+ TYPE_FI16 = [DType.FLOAT, DType.INT16]
- TYPE_NARROW_INT_FP = [ DType.INT8, DType.INT16, DType.FLOAT ]
+ TYPE_NARROW_INT_FP = [DType.INT8, DType.INT16, DType.FLOAT]
DEFAULT_RANK_RANGE = (1, 4)
TOSA_OP_LIST = {
# Binary ops
- 'add':
- { 'op': Op.ADD,
- 'operands': (2, 0),
- 'build_fcn': (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
- 'types': TYPE_FI32 },
-
- 'arithmetic_right_shift':
- { 'op': Op.ARITHMETIC_RIGHT_SHIFT,
- 'operands': (2, 0),
- 'build_fcn': (build_arithmetic_right_shift, TosaTensorGen.tgBroadcastFuzz, TosaArgGen.agArithmeticRightShift),
- 'types': TYPE_INT },
-
- 'bitwise_and':
- { 'op': Op.BITWISE_AND,
- 'operands': (2, 0),
- 'build_fcn': (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
- 'types': TYPE_INT },
-
- 'bitwise_or':
- { 'op': Op.BITWISE_OR,
- 'operands': (2, 0),
- 'build_fcn': (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
- 'types': TYPE_INT },
-
- 'bitwise_xor':
- { 'op': Op.BITWISE_XOR,
- 'operands': (2, 0),
- 'build_fcn': (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
- 'types': TYPE_INT },
-
- 'logical_and':
- { 'op': Op.LOGICAL_AND,
- 'operands': (2, 0),
- 'build_fcn': (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
- 'types': TYPE_BOOL },
-
- 'logical_left_shift':
- { 'op': Op.LOGICAL_LEFT_SHIFT,
- 'operands': (2, 0),
- 'build_fcn': (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
- 'types': TYPE_INT },
-
- 'logical_right_shift':
- { 'op': Op.LOGICAL_RIGHT_SHIFT,
- 'operands': (2, 0),
- 'build_fcn': (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
- 'types': TYPE_INT },
-
- 'logical_or':
- { 'op': Op.LOGICAL_OR,
- 'operands': (2, 0),
- 'build_fcn': (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
- 'types': TYPE_BOOL },
-
- 'logical_xor':
- { 'op': Op.LOGICAL_XOR,
- 'operands': (2, 0),
- 'build_fcn': (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
- 'types': TYPE_BOOL },
-
- 'max':
- { 'op': Op.MAXIMUM,
- 'operands': (2, 0),
- 'build_fcn': (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
- 'types': TYPE_FI32 },
-
- 'min':
- { 'op': Op.MINIMUM,
- 'operands': (2, 0),
- 'build_fcn': (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
- 'types': TYPE_FI32 },
-
- 'mul':
- { 'op': Op.MUL,
- 'operands': (2, 0),
- 'build_fcn': (build_mul, TosaTensorGen.tgBroadcastFuzz, TosaArgGen.agMul),
- 'types': TYPE_INT_FP },
-
- 'pow':
- { 'op': Op.POW,
- 'operands': (2, 0),
- 'build_fcn': (build_binary_broadcast, TosaTensorGen.tgBasic, None),
- 'types': TYPE_FP },
-
- 'sub':
- { 'op': Op.SUB,
- 'operands': (2, 0),
- 'build_fcn': (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
- 'types': TYPE_FI32 },
-
- 'table':
- { 'op': Op.TABLE,
- # Use the automatic generation functions to create the input array
- # but create the table tensor in the build function, as it may be
- # a different type from the input
- 'operands': (1, 0),
- 'build_fcn': (build_table, TosaTensorGen.tgBasic, None),
- 'types': [ DType.INT16 ] },
-
- 'argmax':
- { 'op': Op.ARGMAX,
- 'operands': (1, 0),
- 'build_fcn': (build_argmax, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
- 'types': TYPE_NARROW_INT_FP },
-
+ "add": {
+ "op": Op.ADD,
+ "operands": (2, 0),
+ "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
+ "types": TYPE_FI32,
+ },
+ "arithmetic_right_shift": {
+ "op": Op.ARITHMETIC_RIGHT_SHIFT,
+ "operands": (2, 0),
+ "build_fcn": (
+ build_arithmetic_right_shift,
+ TosaTensorGen.tgBroadcastFuzz,
+ TosaArgGen.agArithmeticRightShift,
+ ),
+ "types": TYPE_INT,
+ },
+ "bitwise_and": {
+ "op": Op.BITWISE_AND,
+ "operands": (2, 0),
+ "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
+ "types": TYPE_INT,
+ },
+ "bitwise_or": {
+ "op": Op.BITWISE_OR,
+ "operands": (2, 0),
+ "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
+ "types": TYPE_INT,
+ },
+ "bitwise_xor": {
+ "op": Op.BITWISE_XOR,
+ "operands": (2, 0),
+ "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
+ "types": TYPE_INT,
+ },
+ "logical_and": {
+ "op": Op.LOGICAL_AND,
+ "operands": (2, 0),
+ "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
+ "types": TYPE_BOOL,
+ },
+ "logical_left_shift": {
+ "op": Op.LOGICAL_LEFT_SHIFT,
+ "operands": (2, 0),
+ "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
+ "types": TYPE_INT,
+ },
+ "logical_right_shift": {
+ "op": Op.LOGICAL_RIGHT_SHIFT,
+ "operands": (2, 0),
+ "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
+ "types": TYPE_INT,
+ },
+ "logical_or": {
+ "op": Op.LOGICAL_OR,
+ "operands": (2, 0),
+ "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
+ "types": TYPE_BOOL,
+ },
+ "logical_xor": {
+ "op": Op.LOGICAL_XOR,
+ "operands": (2, 0),
+ "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
+ "types": TYPE_BOOL,
+ },
+ "max": {
+ "op": Op.MAXIMUM,
+ "operands": (2, 0),
+ "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
+ "types": TYPE_FI32,
+ },
+ "min": {
+ "op": Op.MINIMUM,
+ "operands": (2, 0),
+ "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
+ "types": TYPE_FI32,
+ },
+ "mul": {
+ "op": Op.MUL,
+ "operands": (2, 0),
+ "build_fcn": (build_mul, TosaTensorGen.tgBroadcastFuzz, TosaArgGen.agMul),
+ "types": TYPE_INT_FP,
+ },
+ "pow": {
+ "op": Op.POW,
+ "operands": (2, 0),
+ "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBasic, None),
+ "types": TYPE_FP,
+ },
+ "sub": {
+ "op": Op.SUB,
+ "operands": (2, 0),
+ "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
+ "types": TYPE_FI32,
+ },
+ "table": {
+ "op": Op.TABLE,
+ # Use the automatic generation functions to create the input array
+ # but create the table tensor in the build function, as it may be
+ # a different type from the input
+ "operands": (1, 0),
+ "build_fcn": (build_table, TosaTensorGen.tgBasic, None),
+ "types": [DType.INT16],
+ },
+ "argmax": {
+ "op": Op.ARGMAX,
+ "operands": (1, 0),
+ "build_fcn": (build_argmax, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
+ "types": TYPE_NARROW_INT_FP,
+ },
# Templated operator. Filled in by createDynamicOpLists
- 'conv2d_TEMPLATE':
- { 'op': Op.CONV2D,
- 'operands': (1, 2),
- 'rank': (4, 4),
- 'build_fcn': (build_conv2d, TosaTensorGen.tgConv2D, TosaArgGen.agConv2D),
- 'qgen': TosaQuantGen.qgConv,
- 'types': TYPE_NARROW_INT_FP,
- 'template': True },
-
+ "conv2d_TEMPLATE": {
+ "op": Op.CONV2D,
+ "operands": (1, 2),
+ "rank": (4, 4),
+ "build_fcn": (build_conv2d, TosaTensorGen.tgConv2D, TosaArgGen.agConv2D),
+ "qgen": TosaQuantGen.qgConv,
+ "types": TYPE_NARROW_INT_FP,
+ "template": True,
+ },
# Templated operator. Filled in by createDynamicOpLists
- 'depthwise_conv2d_TEMPLATE':
- { 'op': Op.DEPTHWISE_CONV2D,
- 'operands': (1, 2),
- 'filter': [1, 1],
- 'rank': (4, 4),
- 'build_fcn': (build_depthwise_conv2d, TosaTensorGen.tgDepthwiseConv2D, TosaArgGen.agConv2D),
- 'qgen': TosaQuantGen.qgConv,
- 'types': TYPE_NARROW_INT_FP,
- 'template': True },
-
+ "depthwise_conv2d_TEMPLATE": {
+ "op": Op.DEPTHWISE_CONV2D,
+ "operands": (1, 2),
+ "filter": [1, 1],
+ "rank": (4, 4),
+ "build_fcn": (
+ build_depthwise_conv2d,
+ TosaTensorGen.tgDepthwiseConv2D,
+ TosaArgGen.agConv2D,
+ ),
+ "qgen": TosaQuantGen.qgConv,
+ "types": TYPE_NARROW_INT_FP,
+ "template": True,
+ },
# Templated operator. Filled in by createDynamicOpLists
- 'transpose_conv2d_TEMPLATE':
- { 'op': Op.TRANSPOSE_CONV2D,
- 'operands': (1, 1),
- 'rank': (4, 4),
- 'build_fcn': (build_transpose_conv2d, TosaTensorGen.tgTransposeConv2D, TosaArgGen.agTransposeConv2D),
- 'qgen': TosaQuantGen.qgConv,
- 'types': TYPE_NARROW_INT_FP,
- 'template': True },
-
- 'fully_connected':
- { 'op': Op.FULLY_CONNECTED,
- 'operands': (2, 0),
- 'rank': (2, 2),
- 'build_fcn': (build_fully_connected, TosaTensorGen.tgFullyConnected, None),
- 'qgen': TosaQuantGen.qgConv,
- 'types': TYPE_NARROW_INT_FP },
-
- 'matmul':
- { 'op': Op.MATMUL,
- 'operands': (2, 0),
- 'rank': (2, 2),
- 'build_fcn': (build_matmul, TosaTensorGen.tgMatmul, None),
- 'qgen': TosaQuantGen.qgMatmul,
- 'types': TYPE_NARROW_INT_FP },
-
+ "transpose_conv2d_TEMPLATE": {
+ "op": Op.TRANSPOSE_CONV2D,
+ "operands": (1, 1),
+ "rank": (4, 4),
+ "build_fcn": (
+ build_transpose_conv2d,
+ TosaTensorGen.tgTransposeConv2D,
+ TosaArgGen.agTransposeConv2D,
+ ),
+ "qgen": TosaQuantGen.qgConv,
+ "types": TYPE_FP,
+ "template": True,
+ },
+ "fully_connected": {
+ "op": Op.FULLY_CONNECTED,
+ "operands": (2, 0),
+ "rank": (2, 2),
+ "build_fcn": (build_fully_connected, TosaTensorGen.tgFullyConnected, None),
+ "qgen": TosaQuantGen.qgConv,
+ "types": TYPE_NARROW_INT_FP,
+ },
+ "matmul": {
+ "op": Op.MATMUL,
+ "operands": (2, 0),
+ "rank": (2, 2),
+ "build_fcn": (build_matmul, TosaTensorGen.tgMatmul, None),
+ "qgen": TosaQuantGen.qgMatmul,
+ "types": TYPE_NARROW_INT_FP,
+ },
# Unary operators
- 'abs':
- { 'op': Op.ABS,
- 'operands': (1, 0),
- 'build_fcn': (build_unary, TosaTensorGen.tgBasic, None),
- 'types': TYPE_FI32 },
-
- 'bitwise_not':
- { 'op': Op.BITWISE_NOT,
- 'operands': (1, 0),
- 'build_fcn': (build_unary, TosaTensorGen.tgBasic, None),
- 'types': TYPE_INT },
-
- 'ceil':
- { 'op': Op.CEIL,
- 'operands': (1, 0),
- 'build_fcn': (build_unary, TosaTensorGen.tgBasic, None),
- 'types': TYPE_FP },
-
- 'clz':
- { 'op': Op.CLZ,
- 'operands': (1, 0),
- 'build_fcn': (build_unary, TosaTensorGen.tgBasic, None),
- 'types': [ DType.INT32 ] },
-
- 'exp':
- { 'op': Op.EXP,
- 'operands': (1, 0),
- 'build_fcn': (build_unary, TosaTensorGen.tgBasic, None),
- 'types': TYPE_FP },
-
- 'floor':
- { 'op': Op.FLOOR,
- 'operands': (1, 0),
- 'build_fcn': (build_unary, TosaTensorGen.tgBasic, None),
- 'types': TYPE_FP },
-
- 'log':
- { 'op': Op.LOG,
- 'operands': (1, 0),
- 'build_fcn': (build_unary, TosaTensorGen.tgBasic, None),
- 'types': TYPE_FP },
-
- 'floor':
- { 'op': Op.FLOOR,
- 'operands': (1, 0),
- 'build_fcn': (build_unary, TosaTensorGen.tgBasic, None),
- 'types': TYPE_FP },
-
- 'logical_not':
- { 'op': Op.LOGICAL_NOT,
- 'operands': (1, 0),
- 'build_fcn': (build_unary, TosaTensorGen.tgBasic, None),
- 'types': TYPE_BOOL },
-
- 'negate':
- { 'op': Op.NEGATE,
- 'operands': (1, 0),
- 'build_fcn': (build_unary, TosaTensorGen.tgBasic, None),
- 'qgen': TosaQuantGen.qgUnary,
- 'types': TYPE_INT_FP },
-
- 'reciprocal':
- { 'op': Op.RECIPROCAL,
- 'operands': (1, 0),
- 'build_fcn': (build_unary, TosaTensorGen.tgBasic, None),
- 'types': TYPE_FP },
-
- 'rsqrt':
- { 'op': Op.RSQRT,
- 'operands': (1, 0),
- 'build_fcn': (build_unary, TosaTensorGen.tgBasic, None),
- 'types': TYPE_FP },
-
+ "abs": {
+ "op": Op.ABS,
+ "operands": (1, 0),
+ "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
+ "types": TYPE_FI32,
+ },
+ "bitwise_not": {
+ "op": Op.BITWISE_NOT,
+ "operands": (1, 0),
+ "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
+ "types": TYPE_INT,
+ },
+ "ceil": {
+ "op": Op.CEIL,
+ "operands": (1, 0),
+ "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
+ "types": TYPE_FP,
+ },
+ "clz": {
+ "op": Op.CLZ,
+ "operands": (1, 0),
+ "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
+ "types": [DType.INT32],
+ },
+ "exp": {
+ "op": Op.EXP,
+ "operands": (1, 0),
+ "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
+ "types": TYPE_FP,
+ },
+ "floor": {
+ "op": Op.FLOOR,
+ "operands": (1, 0),
+ "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
+ "types": TYPE_FP,
+ },
+ "log": {
+ "op": Op.LOG,
+ "operands": (1, 0),
+ "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
+ "types": TYPE_FP,
+ },
+ "floor": {
+ "op": Op.FLOOR,
+ "operands": (1, 0),
+ "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
+ "types": TYPE_FP,
+ },
+ "logical_not": {
+ "op": Op.LOGICAL_NOT,
+ "operands": (1, 0),
+ "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
+ "types": TYPE_BOOL,
+ },
+ "negate": {
+ "op": Op.NEGATE,
+ "operands": (1, 0),
+ "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
+ "qgen": TosaQuantGen.qgUnary,
+ "types": TYPE_INT_FP,
+ },
+ "reciprocal": {
+ "op": Op.RECIPROCAL,
+ "operands": (1, 0),
+ "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
+ "types": TYPE_FP,
+ },
+ "rsqrt": {
+ "op": Op.RSQRT,
+ "operands": (1, 0),
+ "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
+ "types": TYPE_FP,
+ },
# Ternary operators
- 'select':
- { 'op': Op.SELECT,
- 'operands': (3, 0),
- 'build_fcn': (build_select, TosaTensorGen.tgBroadcastFuzz, None),
- 'types': TYPE_FIB },
-
+ "select": {
+ "op": Op.SELECT,
+ "operands": (3, 0),
+ "build_fcn": (build_select, TosaTensorGen.tgBroadcastFuzz, None),
+ "types": TYPE_FIB,
+ },
# Comparison operators
- 'equal':
- { 'op': Op.EQUAL,
- 'operands': (2, 0),
- 'build_fcn': (build_comparison, TosaTensorGen.tgBroadcastFuzz, None),
- 'types': TYPE_FI32 },
-
- 'greater_equal':
- { 'op': Op.GREATER_EQUAL,
- 'operands': (2, 0),
- 'build_fcn': (build_comparison, TosaTensorGen.tgBroadcastFuzz, None),
- 'types': TYPE_FI32 },
-
- 'greater':
- { 'op': Op.GREATER,
- 'operands': (2, 0),
- 'build_fcn': (build_comparison, TosaTensorGen.tgBroadcastFuzz, None),
- 'types': TYPE_FI32 },
-
+ "equal": {
+ "op": Op.EQUAL,
+ "operands": (2, 0),
+ "build_fcn": (build_comparison, TosaTensorGen.tgBroadcastFuzz, None),
+ "types": TYPE_FI32,
+ },
+ "greater_equal": {
+ "op": Op.GREATER_EQUAL,
+ "operands": (2, 0),
+ "build_fcn": (build_comparison, TosaTensorGen.tgBroadcastFuzz, None),
+ "types": TYPE_FI32,
+ },
+ "greater": {
+ "op": Op.GREATER,
+ "operands": (2, 0),
+ "build_fcn": (build_comparison, TosaTensorGen.tgBroadcastFuzz, None),
+ "types": TYPE_FI32,
+ },
# Pooling operators
- 'avg_pool2d':
- { 'op': Op.AVG_POOL2D,
- 'operands': (1, 0),
- 'rank': (4, 4),
- 'build_fcn': (build_pool2d, TosaTensorGen.tgNHWC, TosaArgGen.agPooling),
- 'qgen': TosaQuantGen.qgUnary,
- 'types': TYPE_NARROW_INT_FP },
-
-
- 'max_pool2d':
- { 'op': Op.MAX_POOL2D,
- 'operands': (1, 0),
- 'rank': (4, 4),
- 'build_fcn': (build_pool2d, TosaTensorGen.tgNHWC, TosaArgGen.agPooling),
- 'types': TYPE_NARROW_INT_FP },
-
+ "avg_pool2d": {
+ "op": Op.AVG_POOL2D,
+ "operands": (1, 0),
+ "rank": (4, 4),
+ "build_fcn": (build_pool2d, TosaTensorGen.tgNHWC, TosaArgGen.agPooling),
+ "qgen": TosaQuantGen.qgUnary,
+ "types": TYPE_NARROW_INT_FP,
+ },
+ "max_pool2d": {
+ "op": Op.MAX_POOL2D,
+ "operands": (1, 0),
+ "rank": (4, 4),
+ "build_fcn": (build_pool2d, TosaTensorGen.tgNHWC, TosaArgGen.agPooling),
+ "types": TYPE_NARROW_INT_FP,
+ },
# Reduce operators
- 'reduce_any':
- { 'op': Op.REDUCE_ANY,
- 'operands': (1, 0),
- 'build_fcn': (build_reduce, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
- 'types': TYPE_BOOL },
-
- 'reduce_all':
- { 'op': Op.REDUCE_ALL,
- 'operands': (1, 0),
- 'build_fcn': (build_reduce, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
- 'types': TYPE_BOOL },
-
- 'reduce_max':
- { 'op': Op.REDUCE_MAX,
- 'operands': (1, 0),
- 'build_fcn': (build_reduce, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
- 'types': TYPE_INT_FP },
-
- 'reduce_min':
- { 'op': Op.REDUCE_MAX,
- 'operands': (1, 0),
- 'build_fcn': (build_reduce, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
- 'types': TYPE_INT_FP },
-
- 'reduce_product':
- { 'op': Op.REDUCE_PRODUCT,
- 'operands': (1, 0),
- 'build_fcn': (build_reduce, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
- 'types': TYPE_FP },
-
- 'reduce_sum':
- { 'op': Op.REDUCE_SUM,
- 'operands': (1, 0),
- 'build_fcn': (build_reduce, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
- 'types': TYPE_FI32 },
-
+ "reduce_any": {
+ "op": Op.REDUCE_ANY,
+ "operands": (1, 0),
+ "build_fcn": (build_reduce, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
+ "types": TYPE_BOOL,
+ },
+ "reduce_all": {
+ "op": Op.REDUCE_ALL,
+ "operands": (1, 0),
+ "build_fcn": (build_reduce, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
+ "types": TYPE_BOOL,
+ },
+ "reduce_max": {
+ "op": Op.REDUCE_MAX,
+ "operands": (1, 0),
+ "build_fcn": (build_reduce, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
+ "types": TYPE_INT_FP,
+ },
+ "reduce_min": {
+ "op": Op.REDUCE_MAX,
+ "operands": (1, 0),
+ "build_fcn": (build_reduce, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
+ "types": TYPE_INT_FP,
+ },
+ "reduce_product": {
+ "op": Op.REDUCE_PRODUCT,
+ "operands": (1, 0),
+ "build_fcn": (build_reduce, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
+ "types": TYPE_FP,
+ },
+ "reduce_sum": {
+ "op": Op.REDUCE_SUM,
+ "operands": (1, 0),
+ "build_fcn": (build_reduce, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
+ "types": TYPE_FI32,
+ },
# Activation functions
- 'clamp':
- { 'op': Op.CLAMP,
- 'operands': (1, 0),
- 'build_fcn': (build_clamp, TosaTensorGen.tgBasic, None),
- 'types': TYPE_NARROW_INT_FP },
-
- 'relun':
- { 'op': Op.RELUN,
- 'operands': (1, 0),
- 'build_fcn': (build_relun, TosaTensorGen.tgBasic, None),
- 'types': TYPE_FI32 },
-
- 'sigmoid':
- { 'op': Op.SIGMOID,
- 'operands': (1, 0),
- 'build_fcn': (build_sigmoid, TosaTensorGen.tgBasic, None),
- 'types': TYPE_FP },
-
- 'tanh':
- { 'op': Op.TANH,
- 'operands': (1, 0),
- 'build_fcn': (build_tanh, TosaTensorGen.tgBasic, None),
- 'types': TYPE_FP },
-
+ "clamp": {
+ "op": Op.CLAMP,
+ "operands": (1, 0),
+ "build_fcn": (build_clamp, TosaTensorGen.tgBasic, None),
+ "types": TYPE_NARROW_INT_FP,
+ },
+ "relun": {
+ "op": Op.RELUN,
+ "operands": (1, 0),
+ "build_fcn": (build_relun, TosaTensorGen.tgBasic, None),
+ "types": TYPE_FI32,
+ },
+ "sigmoid": {
+ "op": Op.SIGMOID,
+ "operands": (1, 0),
+ "build_fcn": (build_sigmoid, TosaTensorGen.tgBasic, None),
+ "types": TYPE_FP,
+ },
+ "tanh": {
+ "op": Op.TANH,
+ "operands": (1, 0),
+ "build_fcn": (build_tanh, TosaTensorGen.tgBasic, None),
+ "types": TYPE_FP,
+ },
# Data layout operators
- 'concat':
- { 'op': Op.CONCAT,
- 'operands': (2, 0),
- 'build_fcn': (build_concat, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
- 'types': TYPE_FIB },
-
- 'pad':
- { 'op': Op.PAD,
- 'operands': (1, 0),
- 'build_fcn': (build_pad, TosaTensorGen.tgBasic, TosaArgGen.agPad),
- 'qgen': TosaQuantGen.qgPad,
- 'types': TYPE_FIB },
-
- 'reshape':
- { 'op': Op.RESHAPE,
- 'operands': (1, 0),
- 'build_fcn': (build_reshape, TosaTensorGen.tgBasic, TosaArgGen.agReshape),
- 'types': TYPE_FIB },
-
- 'reverse':
- { 'op': Op.REVERSE,
- 'operands': (1, 0),
- 'build_fcn': (build_reverse, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
- 'types': TYPE_FIB },
-
- 'slice':
- { 'op': Op.SLICE,
- 'operands': (1, 0),
- 'build_fcn': (build_slice, TosaTensorGen.tgBasic, TosaArgGen.agSlice),
- 'types': TYPE_FIB },
-
- 'tile':
- { 'op': Op.TILE,
- 'operands': (1, 0),
- 'build_fcn': (build_tile, TosaTensorGen.tgBasic, TosaArgGen.agTile),
- 'types': TYPE_FIB },
-
- 'transpose':
- { 'op': Op.TRANSPOSE,
- 'operands': (1, 0),
- 'rank': (2, 4), # Do not allow tranpose on rank=1
- 'build_fcn': (build_transpose, TosaTensorGen.tgBasic, TosaArgGen.agTranspose),
- 'types': TYPE_FIB },
-
+ "concat": {
+ "op": Op.CONCAT,
+ "operands": (2, 0),
+ "build_fcn": (build_concat, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
+ "types": TYPE_FIB,
+ },
+ "pad": {
+ "op": Op.PAD,
+ "operands": (1, 0),
+ "build_fcn": (build_pad, TosaTensorGen.tgBasic, TosaArgGen.agPad),
+ "qgen": TosaQuantGen.qgPad,
+ "types": TYPE_FIB,
+ },
+ "reshape": {
+ "op": Op.RESHAPE,
+ "operands": (1, 0),
+ "build_fcn": (build_reshape, TosaTensorGen.tgBasic, TosaArgGen.agReshape),
+ "types": TYPE_FIB,
+ },
+ "reverse": {
+ "op": Op.REVERSE,
+ "operands": (1, 0),
+ "build_fcn": (build_reverse, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
+ "types": TYPE_FIB,
+ },
+ "slice": {
+ "op": Op.SLICE,
+ "operands": (1, 0),
+ "build_fcn": (build_slice, TosaTensorGen.tgBasic, TosaArgGen.agSlice),
+ "types": TYPE_FIB,
+ },
+ "tile": {
+ "op": Op.TILE,
+ "operands": (1, 0),
+ "build_fcn": (build_tile, TosaTensorGen.tgBasic, TosaArgGen.agTile),
+ "types": TYPE_FIB,
+ },
+ "transpose": {
+ "op": Op.TRANSPOSE,
+ "operands": (1, 0),
+ "rank": (2, 4), # Do not allow tranpose on rank=1
+ "build_fcn": (
+ build_transpose,
+ TosaTensorGen.tgBasic,
+ TosaArgGen.agTranspose,
+ ),
+ "types": TYPE_FIB,
+ },
# Scatter/Gather
- 'gather':
- { 'op': Op.GATHER,
- # Only specify 'values' tensor here. 'indices' is generated in op building stage
- 'operands': (1, 0),
- 'rank': (3, 3),
- 'build_fcn': (build_gather, TosaTensorGen.tgBasic, None),
- 'types': TYPE_INT_FP },
-
- 'scatter':
- { 'op': Op.SCATTER,
- # Only specify 'values_in' tensor here.
- #'indices' and 'input' are generated in op building stage
- 'operands': (2, 0),
- 'rank': (3, 3),
- 'build_fcn': (build_scatter, TosaTensorGen.tgScatter, None),
- 'types': TYPE_INT_FP },
-
+ "gather": {
+ "op": Op.GATHER,
+ # Only specify 'values' tensor here. 'indices' is generated in op building stage
+ "operands": (1, 0),
+ "rank": (3, 3),
+ "build_fcn": (build_gather, TosaTensorGen.tgBasic, None),
+ "types": TYPE_INT_FP,
+ },
+ "scatter": {
+ "op": Op.SCATTER,
+ # Only specify 'values_in' tensor here.
+ #'indices' and 'input' are generated in op building stage
+ "operands": (2, 0),
+ "rank": (3, 3),
+ "build_fcn": (build_scatter, TosaTensorGen.tgScatter, None),
+ "types": TYPE_INT_FP,
+ },
# Image operations
- 'resize':
- { 'op': Op.RESIZE,
- 'operands': (1, 0),
- 'rank': (4, 4),
- 'build_fcn': ( build_resize, TosaTensorGen.tgNHWC, TosaArgGen.agResize),
- 'types': [ DType.INT8, DType.INT16, DType.FLOAT ] },
-
-
+ "resize": {
+ "op": Op.RESIZE,
+ "operands": (1, 0),
+ "rank": (4, 4),
+ "build_fcn": (build_resize, TosaTensorGen.tgNHWC, TosaArgGen.agResize),
+ "types": [DType.INT8, DType.INT16, DType.FLOAT],
+ },
# Data nodes
- 'placeholder':
- { 'op': Op.PLACEHOLDER,
- 'operands': (1, 0),
- 'build_fcn': ( build_placeholder, TosaTensorGen.tgBasic, None),
- 'types': TYPE_FIB },
-
- 'const':
- { 'op': Op.CONST,
- 'operands': (1, 0),
- 'build_fcn': ( build_placeholder, TosaTensorGen.tgBasic, None),
- 'types': TYPE_FIB },
-
-
- 'identity':
- { 'op': Op.IDENTITY,
- 'operands': (1, 0),
- 'build_fcn': ( build_unary, TosaTensorGen.tgBasic, None),
- 'types': TYPE_FIB },
-
-
- 'identityn':
- { 'op': Op.IDENTITYN,
- 'operands': (2, 0),
- 'build_fcn': ( build_identityn, TosaTensorGen.tgBasic, None),
- 'types': TYPE_FIB },
-
+ "placeholder": {
+ "op": Op.PLACEHOLDER,
+ "operands": (1, 0),
+ "build_fcn": (build_placeholder, TosaTensorGen.tgBasic, None),
+ "types": TYPE_FIB,
+ },
+ "const": {
+ "op": Op.CONST,
+ "operands": (1, 0),
+ "build_fcn": (build_placeholder, TosaTensorGen.tgBasic, None),
+ "types": TYPE_FIB,
+ },
+ "identity": {
+ "op": Op.IDENTITY,
+ "operands": (1, 0),
+ "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
+ "types": TYPE_FIB,
+ },
+ "identityn": {
+ "op": Op.IDENTITYN,
+ "operands": (2, 0),
+ "build_fcn": (build_identityn, TosaTensorGen.tgBasic, None),
+ "types": TYPE_FIB,
+ },
# Type conversion
- 'cast':
- { 'op': Op.CAST,
- 'operands': (1, 0),
- 'build_fcn': ( build_cast, TosaTensorGen.tgBasic, TosaArgGen.agCast ),
- 'types': [ DType.FLOAT, DType.INT8, DType.INT16, DType.INT32, DType.BOOL ] },
-
- 'rescale':
- { 'op': Op.RESCALE,
- 'operands': (1, 0),
- 'build_fcn': ( build_rescale, TosaTensorGen.tgBasic, TosaArgGen.agRescale ),
- 'types': [ DType.INT8, DType.INT16, DType.INT32, DType.INT48 ] },
-
+ "cast": {
+ "op": Op.CAST,
+ "operands": (1, 0),
+ "build_fcn": (build_cast, TosaTensorGen.tgBasic, TosaArgGen.agCast),
+ "types": [DType.FLOAT, DType.INT8, DType.INT16, DType.INT32, DType.BOOL],
+ },
+ "rescale": {
+ "op": Op.RESCALE,
+ "operands": (1, 0),
+ "build_fcn": (build_rescale, TosaTensorGen.tgBasic, TosaArgGen.agRescale),
+ "types": [DType.INT8, DType.INT16, DType.INT32, DType.INT48],
+ },
# Custom
# Not implemented.
-
# Control flow
-
# Two variants of cond_if, one that generates one of two constant tensors (no
# inputs to the basic blocks, one output) and another that either adds or subtracts two tensors
# (two inputs to the basic blocks, one output)
- 'cond_if_const':
- { 'op': Op.COND_IF,
- 'operands': (0, 2),
- 'build_fcn': ( build_cond_if_const, TosaTensorGen.tgBasic, TosaArgGen.agCondIf ),
- 'types': [ DType.BOOL ] },
-
- 'cond_if_binary':
- { 'op': Op.COND_IF,
- 'operands': (2, 0),
- 'build_fcn': ( build_cond_if_binary, TosaTensorGen.tgBasic, TosaArgGen.agCondIf ),
- 'types': TYPE_FI32 },
-
+ "cond_if_const": {
+ "op": Op.COND_IF,
+ "operands": (0, 2),
+ "build_fcn": (
+ build_cond_if_const,
+ TosaTensorGen.tgBasic,
+ TosaArgGen.agCondIf,
+ ),
+ "types": [DType.BOOL],
+ },
+ "cond_if_binary": {
+ "op": Op.COND_IF,
+ "operands": (2, 0),
+ "build_fcn": (
+ build_cond_if_binary,
+ TosaTensorGen.tgBasic,
+ TosaArgGen.agCondIf,
+ ),
+ "types": TYPE_FI32,
+ },
# while_loop
- 'while_loop':
- { 'op': Op.WHILE_LOOP,
- 'operands': (0, 1),
- 'build_fcn': ( build_while_loop, TosaTensorGen.tgBasic, TosaArgGen.agWhileLoop ),
- 'types': [DType.INT32] },
-
-
+ "while_loop": {
+ "op": Op.WHILE_LOOP,
+ "operands": (0, 1),
+ "build_fcn": (
+ build_while_loop,
+ TosaTensorGen.tgBasic,
+ TosaArgGen.agWhileLoop,
+ ),
+ "types": [DType.INT32],
+ },
}
+
class OutputShaper:
# Methods in this class compute the expected output shape and datatype
# for common classes of operations
@@ -2134,8 +2338,8 @@ class OutputShaper:
# creating a new output tensor
@staticmethod
def binaryBroadcastOp(ser, a, b):
- assert(len(a.shape) == len(b.shape))
- assert(a.dtype == b.dtype)
+ assert len(a.shape) == len(b.shape)
+ assert a.dtype == b.dtype
shape = []
for i in range(len(a.shape)):
@@ -2144,39 +2348,39 @@ class OutputShaper:
else:
shape.append(a.shape[i])
- return ser.addOutput(shape, a.dtype, a.usage, a.dformat)
+ return ser.addOutput(shape, a.dtype)
@staticmethod
def binaryNonBroadcastOp(ser, a, b):
- assert(len(a.shape) == len(b.shape))
- assert(a.dtype == b.dtype)
+ assert len(a.shape) == len(b.shape)
+ assert a.dtype == b.dtype
shape = []
for i in range(len(a.shape)):
- assert(a.shape[i] == b.shape[i])
+ assert a.shape[i] == b.shape[i]
shape.append(a.shape[i])
- return ser.addOutput(shape, a.dtype, a.usage, a.dformat)
+ return ser.addOutput(shape, a.dtype)
@staticmethod
def unaryOp(ser, a):
- return ser.addOutput(a.shape, a.dtype, a.usage, a.dformat)
+ return ser.addOutput(a.shape, a.dtype)
@staticmethod
def selectOp(ser, cond, a, b):
- assert(len(a.shape) == len(b.shape) and len(a.shape) == len(cond.shape))
- assert(a.dtype == b.dtype)
+ assert len(a.shape) == len(b.shape) and len(a.shape) == len(cond.shape)
+ assert a.dtype == b.dtype
shape = []
for i in range(len(a.shape)):
shape.append(max(cond.shape[i], a.shape[i], b.shape[i]))
- return ser.addOutput(shape, a.dtype, a.usage, a.dformat)
+ return ser.addOutput(shape, a.dtype)
@staticmethod
def binaryComparisonOp(ser, a, b):
- assert(len(a.shape) == len(b.shape))
- assert(a.dtype == b.dtype)
+ assert len(a.shape) == len(b.shape)
+ assert a.dtype == b.dtype
# Do broadcast
shape = []
@@ -2187,7 +2391,7 @@ class OutputShaper:
shape.append(a.shape[i])
# Force the output type to bool
- return ser.addOutput(shape, DType.BOOL, a.usage, a.dformat)
+ return ser.addOutput(shape, DType.BOOL)
@staticmethod
def reduceOp(ser, a, axis):
@@ -2196,13 +2400,13 @@ class OutputShaper:
shape[axis] = 1
- return ser.addOutput(shape, a.dtype, a.usage, a.dformat)
+ return ser.addOutput(shape, a.dtype)
@staticmethod
def argmaxOp(ser, a, axis):
shape = a.shape.copy()
del shape[axis]
- return ser.addOutput(shape, DType.INT32, a.usage, a.dformat)
+ return ser.addOutput(shape, DType.INT32)
@staticmethod
def conv2dOp(ser, ifm, filter, strides, padding, dilations):
@@ -2216,17 +2420,27 @@ class OutputShaper:
# From H,W to T,B,L,R
padding = [padding[0], padding[0], padding[1], padding[1]]
- h = (ifm.shape[1] - filter.shape[1] - (filter.shape[1] - 1) * (dilations[0] - 1) + \
- padding[0] + padding[1]) // strides[0] + 1
-
- w = (ifm.shape[2] - filter.shape[2] - (filter.shape[2] - 1) * (dilations[1] - 1) + \
- padding[2] + padding[3]) // strides[1] + 1
+ h = (
+ ifm.shape[1]
+ - filter.shape[1]
+ - (filter.shape[1] - 1) * (dilations[0] - 1)
+ + padding[0]
+ + padding[1]
+ ) // strides[0] + 1
+
+ w = (
+ ifm.shape[2]
+ - filter.shape[2]
+ - (filter.shape[2] - 1) * (dilations[1] - 1)
+ + padding[2]
+ + padding[3]
+ ) // strides[1] + 1
if h <= 0 or w <= 0:
# Invalid test parameters?
h = 0
w = 0
- ser.setExpectedFailure(True, 'Invalid combination of conv2d parameters')
+ ser.setExpectedFailure(True, "Invalid combination of conv2d parameters")
ofm_shape = [ifm.shape[0], h, w, filter.shape[0]]
@@ -2237,29 +2451,39 @@ class OutputShaper:
elif ifm.dtype == DType.FLOAT:
out_dtype = DType.FLOAT
else:
- raise Exception('Unsupported input dtype: {}'.format(ifm.dtype))
+ raise Exception("Unsupported input dtype: {}".format(ifm.dtype))
if ifm.dtype == DType.INT16:
ser.setExpectedFailure(True, "INT16 support is in progress")
- return ser.addOutput(ofm_shape, out_dtype, ifm.usage, ifm.dformat)
+ return ser.addOutput(ofm_shape, out_dtype)
@staticmethod
def depthwiseConv2dOp(ser, ifm, filter, strides, padding, dilations):
# IFM: NHWC
# Filter: HWCM
# OFM: NHW C*M
- h = (ifm.shape[1] - filter.shape[0] - (filter.shape[0] - 1) * (dilations[0] - 1) + \
- padding[0] + padding[1]) // strides[0] + 1
-
- w = (ifm.shape[2] - filter.shape[1] - (filter.shape[1] - 1) * (dilations[1] - 1) + \
- padding[2] + padding[3]) // strides[1] + 1
+ h = (
+ ifm.shape[1]
+ - filter.shape[0]
+ - (filter.shape[0] - 1) * (dilations[0] - 1)
+ + padding[0]
+ + padding[1]
+ ) // strides[0] + 1
+
+ w = (
+ ifm.shape[2]
+ - filter.shape[1]
+ - (filter.shape[1] - 1) * (dilations[1] - 1)
+ + padding[2]
+ + padding[3]
+ ) // strides[1] + 1
if h <= 0 or w <= 0:
# Invalid test parameters?
h = 0
w = 0
- ser.setExpectedFailure(True, 'Invalid combination of conv2d parameters')
+ ser.setExpectedFailure(True, "Invalid combination of conv2d parameters")
ofm_shape = [ifm.shape[0], h, w, filter.shape[2] * filter.shape[3]]
@@ -2270,13 +2494,12 @@ class OutputShaper:
elif ifm.dtype == DType.FLOAT:
out_dtype = DType.FLOAT
else:
- raise Exception('Unsupported input dtype: {}'.format(ifm.dtype))
+ raise Exception("Unsupported input dtype: {}".format(ifm.dtype))
if ifm.dtype == DType.INT16:
ser.setExpectedFailure(True, "INT16 support is in progress")
- return ser.addOutput(ofm_shape, out_dtype, ifm.usage, ifm.dformat)
-
+ return ser.addOutput(ofm_shape, out_dtype)
@staticmethod
def pool2dOp(ser, ifm, kernel, stride, pad):
@@ -2288,10 +2511,10 @@ class OutputShaper:
# Invalid test parameters?
h = 0
w = 0
- ser.setExpectedFailure(True, 'Invalid combination of pooling parameters')
+ ser.setExpectedFailure(True, "Invalid combination of pooling parameters")
ofm_shape = [ifm.shape[0], h, w, ifm.shape[3]]
- return ser.addOutput(ofm_shape, ifm.dtype, ifm.usage, ifm.dformat)
+ return ser.addOutput(ofm_shape, ifm.dtype)
@staticmethod
def fullyConnectedOp(ser, input, filter):
@@ -2308,12 +2531,12 @@ class OutputShaper:
elif input.dtype == DType.FLOAT:
out_dtype = DType.FLOAT
else:
- raise Exception('Unsupported input dtype: {}'.format(input.dtype))
+ raise Exception("Unsupported input dtype: {}".format(input.dtype))
if input.dtype == DType.INT16:
ser.setExpectedFailure(True, "INT16 support is in progress")
- return ser.addOutput(output_shape, out_dtype, input.usage, input.dformat)
+ return ser.addOutput(output_shape, out_dtype)
@staticmethod
def matmulOp(ser, a, b):
@@ -2330,9 +2553,9 @@ class OutputShaper:
elif a.dtype == DType.FLOAT:
out_dtype = DType.FLOAT
else:
- raise Exception('UNsupported input dtype for matmul: {}'.format(a.dtype))
+ raise Exception("UNsupported input dtype for matmul: {}".format(a.dtype))
- return ser.addOutput(output_shape, out_dtype, a.usage, a.dformat)
+ return ser.addOutput(output_shape, out_dtype)
@staticmethod
def concatOp(ser, a, b, axis):
@@ -2340,7 +2563,7 @@ class OutputShaper:
output_shape = a.shape.copy()
output_shape[axis] = a.shape[axis] + b.shape[axis]
- return ser.addOutput(output_shape, a.dtype, a.usage, a.dformat)
+ return ser.addOutput(output_shape, a.dtype)
@staticmethod
def padOp(ser, a, padding):
@@ -2350,7 +2573,7 @@ class OutputShaper:
for i in range(len(output_shape)):
output_shape[i] = padding[i][0] + padding[i][1] + output_shape[i]
- return ser.addOutput(output_shape, a.dtype, a.usage, a.dformat)
+ return ser.addOutput(output_shape, a.dtype)
@staticmethod
def reshapeOp(ser, a, shape):
@@ -2371,34 +2594,34 @@ class OutputShaper:
if output_shape[i] == -1:
output_shape[i] = totalElements // totalOutputElements
- return ser.addOutput(output_shape, a.dtype, a.usage, a.dformat)
+ return ser.addOutput(output_shape, a.dtype)
@staticmethod
def sliceOp(ser, a, begin, size):
output_shape = size.copy()
- return ser.addOutput(output_shape, a.dtype, a.usage, a.dformat)
+ return ser.addOutput(output_shape, a.dtype)
@staticmethod
def tileOp(ser, a, multiples):
output_shape = a.shape.copy()
- assert(len(multiples) == len(output_shape))
+ assert len(multiples) == len(output_shape)
for i in range(len(output_shape)):
output_shape[i] = a.shape[i] * multiples[i]
- return ser.addOutput(output_shape, a.dtype, a.usage, a.dformat)
+ return ser.addOutput(output_shape, a.dtype)
@staticmethod
def transposeOp(ser, a, perms):
output_shape = a.shape.copy()
- assert(len(perms) == len(output_shape))
+ assert len(perms) == len(output_shape)
for i in range(len(output_shape)):
output_shape[i] = a.shape[perms[i]]
- return ser.addOutput(output_shape, a.dtype, a.usage, a.dformat)
+ return ser.addOutput(output_shape, a.dtype)
@staticmethod
def gatherOp(ser, values, indices):
@@ -2408,72 +2631,84 @@ class OutputShaper:
output_shape = [values.shape[0], indices.shape[1], values.shape[2]]
- return ser.addOutput(output_shape, values.dtype, values.usage, values.dformat)
+ return ser.addOutput(output_shape, values.dtype)
@staticmethod
def scatterOp(ser, values_in, indices, input):
assert len(values_in.shape) == 3
assert len(indices.shape) == 2
assert len(input.shape) == 3
- assert values_in.shape[0] == indices.shape[0] # N
- assert input.shape[1] == indices.shape[1] # W
- assert values_in.shape[2] == input.shape[2] # C
+ assert values_in.shape[0] == indices.shape[0] # N
+ assert input.shape[1] == indices.shape[1] # W
+ assert values_in.shape[2] == input.shape[2] # C
output_shape = values_in.shape
- return ser.addOutput(output_shape, values_in.dtype, values_in.usage, values_in.dformat)
+ return ser.addOutput(output_shape, values_in.dtype)
@staticmethod
def tableOp(ser, input, table):
# Same shape as the input, but with the type of the table.
- return ser.addOutput(input.shape, DType.INT32, input.usage, input.dformat)
+ return ser.addOutput(input.shape, DType.INT32)
@staticmethod
- def resizeOp(ser, input, mode, stride, offset, shift, stride_fp, offset_fp, output_dims, input_dtype, output_dtype):
+ def resizeOp(
+ ser,
+ input,
+ mode,
+ stride,
+ offset,
+ shift,
+ stride_fp,
+ offset_fp,
+ output_dims,
+ input_dtype,
+ output_dtype,
+ ):
output_dims = [input.shape[0], output_dims[0], output_dims[1], input.shape[3]]
if input_dtype == DType.FLOAT:
if stride_fp[0] <= 0 or stride_fp[1] <= 0:
- ser.setExpectedFailure(True, 'Negative or zero stride')
+ ser.setExpectedFailure(True, "Negative or zero stride")
else:
if stride[0] <= 0 or stride[1] <= 0:
- ser.setExpectedFailure(True, 'Negative or zero stride')
+ ser.setExpectedFailure(True, "Negative or zero stride")
if mode == ResizeMode.BILINEAR:
if input_dtype == DType.INT8:
if output_dtype != DType.INT32:
- ser.setExpectedFailure(True, 'Invalid output data type')
+ ser.setExpectedFailure(True, "Invalid output data type")
elif input_dtype == DType.INT16:
if output_dtype != DType.INT48:
- ser.setexpectedfailure(true, 'Invalid output data type')
+ ser.setExpectedFailure(True, "Invalid output data type")
elif input_dtype == DType.FLOAT:
if output_dtype != DType.FLOAT:
- ser.setexpectedfailure(true, 'Invalid output data type')
+ ser.setExpectedFailure(True, "Invalid output data type")
else:
- ser.setexpectedfailure(true, 'Invalid input data type')
+ ser.setExpectedFailure(True, "Invalid input data type")
elif mode == ResizeMode.NEAREST:
if input_dtype == DType.INT8:
if output_dtype != DType.INT8:
- ser.setExpectedFailure(True, 'Invalid output data type')
+ ser.setExpectedFailure(True, "Invalid output data type")
elif input_dtype == DType.INT16:
if output_dtype != DType.INT16:
- ser.setexpectedfailure(true, 'Invalid output data type')
+ ser.setExpectedFailure(True, "Invalid output data type")
elif input_dtype == DType.FLOAT:
if output_dtype != DType.FLOAT:
- ser.setexpectedfailure(true, 'Invalid output data type')
+ ser.setExpectedFailure(True, "Invalid output data type")
else:
- ser.setexpectedfailure(true, 'Invalid input data type')
+ ser.setExpectedFailure(True, "Invalid input data type")
else:
- ser.setexpectedfailure(true, 'Invalid resize mode')
+ ser.setExpectedFailure(True, "Invalid resize mode")
- return ser.addOutput(output_dims, output_dtype, input.usage, input.dformat)
+ return ser.addOutput(output_dims, output_dtype)
@staticmethod
def typeConversionOp(ser, val, out_dtype):
- return ser.addOutput(val.shape, out_dtype, val.usage, val.dformat)
+ return ser.addOutput(val.shape, out_dtype)
@staticmethod
def transposeConv2DOp(ser, ifm, output_shape):
@@ -2484,12 +2719,12 @@ class OutputShaper:
elif ifm.dtype == DType.FLOAT:
out_dtype = DType.FLOAT
else:
- raise Exception('Unsupported input dtype: {}'.format(ifm.dtype))
+ raise Exception("Unsupported input dtype: {}".format(ifm.dtype))
if output_shape[1] <= 0 or output_shape[2] <= 0:
- ser.setExpectedFailure(True, 'Negative output shape')
+ ser.setExpectedFailure(True, "Negative output shape")
if ifm.dtype == DType.INT16:
ser.setExpectedFailure(True, "INT16 support is in progress")
- return ser.addOutput(output_shape, out_dtype, ifm.usage, ifm.dformat)
+ return ser.addOutput(output_shape, out_dtype)
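Every OutputShaper hunk above makes the same substitution: ser.addOutput() is now called with just a shape and a dtype, and the old usage/dformat arguments disappear. A minimal before/after sketch of the new calling convention (the shape, dtype and the ser object here are illustrative assumptions, not values taken from the suite):

    # hypothetical illustration of the narrowed addOutput() signature
    output_shape = [1, 8, 8, 16]                    # example result shape
    # before: ser.addOutput(output_shape, a.dtype, a.usage, a.dformat)
    out = ser.addOutput(output_shape, DType.INT8)   # after: shape and dtype only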
diff --git a/verif/tosa_test_runner.py b/verif/tosa_test_runner.py
index 6549192..82d447e 100644
--- a/verif/tosa_test_runner.py
+++ b/verif/tosa_test_runner.py
@@ -19,29 +19,34 @@ import shlex
import subprocess
from enum import IntEnum, unique
+
def run_sh_command(args, full_cmd, capture_output=False):
- '''Utility function to run an external command. Optionally return captured stdout/stderr'''
+ """Utility function to run an external command. Optionally return captured stdout/stderr"""
# Quote the command line for printing
- full_cmd_esc = [ shlex.quote(x) for x in full_cmd ]
+ full_cmd_esc = [shlex.quote(x) for x in full_cmd]
if args.verbose:
- print('### Running {}'.format(' '.join(full_cmd_esc)))
+ print("### Running {}".format(" ".join(full_cmd_esc)))
if capture_output:
rc = subprocess.run(full_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if rc.returncode != 0:
- print(rc.stdout.decode('utf-8'))
- print(rc.stderr.decode('utf-8'))
- raise Exception('Error running command: {}.\n{}'.format(' '.join(full_cmd_esc), rc.stderr.decode('utf-8')))
+ print(rc.stdout.decode("utf-8"))
+ print(rc.stderr.decode("utf-8"))
+ raise Exception(
+ "Error running command: {}.\n{}".format(
+ " ".join(full_cmd_esc), rc.stderr.decode("utf-8")
+ )
+ )
return (rc.stdout, rc.stderr)
else:
rc = subprocess.run(full_cmd)
if rc.returncode != 0:
- raise Exception('Error running command: {}'.format(' '.join(full_cmd_esc)))
+ raise Exception("Error running command: {}".format(" ".join(full_cmd_esc)))
-class TosaTestRunner:
+class TosaTestRunner:
def __init__(self, args, runnerArgs, testDir):
self.args = args
@@ -49,7 +54,7 @@ class TosaTestRunner:
self.testDir = testDir
# Load the json test file
- with open(os.path.join(testDir, 'desc.json'), 'r') as fd:
+ with open(os.path.join(testDir, "desc.json"), "r") as fd:
self.testDesc = json.load(fd)
def runModel(self):
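run_sh_command() above has two modes: without capture_output it simply raises on a non-zero return code, while with capture_output=True it echoes the child's stdout/stderr before raising and otherwise returns them. A minimal usage sketch, assuming only that the args namespace carries a verbose attribute (the command itself is illustrative):

    import argparse
    args = argparse.Namespace(verbose=0)
    # returns (stdout_bytes, stderr_bytes) when the command succeeds
    stdout, stderr = run_sh_command(args, ["ls", "-l"], capture_output=True)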
diff --git a/verif/tosa_verif_build_tests.py b/verif/tosa_verif_build_tests.py
index b8a24dd..15482e6 100755
--- a/verif/tosa_verif_build_tests.py
+++ b/verif/tosa_verif_build_tests.py
@@ -32,11 +32,18 @@ import traceback
from enum import IntEnum, Enum, unique
from datetime import datetime
-# Include the ../shared directory in PYTHONPATH
+# Include the ../scripts and ../scripts/xunit directory in PYTHONPATH
parent_dir = os.path.dirname(os.path.realpath(__file__))
-sys.path.append(os.path.join(parent_dir, '..', 'scripts'))
-sys.path.append(os.path.join(parent_dir, '..', 'scripts', 'xunit'))
+sys.path.append(os.path.join(parent_dir, "..", "scripts"))
+sys.path.append(os.path.join(parent_dir, "..", "scripts", "xunit"))
+
import xunit
+
+# Include the ../thirdparty/serialization_lib/python directory in PYTHONPATH
+parent_dir = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(
+ os.path.join(parent_dir, "..", "thirdparty", "serialization_lib", "python")
+)
from tosa_serializer import *
from tosa_test_gen import TosaTestGen
import tosa
@@ -44,77 +51,153 @@ import tosa
# Used for parsing a comma-separated list of integers in a string
# to an actual list of integers
def str_to_list(in_s):
- '''Converts a comma-separated list of string integers to a python list of ints'''
- lst = in_s.split(',')
+ """Converts a comma-separated list of string integers to a python list of ints"""
+ lst = in_s.split(",")
out_list = []
for i in lst:
out_list.append(int(i))
return out_list
+
def auto_int(x):
- '''Converts hex/dec argument values to an int'''
+ """Converts hex/dec argument values to an int"""
return int(x, 0)
+
def parseArgs():
parser = argparse.ArgumentParser()
- parser.add_argument('-o', dest='output_dir', type=str, default='vtest',
- help='Test output directory')
-
- parser.add_argument('--seed', dest='random_seed', default=42, type=int,
- help='Random seed for test generation')
-
- parser.add_argument('--filter', dest='filter', default='', type=str,
- help='Filter operator test names by this expression')
-
- parser.add_argument('-v', '--verbose', dest='verbose', action='count',
- help='Verbose operation')
+ parser.add_argument(
+ "-o", dest="output_dir", type=str, default="vtest", help="Test output directory"
+ )
+
+ parser.add_argument(
+ "--seed",
+ dest="random_seed",
+ default=42,
+ type=int,
+ help="Random seed for test generation",
+ )
+
+ parser.add_argument(
+ "--filter",
+ dest="filter",
+ default="",
+ type=str,
+ help="Filter operator test names by this expression",
+ )
+
+ parser.add_argument(
+ "-v", "--verbose", dest="verbose", action="count", help="Verbose operation"
+ )
# Constraints on tests
- parser.add_argument('--tensor-dim-range', dest='tensor_shape_range', default='1,64',
- type=lambda x: str_to_list(x),
- help='Min,Max range of tensor shapes')
-
- parser.add_argument('--max-batch-size', dest='max_batch_size', default=1, type=int,
- help='Maximum batch size for NHWC tests')
-
- parser.add_argument('--max-conv-padding', dest='max_conv_padding', default=1, type=int,
- help='Maximum padding for Conv tests')
-
- parser.add_argument('--max-conv-dilation', dest='max_conv_dilation', default=2, type=int,
- help='Maximum dilation for Conv tests')
-
- parser.add_argument('--max-conv-stride', dest='max_conv_stride', default=2, type=int,
- help='Maximum stride for Conv tests')
-
- parser.add_argument('--max-pooling-padding', dest='max_pooling_padding', default=1, type=int,
- help='Maximum padding for pooling tests')
-
- parser.add_argument('--max-pooling-stride', dest='max_pooling_stride', default=2, type=int,
- help='Maximum stride for pooling tests')
-
- parser.add_argument('--max-pooling-kernel', dest='max_pooling_kernel', default=2, type=int,
- help='Maximum padding for pooling tests')
-
- parser.add_argument('--num-rand-permutations', dest='num_rand_permutations', default=6, type=int,
- help='Number of random permutations for a given shape/rank for randomly-sampled parameter spaces')
+ parser.add_argument(
+ "--tensor-dim-range",
+ dest="tensor_shape_range",
+ default="1,64",
+ type=lambda x: str_to_list(x),
+ help="Min,Max range of tensor shapes",
+ )
+
+ parser.add_argument(
+ "--max-batch-size",
+ dest="max_batch_size",
+ default=1,
+ type=int,
+ help="Maximum batch size for NHWC tests",
+ )
+
+ parser.add_argument(
+ "--max-conv-padding",
+ dest="max_conv_padding",
+ default=1,
+ type=int,
+ help="Maximum padding for Conv tests",
+ )
+
+ parser.add_argument(
+ "--max-conv-dilation",
+ dest="max_conv_dilation",
+ default=2,
+ type=int,
+ help="Maximum dilation for Conv tests",
+ )
+
+ parser.add_argument(
+ "--max-conv-stride",
+ dest="max_conv_stride",
+ default=2,
+ type=int,
+ help="Maximum stride for Conv tests",
+ )
+
+ parser.add_argument(
+ "--max-pooling-padding",
+ dest="max_pooling_padding",
+ default=1,
+ type=int,
+ help="Maximum padding for pooling tests",
+ )
+
+ parser.add_argument(
+ "--max-pooling-stride",
+ dest="max_pooling_stride",
+ default=2,
+ type=int,
+ help="Maximum stride for pooling tests",
+ )
+
+ parser.add_argument(
+ "--max-pooling-kernel",
+ dest="max_pooling_kernel",
+ default=2,
+ type=int,
+ help="Maximum padding for pooling tests",
+ )
+
+ parser.add_argument(
+ "--num-rand-permutations",
+ dest="num_rand_permutations",
+ default=6,
+ type=int,
+ help="Number of random permutations for a given shape/rank for randomly-sampled parameter spaces",
+ )
# Targetting a specific shape/rank/dtype
- parser.add_argument('--target-shape', dest='target_shapes', action='append', default=[], type=lambda x: str_to_list(x),
- help='Create tests with a particular input tensor shape, e.g., 1,4,4,8 (may be repeated for tests that require multiple input shapes)')
-
- parser.add_argument('--target-rank', dest='target_ranks', action='append', default=None, type=lambda x: auto_int(x),
- help='Create tests with a particular input tensor rank')
-
- parser.add_argument('--target-dtype', dest='target_dtypes', action='append', default=None, type=lambda x: dtype_str_to_val(x),
- help='Create test with a particular DType (may be repeated)')
+ parser.add_argument(
+ "--target-shape",
+ dest="target_shapes",
+ action="append",
+ default=[],
+ type=lambda x: str_to_list(x),
+ help="Create tests with a particular input tensor shape, e.g., 1,4,4,8 (may be repeated for tests that require multiple input shapes)",
+ )
+
+ parser.add_argument(
+ "--target-rank",
+ dest="target_ranks",
+ action="append",
+ default=None,
+ type=lambda x: auto_int(x),
+ help="Create tests with a particular input tensor rank",
+ )
+
+ parser.add_argument(
+ "--target-dtype",
+ dest="target_dtypes",
+ action="append",
+ default=None,
+ type=lambda x: dtype_str_to_val(x),
+ help="Create test with a particular DType (may be repeated)",
+ )
args = parser.parse_args()
return args
-def main():
+def main():
args = parseArgs()
@@ -122,16 +205,23 @@ def main():
testList = []
for op in ttg.TOSA_OP_LIST:
- if re.match(args.filter + '.*', op):
- testList.extend(ttg.genOpTestList(op, shapeFilter=args.target_shapes, rankFilter=args.target_ranks, dtypeFilter=args.target_dtypes))
-
- print('{} matching tests'.format(len(testList)))
+ if re.match(args.filter + ".*", op):
+ testList.extend(
+ ttg.genOpTestList(
+ op,
+ shapeFilter=args.target_shapes,
+ rankFilter=args.target_ranks,
+ dtypeFilter=args.target_dtypes,
+ )
+ )
+
+ print("{} matching tests".format(len(testList)))
for opName, testStr, dtype, shapeList, testArgs in testList:
if args.verbose:
print(testStr)
ttg.serializeTest(opName, testStr, dtype, shapeList, testArgs)
- print('Done creating {} tests'.format(len(testList)))
+ print("Done creating {} tests".format(len(testList)))
-if __name__ == '__main__':
+if __name__ == "__main__":
exit(main())
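Two behavioural points in the rewritten script are worth calling out: the serialization_lib Python bindings are now located through the thirdparty submodule path appended to sys.path, and --filter is applied as a prefix regular expression, since each op name is matched against args.filter + ".*". A small sketch of that filter behaviour (the operator names are illustrative):

    import re
    ops = ["conv2d", "depthwise_conv2d", "matmul"]
    flt = "conv"                                        # e.g. --filter conv
    matching = [op for op in ops if re.match(flt + ".*", op)]
    # matching == ["conv2d"]; re.match anchors at the start of the name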
diff --git a/verif/tosa_verif_run_ref.py b/verif/tosa_verif_run_ref.py
index 2284e35..e3926ea 100755
--- a/verif/tosa_verif_run_ref.py
+++ b/verif/tosa_verif_run_ref.py
@@ -33,42 +33,90 @@ import importlib
from enum import IntEnum, Enum, unique
from datetime import datetime
-# Include the ../shared directory in PYTHONPATH
+# Include the ../scripts and ../scripts/xunit directory in PYTHONPATH
parent_dir = os.path.dirname(os.path.realpath(__file__))
-sys.path.append(os.path.join(parent_dir, '..', 'scripts'))
-sys.path.append(os.path.join(parent_dir, '..', 'scripts', 'xunit'))
+sys.path.append(os.path.join(parent_dir, "..", "scripts"))
+sys.path.append(os.path.join(parent_dir, "..", "scripts", "xunit"))
+
import xunit
+
+# Include the ../thirdparty/serialization_lib/python directory in PYTHONPATH
+parent_dir = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(
+ os.path.join(parent_dir, "..", "thirdparty", "serialization_lib", "python")
+)
import tosa
from tosa_test_gen import TosaTestGen
from tosa_test_runner import TosaTestRunner
no_color_printing = False
-#from run_tf_unit_test import LogColors, print_color, run_sh_command
+# from run_tf_unit_test import LogColors, print_color, run_sh_command
+
def parseArgs():
parser = argparse.ArgumentParser()
- parser.add_argument('-t', '--test', dest='test', type=str, nargs='+',
- help='Test(s) to run')
- parser.add_argument('--seed', dest='random_seed', default=42, type=int,
- help='Random seed for test generation')
- parser.add_argument('--ref-model-path', dest='ref_model_path',
- default='build/reference_model/tosa_reference_model', type=str,
- help='Path to reference model executable')
- parser.add_argument('--ref-debug', dest='ref_debug', default='', type=str,
- help='Reference debug flag (low, med, high)')
- parser.add_argument('--ref-intermediates', dest='ref_intermediates', default=0, type=int,
- help='Reference model dumps intermediate tensors')
- parser.add_argument('-v', '--verbose', dest='verbose', action='count',
- help='Verbose operation')
- parser.add_argument('-j', '--jobs', dest='jobs', type=int, default=1,
- help='Number of parallel jobs')
- parser.add_argument('--sut-module', '-s', dest='sut_module', type=str, nargs='+', default=['tosa_ref_run'],
- help='System under test module to load (derives from TosaTestRunner). May be repeated')
- parser.add_argument('--sut-module-args', dest='sut_module_args', type=str, nargs='+', default=[],
- help='System under test module arguments. Use sutmodulename:argvalue to pass an argument. May be repeated.')
- parser.add_argument('--xunit-file', dest='xunit_file', type=str, default='result.xml',
- help='XUnit output file')
+ parser.add_argument(
+ "-t", "--test", dest="test", type=str, nargs="+", help="Test(s) to run"
+ )
+ parser.add_argument(
+ "--seed",
+ dest="random_seed",
+ default=42,
+ type=int,
+ help="Random seed for test generation",
+ )
+ parser.add_argument(
+ "--ref-model-path",
+ dest="ref_model_path",
+ default="build/reference_model/tosa_reference_model",
+ type=str,
+ help="Path to reference model executable",
+ )
+ parser.add_argument(
+ "--ref-debug",
+ dest="ref_debug",
+ default="",
+ type=str,
+ help="Reference debug flag (low, med, high)",
+ )
+ parser.add_argument(
+ "--ref-intermediates",
+ dest="ref_intermediates",
+ default=0,
+ type=int,
+ help="Reference model dumps intermediate tensors",
+ )
+ parser.add_argument(
+ "-v", "--verbose", dest="verbose", action="count", help="Verbose operation"
+ )
+ parser.add_argument(
+ "-j", "--jobs", dest="jobs", type=int, default=1, help="Number of parallel jobs"
+ )
+ parser.add_argument(
+ "--sut-module",
+ "-s",
+ dest="sut_module",
+ type=str,
+ nargs="+",
+ default=["tosa_ref_run"],
+ help="System under test module to load (derives from TosaTestRunner). May be repeated",
+ )
+ parser.add_argument(
+ "--sut-module-args",
+ dest="sut_module_args",
+ type=str,
+ nargs="+",
+ default=[],
+ help="System under test module arguments. Use sutmodulename:argvalue to pass an argument. May be repeated.",
+ )
+ parser.add_argument(
+ "--xunit-file",
+ dest="xunit_file",
+ type=str,
+ default="result.xml",
+ help="XUnit output file",
+ )
args = parser.parse_args()
@@ -78,6 +126,7 @@ def parseArgs():
return args
+
def workerThread(task_queue, runnerList, args, result_queue):
while True:
try:
@@ -88,21 +137,31 @@ def workerThread(task_queue, runnerList, args, result_queue):
if test is None:
break
- msg = ''
+ msg = ""
start_time = datetime.now()
try:
for runnerModule, runnerArgs in runnerList:
if args.verbose:
- print('Running runner {} with test {}'.format(runnerModule.__name__, test))
+ print(
+ "Running runner {} with test {}".format(
+ runnerModule.__name__, test
+ )
+ )
runner = runnerModule.TosaRefRunner(args, runnerArgs, test)
try:
rc = runner.runModel()
except Exception as e:
rc = TosaTestRunner.Result.INTERNAL_ERROR
except Exception as e:
- print('Internal regression error: {}'.format(e))
- print(''.join(traceback.format_exception(etype=type(e), value=e, tb=e.__traceback__)))
+ print("Internal regression error: {}".format(e))
+ print(
+ "".join(
+ traceback.format_exception(
+ etype=type(e), value=e, tb=e.__traceback__
+ )
+ )
+ )
rc = TosaTestRunner.Result.INTERNAL_ERROR
end_time = datetime.now()
@@ -112,25 +171,27 @@ def workerThread(task_queue, runnerList, args, result_queue):
return True
+
def loadRefModules(args):
# Returns a tuple of (runner_module, [argument list])
runnerList = []
for r in args.sut_module:
if args.verbose:
- print('Loading module {}'.format(r))
+ print("Loading module {}".format(r))
runner = importlib.import_module(r)
# Look for arguments associated with this runner
- runnerArgPrefix = '{}:'.format(r)
+ runnerArgPrefix = "{}:".format(r)
runnerArgList = []
for a in args.sut_module_args:
if a.startswith(runnerArgPrefix):
- runnerArgList.append(a[len(runnerArgPrefix):])
+ runnerArgList.append(a[len(runnerArgPrefix) :])
runnerList.append((runner, runnerArgList))
return runnerList
+
def main():
args = parseArgs()
@@ -143,10 +204,12 @@ def main():
for t in args.test:
taskQueue.put((t))
- print('Running {} tests '.format(taskQueue.qsize()))
+ print("Running {} tests ".format(taskQueue.qsize()))
for i in range(args.jobs):
- t = threading.Thread(target=workerThread, args=(taskQueue, runnerList, args, resultQueue))
+ t = threading.Thread(
+ target=workerThread, args=(taskQueue, runnerList, args, resultQueue)
+ )
t.setDaemon(True)
t.start()
threads.append(t)
@@ -165,34 +228,40 @@ def main():
resultList.append((test, rc, msg, time_delta))
results[rc] = results[rc] + 1
- xunit_result = xunit.xunit_results('Regressions')
- xunit_suite = xunit_result.create_suite('Unit tests')
+ xunit_result = xunit.xunit_results("Regressions")
+ xunit_suite = xunit_result.create_suite("Unit tests")
# Sort by test name
for test, rc, msg, time_delta in sorted(resultList, key=lambda tup: tup[0]):
test_name = test
- xt = xunit.xunit_test(test_name, 'reference')
+ xt = xunit.xunit_test(test_name, "reference")
- xt.time = str(float(time_delta.seconds) + (float(time_delta.microseconds) * 1e-6))
+ xt.time = str(
+ float(time_delta.seconds) + (float(time_delta.microseconds) * 1e-6)
+ )
- if rc == TosaTestRunner.Result.EXPECTED_PASS or rc == TosaTestRunner.Result.EXPECTED_FAILURE:
+ if (
+ rc == TosaTestRunner.Result.EXPECTED_PASS
+ or rc == TosaTestRunner.Result.EXPECTED_FAILURE
+ ):
if args.verbose:
- print('{} {}'.format(rc.name, test_name))
+ print("{} {}".format(rc.name, test_name))
else:
xt.failed(msg)
- print('{} {}'.format(rc.name, test_name))
+ print("{} {}".format(rc.name, test_name))
xunit_suite.tests.append(xt)
resultQueue.task_done()
xunit_result.write_results(args.xunit_file)
- print('Totals: ', end='')
+ print("Totals: ", end="")
for result in TosaTestRunner.Result:
- print('{} {}, '.format(results[result], result.name.lower()), end ='')
+ print("{} {}, ".format(results[result], result.name.lower()), end="")
print()
return 0
-if __name__ == '__main__':
+
+if __name__ == "__main__":
exit(main())
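Finally, loadRefModules() above routes runner-specific options by prefix: each --sut-module-args value of the form module:argument is stripped of its module: prefix and handed only to the matching runner. A short sketch of that slicing with illustrative values (tosa_ref_run is the default module; the flag and the second module name are made up):

    sut_module_args = ["tosa_ref_run:--special-flag", "other_runner:--x"]
    runnerArgPrefix = "{}:".format("tosa_ref_run")
    runnerArgList = [a[len(runnerArgPrefix):] for a in sut_module_args
                     if a.startswith(runnerArgPrefix)]
    # runnerArgList == ["--special-flag"]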