Diffstat (limited to 'src/armnnSerializer/ArmnnSchema_generated.h')
-rw-r--r--  src/armnnSerializer/ArmnnSchema_generated.h  24
1 file changed, 19 insertions(+), 5 deletions(-)
diff --git a/src/armnnSerializer/ArmnnSchema_generated.h b/src/armnnSerializer/ArmnnSchema_generated.h
index cb9e686ca9..524ffb0182 100644
--- a/src/armnnSerializer/ArmnnSchema_generated.h
+++ b/src/armnnSerializer/ArmnnSchema_generated.h
@@ -1,5 +1,5 @@
//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
// automatically generated by the FlatBuffers compiler, do not modify
@@ -1633,7 +1633,8 @@ struct TensorInfo FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
VT_QUANTIZATIONOFFSET = 10,
VT_QUANTIZATIONSCALES = 12,
VT_QUANTIZATIONDIM = 14,
- VT_DIMENSIONALITY = 16
+ VT_DIMENSIONALITY = 16,
+ VT_DIMENSIONSPECIFICITY = 18
};
const flatbuffers::Vector<uint32_t> *dimensions() const {
return GetPointer<const flatbuffers::Vector<uint32_t> *>(VT_DIMENSIONS);
@@ -1656,6 +1657,9 @@ struct TensorInfo FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
uint32_t dimensionality() const {
return GetField<uint32_t>(VT_DIMENSIONALITY, 1);
}
+ const flatbuffers::Vector<uint8_t> *dimensionSpecificity() const {
+ return GetPointer<const flatbuffers::Vector<uint8_t> *>(VT_DIMENSIONSPECIFICITY);
+ }
bool Verify(flatbuffers::Verifier &verifier) const {
return VerifyTableStart(verifier) &&
VerifyOffset(verifier, VT_DIMENSIONS) &&
@@ -1667,6 +1671,8 @@ struct TensorInfo FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
verifier.VerifyVector(quantizationScales()) &&
VerifyField<uint32_t>(verifier, VT_QUANTIZATIONDIM) &&
VerifyField<uint32_t>(verifier, VT_DIMENSIONALITY) &&
+ VerifyOffset(verifier, VT_DIMENSIONSPECIFICITY) &&
+ verifier.VerifyVector(dimensionSpecificity()) &&
verifier.EndTable();
}
};
@@ -1696,6 +1702,9 @@ struct TensorInfoBuilder {
void add_dimensionality(uint32_t dimensionality) {
fbb_.AddElement<uint32_t>(TensorInfo::VT_DIMENSIONALITY, dimensionality, 1);
}
+ void add_dimensionSpecificity(flatbuffers::Offset<flatbuffers::Vector<uint8_t>> dimensionSpecificity) {
+ fbb_.AddOffset(TensorInfo::VT_DIMENSIONSPECIFICITY, dimensionSpecificity);
+ }
explicit TensorInfoBuilder(flatbuffers::FlatBufferBuilder &_fbb)
: fbb_(_fbb) {
start_ = fbb_.StartTable();
@@ -1716,8 +1725,10 @@ inline flatbuffers::Offset<TensorInfo> CreateTensorInfo(
int32_t quantizationOffset = 0,
flatbuffers::Offset<flatbuffers::Vector<float>> quantizationScales = 0,
uint32_t quantizationDim = 0,
- uint32_t dimensionality = 1) {
+ uint32_t dimensionality = 1,
+ flatbuffers::Offset<flatbuffers::Vector<uint8_t>> dimensionSpecificity = 0) {
TensorInfoBuilder builder_(_fbb);
+ builder_.add_dimensionSpecificity(dimensionSpecificity);
builder_.add_dimensionality(dimensionality);
builder_.add_quantizationDim(quantizationDim);
builder_.add_quantizationScales(quantizationScales);
@@ -1736,9 +1747,11 @@ inline flatbuffers::Offset<TensorInfo> CreateTensorInfoDirect(
int32_t quantizationOffset = 0,
const std::vector<float> *quantizationScales = nullptr,
uint32_t quantizationDim = 0,
- uint32_t dimensionality = 1) {
+ uint32_t dimensionality = 1,
+ const std::vector<uint8_t> *dimensionSpecificity = nullptr) {
auto dimensions__ = dimensions ? _fbb.CreateVector<uint32_t>(*dimensions) : 0;
auto quantizationScales__ = quantizationScales ? _fbb.CreateVector<float>(*quantizationScales) : 0;
+ auto dimensionSpecificity__ = dimensionSpecificity ? _fbb.CreateVector<uint8_t>(*dimensionSpecificity) : 0;
return armnnSerializer::CreateTensorInfo(
_fbb,
dimensions__,
@@ -1747,7 +1760,8 @@ inline flatbuffers::Offset<TensorInfo> CreateTensorInfoDirect(
quantizationOffset,
quantizationScales__,
quantizationDim,
- dimensionality);
+ dimensionality,
+ dimensionSpecificity__);
}
struct ByteData FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
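
The net effect of this change is a new optional dimensionSpecificity vector on the TensorInfo table, exposed through an accessor, the builder, and both Create helpers. Below is a minimal usage sketch, not part of the diff: it assumes the parameters that precede quantizationOffset (dimensions, data type, quantization scale) keep their existing order, that DataType_Float32 exists in the same generated schema, and that dimensionSpecificity carries one flag per dimension marking whether that dimension's size is specified.

// Sketch: serialising a TensorInfo whose first dimension (e.g. a dynamic
// batch size) is left unspecified. Argument order follows the generated
// CreateTensorInfoDirect helper shown in this diff; leading arguments are
// assumptions, not confirmed by the visible hunks.
#include <vector>
#include "flatbuffers/flatbuffers.h"
#include "ArmnnSchema_generated.h"

flatbuffers::Offset<armnnSerializer::TensorInfo>
BuildTensorInfo(flatbuffers::FlatBufferBuilder& fbb)
{
    std::vector<uint32_t> dimensions = {0, 3, 224, 224};
    // One flag per dimension: 1 = size is specified, 0 = size unknown.
    std::vector<uint8_t> dimensionSpecificity = {0, 1, 1, 1};

    return armnnSerializer::CreateTensorInfoDirect(
        fbb,
        &dimensions,
        armnnSerializer::DataType_Float32,   // assumed enum value
        /*quantizationScale=*/1.0f,
        /*quantizationOffset=*/0,
        /*quantizationScales=*/nullptr,
        /*quantizationDim=*/0,
        /*dimensionality=*/1,
        &dimensionSpecificity);              // new field added by this diff
}

Because the field is appended with a new vtable slot (VT_DIMENSIONSPECIFICITY = 18), buffers written by the previous schema still verify and read back with dimensionSpecificity() returning nullptr, so callers should treat a missing vector as "all dimensions specified".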