diff options
author | Colm Donelan <Colm.Donelan@arm.com> | 2021-02-12 12:43:35 +0000 |
---|---|---|
committer | Colm Donelan <colm.donelan@arm.com> | 2021-02-15 10:24:30 +0000 |
commit | 800b281e506e921006c23cd4309781b6508c0fcb (patch) | |
tree | 06c6eb1a252fa6d90460c1e821542bc1a86b067f /src/armnnSerializer/ArmnnSchema_generated.h | |
parent | 0a7dc6bba5d0810fe2ed6f84b0376a8b0674c0b3 (diff) | |
download | armnn-800b281e506e921006c23cd4309781b6508c0fcb.tar.gz |
IVGCVSW-5648 Adding serializer support for m_DimensionsSpecificity
The field m_DimensionsSpecificity in TensorShape was not being serialized
and deserialized following implementation of type 1 dynamic tensors.
* Update schema.
* Add to Serializer and Deserializer.
Signed-off-by: Colm Donelan <Colm.Donelan@arm.com>
Change-Id: I7ddbdaf54c8f4b988c6cb300f90ba848a94bdad0
Diffstat (limited to 'src/armnnSerializer/ArmnnSchema_generated.h')
-rw-r--r-- | src/armnnSerializer/ArmnnSchema_generated.h | 24 |
1 file changed, 19 insertions, 5 deletions
diff --git a/src/armnnSerializer/ArmnnSchema_generated.h b/src/armnnSerializer/ArmnnSchema_generated.h index cb9e686ca9..524ffb0182 100644 --- a/src/armnnSerializer/ArmnnSchema_generated.h +++ b/src/armnnSerializer/ArmnnSchema_generated.h @@ -1,5 +1,5 @@ // -// Copyright © 2020 Arm Ltd and Contributors. All rights reserved. +// Copyright © 2021 Arm Ltd and Contributors. All rights reserved. // SPDX-License-Identifier: MIT // // automatically generated by the FlatBuffers compiler, do not modify @@ -1633,7 +1633,8 @@ struct TensorInfo FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { VT_QUANTIZATIONOFFSET = 10, VT_QUANTIZATIONSCALES = 12, VT_QUANTIZATIONDIM = 14, - VT_DIMENSIONALITY = 16 + VT_DIMENSIONALITY = 16, + VT_DIMENSIONSPECIFICITY = 18 }; const flatbuffers::Vector<uint32_t> *dimensions() const { return GetPointer<const flatbuffers::Vector<uint32_t> *>(VT_DIMENSIONS); @@ -1656,6 +1657,9 @@ struct TensorInfo FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { uint32_t dimensionality() const { return GetField<uint32_t>(VT_DIMENSIONALITY, 1); } + const flatbuffers::Vector<uint8_t> *dimensionSpecificity() const { + return GetPointer<const flatbuffers::Vector<uint8_t> *>(VT_DIMENSIONSPECIFICITY); + } bool Verify(flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_DIMENSIONS) && @@ -1667,6 +1671,8 @@ struct TensorInfo FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { verifier.VerifyVector(quantizationScales()) && VerifyField<uint32_t>(verifier, VT_QUANTIZATIONDIM) && VerifyField<uint32_t>(verifier, VT_DIMENSIONALITY) && + VerifyOffset(verifier, VT_DIMENSIONSPECIFICITY) && + verifier.VerifyVector(dimensionSpecificity()) && verifier.EndTable(); } }; @@ -1696,6 +1702,9 @@ struct TensorInfoBuilder { void add_dimensionality(uint32_t dimensionality) { fbb_.AddElement<uint32_t>(TensorInfo::VT_DIMENSIONALITY, dimensionality, 1); } + void 
add_dimensionSpecificity(flatbuffers::Offset<flatbuffers::Vector<uint8_t>> dimensionSpecificity) { + fbb_.AddOffset(TensorInfo::VT_DIMENSIONSPECIFICITY, dimensionSpecificity); + } explicit TensorInfoBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); @@ -1716,8 +1725,10 @@ inline flatbuffers::Offset<TensorInfo> CreateTensorInfo( int32_t quantizationOffset = 0, flatbuffers::Offset<flatbuffers::Vector<float>> quantizationScales = 0, uint32_t quantizationDim = 0, - uint32_t dimensionality = 1) { + uint32_t dimensionality = 1, + flatbuffers::Offset<flatbuffers::Vector<uint8_t>> dimensionSpecificity = 0) { TensorInfoBuilder builder_(_fbb); + builder_.add_dimensionSpecificity(dimensionSpecificity); builder_.add_dimensionality(dimensionality); builder_.add_quantizationDim(quantizationDim); builder_.add_quantizationScales(quantizationScales); @@ -1736,9 +1747,11 @@ inline flatbuffers::Offset<TensorInfo> CreateTensorInfoDirect( int32_t quantizationOffset = 0, const std::vector<float> *quantizationScales = nullptr, uint32_t quantizationDim = 0, - uint32_t dimensionality = 1) { + uint32_t dimensionality = 1, + const std::vector<uint8_t> *dimensionSpecificity = nullptr) { auto dimensions__ = dimensions ? _fbb.CreateVector<uint32_t>(*dimensions) : 0; auto quantizationScales__ = quantizationScales ? _fbb.CreateVector<float>(*quantizationScales) : 0; + auto dimensionSpecificity__ = dimensionSpecificity ? _fbb.CreateVector<uint8_t>(*dimensionSpecificity) : 0; return armnnSerializer::CreateTensorInfo( _fbb, dimensions__, @@ -1747,7 +1760,8 @@ inline flatbuffers::Offset<TensorInfo> CreateTensorInfoDirect( quantizationOffset, quantizationScales__, quantizationDim, - dimensionality); + dimensionality, + dimensionSpecificity__); } struct ByteData FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { |