aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorColm Donelan <Colm.Donelan@arm.com>2021-02-12 12:43:35 +0000
committerColm Donelan <colm.donelan@arm.com>2021-02-15 10:24:30 +0000
commit800b281e506e921006c23cd4309781b6508c0fcb (patch)
tree06c6eb1a252fa6d90460c1e821542bc1a86b067f
parent0a7dc6bba5d0810fe2ed6f84b0376a8b0674c0b3 (diff)
downloadarmnn-800b281e506e921006c23cd4309781b6508c0fcb.tar.gz
IVGCVSW-5648 Adding serializer support for m_DimensionsSpecificity
The field m_DimensionsSpecificity in TensorShape was not being serialized and deserialized following the implementation of type 1 dynamic tensors.

* Update schema.
* Add to Serializer and Deserializer.

Signed-off-by: Colm Donelan <Colm.Donelan@arm.com>
Change-Id: I7ddbdaf54c8f4b988c6cb300f90ba848a94bdad0
-rw-r--r--src/armnnDeserializer/Deserializer.cpp42
-rw-r--r--src/armnnSerializer/ArmnnSchema.fbs1
-rw-r--r--src/armnnSerializer/ArmnnSchema_generated.h24
-rw-r--r--src/armnnSerializer/Serializer.cpp14
4 files changed, 62 insertions, 19 deletions
diff --git a/src/armnnDeserializer/Deserializer.cpp b/src/armnnDeserializer/Deserializer.cpp
index cbc4758e0e..9b4cbe9439 100644
--- a/src/armnnDeserializer/Deserializer.cpp
+++ b/src/armnnDeserializer/Deserializer.cpp
@@ -608,45 +608,63 @@ armnn::TensorInfo ToTensorInfo(TensorRawPtr tensorPtr)
}
}
+ float quantizationScale = tensorPtr->quantizationScale();
+ int32_t quantizationOffset = tensorPtr->quantizationOffset();
+
if (tensorPtr->dimensionality() == static_cast<unsigned int>(Dimensionality::Scalar))
{
- float quantizationScale = tensorPtr->quantizationScale();
- int32_t quantizationOffset = tensorPtr->quantizationOffset();
-
- return armnn::TensorInfo(armnn::TensorShape{armnn::Dimensionality::Scalar},
+ return armnn::TensorInfo(TensorShape{armnn::Dimensionality::Scalar},
type,
quantizationScale,
quantizationOffset);
}
+ else if (tensorPtr->dimensionality() == static_cast<unsigned int>(Dimensionality::NotSpecified))
+ {
+ armnn::TensorInfo result(TensorShape{Dimensionality::NotSpecified},
+ type,
+ quantizationScale,
+ quantizationOffset);
+ return result;
+ }
auto dimensions = tensorPtr->dimensions();
unsigned int size = dimensions->size();
std::vector<unsigned int> outputDims(dimensions->begin(), dimensions->begin() + size);
+ bool dimensionsSpecificity[armnn::MaxNumOfTensorDimensions];
+ std::fill_n(dimensionsSpecificity, armnn::MaxNumOfTensorDimensions, true);
+ // For backwards compatibility check if the dimensionSpecificity vector is present first.
+ // The default is to have dimensionSpecificity set to all true's anyway.
+ if (tensorPtr->dimensionSpecificity() != nullptr)
+ {
+ auto dimensionSpecificity = tensorPtr->dimensionSpecificity();
+ size = dimensionSpecificity->size();
+ for (unsigned int i = 0; i < size; ++i)
+ {
+ dimensionsSpecificity[i] = dimensionSpecificity->Get(i);
+ }
+ }
+ // Construct a TensorShape
+ TensorShape shape(size, outputDims.data(), dimensionsSpecificity);
auto quantizationScales = tensorPtr->quantizationScales();
-
if (quantizationScales)
{
unsigned int quantizationScalesSize = quantizationScales->size();
std::vector<float> scales(quantizationScales->begin(), quantizationScales->begin() + quantizationScalesSize);
unsigned int quantizationDim = tensorPtr->quantizationDim();
- armnn::TensorInfo result(size,
- outputDims.data(),
+ armnn::TensorInfo result(shape,
type,
scales,
quantizationDim);
return result;
}
- float quantizationScale = tensorPtr->quantizationScale();
- int32_t quantizationOffset = tensorPtr->quantizationOffset();
-
// two statements (on purpose) for easier debugging:
- armnn::TensorInfo result(size,
- outputDims.data(),
+ armnn::TensorInfo result(shape,
type,
quantizationScale,
quantizationOffset);
+
return result;
}
diff --git a/src/armnnSerializer/ArmnnSchema.fbs b/src/armnnSerializer/ArmnnSchema.fbs
index 9dbf6aa3df..e2b3a3c288 100644
--- a/src/armnnSerializer/ArmnnSchema.fbs
+++ b/src/armnnSerializer/ArmnnSchema.fbs
@@ -67,6 +67,7 @@ table TensorInfo {
quantizationScales:[float];
quantizationDim:uint;
dimensionality:uint = 1;
+ dimensionSpecificity:[bool];
}
struct Connection {
diff --git a/src/armnnSerializer/ArmnnSchema_generated.h b/src/armnnSerializer/ArmnnSchema_generated.h
index cb9e686ca9..524ffb0182 100644
--- a/src/armnnSerializer/ArmnnSchema_generated.h
+++ b/src/armnnSerializer/ArmnnSchema_generated.h
@@ -1,5 +1,5 @@
//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
// automatically generated by the FlatBuffers compiler, do not modify
@@ -1633,7 +1633,8 @@ struct TensorInfo FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
VT_QUANTIZATIONOFFSET = 10,
VT_QUANTIZATIONSCALES = 12,
VT_QUANTIZATIONDIM = 14,
- VT_DIMENSIONALITY = 16
+ VT_DIMENSIONALITY = 16,
+ VT_DIMENSIONSPECIFICITY = 18
};
const flatbuffers::Vector<uint32_t> *dimensions() const {
return GetPointer<const flatbuffers::Vector<uint32_t> *>(VT_DIMENSIONS);
@@ -1656,6 +1657,9 @@ struct TensorInfo FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
uint32_t dimensionality() const {
return GetField<uint32_t>(VT_DIMENSIONALITY, 1);
}
+ const flatbuffers::Vector<uint8_t> *dimensionSpecificity() const {
+ return GetPointer<const flatbuffers::Vector<uint8_t> *>(VT_DIMENSIONSPECIFICITY);
+ }
bool Verify(flatbuffers::Verifier &verifier) const {
return VerifyTableStart(verifier) &&
VerifyOffset(verifier, VT_DIMENSIONS) &&
@@ -1667,6 +1671,8 @@ struct TensorInfo FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
verifier.VerifyVector(quantizationScales()) &&
VerifyField<uint32_t>(verifier, VT_QUANTIZATIONDIM) &&
VerifyField<uint32_t>(verifier, VT_DIMENSIONALITY) &&
+ VerifyOffset(verifier, VT_DIMENSIONSPECIFICITY) &&
+ verifier.VerifyVector(dimensionSpecificity()) &&
verifier.EndTable();
}
};
@@ -1696,6 +1702,9 @@ struct TensorInfoBuilder {
void add_dimensionality(uint32_t dimensionality) {
fbb_.AddElement<uint32_t>(TensorInfo::VT_DIMENSIONALITY, dimensionality, 1);
}
+ void add_dimensionSpecificity(flatbuffers::Offset<flatbuffers::Vector<uint8_t>> dimensionSpecificity) {
+ fbb_.AddOffset(TensorInfo::VT_DIMENSIONSPECIFICITY, dimensionSpecificity);
+ }
explicit TensorInfoBuilder(flatbuffers::FlatBufferBuilder &_fbb)
: fbb_(_fbb) {
start_ = fbb_.StartTable();
@@ -1716,8 +1725,10 @@ inline flatbuffers::Offset<TensorInfo> CreateTensorInfo(
int32_t quantizationOffset = 0,
flatbuffers::Offset<flatbuffers::Vector<float>> quantizationScales = 0,
uint32_t quantizationDim = 0,
- uint32_t dimensionality = 1) {
+ uint32_t dimensionality = 1,
+ flatbuffers::Offset<flatbuffers::Vector<uint8_t>> dimensionSpecificity = 0) {
TensorInfoBuilder builder_(_fbb);
+ builder_.add_dimensionSpecificity(dimensionSpecificity);
builder_.add_dimensionality(dimensionality);
builder_.add_quantizationDim(quantizationDim);
builder_.add_quantizationScales(quantizationScales);
@@ -1736,9 +1747,11 @@ inline flatbuffers::Offset<TensorInfo> CreateTensorInfoDirect(
int32_t quantizationOffset = 0,
const std::vector<float> *quantizationScales = nullptr,
uint32_t quantizationDim = 0,
- uint32_t dimensionality = 1) {
+ uint32_t dimensionality = 1,
+ const std::vector<uint8_t> *dimensionSpecificity = nullptr) {
auto dimensions__ = dimensions ? _fbb.CreateVector<uint32_t>(*dimensions) : 0;
auto quantizationScales__ = quantizationScales ? _fbb.CreateVector<float>(*quantizationScales) : 0;
+ auto dimensionSpecificity__ = dimensionSpecificity ? _fbb.CreateVector<uint8_t>(*dimensionSpecificity) : 0;
return armnnSerializer::CreateTensorInfo(
_fbb,
dimensions__,
@@ -1747,7 +1760,8 @@ inline flatbuffers::Offset<TensorInfo> CreateTensorInfoDirect(
quantizationOffset,
quantizationScales__,
quantizationDim,
- dimensionality);
+ dimensionality,
+ dimensionSpecificity__);
}
struct ByteData FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
diff --git a/src/armnnSerializer/Serializer.cpp b/src/armnnSerializer/Serializer.cpp
index bcdaa087fb..0586700ada 100644
--- a/src/armnnSerializer/Serializer.cpp
+++ b/src/armnnSerializer/Serializer.cpp
@@ -1668,6 +1668,14 @@ flatbuffers::Offset<TensorInfo> SerializerStrategy::CreateTensorInfo(const armn
shape.push_back(tensorInfo.GetShape()[dim]);
}
+ std::vector<bool> specificity;
+ // This assumes that the TensorShape constructors have ensured that the size of m_DimensionsSpecificity
+ // matches the size of dimensions.
+ for(unsigned int dim = 0; dim < tensorInfo.GetShape().GetNumDimensions(); ++dim)
+ {
+ specificity.push_back(tensorInfo.GetShape().GetDimensionSpecificity(dim));
+ }
+
if (tensorInfo.HasPerAxisQuantization())
{
// Create FlatBuffer TensorInfo
@@ -1680,7 +1688,8 @@ flatbuffers::Offset<TensorInfo> SerializerStrategy::CreateTensorInfo(const armn
m_flatBufferBuilder.CreateVector(tensorInfo.GetQuantizationScales()),
tensorInfo.GetQuantizationDim().value(),
static_cast<unsigned int>
- (tensorInfo.GetShape().GetDimensionality()));
+ (tensorInfo.GetShape().GetDimensionality()),
+ m_flatBufferBuilder.CreateVector(specificity));
return flatBufferTensorInfo;
}
@@ -1693,7 +1702,8 @@ flatbuffers::Offset<TensorInfo> SerializerStrategy::CreateTensorInfo(const armn
0,
0,
static_cast<unsigned int>
- (tensorInfo.GetShape().GetDimensionality()));
+ (tensorInfo.GetShape().GetDimensionality()),
+ m_flatBufferBuilder.CreateVector(specificity));
return flatBufferTensorInfo;
}