author     Sadik Armagan <sadik.armagan@arm.com>  2020-03-27 15:56:57 +0000
committer  Sadik Armagan <sadik.armagan@arm.com>  2020-03-27 15:56:57 +0000
commit     1a84fe3b92aaab61d2f3b865d347459e16eb43d4 (patch)
tree       140928df39ee8d46e3fb4e3ccc7883116a1ce929 /src/armnnSerializer/Serializer.cpp
parent     57ef0088d20dd708ff92222d244ea02f1e1e5216 (diff)
download   armnn-1a84fe3b92aaab61d2f3b865d347459e16eb43d4.tar.gz
IVGCVSW-4555 ArmnnConverter (Serializer) does not support per-axis quantization params
* TensorInfo can have multiple scales and a quantization dimension.

Signed-off-by: Sadik Armagan <sadik.armagan@arm.com>
Change-Id: I0ff02e3766996b6a9da6dc4e92d366bc9505c77d
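As background, a minimal sketch of per-axis quantization on the armnn side, using the armnn::TensorInfo overload that takes a scale vector and a quantization dimension. The shapes, scales and offsets below are illustrative, not taken from the patch; the accessors match those used in the diff:

    #include <armnn/Tensor.hpp>
    #include <vector>

    int main()
    {
        // Per-tensor quantization: a single scale/offset pair for the whole tensor.
        armnn::TensorInfo perTensor({ 2, 3, 2, 2 }, armnn::DataType::QAsymmU8,
                                    0.05f /* scale */, 128 /* offset */);

        // Per-axis quantization: one scale per slice along the quantization
        // dimension (axis 0 here, i.e. one scale per output channel).
        std::vector<float> scales { 0.02f, 0.04f };
        armnn::TensorInfo perAxis({ 2, 3, 2, 2 }, armnn::DataType::QSymmS8,
                                  scales, 0 /* quantizationDim */);

        // The patch branches on exactly this predicate when serializing.
        bool hasPerAxis  = perAxis.HasPerAxisQuantization();        // true
        unsigned int dim = perAxis.GetQuantizationDim().value();    // 0
        return (hasPerAxis && dim == 0) ? 0 : 1;
    }
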
Diffstat (limited to 'src/armnnSerializer/Serializer.cpp')
-rw-r--r--  src/armnnSerializer/Serializer.cpp  47
1 file changed, 26 insertions, 21 deletions
diff --git a/src/armnnSerializer/Serializer.cpp b/src/armnnSerializer/Serializer.cpp
index 37ab326a28..cb7a5c456e 100644
--- a/src/armnnSerializer/Serializer.cpp
+++ b/src/armnnSerializer/Serializer.cpp
@@ -1420,25 +1420,43 @@ flatbuffers::Offset<flatbuffers::Vector<T>> SerializerVisitor::CreateDataVector(
return fbVector;
}
-flatbuffers::Offset<serializer::ConstTensor>
- SerializerVisitor::CreateConstTensorInfo(const armnn::ConstTensor& constTensor)
+flatbuffers::Offset<TensorInfo> SerializerVisitor::CreateTensorInfo(const armnn::TensorInfo& tensorInfo)
{
- armnn::TensorInfo tensorInfo = constTensor.GetInfo();
-
// Get the dimensions
std::vector<unsigned int> shape;
-
for(unsigned int dim = 0; dim < tensorInfo.GetShape().GetNumDimensions(); ++dim)
{
shape.push_back(tensorInfo.GetShape()[dim]);
}
+ if (tensorInfo.HasPerAxisQuantization())
+ {
+ // Create FlatBuffer TensorInfo
+ auto flatBufferTensorInfo =
+ serializer::CreateTensorInfo(m_flatBufferBuilder,
+ m_flatBufferBuilder.CreateVector(shape),
+ GetFlatBufferDataType(tensorInfo.GetDataType()),
+ tensorInfo.GetQuantizationScales()[0],
+ tensorInfo.GetQuantizationOffset(),
+ m_flatBufferBuilder.CreateVector(tensorInfo.GetQuantizationScales()),
+ tensorInfo.GetQuantizationDim().value());
+ return flatBufferTensorInfo;
+ }
+
// Create FlatBuffer TensorInfo
auto flatBufferTensorInfo = serializer::CreateTensorInfo(m_flatBufferBuilder,
m_flatBufferBuilder.CreateVector(shape),
GetFlatBufferDataType(tensorInfo.GetDataType()),
tensorInfo.GetQuantizationScale(),
tensorInfo.GetQuantizationOffset());
+ return flatBufferTensorInfo;
+}
+
+flatbuffers::Offset<serializer::ConstTensor>
+ SerializerVisitor::CreateConstTensorInfo(const armnn::ConstTensor& constTensor)
+{
+ armnn::TensorInfo tensorInfo = constTensor.GetInfo();
+
flatbuffers::Offset<void> fbPayload;
switch (tensorInfo.GetDataType())
@@ -1471,6 +1489,7 @@ flatbuffers::Offset<serializer::ConstTensor>
fbPayload = flatBuffersData.o;
break;
}
+ case armnn::DataType::QSymmS8:
case armnn::DataType::QAsymmU8:
case armnn::DataType::Boolean:
default:
@@ -1484,7 +1503,7 @@ flatbuffers::Offset<serializer::ConstTensor>
}
flatbuffers::Offset<serializer::ConstTensor> flatBufferConstTensor = serializer::CreateConstTensor(
m_flatBufferBuilder,
- flatBufferTensorInfo,
+ CreateTensorInfo(tensorInfo),
GetFlatBufferConstTensorData(tensorInfo.GetDataType()),
fbPayload);
return flatBufferConstTensor;
@@ -1533,24 +1552,10 @@ std::vector<fb::Offset<serializer::OutputSlot>>
const IOutputSlot& outputSlot = layer->GetOutputSlot(slotIndex);
const armnn::TensorInfo& tensorInfo = outputSlot.GetTensorInfo();
- // Get the dimensions
- std::vector<unsigned int> shape;
- for(unsigned int dim = 0; dim < tensorInfo.GetShape().GetNumDimensions(); ++dim)
- {
- shape.push_back(tensorInfo.GetShape()[dim]);
- }
-
- // Create FlatBuffer TensorInfo
- auto flatBufferTensorInfo = serializer::CreateTensorInfo(m_flatBufferBuilder,
- m_flatBufferBuilder.CreateVector(shape),
- GetFlatBufferDataType(tensorInfo.GetDataType()),
- tensorInfo.GetQuantizationScale(),
- tensorInfo.GetQuantizationOffset());
-
// Create FlatBuffer Outputslot
outputSlots.push_back(serializer::CreateOutputSlot(m_flatBufferBuilder,
slotIndex,
- flatBufferTensorInfo));
+ CreateTensorInfo(tensorInfo)));
}
return outputSlots;
}
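
For readability, here is the new helper reassembled from the hunks above into one piece (no new logic; the comment on backward compatibility is my reading of why scales[0] is still written to the scalar field):

    flatbuffers::Offset<TensorInfo> SerializerVisitor::CreateTensorInfo(const armnn::TensorInfo& tensorInfo)
    {
        // Get the dimensions
        std::vector<unsigned int> shape;
        for(unsigned int dim = 0; dim < tensorInfo.GetShape().GetNumDimensions(); ++dim)
        {
            shape.push_back(tensorInfo.GetShape()[dim]);
        }

        if (tensorInfo.HasPerAxisQuantization())
        {
            // Per-axis path: serialize the full scale vector and the axis,
            // keeping scales[0] in the scalar field, presumably so that
            // readers of the old per-tensor field still see a usable scale.
            return serializer::CreateTensorInfo(m_flatBufferBuilder,
                                                m_flatBufferBuilder.CreateVector(shape),
                                                GetFlatBufferDataType(tensorInfo.GetDataType()),
                                                tensorInfo.GetQuantizationScales()[0],
                                                tensorInfo.GetQuantizationOffset(),
                                                m_flatBufferBuilder.CreateVector(tensorInfo.GetQuantizationScales()),
                                                tensorInfo.GetQuantizationDim().value());
        }

        // Per-tensor path: single scale and offset, as before.
        return serializer::CreateTensorInfo(m_flatBufferBuilder,
                                            m_flatBufferBuilder.CreateVector(shape),
                                            GetFlatBufferDataType(tensorInfo.GetDataType()),
                                            tensorInfo.GetQuantizationScale(),
                                            tensorInfo.GetQuantizationOffset());
    }

Both CreateConstTensorInfo and the output-slot serialization now route through this helper, so per-axis parameters are handled consistently in both places.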