about summary refs log tree commit diff
path: root/src/armnnDeserializer
diff options
context:
space:
mode:
authorSadik Armagan <sadik.armagan@arm.com>2020-03-27 15:56:57 +0000
committerSadik Armagan <sadik.armagan@arm.com>2020-03-27 15:56:57 +0000
commit1a84fe3b92aaab61d2f3b865d347459e16eb43d4 (patch)
tree140928df39ee8d46e3fb4e3ccc7883116a1ce929 /src/armnnDeserializer
parent57ef0088d20dd708ff92222d244ea02f1e1e5216 (diff)
download armnn-1a84fe3b92aaab61d2f3b865d347459e16eb43d4.tar.gz
IVGCVSW-4555 ArmnnConverter (Serializer) does not support per-axis quantization params
* TensorInfo can have multiple scales and quantization dimension. Signed-off-by: Sadik Armagan <sadik.armagan@arm.com> Change-Id: I0ff02e3766996b6a9da6dc4e92d366bc9505c77d
Diffstat (limited to 'src/armnnDeserializer')
-rw-r--r--src/armnnDeserializer/Deserializer.cpp24
1 file changed, 22 insertions(+), 2 deletions(-)
diff --git a/src/armnnDeserializer/Deserializer.cpp b/src/armnnDeserializer/Deserializer.cpp
index bc6fbf0194..58232a2763 100644
--- a/src/armnnDeserializer/Deserializer.cpp
+++ b/src/armnnDeserializer/Deserializer.cpp
@@ -508,6 +508,9 @@ armnn::TensorInfo ToTensorInfo(Deserializer::TensorRawPtr tensorPtr)
case DataType_QAsymmS8:
type = armnn::DataType::QAsymmS8;
break;
+ case DataType_QSymmS8:
+ type = armnn::DataType::QSymmS8;
+ break;
case DataType_QuantisedAsymm8:
case DataType_QAsymmU8:
type = armnn::DataType::QAsymmU8;
@@ -539,13 +542,30 @@ armnn::TensorInfo ToTensorInfo(Deserializer::TensorRawPtr tensorPtr)
location.AsString()));
}
}
- float quantizationScale = tensorPtr->quantizationScale();
- int32_t quantizationOffset = tensorPtr->quantizationOffset();
+
auto dimensions = tensorPtr->dimensions();
unsigned int size = dimensions->size();
std::vector<unsigned int> outputDims(dimensions->begin(), dimensions->begin() + size);
+ auto quantizationScales = tensorPtr->quantizationScales();
+
+ if (quantizationScales)
+ {
+ unsigned int quantizationScalesSize = quantizationScales->size();
+ std::vector<float> scales(quantizationScales->begin(), quantizationScales->begin() + quantizationScalesSize);
+ unsigned int quantizationDim = tensorPtr->quantizationDim();
+ armnn::TensorInfo result(size,
+ outputDims.data(),
+ type,
+ scales,
+ quantizationDim);
+ return result;
+ }
+
+ float quantizationScale = tensorPtr->quantizationScale();
+ int32_t quantizationOffset = tensorPtr->quantizationOffset();
+
// two statements (on purpose) for easier debugging:
armnn::TensorInfo result(size,
outputDims.data(),