aboutsummaryrefslogtreecommitdiff
path: root/src/armnn/Network.cpp
diff options
context:
space:
mode:
authorColm Donelan <colm.donelan@arm.com>2024-03-29 16:00:24 +0000
committerColm Donelan <colm.donelan@arm.com>2024-04-17 13:46:29 +0000
commit4f1771ab4d321afba9f5a52411855b5dc33bf247 (patch)
treea5772debacfaaea83b078d923633c10715284d98 /src/armnn/Network.cpp
parent4e74df277b80db0be5c19b07d9f59575dc7ef3d6 (diff)
downloadarmnn-4f1771ab4d321afba9f5a52411855b5dc33bf247.tar.gz
MLCE-1248 Removing limitations on zero scale value in quantization.
Currently Arm NN will fail to load models containing a quantization scale value of zero. Signed-off-by: Colm Donelan <colm.donelan@arm.com> Change-Id: Ifefcee1279b8667da63d1aa7d42e5d44875f9fbe
Diffstat (limited to 'src/armnn/Network.cpp')
-rw-r--r--src/armnn/Network.cpp11
1 file changed, 1 insertion(+), 10 deletions(-)
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index 6f33fb6a15..810abaa5b9 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -818,16 +818,6 @@ bool CheckScaleSetOnQuantizedType(Layer* layer, Optional<std::vector<std::string
throw InvalidArgumentException("Per Axis Quantization is not supported in "
"Asymmetric Quantization Datatype.");
}
- if ((!info.HasPerAxisQuantization() && info.GetQuantizationScale() == 0.f)
- || (info.HasPerAxisQuantization() && (quantizationScales.end() !=
- std::find(quantizationScales.begin(), quantizationScales.end(), 0.f)))) {
- noErrors = false;
- std::stringstream ss;
- ss << "output " << i << " of layer " << GetLayerTypeAsCString(layer->GetType())
- << " (" << layer->GetNameStr() << ") is of type"
- << " Quantized value but the scale parameter has not been set";
- ReportError(ss.str(), errMessages);
- }
// Softmax under QuantisedAsymm8 must always be scale (1.0f/256.0f) and offset 0
if (!info.HasPerAxisQuantization() && quantizationDataType == DataType::QAsymmU8 &&
(info.GetQuantizationScale() != (1.0f / 256.0f) ||
@@ -841,6 +831,7 @@ bool CheckScaleSetOnQuantizedType(Layer* layer, Optional<std::vector<std::string
info.SetQuantizationScale((1.0f / 256.0f));
info.SetQuantizationOffset(0);
outputSlot.SetTensorInfo(info);
+ ReportError(ss.str(), errMessages);
}
break;
default: