author    David Monahan <david.monahan@arm.com>  2019-04-25 16:03:38 +0100
committer David Monahan <david.monahan@arm.com>  2019-04-29 07:15:23 +0000
commit    b85547024490d4c89538dcebfb0a2730e18a6d8d (patch)
tree      a6962f8c2392c4be7edd2b00249ff6a28bfdcf30
parent    04a8b05b25d3b752040a262a2725fa59753dd9b5 (diff)
download  armnn-b85547024490d4c89538dcebfb0a2730e18a6d8d.tar.gz

IVGCVSW-2657: Fix to force correct quantisation parameters for QASYMM8 Softmax

Signed-off-by: David Monahan <david.monahan@arm.com>
Change-Id: I0989ea843714ba1d5da756bb87ddefa3706b07eb
-rw-r--r--  include/armnn/INetwork.hpp |  2
-rw-r--r--  src/armnn/Network.cpp      | 18
2 files changed, 18 insertions(+), 2 deletions(-)
diff --git a/include/armnn/INetwork.hpp b/include/armnn/INetwork.hpp
index a15ceb1c15..7141770298 100644
--- a/include/armnn/INetwork.hpp
+++ b/include/armnn/INetwork.hpp
@@ -219,6 +219,8 @@ public:
         const char* name = nullptr) = 0;
 
     /// Adds a softmax layer to the network.
+    /// If the data type is QAsymm8, then the output quantization parameters
+    /// must have a scale of 1/256 and an offset of 0.
     /// @param softmaxDescriptor - SoftmaxDescriptor to configure the softmax.
     /// @param name - Optional name for the layer.
     /// @return - Interface for configuring the layer.
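
As a companion to the new doc comment: the sketch below shows how a caller building a QAsymm8 network by hand could satisfy this constraint up front, rather than relying on the automatic correction added in Network.cpp. It is illustrative only; the helper name ConfigureSoftmaxOutput and the shape argument are assumptions, not part of the patch.

    #include <armnn/INetwork.hpp>
    #include <armnn/Tensor.hpp>
    #include <armnn/Types.hpp>

    // Illustrative helper: give a Softmax layer's output the quantization
    // parameters that armnn requires for QAsymm8 (scale 1/256, offset 0).
    void ConfigureSoftmaxOutput(armnn::IConnectableLayer* softmaxLayer,
                                const armnn::TensorShape& outputShape)
    {
        armnn::TensorInfo outputInfo(outputShape, armnn::DataType::QuantisedAsymm8);
        outputInfo.SetQuantizationScale(1.0f / 256.0f); // 0.00390625
        outputInfo.SetQuantizationOffset(0);
        softmaxLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
    }
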
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index 0bd8d4b69b..a38bcf1910 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -114,8 +114,8 @@ bool CheckScaleSetOnQuantizedType(Layer* layer, Optional<std::vector<std::string
     bool noErrors = true;
     unsigned int numOutputs = layer->GetNumOutputSlots();
     for (unsigned int i = 0; i < numOutputs; i++) {
-        const OutputSlot &outputSlot = layer->GetOutputSlot(i);
-        const TensorInfo &info = outputSlot.GetTensorInfo();
+        OutputSlot& outputSlot = layer->GetOutputSlot(i);
+        TensorInfo info = outputSlot.GetTensorInfo();
         if (DataType::QuantisedAsymm8 == info.GetDataType()) {
             if (0.f == info.GetQuantizationScale()) {
                 noErrors = false;
@@ -125,6 +125,20 @@ bool CheckScaleSetOnQuantizedType(Layer* layer, Optional<std::vector<std::string
                    << " Quantized 8 bit but its scale parameter has not been set";
                 ReportError(ss.str(), errMessages);
             }
+            // Softmax under QuantisedAsymm8 must always use a scale of (1.0f / 256.0f) and an offset of 0
+            if ((info.GetQuantizationScale() != (1.0f / 256.0f) ||
+                 info.GetQuantizationOffset() != 0) &&
+                layer->GetType() == armnn::LayerType::Softmax)
+            {
+                std::stringstream ss;
+                ss << "Quantization parameters for Softmax layer (Scale: "
+                   << info.GetQuantizationScale() << " and Offset: " << info.GetQuantizationOffset()
+                   << ") are incorrect and have been updated to Scale: 0.00390625 and Offset: 0";
+                BOOST_LOG_TRIVIAL(warning) << ss.str();
+                info.SetQuantizationScale(1.0f / 256.0f);
+                info.SetQuantizationOffset(0);
+                outputSlot.SetTensorInfo(info);
+            }
         }
     }
     return noErrors;
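
For reference, the forced values follow from the Softmax output range: every element lies in [0, 1), so a scale of 1/256 with an offset of 0 maps that range onto the full uint8 range, and an input of exactly 1.0 saturates to 255. A standalone sketch of the affine quantization arithmetic (not armnn code; the function name is illustrative):

    #include <algorithm>
    #include <cmath>
    #include <cstdint>
    #include <cstdio>

    // q = round(x / scale) + offset, clamped to [0, 255], using the
    // parameters the patch enforces for QAsymm8 Softmax outputs.
    static uint8_t QuantizeSoftmaxOutput(float x)
    {
        const float scale = 1.0f / 256.0f; // 0.00390625, as in the warning message
        const int offset = 0;
        const int q = static_cast<int>(std::round(x / scale)) + offset;
        return static_cast<uint8_t>(std::min(std::max(q, 0), 255));
    }

    int main()
    {
        std::printf("%d\n", static_cast<int>(QuantizeSoftmaxOutput(0.5f))); // 128
        std::printf("%d\n", static_cast<int>(QuantizeSoftmaxOutput(1.0f))); // 255 (saturated)
        return 0;
    }
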