From b85547024490d4c89538dcebfb0a2730e18a6d8d Mon Sep 17 00:00:00 2001
From: David Monahan <david.monahan@arm.com>
Date: Thu, 25 Apr 2019 16:03:38 +0100
Subject: IVGCVSW-2657: Fix to force correct quantisation parameters for QASYMM8 Softmax

Signed-off-by: David Monahan <david.monahan@arm.com>
Change-Id: I0989ea843714ba1d5da756bb87ddefa3706b07eb
---
 include/armnn/INetwork.hpp |  2 ++
 src/armnn/Network.cpp      | 18 ++++++++++++++++--
 2 files changed, 18 insertions(+), 2 deletions(-)

diff --git a/include/armnn/INetwork.hpp b/include/armnn/INetwork.hpp
index a15ceb1c15..7141770298 100644
--- a/include/armnn/INetwork.hpp
+++ b/include/armnn/INetwork.hpp
@@ -219,6 +219,8 @@ public:
                                                       const char* name = nullptr) = 0;
 
     /// Adds a softmax layer to the network.
+    /// If the data type is QAsymm8, then the output quantization parameters
+    /// must have a scale of 1/256 and an offset of 0.
     /// @param softmaxDescriptor - SoftmaxDescriptor to configure the softmax.
     /// @param name - Optional name for the layer.
     /// @return - Interface for configuring the layer.
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index 0bd8d4b69b..a38bcf1910 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -114,8 +114,8 @@ bool CheckScaleSetOnQuantizedType(Layer* layer, Optional<std::vector<std::string>&> errMessages)
     bool noErrors = true;
     unsigned int numOutputs = layer->GetNumOutputSlots();
     for (unsigned int i = 0; i < numOutputs; i++) {
-        const OutputSlot &outputSlot = layer->GetOutputSlot(i);
-        const TensorInfo &info = outputSlot.GetTensorInfo();
+        OutputSlot& outputSlot = layer->GetOutputSlot(i);
+        TensorInfo info = outputSlot.GetTensorInfo();
         if (DataType::QuantisedAsymm8 == info.GetDataType()) {
             if (0.f == info.GetQuantizationScale()) {
                 noErrors = false;
@@ -125,6 +125,20 @@ bool CheckScaleSetOnQuantizedType(Layer* layer, Optional<std::vector<std::string>&> errMessages)
                    << " Quantized 8 bit but its scale parameter has not been set";
                 ReportError(ss.str(), errMessages);
             }
+
+            if ((info.GetQuantizationScale() != (1.0f / 256.0f) ||
+                 info.GetQuantizationOffset() != 0) &&
+                 layer->GetType() == armnn::LayerType::Softmax)
+            {
+                std::stringstream ss;
+                ss << "Quantization parameters for Softmax layer (Scale: " <<
+                      info.GetQuantizationScale() << " and Offset: " << info.GetQuantizationOffset() <<
+                      ") are incorrect and have been updated to Scale: 0.00390625 and Offset: 0";
+                BOOST_LOG_TRIVIAL(warning) << ss.str();
+                info.SetQuantizationScale((1.0f / 256.0f));
+                info.SetQuantizationOffset(0);
+                outputSlot.SetTensorInfo(info);
+            }
         }
     }
     return noErrors;
-- 
cgit v1.2.1
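
As a usage illustration of the constraint documented in INetwork.hpp above, here is a minimal sketch (not part of the commit) of building a QAsymm8 softmax with the Arm NN INetwork API and setting the output quantization parameters it requires. The tensor shape, binding ids and layer names are assumptions made for the example, not taken from the patch.

    #include <armnn/ArmNN.hpp>

    int main()
    {
        armnn::INetworkPtr network = armnn::INetwork::Create();

        // Hypothetical 1x10 quantised tensor feeding the softmax.
        armnn::IConnectableLayer* input = network->AddInputLayer(0, "input");
        armnn::TensorInfo inputInfo({ 1, 10 }, armnn::DataType::QuantisedAsymm8, 0.1f, 128);
        input->GetOutputSlot(0).SetTensorInfo(inputInfo);

        armnn::SoftmaxDescriptor softmaxDesc;
        armnn::IConnectableLayer* softmax = network->AddSoftmaxLayer(softmaxDesc, "softmax");
        input->GetOutputSlot(0).Connect(softmax->GetInputSlot(0));

        // QAsymm8 softmax outputs must use scale 1/256 and offset 0; with this
        // change, other values are corrected with a warning by the check above.
        armnn::TensorInfo outputInfo({ 1, 10 }, armnn::DataType::QuantisedAsymm8, 1.0f / 256.0f, 0);
        softmax->GetOutputSlot(0).SetTensorInfo(outputInfo);

        armnn::IConnectableLayer* output = network->AddOutputLayer(0, "output");
        softmax->GetOutputSlot(0).Connect(output->GetInputSlot(0));
        return 0;
    }

A scale of 1/256 with offset 0 maps the quantised range [0, 255] onto [0, ~0.996], covering the [0, 1] output range of softmax as closely as an 8-bit asymmetric encoding allows; 1/256 is the value 0.00390625 reported in the new warning message.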