diff options
author | Mike Kelly <mike.kelly@arm.com> | 2021-01-14 10:04:56 +0000 |
---|---|---|
committer | mike.kelly <mike.kelly@arm.com> | 2021-01-14 20:12:49 +0000 |
commit | c5e5a04bd26c2d1838828ba291464967a3e20f3f (patch) | |
tree | ad7354097ef3053416cf2162aa8e26f0f0f5b578 /src/armnn/QuantizerVisitor.cpp | |
parent | ec27971facf490fdafd1da1172fa88327808866a (diff) | |
download | armnn-c5e5a04bd26c2d1838828ba291464967a3e20f3f.tar.gz |
MLCE-325 ArmnnQuantizer incorrectly Quantizes all DataTypes
* ArmnnQuantizer incorrectly converts boolean or integer DataTypes to quantized
DataTypes. This breaks layers like ArgMinMax where the output contains the
index of an element along an axis.
Signed-off-by: Mike Kelly <mike.kelly@arm.com>
Change-Id: I272c3d0f48bf884a2480bfa43eb14ec265fcda6b
Diffstat (limited to 'src/armnn/QuantizerVisitor.cpp')
-rw-r--r-- | src/armnn/QuantizerVisitor.cpp | 24 |
1 file changed, 14 insertions, 10 deletions
diff --git a/src/armnn/QuantizerVisitor.cpp b/src/armnn/QuantizerVisitor.cpp index 7889f03c5b..0e9d22463f 100644 --- a/src/armnn/QuantizerVisitor.cpp +++ b/src/armnn/QuantizerVisitor.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. // SPDX-License-Identifier: MIT // @@ -52,16 +52,20 @@ void QuantizerVisitor::SetQuantizedInputConnections(const IConnectableLayer* src IInputSlot& newInputSlot = quantizedLayer->GetInputSlot(i); IOutputSlot& newOutputSlot = prevQuantizedLayer->GetOutputSlot(slotIdx); newOutputSlot.Connect(newInputSlot); - - // Fetch the min/max ranges that were computed earlier - auto range = m_Ranges.GetRange(layerToFind.GetGuid(), slotIdx); - OffsetScalePair qParams = m_QuantizationScheme->ComputeScheme(range.first, range.second); - - // Set the quantization params TensorInfo info(outputSlot->GetTensorInfo()); - info.SetDataType(m_QuantizationScheme->GetDataType()); - info.SetQuantizationOffset(qParams.second); - info.SetQuantizationScale(qParams.first); + + // Only try to set quantization params on tensors that can be quantized + if (inputSlot->GetConnectedOutputSlot()->GetTensorInfo().GetDataType() != DataType::Boolean && + inputSlot->GetConnectedOutputSlot()->GetTensorInfo().GetDataType() != DataType::Signed32 && + inputSlot->GetConnectedOutputSlot()->GetTensorInfo().GetDataType() != DataType::Signed64) + { + // Fetch the min/max ranges that were computed earlier + auto range = m_Ranges.GetRange(layerToFind.GetGuid(), slotIdx); + OffsetScalePair qParams = m_QuantizationScheme->ComputeScheme(range.first, range.second); + info.SetDataType(m_QuantizationScheme->GetDataType()); + info.SetQuantizationOffset(qParams.second); + info.SetQuantizationScale(qParams.first); + } newOutputSlot.SetTensorInfo(info); } } |