From edf5230830f8d9794ef0aeb8986da99734ff925f Mon Sep 17 00:00:00 2001
From: Francis Murtagh
Date: Fri, 7 Jun 2019 11:28:49 +0100
Subject: IVGCVSW-3228 Fix bias quantization to be INT32 not QAsymm8

 * Add function to calculate bias tensor quantization scale from input
   and weights scales.
 * Change visitor method of Conv2d, DepthwiseConv and FullyConnected to
   use the new function.
 * Fix Unit tests to expect correctly calculated quantization parameters.

Change-Id: Ic36f47ceea81243c813d74ccf791e984c819cc71
Signed-off-by: Francis Murtagh
---
 src/armnn/QuantizerVisitor.hpp | 6 ++++++
 1 file changed, 6 insertions(+)

(limited to 'src/armnn/QuantizerVisitor.hpp')

diff --git a/src/armnn/QuantizerVisitor.hpp b/src/armnn/QuantizerVisitor.hpp
index 6e5609df02..6463350f2b 100644
--- a/src/armnn/QuantizerVisitor.hpp
+++ b/src/armnn/QuantizerVisitor.hpp
@@ -139,6 +139,12 @@ private:
     /// Record the guids so we can easily find the layers later
     void RecordLayer(const IConnectableLayer* srcLayer, IConnectableLayer* qLayer);

+    /// Sets the bias quantization scale based on input and weight scales
+    ConstTensor CreateQuantizedBias(const IConnectableLayer* srcLayer,
+                                    const ConstTensor& weights,
+                                    const Optional<ConstTensor>& biases,
+                                    std::vector<int32_t>& weightsBacking);
+
     /// Reference to the static range visitor used to retrieve the quantization ranges
     const RangeTracker& m_Ranges;
--
cgit v1.2.1