From 0085978ac40ecd008195d635cd009a1d4f49fb74 Mon Sep 17 00:00:00 2001
From: Aron Virginas-Tar
Date: Mon, 11 Feb 2019 12:21:27 +0000
Subject: IVGCVSW-2676 Make biases optional in ILayerVisitor for Convolution2D,
 DepthwiseConvolution2D and FullyConnected

Change-Id: I3048504ff699fdb266488e7c07b7262e5843d4b0
Signed-off-by: Aron Virginas-Tar
---
 src/armnn/QuantizerVisitor.cpp | 67 ++++++++++++++++++------------------------
 1 file changed, 28 insertions(+), 39 deletions(-)

(limited to 'src/armnn/QuantizerVisitor.cpp')

diff --git a/src/armnn/QuantizerVisitor.cpp b/src/armnn/QuantizerVisitor.cpp
index c5e203ef86..f5ff83c31f 100644
--- a/src/armnn/QuantizerVisitor.cpp
+++ b/src/armnn/QuantizerVisitor.cpp
@@ -82,29 +82,24 @@ void QuantizerVisitor::VisitActivationLayer(const IConnectableLayer* layer,
 void QuantizerVisitor::VisitFullyConnectedLayer(const IConnectableLayer *layer,
                                                 const FullyConnectedDescriptor& desc,
                                                 const ConstTensor& weights,
+                                                const Optional<ConstTensor>& biases,
                                                 const char *name)
 {
     std::vector<uint8_t> weightsBacking;
     ConstTensor qWeights = CreateQuantizedConst(weights, weightsBacking);
 
-    IConnectableLayer* newLayer = m_QuantizedNetwork->AddFullyConnectedLayer(desc, qWeights, name);
-    RecordLayer(layer, newLayer);
-    SetQuantizedInputConnections(layer, newLayer);
-}
-
-void QuantizerVisitor::VisitFullyConnectedLayer(const IConnectableLayer *layer,
-                                                const FullyConnectedDescriptor& desc,
-                                                const ConstTensor& weights,
-                                                const ConstTensor& bias,
-                                                const char *name)
-{
-    std::vector<uint8_t> weightsBacking;
-    ConstTensor qWeights = CreateQuantizedConst(weights, weightsBacking);
-
-    std::vector<uint8_t> biasBacking;
-    ConstTensor qBias = CreateQuantizedConst(bias, biasBacking);
+    IConnectableLayer* newLayer;
+    if (biases.has_value())
+    {
+        std::vector<uint8_t> biasBacking;
+        ConstTensor qBias = CreateQuantizedConst(biases.value(), biasBacking);
+        newLayer = m_QuantizedNetwork->AddFullyConnectedLayer(desc, qWeights, qBias, name);
+    }
+    else
+    {
+        newLayer = m_QuantizedNetwork->AddFullyConnectedLayer(desc, qWeights, name);
+    }
 
-    IConnectableLayer* newLayer = m_QuantizedNetwork->AddFullyConnectedLayer(desc, qWeights, qBias, name);
     RecordLayer(layer, newLayer);
     SetQuantizedInputConnections(layer, newLayer);
 }
@@ -156,36 +151,30 @@ void QuantizerVisitor::VisitBatchNormalizationLayer(const IConnectableLayer* lay
 void QuantizerVisitor::VisitConvolution2dLayer(const IConnectableLayer* layer,
                                                const Convolution2dDescriptor& convolution2dDescriptor,
                                                const ConstTensor& weights,
+                                               const Optional<ConstTensor>& biases,
                                                const char* name)
 {
     std::vector<uint8_t> weightsBacking;
     ConstTensor qWeights = CreateQuantizedConst(weights, weightsBacking);
 
-    IConnectableLayer* newLayer = m_QuantizedNetwork->AddConvolution2dLayer(convolution2dDescriptor,
-                                                                            qWeights,
-                                                                            name);
-    RecordLayer(layer, newLayer);
-    SetQuantizedInputConnections(layer, newLayer);
-}
-
-void QuantizerVisitor::VisitConvolution2dLayer(const IConnectableLayer* layer,
-                                               const Convolution2dDescriptor& convolution2dDescriptor,
-                                               const ConstTensor& weights,
-                                               const ConstTensor& biases,
-                                               const char* name)
-{
-    std::vector<uint8_t> weightsBacking;
-    ConstTensor qWeights = CreateQuantizedConst(weights, weightsBacking);
+    IConnectableLayer* newLayer;
+    if (biases.has_value())
+    {
+        std::vector<uint8_t> biasesBacking;
+        ConstTensor qBiases = CreateQuantizedConst(biases.value(), biasesBacking);
 
-    std::vector<uint8_t> biasesBacking;
-    ConstTensor qBiases = CreateQuantizedConst(weights, biasesBacking);
+        newLayer = m_QuantizedNetwork->AddConvolution2dLayer(convolution2dDescriptor,
+                                                             qWeights,
+                                                             qBiases,
+                                                             name);
+    }
+    else
+    {
+        newLayer = m_QuantizedNetwork->AddConvolution2dLayer(convolution2dDescriptor, qWeights, name);
+    }
 
-    IConnectableLayer* newLayer = m_QuantizedNetwork->AddConvolution2dLayer(convolution2dDescriptor,
-                                                                            qWeights,
-                                                                            qBiases,
-                                                                            name);
     RecordLayer(layer, newLayer);
     SetQuantizedInputConnections(layer, newLayer);
 }
 
-} //namespace armnn
+} // namespace armnn
--
cgit v1.2.1
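
Note (not part of the patch): the change above dispatches on Optional<ConstTensor>::has_value() to pick between the bias and no-bias AddFullyConnectedLayer/AddConvolution2dLayer overloads. The standalone sketch below mirrors that pattern from a caller's point of view; the helper name AddFullyConnectedWithOptionalBias and the exact include set are illustrative assumptions, while the INetwork overloads and the has_value()/value() calls are the same ones used in the patch.

// Sketch only: same Optional<ConstTensor> dispatch as the QuantizerVisitor,
// written as a free helper around armnn::INetwork.
#include <armnn/Descriptors.hpp>
#include <armnn/INetwork.hpp>
#include <armnn/Optional.hpp>
#include <armnn/Tensor.hpp>

armnn::IConnectableLayer* AddFullyConnectedWithOptionalBias(
    armnn::INetwork& network,
    const armnn::FullyConnectedDescriptor& desc,
    const armnn::ConstTensor& weights,
    const armnn::Optional<armnn::ConstTensor>& biases,
    const char* name)
{
    if (biases.has_value())
    {
        // Bias supplied: use the overload that takes a bias tensor.
        return network.AddFullyConnectedLayer(desc, weights, biases.value(), name);
    }
    // No bias: fall back to the weights-only overload.
    return network.AddFullyConnectedLayer(desc, weights, name);
}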