author    Matteo Martincigh <matteo.martincigh@arm.com>  2019-05-14 10:36:13 +0100
committer Matteo Martincigh <matteo.martincigh@arm.com>  2019-05-14 13:33:59 +0100
commit    fc598e15ff30bc375c95c9536d4a56662d867926 (patch)
tree      0d17a7928ae4faab6978552e666123bfc1926d93 /src/armnn/QuantizerVisitor.cpp
parent    906f94631aa7ef590b9d8ff45507e818a0d1ac2c (diff)
download  armnn-fc598e15ff30bc375c95c9536d4a56662d867926.tar.gz
Use the new deprecation API
* Used the new ARMNN_DEPRECATED_MSG macro instead of @deprecated
* Refactored the code to no longer use the deprecated methods where applicable

!android-nn-driver:1126

Change-Id: Ib0578d3d6fc5a763f5fb922f67ba91fafc7796f6
Signed-off-by: Matteo Martincigh <matteo.martincigh@arm.com>
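For context: ARMNN_DEPRECATED_MSG wraps the C++14 [[deprecated]] attribute, so the compiler warns at every remaining call site rather than relying on a Doxygen @deprecated note alone. A minimal sketch of the mechanism; the macro definition mirrors ArmNN's, but the class and methods below are simplified stand-ins, not the real INetwork interface:

    #define ARMNN_DEPRECATED_MSG(message) [[deprecated(message)]]

    struct Network
    {
        // Previously the deprecation lived only in a Doxygen "@deprecated"
        // comment; with the attribute, every remaining caller gets a
        // -Wdeprecated-declarations warning, which makes it practical to
        // hunt down and refactor call sites, as this patch does.
        ARMNN_DEPRECATED_MSG("Use the overload taking Optional<ConstTensor> biases")
        void AddLayerWithoutBias() {}

        void AddLayer() {} // preferred replacement
    };

    int main()
    {
        Network net;
        net.AddLayer();            // fine
        net.AddLayerWithoutBias(); // compiles, but the compiler flags it as deprecated
    }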
Diffstat (limited to 'src/armnn/QuantizerVisitor.cpp')
-rw-r--r--  src/armnn/QuantizerVisitor.cpp | 55
1 file changed, 25 insertions(+), 30 deletions(-)
diff --git a/src/armnn/QuantizerVisitor.cpp b/src/armnn/QuantizerVisitor.cpp
index 38e33cf2a3..4a87ca16ce 100644
--- a/src/armnn/QuantizerVisitor.cpp
+++ b/src/armnn/QuantizerVisitor.cpp
@@ -90,19 +90,20 @@ void QuantizerVisitor::VisitFullyConnectedLayer(const IConnectableLayer *layer,
 {
     std::vector<uint8_t> weightsBacking;
     ConstTensor qWeights = CreateQuantizedConst(weights, weightsBacking);
+    Optional<ConstTensor> optionalQBiases;
+    std::vector<uint8_t> biasesBacking;
 
-    IConnectableLayer* newLayer;
     if (biases.has_value())
     {
-        std::vector<uint8_t> biasBacking;
-        ConstTensor qBias = CreateQuantizedConst(biases.value(), biasBacking);
-        newLayer = m_QuantizedNetwork->AddFullyConnectedLayer(desc, qWeights, qBias, name);
-    }
-    else
-    {
-        newLayer = m_QuantizedNetwork->AddFullyConnectedLayer(desc, qWeights, name);
+        ConstTensor qBiases = CreateQuantizedConst(biases.value(), biasesBacking);
+        optionalQBiases = Optional<ConstTensor>(qBiases);
     }
 
+    IConnectableLayer* newLayer = m_QuantizedNetwork->AddFullyConnectedLayer(desc,
+                                                                             qWeights,
+                                                                             optionalQBiases,
+                                                                             name);
+
     RecordLayer(layer, newLayer);
     SetQuantizedInputConnections(layer, newLayer);
 }
@@ -185,23 +186,20 @@ void QuantizerVisitor::VisitConvolution2dLayer(const IConnectableLayer* layer,
 {
     std::vector<uint8_t> weightsBacking;
     ConstTensor qWeights = CreateQuantizedConst(weights, weightsBacking);
+    Optional<ConstTensor> optionalQBiases;
+    std::vector<uint8_t> biasesBacking;
 
-    IConnectableLayer* newLayer;
     if (biases.has_value())
     {
-        std::vector<uint8_t> biasesBacking;
         ConstTensor qBiases = CreateQuantizedConst(biases.value(), biasesBacking);
-
-        newLayer = m_QuantizedNetwork->AddConvolution2dLayer(convolution2dDescriptor,
-                                                             qWeights,
-                                                             qBiases,
-                                                             name);
-    }
-    else
-    {
-        newLayer = m_QuantizedNetwork->AddConvolution2dLayer(convolution2dDescriptor, qWeights, name);
+        optionalQBiases = Optional<ConstTensor>(qBiases);
     }
 
+    IConnectableLayer* newLayer = m_QuantizedNetwork->AddConvolution2dLayer(convolution2dDescriptor,
+                                                                            qWeights,
+                                                                            optionalQBiases,
+                                                                            name);
+
     RecordLayer(layer, newLayer);
     SetQuantizedInputConnections(layer, newLayer);
 }
@@ -214,23 +212,20 @@ void QuantizerVisitor::VisitDepthwiseConvolution2dLayer(const IConnectableLayer*
 {
     std::vector<uint8_t> weightsBacking;
     ConstTensor qWeights = CreateQuantizedConst(weights, weightsBacking);
+    Optional<ConstTensor> optionalQBiases;
+    std::vector<uint8_t> biasesBacking;
 
-    IConnectableLayer* newLayer;
     if (biases.has_value())
     {
-        std::vector<uint8_t> biasesBacking;
         ConstTensor qBiases = CreateQuantizedConst(biases.value(), biasesBacking);
-
-        newLayer = m_QuantizedNetwork->AddDepthwiseConvolution2dLayer(desc,
-                                                                      qWeights,
-                                                                      qBiases,
-                                                                      name);
-    }
-    else
-    {
-        newLayer = m_QuantizedNetwork->AddDepthwiseConvolution2dLayer(desc, qWeights, name);
+        optionalQBiases = Optional<ConstTensor>(qBiases);
     }
 
+    IConnectableLayer* newLayer = m_QuantizedNetwork->AddDepthwiseConvolution2dLayer(desc,
+                                                                                     qWeights,
+                                                                                     optionalQBiases,
+                                                                                     name);
+
     RecordLayer(layer, newLayer);
     SetQuantizedInputConnections(layer, newLayer);
 }