aboutsummaryrefslogtreecommitdiff
path: root/src/armnn/test/QuantizerTest.cpp
diff options
context:
space:
mode:
authorMatteo Martincigh <matteo.martincigh@arm.com>2019-05-14 10:36:13 +0100
committerMatteo Martincigh <matteo.martincigh@arm.com>2019-05-14 13:33:59 +0100
commitfc598e15ff30bc375c95c9536d4a56662d867926 (patch)
tree0d17a7928ae4faab6978552e666123bfc1926d93 /src/armnn/test/QuantizerTest.cpp
parent906f94631aa7ef590b9d8ff45507e818a0d1ac2c (diff)
downloadarmnn-fc598e15ff30bc375c95c9536d4a56662d867926.tar.gz
Use the new deprecation API
* Used the new ARMNN_DEPRECATED_MSG macro instead of @deprecated * Refactored the code to no longer use the deprecated methods where applicable !android-nn-driver:1126 Change-Id: Ib0578d3d6fc5a763f5fb922f67ba91fafc7796f6 Signed-off-by: Matteo Martincigh <matteo.martincigh@arm.com>
Diffstat (limited to 'src/armnn/test/QuantizerTest.cpp')
-rw-r--r--src/armnn/test/QuantizerTest.cpp30
1 file changed, 12 insertions, 18 deletions
diff --git a/src/armnn/test/QuantizerTest.cpp b/src/armnn/test/QuantizerTest.cpp
index 4f22317651..f2c739d274 100644
--- a/src/armnn/test/QuantizerTest.cpp
+++ b/src/armnn/test/QuantizerTest.cpp
@@ -686,16 +686,14 @@ INetworkPtr CreateNetworkWithFullyConnectedLayer(const bool biasEnabled,
// Add the layers
IConnectableLayer* input0 = network->AddInputLayer(0);
IConnectableLayer* fullyConnected;
+ Optional<ConstTensor> optionalBias;
+ std::vector<float> biasData{10.0f, 20.0f, 30.0f};
if (desc.m_BiasEnabled)
{
- std::vector<float> biasData{10.0f, 20.0f, 30.0f};
ConstTensor bias(info, biasData);
- fullyConnected = network->AddFullyConnectedLayer(desc, weights, bias);
- }
- else
- {
- fullyConnected = network->AddFullyConnectedLayer(desc, weights);
+ optionalBias = Optional<ConstTensor>(bias);
}
+ fullyConnected = network->AddFullyConnectedLayer(desc, weights, optionalBias);
IConnectableLayer* output = network->AddOutputLayer(1);
// Establish connections
@@ -814,16 +812,14 @@ void TestQuantizeConvolution2d(bool useBiases)
// Add the layers
IConnectableLayer* input0 = network->AddInputLayer(0);
IConnectableLayer* conv2d;
+ Optional<ConstTensor> optionalBiases;
+ std::vector<float> biasesData{-1.0f, 1.5f, 2.0f};
if (useBiases)
{
- std::vector<float> biasesData{-1.0f, 1.5f, 2.0f};
ConstTensor biases(info, biasesData);
- conv2d = network->AddConvolution2dLayer(descriptor, weights, biases);
- }
- else
- {
- conv2d = network->AddConvolution2dLayer(descriptor, weights);
+ optionalBiases = Optional<ConstTensor>(biases);
}
+ conv2d = network->AddConvolution2dLayer(descriptor, weights, optionalBiases);
IConnectableLayer* output = network->AddOutputLayer(1);
// Establish connections
@@ -902,16 +898,14 @@ void TestQuantizeDepthwiseConvolution2d(bool useBiases)
// Add the layers
IConnectableLayer* input0 = network->AddInputLayer(0);
IConnectableLayer* depthwiseConv2d;
+ Optional<ConstTensor> optionalBiases;
+ std::vector<float> biasesData{-1.0f, 1.5f, 2.0f};
if (useBiases)
{
- std::vector<float> biasesData{-1.0f, 1.5f, 2.0f};
ConstTensor biases(info, biasesData);
- depthwiseConv2d = network->AddDepthwiseConvolution2dLayer(descriptor, weights, biases);
- }
- else
- {
- depthwiseConv2d = network->AddDepthwiseConvolution2dLayer(descriptor, weights);
+ optionalBiases = Optional<ConstTensor>(biases);
}
+ depthwiseConv2d = network->AddDepthwiseConvolution2dLayer(descriptor, weights, optionalBiases);
IConnectableLayer* output = network->AddOutputLayer(1);
// Establish connections