author     Matteo Martincigh <matteo.martincigh@arm.com>  2019-05-14 10:36:13 +0100
committer  Matteo Martincigh <matteo.martincigh@arm.com>  2019-05-14 13:33:59 +0100
commit     fc598e15ff30bc375c95c9536d4a56662d867926
tree       0d17a7928ae4faab6978552e666123bfc1926d93
parent     906f94631aa7ef590b9d8ff45507e818a0d1ac2c
download   armnn-fc598e15ff30bc375c95c9536d4a56662d867926.tar.gz
Use the new deprecation API
* Used the new ARMNN_DEPRECATED_MSG macro instead of @deprecated
* Refactored the code to no longer use the deprecated methods where applicable

!android-nn-driver:1126
Change-Id: Ib0578d3d6fc5a763f5fb922f67ba91fafc7796f6
Signed-off-by: Matteo Martincigh <matteo.martincigh@arm.com>
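For context, a minimal standalone sketch of the pattern this commit adopts: a message-carrying deprecation macro, so callers of the old overloads get a compiler warning rather than only a Doxygen `@deprecated` note. The macro and the `ExampleNetwork`/`IConnectableLayerStub` names below are illustrative stand-ins, not the actual definitions in `include/armnn/Deprecated.hpp` or `INetwork.hpp`.

```cpp
// Illustrative stand-in for ARMNN_DEPRECATED_MSG; the real definition lives in
// include/armnn/Deprecated.hpp and may differ in detail.
#define ARMNN_DEPRECATED_MSG(message) [[deprecated(message)]]

struct IConnectableLayerStub {}; // hypothetical stand-in for armnn::IConnectableLayer

class ExampleNetwork
{
public:
    // New-style overload: biases are passed explicitly (possibly null here),
    // so there is a single canonical entry point.
    IConnectableLayerStub* AddExampleLayer(const float* weights, const float* biases)
    {
        static IConnectableLayerStub layer;
        return &layer;
    }

    // Old overload kept for source compatibility, but now flagged at compile time.
    ARMNN_DEPRECATED_MSG("This AddExampleLayer overload is deprecated")
    IConnectableLayerStub* AddExampleLayer(const float* weights)
    {
        return AddExampleLayer(weights, nullptr);
    }
};

int main()
{
    ExampleNetwork net;
    float weights[1] = {1.0f};
    // Calling the single-argument overload triggers, e.g.:
    // warning: 'AddExampleLayer' is deprecated: This AddExampleLayer overload is deprecated
    net.AddExampleLayer(weights);
    return 0;
}
```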
-rw-r--r--  include/armnn/INetwork.hpp                         13
-rw-r--r--  samples/SimpleSample.cpp                            4
-rw-r--r--  src/armnn/Network.cpp                              12
-rw-r--r--  src/armnn/Network.hpp                              12
-rw-r--r--  src/armnn/QuantizerVisitor.cpp                     55
-rw-r--r--  src/armnn/test/ConstTensorLayerVisitor.cpp         45
-rw-r--r--  src/armnn/test/CreateWorkload.hpp                  10
-rw-r--r--  src/armnn/test/NetworkTests.cpp                     6
-rw-r--r--  src/armnn/test/QuantizerTest.cpp                   30
-rw-r--r--  src/armnnCaffeParser/CaffeParser.cpp               56
-rw-r--r--  src/armnnDeserializer/Deserializer.cpp             24
-rw-r--r--  src/armnnOnnxParser/OnnxParser.cpp                 13
-rw-r--r--  src/armnnSerializer/test/SerializerTests.cpp       15
-rw-r--r--  src/armnnTfLiteParser/TfLiteParser.cpp              9
-rwxr-xr-x  src/armnnTfParser/TfParser.cpp                     20
-rw-r--r--  src/backends/backendsCommon/IBackendInternal.hpp    6
16 files changed, 180 insertions, 150 deletions
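Most of the diff below is a mechanical migration of call sites from the deprecated two-overload pattern (with and without a `ConstTensor` bias) to the single overload taking `Optional<ConstTensor>` biases. A hedged sketch of that pattern, assuming the Arm NN headers from around this commit; the helper function name is hypothetical.

```cpp
#include <armnn/ArmNN.hpp>

// Sketch of the call-site refactor applied across the parsers and tests:
// build the Optional once instead of branching between two Add* overloads.
armnn::IConnectableLayer* AddConvWithOptionalBias(armnn::INetwork& network,
                                                  const armnn::Convolution2dDescriptor& descriptor,
                                                  const armnn::ConstTensor& weights,
                                                  const armnn::ConstTensor* biases, // nullptr when bias is disabled
                                                  const char* name)
{
    armnn::Optional<armnn::ConstTensor> optionalBiases; // empty by default
    if (biases != nullptr)
    {
        optionalBiases = armnn::Optional<armnn::ConstTensor>(*biases);
    }

    // Single, non-deprecated overload handles both the bias and no-bias cases.
    return network.AddConvolution2dLayer(descriptor, weights, optionalBiases, name);
}
```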
diff --git a/include/armnn/INetwork.hpp b/include/armnn/INetwork.hpp
index bae6e94955..ef8524377d 100644
--- a/include/armnn/INetwork.hpp
+++ b/include/armnn/INetwork.hpp
@@ -11,6 +11,7 @@
#include <armnn/Optional.hpp>
#include <armnn/TensorFwd.hpp>
#include <armnn/Types.hpp>
+#include <armnn/Deprecated.hpp>
#include <memory>
#include <vector>
@@ -121,12 +122,12 @@ public:
const Optional<ConstTensor>& biases,
const char* name = nullptr) = 0;
- /// @deprecated
+ ARMNN_DEPRECATED_MSG("This AddConvolution2dLayer overload is deprecated")
virtual IConnectableLayer* AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
const ConstTensor& weights,
const char* name = nullptr) = 0;
- /// @deprecated
+ ARMNN_DEPRECATED_MSG("This AddConvolution2dLayer overload is deprecated")
virtual IConnectableLayer* AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
const ConstTensor& weights,
const ConstTensor& biases,
@@ -144,13 +145,13 @@ public:
const Optional<ConstTensor>& biases,
const char* name = nullptr) = 0;
- /// @deprecated
+ ARMNN_DEPRECATED_MSG("This AddDepthwiseConvolution2dLayer overload is deprecated")
virtual IConnectableLayer* AddDepthwiseConvolution2dLayer(
const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
const ConstTensor& weights,
const char* name = nullptr) = 0;
- /// @deprecated
+ ARMNN_DEPRECATED_MSG("This AddDepthwiseConvolution2dLayer overload is deprecated")
virtual IConnectableLayer* AddDepthwiseConvolution2dLayer(
const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
const ConstTensor& weights,
@@ -182,12 +183,12 @@ public:
const Optional<ConstTensor>& biases,
const char* name = nullptr) = 0;
- /// @deprecated
+ ARMNN_DEPRECATED_MSG("This AddFullyConnectedLayer overload is deprecated")
virtual IConnectableLayer* AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
const ConstTensor& weights,
const char* name = nullptr) = 0;
- /// @deprecated
+ ARMNN_DEPRECATED_MSG("This AddFullyConnectedLayer overload is deprecated")
virtual IConnectableLayer* AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
const ConstTensor& weights,
const ConstTensor& biases,
diff --git a/samples/SimpleSample.cpp b/samples/SimpleSample.cpp
index 0950b2688a..ed7c0bfb08 100644
--- a/samples/SimpleSample.cpp
+++ b/samples/SimpleSample.cpp
@@ -23,7 +23,9 @@ int main()
float weightsData[] = {1.0f}; // Identity
TensorInfo weightsInfo(TensorShape({1, 1}), DataType::Float32);
armnn::ConstTensor weights(weightsInfo, weightsData);
- IConnectableLayer *fullyConnected = myNetwork->AddFullyConnectedLayer(fullyConnectedDesc, weights,
+ IConnectableLayer *fullyConnected = myNetwork->AddFullyConnectedLayer(fullyConnectedDesc,
+ weights,
+ EmptyOptional(),
"fully connected");
IConnectableLayer *InputLayer = myNetwork->AddInputLayer(0);
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index 087ec0f8e9..956d2a4bde 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -583,16 +583,14 @@ IConnectableLayer* Network::AddFullyConnectedLayer(const FullyConnectedDescripto
return AddFullyConnectedLayerImpl(fullyConnectedDescriptor, weights, biases, name);
}
-/// @deprecated
IConnectableLayer* Network::AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
const ConstTensor& weights,
const char* name)
{
- Optional<ConstTensor> biases = EmptyOptional();
+ Optional<ConstTensor> biases;
return AddFullyConnectedLayerImpl(fullyConnectedDescriptor, weights, biases, name);
}
-/// @deprecated
IConnectableLayer* Network::AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
const ConstTensor& weights,
const ConstTensor& biases,
@@ -640,16 +638,14 @@ IConnectableLayer* Network::AddConvolution2dLayer(const Convolution2dDescriptor&
return AddConvolution2dLayerImpl(convolution2dDescriptor, weights, biases, name);
}
-/// @deprecated
IConnectableLayer* Network::AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
const ConstTensor& weights,
const char* name)
{
- Optional<ConstTensor> biases = EmptyOptional();
+ Optional<ConstTensor> biases;
return AddConvolution2dLayerImpl(convolution2dDescriptor, weights, biases, name);
}
-/// @deprecated
IConnectableLayer* Network::AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
const ConstTensor& weights,
const ConstTensor& biases,
@@ -691,17 +687,15 @@ IConnectableLayer* Network::AddDepthwiseConvolution2dLayer(
return AddDepthwiseConvolution2dLayerImpl(convolution2dDescriptor, weights, biases, name);
}
-/// @deprecated
IConnectableLayer* Network::AddDepthwiseConvolution2dLayer(
const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
const ConstTensor& weights,
const char* name)
{
- Optional<ConstTensor> biases = EmptyOptional();
+ Optional<ConstTensor> biases;
return AddDepthwiseConvolution2dLayerImpl(convolution2dDescriptor, weights, biases, name);
}
-/// @deprecated
IConnectableLayer* Network::AddDepthwiseConvolution2dLayer(
const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
const ConstTensor& weights,
diff --git a/src/armnn/Network.hpp b/src/armnn/Network.hpp
index a569a7c847..d26c2864ff 100644
--- a/src/armnn/Network.hpp
+++ b/src/armnn/Network.hpp
@@ -45,12 +45,12 @@ public:
const Optional<ConstTensor>& biases,
const char* name = nullptr) override;
- /// @deprecated
+ ARMNN_DEPRECATED_MSG("This AddConvolution2dLayer overload is deprecated")
IConnectableLayer* AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
const ConstTensor& weights,
const char* name = nullptr) override;
- /// @deprecated
+ ARMNN_DEPRECATED_MSG("This AddConvolution2dLayer overload is deprecated")
IConnectableLayer* AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
const ConstTensor& weights,
const ConstTensor& biases,
@@ -62,13 +62,13 @@ public:
const Optional<ConstTensor>& biases,
const char* name = nullptr) override;
- /// @deprecated
+ ARMNN_DEPRECATED_MSG("This AddDepthwiseConvolution2dLayer overload is deprecated")
IConnectableLayer* AddDepthwiseConvolution2dLayer(
const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
const ConstTensor& weights,
const char* name = nullptr) override;
- /// @deprecated
+ ARMNN_DEPRECATED_MSG("This AddDepthwiseConvolution2dLayer overload is deprecated")
IConnectableLayer* AddDepthwiseConvolution2dLayer(
const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
const ConstTensor& weights,
@@ -87,12 +87,12 @@ public:
const Optional<ConstTensor>& biases,
const char* name = nullptr) override;
- /// @deprecated
+ ARMNN_DEPRECATED_MSG("This AddFullyConnectedLayer overload is deprecated")
IConnectableLayer* AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
const ConstTensor& weights,
const char* name = nullptr) override;
- /// @deprecated
+ ARMNN_DEPRECATED_MSG("This AddFullyConnectedLayer overload is deprecated")
IConnectableLayer* AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
const ConstTensor& weights,
const ConstTensor& biases,
diff --git a/src/armnn/QuantizerVisitor.cpp b/src/armnn/QuantizerVisitor.cpp
index 38e33cf2a3..4a87ca16ce 100644
--- a/src/armnn/QuantizerVisitor.cpp
+++ b/src/armnn/QuantizerVisitor.cpp
@@ -90,19 +90,20 @@ void QuantizerVisitor::VisitFullyConnectedLayer(const IConnectableLayer *layer,
{
std::vector<uint8_t> weightsBacking;
ConstTensor qWeights = CreateQuantizedConst(weights, weightsBacking);
+ Optional<ConstTensor> optionalQBiases;
+ std::vector<uint8_t> biasesBacking;
- IConnectableLayer* newLayer;
if (biases.has_value())
{
- std::vector<uint8_t> biasBacking;
- ConstTensor qBias = CreateQuantizedConst(biases.value(), biasBacking);
- newLayer = m_QuantizedNetwork->AddFullyConnectedLayer(desc, qWeights, qBias, name);
- }
- else
- {
- newLayer = m_QuantizedNetwork->AddFullyConnectedLayer(desc, qWeights, name);
+ ConstTensor qBiases = CreateQuantizedConst(biases.value(), biasesBacking);
+ optionalQBiases = Optional<ConstTensor>(qBiases);
}
+ IConnectableLayer* newLayer = m_QuantizedNetwork->AddFullyConnectedLayer(desc,
+ qWeights,
+ optionalQBiases,
+ name);
+
RecordLayer(layer, newLayer);
SetQuantizedInputConnections(layer, newLayer);
}
@@ -185,23 +186,20 @@ void QuantizerVisitor::VisitConvolution2dLayer(const IConnectableLayer* layer,
{
std::vector<uint8_t> weightsBacking;
ConstTensor qWeights = CreateQuantizedConst(weights, weightsBacking);
+ Optional<ConstTensor> optionalQBiases;
+ std::vector<uint8_t> biasesBacking;
- IConnectableLayer* newLayer;
if (biases.has_value())
{
- std::vector<uint8_t> biasesBacking;
ConstTensor qBiases = CreateQuantizedConst(biases.value(), biasesBacking);
-
- newLayer = m_QuantizedNetwork->AddConvolution2dLayer(convolution2dDescriptor,
- qWeights,
- qBiases,
- name);
- }
- else
- {
- newLayer = m_QuantizedNetwork->AddConvolution2dLayer(convolution2dDescriptor, qWeights, name);
+ optionalQBiases = Optional<ConstTensor>(qBiases);
}
+ IConnectableLayer* newLayer = m_QuantizedNetwork->AddConvolution2dLayer(convolution2dDescriptor,
+ qWeights,
+ optionalQBiases,
+ name);
+
RecordLayer(layer, newLayer);
SetQuantizedInputConnections(layer, newLayer);
}
@@ -214,23 +212,20 @@ void QuantizerVisitor::VisitDepthwiseConvolution2dLayer(const IConnectableLayer*
{
std::vector<uint8_t> weightsBacking;
ConstTensor qWeights = CreateQuantizedConst(weights, weightsBacking);
+ Optional<ConstTensor> optionalQBiases;
+ std::vector<uint8_t> biasesBacking;
- IConnectableLayer* newLayer;
if (biases.has_value())
{
- std::vector<uint8_t> biasesBacking;
ConstTensor qBiases = CreateQuantizedConst(biases.value(), biasesBacking);
-
- newLayer = m_QuantizedNetwork->AddDepthwiseConvolution2dLayer(desc,
- qWeights,
- qBiases,
- name);
- }
- else
- {
- newLayer = m_QuantizedNetwork->AddDepthwiseConvolution2dLayer(desc, qWeights, name);
+ optionalQBiases = Optional<ConstTensor>(qBiases);
}
+ IConnectableLayer* newLayer = m_QuantizedNetwork->AddDepthwiseConvolution2dLayer(desc,
+ qWeights,
+ optionalQBiases,
+ name);
+
RecordLayer(layer, newLayer);
SetQuantizedInputConnections(layer, newLayer);
}
diff --git a/src/armnn/test/ConstTensorLayerVisitor.cpp b/src/armnn/test/ConstTensorLayerVisitor.cpp
index 5b77ddeb97..e17ee46c81 100644
--- a/src/armnn/test/ConstTensorLayerVisitor.cpp
+++ b/src/armnn/test/ConstTensorLayerVisitor.cpp
@@ -128,7 +128,7 @@ BOOST_AUTO_TEST_CASE(CheckConvolution2dLayer)
Network net;
- IConnectableLayer* const layer = net.AddConvolution2dLayer(descriptor, weights);
+ IConnectableLayer* const layer = net.AddConvolution2dLayer(descriptor, weights, EmptyOptional());
layer->Accept(visitor);
}
@@ -152,7 +152,7 @@ BOOST_AUTO_TEST_CASE(CheckNamedConvolution2dLayer)
Network net;
- IConnectableLayer* const layer = net.AddConvolution2dLayer(descriptor, weights, layerName);
+ IConnectableLayer* const layer = net.AddConvolution2dLayer(descriptor, weights, EmptyOptional(), layerName);
layer->Accept(visitor);
}
@@ -175,12 +175,13 @@ BOOST_AUTO_TEST_CASE(CheckConvolution2dLayerWithBiases)
std::vector<float> biasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> biasDimensions = {1, 1, 3, 3};
ConstTensor biases(TensorInfo(4, biasDimensions.data(), DataType::Float32), biasData);
+ Optional<ConstTensor> optionalBiases(biases);
- TestConvolution2dLayerVisitor visitor(descriptor, weights, Optional<ConstTensor>(biases));
+ TestConvolution2dLayerVisitor visitor(descriptor, weights, optionalBiases);
Network net;
- IConnectableLayer* const layer = net.AddConvolution2dLayer(descriptor, weights, biases);
+ IConnectableLayer* const layer = net.AddConvolution2dLayer(descriptor, weights, optionalBiases);
layer->Accept(visitor);
}
@@ -204,12 +205,13 @@ BOOST_AUTO_TEST_CASE(CheckNamedConvolution2dLayerWithBiases)
std::vector<float> biasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> biasDimensions = {1, 1, 3, 3};
ConstTensor biases(TensorInfo(4, biasDimensions.data(), DataType::Float32), biasData);
+ Optional<ConstTensor> optionalBiases(biases);
- TestConvolution2dLayerVisitor visitor(descriptor, weights, Optional<ConstTensor>(biases), layerName);
+ TestConvolution2dLayerVisitor visitor(descriptor, weights, optionalBiases, layerName);
Network net;
- IConnectableLayer* const layer = net.AddConvolution2dLayer(descriptor, weights, biases, layerName);
+ IConnectableLayer* const layer = net.AddConvolution2dLayer(descriptor, weights, optionalBiases, layerName);
layer->Accept(visitor);
}
@@ -232,7 +234,7 @@ BOOST_AUTO_TEST_CASE(CheckDepthwiseConvolution2dLayer)
Network net;
- IConnectableLayer* const layer = net.AddDepthwiseConvolution2dLayer(descriptor, weights);
+ IConnectableLayer* const layer = net.AddDepthwiseConvolution2dLayer(descriptor, weights, EmptyOptional());
layer->Accept(visitor);
}
@@ -256,7 +258,10 @@ BOOST_AUTO_TEST_CASE(CheckNamedDepthwiseConvolution2dLayer)
Network net;
- IConnectableLayer* const layer = net.AddDepthwiseConvolution2dLayer(descriptor, weights, layerName);
+ IConnectableLayer* const layer = net.AddDepthwiseConvolution2dLayer(descriptor,
+ weights,
+ EmptyOptional(),
+ layerName);
layer->Accept(visitor);
}
@@ -279,12 +284,13 @@ BOOST_AUTO_TEST_CASE(CheckDepthwiseConvolution2dLayerWithBiases)
std::vector<float> biasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> biasDimensions = {1, 1, 3, 3};
ConstTensor biases(TensorInfo(4, biasDimensions.data(), DataType::Float32), biasData);
+ Optional<ConstTensor> optionalBiases(biases);
- TestDepthwiseConvolution2dLayerVisitor visitor(descriptor, weights, Optional<ConstTensor>(biases));
+ TestDepthwiseConvolution2dLayerVisitor visitor(descriptor, weights, optionalBiases);
Network net;
- IConnectableLayer* const layer = net.AddDepthwiseConvolution2dLayer(descriptor, weights, biases);
+ IConnectableLayer* const layer = net.AddDepthwiseConvolution2dLayer(descriptor, weights, optionalBiases);
layer->Accept(visitor);
}
@@ -308,12 +314,13 @@ BOOST_AUTO_TEST_CASE(CheckNamedDepthwiseConvolution2dLayerWithBiases)
std::vector<float> biasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> biasDimensions = {1, 1, 3, 3};
ConstTensor biases(TensorInfo(4, biasDimensions.data(), DataType::Float32), biasData);
+ Optional<ConstTensor> optionalBiases(biases);
- TestDepthwiseConvolution2dLayerVisitor visitor(descriptor, weights, Optional<ConstTensor>(biases), layerName);
+ TestDepthwiseConvolution2dLayerVisitor visitor(descriptor, weights, optionalBiases, layerName);
Network net;
- IConnectableLayer* const layer = net.AddDepthwiseConvolution2dLayer(descriptor, weights, biases, layerName);
+ IConnectableLayer* const layer = net.AddDepthwiseConvolution2dLayer(descriptor, weights, optionalBiases, layerName);
layer->Accept(visitor);
}
@@ -330,7 +337,7 @@ BOOST_AUTO_TEST_CASE(CheckFullyConnectedLayer)
Network net;
- IConnectableLayer* const layer = net.AddFullyConnectedLayer(descriptor, weights);
+ IConnectableLayer* const layer = net.AddFullyConnectedLayer(descriptor, weights, EmptyOptional());
layer->Accept(visitor);
}
@@ -348,7 +355,7 @@ BOOST_AUTO_TEST_CASE(CheckNamedFullyConnectedLayer)
Network net;
- IConnectableLayer* const layer = net.AddFullyConnectedLayer(descriptor, weights, layerName);
+ IConnectableLayer* const layer = net.AddFullyConnectedLayer(descriptor, weights, EmptyOptional(), layerName);
layer->Accept(visitor);
}
@@ -365,12 +372,13 @@ BOOST_AUTO_TEST_CASE(CheckFullyConnectedLayerWithBiases)
std::vector<float> biasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> biasDimensions = {1, 1, 3, 3};
ConstTensor biases(TensorInfo(4, biasDimensions.data(), DataType::Float32), biasData);
+ Optional<ConstTensor> optionalBiases(biases);
- TestFullyConnectedLayerVistor visitor(descriptor, weights, Optional<ConstTensor>(biases));
+ TestFullyConnectedLayerVistor visitor(descriptor, weights, optionalBiases);
Network net;
- IConnectableLayer* const layer = net.AddFullyConnectedLayer(descriptor, weights, biases);
+ IConnectableLayer* const layer = net.AddFullyConnectedLayer(descriptor, weights, optionalBiases);
layer->Accept(visitor);
}
@@ -388,12 +396,13 @@ BOOST_AUTO_TEST_CASE(CheckNamedFullyConnectedLayerWithBiases)
std::vector<float> biasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> biasDimensions = {1, 1, 3, 3};
ConstTensor biases(TensorInfo(4, biasDimensions.data(), DataType::Float32), biasData);
+ Optional<ConstTensor> optionalBiases(biases);
- TestFullyConnectedLayerVistor visitor(descriptor, weights, Optional<ConstTensor>(biases), layerName);
+ TestFullyConnectedLayerVistor visitor(descriptor, weights, optionalBiases, layerName);
Network net;
- IConnectableLayer* const layer = net.AddFullyConnectedLayer(descriptor, weights, biases, layerName);
+ IConnectableLayer* const layer = net.AddFullyConnectedLayer(descriptor, weights, optionalBiases, layerName);
layer->Accept(visitor);
}
diff --git a/src/armnn/test/CreateWorkload.hpp b/src/armnn/test/CreateWorkload.hpp
index a68a6e3f42..1193ab721e 100644
--- a/src/armnn/test/CreateWorkload.hpp
+++ b/src/armnn/test/CreateWorkload.hpp
@@ -1148,12 +1148,18 @@ std::pair<armnn::IOptimizedNetworkPtr, std::unique_ptr<PreCompiledWorkload>> Cre
armnn::ConstTensor biases(biasTensorInfo, biasData);
// Create convolution layer with biases
- convLayer = net.AddConvolution2dLayer(convDesc2d, weights, biases, convLayerName.c_str());
+ convLayer = net.AddConvolution2dLayer(convDesc2d,
+ weights,
+ Optional<ConstTensor>(biases),
+ convLayerName.c_str());
}
else
{
// Create convolution layer without biases
- convLayer = net.AddConvolution2dLayer(convDesc2d, weights, convLayerName.c_str());
+ convLayer = net.AddConvolution2dLayer(convDesc2d,
+ weights,
+ EmptyOptional(),
+ convLayerName.c_str());
}
BOOST_TEST(convLayer);
diff --git a/src/armnn/test/NetworkTests.cpp b/src/armnn/test/NetworkTests.cpp
index 155304be36..47fd67b8d4 100644
--- a/src/armnn/test/NetworkTests.cpp
+++ b/src/armnn/test/NetworkTests.cpp
@@ -78,7 +78,10 @@ BOOST_AUTO_TEST_CASE(NetworkModification)
armnn::ConstTensor weights(armnn::TensorInfo(4, dims, armnn::DataType::Float32), convWeightsData);
armnn::Convolution2dDescriptor convDesc2d;
- armnn::IConnectableLayer* const convLayer = net.AddConvolution2dLayer(convDesc2d, weights, "conv layer");
+ armnn::IConnectableLayer* const convLayer = net.AddConvolution2dLayer(convDesc2d,
+ weights,
+ armnn::EmptyOptional(),
+ "conv layer");
BOOST_TEST(convLayer);
inputLayer->GetOutputSlot(0).Connect(convLayer->GetInputSlot(0));
@@ -86,6 +89,7 @@ BOOST_AUTO_TEST_CASE(NetworkModification)
armnn::FullyConnectedDescriptor fullyConnectedDesc;
armnn::IConnectableLayer* const fullyConnectedLayer = net.AddFullyConnectedLayer(fullyConnectedDesc,
weights,
+ armnn::EmptyOptional(),
"fully connected");
BOOST_TEST(fullyConnectedLayer);
diff --git a/src/armnn/test/QuantizerTest.cpp b/src/armnn/test/QuantizerTest.cpp
index 4f22317651..f2c739d274 100644
--- a/src/armnn/test/QuantizerTest.cpp
+++ b/src/armnn/test/QuantizerTest.cpp
@@ -686,16 +686,14 @@ INetworkPtr CreateNetworkWithFullyConnectedLayer(const bool biasEnabled,
// Add the layers
IConnectableLayer* input0 = network->AddInputLayer(0);
IConnectableLayer* fullyConnected;
+ Optional<ConstTensor> optionalBias;
+ std::vector<float> biasData{10.0f, 20.0f, 30.0f};
if (desc.m_BiasEnabled)
{
- std::vector<float> biasData{10.0f, 20.0f, 30.0f};
ConstTensor bias(info, biasData);
- fullyConnected = network->AddFullyConnectedLayer(desc, weights, bias);
- }
- else
- {
- fullyConnected = network->AddFullyConnectedLayer(desc, weights);
+ optionalBias = Optional<ConstTensor>(bias);
}
+ fullyConnected = network->AddFullyConnectedLayer(desc, weights, optionalBias);
IConnectableLayer* output = network->AddOutputLayer(1);
// Establish connections
@@ -814,16 +812,14 @@ void TestQuantizeConvolution2d(bool useBiases)
// Add the layers
IConnectableLayer* input0 = network->AddInputLayer(0);
IConnectableLayer* conv2d;
+ Optional<ConstTensor> optionalBiases;
+ std::vector<float> biasesData{-1.0f, 1.5f, 2.0f};
if (useBiases)
{
- std::vector<float> biasesData{-1.0f, 1.5f, 2.0f};
ConstTensor biases(info, biasesData);
- conv2d = network->AddConvolution2dLayer(descriptor, weights, biases);
- }
- else
- {
- conv2d = network->AddConvolution2dLayer(descriptor, weights);
+ optionalBiases = Optional<ConstTensor>(biases);
}
+ conv2d = network->AddConvolution2dLayer(descriptor, weights, optionalBiases);
IConnectableLayer* output = network->AddOutputLayer(1);
// Establish connections
@@ -902,16 +898,14 @@ void TestQuantizeDepthwiseConvolution2d(bool useBiases)
// Add the layers
IConnectableLayer* input0 = network->AddInputLayer(0);
IConnectableLayer* depthwiseConv2d;
+ Optional<ConstTensor> optionalBiases;
+ std::vector<float> biasesData{-1.0f, 1.5f, 2.0f};
if (useBiases)
{
- std::vector<float> biasesData{-1.0f, 1.5f, 2.0f};
ConstTensor biases(info, biasesData);
- depthwiseConv2d = network->AddDepthwiseConvolution2dLayer(descriptor, weights, biases);
- }
- else
- {
- depthwiseConv2d = network->AddDepthwiseConvolution2dLayer(descriptor, weights);
+ optionalBiases = Optional<ConstTensor>(biases);
}
+ depthwiseConv2d = network->AddDepthwiseConvolution2dLayer(descriptor, weights, optionalBiases);
IConnectableLayer* output = network->AddOutputLayer(1);
// Establish connections
diff --git a/src/armnnCaffeParser/CaffeParser.cpp b/src/armnnCaffeParser/CaffeParser.cpp
index 90579e67fd..a27abc7cb9 100644
--- a/src/armnnCaffeParser/CaffeParser.cpp
+++ b/src/armnnCaffeParser/CaffeParser.cpp
@@ -543,19 +543,17 @@ void CaffeParserBase::AddConvLayerWithSplits(const caffe::LayerParameter& layerP
weightData.data() + numWeightsPerGroup * g);
IConnectableLayer* convLayer = nullptr;
+ Optional<ConstTensor> optionalBiases;
if (desc.m_BiasEnabled)
{
// Pulls out the biases for this group from that loaded from the model file earlier.
ConstTensor biases(biasInfo, biasData.data() + numBiasesPerGroup * g);
-
- convLayer =
- m_Network->AddConvolution2dLayer(desc, weights, biases, convLayerNames[g].c_str());
- }
- else
- {
- convLayer =
- m_Network->AddConvolution2dLayer(desc, weights, convLayerNames[g].c_str());
+ optionalBiases = Optional<ConstTensor>(biases);
}
+ convLayer = m_Network->AddConvolution2dLayer(desc,
+ weights,
+ optionalBiases,
+ convLayerNames[g].c_str());
convLayers[g] = convLayer;
// If we have more than one group then the input to the nth convolution the splitter layer's nth output,
@@ -665,11 +663,11 @@ void CaffeParserBase::AddConvLayerWithDepthwiseConv(const caffe::LayerParameter&
armnn::IConnectableLayer* returnLayer = nullptr;
ConstTensor weights(TensorInfo(4, weightDimSizes, DataType::Float32), weightData.data());
-
+ Optional<ConstTensor> optionalBiases;
+ vector<float> biasData;
if (desc.m_BiasEnabled)
{
TensorInfo biasInfo;
- vector<float> biasData;
biasData.resize(boost::numeric_cast<size_t>(outputShape.dim(1)), 1.f);
GetDataFromBlob(layerParam, biasData, 1);
@@ -678,12 +676,12 @@ void CaffeParserBase::AddConvLayerWithDepthwiseConv(const caffe::LayerParameter&
biasInfo = TensorInfo(1, biasDimSizes, DataType::Float32);
ConstTensor biases(biasInfo, biasData.data());
- returnLayer = m_Network->AddDepthwiseConvolution2dLayer(desc, weights, biases, layerParam.name().c_str());
- }
- else
- {
- returnLayer = m_Network->AddDepthwiseConvolution2dLayer(desc, weights, layerParam.name().c_str());
+ optionalBiases = Optional<ConstTensor>(biases);
}
+ returnLayer = m_Network->AddDepthwiseConvolution2dLayer(desc,
+ weights,
+ optionalBiases,
+ layerParam.name().c_str());
if (!returnLayer)
{
@@ -843,11 +841,11 @@ void CaffeParserBase::ParseConvLayer(const LayerParameter& layerParam)
// Pull out the weights for this group from that loaded from the model file earlier
ConstTensor weights(TensorInfo(4, weightDimSizes, DataType::Float32), weightData.data());
-
+ Optional<ConstTensor> optionalBiases;
+ vector<float> biasData;
if (convolution2dDescriptor.m_BiasEnabled)
{
TensorInfo biasInfo;
- vector<float> biasData;
biasData.resize(boost::numeric_cast<size_t>(outputShape.dim(1)), 1.f);
GetDataFromBlob(layerParam, biasData, 1);
@@ -857,14 +855,12 @@ void CaffeParserBase::ParseConvLayer(const LayerParameter& layerParam)
// Pull out the biases for this group from that loaded from the model file earlier
ConstTensor biases(biasInfo, biasData.data());
-
- returnLayer =
- m_Network->AddConvolution2dLayer(convolution2dDescriptor, weights, biases, layerParam.name().c_str());
- }
- else
- {
- returnLayer = m_Network->AddConvolution2dLayer(convolution2dDescriptor, weights, layerParam.name().c_str());
+ optionalBiases = Optional<ConstTensor>(biases);
}
+ returnLayer = m_Network->AddConvolution2dLayer(convolution2dDescriptor,
+ weights,
+ optionalBiases,
+ layerParam.name().c_str());
armnn::IOutputSlot& inputConnection = GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0));
inputConnection.Connect(returnLayer->GetInputSlot(0));
@@ -1192,13 +1188,17 @@ void CaffeParserBase::ParseInnerProductLayer(const LayerParameter& layerParam)
ConstTensor biases(TensorInfo(1, sbTD, DataType::Float32), biasDataPtr);
- fullyConnectedLayer = m_Network->AddFullyConnectedLayer(tensorFullyConnectedDescriptor, weights, biases,
- layerParam.name().c_str());
+ fullyConnectedLayer = m_Network->AddFullyConnectedLayer(tensorFullyConnectedDescriptor,
+ weights,
+ Optional<ConstTensor>(biases),
+ layerParam.name().c_str());
}
else
{
- fullyConnectedLayer = m_Network->AddFullyConnectedLayer(tensorFullyConnectedDescriptor, weights,
- layerParam.name().c_str());
+ fullyConnectedLayer = m_Network->AddFullyConnectedLayer(tensorFullyConnectedDescriptor,
+ weights,
+ EmptyOptional(),
+ layerParam.name().c_str());
}
TensorInfo outputInfo({ inputInfo.GetShape()[0], outputSize }, DataType::Float32);
diff --git a/src/armnnDeserializer/Deserializer.cpp b/src/armnnDeserializer/Deserializer.cpp
index b7d45e0a7d..170917e5cf 100644
--- a/src/armnnDeserializer/Deserializer.cpp
+++ b/src/armnnDeserializer/Deserializer.cpp
@@ -988,13 +988,15 @@ void Deserializer::ParseConvolution2d(GraphPtr graph, unsigned int layerIndex)
armnn::ConstTensor weights = ToConstTensor(serializerLayer->weights());
armnn::ConstTensor biases;
+ armnn::Optional<armnn::ConstTensor> optionalBiases = armnn::EmptyOptional();
if (descriptor.m_BiasEnabled)
{
biases = ToConstTensor(serializerLayer->biases());
+ optionalBiases = armnn::Optional<armnn::ConstTensor>(biases);
}
IConnectableLayer* layer = m_Network->AddConvolution2dLayer(descriptor,
weights,
- biases,
+ optionalBiases,
layerName.c_str());
armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
@@ -1030,13 +1032,15 @@ void Deserializer::ParseDepthwiseConvolution2d(GraphPtr graph, unsigned int laye
armnn::ConstTensor weights = ToConstTensor(serializerLayer->weights());
armnn::ConstTensor biases;
+ armnn::Optional<armnn::ConstTensor> optionalBiases = armnn::EmptyOptional();
if (descriptor.m_BiasEnabled)
{
biases = ToConstTensor(serializerLayer->biases());
+ optionalBiases = armnn::Optional<armnn::ConstTensor>(biases);
}
IConnectableLayer* layer = m_Network->AddDepthwiseConvolution2dLayer(descriptor,
weights,
- biases,
+ optionalBiases,
layerName.c_str());
armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
@@ -1317,20 +1321,16 @@ void Deserializer::ParseFullyConnected(GraphPtr graph, unsigned int layerIndex)
armnn::ConstTensor weightsTensor = ToConstTensor(flatBufferLayer->weights());
armnn::IConnectableLayer* layer;
+ armnn::Optional<armnn::ConstTensor> optionalBiases = armnn::EmptyOptional();
if (flatBufferDescriptor->biasEnabled())
{
armnn::ConstTensor biasTensorData = ToConstTensor(flatBufferLayer->biases());
- layer = m_Network->AddFullyConnectedLayer(fullyConnectedDescriptor,
- weightsTensor,
- biasTensorData,
- layerName.c_str());
- }
- else
- {
- layer = m_Network->AddFullyConnectedLayer(fullyConnectedDescriptor,
- weightsTensor,
- layerName.c_str());
+ optionalBiases = armnn::Optional<armnn::ConstTensor>(biasTensorData);
}
+ layer = m_Network->AddFullyConnectedLayer(fullyConnectedDescriptor,
+ weightsTensor,
+ optionalBiases,
+ layerName.c_str());
armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
diff --git a/src/armnnOnnxParser/OnnxParser.cpp b/src/armnnOnnxParser/OnnxParser.cpp
index a62383b563..2b4ce7d853 100644
--- a/src/armnnOnnxParser/OnnxParser.cpp
+++ b/src/armnnOnnxParser/OnnxParser.cpp
@@ -809,7 +809,7 @@ void OnnxParser::AddFullyConnected(const onnx::NodeProto& matmulNode, const onnx
}
layer = m_Network->AddFullyConnectedLayer(desc,
CreateConstTensor(weightName).first,
- CreateConstTensor(biasName).first,
+ Optional<ConstTensor>(CreateConstTensor(biasName).first),
matmulNode.name().c_str());
BOOST_ASSERT(layer != nullptr);
@@ -824,7 +824,10 @@ void OnnxParser::AddFullyConnected(const onnx::NodeProto& matmulNode, const onnx
}
else
{
- layer = m_Network->AddFullyConnectedLayer(desc, CreateConstTensor(weightName).first, matmulNode.name().c_str());
+ layer = m_Network->AddFullyConnectedLayer(desc,
+ CreateConstTensor(weightName).first,
+ EmptyOptional(),
+ matmulNode.name().c_str());
BOOST_ASSERT(layer != nullptr);
auto outputInfo = ComputeOutputInfo({matmulNode.output(0)}, layer,
@@ -1120,13 +1123,14 @@ void OnnxParser::AddConvLayerWithDepthwiseConv(const onnx::NodeProto& node, cons
auto biasTensor = CreateConstTensor(node.input(2));
layer = m_Network->AddDepthwiseConvolution2dLayer(desc,
weightTensor.first,
- biasTensor.first,
+ Optional<ConstTensor>(biasTensor.first),
node.name().c_str());
}
else
{
layer = m_Network->AddDepthwiseConvolution2dLayer(desc,
weightTensor.first,
+ EmptyOptional(),
node.name().c_str());
}
BOOST_ASSERT(layer != nullptr);
@@ -1319,13 +1323,14 @@ void OnnxParser::ParseConv(const onnx::NodeProto& node)
auto biasTensor = CreateConstTensor(node.input(2));
layer = m_Network->AddConvolution2dLayer(desc,
weightTensor.first,
- biasTensor.first,
+ Optional<ConstTensor>(biasTensor.first),
node.name().c_str());
}
else
{
layer = m_Network->AddConvolution2dLayer(desc,
weightTensor.first,
+ EmptyOptional(),
node.name().c_str());
}
BOOST_ASSERT(layer != nullptr);
diff --git a/src/armnnSerializer/test/SerializerTests.cpp b/src/armnnSerializer/test/SerializerTests.cpp
index 4b3a09e47c..42111e6155 100644
--- a/src/armnnSerializer/test/SerializerTests.cpp
+++ b/src/armnnSerializer/test/SerializerTests.cpp
@@ -490,7 +490,10 @@ BOOST_AUTO_TEST_CASE(SerializeConvolution2d)
armnn::INetworkPtr network = armnn::INetwork::Create();
armnn::IConnectableLayer* const inputLayer = network->AddInputLayer(0);
armnn::IConnectableLayer* const convLayer =
- network->AddConvolution2dLayer(descriptor, weights, biases, layerName.c_str());
+ network->AddConvolution2dLayer(descriptor,
+ weights,
+ armnn::Optional<armnn::ConstTensor>(biases),
+ layerName.c_str());
armnn::IConnectableLayer* const outputLayer = network->AddOutputLayer(0);
inputLayer->GetOutputSlot(0).Connect(convLayer->GetInputSlot(0));
@@ -580,7 +583,10 @@ BOOST_AUTO_TEST_CASE(SerializeDepthwiseConvolution2d)
armnn::INetworkPtr network = armnn::INetwork::Create();
armnn::IConnectableLayer* const inputLayer = network->AddInputLayer(0);
armnn::IConnectableLayer* const depthwiseConvLayer =
- network->AddDepthwiseConvolution2dLayer(descriptor, weights, biases, layerName.c_str());
+ network->AddDepthwiseConvolution2dLayer(descriptor,
+ weights,
+ armnn::Optional<armnn::ConstTensor>(biases),
+ layerName.c_str());
armnn::IConnectableLayer* const outputLayer = network->AddOutputLayer(0);
inputLayer->GetOutputSlot(0).Connect(depthwiseConvLayer->GetInputSlot(0));
@@ -925,7 +931,10 @@ BOOST_AUTO_TEST_CASE(SerializeFullyConnected)
armnn::INetworkPtr network = armnn::INetwork::Create();
armnn::IConnectableLayer* const inputLayer = network->AddInputLayer(0);
armnn::IConnectableLayer* const fullyConnectedLayer =
- network->AddFullyConnectedLayer(descriptor, weights, biases, layerName.c_str());
+ network->AddFullyConnectedLayer(descriptor,
+ weights,
+ armnn::Optional<armnn::ConstTensor>(biases),
+ layerName.c_str());
armnn::IConnectableLayer* const outputLayer = network->AddOutputLayer(0);
inputLayer->GetOutputSlot(0).Connect(fullyConnectedLayer->GetInputSlot(0));
diff --git a/src/armnnTfLiteParser/TfLiteParser.cpp b/src/armnnTfLiteParser/TfLiteParser.cpp
index 036a881d1c..11d5b5c93e 100644
--- a/src/armnnTfLiteParser/TfLiteParser.cpp
+++ b/src/armnnTfLiteParser/TfLiteParser.cpp
@@ -748,13 +748,14 @@ void TfLiteParser::ParseConv2D(size_t subgraphIndex, size_t operatorIndex)
armnn::Optional<armnn::PermutationVector&>());
layer = m_Network->AddConvolution2dLayer(desc,
filterTensorAndData.first,
- biasTensorAndData.first,
+ Optional<ConstTensor>(biasTensorAndData.first),
layerName.c_str());
}
else
{
layer = m_Network->AddConvolution2dLayer(desc,
filterTensorAndData.first,
+ EmptyOptional(),
layerName.c_str());
}
@@ -836,13 +837,14 @@ void TfLiteParser::ParseDepthwiseConv2D(size_t subgraphIndex, size_t operatorInd
armnn::Optional<armnn::PermutationVector&>());
layer = m_Network->AddDepthwiseConvolution2dLayer(desc,
filterTensorAndData.first,
- biasTensorAndData.first,
+ Optional<ConstTensor>(biasTensorAndData.first),
layerName.c_str());
}
else
{
layer = m_Network->AddDepthwiseConvolution2dLayer(desc,
filterTensorAndData.first,
+ EmptyOptional(),
layerName.c_str());
}
BOOST_ASSERT(layer != nullptr);
@@ -1757,13 +1759,14 @@ void TfLiteParser::ParseFullyConnected(size_t subgraphIndex, size_t operatorInde
armnn::Optional<armnn::PermutationVector&>());
layer = m_Network->AddFullyConnectedLayer(desc,
filterTensorAndData.first,
- biasTensorAndData.first,
+ Optional<ConstTensor>(biasTensorAndData.first),
layerName.c_str());
}
else
{
layer = m_Network->AddFullyConnectedLayer(desc,
filterTensorAndData.first,
+ EmptyOptional(),
layerName.c_str());
}
BOOST_ASSERT(layer != nullptr);
diff --git a/src/armnnTfParser/TfParser.cpp b/src/armnnTfParser/TfParser.cpp
index e5948d55f4..78f479063e 100755
--- a/src/armnnTfParser/TfParser.cpp
+++ b/src/armnnTfParser/TfParser.cpp
@@ -1308,7 +1308,10 @@ ParsedTfOperationPtr TfParser::ParseConv2D(const tensorflow::NodeDef& nodeDef,
CalcPadding(inputHeight, weightHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, padding);
CalcPadding(inputWidth, weightWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, padding);
- IConnectableLayer* layer = m_Network->AddConvolution2dLayer(desc, weightTensor, nodeDef.name().c_str());
+ IConnectableLayer* layer = m_Network->AddConvolution2dLayer(desc,
+ weightTensor,
+ EmptyOptional(),
+ nodeDef.name().c_str());
layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
inputSlot.Connect(layer->GetInputSlot(0));
@@ -1426,7 +1429,10 @@ ParsedTfOperationPtr TfParser::ParseDepthwiseConv2D(const tensorflow::NodeDef& n
CalcPadding(inputHeight, weightHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, padding);
CalcPadding(inputWidth, weightWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, padding);
- IConnectableLayer* layer = m_Network->AddDepthwiseConvolution2dLayer(desc, weightTensor, nodeDef.name().c_str());
+ IConnectableLayer* layer = m_Network->AddDepthwiseConvolution2dLayer(desc,
+ weightTensor,
+ EmptyOptional(),
+ nodeDef.name().c_str());
layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
inputSlot.Connect(layer->GetInputSlot(0));
@@ -3127,10 +3133,11 @@ IConnectableLayer* TfParser::AddFullyConnectedLayer(const tensorflow::NodeDef& m
desc.m_BiasEnabled = addNodeDef != nullptr;
IConnectableLayer* layer = nullptr;
+ Optional<ConstTensor> optionalBiases;
+ std::vector<float> biasTensorData;
// Makes the layer.
if (addNodeDef != nullptr)
{
- std::vector<float> biasTensorData;
ConstTensor biases = biasNode->GetConstTensor(biasTensorData);
if (weights.GetShape()[1] != biases.GetShape()[0])
@@ -3145,12 +3152,9 @@ IConnectableLayer* TfParser::AddFullyConnectedLayer(const tensorflow::NodeDef& m
% CHECK_LOCATION().AsString()));
}
- layer = m_Network->AddFullyConnectedLayer(desc, weights, biases, armnnLayerName);
- }
- else
- {
- layer = m_Network->AddFullyConnectedLayer(desc, weights, armnnLayerName);
+ optionalBiases = Optional<ConstTensor>(biases);
}
+ layer = m_Network->AddFullyConnectedLayer(desc, weights, optionalBiases, armnnLayerName);
BOOST_ASSERT(layer != nullptr);
diff --git a/src/backends/backendsCommon/IBackendInternal.hpp b/src/backends/backendsCommon/IBackendInternal.hpp
index 3d94d6e9d9..826730a800 100644
--- a/src/backends/backendsCommon/IBackendInternal.hpp
+++ b/src/backends/backendsCommon/IBackendInternal.hpp
@@ -6,6 +6,7 @@
#include <armnn/Types.hpp>
#include <armnn/IRuntime.hpp>
+#include <armnn/Deprecated.hpp>
#include <ISubgraphViewConverter.hpp>
#include <SubgraphView.hpp>
@@ -56,7 +57,7 @@ public:
virtual Optimizations GetOptimizations() const = 0;
virtual ILayerSupportSharedPtr GetLayerSupport() const = 0;
- // @deprecated Use "OptimizationViews OptimizeSubgraphView(const SubgraphView&);" instead.
+ ARMNN_DEPRECATED_MSG("Use \"OptimizationViews OptimizeSubgraphView(const SubgraphView&)\" instead")
virtual SubgraphViewUniquePtr OptimizeSubgraphView(const SubgraphView& subgraph, bool& optimizationAttempted) const
{
optimizationAttempted = false;
@@ -68,7 +69,10 @@ public:
virtual OptimizationViews OptimizeSubgraphView(const SubgraphView& subgraph) const
{
bool optimizationAttempted = false;
+
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
SubgraphViewUniquePtr optSubgraph = OptimizeSubgraphView(subgraph, optimizationAttempted);
+ ARMNN_NO_DEPRECATE_WARN_END
OptimizationViews result;
if (!optimizationAttempted)