author     Keith Davis <keith.davis@arm.com>   2022-05-17 10:06:53 +0100
committer  Keith Davis <keith.davis@arm.com>   2022-05-23 09:59:34 +0100
commit     721e629fa07e65d6a53c093518021e71e48eeac2 (patch)
tree       1f3826741777e5d2cb28be964f46163f49abc271
parent     4a09159930f37dffa51c194ea8b565612bbe8431 (diff)
download   armnn-721e629fa07e65d6a53c093518021e71e48eeac2.tar.gz
IVGCVSW-6123 ConstTensorsAsInputs: Conv2d
* Use new INetwork::AddConvolution2dLayer instead of deprecated version
* Remove duplicated test in SerializerTests
* Fix some cosmetics

Signed-off-by: Keith Davis <keith.davis@arm.com>
Change-Id: I3407815bfdc1cdc01ca0a667b8e4d80d8621783f
-rw-r--r--  src/armnn/Network.cpp | 6
-rw-r--r--  src/armnn/test/NetworkTests.cpp | 11
-rw-r--r--  src/armnn/test/OptimizerTests.cpp | 8
-rw-r--r--  src/armnn/test/SubgraphViewTests.cpp | 4
-rw-r--r--  src/armnn/test/optimizations/FoldPadTests.cpp | 18
-rw-r--r--  src/armnn/test/optimizations/FuseBatchNormTests.cpp | 23
-rw-r--r--  src/armnnOnnxParser/OnnxParser.cpp | 28
-rw-r--r--  src/armnnSerializer/test/SerializerTests.cpp | 55
-rw-r--r--  src/armnnTestUtils/CreateWorkload.hpp | 29
-rw-r--r--  src/backends/backendsCommon/test/OptimizeSubgraphViewTests.cpp | 2
-rw-r--r--  src/backends/backendsCommon/test/OptimizedNetworkTests.cpp | 16
-rw-r--r--  src/backends/cl/test/ClImportTensorHandleTests.cpp | 37
-rw-r--r--  src/profiling/test/ProfilingTestUtils.cpp | 15
13 files changed, 102 insertions(+), 150 deletions(-)
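
For orientation, the sketch below shows the wiring pattern that every call site in this patch is migrated to: the convolution layer is created from the descriptor and a name only, and the weights and bias are supplied as ConstantLayer outputs connected to input slots 1 and 2. It is a minimal illustration assembled from the APIs visible in the diff; the shapes, the data values and the BuildConv2dNetwork helper are illustrative only and not part of the patch.

#include <armnn/Descriptors.hpp>
#include <armnn/INetwork.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>

#include <vector>

// Builds a small network with the new AddConvolution2dLayer(descriptor, name)
// overload; weights and bias arrive through ConstantLayer inputs.
armnn::INetworkPtr BuildConv2dNetwork()
{
    using namespace armnn;

    Convolution2dDescriptor convDesc;
    convDesc.m_StrideX     = 1;
    convDesc.m_StrideY     = 1;
    convDesc.m_BiasEnabled = true;
    convDesc.m_DataLayout  = DataLayout::NHWC;

    // Constant tensors must be flagged as constant in their TensorInfo.
    const TensorInfo weightsInfo({ 1, 3, 3, 1 }, DataType::Float32, 0.0f, 0, true);
    const TensorInfo biasInfo   ({ 1 },          DataType::Float32, 0.0f, 0, true);
    std::vector<float> weightsData(weightsInfo.GetNumElements(), 1.0f); // illustrative values
    std::vector<float> biasData(biasInfo.GetNumElements(), 0.0f);
    ConstTensor weights(weightsInfo, weightsData);
    ConstTensor bias(biasInfo, biasData);

    INetworkPtr network = INetwork::Create();

    IConnectableLayer* input  = network->AddInputLayer(0);
    // New overload: descriptor and name only, no ConstTensor arguments.
    IConnectableLayer* conv   = network->AddConvolution2dLayer(convDesc, "conv");
    IConnectableLayer* wLayer = network->AddConstantLayer(weights, "Weights");
    IConnectableLayer* bLayer = network->AddConstantLayer(bias, "Bias");
    IConnectableLayer* output = network->AddOutputLayer(0);

    input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 5, 5, 1 }, DataType::Float32));
    wLayer->GetOutputSlot(0).SetTensorInfo(weights.GetInfo());
    bLayer->GetOutputSlot(0).SetTensorInfo(bias.GetInfo());
    conv->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 3, 3, 1 }, DataType::Float32));

    // Data on slot 0; weights and bias are now graph inputs on slots 1 and 2.
    input->GetOutputSlot(0).Connect(conv->GetInputSlot(0));
    wLayer->GetOutputSlot(0).Connect(conv->GetInputSlot(1));
    bLayer->GetOutputSlot(0).Connect(conv->GetInputSlot(2));
    conv->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    return network;
}

Optimising and loading the returned network is unchanged; the only difference at the call sites touched below is that the deprecated (descriptor, weights, biases, name) overload, previously wrapped in ARMNN_NO_DEPRECATE_WARN_BEGIN/END, is no longer needed.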
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index fecc766836..f2ba94f597 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -90,9 +90,9 @@ IConnectableLayer* INetwork::AddConvolution2dLayer(const Convolution2dDescriptor
ARMNN_NO_DEPRECATE_WARN_BEGIN
IConnectableLayer* INetwork::AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
- const ConstTensor& weights,
- const Optional<ConstTensor>& biases,
- const char* name)
+ const ConstTensor& weights,
+ const Optional<ConstTensor>& biases,
+ const char* name)
{
return pNetworkImpl->AddConvolution2dLayer(convolution2dDescriptor,
weights,
diff --git a/src/armnn/test/NetworkTests.cpp b/src/armnn/test/NetworkTests.cpp
index 7756f40623..9d9810408e 100644
--- a/src/armnn/test/NetworkTests.cpp
+++ b/src/armnn/test/NetworkTests.cpp
@@ -605,10 +605,6 @@ TEST_CASE("ObtainConv2DDescriptorFromIConnectableLayer")
{
armnn::NetworkImpl net;
- unsigned int dims[] = { 10,1,1,1 };
- std::vector<float> convWeightsData(10);
- armnn::ConstTensor weights(armnn::TensorInfo(4, dims, armnn::DataType::Float32, 0.0f, 0, true), convWeightsData);
-
armnn::Convolution2dDescriptor convDesc2d;
convDesc2d.m_PadLeft = 2;
convDesc2d.m_PadRight = 3;
@@ -620,12 +616,7 @@ TEST_CASE("ObtainConv2DDescriptorFromIConnectableLayer")
convDesc2d.m_DilationY = 3;
convDesc2d.m_BiasEnabled = false;
convDesc2d.m_DataLayout = armnn::DataLayout::NCHW;
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- armnn::IConnectableLayer* const convLayer = net.AddConvolution2dLayer(convDesc2d,
- weights,
- armnn::EmptyOptional(),
- "conv layer");
- ARMNN_NO_DEPRECATE_WARN_END
+ armnn::IConnectableLayer* const convLayer = net.AddConvolution2dLayer(convDesc2d, "conv layer");
CHECK(convLayer);
const armnn::BaseDescriptor& descriptor = convLayer->GetParameters();
diff --git a/src/armnn/test/OptimizerTests.cpp b/src/armnn/test/OptimizerTests.cpp
index 3dd55279c6..b78863dddc 100644
--- a/src/armnn/test/OptimizerTests.cpp
+++ b/src/armnn/test/OptimizerTests.cpp
@@ -885,11 +885,11 @@ TEST_CASE("OptimizeForExclusiveConnectionsFuseTest")
// Define the network
Graph graph;
- auto input = graph.AddLayer<InputLayer>(0, "input");
+ auto input = graph.AddLayer<InputLayer>(0, "input");
auto weightsLayer = graph.AddLayer<ConstantLayer>("Weights");
- auto conv = graph.AddLayer<Convolution2dLayer>(convolution2dDescriptor, "convolution");
- auto batchNorm = graph.AddLayer<BatchNormalizationLayer>(batchNormDescriptor, "batchNorm");
- auto output = graph.AddLayer<OutputLayer>(0, "output");
+ auto conv = graph.AddLayer<Convolution2dLayer>(convolution2dDescriptor, "convolution");
+ auto batchNorm = graph.AddLayer<BatchNormalizationLayer>(batchNormDescriptor, "batchNorm");
+ auto output = graph.AddLayer<OutputLayer>(0, "output");
// Set layer information
input->GetOutputSlot().SetTensorInfo(inputInfo);
diff --git a/src/armnn/test/SubgraphViewTests.cpp b/src/armnn/test/SubgraphViewTests.cpp
index d7465c8361..feeea5d478 100644
--- a/src/armnn/test/SubgraphViewTests.cpp
+++ b/src/armnn/test/SubgraphViewTests.cpp
@@ -978,8 +978,8 @@ TEST_CASE("MultipleSimpleSubgraphs")
"m3");
auto x2 = graph.InsertNewLayer<Convolution2dLayer>(m3->GetInputSlot(0),
- Convolution2dDescriptor{},
- "x2");
+ Convolution2dDescriptor{},
+ "x2");
auto w2 = graph.InsertNewLayer<ConstantLayer>(x2->GetInputSlot(1), "w2");
diff --git a/src/armnn/test/optimizations/FoldPadTests.cpp b/src/armnn/test/optimizations/FoldPadTests.cpp
index 027b10377d..14c211f9bf 100644
--- a/src/armnn/test/optimizations/FoldPadTests.cpp
+++ b/src/armnn/test/optimizations/FoldPadTests.cpp
@@ -636,13 +636,9 @@ TEST_CASE("FoldPadLayerIntoConv2dLayer_ExecuteInferenceWithAndWithoutOptimizatio
std::vector<float> biasVector = {5, 6, 7, 8};
TensorInfo biasInfo({4}, DataType::Float32, 0.0f, 0, true);
ConstTensor bias(biasInfo, biasVector);
- Optional<ConstTensor> optionalBias = Optional<ConstTensor>(bias);
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- IConnectableLayer* conv2dLayer = network->AddConvolution2dLayer(convDescriptor,
- weights,
- optionalBias,
- "Conv2D");
- ARMNN_NO_DEPRECATE_WARN_END
+
+ IConnectableLayer* conv2dLayer = network->AddConvolution2dLayer(convDescriptor, "Conv2D");
+
TensorInfo outputInfo(4, outputShape, DataType::Float32);
conv2dLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
@@ -653,6 +649,14 @@ TEST_CASE("FoldPadLayerIntoConv2dLayer_ExecuteInferenceWithAndWithoutOptimizatio
padLayer->GetOutputSlot(0).Connect(conv2dLayer->GetInputSlot(0));
conv2dLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
+ auto weightsLayer = network->AddConstantLayer(weights, "Weights");
+ weightsLayer->GetOutputSlot(0).SetTensorInfo(weights.GetInfo());
+ weightsLayer->GetOutputSlot(0).Connect(conv2dLayer->GetInputSlot(1));
+
+ auto biasLayer = network->AddConstantLayer(bias, "Bias");
+ biasLayer->GetOutputSlot(0).SetTensorInfo(bias.GetInfo());
+ biasLayer->GetOutputSlot(0).Connect(conv2dLayer->GetInputSlot(2));
+
// Create ArmNN runtime
IRuntimePtr run = IRuntime::Create(IRuntime::CreationOptions()); // default options
// Optimise the network
diff --git a/src/armnn/test/optimizations/FuseBatchNormTests.cpp b/src/armnn/test/optimizations/FuseBatchNormTests.cpp
index 4a94f7889b..54cbbce89f 100644
--- a/src/armnn/test/optimizations/FuseBatchNormTests.cpp
+++ b/src/armnn/test/optimizations/FuseBatchNormTests.cpp
@@ -31,9 +31,10 @@ public:
const Optional<ConstTensor> &biases,
const char *name)
{
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- return network->AddConvolution2dLayer(descriptor, weights, biases, name);
- ARMNN_NO_DEPRECATE_WARN_END
+ IgnoreUnused(weights);
+ IgnoreUnused(biases);
+
+ return network->AddConvolution2dLayer(descriptor, name);
}
static std::vector<IConnectableLayer*> AddConstantLayers(INetwork *network,
@@ -41,12 +42,18 @@ public:
const ConstTensor &weights,
const Optional<ConstTensor> &biases)
{
- IgnoreUnused(network);
- IgnoreUnused(descriptor);
- IgnoreUnused(weights);
- IgnoreUnused(biases);
+ auto weightsLayer = network->AddConstantLayer(weights, "Weights");
+ weightsLayer->GetOutputSlot(0).SetTensorInfo(weights.GetInfo());
+ std::vector<IConnectableLayer*> layers = {weightsLayer};
- return {};
+ if (descriptor.m_BiasEnabled)
+ {
+ auto biasLayer = network->AddConstantLayer(biases.value(), "Bias");
+ biasLayer->GetOutputSlot(0).SetTensorInfo(biases.value().GetInfo());
+ layers.emplace_back(biasLayer);
+ }
+
+ return layers;
}
};
diff --git a/src/armnnOnnxParser/OnnxParser.cpp b/src/armnnOnnxParser/OnnxParser.cpp
index 60bd962db7..63fb60382c 100644
--- a/src/armnnOnnxParser/OnnxParser.cpp
+++ b/src/armnnOnnxParser/OnnxParser.cpp
@@ -1762,11 +1762,16 @@ void OnnxParserImpl::ParseConv(const onnx::NodeProto& node)
}
}
- armnn::IConnectableLayer* layer;
+ node.input_size() == 3 ? desc.m_BiasEnabled = true : desc.m_BiasEnabled = false;
+ armnn::IConnectableLayer* layer = m_Network->AddConvolution2dLayer(desc, node.name().c_str());
std::vector<std::string> tensorIndexes= {node.input(0), node.input(1)};
auto weightTensor = CreateConstTensor(node.input(1));
+ IConnectableLayer* weightsLayer = m_Network->AddConstantLayer(weightTensor.first);
+ weightsLayer->GetOutputSlot(0).SetTensorInfo(weightTensor.first.GetInfo());
+ weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1u));
+
if (node.input_size() == 3)
{
if(!m_TensorsInfo[node.input(2)].isConstant())
@@ -1777,22 +1782,15 @@ void OnnxParserImpl::ParseConv(const onnx::NodeProto& node)
CHECK_LOCATION().AsString()));
}
desc.m_BiasEnabled = true;
- tensorIndexes.emplace_back(node.input(2));
auto biasTensor = CreateConstTensor(node.input(2));
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- layer = m_Network->AddConvolution2dLayer(desc,
- weightTensor.first,
- Optional<ConstTensor>(biasTensor.first),
- node.name().c_str());
- }
- else
- {
- layer = m_Network->AddConvolution2dLayer(desc,
- weightTensor.first,
- EmptyOptional(),
- node.name().c_str());
- ARMNN_NO_DEPRECATE_WARN_END
+
+ IConnectableLayer* biasLayer = m_Network->AddConstantLayer(biasTensor.first);
+ biasLayer->GetOutputSlot(0).SetTensorInfo(biasTensor.first.GetInfo());
+ biasLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(2u));
+
+ tensorIndexes.emplace_back(node.input(2));
}
+
ARMNN_ASSERT(layer != nullptr);
auto outputInfo = ComputeOutputInfo({ node.output(0) }, layer,
diff --git a/src/armnnSerializer/test/SerializerTests.cpp b/src/armnnSerializer/test/SerializerTests.cpp
index a042939265..43a8aae9a7 100644
--- a/src/armnnSerializer/test/SerializerTests.cpp
+++ b/src/armnnSerializer/test/SerializerTests.cpp
@@ -417,7 +417,7 @@ TEST_CASE("SerializeConvolution2d")
deserializedNetwork->ExecuteStrategy(verifier);
}
-TEST_CASE("SerializeConvolution2dWithPerAxisParams")
+TEST_CASE("SerializeConvolution2dWithPerAxisParamsTestDeprecatedMethod")
{
using namespace armnn;
@@ -521,59 +521,6 @@ TEST_CASE("SerializeConvolution2dWeightsAndBiasesAsConstantLayers")
armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
CHECK(deserializedNetwork);
- Convolution2dLayerVerifier verifier(layerName, {inputInfo, weightsInfo, biasesInfo}, {outputInfo}, descriptor);
-
- deserializedNetwork->ExecuteStrategy(verifier);
-}
-
-TEST_CASE("SerializeConvolution2dWeightsAndBiasesAsConstantLayers")
-{
- const std::string layerName("convolution2d");
- const armnn::TensorInfo inputInfo ({ 1, 5, 5, 1 }, armnn::DataType::Float32);
- const armnn::TensorInfo outputInfo({ 1, 3, 3, 1 }, armnn::DataType::Float32);
-
- const armnn::TensorInfo weightsInfo({ 1, 3, 3, 1 }, armnn::DataType::Float32, 0.0f, 0, true);
- const armnn::TensorInfo biasesInfo ({ 1 }, armnn::DataType::Float32, 0.0f, 0, true);
-
- std::vector<float> weightsData = GenerateRandomData<float>(weightsInfo.GetNumElements());
- armnn::ConstTensor weights(weightsInfo, weightsData);
-
- std::vector<float> biasesData = GenerateRandomData<float>(biasesInfo.GetNumElements());
- armnn::ConstTensor biases(biasesInfo, biasesData);
-
- armnn::Convolution2dDescriptor descriptor;
- descriptor.m_PadLeft = 1;
- descriptor.m_PadRight = 1;
- descriptor.m_PadTop = 1;
- descriptor.m_PadBottom = 1;
- descriptor.m_StrideX = 2;
- descriptor.m_StrideY = 2;
- descriptor.m_DilationX = 2;
- descriptor.m_DilationY = 2;
- descriptor.m_BiasEnabled = true;
- descriptor.m_DataLayout = armnn::DataLayout::NHWC;
-
- armnn::INetworkPtr network = armnn::INetwork::Create();
- armnn::IConnectableLayer* const inputLayer = network->AddInputLayer(0);
- armnn::IConnectableLayer* const weightsLayer = network->AddConstantLayer(weights, "Weights");
- armnn::IConnectableLayer* const biasesLayer = network->AddConstantLayer(biases, "Biases");
- armnn::IConnectableLayer* const convLayer = network->AddConvolution2dLayer(descriptor,
- layerName.c_str());
- armnn::IConnectableLayer* const outputLayer = network->AddOutputLayer(0);
-
- inputLayer->GetOutputSlot(0).Connect(convLayer->GetInputSlot(0));
- weightsLayer->GetOutputSlot(0).Connect(convLayer->GetInputSlot(1));
- biasesLayer->GetOutputSlot(0).Connect(convLayer->GetInputSlot(2));
- convLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
-
- inputLayer->GetOutputSlot(0).SetTensorInfo(inputInfo);
- weightsLayer->GetOutputSlot(0).SetTensorInfo(weightsInfo);
- biasesLayer->GetOutputSlot(0).SetTensorInfo(biasesInfo);
- convLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
-
- armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
- CHECK(deserializedNetwork);
-
const std::vector<armnn::ConstTensor>& constants {weights, biases};
LayerVerifierBaseWithDescriptorAndConstants<armnn::Convolution2dDescriptor> verifier(
layerName, {inputInfo, weightsInfo, biasesInfo}, {outputInfo}, descriptor, constants);
diff --git a/src/armnnTestUtils/CreateWorkload.hpp b/src/armnnTestUtils/CreateWorkload.hpp
index 7700a5573a..905b8fa50b 100644
--- a/src/armnnTestUtils/CreateWorkload.hpp
+++ b/src/armnnTestUtils/CreateWorkload.hpp
@@ -2122,9 +2122,15 @@ std::pair<armnn::IOptimizedNetworkPtr, std::unique_ptr<PreCompiledWorkload>> Cre
convDesc2d.m_BiasEnabled = biasEnabled;
convDesc2d.m_DataLayout = armnn::DataLayout::NHWC;
- armnn::IConnectableLayer* convLayer = nullptr;
+
const std::string convLayerName("conv layer");
+ armnn::IConnectableLayer* convLayer = net->AddConvolution2dLayer(convDesc2d, convLayerName.c_str());
+
+ IConnectableLayer* weightsLayer = net->AddConstantLayer(weights);
+ weightsLayer->GetOutputSlot(0).SetTensorInfo(weights.GetInfo());
+ weightsLayer->GetOutputSlot(0).Connect(convLayer->GetInputSlot(1u));
+
if (biasEnabled)
{
constexpr armnn::DataType biasDataType = ( dataType == armnn::DataType::QAsymmU8) ?
@@ -2139,23 +2145,10 @@ std::pair<armnn::IOptimizedNetworkPtr, std::unique_ptr<PreCompiledWorkload>> Cre
armnn::ConstTensor biases(biasTensorInfo, biasData);
- // Create convolution layer with biases
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- convLayer = net->AddConvolution2dLayer(convDesc2d,
- weights,
- Optional<ConstTensor>(biases),
- convLayerName.c_str());
- ARMNN_NO_DEPRECATE_WARN_END
- }
- else
- {
- // Create convolution layer without biases
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- convLayer = net->AddConvolution2dLayer(convDesc2d,
- weights,
- EmptyOptional(),
- convLayerName.c_str());
- ARMNN_NO_DEPRECATE_WARN_END
+ IConnectableLayer* biasLayer = net->AddConstantLayer(biases);
+
+ biasLayer->GetOutputSlot(0).SetTensorInfo(biases.GetInfo());
+ biasLayer->GetOutputSlot(0).Connect(convLayer->GetInputSlot(2u));
}
CHECK(convLayer);
diff --git a/src/backends/backendsCommon/test/OptimizeSubgraphViewTests.cpp b/src/backends/backendsCommon/test/OptimizeSubgraphViewTests.cpp
index 45fcf19f90..67354696b0 100644
--- a/src/backends/backendsCommon/test/OptimizeSubgraphViewTests.cpp
+++ b/src/backends/backendsCommon/test/OptimizeSubgraphViewTests.cpp
@@ -315,7 +315,7 @@ SubgraphView::SubgraphViewPtr BuildFullyOptimizableSubgraph1(Graph& graph, Layer
const TensorInfo inputInfo ({ 1, 16, 16, 16 }, DataType::QAsymmU8, 1.0f, 0);
const TensorInfo outputInfo({ 1, 16, 16, 16 }, DataType::QAsymmU8, 1.0f, 0);
TensorInfo weightInfo({ 16, 1, 1, 16 }, DataType::QAsymmU8, 0.9f, 0);
- TensorInfo biasInfo ({ 1, 1, 1, 16 }, DataType::Signed32, 0.9f, 0);
+ TensorInfo biasInfo({ 1, 1, 1, 16 }, DataType::Signed32, 0.9f, 0);
weightInfo.SetConstant(true);
biasInfo.SetConstant(true);
diff --git a/src/backends/backendsCommon/test/OptimizedNetworkTests.cpp b/src/backends/backendsCommon/test/OptimizedNetworkTests.cpp
index 8e3b275649..bcea0610db 100644
--- a/src/backends/backendsCommon/test/OptimizedNetworkTests.cpp
+++ b/src/backends/backendsCommon/test/OptimizedNetworkTests.cpp
@@ -402,14 +402,16 @@ TEST_CASE("OptimizeNetworkCopy")
armnn::INetworkPtr network = armnn::INetwork::Create();
armnn::IConnectableLayer* const inputLayer = network->AddInputLayer(0);
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- armnn::IConnectableLayer* const convLayer =
- network->AddConvolution2dLayer(descriptor,
- weights,
- armnn::Optional<armnn::ConstTensor>(biases),
- layerName.c_str());
- ARMNN_NO_DEPRECATE_WARN_END
+ armnn::IConnectableLayer* const convLayer = network->AddConvolution2dLayer(descriptor, layerName.c_str());
armnn::IConnectableLayer* const outputLayer = network->AddOutputLayer(0);
+ armnn::IConnectableLayer* weightsLayer = network->AddConstantLayer(weights);
+ armnn::IConnectableLayer* biasLayer = network->AddConstantLayer(biases);
+
+ weightsLayer->GetOutputSlot(0).SetTensorInfo(weightsInfo);
+ weightsLayer->GetOutputSlot(0).Connect(convLayer->GetInputSlot(1u));
+
+ biasLayer->GetOutputSlot(0).SetTensorInfo(biasesInfo);
+ biasLayer->GetOutputSlot(0).Connect(convLayer->GetInputSlot(2u));
inputLayer->GetOutputSlot(0).Connect(convLayer->GetInputSlot(0));
convLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
diff --git a/src/backends/cl/test/ClImportTensorHandleTests.cpp b/src/backends/cl/test/ClImportTensorHandleTests.cpp
index 9bfd1fb46d..20537b3c81 100644
--- a/src/backends/cl/test/ClImportTensorHandleTests.cpp
+++ b/src/backends/cl/test/ClImportTensorHandleTests.cpp
@@ -320,14 +320,14 @@ TEST_CASE_FIXTURE(ClContextControlFixture, "ClForceImportConv2dEndToEnd")
convDesc2d.m_PadBottom = 1;
convDesc2d.m_DataLayout = DataLayout::NHWC;
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- armnn::IConnectableLayer* const convLayer = network->AddConvolution2dLayer(convDesc2d,
- weights,
- armnn::EmptyOptional(),
- "conv");
- ARMNN_NO_DEPRECATE_WARN_END
+ armnn::IConnectableLayer* const convLayer = network->AddConvolution2dLayer(convDesc2d, "conv");
+ armnn::IConnectableLayer* weightsLayer = network->AddConstantLayer(weights);
+
ARMNN_ASSERT(convLayer);
+ weightsLayer->GetOutputSlot(0).SetTensorInfo(weights.GetInfo());
+ weightsLayer->GetOutputSlot(0).Connect(convLayer->GetInputSlot(1u));
+
inputLayer->GetOutputSlot(0).Connect(convLayer->GetInputSlot(0));
inputLayer->GetOutputSlot(0).SetTensorInfo(inputInfo);
@@ -878,14 +878,14 @@ TEST_CASE_FIXTURE(ClContextControlFixture, "ClForceImportRepeatedInferencesEndTo
convDesc2d.m_PadTop = 1;
convDesc2d.m_PadBottom = 1;
convDesc2d.m_DataLayout = DataLayout::NHWC;
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- armnn::IConnectableLayer* const convLayer = network->AddConvolution2dLayer(convDesc2d,
- weights,
- armnn::EmptyOptional(),
- "conv");
- ARMNN_NO_DEPRECATE_WARN_END
+ armnn::IConnectableLayer* const convLayer = network->AddConvolution2dLayer(convDesc2d, "conv");
ARMNN_ASSERT(convLayer);
+ armnn::IConnectableLayer* weightsLayer = network->AddConstantLayer(weights);
+
+ weightsLayer->GetOutputSlot(0).SetTensorInfo(weights.GetInfo());
+ weightsLayer->GetOutputSlot(0).Connect(convLayer->GetInputSlot(1u));
+
inputLayer->GetOutputSlot(0).Connect(convLayer->GetInputSlot(0));
inputLayer->GetOutputSlot(0).SetTensorInfo(inputInfo);
@@ -1098,14 +1098,15 @@ TEST_CASE_FIXTURE(ClContextControlFixture, "ClForceImportRepeatedInferencesInver
convDesc2d.m_PadTop = 1;
convDesc2d.m_PadBottom = 1;
convDesc2d.m_DataLayout = DataLayout::NHWC;
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- armnn::IConnectableLayer* const convLayer = network->AddConvolution2dLayer(convDesc2d,
- weights,
- armnn::EmptyOptional(),
- "conv");
- ARMNN_NO_DEPRECATE_WARN_END
+
+ armnn::IConnectableLayer* const convLayer = network->AddConvolution2dLayer(convDesc2d, "conv");
ARMNN_ASSERT(convLayer);
+ armnn::IConnectableLayer* weightsLayer = network->AddConstantLayer(weights);
+
+ weightsLayer->GetOutputSlot(0).SetTensorInfo(weights.GetInfo());
+ weightsLayer->GetOutputSlot(0).Connect(convLayer->GetInputSlot(1u));
+
inputLayer->GetOutputSlot(0).Connect(convLayer->GetInputSlot(0));
inputLayer->GetOutputSlot(0).SetTensorInfo(inputInfo);
diff --git a/src/profiling/test/ProfilingTestUtils.cpp b/src/profiling/test/ProfilingTestUtils.cpp
index 58708cab9a..215ae343d6 100644
--- a/src/profiling/test/ProfilingTestUtils.cpp
+++ b/src/profiling/test/ProfilingTestUtils.cpp
@@ -424,9 +424,18 @@ void VerifyPostOptimisationStructureTestImpl(armnn::BackendId backendId)
conv2dDesc.m_PadTop = 2;
conv2dDesc.m_PadBottom = 2;
conv2dDesc.m_BiasEnabled = true;
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- IConnectableLayer* conv2d = net->AddConvolution2dLayer(conv2dDesc, weights, optionalBiases);
- ARMNN_NO_DEPRECATE_WARN_END
+
+ IConnectableLayer* conv2d = net->AddConvolution2dLayer(conv2dDesc);
+
+ armnn::IConnectableLayer* weightsLayer = net->AddConstantLayer(weights, "Weights");
+ armnn::IConnectableLayer* biasLayer = net->AddConstantLayer(biases, "Bias");
+
+ weightsLayer->GetOutputSlot(0).SetTensorInfo(weightInfo);
+ weightsLayer->GetOutputSlot(0).Connect(conv2d->GetInputSlot(1u));
+
+ biasLayer->GetOutputSlot(0).SetTensorInfo(biasInfo);
+ biasLayer->GetOutputSlot(0).Connect(conv2d->GetInputSlot(2u));
+
// Abs layer
armnn::ElementwiseUnaryDescriptor absDesc;
armnn::IConnectableLayer* const abs = net->AddElementwiseUnaryLayer(absDesc, "abs");