diff options
author | Cathal Corbett <cathal.corbett@arm.com> | 2022-04-14 17:55:11 +0100 |
---|---|---|
committer | Cathal Corbett <cathal.corbett@arm.com> | 2022-05-05 16:10:06 +0000 |
commit | 0690265d83e5aa79bd174544a7b35330781619dd (patch) | |
tree | 2cb825017ee202ebcfa9c8428271a4dccaed72a4 /src/armnnOnnxParser/OnnxParser.cpp | |
parent | 3a3a6bfaedc64fac3644c8fe88dbfc3947e2b3ab (diff) | |
download | armnn-0690265d83e5aa79bd174544a7b35330781619dd.tar.gz |
IVGCVSW-6127 ConstTensorsAsInput: DepthwiseConvolution2d
!android-nn-driver:7418
* Update Front-end and Tools.
* Updated Serializer, Deserializer and unit tests to reflect this.
* Updated TfLiteDelegate, TfLiteParser and OnnxParser.
* Change NNDriver to new API.
* Updated Ref.
* Neon and Cl backend partially completed (Backend.cpp files).
* Added dynamic or constant input EndToEnd tests.
* Added ConstantTensorAsInputMemberVariableRedirect Optimization.
Signed-off-by: Cathal Corbett <cathal.corbett@arm.com>
Change-Id: Ib18b6c10a093042e165e25237dc04a4c67ba82da
Diffstat (limited to 'src/armnnOnnxParser/OnnxParser.cpp')
-rw-r--r-- | src/armnnOnnxParser/OnnxParser.cpp | 27 |
1 file changed, 14 insertions, 13 deletions
diff --git a/src/armnnOnnxParser/OnnxParser.cpp b/src/armnnOnnxParser/OnnxParser.cpp index d97fa1c4f1..dd6a06fd00 100644 --- a/src/armnnOnnxParser/OnnxParser.cpp +++ b/src/armnnOnnxParser/OnnxParser.cpp @@ -1042,12 +1042,17 @@ void OnnxParserImpl::AddConvLayerWithDepthwiseConv(const onnx::NodeProto& node, desc.m_StrideY = convDesc.m_StrideY; desc.m_BiasEnabled = convDesc.m_BiasEnabled; - armnn::IConnectableLayer* layer; + armnn::IConnectableLayer* layer = m_Network->AddDepthwiseConvolution2dLayer(desc, node.name().c_str()); + std::vector<std::string> tensorIndexes= {node.input(0), node.input(1)}; // weights come in as [O,1,H,W] from ONNX and need to be converted to ArmNNs dephtwise weights layout [1,H,W,O] armnn::PermutationVector perVec {3,0,1,2}; auto weightTensor = CreateConstTensor(node.input(1), perVec); + IConnectableLayer* weightsLayer = m_Network->AddConstantLayer(weightTensor.first); + weightsLayer->GetOutputSlot(0).SetTensorInfo(weightTensor.first.GetInfo()); + weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1u)); + if (node.input_size() == 3) { if(!m_TensorsInfo[node.input(2)].isConstant()) @@ -1057,20 +1062,16 @@ void OnnxParserImpl::AddConvLayerWithDepthwiseConv(const onnx::NodeProto& node, node.name(), CHECK_LOCATION().AsString())); } + desc.m_BiasEnabled = true; auto biasTensor = CreateConstTensor(node.input(2)); - layer = m_Network->AddDepthwiseConvolution2dLayer(desc, - weightTensor.first, - Optional<ConstTensor>(biasTensor.first), - node.name().c_str()); - } - else - { - layer = m_Network->AddDepthwiseConvolution2dLayer(desc, - weightTensor.first, - EmptyOptional(), - node.name().c_str()); + tensorIndexes.emplace_back(node.input(2)); + + IConnectableLayer* biasLayer = m_Network->AddConstantLayer(biasTensor.first); + biasLayer->GetOutputSlot(0).SetTensorInfo(biasTensor.first.GetInfo()); + biasLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(2u)); } + ARMNN_ASSERT(layer != nullptr); auto outputInfo = ComputeOutputInfo({ node.output(0) 
}, layer, @@ -1081,7 +1082,7 @@ void OnnxParserImpl::AddConvLayerWithDepthwiseConv(const onnx::NodeProto& node, // register the input connection slots for the layer, connections are made after all layers have been created // only the tensors for the inputs are relevant, exclude the const tensors - RegisterInputSlots(layer, {node.input(0)}); + RegisterInputSlots(layer, tensorIndexes); // register the output connection slots for the layer, connections are made after all layers have been created RegisterOutputSlots(layer, {node.output(0)}); |