From 0690265d83e5aa79bd174544a7b35330781619dd Mon Sep 17 00:00:00 2001
From: Cathal Corbett
Date: Thu, 14 Apr 2022 17:55:11 +0100
Subject: IVGCVSW-6127 ConstTensorsAsInput: DepthwiseConvolution2d

!android-nn-driver:7418

 * Update Front-end and Tools.
 * Updated Serializer, Deserializer and unit tests to reflect this.
 * Updated TfLiteDelegate, TfLiteParser and OnnxParser.
 * Change NNDriver to new API.
 * Updated Ref.
 * Neon and Cl backend partially completed (Backend.cpp files).
 * Added dynamic or constant input EndToEnd tests.
 * Added ConstantTensorAsInputMemeberVariableRedirect Optimization.

Signed-off-by: Cathal Corbett
Change-Id: Ib18b6c10a093042e165e25237dc04a4c67ba82da
---
 src/armnnOnnxParser/OnnxParser.cpp | 27 ++++++++++++++-------------
 1 file changed, 14 insertions(+), 13 deletions(-)

(limited to 'src/armnnOnnxParser/OnnxParser.cpp')

diff --git a/src/armnnOnnxParser/OnnxParser.cpp b/src/armnnOnnxParser/OnnxParser.cpp
index d97fa1c4f1..dd6a06fd00 100644
--- a/src/armnnOnnxParser/OnnxParser.cpp
+++ b/src/armnnOnnxParser/OnnxParser.cpp
@@ -1042,12 +1042,17 @@ void OnnxParserImpl::AddConvLayerWithDepthwiseConv(const onnx::NodeProto& node,
     desc.m_StrideY = convDesc.m_StrideY;
     desc.m_BiasEnabled = convDesc.m_BiasEnabled;
 
-    armnn::IConnectableLayer* layer;
+    armnn::IConnectableLayer* layer = m_Network->AddDepthwiseConvolution2dLayer(desc, node.name().c_str());
+    std::vector<std::string> tensorIndexes= {node.input(0), node.input(1)};
 
     // weights come in as [O,1,H,W] from ONNX and need to be converted to ArmNNs dephtwise weights layout [1,H,W,O]
     armnn::PermutationVector perVec {3,0,1,2};
     auto weightTensor = CreateConstTensor(node.input(1), perVec);
 
+    IConnectableLayer* weightsLayer = m_Network->AddConstantLayer(weightTensor.first);
+    weightsLayer->GetOutputSlot(0).SetTensorInfo(weightTensor.first.GetInfo());
+    weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1u));
+
     if (node.input_size() == 3)
     {
         if(!m_TensorsInfo[node.input(2)].isConstant())
@@ -1057,20 +1062,16 @@ void OnnxParserImpl::AddConvLayerWithDepthwiseConv(const onnx::NodeProto& node,
                                              node.name(),
                                              CHECK_LOCATION().AsString()));
         }
+
         desc.m_BiasEnabled = true;
         auto biasTensor = CreateConstTensor(node.input(2));
-        layer = m_Network->AddDepthwiseConvolution2dLayer(desc,
-                                                          weightTensor.first,
-                                                          Optional<ConstTensor>(biasTensor.first),
-                                                          node.name().c_str());
-    }
-    else
-    {
-        layer = m_Network->AddDepthwiseConvolution2dLayer(desc,
-                                                          weightTensor.first,
-                                                          EmptyOptional(),
-                                                          node.name().c_str());
+        tensorIndexes.emplace_back(node.input(2));
+
+        IConnectableLayer* biasLayer = m_Network->AddConstantLayer(biasTensor.first);
+        biasLayer->GetOutputSlot(0).SetTensorInfo(biasTensor.first.GetInfo());
+        biasLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(2u));
     }
+
     ARMNN_ASSERT(layer != nullptr);
 
     auto outputInfo = ComputeOutputInfo({ node.output(0) }, layer,
@@ -1081,7 +1082,7 @@ void OnnxParserImpl::AddConvLayerWithDepthwiseConv(const onnx::NodeProto& node,
 
     // register the input connection slots for the layer, connections are made after all layers have been created
     // only the tensors for the inputs are relevant, exclude the const tensors
-    RegisterInputSlots(layer, {node.input(0)});
+    RegisterInputSlots(layer, tensorIndexes);
 
     // register the output connection slots for the layer, connections are made after all layers have been created
     RegisterOutputSlots(layer, {node.output(0)});
-- 
cgit v1.2.1
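
For reference, the net effect of the change above is that AddDepthwiseConvolution2dLayer is now called with only the descriptor and the layer name, while the weights (and optional bias) are added as ConstantLayers whose outputs are connected to input slots 1 and 2 of the depthwise layer; those tensor ids are then passed to RegisterInputSlots alongside the activation input. Below is a minimal sketch of building a depthwise convolution against this descriptor-only overload of the public INetwork API. The shapes, values and layer names are illustrative assumptions and are not taken from the patch.

// Sketch: DepthwiseConvolution2d with weights supplied as a ConstantLayer
// on input slot 1 (the ConstTensorsAsInput pattern used in the patch above).
// Shapes and data are illustrative only.
#include <armnn/INetwork.hpp>
#include <armnn/Descriptors.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>

#include <vector>

armnn::INetworkPtr BuildDepthwiseConvExample()
{
    using namespace armnn;

    INetworkPtr network = INetwork::Create();

    // NHWC activation input: 1x8x8x4 (illustrative shape)
    TensorInfo inputInfo({1, 8, 8, 4}, DataType::Float32);
    IConnectableLayer* input = network->AddInputLayer(0, "input");
    input->GetOutputSlot(0).SetTensorInfo(inputInfo);

    // Depthwise weights in ArmNN's [1, H, W, O] layout: 3x3, channel multiplier 1
    TensorInfo weightsInfo({1, 3, 3, 4}, DataType::Float32, 0.0f, 0, true);
    std::vector<float> weightsData(weightsInfo.GetNumElements(), 0.1f);
    ConstTensor weights(weightsInfo, weightsData);

    DepthwiseConvolution2dDescriptor desc;
    desc.m_StrideX     = 1;
    desc.m_StrideY     = 1;
    desc.m_PadLeft     = 1;   // SAME padding for a 3x3 kernel
    desc.m_PadRight    = 1;
    desc.m_PadTop      = 1;
    desc.m_PadBottom   = 1;
    desc.m_DataLayout  = DataLayout::NHWC;
    desc.m_BiasEnabled = false;

    // Descriptor-only overload: weights arrive through input slot 1
    // (a bias ConstantLayer would connect to slot 2 when m_BiasEnabled is true)
    IConnectableLayer* depthwise =
        network->AddDepthwiseConvolution2dLayer(desc, "depthwise");

    IConnectableLayer* weightsLayer = network->AddConstantLayer(weights, "weights");
    weightsLayer->GetOutputSlot(0).SetTensorInfo(weightsInfo);
    weightsLayer->GetOutputSlot(0).Connect(depthwise->GetInputSlot(1));

    input->GetOutputSlot(0).Connect(depthwise->GetInputSlot(0));

    // With SAME padding and stride 1 the spatial size is preserved
    TensorInfo outputInfo({1, 8, 8, 4}, DataType::Float32);
    depthwise->GetOutputSlot(0).SetTensorInfo(outputInfo);

    IConnectableLayer* output = network->AddOutputLayer(0, "output");
    depthwise->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    return network;
}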