diff options
author | Derek Lamberti <derek.lamberti@arm.com> | 2019-12-17 09:25:19 +0000 |
---|---|---|
committer | Kevin May <kevin.may@arm.com> | 2019-12-18 10:18:28 +0000 |
commit | 24c3bca3e5607eddc88c028d41532f5d419c02dd (patch) | |
tree | 83f07601a217860209c91af17ff7730fcf2562e2 | |
parent | 93e023b9917f695e4e18f8c6ae8c4e1c84ba3b37 (diff) | |
download | armnn-24c3bca3e5607eddc88c028d41532f5d419c02dd.tar.gz |
IVGCVSW-4293 Fix multiple Concat issues.
* Fix issue with InputLayer or ConstantLayer being used as inputs to Concat.
* Fix issue with the same input being used multiple times for the same Concat.
* Fix issue where an input is used by multiple Concats.
Change-Id: Id4819aeec5a40e2afa0351838ba082b9f74aba33
Signed-off-by: Derek Lamberti <derek.lamberti@arm.com>
-rw-r--r-- | src/armnn/layers/ConcatLayer.cpp | 16 |
1 file changed, 12 insertions, 4 deletions
diff --git a/src/armnn/layers/ConcatLayer.cpp b/src/armnn/layers/ConcatLayer.cpp index 9b1785850a..0f847eae32 100644 --- a/src/armnn/layers/ConcatLayer.cpp +++ b/src/armnn/layers/ConcatLayer.cpp @@ -68,9 +68,17 @@ void ConcatLayer::CreateTensors(const FactoryType& factory) auto CreateSubTensor = [&]() { - // Make sure quantization parameters are in the same space - if (parentInfo.IsTypeSpaceMatch(info) && - factoryId == slot->GetTensorHandleFactoryId()) + // Make sure: + // 1) quantization parameters are in the same space + // 2) the same TensorHandleFactory is used for input and Concat layer output + // 3) the input does not come from a Constant layer or input layer + // 4) the input is only read by this concat layer + if (slot && + parentInfo.IsTypeSpaceMatch(info) && //(1) + factoryId == slot->GetTensorHandleFactoryId() && //(2) + slot->GetOwningLayer().GetType() != LayerType::Constant && //(3) + slot->GetOwningLayer().GetType() != LayerType::Input && //(3) + slot->GetNumConnections() == 1) //(4) { return factory.CreateSubTensorHandle(*parentTensor, info.GetShape(), @@ -93,7 +101,7 @@ void ConcatLayer::CreateTensors(const FactoryType& factory) // Ensure that ALL inputs can be substituted with valid sub-tensors if (subTensors.size() < numInputSlots) { - continue; // Don't optimize this Merge layer with sub-tensors + continue; // Don't optimize this Concat layer with sub-tensors } // Substitute input tensors with sub-tensors by replacing the output tensors on the connected layers. |