aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorDerek Lamberti <derek.lamberti@arm.com>2019-12-17 09:25:19 +0000
committerDerek Lamberti <derek.lamberti@arm.com>2019-12-17 10:11:25 +0000
commit7f834729c3d967f5b0f774c13eb4ebb3bf9041d5 (patch)
tree787a1d6139534a8924963b82d31cbab0a75461a6
parentdf2a9b9a978ef50752923d0d730fa36e9f6d357a (diff)
downloadarmnn-7f834729c3d967f5b0f774c13eb4ebb3bf9041d5.tar.gz
IVGCVSW-4293 Fix multiple Concat issues.
* Fix issue with InputLayer or ConstantLayer being used as inputs to Concat.
* Fix issue with same input being used multiple times for same Concat.
* Fix issue where input is used by multiple concats.

Change-Id: Id4819aeec5a40e2afa0351838ba082b9f74aba33
Signed-off-by: Derek Lamberti <derek.lamberti@arm.com>
-rw-r--r--src/armnn/layers/ConcatLayer.cpp16
1 file changed, 12 insertions(+), 4 deletions(-)
diff --git a/src/armnn/layers/ConcatLayer.cpp b/src/armnn/layers/ConcatLayer.cpp
index 9b1785850a..0f847eae32 100644
--- a/src/armnn/layers/ConcatLayer.cpp
+++ b/src/armnn/layers/ConcatLayer.cpp
@@ -68,9 +68,17 @@ void ConcatLayer::CreateTensors(const FactoryType& factory)
auto CreateSubTensor = [&]()
{
- // Make sure quantization parameters are in the same space
- if (parentInfo.IsTypeSpaceMatch(info) &&
- factoryId == slot->GetTensorHandleFactoryId())
+ // Make sure:
+ // 1) quantization parameters are in the same space
+ // 2) the same TensorHandleFactory is used for input and Concat layer output
+ // 3) the input does not come from a Constant layer or input layer
+ // 4) the input is only read by this concat layer
+ if (slot &&
+ parentInfo.IsTypeSpaceMatch(info) && //(1)
+ factoryId == slot->GetTensorHandleFactoryId() && //(2)
+ slot->GetOwningLayer().GetType() != LayerType::Constant && //(3)
+ slot->GetOwningLayer().GetType() != LayerType::Input && //(3)
+ slot->GetNumConnections() == 1) //(4)
{
return factory.CreateSubTensorHandle(*parentTensor,
info.GetShape(),
@@ -93,7 +101,7 @@ void ConcatLayer::CreateTensors(const FactoryType& factory)
// Ensure that ALL inputs can be substituted with valid sub-tensors
if (subTensors.size() < numInputSlots)
{
- continue; // Don't optimize this Merge layer with sub-tensors
+ continue; // Don't optimize this Concat layer with sub-tensors
}
// Substitute input tensors with sub-tensors by replacing the output tensors on the connected layers.