ArmNN
 23.08
ConcatLayer.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
#include "ConcatLayer.hpp"
#include "LayerCloneBase.hpp"

#include <armnn/TypesUtils.hpp>
#include <armnn/backends/WorkloadData.hpp>
#include <armnn/backends/WorkloadFactory.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>

#include <algorithm>
#include <limits>
#include <queue>
14 
15 namespace armnn
16 {
17 
18 ConcatLayer::ConcatLayer(const OriginsDescriptor& param, const char* name)
19  : LayerWithParameters(param.GetNumViews(), 1, LayerType::Concat, param, name)
20 {
21 }
22 
23 std::unique_ptr<IWorkload> ConcatLayer::CreateWorkload(const IWorkloadFactory& factory) const
24 {
25  ConcatQueueDescriptor descriptor;
26 
27  // Copies the view origins to the descriptor.
28  descriptor.m_ViewOrigins.reserve(m_Param.GetNumViews());
29  for (unsigned int i = 0; i < m_Param.GetNumViews(); ++i)
30  {
31  descriptor.m_ViewOrigins.emplace_back(
32  std::vector<unsigned int>(m_Param.GetViewOrigin(i), m_Param.GetViewOrigin(i) + m_Param.GetNumDimensions()));
33  }
34  SetAdditionalInfo(descriptor);
35 
36  return factory.CreateWorkload(LayerType::Concat, descriptor, PrepInfoAndDesc(descriptor));
37 }
38 
39 template<typename FactoryType>
40 void ConcatLayer::CreateTensors(const TensorHandleFactoryRegistry& registry,
41  const FactoryType& factory,
42  bool isMemoryManaged)
43 {
44  //If sub tensors are supported then the concat
45  //just needs to make sure that the outputs of the prev layer
46  //are made subtensors of the output of the concat layer.
47  m_OutputHandlers[0].CreateTensorHandles(factory, isMemoryManaged);
48 
49  if (factory.SupportsSubTensors())
50  {
51  // check if concat is along the x or y (2 innermost dimensions)
52  uint32_t concatAxis = m_Param.GetConcatAxis();
53  auto numberOfDimensions = m_Param.GetNumDimensions();
54  bool isConcatOnXorY = m_Param.GetNumDimensions() >= 3
55  && ((concatAxis == numberOfDimensions - 1) || (concatAxis == numberOfDimensions - 2));
56 
58 
59  std::queue<ConcatLayer*> m_ConcatLayers;
60 
61  m_ConcatLayers.push(this);
62  while (!m_ConcatLayers.empty())
63  {
64  ConcatLayer* currentLayer = m_ConcatLayers.front();
65  ITensorHandle* parentTensor = currentLayer->GetOutputHandler(0).GetData();
66  const TensorInfo& parentInfo = currentLayer->GetOutputHandler(0).GetTensorInfo();
67  m_ConcatLayers.pop();
68 
69  const unsigned int numInputSlots = currentLayer->GetNumInputSlots();
70 
71  // if concat along x or y (2 innermost dimensions) and the previous layers do not require padding
72  bool canUseSubTensorOnXorY = true;
73  bool isTensorHandleFactory = std::is_same<armnn::ITensorHandleFactory, FactoryType>::value;
74  if (isTensorHandleFactory)
75  {
76  for (unsigned int i = 0; i < numInputSlots; ++i)
77  {
78  OutputSlot* slot = currentLayer->GetInputSlot(i).GetConnectedOutputSlot();
79  ITensorHandleFactory* handleFactory = registry.GetFactory(factoryId);
80  std::vector<Capability> capabilities =
81  handleFactory->GetCapabilities(&(slot->GetOwningLayer()),
82  currentLayer,
84  if (isConcatOnXorY)
85  {
86  canUseSubTensorOnXorY = false;
87  if (capabilities.empty())
88  {
89  canUseSubTensorOnXorY = true;
90  }
91  }
92 
93  // Splitter layer outputs are subtensors on the inputs whereas concat inputs are subtensors on
94  // the output. If the parent is a Splitter layer we cannot use subtensors.
95  if ((PolymorphicDowncast<const Layer*>(&(slot->GetOwningLayer())))->GetType() == LayerType::Splitter
96  && (PolymorphicDowncast<const Layer*>(currentLayer))->GetType() == LayerType::Concat)
97  {
98  canUseSubTensorOnXorY = false;
99  }
100 
101  if (!canUseSubTensorOnXorY)
102  {
103  break;
104  }
105  }
106  }
107  // First go through all the input slots and verify that we can sub-tensor all the inputs.
108  std::vector<std::unique_ptr<ITensorHandle>> subTensors(0);
109  subTensors.reserve(numInputSlots);
110  for (unsigned int i = 0; i < numInputSlots; ++i)
111  {
112  OutputSlot* slot = currentLayer->GetInputSlot(i).GetConnectedOutputSlot();
113  const TensorInfo& info = currentLayer->GetInputSlot(i).GetTensorInfo();
114 
115  auto CreateSubTensor = [&]()
116  {
117  // Make sure:
118  // 1) quantization parameters are in the same space
119  // 2) the same TensorHandleFactory is used for input and Concat layer output
120  // 3) the input does not come from a Constant layer or input layer
121  // 4) the input is only read by this concat layer
122  // 5) if concat along x or y (2 innermost dimensions) and the previous layers do not require padding
123  // 6) neither the inputs nor the output have an Overridden TensorInfo
124  if (slot &&
125  parentInfo.IsTypeSpaceMatch(info) && //(1)
126  factoryId == slot->GetTensorHandleFactoryId() && //(2)
127  slot->GetOwningLayer().GetType() != LayerType::Constant && //(3)
128  slot->GetOwningLayer().GetType() != LayerType::Input && //(3)
129  slot->GetNumConnections() == 1 &&
130  canUseSubTensorOnXorY && //(5)
132  !currentLayer->GetInputSlot(i).IsTensorInfoOverridden()) //(6)
133  {
135  return factory.CreateSubTensorHandle(*parentTensor,
136  info.GetShape(),
137  currentLayer->m_Param.GetViewOrigin(i));
139  }
140  return std::unique_ptr<ITensorHandle>();
141  };
142 
143  auto subTensor = CreateSubTensor();
144  if (!subTensor)
145  {
146  break; //Failed to create a valid sub-tensor, so stop trying with the rest of the inputs.
147  }
148  else
149  {
150  subTensors.push_back(std::move(subTensor)); // store the valid sub-tensor.
151  }
152  }
153 
154  // Ensure that ALL inputs can be substituted with valid sub-tensors
155  if (subTensors.size() < numInputSlots)
156  {
157  continue; // Don't optimize this Concat layer with sub-tensors
158  }
159 
160  // Substitute input tensors with sub-tensors by replacing the output tensors on the connected layers.
161  unsigned int i=0;
162  for (auto& subTensor : subTensors)
163  {
164  OutputSlot* slot = currentLayer->GetInputSlot(i).GetConnectedOutputSlot();
165  OutputHandler& outputHandler = slot->GetOutputHandler();
166 
167  ARMNN_ASSERT_MSG(subTensor, "ConcatLayer: Expected a valid sub-tensor for substitution.");
168  outputHandler.SetData(std::move(subTensor));
169 
170  Layer& inputLayer = slot->GetOwningLayer();
171  if (inputLayer.GetType() == LayerType::Concat)
172  {
173  // Continue with the substitution if the connected inputs are also concat layers
174  m_ConcatLayers.push(PolymorphicDowncast<ConcatLayer*>(&inputLayer));
175  }
176  ++i;
177  }
178  }
179  }
180 }
181 
183  const IWorkloadFactory& workloadFactory,
184  const bool isMemoryManaged)
185 {
186  OutputSlot& slot = GetOutputSlot(0);
188 
189  if (factoryId == ITensorHandleFactory::LegacyFactoryId)
190  {
191  CreateTensors(registry, workloadFactory, isMemoryManaged);
192  }
193  else
194  {
195  ITensorHandleFactory* handleFactory = registry.GetFactory(factoryId);
196  ARMNN_ASSERT(handleFactory);
197  CreateTensors(registry, *handleFactory, isMemoryManaged);
198  }
199 }
200 
202 {
203  return CloneBase<ConcatLayer>(graph, m_Param, GetName());
204 }
205 
206 std::vector<TensorShape> ConcatLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
207 {
208  ARMNN_ASSERT(inputShapes.size() == m_Param.GetNumViews());
209 
210  unsigned int numDims = m_Param.GetNumDimensions();
211  for (unsigned int i=0; i< inputShapes.size(); i++)
212  {
213  auto& inputShape = inputShapes[i];
214 
215  ConditionalThrowIfNotEqual<LayerValidationException>(
216  "ConcatLayer: Num Dimensions must match all inputs.",
217  numDims,
218  inputShape.GetNumDimensions());
219  }
220 
221  // Finds the bounding box (extents) of all the views.
222  std::vector<unsigned int> extentMin(numDims);
223  std::vector<unsigned int> extentMax(numDims);
224  for (unsigned int i = 0; i < inputShapes.size(); i++)
225  {
226  const uint32_t* origin = m_Param.GetViewOrigin(i);
227  const armnn::TensorShape& shape = inputShapes[i];
228  for (unsigned int d = 0; d < numDims; d++)
229  {
230  extentMin[d] = std::min(extentMin[d], origin[d]);
231  extentMax[d] = std::max(extentMax[d], origin[d] + shape[d]);
232  }
233  }
234 
235  // Checks that the bounding box starts at the origin.
236  if (!std::all_of(extentMin.begin(), extentMin.end(), [](unsigned int s) { return s == 0; }))
237  {
238  throw LayerValidationException("ConcatLayer: there is no view that starts at the origin");
239  }
240 
241  // Checks that there are no overlaps of views (this would lead to undefined output at those locations).
242  // Checks each pair of views against each other
243  // (and doesn't bother to check against self, or check the same pair both ways round).
244  for (unsigned int a = 0; a < inputShapes.size(); a++)
245  {
246  const uint32_t* aOrigin = m_Param.GetViewOrigin(a);
247  const armnn::TensorShape& aShape = inputShapes[a];
248  for (unsigned int b = 0; b < a; b++)
249  {
250  const uint32_t* bOrigin = m_Param.GetViewOrigin(b);
251  const armnn::TensorShape& bShape = inputShapes[b];
252 
253  bool allAxesOverlap = true;
254  for (unsigned int d = 0; d < numDims && allAxesOverlap; d++)
255  {
256  unsigned int a1 = aOrigin[d];
257  unsigned int a2 = aOrigin[d] + aShape[d];
258 
259  unsigned int b1 = bOrigin[d];
260  unsigned int b2 = bOrigin[d] + bShape[d];
261 
262  if (a2 <= b1 || b2 <= a1)
263  {
264  allAxesOverlap = false;
265  }
266  }
267  if (allAxesOverlap)
268  {
269  throw LayerValidationException("ConcatLayer: Some views overlap.");
270  }
271  }
272  }
273 
274  // Checks that there are no "holes", i.e. regions of the output which is not covered by a view.
275  // Because we already checked that there are no overlaps, this can be done simply by checking that
276  // the total 'volume' of the views is the same as the output.
277  unsigned int totalViewsVolume = 0;
278  for (unsigned int i = 0; i < inputShapes.size(); i++)
279  {
280  totalViewsVolume += inputShapes[i].GetNumElements();
281  }
282  unsigned int outputVolume = 1;
283  for (unsigned int d = 0; d < numDims; d++)
284  {
285  outputVolume *= (extentMax[d] - extentMin[d]);
286  }
287 
288  ConditionalThrowIfNotEqual<LayerValidationException>(
289  "ConcatLayer: there are some gaps between views",
290  totalViewsVolume,
291  outputVolume);
292 
293  return std::vector<TensorShape>({ TensorShape({numDims, extentMax.data()}) });
294 }
295 
297 {
298  // Validates Concat layer.
299  ConditionalThrowIfNotEqual<LayerValidationException>(
300  "ConcatLayer: Num Inputs must match num views.",
302  GetNumInputSlots());
303 
305 
306  const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
307 
309 
310  std::vector<TensorShape> inputShapes;
311  for (unsigned int i = 0; i < GetNumInputSlots(); ++i)
312  {
313  inputShapes.push_back(GetInputSlot(i).GetTensorInfo().GetShape());
314  }
315 
316  auto inferredShapes = InferOutputShapes(inputShapes);
317 
318  ARMNN_ASSERT(inferredShapes.size() == 1);
319 
320  ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "ConcatLayer");
321 }
322 
324 {
325  strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
326 }
327 
} // namespace armnn
ARMNN_ASSERT
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14
armnn::OriginsDescriptor::GetConcatAxis
unsigned int GetConcatAxis() const
Get the concatenation axis value.
Definition: Descriptors.cpp:162
armnn::OriginsDescriptor::GetNumViews
uint32_t GetNumViews() const
Get the number of views.
Definition: Descriptors.cpp:187
armnn::LayerType::Splitter
@ Splitter
armnn::ConcatQueueDescriptor
Definition: WorkloadData.hpp:130
armnn::ConcatLayer
This layer represents a merge operation.
Definition: ConcatLayer.hpp:13
armnn::OutputSlot::GetTensorInfo
const TensorInfo & GetTensorInfo() const override
Definition: Layer.cpp:92
WorkloadData.hpp
armnn::OutputSlot::GetOutputHandler
const OutputHandler & GetOutputHandler() const
Definition: Layer.hpp:139
armnn::TensorHandleFactoryRegistry
Definition: TensorHandleFactoryRegistry.hpp:23
armnn::OutputSlot
Definition: Layer.hpp:100
armnn::ConcatLayer::Clone
ConcatLayer * Clone(Graph &graph) const override
Creates a dynamically-allocated copy of this layer.
Definition: ConcatLayer.cpp:201
TypesUtils.hpp
armnn::TensorHandleFactoryRegistry::GetFactory
ITensorHandleFactory * GetFactory(ITensorHandleFactory::FactoryId id) const
Find a TensorHandleFactory by Id Returns nullptr if not found.
Definition: TensorHandleFactoryRegistry.cpp:39
armnn::TensorInfo
Definition: Tensor.hpp:152
armnn::OriginsDescriptor::GetNumDimensions
uint32_t GetNumDimensions() const
Get the number of dimensions.
Definition: Descriptors.cpp:192
CHECK_LOCATION
#define CHECK_LOCATION()
Definition: Exceptions.hpp:203
armnn::Layer::ValidateAndCopyShape
void ValidateAndCopyShape(const TensorShape &outputShape, const TensorShape &inferredShape, const ShapeInferenceMethod shapeInferenceMethod, const std::string &layerName, const unsigned int outputSlotIndex=0)
Definition: Layer.cpp:435
armnn::ITensorHandle
Definition: ITensorHandle.hpp:16
armnn::Layer::GetOutputSlot
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
Definition: Layer.hpp:339
armnn::ConcatLayer::CreateWorkload
virtual std::unique_ptr< IWorkload > CreateWorkload(const IWorkloadFactory &factory) const override
Makes a workload for the Concat type.
Definition: ConcatLayer.cpp:23
ARMNN_NO_DEPRECATE_WARN_BEGIN
#define ARMNN_NO_DEPRECATE_WARN_BEGIN
Definition: Deprecated.hpp:33
armnn::Layer::m_OutputHandlers
std::vector< OutputHandler > m_OutputHandlers
Definition: Layer.hpp:440
armnn::IStrategy
Definition: IStrategy.hpp:16
ARMNN_ASSERT_MSG
#define ARMNN_ASSERT_MSG(COND, MSG)
Definition: Assert.hpp:15
armnn::Layer::GetInputSlot
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
Definition: Layer.hpp:337
armnn::LayerWithParameters< OriginsDescriptor >::GetParameters
const OriginsDescriptor & GetParameters() const override
Definition: LayerWithParameters.hpp:19
WorkloadFactory.hpp
armnn::OutputHandler::GetData
ITensorHandle * GetData() const
Gets the allocated tensor memory.
Definition: OutputHandler.hpp:46
armnn::Layer::Layer
Layer(unsigned int numInputSlots, unsigned int numOutputSlots, LayerType type, const char *name)
Definition: Layer.cpp:247
armnn::LayerWithParameters
Definition: LayerWithParameters.hpp:14
armnn::Layer::GetName
const char * GetName() const override
Returns the name of the layer.
Definition: Layer.hpp:332
armnn::ITensorHandleFactory::LegacyFactoryId
static const FactoryId LegacyFactoryId
Definition: ITensorHandleFactory.hpp:50
armnn::ITensorHandleFactory::GetCapabilities
virtual std::vector< Capability > GetCapabilities(const IConnectableLayer *layer, const IConnectableLayer *connectedLayer, CapabilityClass capabilityClass)
Definition: ITensorHandleFactory.hpp:93
armnn::InputSlot::GetTensorInfo
const TensorInfo & GetTensorInfo() const override
Gets the TensorInfo for this InputSlot.
Definition: Layer.cpp:592
armnn::TensorShape
Definition: Tensor.hpp:20
armnn::ConcatLayer::InferOutputShapes
std::vector< TensorShape > InferOutputShapes(const std::vector< TensorShape > &inputShapes) const override
By default returns inputShapes if the number of inputs are equal to number of outputs,...
Definition: ConcatLayer.cpp:206
armnn::OutputSlot::GetOwningLayer
Layer & GetOwningLayer() const
Definition: Layer.hpp:132
armnn::LayerWithParameters< OriginsDescriptor >::m_Param
OriginsDescriptor m_Param
The parameters for the layer (not including tensor-valued weights etc.).
Definition: LayerWithParameters.hpp:52
armnn::LayerType::Concat
@ Concat
armnn::LayerWithParameters< OriginsDescriptor >::PrepInfoAndDesc
WorkloadInfo PrepInfoAndDesc(QueueDescriptor &descriptor) const
Helper function to reduce duplication in *Layer::CreateWorkload.
Definition: LayerWithParameters.hpp:44
PolymorphicDowncast.hpp
armnn::TensorInfo::IsTypeSpaceMatch
bool IsTypeSpaceMatch(const TensorInfo &other) const
Check that the types are the same and, if quantize, that the quantization parameters are the same.
Definition: Tensor.cpp:432
armnn::LayerValidationException
Definition: Exceptions.hpp:105
armnn::IWorkloadFactory
Definition: WorkloadFactory.hpp:22
armnn::ConcatLayer::ValidateTensorShapesFromInputs
void ValidateTensorShapesFromInputs() override
Check if the input tensor shape(s) will lead to a valid configuration of ConcatLayer.
Definition: ConcatLayer.cpp:296
armnn::Layer::GetOutputHandler
const OutputHandler & GetOutputHandler(unsigned int i=0) const
Definition: Layer.hpp:245
armnn::Layer::VerifyShapeInferenceType
void VerifyShapeInferenceType(const TensorShape &outputShape, ShapeInferenceMethod shapeInferenceMethod)
Definition: Layer.cpp:504
armnn::ITensorHandleFactory
Definition: ITensorHandleFactory.hpp:46
armnn::Layer::SetAdditionalInfo
void SetAdditionalInfo(QueueDescriptor &descriptor) const
Definition: Layer.cpp:287
armnn::ConcatLayer::CreateTensorHandles
virtual void CreateTensorHandles(const TensorHandleFactoryRegistry &registry, const IWorkloadFactory &factory, const bool IsMemoryManaged=true) override
Set the outputs to be appropriate sub tensors of the input if sub tensors are supported otherwise cre...
Definition: ConcatLayer.cpp:182
armnn::InputSlot::IsTensorInfoOverridden
bool IsTensorInfoOverridden() const override
Returns true if this InputSlot has an overridden TensorInfo that was set through a call to SetTensorI...
Definition: Layer.cpp:609
armnn::BoostLogSeverityMapping::info
@ info
armnn::ConcatLayer::ExecuteStrategy
void ExecuteStrategy(IStrategy &strategy) const override
Apply a visitor to this layer.
Definition: ConcatLayer.cpp:323
armnn::Layer::GetNumInputSlots
unsigned int GetNumInputSlots() const override
Returns the number of connectable input slots.
Definition: Layer.hpp:334
armnn::CapabilityClass::PaddingRequired
@ PaddingRequired
armnn::TensorInfo::GetShape
const TensorShape & GetShape() const
Definition: Tensor.hpp:191
ARMNN_NO_DEPRECATE_WARN_END
#define ARMNN_NO_DEPRECATE_WARN_END
Definition: Deprecated.hpp:34
armnn::ConcatQueueDescriptor::m_ViewOrigins
std::vector< ViewOrigin > m_ViewOrigins
Definition: WorkloadData.hpp:143
armnn::OriginsDescriptor
An OriginsDescriptor for the ConcatLayer.
Definition: Descriptors.hpp:201
armnn::OutputSlot::GetTensorHandleFactoryId
ITensorHandleFactory::FactoryId GetTensorHandleFactoryId() const
Definition: Layer.cpp:205
armnn::InputSlot::GetConnectedOutputSlot
const OutputSlot * GetConnectedOutputSlot() const
Definition: Layer.hpp:56
armnn
Copyright (c) 2021 ARM Limited and Contributors.
Definition: 01_00_quick_start.dox:6
armnn::ITensorHandleFactory::FactoryId
std::string FactoryId
Definition: ITensorHandleFactory.hpp:49
armnn::Layer::VerifyLayerConnections
void VerifyLayerConnections(unsigned int expectedConnections, const CheckLocation &location) const
Definition: Layer.cpp:391
armnn::LayerType::Input
@ Input
armnn::OutputHandler::GetTensorInfo
const TensorInfo & GetTensorInfo() const
Gets the matching TensorInfo for the output.
Definition: OutputHandler.hpp:42
armnn::OriginsDescriptor::GetViewOrigin
const uint32_t * GetViewOrigin(uint32_t idx) const
Return the view origin at the int value idx.
Definition: Descriptors.cpp:197
ConcatLayer.hpp
armnn::Layer::m_ShapeInferenceMethod
ShapeInferenceMethod m_ShapeInferenceMethod
Definition: Layer.hpp:441
armnn::LayerType
LayerType
When adding a new layer, adapt also the LastLayer enum value in the enum class LayerType below.
Definition: Types.hpp:483
armnn::OutputSlot::GetConnection
const InputSlot * GetConnection(unsigned int index) const override
Definition: Layer.cpp:75
armnn::ConcatLayer::ConcatLayer
ConcatLayer(const OriginsDescriptor &param, const char *name)
Constructor to create a ConcatLayer.
Definition: ConcatLayer.cpp:18
armnn::Graph
Definition: Graph.hpp:30
armnn::IWorkloadFactory::CreateWorkload
virtual std::unique_ptr< IWorkload > CreateWorkload(LayerType type, const QueueDescriptor &descriptor, const WorkloadInfo &info) const =0
Backends should implement their own CreateWorkload function with a switch statement.
armnn::IStrategy::ExecuteStrategy
virtual void ExecuteStrategy(const IConnectableLayer *layer, const armnn::BaseDescriptor &descriptor, const std::vector< armnn::ConstTensor > &constants, const char *name, const armnn::LayerBindingId id=0)=0
LayerCloneBase.hpp
armnn::LayerType::Constant
@ Constant