ArmNN 23.05 — source listing of ConcatLayer.cpp (extracted from the generated documentation).
1 //
2 // Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 #include "ConcatLayer.hpp"
6 #include "LayerCloneBase.hpp"
7 
#include <armnn/TypesUtils.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
#include <armnn/backends/WorkloadData.hpp>
#include <armnn/backends/WorkloadFactory.hpp>

#include <algorithm>
#include <memory>
#include <queue>
#include <type_traits>
14 
15 namespace armnn
16 {
17 
/// Constructs a ConcatLayer: one input slot per view described by the
/// OriginsDescriptor and a single output slot for the concatenated tensor.
ConcatLayer::ConcatLayer(const OriginsDescriptor& param, const char* name)
    : LayerWithParameters(param.GetNumViews(), 1, LayerType::Concat, param, name)
{
}
22 
23 std::unique_ptr<IWorkload> ConcatLayer::CreateWorkload(const IWorkloadFactory& factory) const
24 {
25  ConcatQueueDescriptor descriptor;
26 
27  // Copies the view origins to the descriptor.
28  descriptor.m_ViewOrigins.reserve(m_Param.GetNumViews());
29  for (unsigned int i = 0; i < m_Param.GetNumViews(); ++i)
30  {
31  descriptor.m_ViewOrigins.emplace_back(
32  std::vector<unsigned int>(m_Param.GetViewOrigin(i), m_Param.GetViewOrigin(i) + m_Param.GetNumDimensions()));
33  }
34  SetAdditionalInfo(descriptor);
35 
36  return factory.CreateWorkload(LayerType::Concat, descriptor, PrepInfoAndDesc(descriptor));
37 }
38 
39 template<typename FactoryType>
40 void ConcatLayer::CreateTensors(const TensorHandleFactoryRegistry& registry,
41  const FactoryType& factory,
42  bool isMemoryManaged)
43 {
44  //If sub tensors are supported then the concat
45  //just needs to make sure that the outputs of the prev layer
46  //are made subtensors of the output of the concat layer.
47  m_OutputHandlers[0].CreateTensorHandles(factory, isMemoryManaged);
48 
49  if (factory.SupportsSubTensors())
50  {
51  // check if concat is along the x or y (2 innermost dimensions)
52  uint32_t concatAxis = m_Param.GetConcatAxis();
53  auto numberOfDimensions = m_Param.GetNumDimensions();
54  bool isConcatOnXorY = m_Param.GetNumDimensions() >= 3
55  && ((concatAxis == numberOfDimensions - 1) || (concatAxis == numberOfDimensions - 2));
56 
58 
59  std::queue<ConcatLayer*> m_ConcatLayers;
60 
61  m_ConcatLayers.push(this);
62  while (!m_ConcatLayers.empty())
63  {
64  ConcatLayer* currentLayer = m_ConcatLayers.front();
65  ITensorHandle* parentTensor = currentLayer->GetOutputHandler(0).GetData();
66  const TensorInfo& parentInfo = currentLayer->GetOutputHandler(0).GetTensorInfo();
67  m_ConcatLayers.pop();
68 
69  const unsigned int numInputSlots = currentLayer->GetNumInputSlots();
70 
71  // if concat along x or y (2 innermost dimensions) and the previous layers do not require padding
72  bool canUseSubTensorOnXorY = true;
73  bool isTensorHandleFactory = std::is_same<armnn::ITensorHandleFactory, FactoryType>::value;
74  if (isTensorHandleFactory)
75  {
76  for (unsigned int i = 0; i < numInputSlots; ++i)
77  {
78  OutputSlot* slot = currentLayer->GetInputSlot(i).GetConnectedOutputSlot();
79  ITensorHandleFactory* handleFactory = registry.GetFactory(factoryId);
80  std::vector<Capability> capabilities =
81  handleFactory->GetCapabilities(&(slot->GetOwningLayer()),
82  currentLayer,
84  if (isConcatOnXorY)
85  {
86  canUseSubTensorOnXorY = false;
87  if (capabilities.empty())
88  {
89  canUseSubTensorOnXorY = true;
90  }
91  }
92 
93  // Splitter layer outputs are subtensors on the inputs whereas concat inputs are subtensors on
94  // the output. If the parent is a Splitter layer we cannot use subtensors.
95  if ((PolymorphicDowncast<const Layer*>(&(slot->GetOwningLayer())))->GetType() == LayerType::Splitter
96  && (PolymorphicDowncast<const Layer*>(currentLayer))->GetType() == LayerType::Concat)
97  {
98  canUseSubTensorOnXorY = false;
99  }
100 
101  if (!canUseSubTensorOnXorY)
102  {
103  break;
104  }
105  }
106  }
107 
108  // First go through all the input slots and verify that we can sub-tensor all the inputs.
109  std::vector<std::unique_ptr<ITensorHandle>> subTensors(0);
110  subTensors.reserve(numInputSlots);
111  for (unsigned int i = 0; i < numInputSlots; ++i)
112  {
113  OutputSlot* slot = currentLayer->GetInputSlot(i).GetConnectedOutputSlot();
114  const TensorInfo& info = slot->GetTensorInfo();
115 
116  auto CreateSubTensor = [&]()
117  {
118  // Make sure:
119  // 1) quantization parameters are in the same space
120  // 2) the same TensorHandleFactory is used for input and Concat layer output
121  // 3) the input does not come from a Constant layer or input layer
122  // 4) the input is only read by this concat layer
123  // 5) if concat along x or y (2 innermost dimensions) and the previous layers do not require padding
124  if (slot &&
125  parentInfo.IsTypeSpaceMatch(info) && //(1)
126  factoryId == slot->GetTensorHandleFactoryId() && //(2)
127  slot->GetOwningLayer().GetType() != LayerType::Constant && //(3)
128  slot->GetOwningLayer().GetType() != LayerType::Input && //(3)
129  slot->GetNumConnections() == 1 &&
130  canUseSubTensorOnXorY) //(5)
131  {
133  return factory.CreateSubTensorHandle(*parentTensor,
134  info.GetShape(),
135  currentLayer->m_Param.GetViewOrigin(i));
137  }
138  return std::unique_ptr<ITensorHandle>();
139  };
140 
141  auto subTensor = CreateSubTensor();
142  if (!subTensor)
143  {
144  break; //Failed to create a valid sub-tensor, so stop trying with the rest of the inputs.
145  }
146  else
147  {
148  subTensors.push_back(std::move(subTensor)); // store the valid sub-tensor.
149  }
150  }
151 
152  // Ensure that ALL inputs can be substituted with valid sub-tensors
153  if (subTensors.size() < numInputSlots)
154  {
155  continue; // Don't optimize this Concat layer with sub-tensors
156  }
157 
158  // Substitute input tensors with sub-tensors by replacing the output tensors on the connected layers.
159  unsigned int i=0;
160  for (auto& subTensor : subTensors)
161  {
162  OutputSlot* slot = currentLayer->GetInputSlot(i).GetConnectedOutputSlot();
163  OutputHandler& outputHandler = slot->GetOutputHandler();
164 
165  ARMNN_ASSERT_MSG(subTensor, "ConcatLayer: Expected a valid sub-tensor for substitution.");
166  outputHandler.SetData(std::move(subTensor));
167 
168  Layer& inputLayer = slot->GetOwningLayer();
169  if (inputLayer.GetType() == LayerType::Concat)
170  {
171  // Continue with the substitution if the connected inputs are also concat layers
172  m_ConcatLayers.push(PolymorphicDowncast<ConcatLayer*>(&inputLayer));
173  }
174  ++i;
175  }
176  }
177  }
178 }
179 
181  const IWorkloadFactory& workloadFactory,
182  const bool isMemoryManaged)
183 {
184  OutputSlot& slot = GetOutputSlot(0);
186 
187  if (factoryId == ITensorHandleFactory::LegacyFactoryId)
188  {
189  CreateTensors(registry, workloadFactory, isMemoryManaged);
190  }
191  else
192  {
193  ITensorHandleFactory* handleFactory = registry.GetFactory(factoryId);
194  ARMNN_ASSERT(handleFactory);
195  CreateTensors(registry, *handleFactory, isMemoryManaged);
196  }
197 }
198 
200 {
201  return CloneBase<ConcatLayer>(graph, m_Param, GetName());
202 }
203 
/// Infers the single output shape of the concatenation from the input shapes
/// and the view origins stored in the layer parameters.
///
/// Validates that every input has the expected rank, that the views start at
/// the origin, that no two views overlap, and that the views leave no holes.
///
/// @param inputShapes One shape per view; size must equal GetNumViews().
/// @return A one-element vector holding the output shape (the views' extents).
/// @throws LayerValidationException on rank mismatch, overlap, gaps, or a
///         view set that does not start at the origin.
std::vector<TensorShape> ConcatLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
    ARMNN_ASSERT(inputShapes.size() == m_Param.GetNumViews());

    // Every input must have the same number of dimensions as the descriptor.
    unsigned int numDims = m_Param.GetNumDimensions();
    for (unsigned int i=0; i< inputShapes.size(); i++)
    {
        auto& inputShape = inputShapes[i];

        ConditionalThrowIfNotEqual<LayerValidationException>(
            "ConcatLayer: Num Dimensions must match all inputs.",
            numDims,
            inputShape.GetNumDimensions());
    }

    // Finds the bounding box (extents) of all the views.
    // NOTE(review): extentMin is zero-initialized, so std::min below can never
    // raise it above 0 — the "starts at the origin" check that follows looks
    // vacuous; gaps are still caught by the volume check. Confirm intent.
    std::vector<unsigned int> extentMin(numDims);
    std::vector<unsigned int> extentMax(numDims);
    for (unsigned int i = 0; i < inputShapes.size(); i++)
    {
        const uint32_t* origin = m_Param.GetViewOrigin(i);
        const armnn::TensorShape& shape = inputShapes[i];
        for (unsigned int d = 0; d < numDims; d++)
        {
            extentMin[d] = std::min(extentMin[d], origin[d]);
            extentMax[d] = std::max(extentMax[d], origin[d] + shape[d]);
        }
    }

    // Checks that the bounding box starts at the origin.
    if (!std::all_of(extentMin.begin(), extentMin.end(), [](unsigned int s) { return s == 0; }))
    {
        throw LayerValidationException("ConcatLayer: there is no view that starts at the origin");
    }

    // Checks that there are no overlaps of views (this would lead to undefined output at those locations).
    // Checks each pair of views against each other
    // (and doesn't bother to check against self, or check the same pair both ways round).
    for (unsigned int a = 0; a < inputShapes.size(); a++)
    {
        const uint32_t* aOrigin = m_Param.GetViewOrigin(a);
        const armnn::TensorShape& aShape = inputShapes[a];
        for (unsigned int b = 0; b < a; b++)
        {
            const uint32_t* bOrigin = m_Param.GetViewOrigin(b);
            const armnn::TensorShape& bShape = inputShapes[b];

            // Two axis-aligned boxes overlap iff their intervals overlap on
            // every axis; stop scanning axes as soon as one is disjoint.
            bool allAxesOverlap = true;
            for (unsigned int d = 0; d < numDims && allAxesOverlap; d++)
            {
                unsigned int a1 = aOrigin[d];
                unsigned int a2 = aOrigin[d] + aShape[d];

                unsigned int b1 = bOrigin[d];
                unsigned int b2 = bOrigin[d] + bShape[d];

                if (a2 <= b1 || b2 <= a1)
                {
                    allAxesOverlap = false;
                }
            }
            if (allAxesOverlap)
            {
                throw LayerValidationException("ConcatLayer: Some views overlap.");
            }
        }
    }

    // Checks that there are no "holes", i.e. regions of the output which is not covered by a view.
    // Because we already checked that there are no overlaps, this can be done simply by checking that
    // the total 'volume' of the views is the same as the output.
    unsigned int totalViewsVolume = 0;
    for (unsigned int i = 0; i < inputShapes.size(); i++)
    {
        totalViewsVolume += inputShapes[i].GetNumElements();
    }
    unsigned int outputVolume = 1;
    for (unsigned int d = 0; d < numDims; d++)
    {
        outputVolume *= (extentMax[d] - extentMin[d]);
    }

    ConditionalThrowIfNotEqual<LayerValidationException>(
        "ConcatLayer: there are some gaps between views",
        totalViewsVolume,
        outputVolume);

    return std::vector<TensorShape>({ TensorShape({numDims, extentMax.data()}) });
}
293 
295 {
296  // Validates Concat layer.
297  ConditionalThrowIfNotEqual<LayerValidationException>(
298  "ConcatLayer: Num Inputs must match num views.",
300  GetNumInputSlots());
301 
303 
304  const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
305 
307 
308  std::vector<TensorShape> inputShapes;
309  for (unsigned int i = 0; i < GetNumInputSlots(); ++i)
310  {
311  inputShapes.push_back(GetInputSlot(i).GetConnection()->GetTensorInfo().GetShape());
312  }
313 
314  auto inferredShapes = InferOutputShapes(inputShapes);
315 
316  ARMNN_ASSERT(inferredShapes.size() == 1);
317 
318  ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "ConcatLayer");
319 }
320 
322 {
323  strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
324 }
325 
} // namespace armnn
armnn::ConcatLayer::ConcatLayer
ConcatLayer(const OriginsDescriptor &param, const char *name)
Constructor to create a ConcatLayer.
Definition: ConcatLayer.cpp:18
armnn::ConcatLayer::InferOutputShapes
std::vector< TensorShape > InferOutputShapes(const std::vector< TensorShape > &inputShapes) const override
By default returns inputShapes if the number of inputs are equal to number of outputs,...
Definition: ConcatLayer.cpp:204
armnn::OutputSlot
Definition: Layer.hpp:87
armnn::OriginsDescriptor::GetNumViews
uint32_t GetNumViews() const
Get the number of views.
Definition: Descriptors.cpp:187
armnn::OutputHandler::GetTensorInfo
const TensorInfo & GetTensorInfo() const
Gets the matching TensorInfo for the output.
Definition: OutputHandler.hpp:42
armnn::OriginsDescriptor::GetNumDimensions
uint32_t GetNumDimensions() const
Get the number of dimensions.
Definition: Descriptors.cpp:192
armnn::ConcatLayer::CreateTensorHandles
virtual void CreateTensorHandles(const TensorHandleFactoryRegistry &registry, const IWorkloadFactory &factory, const bool IsMemoryManaged=true) override
Set the outputs to be appropriate sub tensors of the input if sub tensors are supported otherwise cre...
Definition: ConcatLayer.cpp:180
armnn::InputSlot::GetConnectedOutputSlot
const OutputSlot * GetConnectedOutputSlot() const
Definition: Layer.hpp:56
CHECK_LOCATION
#define CHECK_LOCATION()
Definition: Exceptions.hpp:203
armnn::LayerType::Input
@ Input
ConcatLayer.hpp
armnn::ITensorHandleFactory::LegacyFactoryId
static const FactoryId LegacyFactoryId
Definition: ITensorHandleFactory.hpp:50
armnn::TensorHandleFactoryRegistry
Definition: TensorHandleFactoryRegistry.hpp:23
armnn::Layer::VerifyLayerConnections
void VerifyLayerConnections(unsigned int expectedConnections, const CheckLocation &location) const
Definition: Layer.cpp:378
armnn::InputSlot::GetConnection
const IOutputSlot * GetConnection() const override
Definition: Layer.hpp:206
armnn::ConcatLayer::CreateWorkload
virtual std::unique_ptr< IWorkload > CreateWorkload(const IWorkloadFactory &factory) const override
Makes a workload for the Concat type.
Definition: ConcatLayer.cpp:23
armnn::ConcatLayer
This layer represents a merge operation.
Definition: ConcatLayer.hpp:13
PolymorphicDowncast.hpp
armnn::OriginsDescriptor::GetViewOrigin
const uint32_t * GetViewOrigin(uint32_t idx) const
Return the view origin at the int value idx.
Definition: Descriptors.cpp:197
armnn::LayerWithParameters
Definition: LayerWithParameters.hpp:14
armnn::Layer::VerifyShapeInferenceType
void VerifyShapeInferenceType(const TensorShape &outputShape, ShapeInferenceMethod shapeInferenceMethod)
Definition: Layer.cpp:491
armnn::IWorkloadFactory
Definition: WorkloadFactory.hpp:22
armnn::OutputSlot::GetOutputHandler
const OutputHandler & GetOutputHandler() const
Definition: Layer.hpp:126
WorkloadFactory.hpp
armnn::OriginsDescriptor::GetConcatAxis
unsigned int GetConcatAxis() const
Get the concatenation axis value.
Definition: Descriptors.cpp:162
armnn::OutputHandler::GetData
ITensorHandle * GetData() const
Gets the allocated tensor memory.
Definition: OutputHandler.hpp:46
armnn::Layer::SetAdditionalInfo
void SetAdditionalInfo(QueueDescriptor &descriptor) const
Definition: Layer.cpp:274
armnn::Layer::m_ShapeInferenceMethod
ShapeInferenceMethod m_ShapeInferenceMethod
Definition: Layer.hpp:427
armnn::Layer::ValidateAndCopyShape
void ValidateAndCopyShape(const TensorShape &outputShape, const TensorShape &inferredShape, const ShapeInferenceMethod shapeInferenceMethod, const std::string &layerName, const unsigned int outputSlotIndex=0)
Definition: Layer.cpp:422
armnn
Copyright (c) 2021 ARM Limited and Contributors.
Definition: 01_00_quick_start.dox:6
armnn::ITensorHandleFactory::GetCapabilities
virtual std::vector< Capability > GetCapabilities(const IConnectableLayer *layer, const IConnectableLayer *connectedLayer, CapabilityClass capabilityClass)
Definition: ITensorHandleFactory.hpp:93
armnn::IStrategy
Definition: IStrategy.hpp:16
armnn::OutputSlot::GetTensorInfo
const TensorInfo & GetTensorInfo() const override
Definition: Layer.cpp:92
armnn::ITensorHandle
Definition: ITensorHandle.hpp:15
armnn::OutputSlot::GetTensorHandleFactoryId
ITensorHandleFactory::FactoryId GetTensorHandleFactoryId() const
Definition: Layer.cpp:205
armnn::TensorShape
Definition: Tensor.hpp:20
armnn::LayerType
LayerType
When adding a new layer, adapt also the LastLayer enum value in the enum class LayerType below.
Definition: Types.hpp:479
armnn::Layer::GetNumInputSlots
unsigned int GetNumInputSlots() const override
Returns the number of connectable input slots.
Definition: Layer.hpp:321
armnn::IOutputSlot::GetTensorInfo
virtual const TensorInfo & GetTensorInfo() const =0
armnn::Layer::GetOutputSlot
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
Definition: Layer.hpp:326
armnn::Layer::GetOutputHandler
const OutputHandler & GetOutputHandler(unsigned int i=0) const
Definition: Layer.hpp:232
armnn::TensorInfo::IsTypeSpaceMatch
bool IsTypeSpaceMatch(const TensorInfo &other) const
Check that the types are the same and, if quantize, that the quantization parameters are the same.
Definition: Tensor.cpp:432
armnn::LayerWithParameters< OriginsDescriptor >::PrepInfoAndDesc
WorkloadInfo PrepInfoAndDesc(QueueDescriptor &descriptor) const
Helper function to reduce duplication in *Layer::CreateWorkload.
Definition: LayerWithParameters.hpp:44
armnn::ConcatLayer::Clone
ConcatLayer * Clone(Graph &graph) const override
Creates a dynamically-allocated copy of this layer.
Definition: ConcatLayer.cpp:199
armnn::ConcatQueueDescriptor
Definition: WorkloadData.hpp:130
armnn::TensorInfo
Definition: Tensor.hpp:152
armnn::LayerType::Constant
@ Constant
armnn::Layer::Layer
Layer(unsigned int numInputSlots, unsigned int numOutputSlots, LayerType type, const char *name)
Definition: Layer.cpp:247
armnn::LayerValidationException
Definition: Exceptions.hpp:105
armnn::CapabilityClass::PaddingRequired
@ PaddingRequired
armnn::OutputSlot::GetOwningLayer
Layer & GetOwningLayer() const
Definition: Layer.hpp:119
armnn::TensorInfo::GetShape
const TensorShape & GetShape() const
Definition: Tensor.hpp:191
armnn::TensorHandleFactoryRegistry::GetFactory
ITensorHandleFactory * GetFactory(ITensorHandleFactory::FactoryId id) const
Find a TensorHandleFactory by Id Returns nullptr if not found.
Definition: TensorHandleFactoryRegistry.cpp:39
armnn::Layer::m_OutputHandlers
std::vector< OutputHandler > m_OutputHandlers
Definition: Layer.hpp:426
ARMNN_ASSERT_MSG
#define ARMNN_ASSERT_MSG(COND, MSG)
Definition: Assert.hpp:15
LayerCloneBase.hpp
armnn::ConcatLayer::ValidateTensorShapesFromInputs
void ValidateTensorShapesFromInputs() override
Check if the input tensor shape(s) will lead to a valid configuration of ConcatLayer.
Definition: ConcatLayer.cpp:294
armnn::Layer::GetInputSlot
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
Definition: Layer.hpp:324
armnn::ConcatLayer::ExecuteStrategy
void ExecuteStrategy(IStrategy &strategy) const override
Apply a visitor to this layer.
Definition: ConcatLayer.cpp:321
ARMNN_NO_DEPRECATE_WARN_BEGIN
#define ARMNN_NO_DEPRECATE_WARN_BEGIN
Definition: Deprecated.hpp:33
armnn::IStrategy::ExecuteStrategy
virtual void ExecuteStrategy(const IConnectableLayer *layer, const armnn::BaseDescriptor &descriptor, const std::vector< armnn::ConstTensor > &constants, const char *name, const armnn::LayerBindingId id=0)=0
armnn::OriginsDescriptor
An OriginsDescriptor for the ConcatLayer.
Definition: Descriptors.hpp:201
armnn::Graph
Definition: Graph.hpp:30
armnn::ITensorHandleFactory
Definition: ITensorHandleFactory.hpp:46
ARMNN_ASSERT
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14
armnn::IWorkloadFactory::CreateWorkload
virtual std::unique_ptr< IWorkload > CreateWorkload(LayerType type, const QueueDescriptor &descriptor, const WorkloadInfo &info) const
Definition: WorkloadFactory.cpp:1590
armnn::LayerType::Concat
@ Concat
armnn::Layer::GetName
const char * GetName() const override
Returns the name of the layer.
Definition: Layer.hpp:319
TypesUtils.hpp
armnn::LayerType::Splitter
@ Splitter
WorkloadData.hpp
armnn::ITensorHandleFactory::FactoryId
std::string FactoryId
Definition: ITensorHandleFactory.hpp:49
armnn::LayerWithParameters< OriginsDescriptor >::GetParameters
const OriginsDescriptor & GetParameters() const override
Definition: LayerWithParameters.hpp:19
armnn::LayerWithParameters< OriginsDescriptor >::m_Param
OriginsDescriptor m_Param
The parameters for the layer (not including tensor-valued weights etc.).
Definition: LayerWithParameters.hpp:52
armnn::ConcatQueueDescriptor::m_ViewOrigins
std::vector< ViewOrigin > m_ViewOrigins
Definition: WorkloadData.hpp:143
armnn::BoostLogSeverityMapping::info
@ info
ARMNN_NO_DEPRECATE_WARN_END
#define ARMNN_NO_DEPRECATE_WARN_END
Definition: Deprecated.hpp:34