ArmNN 20.08
ConcatLayer.cpp
1 //
2 // Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 #include "ConcatLayer.hpp"
6 #include "LayerCloneBase.hpp"
7 
8 #include <armnn/TypesUtils.hpp>
9 #include <armnn/utility/PolymorphicDowncast.hpp>
10 #include <backendsCommon/WorkloadData.hpp>
11 #include <backendsCommon/WorkloadFactory.hpp>
12 
13 #include <queue>
14 
15 namespace armnn
16 {
17 
18 ConcatLayer::ConcatLayer(const OriginsDescriptor& param, const char* name)
19  : LayerWithParameters(param.GetNumViews(), 1, LayerType::Concat, param, name)
20 {
21 }
22 
23 std::unique_ptr<IWorkload> ConcatLayer::CreateWorkload(const IWorkloadFactory& factory) const
24 {
25  ConcatQueueDescriptor descriptor;
26 
27  // Copies the view origins to the descriptor.
28  descriptor.m_ViewOrigins.reserve(m_Param.GetNumViews());
29  for (unsigned int i = 0; i < m_Param.GetNumViews(); ++i)
30  {
31  descriptor.m_ViewOrigins.emplace_back(
32  std::vector<unsigned int>(m_Param.GetViewOrigin(i), m_Param.GetViewOrigin(i) + m_Param.GetNumDimensions()));
33  }
34 
35  return factory.CreateConcat(descriptor, PrepInfoAndDesc(descriptor));
36 }
37 
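A minimal sketch of how the OriginsDescriptor consumed by CreateWorkload() above is typically built, using armnn::CreateDescriptorForConcatenation from armnn/Descriptors.hpp; the helper name MakeConcatDescriptor and the tensor shapes are illustrative and not part of this file:

    #include <armnn/Descriptors.hpp>
    #include <armnn/Tensor.hpp>
    #include <vector>

    // Two 4D inputs concatenated along axis 1 (channels). The computed view origins
    // are {0,0,0,0} and {0,2,0,0}, i.e. the values that CreateWorkload() copies into
    // ConcatQueueDescriptor::m_ViewOrigins.
    armnn::OriginsDescriptor MakeConcatDescriptor()
    {
        std::vector<armnn::TensorShape> shapes{ armnn::TensorShape({ 1, 2, 4, 4 }),
                                                armnn::TensorShape({ 1, 3, 4, 4 }) };

        // One view origin is computed per input shape along the given axis.
        return armnn::CreateDescriptorForConcatenation(shapes.begin(), shapes.end(), 1);
    }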
38 template<typename FactoryType>
39 void ConcatLayer::CreateTensors(const TensorHandleFactoryRegistry& registry,
40  const FactoryType& factory,
41  bool isMemoryManaged)
42 {
43  //If sub tensors are supported then the concat
44  //just needs to make sure that the outputs of the prev layer
45  //are made subtensors of the output of the concat layer.
46  m_OutputHandlers[0].CreateTensorHandles(factory, isMemoryManaged);
47 
48  if (factory.SupportsSubTensors())
49  {
50  // check if concat is along the x or y (2 innermost dimensions)
51  uint32_t concatAxis = m_Param.GetConcatAxis();
52  auto numberOfDimensions = m_Param.GetNumDimensions();
53  bool isConcatOnXorY = m_Param.GetNumDimensions() >= 3
54  && ((concatAxis == numberOfDimensions - 1) || (concatAxis == numberOfDimensions - 2));
55 
55 
56  ITensorHandleFactory::FactoryId factoryId = GetOutputSlot(0).GetTensorHandleFactoryId();
57 
58  std::queue<ConcatLayer*> m_ConcatLayers;
59 
60  m_ConcatLayers.push(this);
61  while (!m_ConcatLayers.empty())
62  {
63  ConcatLayer* currentLayer = m_ConcatLayers.front();
64  ITensorHandle* parentTensor = currentLayer->GetOutputHandler(0).GetData();
65  const TensorInfo& parentInfo = currentLayer->GetOutputHandler(0).GetTensorInfo();
66  m_ConcatLayers.pop();
67 
68  const unsigned int numInputSlots = currentLayer->GetNumInputSlots();
69 
70  // if concat along x or y (2 innermost dimensions) and the previous layers do not require padding
71  bool canUseSubTensorOnXorY = true;
72  bool isTensorHandleFactory = std::is_same<armnn::ITensorHandleFactory, FactoryType>::value;
73  if (isTensorHandleFactory)
74  {
75  for (unsigned int i = 0; i < numInputSlots; ++i)
76  {
77  OutputSlot* slot = currentLayer->GetInputSlot(i).GetConnectedOutputSlot();
78  ITensorHandleFactory* handleFactory = registry.GetFactory(factoryId);
79  std::vector<Capability> capabilities =
80  handleFactory->GetCapabilities(&(slot->GetOwningLayer()),
81  currentLayer,
82  CapabilityClass::PaddingRequired);
83  if (isConcatOnXorY)
84  {
85  canUseSubTensorOnXorY = false;
86  if (capabilities.empty())
87  {
88  canUseSubTensorOnXorY = true;
89  }
90  }
91 
92  if (!canUseSubTensorOnXorY)
93  {
94  break;
95  }
96  }
97  }
98 
99  // First go through all the input slots and verify that we can sub-tensor all the inputs.
100  std::vector<std::unique_ptr<ITensorHandle>> subTensors(0);
101  subTensors.reserve(numInputSlots);
102  for (unsigned int i = 0; i < numInputSlots; ++i)
103  {
104  OutputSlot* slot = currentLayer->GetInputSlot(i).GetConnectedOutputSlot();
105  const TensorInfo& info = slot->GetTensorInfo();
106 
107  auto CreateSubTensor = [&]()
108  {
109  // Make sure:
110  // 1) quantization parameters are in the same space
111  // 2) the same TensorHandleFactory is used for input and Concat layer output
112  // 3) the input does not come from a Constant layer or input layer
113  // 4) the input is only read by this concat layer
114  // 5) if concat along x or y (2 innermost dimensions) and the previous layers do not require padding
115  if (slot &&
116  parentInfo.IsTypeSpaceMatch(info) && //(1)
117  factoryId == slot->GetTensorHandleFactoryId() && //(2)
118  slot->GetOwningLayer().GetType() != LayerType::Constant && //(3)
119  slot->GetOwningLayer().GetType() != LayerType::Input && //(3)
120  slot->GetNumConnections() == 1 &&
121  canUseSubTensorOnXorY) //(5)
122  {
123  ARMNN_NO_DEPRECATE_WARN_BEGIN
124  return factory.CreateSubTensorHandle(*parentTensor,
125  info.GetShape(),
126  currentLayer->m_Param.GetViewOrigin(i));
127  ARMNN_NO_DEPRECATE_WARN_END
128  }
129  return std::unique_ptr<ITensorHandle>();
130  };
131 
132  auto subTensor = CreateSubTensor();
133  if (!subTensor)
134  {
135  break; //Failed to create a valid sub-tensor, so stop trying with the rest of the inputs.
136  }
137  else
138  {
139  subTensors.push_back(std::move(subTensor)); // store the valid sub-tensor.
140  }
141  }
142 
143  // Ensure that ALL inputs can be substituted with valid sub-tensors
144  if (subTensors.size() < numInputSlots)
145  {
146  continue; // Don't optimize this Concat layer with sub-tensors
147  }
148 
149  // Substitute input tensors with sub-tensors by replacing the output tensors on the connected layers.
150  unsigned int i=0;
151  for (auto& subTensor : subTensors)
152  {
153  OutputSlot* slot = currentLayer->GetInputSlot(i).GetConnectedOutputSlot();
154  OutputHandler& outputHandler = slot->GetOutputHandler();
155 
156  ARMNN_ASSERT_MSG(subTensor, "ConcatLayer: Expected a valid sub-tensor for substitution.");
157  outputHandler.SetData(std::move(subTensor));
158 
159  Layer& inputLayer = slot->GetOwningLayer();
160  if (inputLayer.GetType() == LayerType::Concat)
161  {
162  // Continue with the substitution if the connected inputs are also concat layers
163  m_ConcatLayers.push(PolymorphicDowncast<ConcatLayer*>(&inputLayer));
164  }
165  ++i;
166  }
167  }
168  }
169 }
170 
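A short sketch of a graph for which the sub-tensor path in CreateTensors() above can apply: the Concat inputs come from Activation layers (so not Input or Constant layers, criterion 3) and each producer feeds only the Concat (criterion 4). Whether sub-tensors are actually used still depends on the backend's tensor handle factory; the function name BuildConcatNetwork, the layer names and the shapes are illustrative and not part of this file:

    #include <armnn/INetwork.hpp>
    #include <armnn/Descriptors.hpp>
    #include <armnn/Tensor.hpp>
    #include <iterator>

    void BuildConcatNetwork()
    {
        using namespace armnn;

        INetworkPtr net = INetwork::Create();

        // Concatenate two 4D tensors along the channel axis (axis 1).
        TensorShape shapes[] = { TensorShape({ 1, 2, 4, 4 }), TensorShape({ 1, 3, 4, 4 }) };
        OriginsDescriptor concatDesc =
            CreateDescriptorForConcatenation(std::begin(shapes), std::end(shapes), 1);

        ActivationDescriptor reluDesc;
        reluDesc.m_Function = ActivationFunction::ReLu;

        IConnectableLayer* in0    = net->AddInputLayer(0);
        IConnectableLayer* in1    = net->AddInputLayer(1);
        IConnectableLayer* relu0  = net->AddActivationLayer(reluDesc, "relu0");
        IConnectableLayer* relu1  = net->AddActivationLayer(reluDesc, "relu1");
        IConnectableLayer* concat = net->AddConcatLayer(concatDesc, "concat");
        IConnectableLayer* out    = net->AddOutputLayer(0);

        in0->GetOutputSlot(0).Connect(relu0->GetInputSlot(0));
        in1->GetOutputSlot(0).Connect(relu1->GetInputSlot(0));
        relu0->GetOutputSlot(0).Connect(concat->GetInputSlot(0));
        relu1->GetOutputSlot(0).Connect(concat->GetInputSlot(1));
        concat->GetOutputSlot(0).Connect(out->GetInputSlot(0));

        // With matching data types and the same tensor handle factory throughout,
        // the two ReLU outputs can become sub-tensors of the Concat output, so no
        // separate copy into the concatenated tensor is needed.
        TensorInfo info0(shapes[0], DataType::Float32);
        TensorInfo info1(shapes[1], DataType::Float32);
        TensorInfo outInfo(TensorShape({ 1, 5, 4, 4 }), DataType::Float32);

        in0->GetOutputSlot(0).SetTensorInfo(info0);
        relu0->GetOutputSlot(0).SetTensorInfo(info0);
        in1->GetOutputSlot(0).SetTensorInfo(info1);
        relu1->GetOutputSlot(0).SetTensorInfo(info1);
        concat->GetOutputSlot(0).SetTensorInfo(outInfo);
    }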
171 void ConcatLayer::CreateTensorHandles(const TensorHandleFactoryRegistry& registry,
172  const IWorkloadFactory& workloadFactory,
173  const bool isMemoryManaged)
174 {
175  OutputSlot& slot = GetOutputSlot(0);
176  ITensorHandleFactory::FactoryId factoryId = slot.GetTensorHandleFactoryId();
177 
178  if (factoryId == ITensorHandleFactory::LegacyFactoryId)
179  {
180  CreateTensors(registry, workloadFactory, isMemoryManaged);
181  }
182  else
183  {
184  ITensorHandleFactory* handleFactory = registry.GetFactory(factoryId);
185  ARMNN_ASSERT(handleFactory);
186  CreateTensors(registry, *handleFactory, isMemoryManaged);
187  }
188 }
189 
190 ConcatLayer* ConcatLayer::Clone(Graph& graph) const
191 {
192  return CloneBase<ConcatLayer>(graph, m_Param, GetName());
193 }
194 
195 std::vector<TensorShape> ConcatLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
196 {
197  ARMNN_ASSERT(inputShapes.size() == m_Param.GetNumViews());
198 
199  unsigned int numDims = m_Param.GetNumDimensions();
200  for (unsigned int i=0; i< inputShapes.size(); i++)
201  {
202  auto& inputShape = inputShapes[i];
203 
204  ConditionalThrowIfNotEqual<LayerValidationException>(
205  "ConcatLayer: Num Dimensions must match all inputs.",
206  numDims,
207  inputShape.GetNumDimensions());
208  }
209 
210  // Finds the bounding box (extents) of all the views.
211  std::vector<unsigned int> extentMin(numDims);
212  std::vector<unsigned int> extentMax(numDims);
213  for (unsigned int i = 0; i < inputShapes.size(); i++)
214  {
215  const uint32_t* origin = m_Param.GetViewOrigin(i);
216  const armnn::TensorShape& shape = inputShapes[i];
217  for (unsigned int d = 0; d < numDims; d++)
218  {
219  extentMin[d] = std::min(extentMin[d], origin[d]);
220  extentMax[d] = std::max(extentMax[d], origin[d] + shape[d]);
221  }
222  }
223 
224  // Checks that the bounding box starts at the origin.
225  if (!std::all_of(extentMin.begin(), extentMin.end(), [](unsigned int s) { return s == 0; }))
226  {
227  throw LayerValidationException("ConcatLayer: there is no view that starts at the origin");
228  }
229 
230  // Checks that there are no overlaps of views (this would lead to undefined output at those locations).
231  // Checks each pair of views against each other
232  // (and doesn't bother to check against self, or check the same pair both ways round).
233  for (unsigned int a = 0; a < inputShapes.size(); a++)
234  {
235  const uint32_t* aOrigin = m_Param.GetViewOrigin(a);
236  const armnn::TensorShape& aShape = inputShapes[a];
237  for (unsigned int b = 0; b < a; b++)
238  {
239  const uint32_t* bOrigin = m_Param.GetViewOrigin(b);
240  const armnn::TensorShape& bShape = inputShapes[b];
241 
242  bool allAxesOverlap = true;
243  for (unsigned int d = 0; d < numDims && allAxesOverlap; d++)
244  {
245  unsigned int a1 = aOrigin[d];
246  unsigned int a2 = aOrigin[d] + aShape[d];
247 
248  unsigned int b1 = bOrigin[d];
249  unsigned int b2 = bOrigin[d] + bShape[d];
250 
251  if (a2 <= b1 || b2 <= a1)
252  {
253  allAxesOverlap = false;
254  }
255  }
256  if (allAxesOverlap)
257  {
258  throw LayerValidationException("ConcatLayer: Some views overlap.");
259  }
260  }
261  }
262 
263  // Checks that there are no "holes", i.e. regions of the output which are not covered by a view.
264  // Because we already checked that there are no overlaps, this can be done simply by checking that
265  // the total 'volume' of the views is the same as the output.
266  unsigned int totalViewsVolume = 0;
267  for (unsigned int i = 0; i < inputShapes.size(); i++)
268  {
269  totalViewsVolume += inputShapes[i].GetNumElements();
270  }
271  unsigned int outputVolume = 1;
272  for (unsigned int d = 0; d < numDims; d++)
273  {
274  outputVolume *= (extentMax[d] - extentMin[d]);
275  }
276 
277  ConditionalThrowIfNotEqual<LayerValidationException>(
278  "ConcatLayer: there are some gaps between views",
279  totalViewsVolume,
280  outputVolume);
281 
282  return std::vector<TensorShape>({ TensorShape({numDims, extentMax.data()}) });
283 }
284 
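A worked instance of the checks in InferOutputShapes() above, with illustrative numbers that are not part of this file: two views of shapes (1,2,4,4) and (1,3,4,4) placed at origins (0,0,0,0) and (0,2,0,0) give extentMin = (0,0,0,0) and extentMax = (1,5,4,4). The bounding box starts at the origin, the views do not overlap along dimension 1 (the first covers [0,2) and the second [2,5)), and the total view volume 1*2*4*4 + 1*3*4*4 = 32 + 48 = 80 equals the bounding-box volume 1*5*4*4 = 80, so the inferred output shape is (1,5,4,4).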
285 void ConcatLayer::ValidateTensorShapesFromInputs()
286 {
287  // Validates Concat layer.
288  ConditionalThrowIfNotEqual<LayerValidationException>(
289  "ConcatLayer: Num Inputs must match num views.",
290  m_Param.GetNumViews(),
291  GetNumInputSlots());
292 
293  VerifyLayerConnections(m_Param.GetNumViews(), CHECK_LOCATION());
294 
295  const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
296 
297  VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
298 
299  std::vector<TensorShape> inputShapes;
300  for (unsigned int i = 0; i < GetNumInputSlots(); ++i)
301  {
302  inputShapes.push_back(GetInputSlot(i).GetConnection()->GetTensorInfo().GetShape());
303  }
304 
305  auto inferredShapes = InferOutputShapes(inputShapes);
306 
307  ARMNN_ASSERT(inferredShapes.size() == 1);
308 
309  ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "ConcatLayer");
310 }
311 
312 void ConcatLayer::Accept(ILayerVisitor& visitor) const
313 {
314  visitor.VisitConcatLayer(this, GetParameters(), GetName());
315 }
316 
317 } // namespace armnn
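A minimal sketch of a visitor that the Accept() override above would call back into; LayerVisitorBase and VisitorNoThrowPolicy are the armnn helpers this assumes, and the class name ConcatPrinter is made up for illustration:

    #include <armnn/LayerVisitorBase.hpp>
    #include <iostream>

    // Prints basic information for every Concat layer it visits; all other layer
    // types fall through to the no-op defaults supplied by LayerVisitorBase.
    class ConcatPrinter : public armnn::LayerVisitorBase<armnn::VisitorNoThrowPolicy>
    {
    public:
        void VisitConcatLayer(const armnn::IConnectableLayer* /*layer*/,
                              const armnn::OriginsDescriptor& concatDescriptor,
                              const char* name) override
        {
            std::cout << "Concat '" << (name ? name : "") << "': "
                      << concatDescriptor.GetNumViews() << " views, concat axis "
                      << concatDescriptor.GetConcatAxis() << std::endl;
        }
    };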
ConcatLayer(const OriginsDescriptor &param, const char *name)
Constructor to create a ConcatLayer.
Definition: ConcatLayer.cpp:18
std::vector< TensorShape > InferOutputShapes(const std::vector< TensorShape > &inputShapes) const override
By default returns inputShapes if the number of inputs is equal to the number of outputs; otherwise infers the output shapes from the given input shapes and layer properties.
bool IsTypeSpaceMatch(const TensorInfo &other) const
Check that the types are the same and, if quantized, that the quantization parameters are the same...
Definition: Tensor.cpp:424
OriginsDescriptor m_Param
The parameters for the layer (not including tensor-valued weights etc.).
const OriginsDescriptor & GetParameters() const
unsigned int GetNumInputSlots() const override
Returns the number of connectable input slots.
Definition: Layer.hpp:309
const TensorShape & GetShape() const
Definition: Tensor.hpp:187
void SetData(std::unique_ptr< ITensorHandle > data)
#define ARMNN_NO_DEPRECATE_WARN_BEGIN
Definition: Deprecated.hpp:33
virtual std::unique_ptr< IWorkload > CreateWorkload(const IWorkloadFactory &factory) const override
Makes a workload for the Concat type.
Definition: ConcatLayer.cpp:23
Layer & GetOwningLayer() const
Definition: Layer.hpp:115
void VerifyShapeInferenceType(const TensorShape &outputShape, ShapeInferenceMethod shapeInferenceMethod)
Definition: Layer.cpp:432
void ValidateAndCopyShape(const TensorShape &outputShape, const TensorShape &inferredShape, const ShapeInferenceMethod shapeInferenceMethod, const std::string &layerName, const unsigned int outputSlotIndex=0)
Definition: Layer.cpp:392
ConcatLayer * Clone(Graph &graph) const override
Creates a dynamically-allocated copy of this layer.
virtual std::unique_ptr< IWorkload > CreateConcat(const ConcatQueueDescriptor &descriptor, const WorkloadInfo &info) const
void VerifyLayerConnections(unsigned int expectedConnections, const CheckLocation &location) const
Definition: Layer.cpp:344
unsigned int GetNumConnections() const override
Definition: Layer.hpp:138
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
Definition: Layer.hpp:312
const uint32_t * GetViewOrigin(uint32_t idx) const
Return the view origin at the int value idx.
virtual std::vector< Capability > GetCapabilities(const IConnectableLayer *layer, const IConnectableLayer *connectedLayer, CapabilityClass capabilityClass)
std::vector< ViewOrigin > m_ViewOrigins
#define ARMNN_NO_DEPRECATE_WARN_END
Definition: Deprecated.hpp:34
#define ARMNN_ASSERT_MSG(COND, MSG)
Definition: Assert.hpp:15
An OriginsDescriptor for the ConcatLayer.
void Accept(ILayerVisitor &visitor) const override
Apply a visitor to this layer.
void ValidateTensorShapesFromInputs() override
Check if the input tensor shape(s) will lead to a valid configuration of ConcatLayer.
This layer represents a merge operation.
Definition: ConcatLayer.hpp:13
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14
const OutputSlot * GetConnectedOutputSlot() const
Definition: Layer.hpp:55
ITensorHandle * GetData() const
Gets the allocated tensor memory.
#define CHECK_LOCATION()
Definition: Exceptions.hpp:197
std::vector< OutputHandler > m_OutputHandlers
Definition: Layer.hpp:386
virtual void VisitConcatLayer(const IConnectableLayer *layer, const OriginsDescriptor &concatDescriptor, const char *name=nullptr)
Function that a concat layer should call back to when its Accept(ILayerVisitor&) function is invoked...
const OutputHandler & GetOutputHandler(unsigned int i=0) const
Definition: Layer.hpp:221
ClWorkloadFactory FactoryType
ITensorHandleFactory * GetFactory(ITensorHandleFactory::FactoryId id) const
Find a TensorHandleFactory by Id. Returns nullptr if not found.
uint32_t GetNumDimensions() const
Get the number of dimensions.
WorkloadInfo PrepInfoAndDesc(QueueDescriptor &descriptor) const
Helper function to reduce duplication in *LayerCreateWorkload.
LayerType GetType() const
Definition: Layer.hpp:261
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
Definition: Layer.hpp:314
const OutputHandler & GetOutputHandler() const
Definition: Layer.hpp:119
const char * GetName() const override
Returns the name of the layer.
Definition: Layer.hpp:307
ITensorHandleFactory::FactoryId GetTensorHandleFactoryId() const
Definition: Layer.cpp:177
uint32_t GetNumViews() const
Get the number of views.
const TensorInfo & GetTensorInfo(const ITensorHandle *tensorHandle)
unsigned int GetConcatAxis() const
Get the concatenation axis value.
const TensorInfo & GetTensorInfo() const override
Definition: Layer.cpp:63
static const FactoryId LegacyFactoryId
ShapeInferenceMethod m_ShapeInferenceMethod
Definition: Layer.hpp:387
virtual void CreateTensorHandles(const TensorHandleFactoryRegistry &registry, const IWorkloadFactory &factory, const bool IsMemoryManaged=true) override
Set the outputs to be appropriate sub tensors of the input if sub tensors are supported otherwise cre...
const TensorInfo & GetTensorInfo() const
Gets the matching TensorInfo for the output.