ArmNN
 20.02
ConcatLayer.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
#include "ConcatLayer.hpp"
#include "LayerCloneBase.hpp"

#include <armnn/TypesUtils.hpp>

#include <limits>
#include <queue>
13 
14 namespace armnn
15 {
16 
17 ConcatLayer::ConcatLayer(const OriginsDescriptor& param, const char* name)
18  : LayerWithParameters(param.GetNumViews(), 1, LayerType::Concat, param, name)
19 {
20 }
21 
22 std::unique_ptr<IWorkload> ConcatLayer::CreateWorkload(const IWorkloadFactory& factory) const
23 {
24  ConcatQueueDescriptor descriptor;
25 
26  // Copies the view origins to the descriptor.
27  descriptor.m_ViewOrigins.reserve(m_Param.GetNumViews());
28  for (unsigned int i = 0; i < m_Param.GetNumViews(); ++i)
29  {
30  descriptor.m_ViewOrigins.emplace_back(
31  std::vector<unsigned int>(m_Param.GetViewOrigin(i), m_Param.GetViewOrigin(i) + m_Param.GetNumDimensions()));
32  }
33 
34  return factory.CreateConcat(descriptor, PrepInfoAndDesc(descriptor));
35 }
36 
37 template<typename FactoryType>
38 void ConcatLayer::CreateTensors(const FactoryType& factory)
39 {
40  //If sub tensors are supported then the concat
41  //just needs to make sure that the outputs of the prev layer
42  //are made subtensors of the output of the concat layer.
43  m_OutputHandlers[0].CreateTensorHandles(factory);
44 
45  if (factory.SupportsSubTensors())
46  {
48 
49  std::queue<ConcatLayer*> m_ConcatLayers;
50 
51  m_ConcatLayers.push(this);
52  while (!m_ConcatLayers.empty())
53  {
54  ConcatLayer* currentLayer = m_ConcatLayers.front();
55  ITensorHandle* parentTensor = currentLayer->GetOutputHandler(0).GetData();
56  const TensorInfo& parentInfo = currentLayer->GetOutputHandler(0).GetTensorInfo();
57  m_ConcatLayers.pop();
58 
59  const unsigned int numInputSlots = currentLayer->GetNumInputSlots();
60 
61  // First go through all the input slots and verify that we can sub-tensor all the inputs.
62  std::vector<std::unique_ptr<ITensorHandle>> subTensors(0);
63  subTensors.reserve(numInputSlots);
64  for (unsigned int i = 0; i < numInputSlots; ++i)
65  {
66  OutputSlot* slot = currentLayer->GetInputSlot(i).GetConnectedOutputSlot();
67  const TensorInfo& info = slot->GetTensorInfo();
68 
69  auto CreateSubTensor = [&]()
70  {
71  // Make sure:
72  // 1) quantization parameters are in the same space
73  // 2) the same TensorHandleFactory is used for input and Concat layer output
74  // 3) the input does not come from a Constant layer or input layer
75  // 4) the input is only read by this concat layer
76  if (slot &&
77  parentInfo.IsTypeSpaceMatch(info) && //(1)
78  factoryId == slot->GetTensorHandleFactoryId() && //(2)
79  slot->GetOwningLayer().GetType() != LayerType::Constant && //(3)
80  slot->GetOwningLayer().GetType() != LayerType::Input && //(3)
81  slot->GetNumConnections() == 1) //(4)
82  {
83  return factory.CreateSubTensorHandle(*parentTensor,
84  info.GetShape(),
85  currentLayer->m_Param.GetViewOrigin(i));
86  }
87  return std::unique_ptr<ITensorHandle>();
88  };
89 
90  auto subTensor = CreateSubTensor();
91  if (!subTensor)
92  {
93  break; //Failed to create a valid sub-tensor, so stop trying with the rest of the inputs.
94  }
95  else
96  {
97  subTensors.push_back(std::move(subTensor)); // store the valid sub-tensor.
98  }
99  }
100 
101  // Ensure that ALL inputs can be substituted with valid sub-tensors
102  if (subTensors.size() < numInputSlots)
103  {
104  continue; // Don't optimize this Concat layer with sub-tensors
105  }
106 
107  // Substitute input tensors with sub-tensors by replacing the output tensors on the connected layers.
108  unsigned int i=0;
109  for (auto& subTensor : subTensors)
110  {
111  OutputSlot* slot = currentLayer->GetInputSlot(i).GetConnectedOutputSlot();
112  OutputHandler& outputHandler = slot->GetOutputHandler();
113 
114  BOOST_ASSERT_MSG(subTensor, "ConcatLayer: Expected a valid sub-tensor for substitution.");
115  outputHandler.SetData(std::move(subTensor));
116 
117  Layer& inputLayer = slot->GetOwningLayer();
118  if (inputLayer.GetType() == LayerType::Concat)
119  {
120  // Continue with the substitution if the connected inputs are also concat layers
121  m_ConcatLayers.push(boost::polymorphic_downcast<ConcatLayer*>(&inputLayer));
122  }
123  ++i;
124  }
125  }
126  }
127 }
128 
130  const IWorkloadFactory& workloadFactory,
131  const bool IsMemoryManaged)
132 {
133  IgnoreUnused(IsMemoryManaged);
134  OutputSlot& slot = GetOutputSlot(0);
136 
137  if (factoryId == ITensorHandleFactory::LegacyFactoryId)
138  {
139  CreateTensors(workloadFactory);
140  }
141  else
142  {
143  ITensorHandleFactory* handleFactory = registry.GetFactory(factoryId);
144  BOOST_ASSERT(handleFactory);
145  CreateTensors(*handleFactory);
146  }
147 }
148 
150 {
151  return CloneBase<ConcatLayer>(graph, m_Param, GetName());
152 }
153 
154 std::vector<TensorShape> ConcatLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
155 {
156  BOOST_ASSERT(inputShapes.size() == m_Param.GetNumViews());
157 
158  unsigned int numDims = m_Param.GetNumDimensions();
159  for (unsigned int i=0; i< inputShapes.size(); i++)
160  {
161  auto& inputShape = inputShapes[i];
162 
163  ConditionalThrowIfNotEqual<LayerValidationException>(
164  "ConcatLayer: Num Dimensions must match all inputs.",
165  numDims,
166  inputShape.GetNumDimensions());
167  }
168 
169  // Finds the bounding box (extents) of all the views.
170  std::vector<unsigned int> extentMin(numDims);
171  std::vector<unsigned int> extentMax(numDims);
172  for (unsigned int i = 0; i < inputShapes.size(); i++)
173  {
174  const uint32_t* origin = m_Param.GetViewOrigin(i);
175  const armnn::TensorShape& shape = inputShapes[i];
176  for (unsigned int d = 0; d < numDims; d++)
177  {
178  extentMin[d] = std::min(extentMin[d], origin[d]);
179  extentMax[d] = std::max(extentMax[d], origin[d] + shape[d]);
180  }
181  }
182 
183  // Checks that the bounding box starts at the origin.
184  if (!std::all_of(extentMin.begin(), extentMin.end(), [](unsigned int s) { return s == 0; }))
185  {
186  throw LayerValidationException("ConcatLayer: there is no view that starts at the origin");
187  }
188 
189  // Checks that there are no overlaps of views (this would lead to undefined output at those locations).
190  // Checks each pair of views against each other
191  // (and doesn't bother to check against self, or check the same pair both ways round).
192  for (unsigned int a = 0; a < inputShapes.size(); a++)
193  {
194  const uint32_t* aOrigin = m_Param.GetViewOrigin(a);
195  const armnn::TensorShape& aShape = inputShapes[a];
196  for (unsigned int b = 0; b < a; b++)
197  {
198  const uint32_t* bOrigin = m_Param.GetViewOrigin(b);
199  const armnn::TensorShape& bShape = inputShapes[b];
200 
201  bool allAxesOverlap = true;
202  for (unsigned int d = 0; d < numDims && allAxesOverlap; d++)
203  {
204  unsigned int a1 = aOrigin[d];
205  unsigned int a2 = aOrigin[d] + aShape[d];
206 
207  unsigned int b1 = bOrigin[d];
208  unsigned int b2 = bOrigin[d] + bShape[d];
209 
210  if (a2 <= b1 || b2 <= a1)
211  {
212  allAxesOverlap = false;
213  }
214  }
215  if (allAxesOverlap)
216  {
217  throw LayerValidationException("ConcatLayer: Some views overlap.");
218  }
219  }
220  }
221 
222  // Checks that there are no "holes", i.e. regions of the output which is not covered by a view.
223  // Because we already checked that there are no overlaps, this can be done simply by checking that
224  // the total 'volume' of the views is the same as the output.
225  unsigned int totalViewsVolume = 0;
226  for (unsigned int i = 0; i < inputShapes.size(); i++)
227  {
228  totalViewsVolume += inputShapes[i].GetNumElements();
229  }
230  unsigned int outputVolume = 1;
231  for (unsigned int d = 0; d < numDims; d++)
232  {
233  outputVolume *= (extentMax[d] - extentMin[d]);
234  }
235 
236  ConditionalThrowIfNotEqual<LayerValidationException>(
237  "ConcatLayer: there are some gaps between views",
238  totalViewsVolume,
239  outputVolume);
240 
241  return std::vector<TensorShape>({ TensorShape({numDims, extentMax.data()}) });
242 }
243 
245 {
246  // Validates Concat layer.
247  ConditionalThrowIfNotEqual<LayerValidationException>(
248  "ConcatLayer: Num Inputs must match num views.",
250  GetNumInputSlots());
251 
253 
254  std::vector<TensorShape> inputShapes;
255  for (unsigned int i = 0; i < GetNumInputSlots(); ++i)
256  {
257  inputShapes.push_back(GetInputSlot(i).GetConnection()->GetTensorInfo().GetShape());
258  }
259 
260  auto inferredShapes = InferOutputShapes(inputShapes);
261 
262  BOOST_ASSERT(inferredShapes.size() == 1);
263 
264  ConditionalThrowIfNotEqual<LayerValidationException>(
265  "ConcatLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
267  inferredShapes[0]);
268 }
269 
270 void ConcatLayer::Accept(ILayerVisitor& visitor) const
271 {
272  visitor.VisitConcatLayer(this, GetParameters(), GetName());
273 }
274 
} // namespace armnn
ConcatLayer(const OriginsDescriptor &param, const char *name)
Constructor to create a ConcatLayer.
Definition: ConcatLayer.cpp:17
std::vector< TensorShape > InferOutputShapes(const std::vector< TensorShape > &inputShapes) const override
By default returns inputShapes if the number of inputs are equal to number of outputs, otherwise infers the output shapes from given input shapes and layer properties.
bool IsTypeSpaceMatch(const TensorInfo &other) const
Check that the types are the same and, if quantize, that the quantization parameters are the same...
Definition: Tensor.cpp:218
OriginsDescriptor m_Param
The parameters for the layer (not including tensor-valued weights etc.).
const OriginsDescriptor & GetParameters() const
unsigned int GetNumInputSlots() const override
Returns the number of connectable input slots.
Definition: Layer.hpp:307
const TensorShape & GetShape() const
Definition: Tensor.hpp:88
void SetData(std::unique_ptr< ITensorHandle > data)
virtual std::unique_ptr< IWorkload > CreateWorkload(const IWorkloadFactory &factory) const override
Makes a workload for the Concat type.
Definition: ConcatLayer.cpp:22
Layer & GetOwningLayer() const
Definition: Layer.hpp:115
const TensorInfo & GetTensorInfo(const ITensorHandle *tensorHandle)
float32 helpers
Copyright (c) 2020 ARM Limited.
void IgnoreUnused(Ts &&...)
ConcatLayer * Clone(Graph &graph) const override
Creates a dynamically-allocated copy of this layer.
virtual std::unique_ptr< IWorkload > CreateConcat(const ConcatQueueDescriptor &descriptor, const WorkloadInfo &info) const
void VerifyLayerConnections(unsigned int expectedConnections, const CheckLocation &location) const
Definition: Layer.cpp:338
unsigned int GetNumConnections() const override
Definition: Layer.hpp:138
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
Definition: Layer.hpp:310
const uint32_t * GetViewOrigin(uint32_t idx) const
Return the view origin at the int value idx.
std::vector< ViewOrigin > m_ViewOrigins
An OriginsDescriptor for the ConcatLayer.
void Accept(ILayerVisitor &visitor) const override
Apply a visitor to this layer.
void ValidateTensorShapesFromInputs() override
Check if the input tensor shape(s) will lead to a valid configuration of ConcatLayer.
This layer represents a merge operation.
Definition: ConcatLayer.hpp:13
const OutputSlot * GetConnectedOutputSlot() const
Definition: Layer.hpp:55
ITensorHandle * GetData() const
Gets the allocated tensor memory.
#define CHECK_LOCATION()
Definition: Exceptions.hpp:192
std::vector< OutputHandler > m_OutputHandlers
Definition: Layer.hpp:371
virtual void VisitConcatLayer(const IConnectableLayer *layer, const OriginsDescriptor &concatDescriptor, const char *name=nullptr)
Function that a concat layer should call back to when its Accept(ILayerVisitor&) function is invoked...
const OutputHandler & GetOutputHandler(unsigned int i=0) const
Definition: Layer.hpp:221
ClWorkloadFactory FactoryType
ITensorHandleFactory * GetFactory(ITensorHandleFactory::FactoryId id) const
Find a TensorHandleFactory by Id Returns nullptr if not found.
uint32_t GetNumDimensions() const
Get the number of dimensions.
WorkloadInfo PrepInfoAndDesc(QueueDescriptor &descriptor) const
Helper function to reduce duplication in *LayerCreateWorkload.
LayerType GetType() const
Definition: Layer.hpp:259
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
Definition: Layer.hpp:312
const OutputHandler & GetOutputHandler() const
Definition: Layer.hpp:119
const char * GetName() const override
Returns the name of the layer.
Definition: Layer.hpp:305
ITensorHandleFactory::FactoryId GetTensorHandleFactoryId() const
Definition: Layer.cpp:172
uint32_t GetNumViews() const
Get the number of views.
const TensorInfo & GetTensorInfo() const override
Definition: Layer.cpp:63
static const FactoryId LegacyFactoryId
virtual void CreateTensorHandles(const TensorHandleFactoryRegistry &registry, const IWorkloadFactory &factory, const bool IsMemoryManaged=true) override
Set the outputs to be appropriate sub tensors of the input if sub tensors are supported otherwise cre...
const TensorInfo & GetTensorInfo() const
Gets the matching TensorInfo for the output.