ArmNN
 20.05
ConcatLayer.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 #include "ConcatLayer.hpp"
6 #include "LayerCloneBase.hpp"
7 
8 #include <armnn/TypesUtils.hpp>
12 
13 #include <queue>
14 
15 namespace armnn
16 {
17 
/// Constructs a ConcatLayer: one input slot per view declared in the
/// OriginsDescriptor, and a single output slot.
/// @param param descriptor holding the number of views and each view's origin.
/// @param name  optional layer name (may be nullptr).
ConcatLayer::ConcatLayer(const OriginsDescriptor& param, const char* name)
    : LayerWithParameters(param.GetNumViews(), 1, LayerType::Concat, param, name)
{
}
22 
23 std::unique_ptr<IWorkload> ConcatLayer::CreateWorkload(const IWorkloadFactory& factory) const
24 {
25  ConcatQueueDescriptor descriptor;
26 
27  // Copies the view origins to the descriptor.
28  descriptor.m_ViewOrigins.reserve(m_Param.GetNumViews());
29  for (unsigned int i = 0; i < m_Param.GetNumViews(); ++i)
30  {
31  descriptor.m_ViewOrigins.emplace_back(
32  std::vector<unsigned int>(m_Param.GetViewOrigin(i), m_Param.GetViewOrigin(i) + m_Param.GetNumDimensions()));
33  }
34 
35  return factory.CreateConcat(descriptor, PrepInfoAndDesc(descriptor));
36 }
37 
/// Sub-tensor optimization: when the chosen factory supports sub-tensors,
/// replace each input layer's output tensor with a sub-tensor view into this
/// concat layer's output, so no copy is needed at runtime. Processes nested
/// Concat layers breadth-first via a work queue.
/// @tparam FactoryType either an IWorkloadFactory or an ITensorHandleFactory
///         (both callers below); must provide SupportsSubTensors() and
///         CreateSubTensorHandle().
// NOTE(review): the declaration of `factoryId` (original line 48, read inside
// the lambda below) is missing from this capture — presumably obtained from
// GetTensorHandleFactoryId() on the output slot; confirm against upstream.
template<typename FactoryType>
void ConcatLayer::CreateTensors(const FactoryType& factory)
{
    //If sub tensors are supported then the concat
    //just needs to make sure that the outputs of the prev layer
    //are made subtensors of the output of the concat layer.
    m_OutputHandlers[0].CreateTensorHandles(factory);

    if (factory.SupportsSubTensors())
    {

        // Work queue of concat layers still to process (starts with this one;
        // nested concat inputs are appended as they are discovered).
        std::queue<ConcatLayer*> m_ConcatLayers;

        m_ConcatLayers.push(this);
        while (!m_ConcatLayers.empty())
        {
            ConcatLayer* currentLayer = m_ConcatLayers.front();
            // Parent tensor/info: the output of the concat layer currently
            // being processed; its inputs become sub-tensors of this tensor.
            ITensorHandle* parentTensor = currentLayer->GetOutputHandler(0).GetData();
            const TensorInfo& parentInfo = currentLayer->GetOutputHandler(0).GetTensorInfo();
            m_ConcatLayers.pop();

            const unsigned int numInputSlots = currentLayer->GetNumInputSlots();

            // First go through all the input slots and verify that we can sub-tensor all the inputs.
            std::vector<std::unique_ptr<ITensorHandle>> subTensors(0);
            subTensors.reserve(numInputSlots);
            for (unsigned int i = 0; i < numInputSlots; ++i)
            {
                OutputSlot* slot = currentLayer->GetInputSlot(i).GetConnectedOutputSlot();
                const TensorInfo& info = slot->GetTensorInfo();

                auto CreateSubTensor = [&]()
                {
                    // Make sure:
                    // 1) quantization parameters are in the same space
                    // 2) the same TensorHandleFactory is used for input and Concat layer output
                    // 3) the input does not come from a Constant layer or input layer
                    // 4) the input is only read by this concat layer
                    if (slot &&
                        parentInfo.IsTypeSpaceMatch(info) && //(1)
                        factoryId == slot->GetTensorHandleFactoryId() && //(2)
                        slot->GetOwningLayer().GetType() != LayerType::Constant && //(3)
                        slot->GetOwningLayer().GetType() != LayerType::Input && //(3)
                        slot->GetNumConnections() == 1) //(4)
                    {
                        // The view origin places this input at the correct
                        // offset within the parent (concat output) tensor.
                        return factory.CreateSubTensorHandle(*parentTensor,
                                                             info.GetShape(),
                                                             currentLayer->m_Param.GetViewOrigin(i));
                    }
                    // Empty handle signals "this input cannot be sub-tensored".
                    return std::unique_ptr<ITensorHandle>();
                };

                auto subTensor = CreateSubTensor();
                if (!subTensor)
                {
                    break; //Failed to create a valid sub-tensor, so stop trying with the rest of the inputs.
                }
                else
                {
                    subTensors.push_back(std::move(subTensor)); // store the valid sub-tensor.
                }
            }

            // Ensure that ALL inputs can be substituted with valid sub-tensors
            if (subTensors.size() < numInputSlots)
            {
                continue; // Don't optimize this Concat layer with sub-tensors
            }

            // Substitute input tensors with sub-tensors by replacing the output tensors on the connected layers.
            unsigned int i=0;
            for (auto& subTensor : subTensors)
            {
                OutputSlot* slot = currentLayer->GetInputSlot(i).GetConnectedOutputSlot();
                OutputHandler& outputHandler = slot->GetOutputHandler();

                ARMNN_ASSERT_MSG(subTensor, "ConcatLayer: Expected a valid sub-tensor for substitution.");
                outputHandler.SetData(std::move(subTensor));

                Layer& inputLayer = slot->GetOwningLayer();
                if (inputLayer.GetType() == LayerType::Concat)
                {
                    // Continue with the substitution if the connected inputs are also concat layers
                    m_ConcatLayers.push(PolymorphicDowncast<ConcatLayer*>(&inputLayer));
                }
                ++i;
            }
        }
    }
}
129 
// Set the outputs to be appropriate sub tensors of the input if sub tensors
// are supported, otherwise create tensor handles via the selected factory.
// NOTE(review): the opening of this definition (original lines 129-130) is
// missing from this capture. Its declaration, shown in the reference index of
// this listing, is:
//   virtual void CreateTensorHandles(const TensorHandleFactoryRegistry& registry,
//                                    const IWorkloadFactory& workloadFactory,
//                                    const bool IsMemoryManaged = true) override
                               const IWorkloadFactory& workloadFactory,
                               const bool IsMemoryManaged)
{
    // Memory management is not handled at this level for Concat.
    IgnoreUnused(IsMemoryManaged);
    OutputSlot& slot = GetOutputSlot(0);
    // NOTE(review): the declaration of `factoryId` (original line 136) is
    // missing from this capture — presumably `slot.GetTensorHandleFactoryId()`;
    // confirm against the upstream file before relying on this listing.

    if (factoryId == ITensorHandleFactory::LegacyFactoryId)
    {
        // Legacy path: create the tensors directly through the workload factory.
        CreateTensors(workloadFactory);
    }
    else
    {
        // Modern path: look up the registered handle factory for this id.
        ITensorHandleFactory* handleFactory = registry.GetFactory(factoryId);
        ARMNN_ASSERT(handleFactory);
        CreateTensors(*handleFactory);
    }
}
149 
// Creates a dynamically-allocated copy of this layer in the target graph.
// NOTE(review): the signature line (original line 150) is missing from this
// capture; the reference index of this listing declares it as
// `ConcatLayer* Clone(Graph& graph) const override`.
{
    // CloneBase copies the descriptor (m_Param) and name into the new graph.
    return CloneBase<ConcatLayer>(graph, m_Param, GetName());
}
154 
155 std::vector<TensorShape> ConcatLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
156 {
157  ARMNN_ASSERT(inputShapes.size() == m_Param.GetNumViews());
158 
159  unsigned int numDims = m_Param.GetNumDimensions();
160  for (unsigned int i=0; i< inputShapes.size(); i++)
161  {
162  auto& inputShape = inputShapes[i];
163 
164  ConditionalThrowIfNotEqual<LayerValidationException>(
165  "ConcatLayer: Num Dimensions must match all inputs.",
166  numDims,
167  inputShape.GetNumDimensions());
168  }
169 
170  // Finds the bounding box (extents) of all the views.
171  std::vector<unsigned int> extentMin(numDims);
172  std::vector<unsigned int> extentMax(numDims);
173  for (unsigned int i = 0; i < inputShapes.size(); i++)
174  {
175  const uint32_t* origin = m_Param.GetViewOrigin(i);
176  const armnn::TensorShape& shape = inputShapes[i];
177  for (unsigned int d = 0; d < numDims; d++)
178  {
179  extentMin[d] = std::min(extentMin[d], origin[d]);
180  extentMax[d] = std::max(extentMax[d], origin[d] + shape[d]);
181  }
182  }
183 
184  // Checks that the bounding box starts at the origin.
185  if (!std::all_of(extentMin.begin(), extentMin.end(), [](unsigned int s) { return s == 0; }))
186  {
187  throw LayerValidationException("ConcatLayer: there is no view that starts at the origin");
188  }
189 
190  // Checks that there are no overlaps of views (this would lead to undefined output at those locations).
191  // Checks each pair of views against each other
192  // (and doesn't bother to check against self, or check the same pair both ways round).
193  for (unsigned int a = 0; a < inputShapes.size(); a++)
194  {
195  const uint32_t* aOrigin = m_Param.GetViewOrigin(a);
196  const armnn::TensorShape& aShape = inputShapes[a];
197  for (unsigned int b = 0; b < a; b++)
198  {
199  const uint32_t* bOrigin = m_Param.GetViewOrigin(b);
200  const armnn::TensorShape& bShape = inputShapes[b];
201 
202  bool allAxesOverlap = true;
203  for (unsigned int d = 0; d < numDims && allAxesOverlap; d++)
204  {
205  unsigned int a1 = aOrigin[d];
206  unsigned int a2 = aOrigin[d] + aShape[d];
207 
208  unsigned int b1 = bOrigin[d];
209  unsigned int b2 = bOrigin[d] + bShape[d];
210 
211  if (a2 <= b1 || b2 <= a1)
212  {
213  allAxesOverlap = false;
214  }
215  }
216  if (allAxesOverlap)
217  {
218  throw LayerValidationException("ConcatLayer: Some views overlap.");
219  }
220  }
221  }
222 
223  // Checks that there are no "holes", i.e. regions of the output which is not covered by a view.
224  // Because we already checked that there are no overlaps, this can be done simply by checking that
225  // the total 'volume' of the views is the same as the output.
226  unsigned int totalViewsVolume = 0;
227  for (unsigned int i = 0; i < inputShapes.size(); i++)
228  {
229  totalViewsVolume += inputShapes[i].GetNumElements();
230  }
231  unsigned int outputVolume = 1;
232  for (unsigned int d = 0; d < numDims; d++)
233  {
234  outputVolume *= (extentMax[d] - extentMin[d]);
235  }
236 
237  ConditionalThrowIfNotEqual<LayerValidationException>(
238  "ConcatLayer: there are some gaps between views",
239  totalViewsVolume,
240  outputVolume);
241 
242  return std::vector<TensorShape>({ TensorShape({numDims, extentMax.data()}) });
243 }
244 
// Check if the input tensor shape(s) will lead to a valid configuration of
// this ConcatLayer (input count, per-input shapes, and the output shape set
// on OutputSlot[0] must all be mutually consistent).
// NOTE(review): this capture is missing the signature line (original line 245,
// declared in the reference index as `void ValidateTensorShapesFromInputs()
// override`) and several argument/continuation lines flagged below — confirm
// all of them against the upstream file.
{
    // Validates Concat layer.
    ConditionalThrowIfNotEqual<LayerValidationException>(
        "ConcatLayer: Num Inputs must match num views.",
        // NOTE(review): the expected-value argument (original line 250) is
        // missing from this capture; the message implies it is the
        // descriptor's view count.
        GetNumInputSlots());

    // NOTE(review): original line 253 is missing from this capture; the
    // reference index suggests a VerifyLayerConnections(...) call belongs here.

    // Collect the shape of every connected input.
    std::vector<TensorShape> inputShapes;
    for (unsigned int i = 0; i < GetNumInputSlots(); ++i)
    {
        inputShapes.push_back(GetInputSlot(i).GetConnection()->GetTensorInfo().GetShape());
    }

    auto inferredShapes = InferOutputShapes(inputShapes);

    // Concat always produces exactly one output shape.
    ARMNN_ASSERT(inferredShapes.size() == 1);

    ConditionalThrowIfNotEqual<LayerValidationException>(
        "ConcatLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
        // NOTE(review): the first value argument (original line 267) is
        // missing from this capture; the message implies it is the shape
        // currently set on OutputSlot[0].
        inferredShapes[0]);
}
270 
/// Visitor-pattern hook: forwards this layer, its OriginsDescriptor and its
/// name to the visitor's Concat callback.
/// @param visitor the visitor whose VisitConcatLayer is invoked.
void ConcatLayer::Accept(ILayerVisitor& visitor) const
{
    visitor.VisitConcatLayer(this, GetParameters(), GetName());
}
275 
} // namespace armnn
ConcatLayer(const OriginsDescriptor &param, const char *name)
Constructor to create a ConcatLayer.
Definition: ConcatLayer.cpp:18
std::vector< TensorShape > InferOutputShapes(const std::vector< TensorShape > &inputShapes) const override
By default returns inputShapes if the number of inputs are equal to number of outputs, otherwise infers the output shapes from given input shapes and layer properties.
bool IsTypeSpaceMatch(const TensorInfo &other) const
Check that the types are the same and, if quantize, that the quantization parameters are the same...
Definition: Tensor.cpp:219
OriginsDescriptor m_Param
The parameters for the layer (not including tensor-valued weights etc.).
const OriginsDescriptor & GetParameters() const
unsigned int GetNumInputSlots() const override
Returns the number of connectable input slots.
Definition: Layer.hpp:307
const TensorShape & GetShape() const
Definition: Tensor.hpp:88
void SetData(std::unique_ptr< ITensorHandle > data)
virtual std::unique_ptr< IWorkload > CreateWorkload(const IWorkloadFactory &factory) const override
Makes a workload for the Concat type.
Definition: ConcatLayer.cpp:23
Layer & GetOwningLayer() const
Definition: Layer.hpp:115
const TensorInfo & GetTensorInfo(const ITensorHandle *tensorHandle)
float32 helpers
Copyright (c) 2020 ARM Limited.
void IgnoreUnused(Ts &&...)
ConcatLayer * Clone(Graph &graph) const override
Creates a dynamically-allocated copy of this layer.
virtual std::unique_ptr< IWorkload > CreateConcat(const ConcatQueueDescriptor &descriptor, const WorkloadInfo &info) const
void VerifyLayerConnections(unsigned int expectedConnections, const CheckLocation &location) const
Definition: Layer.cpp:339
unsigned int GetNumConnections() const override
Definition: Layer.hpp:138
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
Definition: Layer.hpp:310
const uint32_t * GetViewOrigin(uint32_t idx) const
Return the view origin at the int value idx.
std::vector< ViewOrigin > m_ViewOrigins
#define ARMNN_ASSERT_MSG(COND, MSG)
Definition: Assert.hpp:15
An OriginsDescriptor for the ConcatLayer.
void Accept(ILayerVisitor &visitor) const override
Apply a visitor to this layer.
void ValidateTensorShapesFromInputs() override
Check if the input tensor shape(s) will lead to a valid configuration of ConcatLayer.
This layer represents a merge operation.
Definition: ConcatLayer.hpp:13
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14
const OutputSlot * GetConnectedOutputSlot() const
Definition: Layer.hpp:55
ITensorHandle * GetData() const
Gets the allocated tensor memory.
#define CHECK_LOCATION()
Definition: Exceptions.hpp:192
std::vector< OutputHandler > m_OutputHandlers
Definition: Layer.hpp:371
virtual void VisitConcatLayer(const IConnectableLayer *layer, const OriginsDescriptor &concatDescriptor, const char *name=nullptr)
Function that a concat layer should call back to when its Accept(ILayerVisitor&) function is invoked...
const OutputHandler & GetOutputHandler(unsigned int i=0) const
Definition: Layer.hpp:221
ClWorkloadFactory FactoryType
ITensorHandleFactory * GetFactory(ITensorHandleFactory::FactoryId id) const
Find a TensorHandleFactory by Id Returns nullptr if not found.
uint32_t GetNumDimensions() const
Get the number of dimensions.
WorkloadInfo PrepInfoAndDesc(QueueDescriptor &descriptor) const
Helper function to reduce duplication in *LayerCreateWorkload.
LayerType GetType() const
Definition: Layer.hpp:259
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
Definition: Layer.hpp:312
const OutputHandler & GetOutputHandler() const
Definition: Layer.hpp:119
const char * GetName() const override
Returns the name of the layer.
Definition: Layer.hpp:305
ITensorHandleFactory::FactoryId GetTensorHandleFactoryId() const
Definition: Layer.cpp:173
uint32_t GetNumViews() const
Get the number of views.
const TensorInfo & GetTensorInfo() const override
Definition: Layer.cpp:63
static const FactoryId LegacyFactoryId
virtual void CreateTensorHandles(const TensorHandleFactoryRegistry &registry, const IWorkloadFactory &factory, const bool IsMemoryManaged=true) override
Set the outputs to be appropriate sub tensors of the input if sub tensors are supported otherwise cre...
const TensorInfo & GetTensorInfo() const
Gets the matching TensorInfo for the output.