ArmNN
 22.05.01
Layer.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 #include "Layer.hpp"
6 
7 #include "Graph.hpp"
8 
11 
13 
15 
16 #include <client/include/IProfilingService.hpp>
17 
18 #include <fmt/format.h>
19 
20 #include <numeric>
21 
22 namespace armnn
23 {
24 
25 // Instantiate the static member variable
// Shared descriptor object returned by layers that carry no parameters.
// NOTE(review): purpose inferred from the name and BaseDescriptor usage in
// ExecuteStrategy below — confirm against Layer.hpp.
26 NullDescriptor Layer::m_NullDescriptor;
27 
// AssertNumberOfInputSlots (Layer.cpp:28 per the symbol index). The signature
// line (void AssertNumberOfInputSlots(Layer& layer)) and the switch case
// labels (doc lines 32-34) are missing from this dump. Debug-asserts that
// `layer` has the number of input slots its LayerType requires.
29 {
 30  switch (layer.GetType())
 31  {
// NOTE(review): case labels missing here — presumably layer types that take
// optional weight/bias inputs, hence 2 or 3 slots; confirm upstream.
 35  {
 36  ARMNN_ASSERT(layer.GetNumInputSlots() == 2 ||
 37  layer.GetNumInputSlots() == 3);
 38  break;
 39  }
 40  default:
 41  {
// Every other layer type must have exactly one input slot.
 42  ARMNN_ASSERT(layer.GetNumInputSlots() == 1);
 43  break;
 44  }
 45  }
 46 }
47 
// InputSlot::Insert (Layer.cpp:48 per the symbol index) — the signature line
// is missing from this dump. Splices a single-output `layer` between this
// input slot and whatever output slot currently feeds it: the old producer is
// disconnected from this slot, reconnected to `layer`'s input 0, its
// TensorInfo is copied onto `layer`, and `layer`'s output is connected here.
// NOTE(review): doc lines 59 and 72 are missing from this view — confirm
// their content upstream.
49 {
 50  ARMNN_ASSERT(layer.GetNumOutputSlots() == 1);
 51 
 52  OutputSlot* const prevSlot = GetConnectedOutputSlot();
 53 
 54  if (prevSlot != nullptr)
 55  {
 56  // Disconnects parent from this.
 57  prevSlot->Disconnect(*this);
 58 
 60 
 61  // Connects inserted layer to parent.
 62  int idx = prevSlot->Connect(layer.GetInputSlot(0));
// New edge starts with an Undefined strategy; it is decided later.
 63  prevSlot->SetEdgeStrategy(armnn::numeric_cast<unsigned int>(idx), EdgeStrategy::Undefined);
 64 
 65  // Sets tensor info for inserted layer.
 66  const TensorInfo& tensorInfo = prevSlot->GetTensorInfo();
 67  layer.GetOutputHandler().SetTensorInfo(tensorInfo);
 68  }
 69 
 70  // Connects inserted layer to this.
 71  layer.GetOutputSlot(0).Connect(*this);
 73 }
74 
75 const InputSlot* OutputSlot::GetConnection(unsigned int index) const
76 {
77  ValidateConnectionIndex(index);
78  return m_Connections[index];
79 }
80 
// Non-const overload of OutputSlot::GetConnection (Layer.cpp:81 per the
// symbol index) — the signature line is missing from this dump. Validates the
// index, then returns the stored connection pointer.
82 {
 83  ValidateConnectionIndex(index);
 84  return m_Connections[index];
 85 }
86 
87 void OutputSlot::SetTensorInfo(const TensorInfo& tensorInfo)
88 {
89  GetOutputHandler().SetTensorInfo(tensorInfo);
90 }
91 
// OutputSlot::GetTensorInfo (Layer.cpp:92 per the symbol index) — the
// signature line is missing from this dump. Returns the TensorInfo held by
// the slot's output handler.
93 {
 94  return GetOutputHandler().GetTensorInfo();
 95 }
96 
// OutputSlot::IsTensorInfoSet (Layer.cpp:97 per the symbol index) — the
// signature line is missing from this dump. Reports whether the handler's
// TensorInfo has been populated.
98 {
 99  if (GetOwningLayer().GetShapeInferenceMethod() == ShapeInferenceMethod::InferAndValidate)
 100  {
// NOTE(review): the statement at doc line 101 is missing from this dump —
// presumably it triggers shape inference on the owning layer; confirm upstream.
 102  }
 103  return GetOutputHandler().IsTensorInfoSet();
 104 }
105 
// OutputSlot::ValidateTensorShape (Layer.cpp:106 per the symbol index) — the
// signature line is missing from this dump. Returns true when @p shape equals
// the shape already recorded on this slot; asserts TensorInfo is set first.
107 {
 108  ARMNN_ASSERT_MSG(IsTensorInfoSet(), "TensorInfo must be set in order to validate the shape.");
 109  return shape == m_OutputHandler.GetTensorInfo().GetShape();
 110 }
111 
// OutputSlot::Connect (Layer.cpp:112 per the symbol index) — the signature
// line is missing from this dump. Links @p destination to this slot, records
// the connection with an Undefined edge strategy (decided later), and returns
// the new connection's index.
113 {
 114  destination.SetConnection(this);
 115  m_Connections.push_back(&destination);
// m_EdgeStrategies is kept parallel to m_Connections.
 116  m_EdgeStrategies.push_back(EdgeStrategy::Undefined);
 117  return armnn::numeric_cast<int>(m_Connections.size() - 1);
 118 }
119 
// OutputSlot::Disconnect (Layer.cpp:120 per the symbol index) — the signature
// line is missing from this dump. Breaks the link to @p slot and removes the
// matching entries from both parallel vectors.
// NOTE(review): slot.SetConnection(nullptr) runs before the membership check,
// so the input slot's back-pointer is cleared even when it was never connected
// to this output — verify this is intentional upstream.
121 {
 122  slot.SetConnection(nullptr);
 123  auto it = std::find(m_Connections.begin(), m_Connections.end(), &slot);
 124 
 125  if (it == m_Connections.end())
 126  {
 127  return;
 128  }
 129 
// Remember the position so the edge-strategy entry at the same index can be
// erased after the connection itself is removed.
 130  auto idx = std::distance(m_Connections.begin(), it);
 131  m_Connections.erase(std::remove(m_Connections.begin(), m_Connections.end(), &slot), m_Connections.end());
 132 
 133  m_EdgeStrategies.erase(m_EdgeStrategies.begin() + idx);
 134 }
135 
// OutputSlot::DisconnectAll (Layer.cpp:136 per the symbol index) — the
// signature line is missing from this dump. Repeatedly disconnects the first
// remaining connection until none are left.
137 {
 138  while (GetNumConnections() > 0)
 139  {
 140  InputSlot& connection = *GetConnection(0);
 141  Disconnect(connection);
 142  }
 143 }
144 
// OutputSlot::MoveAllConnections (Layer.cpp:145 per the symbol index) — the
// signature line is missing from this dump. Re-homes every connection from
// this slot onto @p destination, copying this slot's TensorInfo across.
// Only legal while all edge strategies are still Undefined.
146 {
 147  while (GetNumConnections() > 0)
 148  {
 149  ARMNN_ASSERT_MSG(m_EdgeStrategies[0] == EdgeStrategy::Undefined,
 150  "Cannot move connections once memory strategies have be established.");
 151 
 152  InputSlot& connection = *GetConnection(0);
 153  Disconnect(connection);
 154  destination.Connect(connection);
// The destination must advertise the same tensor metadata as this slot did.
 155  destination.GetOutputHandler().SetTensorInfo(GetOutputHandler().GetTensorInfo());
 156  }
 157 }
158 
// OutputSlot::CalculateIndexOnOwner (Layer.cpp:159 per the symbol index) —
// the signature line is missing from this dump. Linearly scans the owning
// layer's output slots for this slot and returns its index; asserts (and
// returns 0) if not found.
160 {
 161  for (unsigned int i = 0; i < GetOwningLayer().GetNumOutputSlots(); i++)
 162  {
 163  if (GetOwningLayer().GetOutputSlot(i) == (*this))
 164  {
 165  return i;
 166  }
 167  }
 168  ARMNN_ASSERT_MSG(false, "Did not find slot on owner.");
 169  return 0; // Error
 170 }
171 
172 bool OutputSlot::operator==(const OutputSlot& other) const
173 {
174  bool isSame = other.GetNumConnections() == GetNumConnections();
175  if (!isSame)
176  {
177  return false;
178  }
179 
180  for (unsigned int i = 0; i < GetNumConnections(); i++)
181  {
182  isSame &= other.GetConnection(i) == GetConnection(i);
183  }
184  return isSame;
185 }
186 
187 void OutputSlot::ValidateConnectionIndex(unsigned int index) const
188 {
189  if (armnn::numeric_cast<std::size_t>(index) >= m_Connections.size())
190  {
191  throw InvalidArgumentException((fmt::format("GetConnection: Invalid index {} provided", index)));
192  }
193 }
194 
// OutputSlot::GetOwningLayerGuid (Layer.cpp:195 per the symbol index) — the
// signature line is missing from this dump. Forwards to the owning layer's guid.
196 {
 197  return GetOwningLayer().GetGuid();
 198 }
199 
// OutputSlot::SetTensorHandleFactory (Layer.cpp:200 per the symbol index) —
// the signature line is missing from this dump. Records which tensor handle
// factory should create this slot's tensors.
201 {
 202  m_TensorHandleFactoryId = id;
 203 }
204 
// OutputSlot::GetTensorHandleFactoryId (Layer.cpp:205 per the symbol index) —
// the signature line is missing from this dump. Accessor for the factory id
// set by SetTensorHandleFactory.
206 {
 207  return m_TensorHandleFactoryId;
 208 }
209 
210 void OutputSlot::SetEdgeStrategy(unsigned int connectionIndex, EdgeStrategy strategy)
211 {
212  m_EdgeStrategies[connectionIndex] = strategy;
213 }
214 
// OutputSlot::GetEdgeStrategyForConnection (Layer.cpp:215 per the symbol
// index) — the signature line is missing from this dump. Unchecked read of the
// strategy stored for the given connection index.
216 {
 217  return m_EdgeStrategies[connectionIdx];
 218 }
219 
220 Layer::Layer(unsigned int numInputSlots,
221  unsigned int numOutputSlots,
222  LayerType type,
223  DataLayout layout,
224  const char* name)
225 : m_OutputHandlers(numOutputSlots)
226 , m_ShapeInferenceMethod(ShapeInferenceMethod::ValidateOnly)
227 , m_LayerName(name ? name : "")
228 , m_Type(type)
229 , m_BackendId()
230 , m_BackendHint(EmptyOptional())
231 , m_Guid(arm::pipe::IProfilingService::GetNextGuid())
232 {
233  IgnoreUnused(layout);
234  m_InputSlots.reserve(numInputSlots);
235  for (unsigned int i = 0; i < numInputSlots; ++i)
236  {
237  m_InputSlots.emplace_back(*this, i);
238  }
239 
240  m_OutputSlots.reserve(numOutputSlots);
241  for (unsigned int i = 0; i < numOutputSlots; ++i)
242  {
243  m_OutputSlots.emplace_back(*this, m_OutputHandlers[i]);
244  }
245 }
246 
// Convenience constructor for layers that do not care about data layout:
// delegates to the primary constructor with DataLayout::NCHW.
247 Layer::Layer(unsigned int numInputSlots,
 248  unsigned int numOutputSlots,
 249  LayerType type,
 250  const char* name)
 251 : Layer(numInputSlots, numOutputSlots, type, DataLayout::NCHW, name)
 252 {
 253 }
254 
255 void Layer::CollectWorkloadInputs(WorkloadDataCollector& dataCollector) const
256 {
257  for (auto&& inputSlot : GetInputSlots())
258  {
259  // The graph must be well-formed at this point.
260  ARMNN_ASSERT(inputSlot.GetConnection());
261  const OutputHandler& outputHandler = inputSlot.GetConnectedOutputSlot()->GetOutputHandler();
262  dataCollector.Push(outputHandler.GetData(), outputHandler.GetTensorInfo());
263  }
264 }
265 
266 void Layer::CollectWorkloadOutputs(WorkloadDataCollector& dataCollector) const
267 {
268  for (auto&& outputHandler : m_OutputHandlers)
269  {
270  outputHandler.CollectWorkloadOutputs(dataCollector);
271  }
272 }
273 
// Layer::SetAdditionalInfo (Layer.cpp:274 per the symbol index) — both the
// signature line and the single body statement (doc line 276) are missing
// from this dump. Per the symbol index it copies m_AdditionalInfoObject into
// the given QueueDescriptor — confirm upstream before relying on this.
275 {
 277 }
278 
// Layer::CreateTensorHandles (Layer.cpp:279 per the symbol index) — the first
// signature line (taking the TensorHandleFactoryRegistry& registry) and the
// declaration of `factoryId` (doc line 287, presumably read from
// slot.GetTensorHandleFactoryId()) are missing from this dump; confirm
// upstream. Creates the output tensor handles for every output slot, using
// the legacy workload factory when no specific handle factory was assigned.
280  const IWorkloadFactory& workloadFactory,
 281  const bool IsMemoryManaged)
 282 {
 283  for (unsigned int idx=0; idx < GetNumOutputSlots(); idx++)
 284  {
 285 
 286  OutputSlot& slot = GetOutputSlot(idx);
 288 
 289  OutputHandler& handler = GetOutputHandler(idx);
 290  if (factoryId == ITensorHandleFactory::LegacyFactoryId)
 291  {
// No dedicated factory assigned: fall back to the workload factory.
 292  handler.CreateTensorHandles(workloadFactory, IsMemoryManaged);
 293  }
 294  else
 295  {
 296  ITensorHandleFactory* handleFactory;
 297  handleFactory = registry.GetFactory(factoryId);
 298  ARMNN_ASSERT(handleFactory);
 299  handler.CreateTensorHandles(*handleFactory, IsMemoryManaged);
 300  }
 301  }
 302 }
303 
// Layer::ReleaseConstantData (Layer.cpp:304 per the symbol index) — the
// signature line is missing from this dump. Drops every constant tensor
// handle owned by this layer by resetting each shared_ptr.
305 {
 306  // Now free up the static data.
 307  OperateOnConstantTensors([](std::shared_ptr<ConstTensorHandle>& handle)
 308  {
 309  handle.reset();
 310  });
 311 }
312 
// Layer::GetDataType (Layer.cpp:313 per the symbol index) — the signature
// line and both return statements (doc lines 317 and 319) are missing from
// this dump; presumably the branch returns the data type of the first input's
// connected TensorInfo and the fallback returns the first output slot's data
// type — confirm upstream.
314 {
 315  if (GetNumInputSlots() > 0) // Ignore the input layer.
 316  {
 318  }
 320 }
321 
// Layer::ResetPriority (Layer.cpp:322 per the symbol index) — the signature
// line is missing from this dump. Clears the cached priority (0 means "not
// yet computed", see GetPriority) and the cycle-detection flag.
323 {
 324  m_Priority = 0;
 325  m_Visiting = false;
 326 }
327 
// Layer::GetPriority (Layer.cpp:328 per the symbol index) — the signature
// line is missing from this dump. Lazily computes a topological priority:
// Input layers get the lowest value, Output layers the highest, and any other
// layer gets 1 + the maximum priority of its connected parents, computed
// recursively. m_Visiting detects cycles during the walk.
329 {
 330  constexpr LayerPriority inputPrio = std::numeric_limits<LayerPriority>::lowest();
 331  constexpr LayerPriority outputPrio = std::numeric_limits<LayerPriority>::max();
 332 
 333  if (GetType() == LayerType::Input)
 334  {
 335  m_Priority = inputPrio;
 336  }
 337  else if (GetType() == LayerType::Output)
 338  {
 339  m_Priority = outputPrio;
 340  }
 341  else if (m_Priority == 0)
 342  {
// m_Priority == 0 means "not computed yet" (set by ResetPriority).
 343  if (m_Visiting)
 344  {
 345  throw GraphValidationException("Graph has circular dependencies: cannot walk");
 346  }
 347 
// Folding step for std::accumulate: take the max of the running priority and
// the parent layer feeding this slot (unconnected slots contribute nothing).
 348  auto maxPrio = [](const LayerPriority prio, const InputSlot& slot) -> LayerPriority
 349  {
 350  const OutputSlot *outputSlot = slot.GetConnectedOutputSlot();
 351  if (outputSlot)
 352  {
 353  const Layer& input = outputSlot->GetOwningLayer();
 354  return std::max(prio, input.GetPriority());
 355  }
 356  else
 357  {
 358  // unconnected input slot
 359  return prio;
 360  }
 361  };
 362 
 363  m_Visiting = true;
 364  LayerPriority parentPrio = std::accumulate(GetInputSlots().cbegin(), GetInputSlots().cend(), 0U, maxPrio);
 365  m_Visiting = false;
 366 
// parentPrio + 1 must not collide with the reserved Output priority.
 367  if (parentPrio >= outputPrio)
 368  {
 369  throw GraphValidationException("Graph has too many edges");
 370  }
 371 
 372  m_Priority = parentPrio + 1U;
 373  }
 374 
 375  return m_Priority;
 376 }
377 
// Asserts the layer has exactly @p expectedConnections input slots and throws
// when any of them is unconnected. NOTE(review): the throw statement opening
// (doc line 386, presumably `throw LayerValidationException(`) and one format
// argument (doc line 390, presumably the layer-type string) are missing from
// this dump — confirm upstream.
378 void Layer::VerifyLayerConnections(unsigned int expectedConnections, const CheckLocation& location) const
 379 {
 380  ARMNN_ASSERT(GetNumInputSlots() == expectedConnections);
 381 
 382  for (unsigned int i=0; i<expectedConnections; ++i)
 383  {
 384  if (GetInputSlot(i).GetConnection() == nullptr)
 385  {
 387  fmt::format("Input connection #{0} must be connected "
 388  "for {1} layer {2} {3}",
 389  i,
 391  GetNameStr(),
 392  location.AsString()));
 393  }
 394  }
 395 }
396 
// Default shape inference: echoes the input shapes back as the output shapes.
// Only valid for layers with equal numbers of inputs and outputs; otherwise
// an exception is thrown (see comment in the body). NOTE(review): several
// lines are missing from this dump — doc lines 399-400 (presumably input/
// output-count assertions), 407 (the slot-count inequality check), 409 (the
// throw statement opening) and 413-416 (format arguments) — confirm upstream.
397 std::vector<TensorShape> Layer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
 398 {
 401 
 402  // By default we return what we got, meaning the output shape(s) are the same as the input(s).
 403  // This only works if the number of inputs and outputs are the same. Since we are in the Layer
 404  // base class, this means the implementation needs to be overridden in the specific layers for
 405  // the other cases. So the missing implementation justifies the UnimplementedException.
 406 
 408  {
 410  fmt::format("Default implementation for InferOutputShapes can only be used for "
 411  "layers with the same number of input and output slots. This doesn't "
 412  "hold for {0} layer {1} (#inputs={2} #outputs={3}) {4}",
 414  GetNameStr(),
 417  CHECK_LOCATION().AsString()));
 418  }
 419  return inputShapes;
 420 }
421 
422 void Layer::ValidateAndCopyShape(const TensorShape& outputShape,
423  const TensorShape& inferredShape,
424  const ShapeInferenceMethod shapeInferenceMethod,
425  const std::string& layerName,
426  const unsigned int outputSlotIndex)
427 {
428  if (shapeInferenceMethod == ShapeInferenceMethod::ValidateOnly)
429  {
430  if (m_AllowExpandedDims)
431  {
432  std::vector<unsigned int> outputDims = armnnUtils::SqueezeDims(outputShape);
433  std::vector<unsigned int> inferredDims = armnnUtils::SqueezeDims(inferredShape);
434 
435  if (outputDims.size() != inferredDims.size())
436  {
437  std::stringstream ss;
438  ss << layerName << ": TensorShape set on OutputSlot[" << outputSlotIndex <<
439  "] does not match the inferred shape. ";
440  ss << outputShape << " != " << inferredShape;
441  throw LayerValidationException(ss.str());
442  }
443  for (unsigned int i = 0; i < outputDims.size(); ++i)
444  {
445  if (outputDims[i] != inferredDims[i])
446  {
447  std::stringstream ss;
448  ss << layerName << ": TensorShape set on OutputSlot[" << outputSlotIndex <<
449  "] does not match the inferred shape at dimension index [";
450  ss << i << "] " << outputShape << " != " << inferredShape;
451  throw LayerValidationException(ss.str());
452  }
453  }
454  return;
455  }
456  else
457  {
458  ConditionalThrowIfNotEqual<LayerValidationException>(
459  layerName + ": TensorShape set on OutputSlot[0] does not match the inferred shape.",
460  outputShape,
461  inferredShape);
462  return;
463  }
464  }
465 
466  if (outputShape.GetDimensionality() == Dimensionality::Specified)
467  {
468  for (unsigned int i = 0; i < outputShape.GetNumDimensions(); ++i)
469  {
470  if (outputShape.GetDimensionSpecificity(i) && outputShape[i] != inferredShape[i])
471  {
472  std::stringstream ss;
473  ss << layerName << ": TensorShape set on OutputSlot[" << outputSlotIndex <<
474  "] does not match the inferred shape at dimension index [";
475  ss << i << "] " << outputShape << " != " << inferredShape;
476  throw LayerValidationException(ss.str());
477  }
478  }
479  }
480 
481  TensorInfo info = GetOutputSlot(outputSlotIndex).GetTensorInfo();
482 
483  armnn::TensorInfo inferredTensorInfo(inferredShape,
484  info.GetDataType(),
485  info.GetQuantizationScale(),
486  info.GetQuantizationOffset());
487 
488  GetOutputSlot(outputSlotIndex).SetTensorInfo(inferredTensorInfo);
489 }
490 
// Under ValidateOnly, rejects output shapes that are not fully specified,
// since there is no inference step to fill them in. NOTE(review): the first
// condition argument (doc line 496, presumably
// `outputShape.GetDimensionality() != Dimensionality::NotSpecified`) is
// missing from this dump — confirm upstream.
491 void Layer::VerifyShapeInferenceType(const TensorShape& outputShape, ShapeInferenceMethod shapeInferenceMethod)
 492 {
 493  if (shapeInferenceMethod == ShapeInferenceMethod::ValidateOnly)
 494  {
 495  ConditionalThrow<LayerValidationException>(
 497  "Dimensionality can not be NotSpecified while using ShapeInferenceMethod::ValidateOnly");
 498 
 499  ConditionalThrow<LayerValidationException>(
 500  outputShape.AreAllDimensionsSpecified(),
 501  "Unspecified dimension while using ShapeInferenceMethod::ValidateOnly");
 502  }
 503 }
504 
// Layer::SerializeLayerParameters (Layer.cpp:505 per the symbol index) — the
// signature line (taking a ParameterStringifyFunction& fn) is missing from
// this dump. Emits the layer's identifying attributes (guid, name, type,
// backend) plus any fused-activation descriptor through the callback.
// NOTE(review): `compare("") == 0` is equivalent to `empty()`, so each pair of
// checks below is redundant — candidate for simplification upstream.
506 {
 507  std::string guid = std::to_string(m_Guid);
 508  std::string layerType = GetLayerTypeAsCString(m_Type);
 509  std::string backendId = std::string(m_BackendId);
 510  if (!(guid.compare("") == 0) && !guid.empty())
 511  {
 512  fn("Guid", guid);
 513  }
 514  if(!(m_LayerName.compare("") == 0) && !m_LayerName.empty())
 515  {
 516  fn("LayerName",m_LayerName);
 517  }
 518  if(!(layerType.compare("") == 0) && !layerType.empty())
 519  {
 520  fn("LayerType",layerType);
 521  }
 522  if(!(backendId.compare("") == 0) && !backendId.empty())
 523  {
 524  fn("BackendID",backendId);
 525  }
// Fused activation (if any) is stored as additional information on the layer.
 526  std::shared_ptr<ActivationDescriptor>
 527  activationDescPtr = GetAdditionalInformation<ActivationDescriptor>();
 528 
 529  if (activationDescPtr)
 530  {
 531  StringifyLayerParameters<ActivationDescriptor>::Serialize(fn, *activationDescPtr.get())
;
 532  }
 533 }
534 
535 // default implementation of ExecuteStrategy
536 void Layer::ExecuteStrategy(IStrategy& strategy) const
537 {
538  strategy.ExecuteStrategy(this, BaseDescriptor(), {}, GetName());
539 }
540 
// InputSlot::GetOwningIConnectableLayer (Layer.cpp:541 per the symbol index)
// — the signature line is missing from this dump. Exposes the owning layer
// through the IConnectableLayer interface.
542 {
 543  return m_OwningLayer;
 544 }
545 
// OutputSlot::GetOwningIConnectableLayer (Layer.cpp:546 per the symbol index)
// — the signature line is missing from this dump. Exposes the owning layer
// through the IConnectableLayer interface.
547 {
 548  return m_OwningLayer;
 549 }
550 
551 } // namespace armnn
void DisconnectAll()
Definition: Layer.cpp:136
void AssertNumberOfInputSlots(Layer &layer)
Definition: Layer.cpp:28
virtual void ReleaseConstantData()
Definition: Layer.cpp:304
bool ValidateTensorShape(const TensorShape &shape) const
Definition: Layer.cpp:106
void Insert(Layer &layer)
Definition: Layer.cpp:48
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:66
void SetEdgeStrategy(unsigned int connectionIndex, EdgeStrategy strategy)
Definition: Layer.cpp:210
DataLayout
Definition: Types.hpp:62
unsigned int GetNumInputSlots() const override
Returns the number of connectable input slots.
Definition: Layer.hpp:319
std::string AsString() const
Definition: Exceptions.hpp:29
std::vector< TensorShape > InferOutputShapes(const std::vector< TensorShape > &inputShapes) const override
Infer the shape of the output(s) based on the provided input shape(s)
Definition: Layer.cpp:397
bool AreAllDimensionsSpecified() const
Checks if there is at least one dimension not specified.
Definition: Tensor.cpp:241
std::vector< unsigned int > SqueezeDims(const armnn::TensorShape &tensorShape)
LayerGuid GetOwningLayerGuid() const override
Definition: Layer.cpp:195
void OperateOnConstantTensors(Op op)
Definition: Layer.hpp:304
Dimensionality GetDimensionality() const
Function that returns the tensor type.
Definition: Tensor.hpp:92
Layer & GetOwningLayer() const
Definition: Layer.hpp:118
int Connect(InputSlot &destination)
Definition: Layer.cpp:112
virtual void ExecuteStrategy(const armnn::IConnectableLayer *layer, const armnn::BaseDescriptor &descriptor, const std::vector< armnn::ConstTensor > &constants, const char *name, const armnn::LayerBindingId id=0)=0
unsigned int LayerPriority
Definition: Layer.hpp:212
EdgeStrategy GetEdgeStrategyForConnection(unsigned int connectionIdx) const
Definition: Layer.cpp:215
void VerifyShapeInferenceType(const TensorShape &outputShape, ShapeInferenceMethod shapeInferenceMethod)
Definition: Layer.cpp:491
Copyright (c) 2021 ARM Limited and Contributors.
void IgnoreUnused(Ts &&...)
const std::vector< InputSlot > & GetInputSlots() const
Definition: Layer.hpp:243
const IOutputSlot * GetConnection() const override
Definition: Layer.hpp:204
unsigned int GetNumOutputSlots() const override
Returns the number of connectable output slots.
Definition: Layer.hpp:320
bool GetDimensionSpecificity(unsigned int i) const
Gets information about if the dimension size has been specified or not.
Definition: Tensor.cpp:211
void ValidateAndCopyShape(const TensorShape &outputShape, const TensorShape &inferredShape, const ShapeInferenceMethod shapeInferenceMethod, const std::string &layerName, const unsigned int outputSlotIndex=0)
Definition: Layer.cpp:422
void Disconnect(InputSlot &slot)
Definition: Layer.cpp:120
const IConnectableLayer & GetOwningIConnectableLayer() const override
Definition: Layer.cpp:541
Base class for all descriptors.
Definition: Descriptors.hpp:22
void CreateTensorHandles(const IWorkloadFactory &factory, const bool IsMemoryManaged=true)
Creates tensor handles used by the intermediate tensors.
const IConnectableLayer & GetOwningIConnectableLayer() const override
Definition: Layer.cpp:546
void VerifyLayerConnections(unsigned int expectedConnections, const CheckLocation &location) const
Definition: Layer.cpp:378
unsigned int GetNumConnections() const override
Definition: Layer.hpp:143
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
Definition: Layer.hpp:322
void ExecuteStrategy(IStrategy &strategy) const override
Apply a visitor to this layer.
Definition: Layer.cpp:536
DataType
Definition: Types.hpp:48
void ResetPriority() const
Definition: Layer.cpp:322
#define ARMNN_ASSERT_MSG(COND, MSG)
Definition: Assert.hpp:15
AdditionalInfoObjectPtr m_AdditionalInfoObject
Definition: Layer.hpp:407
DataType GetDataType() const
Definition: Tensor.hpp:198
void Push(ITensorHandle *handle, const TensorInfo &info)
Validate all output shapes.
const std::string & GetNameStr() const
Definition: Layer.hpp:225
LayerType GetType() const override
Returns the armnn::LayerType of this layer.
Definition: Layer.hpp:271
LayerPriority GetPriority() const
Definition: Layer.cpp:328
Layer(unsigned int numInputSlots, unsigned int numOutputSlots, LayerType type, const char *name)
Definition: Layer.cpp:247
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14
const OutputSlot * GetConnectedOutputSlot() const
Definition: Layer.hpp:56
virtual void ValidateTensorShapesFromInputs()=0
Layer & GetOwningLayer() const
Definition: Layer.hpp:53
#define CHECK_LOCATION()
Definition: Exceptions.hpp:203
std::vector< OutputHandler > m_OutputHandlers
Definition: Layer.hpp:420
void SetTensorInfo(const TensorInfo &tensorInfo)
Sets the TensorInfo used by this output handler.
void SetAdditionalInfo(QueueDescriptor &descriptor) const
Definition: Layer.cpp:274
void SetTensorHandleFactory(const ITensorHandleFactory::FactoryId &id)
Definition: Layer.cpp:200
arm::pipe::ProfilingGuid LayerGuid
Define LayerGuid type.
Definition: Types.hpp:26
EmptyOptional is used to initialize the Optional class in case we want to have default value for an O...
Definition: Optional.hpp:32
bool operator==(const OutputSlot &other) const
Definition: Layer.cpp:172
void SetConnection(OutputSlot *source)
Links the slot to an output slot or breaks an existing link if passing nullptr.
Definition: Layer.hpp:62
const OutputHandler & GetOutputHandler(unsigned int i=0) const
Definition: Layer.hpp:230
unsigned int GetNumDimensions() const
Function that returns the tensor rank.
Definition: Tensor.cpp:174
ITensorHandleFactory * GetFactory(ITensorHandleFactory::FactoryId id) const
Find a TensorHandleFactory by Id Returns nullptr if not found.
void SetTensorInfo(const TensorInfo &tensorInfo) override
Definition: Layer.cpp:87
virtual void SerializeLayerParameters(ParameterStringifyFunction &fn) const
Helper to serialize the layer parameters to string.
Definition: Layer.cpp:505
std::enable_if_t< std::is_unsigned< Source >::value &&std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
Definition: NumericCast.hpp:35
DataType GetDataType() const
Definition: Layer.cpp:313
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
Definition: Layer.hpp:324
Infer missing output shapes and validate all output shapes.
virtual const TensorInfo & GetTensorInfo() const =0
const OutputHandler & GetOutputHandler() const
Definition: Layer.hpp:124
const char * GetName() const override
Returns the name of the layer.
Definition: Layer.hpp:317
ITensorHandleFactory::FactoryId GetTensorHandleFactoryId() const
Definition: Layer.cpp:205
bool IsTensorInfoSet() const override
Definition: Layer.cpp:97
std::function< void(const std::string &name, const std::string &value)> ParameterStringifyFunction
static void Serialize(ParameterStringifyFunction &, const LayerParameter &)
const TensorInfo & GetTensorInfo(const ITensorHandle *tensorHandle)
float32 helpers
virtual void CreateTensorHandles(const TensorHandleFactoryRegistry &registry, const IWorkloadFactory &factory, const bool IsMemoryManaged=true)
Definition: Layer.cpp:279
const TensorInfo & GetTensorInfo() const override
Definition: Layer.cpp:92
ShapeInferenceMethod
The ShapeInferenceMethod modify how the output shapes are treated.
Definition: Types.hpp:221
const char * GetLayerTypeAsCString(LayerType type)
static const FactoryId LegacyFactoryId
void MoveAllConnections(OutputSlot &destination)
Moves all connections to another OutputSlot.
Definition: Layer.cpp:145
const InputSlot * GetConnection(unsigned int index) const override
Definition: Layer.cpp:75
LayerType
When adding a new layer, adapt also the LastLayer enum value in the enum class LayerType below...
Definition: Types.hpp:467
LayerGuid GetGuid() const final
Returns the unique id of the layer.
Definition: Layer.hpp:328
unsigned int CalculateIndexOnOwner() const override
Definition: Layer.cpp:159