ArmNN
 21.11
ClBackend.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #include "ClBackend.hpp"
7 #include "ClBackendContext.hpp"
9 #include "ClBackendId.hpp"
12 #include "ClLayerSupport.hpp"
14 #include "ClWorkloadFactory.hpp"
15 
17 #include <armnn/Descriptors.hpp>
18 
22 
26 
36 
37 #include <Optimizer.hpp>
38 
39 #include <arm_compute/core/Types.h>
40 #include <arm_compute/runtime/CL/CLBufferAllocator.h>
41 
42 namespace armnn
43 {
44 
46 {
47  static const BackendId s_Id{ClBackendId()};
48  return s_Id;
49 }
50 
52 {
54  {
55  return std::make_unique<ClMemoryManager>(m_CustomAllocator);
56  }
57  return std::make_unique<ClMemoryManager>(std::make_unique<arm_compute::CLBufferAllocator>());
58 }
59 
61  const IBackendInternal::IMemoryManagerSharedPtr& memoryManager) const
62 {
63  return std::make_unique<ClWorkloadFactory>(
64  PolymorphicPointerDowncast<ClMemoryManager>(memoryManager));
65 }
66 
68  const IBackendInternal::IMemoryManagerSharedPtr& memoryManager, const ModelOptions& modelOptions) const
69 {
70  return std::make_unique<ClWorkloadFactory>(
71  PolymorphicPointerDowncast<ClMemoryManager>(memoryManager), CreateBackendSpecificModelContext(modelOptions));
72 }
73 
75  TensorHandleFactoryRegistry& registry) const
76 {
77  std::shared_ptr<ClMemoryManager> memoryManager;
79  {
80  memoryManager = std::make_shared<ClMemoryManager>(m_CustomAllocator);
81  }
82  else
83  {
84  memoryManager = std::make_shared<ClMemoryManager>(std::make_unique<arm_compute::CLBufferAllocator>());
85  }
86 
87  registry.RegisterMemoryManager(memoryManager);
88  registry.RegisterFactory(std::make_unique<ClTensorHandleFactory>(memoryManager));
89  registry.RegisterFactory(std::make_unique<ClImportTensorHandleFactory>(
90  static_cast<MemorySourceFlags>(MemorySource::Malloc), static_cast<MemorySourceFlags>(MemorySource::Malloc)));
91 
92  return std::make_unique<ClWorkloadFactory>(
93  PolymorphicPointerDowncast<ClMemoryManager>(memoryManager));
94 }
95 
97  TensorHandleFactoryRegistry& registry, const ModelOptions& modelOptions) const
98 {
99  std::shared_ptr<ClMemoryManager> memoryManager;
101  {
102  memoryManager = std::make_shared<ClMemoryManager>(m_CustomAllocator);
103  }
104  else
105  {
106  memoryManager = std::make_shared<ClMemoryManager>(std::make_unique<arm_compute::CLBufferAllocator>());
107  }
108 
109  registry.RegisterMemoryManager(memoryManager);
110  registry.RegisterFactory(std::make_unique<ClTensorHandleFactory>(memoryManager));
111  registry.RegisterFactory(std::make_unique<ClImportTensorHandleFactory>(
112  static_cast<MemorySourceFlags>(MemorySource::Malloc), static_cast<MemorySourceFlags>(MemorySource::Malloc)));
113 
114  return std::make_unique<ClWorkloadFactory>(
115  PolymorphicPointerDowncast<ClMemoryManager>(memoryManager), CreateBackendSpecificModelContext(modelOptions));
116 }
117 
119  TensorHandleFactoryRegistry& registry,
120  const ModelOptions& modelOptions,
121  MemorySourceFlags inputFlags,
122  MemorySourceFlags outputFlags) const
123 {
124  std::shared_ptr<ClMemoryManager> memoryManager;
126  {
127  memoryManager = std::make_shared<ClMemoryManager>(m_CustomAllocator);
128  }
129  else
130  {
131  memoryManager = std::make_shared<ClMemoryManager>(std::make_unique<arm_compute::CLBufferAllocator>());
132  }
133 
134  registry.RegisterMemoryManager(memoryManager);
135  registry.RegisterFactory(std::make_unique<ClTensorHandleFactory>(memoryManager));
136  registry.RegisterFactory(std::make_unique<ClImportTensorHandleFactory>(inputFlags, outputFlags));
137 
138  return std::make_unique<ClWorkloadFactory>(
139  PolymorphicPointerDowncast<ClMemoryManager>(memoryManager), CreateBackendSpecificModelContext(modelOptions));
140 }
141 
142 std::vector<ITensorHandleFactory::FactoryId> ClBackend::GetHandleFactoryPreferences() const
143 {
144  return std::vector<ITensorHandleFactory::FactoryId> {ClTensorHandleFactory::GetIdStatic(),
146 }
147 
149 {
150  std::shared_ptr<ClMemoryManager> memoryManager;
152  {
153  memoryManager = std::make_shared<ClMemoryManager>(m_CustomAllocator);
154  }
155  else
156  {
157  memoryManager = std::make_shared<ClMemoryManager>(std::make_unique<arm_compute::CLBufferAllocator>());
158  }
159 
160  registry.RegisterMemoryManager(memoryManager);
161  registry.RegisterFactory(std::make_unique<ClTensorHandleFactory>(memoryManager));
162  registry.RegisterFactory(std::make_unique<ClImportTensorHandleFactory>(
163  static_cast<MemorySourceFlags>(MemorySource::Malloc), static_cast<MemorySourceFlags>(MemorySource::Malloc)));
164 }
165 
167  MemorySourceFlags inputFlags,
168  MemorySourceFlags outputFlags)
169 {
170  std::shared_ptr<ClMemoryManager> memoryManager;
172  {
173  memoryManager = std::make_shared<ClMemoryManager>(m_CustomAllocator);
174  }
175  else
176  {
177  memoryManager = std::make_shared<ClMemoryManager>(std::make_unique<arm_compute::CLBufferAllocator>());
178  }
179 
180  registry.RegisterMemoryManager(memoryManager);
181  registry.RegisterFactory(std::make_unique<ClTensorHandleFactory>(memoryManager));
182  registry.RegisterFactory(std::make_unique<ClImportTensorHandleFactory>(inputFlags, outputFlags));
183 }
184 
186 {
187  return IBackendContextPtr{new ClBackendContext{options}};
188 }
189 
192 {
194 }
195 
197  const ModelOptions& modelOptions) const
198 {
199  return IBackendSpecificModelContextPtr{new ClBackendModelContext{modelOptions}};
200 }
201 
203 {
204  static ILayerSupportSharedPtr layerSupport
205  {
207  };
208  return layerSupport;
209 }
210 
212 {
213  static ILayerSupportSharedPtr layerSupport
214  {
216  };
217  return layerSupport;
218 }
219 
220 std::unique_ptr<ICustomAllocator> ClBackend::GetDefaultAllocator() const
221 {
222     // Hand back a fresh instance of the backend's stock CL allocator;
223     // the caller takes sole ownership of it.
224     auto defaultAllocator = std::make_unique<ClBackendDefaultAllocator>();
225     return defaultAllocator;
226 }
224 
226  const ModelOptions& modelOptions) const
227 {
228  OptimizationViews optimizationViews;
229 
230  auto it = subgraph.end();
231  bool isFastMathEnabled = false;
232  std::map<LayerGuid, Layer*> untouched;
233 
234  while (it != subgraph.begin())
235  {
236  --it;
237  Layer& base = **it;
238  untouched.insert({base.GetGuid(), &base});
239  }
240 
241  it = subgraph.end();
242 #if defined(ARMCOMPUTECL_ENABLED)
244 
245  if (modelContextPtr)
246  {
247  auto clModelOptions = dynamic_cast<ClBackendModelContext*>(modelContextPtr.get());
248  if (clModelOptions)
249  {
250  isFastMathEnabled = clModelOptions->IsFastMathEnabled();
251  }
252  }
253 #endif
254  while (it != subgraph.begin())
255  {
256  --it;
257  Layer& base = **it;
258 
259  // Fuse activation into previous layer if supported by backend
263  || base.GetType() == LayerType::Subtraction || base.GetType() == LayerType::Division)
264  && (base.GetAdditionalInformation<ActivationDescriptor>() == nullptr))
265  {
266  for (auto output = base.BeginOutputSlots(); output != base.EndOutputSlots(); ++output)
267  {
268  if (output->GetNumConnections() == 1)
269  {
270  for (auto&& childInput : output->GetConnections())
271  {
272  if ((childInput->GetOwningLayer().GetType() == LayerType::Activation) &&
273  (checkDataTypeInputandOutput(childInput->GetOwningLayer())))
274  {
275  Layer& child = childInput->GetOwningLayer();
276 
277  auto* activationLayer = PolymorphicDowncast<ActivationLayer*>(&child);
278 
279  const std::string name = std::string("fused-") + child.GetName() + std::string("-into-") +
280  base.GetName();
281 
282  // Get params from activation layer
283  ActivationDescriptor activationDesc = activationLayer->GetParameters();
284 
285  if (base.GetType() == LayerType::Convolution2d)
286  {
287  Convolution2dLayer* baseLayer = PolymorphicDowncast<Convolution2dLayer*>(&base);
288 
289  Optional<TensorInfo> biases;
290 
291  if (baseLayer->GetParameters().m_BiasEnabled)
292  {
293  biases = baseLayer->m_Bias->GetTensorInfo();
294  }
295 
298  activationLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
299  baseLayer->GetParameters(),
300  baseLayer->m_Weight->GetTensorInfo(),
301  biases,
302  isFastMathEnabled,
303  &activationDesc);
304 
305  if (status)
306  {
307  FuseLayerWithWeightsAndBiases<Convolution2dLayer>(optimizationViews,
308  baseLayer,
309  activationLayer,
310  activationDesc,
311  name);
312  untouched.erase(baseLayer->GetGuid());
313  untouched.erase(activationLayer->GetGuid());
314  }
315  }
316  else if (base.GetType() == LayerType::DepthwiseConvolution2d)
317  {
318  DepthwiseConvolution2dLayer* baseLayer =
319  PolymorphicDowncast<DepthwiseConvolution2dLayer*>(&base);
320 
321  Optional<TensorInfo> biases;
322 
323  if (baseLayer->GetParameters().m_BiasEnabled)
324  {
325  biases = baseLayer->m_Bias->GetTensorInfo();
326  }
327 
330  activationLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
331  baseLayer->GetParameters(),
332  baseLayer->m_Weight->GetTensorInfo(),
333  biases,
334  &activationDesc);
335 
336  if (status)
337  {
338  FuseLayerWithWeightsAndBiases<DepthwiseConvolution2dLayer>(optimizationViews,
339  baseLayer,
340  activationLayer,
341  activationDesc,
342  name);
343  untouched.erase(baseLayer->GetGuid());
344  untouched.erase(activationLayer->GetGuid());
345  }
346  }
347  else if (base.GetType() == LayerType::FullyConnected)
348  {
349  FullyConnectedLayer* baseLayer = PolymorphicDowncast<FullyConnectedLayer*>(&base);
350 
353  activationLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
354  baseLayer->m_Weight->GetTensorInfo(),
355  baseLayer->m_Bias->GetTensorInfo(),
356  baseLayer->GetParameters(),
357  &activationDesc);
358 
359  if (status)
360  {
361  FuseLayerWithWeightsAndBiases<FullyConnectedLayer>(optimizationViews,
362  baseLayer,
363  activationLayer,
364  activationDesc,
365  name);
366  untouched.erase(baseLayer->GetGuid());
367  untouched.erase(activationLayer->GetGuid());
368  }
369  }
370  else if (base.GetType() == LayerType::BatchNormalization)
371  {
372  BatchNormalizationLayer* baseLayer =
373  PolymorphicDowncast<BatchNormalizationLayer*>(&base);
374 
377  activationLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
378  baseLayer->m_Mean->GetTensorInfo(),
379  baseLayer->m_Variance->GetTensorInfo(),
380  baseLayer->m_Beta->GetTensorInfo(),
381  baseLayer->m_Gamma->GetTensorInfo(),
382  baseLayer->GetParameters(),
383  &activationDesc);
384 
385  if (status)
386  {
387  BatchNormalizationLayer* replacementLayer =
388  FuseLayerWithParameters<BatchNormalizationLayer>(optimizationViews,
389  baseLayer,
390  activationLayer,
391  activationDesc,
392  name);
393 
394  replacementLayer->m_Beta = std::move(baseLayer->m_Beta);
395  replacementLayer->m_Gamma = std::move(baseLayer->m_Gamma);
396  replacementLayer->m_Mean = std::move(baseLayer->m_Mean);
397  replacementLayer->m_Variance = std::move(baseLayer->m_Variance);
398  untouched.erase(baseLayer->GetGuid());
399  untouched.erase(activationLayer->GetGuid());
400  }
401  }
402  else if (base.GetType() == LayerType::Addition)
403  {
404  AdditionLayer* baseLayer = PolymorphicDowncast<AdditionLayer*>(&base);
405 
409  activationLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
410  &activationDesc);
411 
412  if (status)
413  {
414  FuseLayerWithoutParameters<AdditionLayer>(optimizationViews,
415  baseLayer,
416  activationLayer,
417  activationDesc,
418  name);
419  untouched.erase(baseLayer->GetGuid());
420  untouched.erase(activationLayer->GetGuid());
421  }
422  }
423  else if (base.GetType() == LayerType::Division)
424  {
425  DivisionLayer* baseLayer = PolymorphicDowncast<DivisionLayer*>(&base);
426 
430  activationLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
431  &activationDesc);
432 
433  if (status)
434  {
435  FuseLayerWithoutParameters<DivisionLayer>(optimizationViews,
436  baseLayer,
437  activationLayer,
438  activationDesc,
439  name);
440  untouched.erase(baseLayer->GetGuid());
441  untouched.erase(activationLayer->GetGuid());
442  }
443  }
444  else if (base.GetType() == LayerType::Multiplication)
445  {
446  MultiplicationLayer* baseLayer = PolymorphicDowncast<MultiplicationLayer*>(&base);
447 
451  activationLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
452  &activationDesc);
453 
454  if (status)
455  {
456  FuseLayerWithoutParameters<MultiplicationLayer>(optimizationViews,
457  baseLayer,
458  activationLayer,
459  activationDesc,
460  name);
461  untouched.erase(baseLayer->GetGuid());
462  untouched.erase(activationLayer->GetGuid());
463  }
464  }
465  else if (base.GetType() == LayerType::Subtraction)
466  {
467  SubtractionLayer* baseLayer = PolymorphicDowncast<SubtractionLayer*>(&base);
468 
472  activationLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
473  &activationDesc);
474 
475  if (status)
476  {
477  FuseLayerWithoutParameters<SubtractionLayer>(optimizationViews,
478  baseLayer,
479  activationLayer,
480  activationDesc,
481  name);
482  untouched.erase(baseLayer->GetGuid());
483  untouched.erase(activationLayer->GetGuid());
484  }
485  }
486  }
487  }
488  }
489  }
490  }
491 
492  // Separate reduce layer with multiple axes into multiple reduce layers with 1 axis.
493  if (base.GetType() == LayerType::Reduce)
494  {
495  ReduceLayer* baseLayer = PolymorphicDowncast<ReduceLayer*>(&base);
496  ReduceDescriptor reduceDescriptor = baseLayer->GetParameters();
497 
498  if (!reduceDescriptor.m_vAxis.empty() && reduceDescriptor.m_vAxis.size() > 1)
499  {
500  // Add new layers to the graph and connect them.
501  std::vector<Layer*> layers = ChainReduceLayers<ReduceLayer>(optimizationViews,
502  baseLayer,
503  reduceDescriptor);
504 
505  // Replace existing baselayer with new subgraph.
506  ReplaceLayers<ReduceLayer>(optimizationViews, baseLayer, layers);
507  untouched.erase(baseLayer->GetGuid());
508  }
509  }
510  }
511 
512  if (optimizationViews.GetSubstitutions().empty())
513  {
514  optimizationViews.AddUntouchedSubgraph(SubgraphView(subgraph));
515  }
516  else
517  {
518  ReportUntouchedLayers(optimizationViews, untouched);
519  }
520 
521  return optimizationViews;
522 }
523 
524 } // namespace armnn
arm_compute::Status ClAdditionValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)
bool m_BiasEnabled
Enable/disable bias.
void RegisterMemoryManager(std::shared_ptr< IMemoryManager > memoryManger)
Register a memory manager with shared ownership.
arm_compute::Status ClFullyConnectedWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const TensorInfo &biases, const FullyConnectedDescriptor &descriptor, const ActivationDescriptor *activationDescriptor)
static const FactoryId & GetIdStatic()
This layer represents a batch normalization operation.
std::unique_ptr< IWorkloadFactory > IWorkloadFactoryPtr
bool m_BiasEnabled
Enable/disable bias.
arm_compute::Status ClDivisionWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)
const Parameters & GetParameters() const
This layer represents a depthwise convolution 2d operation.
constexpr const char * ClBackendId()
Definition: ClBackendId.hpp:10
std::vector< BackendOptions > ModelOptions
void RegisterFactory(std::unique_ptr< ITensorHandleFactory > allocator)
Register a TensorHandleFactory and transfer ownership.
void ReportUntouchedLayers(OptimizationViews &optimizationViews, std::map< LayerGuid, Layer *> untouched)
arm_compute::Status ClSubtractionValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)
std::shared_ptr< ConstTensorHandle > m_Weight
A unique pointer to store Weight values.
std::shared_ptr< ConstTensorHandle > m_Mean
A unique pointer to store Mean values.
std::shared_ptr< ClBackendCustomAllocatorWrapper > m_CustomAllocator
Definition: ClBackend.hpp:299
unsigned int MemorySourceFlags
Copyright (c) 2021 ARM Limited and Contributors.
arm_compute::Status ClConvolution2dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, bool isFastMathEnabled, const ActivationDescriptor *activationDescriptor)
std::unique_ptr< IMemoryManager > IMemoryManagerUniquePtr
IBackendInternal::IMemoryManagerUniquePtr CreateMemoryManager() const override
Definition: ClBackend.cpp:51
This layer represents a reduction operation.
Definition: ReduceLayer.hpp:13
std::shared_ptr< ConstTensorHandle > m_Beta
A unique pointer to store Beta values.
void RegisterTensorHandleFactories(TensorHandleFactoryRegistry &registry) override
(Optional) Register TensorHandleFactories Either this method or CreateMemoryManager() and IWorkloadFa...
Definition: ClBackend.cpp:148
The SubgraphView class represents a subgraph of a Graph.
IBackendInternal::IBackendSpecificModelContextPtr CreateBackendSpecificModelContext(const ModelOptions &modelOptions) const override
Definition: ClBackend.cpp:196
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
Definition: Layer.hpp:316
std::unique_ptr< armnn::profiling::IBackendProfiling > IBackendProfilingPtr
OptimizationViews OptimizeSubgraphView(const SubgraphView &subgraph, const ModelOptions &modelOptions) const override
Definition: ClBackend.cpp:225
This layer represents a fully connected operation.
std::shared_ptr< ConstTensorHandle > m_Weight
A unique pointer to store Weight values.
std::shared_ptr< IBackendModelContext > IBackendSpecificModelContextPtr
std::shared_ptr< IMemoryManager > IMemoryManagerSharedPtr
A ReduceDescriptor for the REDUCE operators.
IBackendInternal::IBackendContextPtr CreateBackendContext(const IRuntime::CreationOptions &) const override
Create the runtime context of the backend.
Definition: ClBackend.cpp:185
std::shared_ptr< ConstTensorHandle > m_Bias
A unique pointer to store Bias values.
arm_compute::Status ClMultiplicationWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)
std::vector< ITensorHandleFactory::FactoryId > GetHandleFactoryPreferences() const override
(Optional) Returns a vector of supported TensorHandleFactory ids in preference order.
Definition: ClBackend.cpp:142
LayerType GetType() const override
Returns the armnn::LayerType of this layer.
Definition: Layer.hpp:265
Status
enumeration
Definition: Types.hpp:29
const OutputSlot * GetConnectedOutputSlot() const
Definition: Layer.hpp:55
std::shared_ptr< ConstTensorHandle > m_Gamma
A unique pointer to store Gamma values.
An ActivationDescriptor for the ActivationLayer.
Definition: Descriptors.hpp:25
arm_compute::Status ClBatchNormalizationValidate(const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &descriptor, const ActivationDescriptor *activationDescriptor)
void AddUntouchedSubgraph(SubgraphView &&subgraph)
std::shared_ptr< ConstTensorHandle > m_Variance
A unique pointer to store Variance values.
std::shared_ptr< ConstTensorHandle > m_Bias
A unique pointer to store Bias values.
IBackendInternal::IWorkloadFactoryPtr CreateWorkloadFactory(const IBackendInternal::IMemoryManagerSharedPtr &memoryManager=nullptr) const override
Definition: ClBackend.cpp:60
This layer represents an addition operation.
std::shared_ptr< ILayerSupport > ILayerSupportSharedPtr
const Substitutions & GetSubstitutions() const
This layer represents a subtraction operation.
std::vector< OutputSlot >::iterator BeginOutputSlots()
Definition: Layer.hpp:245
IBackendInternal::ILayerSupportSharedPtr GetLayerSupport() const override
Definition: ClBackend.cpp:202
std::shared_ptr< ConstTensorHandle > m_Bias
A unique pointer to store Bias values.
std::shared_ptr< ConstTensorHandle > m_Weight
A unique pointer to store Weight values.
IBackendInternal::IBackendProfilingContextPtr CreateBackendProfilingContext(const IRuntime::CreationOptions &, IBackendProfilingPtr &backendProfiling) override
Create context specifically used for profiling interaction from backends.
Definition: ClBackend.cpp:190
bool m_UsingCustomAllocator
Definition: ClBackend.hpp:300
This layer represents a division operation.
std::vector< OutputSlot >::iterator EndOutputSlots()
Definition: Layer.hpp:246
const char * GetName() const override
Returns the name of the layer.
Definition: Layer.hpp:311
This layer represents a convolution 2d operation.
The ClBackendModelContext is used to pass in CL specific backend ModelOptions.
This layer represents a multiplication operation.
std::unique_ptr< ICustomAllocator > GetDefaultAllocator() const override
Returns the default memory allocator for the backend.
Definition: ClBackend.cpp:220
const TensorInfo & GetTensorInfo() const override
Definition: Layer.cpp:63
static const BackendId & GetIdStatic()
Definition: ClBackend.cpp:45
arm_compute::Status ClDepthwiseConvolutionWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, const ActivationDescriptor *activationDescriptor)
std::shared_ptr< armnn::profiling::IBackendProfilingContext > IBackendProfilingContextPtr
This is the bridge between backend and backend profiling; we'll keep it in the backend namespace...
std::shared_ptr< T > GetAdditionalInformation() const
Definition: Layer.hpp:342
LayerGuid GetGuid() const final
Returns the unique id of the layer.
Definition: Layer.hpp:322
std::unique_ptr< IBackendContext > IBackendContextPtr