ArmNN 23.11
NeonBackend.cpp
1 //
2 // Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #include "NeonBackend.hpp"
7 #include "NeonBackendId.hpp"
8 #include "NeonBackendModelContext.hpp"
9 #include "NeonWorkloadFactory.hpp"
10 #include "NeonLayerSupport.hpp"
11 #include "NeonTensorHandleFactory.hpp"
12 #include "NeonBackendOptimizationUtils.hpp"
13 
14 #include <armnn/BackendRegistry.hpp>
15 #include <armnn/Descriptors.hpp>
16 
17 #include <aclCommon/ArmComputeSubgraphUtils.hpp>
18 #include <aclCommon/ArmComputeUtils.hpp>
19 #include <aclCommon/BaseMemoryManager.hpp>
20 
21 #include <armnn/backends/IBackendContext.hpp>
22 #include <armnn/backends/IMemoryManager.hpp>
23 
24 #include <armnn/utility/PolymorphicDowncast.hpp>
25 
26 #include "workloads/NeonAdditionWorkload.hpp"
27 #include "workloads/NeonBatchNormalizationWorkload.hpp"
28 #include "workloads/NeonConvolution2dWorkload.hpp"
29 #include "workloads/NeonDepthwiseConvolutionWorkload.hpp"
30 #include "workloads/NeonDivisionWorkload.hpp"
31 #include "workloads/NeonFullyConnectedWorkload.hpp"
32 #include "workloads/NeonFusedWorkload.hpp"
33 #include "workloads/NeonMultiplicationWorkload.hpp"
34 #include "workloads/NeonReduceWorkload.hpp"
35 #include "workloads/NeonSubtractionWorkload.hpp"
36 #include <backendsCommon/DefaultAllocator.hpp>
37 
38 #include <Optimizer.hpp>
39 
40 #include <arm_compute/core/Types.h>
41 #include <arm_compute/runtime/Allocator.h>
42 
43 namespace armnn
44 {
45 
46 const BackendId& NeonBackend::GetIdStatic()
47 {
48  static const BackendId s_Id{NeonBackendId()};
49  return s_Id;
50 }
51 
52 IBackendInternal::IMemoryManagerUniquePtr NeonBackend::CreateMemoryManager() const
53 {
54  return std::make_unique<NeonMemoryManager>(std::make_unique<arm_compute::Allocator>(),
55  BaseMemoryManager::MemoryAffinity::Offset);
56 }
57 
58 IBackendInternal::IWorkloadFactoryPtr NeonBackend::CreateWorkloadFactory(
59  const IBackendInternal::IMemoryManagerSharedPtr& memoryManager) const
60 {
61  return std::make_unique<NeonWorkloadFactory>(
62  PolymorphicPointerDowncast<NeonMemoryManager>(memoryManager));
63 }
64 
65 IBackendInternal::IWorkloadFactoryPtr NeonBackend::CreateWorkloadFactory(
66  const IBackendInternal::IMemoryManagerSharedPtr& memoryManager, const ModelOptions& modelOptions) const
67 {
68  return std::make_unique<NeonWorkloadFactory>(
69  PolymorphicPointerDowncast<NeonMemoryManager>(memoryManager), CreateBackendSpecificModelContext(modelOptions));
70 }
71 
72 IBackendInternal::IWorkloadFactoryPtr NeonBackend::CreateWorkloadFactory(
73  class TensorHandleFactoryRegistry& tensorHandleFactoryRegistry) const
74 {
75  auto memoryManager = std::make_shared<NeonMemoryManager>(std::make_unique<arm_compute::Allocator>(),
76  BaseMemoryManager::MemoryAffinity::Offset);
77 
78  tensorHandleFactoryRegistry.RegisterMemoryManager(memoryManager);
79 
80  auto factory = std::make_unique<NeonTensorHandleFactory>(memoryManager);
81  // Register copy and import factory pair
82  tensorHandleFactoryRegistry.RegisterCopyAndImportFactoryPair(factory->GetId(), factory->GetId());
83  // Register the factory
84  tensorHandleFactoryRegistry.RegisterFactory(std::move(factory));
85 
86 
87  return std::make_unique<NeonWorkloadFactory>(
88  PolymorphicPointerDowncast<NeonMemoryManager>(memoryManager));
89 }
90 
91 IBackendInternal::IWorkloadFactoryPtr NeonBackend::CreateWorkloadFactory(
92  TensorHandleFactoryRegistry& tensorHandleFactoryRegistry, const ModelOptions& modelOptions) const
93 {
94  auto memoryManager = std::make_shared<NeonMemoryManager>(std::make_unique<arm_compute::Allocator>(),
95  BaseMemoryManager::MemoryAffinity::Offset);
96 
97  tensorHandleFactoryRegistry.RegisterMemoryManager(memoryManager);
98 
99  auto factory = std::make_unique<NeonTensorHandleFactory>(memoryManager);
100  // Register copy and import factory pair
101  tensorHandleFactoryRegistry.RegisterCopyAndImportFactoryPair(factory->GetId(), factory->GetId());
102  // Register the factory
103  tensorHandleFactoryRegistry.RegisterFactory(std::move(factory));
104 
105  return std::make_unique<NeonWorkloadFactory>(
106  PolymorphicPointerDowncast<NeonMemoryManager>(memoryManager), CreateBackendSpecificModelContext(modelOptions));
107 }
108 
109 IBackendInternal::IBackendContextPtr NeonBackend::CreateBackendContext(const IRuntime::CreationOptions&) const
110 {
111  return IBackendContextPtr{};
112 }
113 
114 IBackendInternal::IBackendProfilingContextPtr NeonBackend::CreateBackendProfilingContext(
115  const IRuntime::CreationOptions&, IBackendProfilingPtr& backendProfiling)
116 {
117  return IBackendProfilingContextPtr{};
118 }
119 
120 IBackendInternal::IBackendSpecificModelContextPtr NeonBackend::CreateBackendSpecificModelContext(
121  const ModelOptions& modelOptions) const
122 {
123  return IBackendSpecificModelContextPtr{new NeonBackendModelContext{modelOptions}};
124 }
125 
126 IBackendInternal::ILayerSupportSharedPtr NeonBackend::GetLayerSupport() const
127 {
128  static ILayerSupportSharedPtr layerSupport
129  {
130  new NeonLayerSupport(IBackendInternal::IBackendSpecificModelContextPtr{})
131  };
132  return layerSupport;
133 }
134 
135 IBackendInternal::ILayerSupportSharedPtr NeonBackend::GetLayerSupport(const ModelOptions& modelOptions) const
136 {
137  static ILayerSupportSharedPtr layerSupport
138  {
139  new NeonLayerSupport(CreateBackendSpecificModelContext(modelOptions))
140  };
141  return layerSupport;
142 }
143 
144 OptimizationViews NeonBackend::OptimizeSubgraphView(const SubgraphView& subgraph,
145  const ModelOptions& modelOptions) const
146 {
147  OptimizationViews optimizationViews(modelOptions);
148 
149  auto it = subgraph.end();
150  std::map<LayerGuid, Layer*> untouched;
151 
152  while (it != subgraph.begin())
153  {
154  --it;
155  Layer& base = *(PolymorphicDowncast<Layer*>(*it));
156  untouched.insert({base.GetGuid(), &base});
157  }
158 
159  it = subgraph.end();
160  while (it != subgraph.begin())
161  {
162  --it;
163  Layer& base = *(PolymorphicDowncast<Layer*>(*it));
164 
165  // Fuse activation into previous layer if supported by backend
166  if ((base.GetType() == LayerType::DepthwiseConvolution2d || base.GetType() == LayerType::Convolution2d
167  || base.GetType() == LayerType::BatchNormalization || base.GetType() == LayerType::FullyConnected
168  || base.GetType() == LayerType::Addition || base.GetType() == LayerType::Multiplication
169  || base.GetType() == LayerType::Subtraction || base.GetType() == LayerType::Division
170  || base.GetType() == LayerType::ElementwiseBinary)
171  && (base.GetAdditionalInformation<ActivationDescriptor>() == nullptr))
172  {
173  for (auto output = base.BeginOutputSlots(); output != base.EndOutputSlots(); ++output)
174  {
175  if (output->GetNumConnections() == 1)
176  {
177  for (auto&& childInput : output->GetConnections())
178  {
179  if ((childInput->GetOwningLayer().GetType() == LayerType::Activation) &&
180  (checkDataTypeInputandOutput(childInput->GetOwningLayer())))
181  {
182  Layer& child = childInput->GetOwningLayer();
183 
184  auto* activationLayer = PolymorphicDowncast<ActivationLayer*>(&child);
185 
186  const std::string name = std::string("fused-") + child.GetName() + std::string("-into-") +
187  base.GetName();
188 
189  // Get params from activation layer
190  ActivationDescriptor activationDesc = activationLayer->GetParameters();
191 
192  if (base.GetType() == LayerType::Convolution2d)
193  {
194  Convolution2dLayer* baseLayer = PolymorphicDowncast<Convolution2dLayer*>(&base);
195 
196  Optional<TensorInfo> biases;
197 
198  if (baseLayer->GetParameters().m_BiasEnabled)
199  {
200  biases = baseLayer->GetInputSlot(2).GetConnectedOutputSlot()->GetTensorInfo();
201  }
202 
203  arm_compute::Status status = NeonConvolution2dWorkloadValidate(
204  baseLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
205  activationLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
206  baseLayer->GetParameters(),
207  baseLayer->GetInputSlot(1).GetConnectedOutputSlot()->GetTensorInfo(),
208  biases,
209  false,
210  &activationDesc);
211 
212  if (status)
213  {
214  FuseConvolution2dLayer<Convolution2dLayer>(optimizationViews,
215  baseLayer,
216  activationLayer,
217  activationDesc,
218  name);
219  untouched.erase(baseLayer->GetGuid());
220  untouched.erase(activationLayer->GetGuid());
221  }
222  }
223  else if (base.GetType() == LayerType::DepthwiseConvolution2d)
224  {
225  DepthwiseConvolution2dLayer* baseLayer =
226  PolymorphicDowncast<DepthwiseConvolution2dLayer*>(&base);
227 
228  Optional<TensorInfo> biases;
229 
230  if (baseLayer->GetParameters().m_BiasEnabled)
231  {
232  biases = baseLayer->GetInputSlot(2).GetConnectedOutputSlot()->GetTensorInfo();
233  }
234 
235  arm_compute::Status status = NeonDepthwiseConvolutionWorkloadValidate(
236  baseLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
237  activationLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
238  baseLayer->GetParameters(),
239  baseLayer->GetInputSlot(1).GetConnectedOutputSlot()->GetTensorInfo(),
240  biases,
241  &activationDesc);
242 
243  if (status)
244  {
245  FuseDepthwiseConvolution2dLayer<DepthwiseConvolution2dLayer>(optimizationViews,
246  baseLayer,
247  activationLayer,
248  activationDesc,
249  name);
250  untouched.erase(baseLayer->GetGuid());
251  untouched.erase(activationLayer->GetGuid());
252  }
253  }
254  else if (base.GetType() == LayerType::FullyConnected)
255  {
256  FullyConnectedLayer* baseLayer = PolymorphicDowncast<FullyConnectedLayer*>(&base);
257  FullyConnectedDescriptor descriptor = baseLayer->GetParameters();
258 
259  // As bias is optional, only try to get TensorInfo from input if bias is enabled.
260  Optional<TensorInfo> biases;
261  if (descriptor.m_BiasEnabled)
262  {
263  biases = baseLayer->GetInputSlot(2).GetConnectedOutputSlot()->GetTensorInfo();
264  }
265 
266  arm_compute::Status status = NeonFullyConnectedWorkloadValidate(
267  baseLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
268  activationLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
269  baseLayer->GetInputSlot(1).GetConnectedOutputSlot()->GetTensorInfo(),
270  biases,
271  baseLayer->GetParameters(),
272  &activationDesc);
273 
274  if (status)
275  {
276  FuseFullyConnectedLayer<FullyConnectedLayer>(optimizationViews,
277  baseLayer,
278  activationLayer,
279  activationDesc,
280  name);
281  untouched.erase(baseLayer->GetGuid());
282  untouched.erase(activationLayer->GetGuid());
283  }
284  }
285  else if (base.GetType() == LayerType::BatchNormalization)
286  {
287  BatchNormalizationLayer* baseLayer =
288  PolymorphicDowncast<BatchNormalizationLayer*>(&base);
289 
290  arm_compute::Status status = NeonBatchNormalizationValidate(
291  baseLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
292  activationLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
293  baseLayer->m_Mean->GetTensorInfo(),
294  baseLayer->m_Variance->GetTensorInfo(),
295  baseLayer->m_Beta->GetTensorInfo(),
296  baseLayer->m_Gamma->GetTensorInfo(),
297  baseLayer->GetParameters(),
298  &activationDesc);
299 
300  if (status)
301  {
302  BatchNormalizationLayer* replacementLayer =
303  FuseBatchNormalizationLayer<BatchNormalizationLayer>(optimizationViews,
304  baseLayer,
305  activationLayer,
306  activationDesc,
307  name);
308 
309  replacementLayer->m_Beta = std::move(baseLayer->m_Beta);
310  replacementLayer->m_Gamma = std::move(baseLayer->m_Gamma);
311  replacementLayer->m_Mean = std::move(baseLayer->m_Mean);
312  replacementLayer->m_Variance = std::move(baseLayer->m_Variance);
313  untouched.erase(baseLayer->GetGuid());
314  untouched.erase(activationLayer->GetGuid());
315  }
316  }
317  else if (base.GetType() == LayerType::Addition)
318  {
319  AdditionLayer* baseLayer = PolymorphicDowncast<AdditionLayer*>(&base);
320 
321  arm_compute::Status status = NeonAdditionWorkloadValidate(
322  baseLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
323  baseLayer->GetInputSlot(1).GetConnectedOutputSlot()->GetTensorInfo(),
324  activationLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
325  &activationDesc);
326 
327  if (status)
328  {
329  FuseAdditionLayer<AdditionLayer>(optimizationViews,
330  baseLayer,
331  activationLayer,
332  activationDesc,
333  name);
334  untouched.erase(baseLayer->GetGuid());
335  untouched.erase(activationLayer->GetGuid());
336  }
337  }
338  else if (base.GetType() == LayerType::Division)
339  {
340  DivisionLayer* baseLayer = PolymorphicDowncast<DivisionLayer*>(&base);
341 
342  arm_compute::Status status = NeonDivisionWorkloadValidate(
343  baseLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
344  baseLayer->GetInputSlot(1).GetConnectedOutputSlot()->GetTensorInfo(),
345  activationLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
346  &activationDesc);
347 
348  if (status)
349  {
350  FuseDivisionLayer<DivisionLayer>(optimizationViews,
351  baseLayer,
352  activationLayer,
353  activationDesc,
354  name);
355  untouched.erase(baseLayer->GetGuid());
356  untouched.erase(activationLayer->GetGuid());
357  }
358  }
359  else if (base.GetType() == LayerType::Multiplication)
360  {
361  MultiplicationLayer* baseLayer = PolymorphicDowncast<MultiplicationLayer*>(&base);
362 
363  arm_compute::Status status = NeonMultiplicationWorkloadValidate(
364  baseLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
365  baseLayer->GetInputSlot(1).GetConnectedOutputSlot()->GetTensorInfo(),
366  activationLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
367  &activationDesc);
368 
369  if (status)
370  {
371  FuseMultiplicationLayer<MultiplicationLayer>(optimizationViews,
372  baseLayer,
373  activationLayer,
374  activationDesc,
375  name);
376  untouched.erase(baseLayer->GetGuid());
377  untouched.erase(activationLayer->GetGuid());
378  }
379  }
380  else if (base.GetType() == LayerType::Subtraction)
381  {
382  SubtractionLayer* baseLayer = PolymorphicDowncast<SubtractionLayer*>(&base);
383 
384  arm_compute::Status status = NeonSubtractionWorkloadValidate(
385  baseLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
386  baseLayer->GetInputSlot(1).GetConnectedOutputSlot()->GetTensorInfo(),
387  activationLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
388  &activationDesc);
389 
390  if (status)
391  {
392  FuseSubtractionLayer<SubtractionLayer>(optimizationViews,
393  baseLayer,
394  activationLayer,
395  activationDesc,
396  name);
397  untouched.erase(baseLayer->GetGuid());
398  untouched.erase(activationLayer->GetGuid());
399  }
400  }
401  else if (base.GetType() == LayerType::ElementwiseBinary)
402  {
403  ElementwiseBinaryLayer* baseLayer = PolymorphicDowncast<ElementwiseBinaryLayer*>(&base);
404 
405  if (baseLayer->GetParameters().m_Operation == BinaryOperation::Add)
406  {
407  arm_compute::Status status = NeonAdditionWorkloadValidate(
408  baseLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
409  baseLayer->GetInputSlot(1).GetConnectedOutputSlot()->GetTensorInfo(),
410  activationLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
411  &activationDesc);
412 
413  if (status)
414  {
415  FuseElementwiseBinaryLayer<ElementwiseBinaryLayer>(optimizationViews,
416  baseLayer,
417  activationLayer,
418  activationDesc,
419  BinaryOperation::Add,
420  name);
421  untouched.erase(baseLayer->GetGuid());
422  untouched.erase(activationLayer->GetGuid());
423  }
424  }
425  else if (baseLayer->GetParameters().m_Operation == BinaryOperation::Div)
426  {
427  arm_compute::Status status = NeonDivisionWorkloadValidate(
428  baseLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
429  baseLayer->GetInputSlot(1).GetConnectedOutputSlot()->GetTensorInfo(),
430  activationLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
431  &activationDesc);
432 
433  if (status)
434  {
435  FuseElementwiseBinaryLayer<ElementwiseBinaryLayer>(optimizationViews,
436  baseLayer,
437  activationLayer,
438  activationDesc,
439  BinaryOperation::Div,
440  name);
441  untouched.erase(baseLayer->GetGuid());
442  untouched.erase(activationLayer->GetGuid());
443  }
444  }
445  else if (baseLayer->GetParameters().m_Operation == BinaryOperation::Mul)
446  {
447  arm_compute::Status status = NeonMultiplicationWorkloadValidate(
448  baseLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
449  baseLayer->GetInputSlot(1).GetConnectedOutputSlot()->GetTensorInfo(),
450  activationLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
451  &activationDesc);
452 
453  if (status)
454  {
455  FuseElementwiseBinaryLayer<ElementwiseBinaryLayer>(optimizationViews,
456  baseLayer,
457  activationLayer,
458  activationDesc,
459  BinaryOperation::Mul,
460  name);
461  untouched.erase(baseLayer->GetGuid());
462  untouched.erase(activationLayer->GetGuid());
463  }
464  }
465  else if (baseLayer->GetParameters().m_Operation == BinaryOperation::Sub)
466  {
467  arm_compute::Status status = NeonSubtractionWorkloadValidate(
468  baseLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
469  baseLayer->GetInputSlot(1).GetConnectedOutputSlot()->GetTensorInfo(),
470  activationLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
471  &activationDesc);
472 
473  if (status)
474  {
475  FuseElementwiseBinaryLayer<ElementwiseBinaryLayer>(optimizationViews,
476  baseLayer,
477  activationLayer,
478  activationDesc,
479  BinaryOperation::Sub,
480  name);
481  untouched.erase(baseLayer->GetGuid());
482  untouched.erase(activationLayer->GetGuid());
483  }
484  }
485  // No fusion available for other BinaryOperations
486  }
487  }
488  }
489  }
490  }
491  }
492 
493  // Separate reduce layer with multiple axes into multiple reduce layers with 1 axis.
494  if (base.GetType() == LayerType::Reduce)
495  {
496  ReduceLayer* baseLayer = PolymorphicDowncast<ReduceLayer*>(&base);
497  ReduceDescriptor reduceDescriptor = baseLayer->GetParameters();
498 
499  if (!reduceDescriptor.m_vAxis.empty() && reduceDescriptor.m_vAxis.size() > 1)
500  {
501  // Add new layers to the graph and connect them.
502  std::vector<IConnectableLayer*> layers = ChainReduceLayers<ReduceLayer>(optimizationViews,
503  baseLayer,
504  reduceDescriptor);
505 
506  // Replace existing baseLayer with new subgraph.
507  ReplaceLayers<ReduceLayer>(optimizationViews, baseLayer, layers);
508  untouched.erase(baseLayer->GetGuid());
509  }
510  }
511 
512  // Remove Reshape where possible
513  if (base.GetType() == LayerType::Reshape)
514  {
515  ReshapeLayer* baseLayer = PolymorphicDowncast<ReshapeLayer*>(&base);
516 
517  // Cannot remove a Reshape if it's connected to any layer that has an NCHW layout
518  if (ConnectedToLayerWithNCHW(baseLayer))
519  {
520  continue;
521  }
522  // Cannot remove a Reshape if it's connected to a SplitterLayer
523  if (ConnectedToLayerType(baseLayer, LayerType::Splitter))
524  {
525  continue;
526  }
527  RemoveReshapeLayer(baseLayer, untouched, optimizationViews);
528  }
529 
530  // Replace Add/Mul/Add where possible
531  Layer* layerList[4] = {nullptr, nullptr, nullptr, nullptr};
532  const std::vector<ActivationFunction> validActivates = { ActivationFunction::ReLu,
533  ActivationFunction::BoundedReLu };
534  if (IsLayerSequence<BinaryOperation>(base,
535  BinaryOperation::Add, BinaryOperation::Mul, BinaryOperation::Add,
536  layerList,
537  true, // handleValidActivates
538  validActivates))
539  {
540  bool fuseReLu = false;
541  unsigned int numInputs = 0;
542  unsigned int numOutputs = 0;
543  std::vector<TensorInfo> inputInfos;
544  std::vector<TensorInfo> outputInfos;
545  const ActivationDescriptor* activationDescriptor = nullptr;
546 
547  if (BuildAddMulAddTensorInfoLists<Layer>(layerList,
548  numInputs,
549  numOutputs,
550  inputInfos,
551  outputInfos,
552  activationDescriptor,
553  fuseReLu))
554  {
555  // Create the new Add/Mul/Add layer and set the Relu activation function
556  FusedDescriptor fusedDescriptor(numInputs, numOutputs, FusedKernelType::AddMulAdd);
557  arm_compute::Status status = NeonFusedWorkloadValidate({inputInfos.begin(), inputInfos.end()},
558  {outputInfos.begin(), outputInfos.end()},
559  fusedDescriptor,
560  activationDescriptor);
561  if (status)
562  {
563  std::string fusedName;
564  GetFusedName(layerList, fusedName);
565 
566  IConnectableLayer* addMulAddLayer =
567  optimizationViews.GetINetwork()->AddFusedLayer(fusedDescriptor, fusedName.c_str());
568 
569  if (fuseReLu)
570  {
571  FusedLayer* addMulAddFusedLayer = PolymorphicDowncast<FusedLayer*>(addMulAddLayer);
572  addMulAddFusedLayer->SetAdditionalInfoForObject(
573  std::make_shared<ActivationDescriptor>(*activationDescriptor));
574  }
575 
576  // Update the graph
577  std::vector<IConnectableLayer*> originalLayers;
578  for (unsigned int layerIdx = 0; layerIdx < 4; ++layerIdx)
579  {
580  if (layerList[layerIdx])
581  {
582  originalLayers.push_back(layerList[layerIdx]);
583  }
584  }
585 
586  std::vector<SlotList> inputLayersSlotLists, outputLayersSlotLists;
587  BuildAddMulAddSlotLists<SlotList>(fuseReLu,
588  outputInfos.size() > 1,
589  inputLayersSlotLists,
590  outputLayersSlotLists);
591 
592  ReplaceMultipleLayers<FusedLayer>(optimizationViews,
593  originalLayers,
594  PolymorphicDowncast<FusedLayer*>(addMulAddLayer),
595  inputLayersSlotLists,
596  outputLayersSlotLists);
597 
598  // Remove unused layers
599  for (unsigned int layerIdx = 0; layerIdx < 4; ++layerIdx)
600  {
601  if (layerList[layerIdx])
602  {
603  untouched.erase(layerList[layerIdx]->GetGuid());
604  }
605  }
606  }
607  }
608  }
609  }
610 
611  if (optimizationViews.GetSubstitutions().empty() && optimizationViews.GetDeletedSubgraphs().empty())
612  {
613  optimizationViews.AddUntouchedSubgraph(SubgraphView(subgraph));
614  }
615  else
616  {
617  ReportUntouchedLayers(optimizationViews, untouched);
618  }
619 
620  return optimizationViews;
621 }
622 
623 std::vector<ITensorHandleFactory::FactoryId> NeonBackend::GetHandleFactoryPreferences() const
624 {
625  return std::vector<ITensorHandleFactory::FactoryId>{ NeonTensorHandleFactory::GetIdStatic() };
626 }
627 
628 void NeonBackend::RegisterTensorHandleFactories(class TensorHandleFactoryRegistry& registry)
629 {
630  auto memoryManager = std::make_shared<NeonMemoryManager>(std::make_unique<arm_compute::Allocator>(),
631  BaseMemoryManager::MemoryAffinity::Offset);
632 
633  registry.RegisterMemoryManager(memoryManager);
634 
635  auto factory = std::make_unique<NeonTensorHandleFactory>(memoryManager);
636  // Register copy and import factory pair
637  registry.RegisterCopyAndImportFactoryPair(factory->GetId(), factory->GetId());
638  // Register the factory
639  registry.RegisterFactory(std::move(factory));
640 }
641 
642 std::unique_ptr<ICustomAllocator> NeonBackend::GetDefaultAllocator() const
643 {
644  return std::make_unique<DefaultAllocator>();
645 }
646 
647 
648 } // namespace armnn
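
For orientation, here is a minimal sketch (not part of this file) of how an application normally reaches this backend: the Neon backend registers itself under the id "CpuAcc" (see NeonBackendId.hpp), and armnn::Optimize() hands the subgraphs assigned to that backend to NeonBackend::OptimizeSubgraphView() above. The network shape, layer choice, and names below are illustrative only.

    #include <armnn/ArmNN.hpp>

    int main()
    {
        using namespace armnn;

        // Create the runtime; a build with Neon support registers NeonBackend as "CpuAcc".
        IRuntime::CreationOptions options;
        IRuntimePtr runtime = IRuntime::Create(options);

        // Build a trivial Input -> ReLu -> Output network.
        INetworkPtr network = INetwork::Create();
        ActivationDescriptor reluDesc;
        reluDesc.m_Function = ActivationFunction::ReLu;

        IConnectableLayer* input  = network->AddInputLayer(0);
        IConnectableLayer* relu   = network->AddActivationLayer(reluDesc, "relu");
        IConnectableLayer* output = network->AddOutputLayer(0);

        input->GetOutputSlot(0).Connect(relu->GetInputSlot(0));
        relu->GetOutputSlot(0).Connect(output->GetInputSlot(0));

        TensorInfo info({1, 4}, DataType::Float32);
        input->GetOutputSlot(0).SetTensorInfo(info);
        relu->GetOutputSlot(0).SetTensorInfo(info);

        // Optimize() calls NeonBackend::OptimizeSubgraphView() for the parts of the
        // graph assigned to CpuAcc, applying the fusions shown in the listing above.
        IOptimizedNetworkPtr optNet = Optimize(*network, {Compute::CpuAcc}, runtime->GetDeviceSpec());

        NetworkId networkId;
        runtime->LoadNetwork(networkId, std::move(optNet));
        return 0;
    }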
armnn::NeonFullyConnectedWorkloadValidate
arm_compute::Status NeonFullyConnectedWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const Optional< TensorInfo > &biases, const FullyConnectedDescriptor &descriptor, const ActivationDescriptor *activationDescriptor)
Definition: NeonFullyConnectedWorkload.cpp:24
armnn::OptimizationViews::AddUntouchedSubgraph
void AddUntouchedSubgraph(SubgraphView &&subgraph)
Definition: OptimizationViews.hpp:48
armnn::BaseMemoryManager::MemoryAffinity::Offset
@ Offset
armnn::BinaryOperation::Mul
@ Mul
NeonBackend.hpp
armnn::ActivationDescriptor
An ActivationDescriptor for the ActivationLayer.
Definition: Descriptors.hpp:36
armnn::FullyConnectedDescriptor
A FullyConnectedDescriptor for the FullyConnectedLayer.
Definition: Descriptors.hpp:507
armnn::BinaryOperation::Add
@ Add
armnn::NeonAdditionWorkloadValidate
arm_compute::Status NeonAdditionWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)
Definition: NeonAdditionWorkload.cpp:20
armnn::LayerType::Splitter
@ Splitter
armnn::LayerType::BatchNormalization
@ BatchNormalization
armnn::Optional
Definition: Optional.hpp:270
armnn::NeonMultiplicationWorkloadValidate
arm_compute::Status NeonMultiplicationWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)
Definition: NeonMultiplicationWorkload.cpp:19
Descriptors.hpp
armnn::OutputSlot::GetTensorInfo
const TensorInfo & GetTensorInfo() const override
Definition: Layer.cpp:92
armnn::DepthwiseConvolution2dDescriptor::m_BiasEnabled
bool m_BiasEnabled
Enable/disable bias.
Definition: Descriptors.hpp:708
NeonFusedWorkload.hpp
NeonAdditionWorkload.hpp
DefaultAllocator.hpp
armnn::BatchNormalizationLayer::m_Mean
std::shared_ptr< ConstTensorHandle > m_Mean
A shared pointer to store Mean values.
Definition: BatchNormalizationLayer.hpp:19
armnn::SubtractionLayer
This layer represents a subtraction operation.
Definition: SubtractionLayer.hpp:14
armnn::NeonSubtractionWorkloadValidate
arm_compute::Status NeonSubtractionWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)
Definition: NeonSubtractionWorkload.cpp:22
armnn::IBackendInternal::IMemoryManagerSharedPtr
std::shared_ptr< IMemoryManager > IMemoryManagerSharedPtr
Definition: IBackendInternal.hpp:99
armnn::TensorHandleFactoryRegistry
Definition: TensorHandleFactoryRegistry.hpp:23
armnn::ConnectedToLayerType
bool ConnectedToLayerType(Layer *baseLayer, LayerType layerType, unsigned int dimSize=0)
Checks the Layer's Connections to see if it's connected to a Layer with the provided layerType.
Definition: SubgraphUtils.hpp:271
armnn::DepthwiseConvolution2dLayer
This layer represents a depthwise convolution 2d operation.
Definition: DepthwiseConvolution2dLayer.hpp:15
armnn::NeonBackendModelContext
The NeonBackendModelContext is used to pass in Neon specific backend ModelOptions.
Definition: NeonBackendModelContext.hpp:19
armnn::NeonLayerSupport
Definition: NeonLayerSupport.hpp:14
NeonDivisionWorkload.hpp
armnn::FusedLayer
Definition: FusedLayer.hpp:19
armnn::GetFusedName
void GetFusedName(Layer *layerList[4], std::string &fusedName)
Definition: NeonBackendOptimizationUtils.hpp:71
armnn::BinaryOperation::Sub
@ Sub
NeonBatchNormalizationWorkload.hpp
armnn::NeonTensorHandleFactory::GetIdStatic
static const FactoryId & GetIdStatic()
Definition: NeonTensorHandleFactory.cpp:89
BaseMemoryManager.hpp
armnn::TensorHandleFactoryRegistry::RegisterMemoryManager
void RegisterMemoryManager(std::shared_ptr< IMemoryManager > memoryManager)
Register a memory manager with shared ownership.
Definition: TensorHandleFactoryRegistry.cpp:34
NeonConvolution2dWorkload.hpp
armnn::ActivationFunction::BoundedReLu
@ BoundedReLu
min(a, max(b, input)) ReLu1 & ReLu6.
BackendRegistry.hpp
armnn::LayerType::Reduce
@ Reduce
armnn::BatchNormalizationLayer
This layer represents a batch normalization operation.
Definition: BatchNormalizationLayer.hpp:15
armnn::IBackendInternal::IBackendContextPtr
std::unique_ptr< IBackendContext > IBackendContextPtr
Definition: IBackendInternal.hpp:90
Optimizer.hpp
armnn::NeonBackend::GetHandleFactoryPreferences
std::vector< ITensorHandleFactory::FactoryId > GetHandleFactoryPreferences() const override
(Optional) Returns a vector of supported TensorHandleFactory ids in preference order.
Definition: NeonBackend.cpp:623
armnn::Layer::GetInputSlot
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
Definition: Layer.hpp:337
armnn::LayerWithParameters::GetParameters
const Parameters & GetParameters() const override
If the layer has a descriptor return it.
Definition: LayerWithParameters.hpp:19
armnn::LayerType::ElementwiseBinary
@ ElementwiseBinary
armnn::INetwork::AddFusedLayer
IConnectableLayer * AddFusedLayer(const FusedDescriptor &fusedDescriptor, const char *name=nullptr)
Adds a Fused layer to the network.
Definition: Network.cpp:338
armnn::Layer::GetName
const char * GetName() const override
Returns the name of the layer.
Definition: Layer.hpp:332
armnn::Convolution2dLayer
This layer represents a convolution 2d operation.
Definition: Convolution2dLayer.hpp:15
armnn::FusedKernelType::AddMulAdd
@ AddMulAdd
armnn::Layer
Definition: Layer.hpp:230
armnn::NeonBackend::CreateBackendContext
IBackendInternal::IBackendContextPtr CreateBackendContext(const IRuntime::CreationOptions &) const override
Create the runtime context of the backend.
Definition: NeonBackend.cpp:109
armnn::AdditionLayer
This layer represents an addition operation.
Definition: AdditionLayer.hpp:13
armnn::BatchNormalizationLayer::m_Gamma
std::shared_ptr< ConstTensorHandle > m_Gamma
A shared pointer to store Gamma values.
Definition: BatchNormalizationLayer.hpp:25
armnn::NeonBackend::GetDefaultAllocator
std::unique_ptr< ICustomAllocator > GetDefaultAllocator() const override
Returns the default memory allocator for the backend.
Definition: NeonBackend.cpp:642
armnn::Layer::GetAdditionalInformation
std::shared_ptr< T > GetAdditionalInformation() const
Definition: Layer.hpp:368
armnn::FusedDescriptor
A FusedDescriptor for the FusedLayer.
Definition: Descriptors.hpp:944
armnn::ReshapeLayer
This layer represents a reshape operation.
Definition: ReshapeLayer.hpp:15
armnn::NeonBackend::OptimizeSubgraphView
OptimizationViews OptimizeSubgraphView(const SubgraphView &subgraph, const ModelOptions &modelOptions) const override
Definition: NeonBackend.cpp:144
armnn::NeonDivisionWorkloadValidate
arm_compute::Status NeonDivisionWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)
Definition: NeonDivisionWorkload.cpp:18
armnn::BatchNormalizationLayer::m_Variance
std::shared_ptr< ConstTensorHandle > m_Variance
A shared pointer to store Variance values.
Definition: BatchNormalizationLayer.hpp:21
armnn::SubgraphView::begin
IConnectableLayerIterator begin()
Definition: SubgraphView.cpp:283
armnn::LayerType::Subtraction
@ Subtraction
armnn::FullyConnectedDescriptor::m_BiasEnabled
bool m_BiasEnabled
Enable/disable bias.
Definition: Descriptors.hpp:526
armnn::RemoveReshapeLayer
void RemoveReshapeLayer(ReshapeLayer *baseLayer, std::map< LayerGuid, Layer * > &untouched, OptimizationViews &optimizationViews)
Definition: SubgraphUtils.hpp:293
NeonWorkloadFactory.hpp
NeonReduceWorkload.hpp
armnn::ReduceLayer
This layer represents a reduction operation.
Definition: ReduceLayer.hpp:14
armnn::MultiplicationLayer
This layer represents a multiplication operation.
Definition: MultiplicationLayer.hpp:14
IBackendContext.hpp
PolymorphicDowncast.hpp
armnn::Convolution2dDescriptor::m_BiasEnabled
bool m_BiasEnabled
Enable/disable bias.
Definition: Descriptors.hpp:582
armnn::NeonBackend::CreateBackendProfilingContext
IBackendInternal::IBackendProfilingContextPtr CreateBackendProfilingContext(const IRuntime::CreationOptions &, IBackendProfilingPtr &backendProfiling) override
Create context specifically used for profiling interaction from backends.
Definition: NeonBackend.cpp:114
NeonDepthwiseConvolutionWorkload.hpp
armnn::Layer::GetGuid
LayerGuid GetGuid() const final
Returns the unique id of the layer.
Definition: Layer.hpp:343
armnn::Layer::SetAdditionalInfoForObject
void SetAdditionalInfoForObject(const AdditionalInfoObjectPtr &additionalInfo)
Definition: Layer.hpp:373
armnn::LayerType::Multiplication
@ Multiplication
NeonBackendId.hpp
armnn::SubgraphView
The SubgraphView class represents a subgraph of a Graph.
Definition: SubgraphView.hpp:31
armnn::LayerType::Addition
@ Addition
armnn::FullyConnectedLayer
This layer represents a fully connected operation.
Definition: FullyConnectedLayer.hpp:15
armnn::OptimizationViews
Definition: OptimizationViews.hpp:17
ArmComputeUtils.hpp
armnn::ElementwiseBinaryLayer
This layer represents a elementwiseBinary operation.
Definition: ElementwiseBinaryLayer.hpp:14
armnn::DivisionLayer
This layer represents a division operation.
Definition: DivisionLayer.hpp:14
NeonSubtractionWorkload.hpp
armnn::NeonDepthwiseConvolutionWorkloadValidate
arm_compute::Status NeonDepthwiseConvolutionWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, const ActivationDescriptor *activationDescriptor)
Definition: NeonDepthwiseConvolutionWorkload.cpp:29
armnn::NeonBackend::CreateMemoryManager
IBackendInternal::IMemoryManagerUniquePtr CreateMemoryManager() const override
Definition: NeonBackend.cpp:52
armnn::NeonBackend::GetIdStatic
static const BackendId & GetIdStatic()
Definition: NeonBackend.cpp:46
armnn::LayerType::Division
@ Division
armnn::IBackendInternal::IBackendProfilingContextPtr
std::shared_ptr< arm::pipe::IBackendProfilingContext > IBackendProfilingContextPtr
This is the bridge between backend and backend profiling; we'll keep it in the backend namespace.
Definition: IBackendInternal.hpp:92
armnn::NeonBackend::GetLayerSupport
IBackendInternal::ILayerSupportSharedPtr GetLayerSupport() const override
Definition: NeonBackend.cpp:126
armnn::LayerType::FullyConnected
@ FullyConnected
NeonFullyConnectedWorkload.hpp
armnn::NeonBackend::CreateBackendSpecificModelContext
IBackendInternal::IBackendSpecificModelContextPtr CreateBackendSpecificModelContext(const ModelOptions &modelOptions) const override
Definition: NeonBackend.cpp:120
armnn::BatchNormalizationLayer::m_Beta
std::shared_ptr< ConstTensorHandle > m_Beta
A shared pointer to store Beta values.
Definition: BatchNormalizationLayer.hpp:23
NeonTensorHandleFactory.hpp
armnn::LayerType::DepthwiseConvolution2d
@ DepthwiseConvolution2d
armnn::Layer::GetType
LayerType GetType() const override
Returns the armnn::LayerType of this layer.
Definition: Layer.hpp:286
armnn::NeonBackend::RegisterTensorHandleFactories
void RegisterTensorHandleFactories(class TensorHandleFactoryRegistry &registry) override
(Optional) Register TensorHandleFactories Either this method or CreateMemoryManager() and IWorkloadFa...
Definition: NeonBackend.cpp:628
armnn::Status
Status
Definition: Types.hpp:42
armnn::IBackendInternal::IBackendProfilingPtr
std::unique_ptr< arm::pipe::IBackendProfiling > IBackendProfilingPtr
Definition: IBackendInternal.hpp:93
armnn::IRuntime::CreationOptions
Definition: IRuntime.hpp:78
armnn::LayerType::Reshape
@ Reshape
armnn::Layer::BeginOutputSlots
std::vector< OutputSlot >::iterator BeginOutputSlots()
Definition: Layer.hpp:266
armnn::IBackendInternal::IMemoryManagerUniquePtr
std::unique_ptr< IMemoryManager > IMemoryManagerUniquePtr
Definition: IBackendInternal.hpp:98
armnn::NeonFusedWorkloadValidate
arm_compute::Status NeonFusedWorkloadValidate(const std::vector< std::reference_wrapper< TensorInfo >> &inputInfos, const std::vector< std::reference_wrapper< TensorInfo >> &outputInfos, const FusedDescriptor &fusedDescriptor, const ActivationDescriptor *activationDescriptor)
Definition: NeonFusedWorkload.cpp:22
armnn::ReduceDescriptor::m_vAxis
std::vector< uint32_t > m_vAxis
The indices of the dimensions to reduce.
Definition: Descriptors.hpp:1556
armnn::NeonConvolution2dWorkloadValidate
arm_compute::Status NeonConvolution2dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, bool isFastMathEnabled, const ActivationDescriptor *activationDescriptor)
Definition: NeonConvolution2dWorkload.cpp:24
armnn::NeonBatchNormalizationValidate
arm_compute::Status NeonBatchNormalizationValidate(const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &descriptor, const ActivationDescriptor *activationDescriptor)
Definition: NeonBatchNormalizationWorkload.cpp:24
armnn::OptimizationViews::GetSubstitutions
const Substitutions & GetSubstitutions() const
Definition: OptimizationViews.hpp:58
armnn::SubgraphView::end
IConnectableLayerIterator end()
Definition: SubgraphView.cpp:288
armnn::OptimizationViews::GetDeletedSubgraphs
const Subgraphs & GetDeletedSubgraphs() const
Definition: OptimizationViews.hpp:61
armnn::ElementwiseBinaryDescriptor::m_Operation
BinaryOperation m_Operation
Specifies the elementwiseBinary operation to execute.
Definition: Descriptors.hpp:125
armnn::BackendId
Definition: BackendId.hpp:75
armnn::ActivationFunction::ReLu
@ ReLu
armnn::InputSlot::GetConnectedOutputSlot
const OutputSlot * GetConnectedOutputSlot() const
Definition: Layer.hpp:56
armnn
Copyright (c) 2021 ARM Limited and Contributors.
Definition: 01_00_quick_start.dox:6
armnn::TensorHandleFactoryRegistry::RegisterFactory
void RegisterFactory(std::unique_ptr< ITensorHandleFactory > allocator)
Register a TensorHandleFactory and transfer ownership.
Definition: TensorHandleFactoryRegistry.cpp:12
NeonBackendModelContext.hpp
armnn::OptimizationViews::GetINetwork
INetwork * GetINetwork()
Definition: OptimizationViews.hpp:69
armnn::IBackendInternal::ILayerSupportSharedPtr
std::shared_ptr< ILayerSupport > ILayerSupportSharedPtr
Definition: IBackendInternal.hpp:94
armnn::IConnectableLayer
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:80
armnn::TensorHandleFactoryRegistry::RegisterCopyAndImportFactoryPair
void RegisterCopyAndImportFactoryPair(ITensorHandleFactory::FactoryId copyFactoryId, ITensorHandleFactory::FactoryId importFactoryId)
Register a pair of TensorHandleFactory Id for Memory Copy and TensorHandleFactory Id for Memory Import.
Definition: TensorHandleFactoryRegistry.cpp:66
armnn::ModelOptions
std::vector< BackendOptions > ModelOptions
Definition: BackendOptions.hpp:18
NeonBackendOptimizationUtils.hpp
armnn::ReportUntouchedLayers
void ReportUntouchedLayers(OptimizationViews &optimizationViews, std::map< LayerGuid, Layer * > untouched)
Definition: SubgraphUtils.hpp:220
armnn::ConnectedToLayerWithNCHW
bool ConnectedToLayerWithNCHW(Layer *baseLayer)
Checks if the Layer is connected to any Layer that has an NCHW layout.
Definition: SubgraphUtils.hpp:250
armnn::BinaryOperation::Div
@ Div
NeonMultiplicationWorkload.hpp
armnn::LayerType::Convolution2d
@ Convolution2d
armnn::NeonBackendId
constexpr const char * NeonBackendId()
Definition: NeonBackendId.hpp:10
armnn::LayerType::Activation
@ Activation
armnn::IBackendInternal::IWorkloadFactoryPtr
std::unique_ptr< IWorkloadFactory > IWorkloadFactoryPtr
Definition: IBackendInternal.hpp:89
armnn::ReduceDescriptor
A ReduceDescriptor for the REDUCE operators.
Definition: Descriptors.hpp:1538
armnn::NeonBackend::CreateWorkloadFactory
IWorkloadFactoryPtr CreateWorkloadFactory(const IBackendInternal::IMemoryManagerSharedPtr &memoryManager=nullptr) const override
Definition: NeonBackend.cpp:58
armnn::Layer::EndOutputSlots
std::vector< OutputSlot >::iterator EndOutputSlots()
Definition: Layer.hpp:267
IMemoryManager.hpp
ArmComputeSubgraphUtils.hpp
armnn::IBackendInternal::IBackendSpecificModelContextPtr
std::shared_ptr< IBackendModelContext > IBackendSpecificModelContextPtr
Definition: IBackendInternal.hpp:96
NeonLayerSupport.hpp
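
The layer-support entries indexed above can be exercised through the public BackendHelper API rather than by instantiating NeonBackend directly. A small sketch, assuming armnn/BackendHelper.hpp and armnn/BackendRegistry.hpp (LayerSupportHandle wraps the ILayerSupportSharedPtr that NeonBackend::GetLayerSupport() returns); the tensor shape and descriptor values are illustrative:

    #include <armnn/BackendHelper.hpp>
    #include <armnn/BackendRegistry.hpp>
    #include <iostream>

    int main()
    {
        using namespace armnn;

        // Check that the Neon backend was compiled into this build.
        if (!BackendRegistryInstance().IsBackendRegistered("CpuAcc"))
        {
            std::cout << "CpuAcc backend not available\n";
            return 0;
        }

        // Obtain a layer-support handle backed by NeonLayerSupport.
        LayerSupportHandle handle = GetILayerSupportByBackendId(BackendId("CpuAcc"));

        TensorInfo info({1, 16}, DataType::Float32);
        ActivationDescriptor desc;
        desc.m_Function = ActivationFunction::BoundedReLu; // min(a, max(b, input))
        desc.m_A = 6.0f;                                   // upper bound, i.e. ReLu6

        std::string reason;
        bool supported = handle.IsActivationSupported(info, info, desc, Optional<std::string&>(reason));
        std::cout << "BoundedReLu on CpuAcc: " << (supported ? "supported" : reason) << "\n";
        return 0;
    }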