ArmNN
 20.11
NeonBackend Class Reference

#include <NeonBackend.hpp>

Inheritance diagram for NeonBackend:
IBackendInternal IBackend

Public Member Functions

 NeonBackend ()=default
 
 ~NeonBackend ()=default
 
const BackendId & GetId () const override
 
IBackendInternal::IMemoryManagerUniquePtr CreateMemoryManager () const override
 
IWorkloadFactoryPtr CreateWorkloadFactory (const IBackendInternal::IMemoryManagerSharedPtr &memoryManager=nullptr) const override
 
IWorkloadFactoryPtr CreateWorkloadFactory (class TensorHandleFactoryRegistry &tensorHandleFactoryRegistry) const override
 
IWorkloadFactoryPtr CreateWorkloadFactory (const IMemoryManagerSharedPtr &memoryManager, const ModelOptions &modelOptions) const override
 
IWorkloadFactoryPtr CreateWorkloadFactory (class TensorHandleFactoryRegistry &tensorHandleFactoryRegistry, const ModelOptions &modelOptions) const override
 
IBackendInternal::IBackendContextPtr CreateBackendContext (const IRuntime::CreationOptions &) const override
 Create the runtime context of the backend. More...
 
IBackendInternal::IBackendProfilingContextPtr CreateBackendProfilingContext (const IRuntime::CreationOptions &, IBackendProfilingPtr &backendProfiling) override
 Create context specifically used for profiling interaction from backends. More...
 
IBackendInternal::Optimizations GetOptimizations () const override
 
IBackendInternal::ILayerSupportSharedPtr GetLayerSupport () const override
 
IBackendInternal::ILayerSupportSharedPtr GetLayerSupport (const ModelOptions &modelOptions) const override
 
OptimizationViews OptimizeSubgraphView (const SubgraphView &subgraph) const override
 
std::vector< ITensorHandleFactory::FactoryId > GetHandleFactoryPreferences () const override
 (Optional) Returns a vector of supported TensorHandleFactory ids in preference order. More...
 
void RegisterTensorHandleFactories (class TensorHandleFactoryRegistry &registry) override
 (Optional) Register TensorHandleFactories Either this method or CreateMemoryManager() and IWorkloadFactory::CreateTensor()/IWorkloadFactory::CreateSubtensor() methods must be implemented. More...
 
IBackendInternal::IBackendSpecificModelContextPtr CreateBackendSpecificModelContext (const ModelOptions &modelOptions) const override
 
- Public Member Functions inherited from IBackendInternal
 ~IBackendInternal () override=default
 Allow backends created by the factory function to be destroyed through IBackendInternal. More...
 
virtual ISubGraphConverterPtr CreateSubGraphConverter (const std::shared_ptr< SubGraph > &subGraph) const
 
virtual SubGraphUniquePtr OptimizeSubGraph (const SubGraph &subGraph, bool &optimizationAttempted) const
 
virtual OptimizationViews OptimizeSubgraphView (const SubgraphView &subgraph, const ModelOptions &modelOptions) const
 
bool SupportsTensorAllocatorAPI () const
 
ITensorHandleFactory::FactoryId GetBackwardCompatibleFavoriteHandleFactory ()
 

Static Public Member Functions

static const BackendId & GetIdStatic ()
 
- Static Public Member Functions inherited from IBackendInternal
static constexpr BackendVersion GetApiVersion ()
 Returns the version of the Backend API. More...
 

Additional Inherited Members

- Public Types inherited from IBackendInternal
using IWorkloadFactoryPtr = std::unique_ptr< IWorkloadFactory >
 
using IBackendContextPtr = std::unique_ptr< IBackendContext >
 
using IBackendProfilingContextPtr = std::shared_ptr< armnn::profiling::IBackendProfilingContext >
 This is the bridge between backend and backend profiling we'll keep it in the backend namespace. More...
 
using IBackendProfilingPtr = std::unique_ptr< armnn::profiling::IBackendProfiling >
 
using OptimizationPtr = std::unique_ptr< Optimization >
 
using Optimizations = std::vector< OptimizationPtr >
 
using ILayerSupportSharedPtr = std::shared_ptr< ILayerSupport >
 
using IBackendSpecificModelContextPtr = std::shared_ptr< IBackendModelContext >
 
using IMemoryManagerUniquePtr = std::unique_ptr< IMemoryManager >
 
using IMemoryManagerSharedPtr = std::shared_ptr< IMemoryManager >
 
using GraphUniquePtr = std::unique_ptr< Graph >
 
using SubgraphViewUniquePtr = std::unique_ptr< SubgraphView >
 
using ISubGraphConverterPtr = std::unique_ptr< ISubGraphConverter >
 
using SubGraphUniquePtr = std::unique_ptr< SubGraph >
 
- Protected Member Functions inherited from IBackendInternal
 IBackendInternal ()=default
 Creation must be done through a specific backend interface. More...
 
- Protected Member Functions inherited from IBackend
 IBackend ()
 
virtual ~IBackend ()
 

Detailed Description

Definition at line 12 of file NeonBackend.hpp.

Constructor & Destructor Documentation

◆ NeonBackend()

NeonBackend ( )
default

◆ ~NeonBackend()

~NeonBackend ( )
default

Member Function Documentation

◆ CreateBackendContext()

IBackendInternal::IBackendContextPtr CreateBackendContext ( const IRuntime::CreationOptions & ) const
overridevirtual

Create the runtime context of the backend.

Implementations may return a default-constructed IBackendContextPtr if no context is needed at runtime. Implementations must throw BackendUnavailableException if the backend cannot be used (for example, necessary accelerator hardware is not present). The default implementation always returns a default-constructed pointer.

Reimplemented from IBackendInternal.

Definition at line 94 of file NeonBackend.cpp.

Referenced by NeonBackend::GetId().

95 {
96  return IBackendContextPtr{};
97 }
std::unique_ptr< IBackendContext > IBackendContextPtr

◆ CreateBackendProfilingContext()

IBackendInternal::IBackendProfilingContextPtr CreateBackendProfilingContext ( const IRuntime::CreationOptions & creationOptions,
IBackendProfilingPtr & backendProfiling 
)
overridevirtual

Create context specifically used for profiling interaction from backends.

Reimplemented from IBackendInternal.

Definition at line 99 of file NeonBackend.cpp.

Referenced by NeonBackend::GetId().

101 {
103 }
std::shared_ptr< armnn::profiling::IBackendProfilingContext > IBackendProfilingContextPtr
This is the bridge between backend and backend profiling we'll keep it in the backend namespace...

◆ CreateBackendSpecificModelContext()

IBackendInternal::IBackendSpecificModelContextPtr CreateBackendSpecificModelContext ( const ModelOptions & modelOptions) const
overridevirtual

Reimplemented from IBackendInternal.

Definition at line 110 of file NeonBackend.cpp.

Referenced by NeonBackend::CreateWorkloadFactory(), NeonBackend::GetId(), and NeonBackend::GetLayerSupport().

112 {
113  return IBackendSpecificModelContextPtr{new NeonBackendModelContext{modelOptions}};
114 }
std::shared_ptr< IBackendModelContext > IBackendSpecificModelContextPtr

◆ CreateMemoryManager()

IBackendInternal::IMemoryManagerUniquePtr CreateMemoryManager ( ) const
overridevirtual

Reimplemented from IBackendInternal.

Definition at line 48 of file NeonBackend.cpp.

References BaseMemoryManager::Offset.

Referenced by NeonBackend::GetId().

49 {
50  return std::make_unique<NeonMemoryManager>(std::make_unique<arm_compute::Allocator>(),
52 }

◆ CreateWorkloadFactory() [1/4]

IBackendInternal::IWorkloadFactoryPtr CreateWorkloadFactory ( const IBackendInternal::IMemoryManagerSharedPtr & memoryManager = nullptr) const
overridevirtual

Implements IBackendInternal.

Definition at line 54 of file NeonBackend.cpp.

Referenced by NeonBackend::GetId().

56 {
57  return std::make_unique<NeonWorkloadFactory>(
58  PolymorphicPointerDowncast<NeonMemoryManager>(memoryManager));
59 }

◆ CreateWorkloadFactory() [2/4]

IBackendInternal::IWorkloadFactoryPtr CreateWorkloadFactory ( class TensorHandleFactoryRegistry & tensorHandleFactoryRegistry) const
overridevirtual

Reimplemented from IBackendInternal.

Definition at line 68 of file NeonBackend.cpp.

References BaseMemoryManager::Offset, TensorHandleFactoryRegistry::RegisterFactory(), and TensorHandleFactoryRegistry::RegisterMemoryManager().

70 {
71  auto memoryManager = std::make_shared<NeonMemoryManager>(std::make_unique<arm_compute::Allocator>(),
73 
74  tensorHandleFactoryRegistry.RegisterMemoryManager(memoryManager);
75  tensorHandleFactoryRegistry.RegisterFactory(std::make_unique<NeonTensorHandleFactory>(memoryManager));
76 
77  return std::make_unique<NeonWorkloadFactory>(
78  PolymorphicPointerDowncast<NeonMemoryManager>(memoryManager));
79 }

◆ CreateWorkloadFactory() [3/4]

IBackendInternal::IWorkloadFactoryPtr CreateWorkloadFactory ( const IMemoryManagerSharedPtr & memoryManager,
const ModelOptions & modelOptions 
) const
overridevirtual

Reimplemented from IBackendInternal.

Definition at line 61 of file NeonBackend.cpp.

References NeonBackend::CreateBackendSpecificModelContext().

63 {
64  return std::make_unique<NeonWorkloadFactory>(
65  PolymorphicPointerDowncast<NeonMemoryManager>(memoryManager), CreateBackendSpecificModelContext(modelOptions));
66 }
IBackendInternal::IBackendSpecificModelContextPtr CreateBackendSpecificModelContext(const ModelOptions &modelOptions) const override

◆ CreateWorkloadFactory() [4/4]

IBackendInternal::IWorkloadFactoryPtr CreateWorkloadFactory ( class TensorHandleFactoryRegistry & tensorHandleFactoryRegistry,
const ModelOptions & modelOptions 
) const
overridevirtual

Reimplemented from IBackendInternal.

Definition at line 81 of file NeonBackend.cpp.

References NeonBackend::CreateBackendSpecificModelContext(), BaseMemoryManager::Offset, TensorHandleFactoryRegistry::RegisterFactory(), and TensorHandleFactoryRegistry::RegisterMemoryManager().

83 {
84  auto memoryManager = std::make_shared<NeonMemoryManager>(std::make_unique<arm_compute::Allocator>(),
86 
87  tensorHandleFactoryRegistry.RegisterMemoryManager(memoryManager);
88  tensorHandleFactoryRegistry.RegisterFactory(std::make_unique<NeonTensorHandleFactory>(memoryManager));
89 
90  return std::make_unique<NeonWorkloadFactory>(
91  PolymorphicPointerDowncast<NeonMemoryManager>(memoryManager), CreateBackendSpecificModelContext(modelOptions));
92 }
IBackendInternal::IBackendSpecificModelContextPtr CreateBackendSpecificModelContext(const ModelOptions &modelOptions) const override

◆ GetHandleFactoryPreferences()

std::vector< ITensorHandleFactory::FactoryId > GetHandleFactoryPreferences ( ) const
overridevirtual

(Optional) Returns a vector of supported TensorHandleFactory ids in preference order.

Reimplemented from IBackendInternal.

Definition at line 399 of file NeonBackend.cpp.

References NeonTensorHandleFactory::GetIdStatic().

Referenced by NeonBackend::GetId().

400 {
401  return std::vector<ITensorHandleFactory::FactoryId>() = { NeonTensorHandleFactory::GetIdStatic() };
402 }
static const FactoryId & GetIdStatic()

◆ GetId()

◆ GetIdStatic()

const BackendId & GetIdStatic ( )
static

Definition at line 42 of file NeonBackend.cpp.

References armnn::NeonBackendId().

Referenced by NeonBackend::GetId().

43 {
44  static const BackendId s_Id{NeonBackendId()};
45  return s_Id;
46 }
constexpr const char * NeonBackendId()

◆ GetLayerSupport() [1/2]

IBackendInternal::ILayerSupportSharedPtr GetLayerSupport ( ) const
overridevirtual

Implements IBackendInternal.

Definition at line 116 of file NeonBackend.cpp.

Referenced by NeonBackend::GetId().

117 {
118  static ILayerSupportSharedPtr layerSupport
119  {
121  };
122  return layerSupport;
123 }
std::shared_ptr< IBackendModelContext > IBackendSpecificModelContextPtr
std::shared_ptr< ILayerSupport > ILayerSupportSharedPtr

◆ GetLayerSupport() [2/2]

IBackendInternal::ILayerSupportSharedPtr GetLayerSupport ( const ModelOptions & modelOptions) const
overridevirtual

Reimplemented from IBackendInternal.

Definition at line 125 of file NeonBackend.cpp.

References NeonBackend::CreateBackendSpecificModelContext().

126 {
127  static ILayerSupportSharedPtr layerSupport
128  {
129  new NeonLayerSupport(CreateBackendSpecificModelContext(modelOptions))
130  };
131  return layerSupport;
132 }
IBackendInternal::IBackendSpecificModelContextPtr CreateBackendSpecificModelContext(const ModelOptions &modelOptions) const override
std::shared_ptr< ILayerSupport > ILayerSupportSharedPtr

◆ GetOptimizations()

IBackendInternal::Optimizations GetOptimizations ( ) const
overridevirtual

Reimplemented from IBackendInternal.

Definition at line 105 of file NeonBackend.cpp.

Referenced by NeonBackend::GetId().

106 {
107  return Optimizations{};
108 }
std::vector< OptimizationPtr > Optimizations

◆ OptimizeSubgraphView()

OptimizationViews OptimizeSubgraphView ( const SubgraphView & subgraph) const
overridevirtual

Reimplemented from IBackendInternal.

Definition at line 134 of file NeonBackend.cpp.

References armnn::Activation, armnn::Addition, OptimizationViews::AddUntouchedSubgraph(), armnn::BatchNormalization, SubgraphView::begin(), Layer::BeginOutputSlots(), armnn::Convolution2d, armnn::DepthwiseConvolution2d, armnn::Division, SubgraphView::end(), Layer::EndOutputSlots(), armnn::FullyConnected, Layer::GetAdditionalInformation(), InputSlot::GetConnectedOutputSlot(), Layer::GetGuid(), Layer::GetInputSlot(), Layer::GetName(), LayerWithParameters< Parameters >::GetParameters(), OptimizationViews::GetSubstitutions(), OutputSlot::GetTensorInfo(), Layer::GetType(), BatchNormalizationLayer::m_Beta, FullyConnectedLayer::m_Bias, DepthwiseConvolution2dLayer::m_Bias, Convolution2dLayer::m_Bias, Convolution2dDescriptor::m_BiasEnabled, DepthwiseConvolution2dDescriptor::m_BiasEnabled, BatchNormalizationLayer::m_Gamma, BatchNormalizationLayer::m_Mean, BatchNormalizationLayer::m_Variance, DepthwiseConvolution2dLayer::m_Weight, FullyConnectedLayer::m_Weight, Convolution2dLayer::m_Weight, armnn::Multiplication, armnn::NeonAdditionWorkloadValidate(), armnn::NeonBatchNormalizationValidate(), armnn::NeonConvolution2dWorkloadValidate(), armnn::NeonDepthwiseConvolutionWorkloadValidate(), armnn::NeonDivisionWorkloadValidate(), armnn::NeonFullyConnectedWorkloadValidate(), armnn::NeonMultiplicationWorkloadValidate(), armnn::NeonSubtractionWorkloadValidate(), armnn::ReportUntouchedLayers(), and armnn::Subtraction.

Referenced by NeonBackend::GetId().

135 {
136  OptimizationViews optimizationViews;
137 
138  auto it = subgraph.end();
139  std::map<LayerGuid, Layer*> untouched;
140 
141  while (it != subgraph.begin())
142  {
143  --it;
144  Layer& base = **it;
145  untouched.insert({base.GetGuid(), &base});
146  }
147 
148  it = subgraph.end();
149  while (it != subgraph.begin())
150  {
151  --it;
152  Layer& base = **it;
153 
154  if ((base.GetType() == LayerType::DepthwiseConvolution2d || base.GetType() == LayerType::Convolution2d
155  || base.GetType() == LayerType::BatchNormalization || base.GetType() == LayerType::FullyConnected
156  || base.GetType() == LayerType::Addition || base.GetType() == LayerType::Multiplication
157  || base.GetType() == LayerType::Subtraction || base.GetType() == LayerType::Division)
158  && (base.GetAdditionalInformation<ActivationDescriptor>() == nullptr))
159  {
160  for (auto output = base.BeginOutputSlots(); output != base.EndOutputSlots(); ++output)
161  {
162  if (output->GetNumConnections() == 1)
163  {
164  for (auto&& childInput : output->GetConnections())
165  {
166  if (childInput->GetOwningLayer().GetType() == LayerType::Activation)
167  {
168  Layer& child = childInput->GetOwningLayer();
169 
170  auto* activationLayer = PolymorphicDowncast<ActivationLayer*>(&child);
171 
172  const std::string name = std::string("fused-") + child.GetName() + std::string("-into-") +
173  base.GetName();
174 
175  // Get params from activation layer
176  ActivationDescriptor activationDesc = activationLayer->GetParameters();
177 
178  if (base.GetType() == LayerType::Convolution2d)
179  {
180  Convolution2dLayer* baseLayer = PolymorphicDowncast<Convolution2dLayer*>(&base);
181 
182  Optional<TensorInfo> biases;
183 
184  if (baseLayer->GetParameters().m_BiasEnabled)
185  {
186  biases = baseLayer->m_Bias->GetTensorInfo();
187  }
188 
190  baseLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
191  activationLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
192  baseLayer->GetParameters(),
193  baseLayer->m_Weight->GetTensorInfo(),
194  biases,
195  false,
196  &activationDesc);
197 
198  if (status)
199  {
200  FuseLayerWithWeightsAndBiases<Convolution2dLayer>(optimizationViews,
201  baseLayer,
202  activationLayer,
203  activationDesc,
204  name);
205  untouched.erase(baseLayer->GetGuid());
206  untouched.erase(activationLayer->GetGuid());
207  }
208  }
209  else if (base.GetType() == LayerType::DepthwiseConvolution2d)
210  {
211  DepthwiseConvolution2dLayer* baseLayer =
212  PolymorphicDowncast<DepthwiseConvolution2dLayer*>(&base);
213 
214  Optional<TensorInfo> biases;
215 
216  if (baseLayer->GetParameters().m_BiasEnabled)
217  {
218  biases = baseLayer->m_Bias->GetTensorInfo();
219  }
220 
222  baseLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
223  activationLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
224  baseLayer->GetParameters(),
225  baseLayer->m_Weight->GetTensorInfo(),
226  biases,
227  &activationDesc);
228 
229  if (status)
230  {
231  FuseLayerWithWeightsAndBiases<DepthwiseConvolution2dLayer>(optimizationViews,
232  baseLayer,
233  activationLayer,
234  activationDesc,
235  name);
236  untouched.erase(baseLayer->GetGuid());
237  untouched.erase(activationLayer->GetGuid());
238  }
239  }
240  else if (base.GetType() == LayerType::FullyConnected)
241  {
242  FullyConnectedLayer* baseLayer = PolymorphicDowncast<FullyConnectedLayer*>(&base);
243 
245  baseLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
246  activationLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
247  baseLayer->m_Weight->GetTensorInfo(),
248  baseLayer->m_Bias->GetTensorInfo(),
249  baseLayer->GetParameters(),
250  &activationDesc);
251 
252  if (status)
253  {
254  FuseLayerWithWeightsAndBiases<FullyConnectedLayer>(optimizationViews,
255  baseLayer,
256  activationLayer,
257  activationDesc,
258  name);
259  untouched.erase(baseLayer->GetGuid());
260  untouched.erase(activationLayer->GetGuid());
261  }
262  }
263  else if (base.GetType() == LayerType::BatchNormalization)
264  {
265  BatchNormalizationLayer* baseLayer =
266  PolymorphicDowncast<BatchNormalizationLayer*>(&base);
267 
269  baseLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
270  activationLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
271  baseLayer->m_Mean->GetTensorInfo(),
272  baseLayer->m_Variance->GetTensorInfo(),
273  baseLayer->m_Beta->GetTensorInfo(),
274  baseLayer->m_Gamma->GetTensorInfo(),
275  baseLayer->GetParameters(),
276  &activationDesc);
277 
278  if (status)
279  {
280  BatchNormalizationLayer* replacementLayer =
281  FuseLayerWithParameters<BatchNormalizationLayer>(
282  optimizationViews,
283  baseLayer,
284  activationLayer,
285  activationDesc,
286  name);
287 
288  replacementLayer->m_Beta = std::move(baseLayer->m_Beta);
289  replacementLayer->m_Gamma = std::move(baseLayer->m_Gamma);
290  replacementLayer->m_Mean = std::move(baseLayer->m_Mean);
291  replacementLayer->m_Variance = std::move(baseLayer->m_Variance);
292  untouched.erase(baseLayer->GetGuid());
293  untouched.erase(activationLayer->GetGuid());
294  }
295  }
296  else if (base.GetType() == LayerType::Addition)
297  {
298  AdditionLayer* baseLayer = PolymorphicDowncast<AdditionLayer*>(&base);
299 
301  baseLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
302  baseLayer->GetInputSlot(1).GetConnectedOutputSlot()->GetTensorInfo(),
303  activationLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
304  &activationDesc);
305 
306  if (status)
307  {
308  FuseLayerWithoutParameters<AdditionLayer>(optimizationViews,
309  baseLayer,
310  activationLayer,
311  activationDesc,
312  name);
313  untouched.erase(baseLayer->GetGuid());
314  untouched.erase(activationLayer->GetGuid());
315  }
316  }
317  else if (base.GetType() == LayerType::Division)
318  {
319  DivisionLayer* baseLayer = PolymorphicDowncast<DivisionLayer*>(&base);
320 
322  baseLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
323  baseLayer->GetInputSlot(1).GetConnectedOutputSlot()->GetTensorInfo(),
324  activationLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
325  &activationDesc);
326 
327  if (status)
328  {
329  FuseLayerWithoutParameters<DivisionLayer>(optimizationViews,
330  baseLayer,
331  activationLayer,
332  activationDesc,
333  name);
334  untouched.erase(baseLayer->GetGuid());
335  untouched.erase(activationLayer->GetGuid());
336  }
337  }
338  else if (base.GetType() == LayerType::Multiplication)
339  {
340  MultiplicationLayer* baseLayer = PolymorphicDowncast<MultiplicationLayer*>(&base);
341 
343  baseLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
344  baseLayer->GetInputSlot(1).GetConnectedOutputSlot()->GetTensorInfo(),
345  activationLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
346  &activationDesc);
347 
348  if (status)
349  {
350  FuseLayerWithoutParameters<MultiplicationLayer>(optimizationViews,
351  baseLayer,
352  activationLayer,
353  activationDesc,
354  name);
355  untouched.erase(baseLayer->GetGuid());
356  untouched.erase(activationLayer->GetGuid());
357  }
358  }
359  else if (base.GetType() == LayerType::Subtraction)
360  {
361  SubtractionLayer* baseLayer = PolymorphicDowncast<SubtractionLayer*>(&base);
362 
364  baseLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
365  baseLayer->GetInputSlot(1).GetConnectedOutputSlot()->GetTensorInfo(),
366  activationLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
367  &activationDesc);
368 
369  if (status)
370  {
371  FuseLayerWithoutParameters<SubtractionLayer>(optimizationViews,
372  baseLayer,
373  activationLayer,
374  activationDesc,
375  name);
376  untouched.erase(baseLayer->GetGuid());
377  untouched.erase(activationLayer->GetGuid());
378  }
379  }
380  }
381  }
382  }
383  }
384  }
385  }
386 
387  if (optimizationViews.GetSubstitutions().empty())
388  {
389  optimizationViews.AddUntouchedSubgraph(SubgraphView(subgraph));
390  }
391  else
392  {
393  ReportUntouchedLayers(optimizationViews, untouched);
394  }
395 
396  return optimizationViews;
397 }
arm_compute::Status NeonBatchNormalizationValidate(const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &descriptor, const ActivationDescriptor *activationDescriptor)
void ReportUntouchedLayers(OptimizationViews &optimizationViews, std::map< LayerGuid, Layer *> untouched)
arm_compute::Status NeonDepthwiseConvolutionWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, const ActivationDescriptor *activationDescriptor)
arm_compute::Status NeonFullyConnectedWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const TensorInfo &biases, const FullyConnectedDescriptor &descriptor, const ActivationDescriptor *activationDescriptor)
arm_compute::Status NeonAdditionWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)
arm_compute::Status NeonSubtractionWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)
Status
enumeration
Definition: Types.hpp:26
arm_compute::Status NeonConvolution2dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, bool isFastMathEnabled, const ActivationDescriptor *activationDescriptor)
arm_compute::Status NeonDivisionWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)
arm_compute::Status NeonMultiplicationWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)

◆ RegisterTensorHandleFactories()

void RegisterTensorHandleFactories ( class TensorHandleFactoryRegistry & registry )
overridevirtual

(Optional) Register TensorHandleFactories Either this method or CreateMemoryManager() and IWorkloadFactory::CreateTensor()/IWorkloadFactory::CreateSubtensor() methods must be implemented.

Reimplemented from IBackendInternal.

Definition at line 404 of file NeonBackend.cpp.

References BaseMemoryManager::Offset, TensorHandleFactoryRegistry::RegisterFactory(), and TensorHandleFactoryRegistry::RegisterMemoryManager().

Referenced by NeonBackend::GetId().

405 {
406  auto memoryManager = std::make_shared<NeonMemoryManager>(std::make_unique<arm_compute::Allocator>(),
408 
409  registry.RegisterMemoryManager(memoryManager);
410  registry.RegisterFactory(std::make_unique<NeonTensorHandleFactory>(memoryManager));
411 }

The documentation for this class was generated from the following files: