ArmNN
 21.02
ITensorHandleFactory.hpp
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include "ITensorHandle.hpp"

#include <armnn/IRuntime.hpp>
#include <armnn/MemorySources.hpp>
#include <armnn/Types.hpp>
#include <armnn/utility/IgnoreUnused.hpp>

namespace armnn
{

/// Capability class to calculate in the GetCapabilities function
/// so that only the capability in the scope can be chosen to calculate
enum class CapabilityClass
{
    PaddingRequired = 1,

    // add new enum values here

    CapabilityClassMax = 254
};

/// Capability of the TensorHandleFactory
struct Capability
{
    Capability(CapabilityClass capabilityClass, bool value)
        : m_CapabilityClass(capabilityClass)
        , m_Value(value)
    {}

    CapabilityClass m_CapabilityClass;
    bool m_Value;
};

class ITensorHandleFactory
{
public:
    using FactoryId = std::string;
    static const FactoryId LegacyFactoryId;   /// Use the workload factory to create the tensor handle
    static const FactoryId DeferredFactoryId; /// Some TensorHandleFactory decisions are deferred to run-time

    virtual ~ITensorHandleFactory() {}

    virtual std::unique_ptr<ITensorHandle> CreateSubTensorHandle(ITensorHandle& parent,
                                                                 TensorShape const& subTensorShape,
                                                                 unsigned int const* subTensorOrigin) const = 0;

    virtual std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& tensorInfo) const = 0;

    virtual std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& tensorInfo,
                                                              DataLayout dataLayout) const = 0;

    /// Utility Functions for backends which require TensorHandles to have unmanaged memory.
    /// These should be overloaded if required to facilitate direct import of input tensors
    /// and direct export of output tensors.
    virtual std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& tensorInfo,
                                                              const bool IsMemoryManaged) const
    {
        IgnoreUnused(IsMemoryManaged);
        return CreateTensorHandle(tensorInfo);
    }

    virtual std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& tensorInfo,
                                                              DataLayout dataLayout,
                                                              const bool IsMemoryManaged) const
    {
        IgnoreUnused(IsMemoryManaged);
        return CreateTensorHandle(tensorInfo, dataLayout);
    }

    virtual const FactoryId& GetId() const = 0;

    virtual bool SupportsInPlaceComputation() const { return false; }

    virtual bool SupportsSubTensors() const = 0;

    virtual bool SupportsMapUnmap() const final { return true; }

    virtual MemorySourceFlags GetExportFlags() const { return 0; }
    virtual MemorySourceFlags GetImportFlags() const { return 0; }

    virtual std::vector<Capability> GetCapabilities(const IConnectableLayer* layer,
                                                    const IConnectableLayer* connectedLayer,
                                                    CapabilityClass capabilityClass)
    {
        IgnoreUnused(layer);
        IgnoreUnused(connectedLayer);
        IgnoreUnused(capabilityClass);
        return std::vector<Capability>();
    }
};

enum class EdgeStrategy
{
    Undefined,            /// No strategy has been defined. Used internally to verify integrity of optimizations.
    DirectCompatibility,  /// Destination backend can work directly with tensors on the source backend.
    ExportToTarget,       /// Source backend's tensor data can be exported to the destination backend tensor without a copy.
    CopyToTarget          /// Copy contents from the source backend tensor to the destination backend tensor.
};

} // namespace armnn
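
The default CreateTensorHandle overloads above ignore the IsMemoryManaged flag, and GetImportFlags/GetExportFlags return 0, so backends that support zero-copy input import or output export override them. The snippet below is a minimal usage sketch, not part of Arm NN: SupportsMallocImport and MakeOutputHandle are hypothetical helpers, and the include path assumes the public backend headers of Arm NN 21.02; adjust it if the header lives elsewhere in your tree.

// Usage sketch only: these helpers are illustrative, not part of Arm NN.
#include <armnn/backends/ITensorHandleFactory.hpp> // assumed path for Arm NN 21.02
#include <armnn/MemorySources.hpp>
#include <armnn/Tensor.hpp>

#include <memory>

// True if the factory can import caller-owned, malloc-allocated memory for input tensors.
bool SupportsMallocImport(const armnn::ITensorHandleFactory& factory)
{
    return armnn::CheckFlag(factory.GetImportFlags(), armnn::MemorySource::Malloc);
}

// Create an output handle, asking for unmanaged memory when the factory can export it.
std::unique_ptr<armnn::ITensorHandle> MakeOutputHandle(const armnn::ITensorHandleFactory& factory,
                                                       const armnn::TensorInfo& info)
{
    const bool canExport = armnn::CheckFlag(factory.GetExportFlags(), armnn::MemorySource::Malloc);
    // IsMemoryManaged == false keeps the handle's memory out of the backend's memory
    // manager, which is what direct export of output tensors requires.
    return factory.CreateTensorHandle(info, /*IsMemoryManaged=*/ !canExport);
}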
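
GetCapabilities is scoped by CapabilityClass: only capabilities belonging to the requested class are reported, and the default implementation returns an empty vector. As a hedged sketch (RequiresPadding is an illustrative helper, not Arm NN code, and the layer pointers would normally come from the graph optimizer), a caller could check whether padding is required between two connected layers like this:

// Illustrative helper, not part of Arm NN.
#include <armnn/backends/ITensorHandleFactory.hpp> // assumed path for Arm NN 21.02
#include <armnn/INetwork.hpp>

#include <vector>

bool RequiresPadding(armnn::ITensorHandleFactory& factory,
                     const armnn::IConnectableLayer* producer,
                     const armnn::IConnectableLayer* consumer)
{
    // Only capabilities in the PaddingRequired class are reported for this query.
    const std::vector<armnn::Capability> capabilities =
        factory.GetCapabilities(producer, consumer, armnn::CapabilityClass::PaddingRequired);

    for (const armnn::Capability& capability : capabilities)
    {
        if (capability.m_CapabilityClass == armnn::CapabilityClass::PaddingRequired && capability.m_Value)
        {
            return true;
        }
    }
    return false;
}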