ArmNN 24.02
ITensorHandleFactory.hpp
//
// Copyright © 2017, 2019-2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include "ITensorHandle.hpp"

#include <armnn/MemorySources.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>
#include <armnn/utility/IgnoreUnused.hpp>

#include <memory>
#include <string>
#include <vector>

namespace armnn
{
class IConnectableLayer;

/// Capability class to calculate in the GetCapabilities function
/// so that only the capabilities in scope can be chosen for calculation
enum class CapabilityClass
{
    PaddingRequired = 1,
    FallbackImportDisabled = 2,

    // add new enum values here

    CapabilityClassMax = 254
};

/// Capability of the TensorHandleFactory
struct Capability
{
    Capability(CapabilityClass capabilityClass, bool value)
        : m_CapabilityClass(capabilityClass)
        , m_Value(value)
    {}

    CapabilityClass m_CapabilityClass;
    bool m_Value;
};

class ITensorHandleFactory
{
public:
    using FactoryId = std::string;
    static const FactoryId LegacyFactoryId;   /// Use the workload factory to create the tensor handle
    static const FactoryId DeferredFactoryId; /// Some TensorHandleFactory decisions are deferred to run-time

    virtual ~ITensorHandleFactory() {}

    virtual std::unique_ptr<ITensorHandle> CreateSubTensorHandle(ITensorHandle& parent,
                                                                 TensorShape const& subTensorShape,
                                                                 unsigned int const* subTensorOrigin) const = 0;

    virtual std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& tensorInfo) const = 0;

    virtual std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& tensorInfo,
                                                              DataLayout dataLayout) const = 0;

    /// Utility Functions for backends which require TensorHandles to have unmanaged memory.
    /// These should be overloaded if required to facilitate direct import of input tensors
    /// and direct export of output tensors.
    virtual std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& tensorInfo,
                                                              const bool IsMemoryManaged) const
    {
        IgnoreUnused(IsMemoryManaged);
        return CreateTensorHandle(tensorInfo);
    }

    virtual std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& tensorInfo,
                                                              DataLayout dataLayout,
                                                              const bool IsMemoryManaged) const
    {
        IgnoreUnused(IsMemoryManaged);
        return CreateTensorHandle(tensorInfo, dataLayout);
    }

    virtual const FactoryId& GetId() const = 0;

    virtual bool SupportsInPlaceComputation() const { return false; }

    virtual bool SupportsSubTensors() const = 0;

    virtual bool SupportsMapUnmap() const { return true; }

    virtual MemorySourceFlags GetExportFlags() const { return 0; }
    virtual MemorySourceFlags GetImportFlags() const { return 0; }

    virtual std::vector<Capability> GetCapabilities(const IConnectableLayer* layer,
                                                    const IConnectableLayer* connectedLayer,
                                                    CapabilityClass capabilityClass)
    {
        IgnoreUnused(layer);
        IgnoreUnused(connectedLayer);
        IgnoreUnused(capabilityClass);
        return std::vector<Capability>();
    }
};

enum class EdgeStrategy
{
    Undefined,           /// No strategy has been defined. Used internally to verify integrity of optimizations.
    DirectCompatibility, /// Destination backend can work directly with tensors on source backend.
    ExportToTarget,      /// Source backends tensor data can be exported to destination backend tensor without copy.
    CopyToTarget         /// Copy contents from source backend tensor to destination backend tensor.
};

} //namespace armnn
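
For orientation, below is a minimal sketch of a backend-specific factory deriving from this interface. It is not part of the ArmNN source: the class name, factory id string, and stubbed return values are illustrative assumptions. Only the pure virtual members are overridden; the memory-managed overloads and GetCapabilities keep the defaults provided by the header above.

// Hypothetical example, not part of the ArmNN source tree.
#include <armnn/backends/ITensorHandleFactory.hpp>

namespace sample
{

// Illustrative only: a real backend would return instances of its own
// ITensorHandle implementation instead of nullptr.
class SampleTensorHandleFactory : public armnn::ITensorHandleFactory
{
public:
    static const FactoryId& GetIdStatic()
    {
        // Factory ids are plain strings; this name is made up for the example.
        static const FactoryId s_Id("SampleTensorHandleFactory");
        return s_Id;
    }

    const FactoryId& GetId() const override { return GetIdStatic(); }

    // This example backend does not support sub-tensor views.
    bool SupportsSubTensors() const override { return false; }

    std::unique_ptr<armnn::ITensorHandle> CreateSubTensorHandle(
        armnn::ITensorHandle& /*parent*/,
        armnn::TensorShape const& /*subTensorShape*/,
        unsigned int const* /*subTensorOrigin*/) const override
    {
        return nullptr;
    }

    std::unique_ptr<armnn::ITensorHandle> CreateTensorHandle(
        const armnn::TensorInfo& /*tensorInfo*/) const override
    {
        return nullptr; // a real backend allocates and returns its own handle here
    }

    std::unique_ptr<armnn::ITensorHandle> CreateTensorHandle(
        const armnn::TensorInfo& tensorInfo,
        armnn::DataLayout /*dataLayout*/) const override
    {
        return CreateTensorHandle(tensorInfo);
    }
};

} // namespace sample

A factory like this is typically registered by the backend's ITensorHandleFactory registry so the optimizer can match it against the EdgeStrategy chosen for each connection.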