NeonTensorHandleFactory.cpp
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "NeonTensorHandleFactory.hpp"
#include "NeonTensorHandle.hpp"

#include "Layer.hpp"

#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/utility/NumericCast.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>

namespace armnn
{

using FactoryId = ITensorHandleFactory::FactoryId;

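// Creates a view over a region of the parent tensor. The sub-tensor origin is translated into
// arm_compute::Coordinates (ACL stores dimensions in reverse order) and the requested region is
// validated against the parent shape; nullptr is returned when the validation fails.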
std::unique_ptr<ITensorHandle> NeonTensorHandleFactory::CreateSubTensorHandle(ITensorHandle& parent,
                                                                              const TensorShape& subTensorShape,
                                                                              const unsigned int* subTensorOrigin)
                                                                              const
{
    const arm_compute::TensorShape shape = armcomputetensorutils::BuildArmComputeTensorShape(subTensorShape);

    arm_compute::Coordinates coords;
    coords.set_num_dimensions(subTensorShape.GetNumDimensions());
    for (unsigned int i = 0; i < subTensorShape.GetNumDimensions(); ++i)
    {
        // Arm compute indexes tensor coords in reverse order.
        unsigned int revertedIndex = subTensorShape.GetNumDimensions() - i - 1;
        coords.set(i, armnn::numeric_cast<int>(subTensorOrigin[revertedIndex]));
    }

    const arm_compute::TensorShape parentShape = armcomputetensorutils::BuildArmComputeTensorShape(parent.GetShape());

    if (!::arm_compute::error_on_invalid_subtensor(__func__, __FILE__, __LINE__, parentShape, coords, shape))
    {
        return nullptr;
    }

    return std::make_unique<NeonSubTensorHandle>(
        PolymorphicDowncast<IAclTensorHandle*>(&parent), shape, coords);
}

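// The overloads without an IsMemoryManaged argument default to memory-managed handles.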
std::unique_ptr<ITensorHandle> NeonTensorHandleFactory::CreateTensorHandle(const TensorInfo& tensorInfo) const
{
    return NeonTensorHandleFactory::CreateTensorHandle(tensorInfo, true);
}

std::unique_ptr<ITensorHandle> NeonTensorHandleFactory::CreateTensorHandle(const TensorInfo& tensorInfo,
                                                                           DataLayout dataLayout) const
{
    return NeonTensorHandleFactory::CreateTensorHandle(tensorInfo, dataLayout, true);
}

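// A memory-managed handle is placed in the backend's inter-layer memory group; otherwise the
// handle is marked as importable so that externally allocated memory can be wrapped instead.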
std::unique_ptr<ITensorHandle> NeonTensorHandleFactory::CreateTensorHandle(const TensorInfo& tensorInfo,
                                                                           const bool IsMemoryManaged) const
{
    auto tensorHandle = std::make_unique<NeonTensorHandle>(tensorInfo);
    if (IsMemoryManaged)
    {
        tensorHandle->SetMemoryGroup(m_MemoryManager->GetInterLayerMemoryGroup());
    }
    // If we are not Managing the Memory then we must be importing
    tensorHandle->SetImportEnabledFlag(!IsMemoryManaged);
    tensorHandle->SetImportFlags(GetImportFlags());

    return tensorHandle;
}

std::unique_ptr<ITensorHandle> NeonTensorHandleFactory::CreateTensorHandle(const TensorInfo& tensorInfo,
                                                                           DataLayout dataLayout,
                                                                           const bool IsMemoryManaged) const
{
    auto tensorHandle = std::make_unique<NeonTensorHandle>(tensorInfo, dataLayout);
    if (IsMemoryManaged)
    {
        tensorHandle->SetMemoryGroup(m_MemoryManager->GetInterLayerMemoryGroup());
    }
    // If we are not Managing the Memory then we must be importing
    tensorHandle->SetImportEnabledFlag(!IsMemoryManaged);
    tensorHandle->SetImportFlags(GetImportFlags());

    return tensorHandle;
}

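// Identifier used to register and look up this factory within the backend.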
const FactoryId& NeonTensorHandleFactory::GetIdStatic()
{
    static const FactoryId s_Id(NeonTensorHandleFactoryId());
    return s_Id;
}

const FactoryId& NeonTensorHandleFactory::GetId() const
{
    return GetIdStatic();
}

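// The Neon backend supports both in-place computation and sub-tensor views.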
bool NeonTensorHandleFactory::SupportsInPlaceComputation() const
{
    return true;
}

bool NeonTensorHandleFactory::SupportsSubTensors() const
{
    return true;
}

MemorySourceFlags NeonTensorHandleFactory::GetExportFlags() const
{
    return m_ExportFlags;
}

MemorySourceFlags NeonTensorHandleFactory::GetImportFlags() const
{
    return m_ImportFlags;
}

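// Only the PaddingRequired capability class is handled: layer types listed in
// paddingRequiredLayers are reported as requiring padding, all other queries
// return an empty capability list.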
std::vector<Capability> NeonTensorHandleFactory::GetCapabilities(const IConnectableLayer* layer,
                                                                 const IConnectableLayer* connectedLayer,
                                                                 CapabilityClass capabilityClass)

{
    IgnoreUnused(connectedLayer);
    std::vector<Capability> capabilities;
    if (capabilityClass == CapabilityClass::PaddingRequired)
    {
        auto search = paddingRequiredLayers.find((PolymorphicDowncast<const Layer*>(layer))->GetType());
        if (search != paddingRequiredLayers.end())
        {
            Capability paddingCapability(CapabilityClass::PaddingRequired, true);
            capabilities.push_back(paddingCapability);
        }
    }
    return capabilities;
}

} // namespace armnn