ArmNN
 21.05
NeonTensorHandleFactory.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
#include "NeonTensorHandleFactory.hpp"
#include "NeonTensorHandle.hpp"

#include "Layer.hpp"

#include <memory>

#include <armnn/utility/NumericCast.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
15 namespace armnn
16 {
17 
19 
20 std::unique_ptr<ITensorHandle> NeonTensorHandleFactory::CreateSubTensorHandle(ITensorHandle& parent,
21  const TensorShape& subTensorShape,
22  const unsigned int* subTensorOrigin)
23  const
24 {
25  const arm_compute::TensorShape shape = armcomputetensorutils::BuildArmComputeTensorShape(subTensorShape);
26 
28  coords.set_num_dimensions(subTensorShape.GetNumDimensions());
29  for (unsigned int i = 0; i < subTensorShape.GetNumDimensions(); ++i)
30  {
31  // Arm compute indexes tensor coords in reverse order.
32  unsigned int revertedIndex = subTensorShape.GetNumDimensions() - i - 1;
33  coords.set(i, armnn::numeric_cast<int>(subTensorOrigin[revertedIndex]));
34  }
35 
36  const arm_compute::TensorShape parentShape = armcomputetensorutils::BuildArmComputeTensorShape(parent.GetShape());
37 
38  if (!::arm_compute::error_on_invalid_subtensor(__func__, __FILE__, __LINE__, parentShape, coords, shape))
39  {
40  return nullptr;
41  }
42 
43  return std::make_unique<NeonSubTensorHandle>(
44  PolymorphicDowncast<IAclTensorHandle*>(&parent), shape, coords);
45 }
46 
47 std::unique_ptr<ITensorHandle> NeonTensorHandleFactory::CreateTensorHandle(const TensorInfo& tensorInfo) const
48 {
49  return NeonTensorHandleFactory::CreateTensorHandle(tensorInfo, true);
50 }
51 
52 std::unique_ptr<ITensorHandle> NeonTensorHandleFactory::CreateTensorHandle(const TensorInfo& tensorInfo,
53  DataLayout dataLayout) const
54 {
55  return NeonTensorHandleFactory::CreateTensorHandle(tensorInfo, dataLayout, true);
56 }
57 
58 std::unique_ptr<ITensorHandle> NeonTensorHandleFactory::CreateTensorHandle(const TensorInfo& tensorInfo,
59  const bool IsMemoryManaged) const
60 {
61  auto tensorHandle = std::make_unique<NeonTensorHandle>(tensorInfo);
62  if (IsMemoryManaged)
63  {
64  tensorHandle->SetMemoryGroup(m_MemoryManager->GetInterLayerMemoryGroup());
65  }
66  // If we are not Managing the Memory then we must be importing
67  tensorHandle->SetImportEnabledFlag(!IsMemoryManaged);
68  tensorHandle->SetImportFlags(GetImportFlags());
69 
70  return tensorHandle;
71 }
72 
73 std::unique_ptr<ITensorHandle> NeonTensorHandleFactory::CreateTensorHandle(const TensorInfo& tensorInfo,
74  DataLayout dataLayout,
75  const bool IsMemoryManaged) const
76 {
77  auto tensorHandle = std::make_unique<NeonTensorHandle>(tensorInfo, dataLayout);
78  if (IsMemoryManaged)
79  {
80  tensorHandle->SetMemoryGroup(m_MemoryManager->GetInterLayerMemoryGroup());
81  }
82  // If we are not Managing the Memory then we must be importing
83  tensorHandle->SetImportEnabledFlag(!IsMemoryManaged);
84  tensorHandle->SetImportFlags(GetImportFlags());
85 
86  return tensorHandle;
87 }
88 
90 {
91  static const FactoryId s_Id(NeonTensorHandleFactoryId());
92  return s_Id;
93 }
94 
96 {
97  return GetIdStatic();
98 }
99 
101 {
102  return true;
103 }
104 
106 {
107  return true;
108 }
109 
111 {
112  return m_ExportFlags;
113 }
114 
116 {
117  return m_ImportFlags;
118 }
119 
120 std::vector<Capability> NeonTensorHandleFactory::GetCapabilities(const IConnectableLayer* layer,
121  const IConnectableLayer* connectedLayer,
122  CapabilityClass capabilityClass)
123 
124 {
125  IgnoreUnused(connectedLayer);
126  std::vector<Capability> capabilities;
127  if (capabilityClass == CapabilityClass::PaddingRequired)
128  {
129  auto search = paddingRequiredLayers.find((PolymorphicDowncast<const Layer*>(layer))->GetType());
130  if ( search != paddingRequiredLayers.end())
131  {
132  Capability paddingCapability(CapabilityClass::PaddingRequired, true);
133  capabilities.push_back(paddingCapability);
134  }
135  }
136  return capabilities;
137 }
138 
139 } // namespace armnn
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:62
DataLayout
Definition: Types.hpp:54
MemorySourceFlags GetImportFlags() const override
CapabilityClass
Capability class passed to the GetCapabilities function so that only the capabilities in the requested scope are calculated.
std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo) const override
std::array< unsigned int, MaxNumOfTensorDimensions > Coordinates
unsigned int MemorySourceFlags
Copyright (c) 2021 ARM Limited and Contributors.
void IgnoreUnused(Ts &&...)
const std::set< armnn::LayerType > paddingRequiredLayers
Capability of the TensorHandleFactory.
const FactoryId & GetId() const override
MemorySourceFlags GetExportFlags() const override
ITensorHandleFactory::FactoryId FactoryId
constexpr const char * NeonTensorHandleFactoryId()
virtual TensorShape GetShape() const =0
Get the number of elements for each dimension ordered from slowest iterating dimension to fastest iterating dimension.
std::vector< Capability > GetCapabilities(const IConnectableLayer *layer, const IConnectableLayer *connectedLayer, CapabilityClass capabilityClass) override
unsigned int GetNumDimensions() const
Function that returns the tensor rank.
Definition: Tensor.cpp:174
bool SupportsInPlaceComputation() const override
static const FactoryId & GetIdStatic()
std::unique_ptr< ITensorHandle > CreateSubTensorHandle(ITensorHandle &parent, const TensorShape &subTensorShape, const unsigned int *subTensorOrigin) const override