ArmNN
 20.08
NeonTensorHandleFactory.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
7 #include "NeonTensorHandle.hpp"
8 
9 #include "Layer.hpp"
10 
13 
14 namespace armnn
15 {
16 
18 
19 std::unique_ptr<ITensorHandle> NeonTensorHandleFactory::CreateSubTensorHandle(ITensorHandle& parent,
20  const TensorShape& subTensorShape,
21  const unsigned int* subTensorOrigin)
22  const
23 {
24  const arm_compute::TensorShape shape = armcomputetensorutils::BuildArmComputeTensorShape(subTensorShape);
25 
27  coords.set_num_dimensions(subTensorShape.GetNumDimensions());
28  for (unsigned int i = 0; i < subTensorShape.GetNumDimensions(); ++i)
29  {
30  // Arm compute indexes tensor coords in reverse order.
31  unsigned int revertedIndex = subTensorShape.GetNumDimensions() - i - 1;
32  coords.set(i, boost::numeric_cast<int>(subTensorOrigin[revertedIndex]));
33  }
34 
35  const arm_compute::TensorShape parentShape = armcomputetensorutils::BuildArmComputeTensorShape(parent.GetShape());
36 
37  if (!::arm_compute::error_on_invalid_subtensor(__func__, __FILE__, __LINE__, parentShape, coords, shape))
38  {
39  return nullptr;
40  }
41 
42  return std::make_unique<NeonSubTensorHandle>(
43  PolymorphicDowncast<IAclTensorHandle*>(&parent), shape, coords);
44 }
45 
46 std::unique_ptr<ITensorHandle> NeonTensorHandleFactory::CreateTensorHandle(const TensorInfo& tensorInfo) const
47 {
48  return NeonTensorHandleFactory::CreateTensorHandle(tensorInfo, true);
49 }
50 
51 std::unique_ptr<ITensorHandle> NeonTensorHandleFactory::CreateTensorHandle(const TensorInfo& tensorInfo,
52  DataLayout dataLayout) const
53 {
54  return NeonTensorHandleFactory::CreateTensorHandle(tensorInfo, dataLayout, true);
55 }
56 
57 std::unique_ptr<ITensorHandle> NeonTensorHandleFactory::CreateTensorHandle(const TensorInfo& tensorInfo,
58  const bool IsMemoryManaged) const
59 {
60  auto tensorHandle = std::make_unique<NeonTensorHandle>(tensorInfo);
61  if (IsMemoryManaged)
62  {
63  tensorHandle->SetMemoryGroup(m_MemoryManager->GetInterLayerMemoryGroup());
64  }
65  // If we are not Managing the Memory then we must be importing
66  tensorHandle->SetImportEnabledFlag(!IsMemoryManaged);
67  tensorHandle->SetImportFlags(GetImportFlags());
68 
69  return tensorHandle;
70 }
71 
72 std::unique_ptr<ITensorHandle> NeonTensorHandleFactory::CreateTensorHandle(const TensorInfo& tensorInfo,
73  DataLayout dataLayout,
74  const bool IsMemoryManaged) const
75 {
76  auto tensorHandle = std::make_unique<NeonTensorHandle>(tensorInfo, dataLayout);
77  if (IsMemoryManaged)
78  {
79  tensorHandle->SetMemoryGroup(m_MemoryManager->GetInterLayerMemoryGroup());
80  }
81  // If we are not Managing the Memory then we must be importing
82  tensorHandle->SetImportEnabledFlag(!IsMemoryManaged);
83  tensorHandle->SetImportFlags(GetImportFlags());
84 
85  return tensorHandle;
86 }
87 
89 {
90  static const FactoryId s_Id(NeonTensorHandleFactoryId());
91  return s_Id;
92 }
93 
95 {
96  return GetIdStatic();
97 }
98 
100 {
101  return true;
102 }
103 
105 {
106  return m_ExportFlags;
107 }
108 
110 {
111  return m_ImportFlags;
112 }
113 
114 std::vector<Capability> NeonTensorHandleFactory::GetCapabilities(const IConnectableLayer* layer,
115  const IConnectableLayer* connectedLayer,
116  CapabilityClass capabilityClass)
117 
118 {
119  IgnoreUnused(connectedLayer);
120  std::vector<Capability> capabilities;
121  if (capabilityClass == CapabilityClass::PaddingRequired)
122  {
123  auto search = paddingRequiredLayers.find((PolymorphicDowncast<const Layer*>(layer))->GetType());
124  if ( search != paddingRequiredLayers.end())
125  {
126  Capability paddingCapability(CapabilityClass::PaddingRequired, true);
127  capabilities.push_back(paddingCapability);
128  }
129  }
130  return capabilities;
131 }
132 
133 } // namespace armnn
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:61
DataLayout
Definition: Types.hpp:49
MemorySourceFlags GetImportFlags() const override
CapabilityClass
Capability class to pass to the GetCapabilities function so that only the capability in the requested scope is reported.
std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo) const override
std::array< unsigned int, MaxNumOfTensorDimensions > Coordinates
unsigned int MemorySourceFlags
Copyright (c) 2020 ARM Limited.
void IgnoreUnused(Ts &&...)
const std::set< armnn::LayerType > paddingRequiredLayers
Capability of the TensorHandleFactory.
const FactoryId & GetId() const override
MemorySourceFlags GetExportFlags() const override
ITensorHandleFactory::FactoryId FactoryId
constexpr const char * NeonTensorHandleFactoryId()
virtual TensorShape GetShape() const =0
Get the number of elements for each dimension ordered from slowest iterating dimension to fastest iterating dimension.
std::vector< Capability > GetCapabilities(const IConnectableLayer *layer, const IConnectableLayer *connectedLayer, CapabilityClass capabilityClass) override
unsigned int GetNumDimensions() const
Function that returns the tensor rank.
Definition: Tensor.cpp:175
static const FactoryId & GetIdStatic()
std::unique_ptr< ITensorHandle > CreateSubTensorHandle(ITensorHandle &parent, const TensorShape &subTensorShape, const unsigned int *subTensorOrigin) const override