ArmNN 20.08
SplitterLayer.cpp
//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "SplitterLayer.hpp"

#include "LayerCloneBase.hpp"

#include <armnn/TypesUtils.hpp>
#include <backendsCommon/WorkloadData.hpp>
#include <backendsCommon/WorkloadFactory.hpp>

namespace armnn
{

SplitterLayer::SplitterLayer(const ViewsDescriptor& param, const char* name)
    : LayerWithParameters(1, param.GetNumViews(), LayerType::Splitter, param, name)
{
}

std::unique_ptr<IWorkload> SplitterLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
    SplitterQueueDescriptor descriptor;

    // Copies the window origins to the descriptor.
    for (unsigned int i = 0; i < m_Param.GetNumViews(); ++i)
    {
        descriptor.m_ViewOrigins.emplace_back(std::vector<unsigned int>(
            m_Param.GetViewOrigin(i), m_Param.GetViewOrigin(i) + m_Param.GetNumDimensions()));
    }

    return factory.CreateSplitter(descriptor, PrepInfoAndDesc(descriptor));
}
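
// Illustration (added comment): for a 2-D input split into two views of size
// {4, 3} at origins {0, 0} and {0, 3}, the loop above fills
// descriptor.m_ViewOrigins with the origin vectors {0, 0} and {0, 3}, one per view.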

template<typename FactoryType>
void SplitterLayer::CreateTensors(const TensorHandleFactoryRegistry& registry,
                                  const FactoryType& factory,
                                  bool isMemoryManaged)
{
    // If sub-tensors are supported, then all the "splitter" needs to do is
    // set the outputs to be appropriate sub-tensors of the input.
    bool useSubTensors = factory.SupportsSubTensors();

    if (useSubTensors)
    {
        // Get the outputHandler of the previous layer.
        const OutputHandler& outputHandler = GetInputSlots()[0].GetConnectedOutputSlot()->GetOutputHandler();
        const OutputSlot* slot = GetInputSlots()[0].GetConnectedOutputSlot();

        const TensorInfo& parentInfo = outputHandler.GetTensorInfo();

        ITensorHandle* inputData = outputHandler.GetData();

        std::vector<std::unique_ptr<ITensorHandle>> subTensors;

        // Check if the split is along the x or y axis (the 2 innermost dimensions).
        auto numberOfDimensions = m_Param.GetNumDimensions();

        // Compute the split axis within this class, as including the aclCommon function causes header issues.
        auto ComputeSplitAxis = [&](const armnn::SplitterDescriptor& desc, const TensorShape& input)
        {
            unsigned int numSplit = desc.GetNumViews();
            unsigned int numDimensions = desc.GetNumDimensions();
            std::set<unsigned int> splitAxis;

            // Any dimension where a view's size differs from the input's size is a split axis.
            for (unsigned int i = 0; i < numSplit; ++i)
            {
                for (unsigned int dimIdx = 0; dimIdx < numDimensions; ++dimIdx)
                {
                    if (desc.GetViewSizes(i)[dimIdx] != input[dimIdx])
                    {
                        splitAxis.insert(dimIdx);
                    }
                }
            }
            return splitAxis;
        };

        std::set<unsigned int> axis = ComputeSplitAxis(m_Param, parentInfo.GetShape());
        std::set<unsigned int>::iterator axisIt = axis.begin();

        bool isOnXorY = m_Param.GetNumDimensions() >= 3 &&
                        ((*axisIt == numberOfDimensions - 1) ||
                         (*axisIt == numberOfDimensions - 2));
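
        // Illustration (added comment): splitting an input of shape [2, 2, 4] into two
        // views of size {2, 2, 2} leaves only dimension 2 differing from the input, so
        // ComputeSplitAxis returns {2}; with numberOfDimensions == 3 that is the
        // innermost axis and isOnXorY is true.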

        // Creates the outputs as sub-tensors of the input.
        for (unsigned int i = 0; i < m_Param.GetNumViews(); ++i)
        {
            const TensorInfo& info = m_OutputHandlers[i].GetTensorInfo();

            OutputSlot& outSlot = GetOutputSlot(i);
            ITensorHandleFactory::FactoryId factoryId = outSlot.GetTensorHandleFactoryId();

            const unsigned int numOutputSlots = GetNumOutputSlots();

            // Sub-tensors can be used along x or y (the 2 innermost dimensions) only if
            // the next layers do not require padding.
            bool canUseSubTensorOnXorY = true;
            bool isTensorHandleFactory = std::is_same<armnn::ITensorHandleFactory, FactoryType>::value;
            if (isTensorHandleFactory)
            {
                for (unsigned int it = 0; it < numOutputSlots; ++it)
                {
                    InputSlot* inputSlot = GetOutputSlot(it).GetConnection(0);
                    ITensorHandleFactory* handleFactory = registry.GetFactory(factoryId);
                    std::vector<Capability> capabilities =
                        handleFactory->GetCapabilities(&(inputSlot->GetOwningLayer()),
                                                       this,
                                                       CapabilityClass::PaddingRequired);
                    if (isOnXorY)
                    {
                        canUseSubTensorOnXorY = false;
                        if (capabilities.empty())
                        {
                            canUseSubTensorOnXorY = true;
                        }
                    }

                    if (!canUseSubTensorOnXorY)
                    {
                        break;
                    }
                }
            }
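
            // Note (added comment): the loop above queries each layer consuming this
            // layer's outputs for the PaddingRequired capability; when the split is
            // along one of the two innermost dimensions, any consumer that reports it
            // rules out sub-tensors.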

            auto CreateSubTensor = [&]()
            {
                // Make sure:
                // 1) quantization parameters are in the same space
                // 2) the same TensorHandleFactory is used for the input and the splitter layer's output
                // 3) the output does not go to a Constant layer or an Input layer
                // 4) if split along x or y (the 2 innermost dimensions), the next layers do not require padding
                if (parentInfo.IsTypeSpaceMatch(info) && //(1)
                    factoryId == slot->GetTensorHandleFactoryId() && //(2)
                    GetOutputSlot(i).GetConnection(0)->GetOwningLayer().GetType() != LayerType::Constant && //(3)
                    GetOutputSlot(i).GetConnection(0)->GetOwningLayer().GetType() != LayerType::Input && //(3)
                    canUseSubTensorOnXorY) //(4)
                {
                    ARMNN_NO_DEPRECATE_WARN_BEGIN
                    return factory.CreateSubTensorHandle(*inputData,
                                                         info.GetShape(),
                                                         this->m_Param.GetViewOrigin(i));
                    ARMNN_NO_DEPRECATE_WARN_END
                }
                return std::unique_ptr<ITensorHandle>();
            };

            auto subTensor = CreateSubTensor();
            if (!subTensor)
            {
                useSubTensors = false;
                break; // Failed to create a valid sub-tensor, so stop trying with the rest of the views.
            }
            subTensors.push_back(std::move(subTensor));
        }

        if (useSubTensors)
        {
            unsigned int i = 0;
            for (auto& subTensor : subTensors)
            {
                m_OutputHandlers[i].SetData(std::move(subTensor));
                ++i;
            }
        }
    }

    // Fall back to creating a standalone tensor handle for each view.
    if (!useSubTensors)
    {
        for (unsigned int i = 0; i < m_Param.GetNumViews(); ++i)
        {
            m_OutputHandlers[i].CreateTensorHandles(factory, isMemoryManaged);
        }
    }
}

void SplitterLayer::CreateTensorHandles(const TensorHandleFactoryRegistry& registry,
                                        const IWorkloadFactory& workloadFactory,
                                        const bool isMemoryManaged)
{
    OutputSlot& slot = GetOutputSlot(0);
    ITensorHandleFactory::FactoryId factoryId = slot.GetTensorHandleFactoryId();

    if (factoryId == ITensorHandleFactory::LegacyFactoryId)
    {
        CreateTensors(registry, workloadFactory, isMemoryManaged);
    }
    else
    {
        ITensorHandleFactory* handleFactory = registry.GetFactory(factoryId);
        ARMNN_ASSERT(handleFactory);
        CreateTensors(registry, *handleFactory, isMemoryManaged);
    }
}

SplitterLayer* SplitterLayer::Clone(Graph& graph) const
{
    return CloneBase<SplitterLayer>(graph, m_Param, GetName());
}

std::vector<TensorShape> SplitterLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
    IgnoreUnused(inputShapes);
    ARMNN_ASSERT(inputShapes.size() == m_Param.GetNumViews());
    std::vector<TensorShape> outShapes;
    // Output shapes must match the view shapes.
    for (unsigned int viewIdx = 0; viewIdx < m_Param.GetNumViews(); viewIdx++)
    {
        const uint32_t* sizes = m_Param.GetViewSizes(viewIdx);
        outShapes.push_back(TensorShape(m_Param.GetNumDimensions(), sizes));
    }
    return outShapes;
}
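
// Illustration (added comment): for a descriptor holding two views of sizes
// {2, 3} and {2, 1}, InferOutputShapes returns the TensorShapes (2,3) and (2,1);
// output shapes always mirror the view sizes.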

void SplitterLayer::ValidateTensorShapesFromInputs()
{
    std::for_each(BeginOutputSlots(), EndOutputSlots(), [&](OutputSlot& outputSlot)
    {
        VerifyShapeInferenceType(outputSlot.GetTensorInfo().GetShape(), m_ShapeInferenceMethod);
    });

    std::vector<TensorShape> views;
    for (unsigned int viewIdx = 0; viewIdx < m_Param.GetNumViews(); viewIdx++)
    {
        const uint32_t* sizes = m_Param.GetViewSizes(viewIdx);
        views.push_back(TensorShape(m_Param.GetNumDimensions(), sizes));
    }

    auto inferredShapes = InferOutputShapes(views);

    ARMNN_ASSERT(inferredShapes.size() == m_Param.GetNumViews());

    for (unsigned int viewIdx = 0; viewIdx < m_Param.GetNumViews(); viewIdx++)
    {
        ValidateAndCopyShape(GetOutputSlot(viewIdx).GetTensorInfo().GetShape(),
                             inferredShapes[viewIdx],
                             m_ShapeInferenceMethod,
                             "SplitterLayer",
                             viewIdx);
    }
}

void SplitterLayer::Accept(ILayerVisitor& visitor) const
{
    visitor.VisitSplitterLayer(this, GetParameters(), GetName());
}

} // namespace armnn
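
Usage sketch: the following is a minimal, hypothetical example (not part of
SplitterLayer.cpp) of how a ViewsDescriptor is typically configured before a
splitter layer is added to a network; it splits a [2, 4] tensor into two
[2, 2] views along dimension 1. The helper name MakeSplitDescriptor is
illustrative only.

#include <cstdint>
#include <armnn/Descriptors.hpp>
#include <armnn/INetwork.hpp>

// Builds a descriptor describing two views over a 2-D input of shape [2, 4].
armnn::ViewsDescriptor MakeSplitDescriptor()
{
    // Hypothetical helper: 2 views, 2 dimensions each.
    armnn::ViewsDescriptor desc(/*numViews=*/2, /*numDimensions=*/2);
    for (uint32_t view = 0; view < 2; ++view)
    {
        desc.SetViewOriginCoord(view, 0, 0);        // every view starts at row 0
        desc.SetViewOriginCoord(view, 1, view * 2); // views start at columns 0 and 2
        desc.SetViewSize(view, 0, 2);               // each view spans 2 rows
        desc.SetViewSize(view, 1, 2);               // and 2 columns
    }
    return desc;
}

// Adding the layer creates one output slot per view (see the constructor above):
//     armnn::INetworkPtr network = armnn::INetwork::Create();
//     armnn::IConnectableLayer* splitter =
//         network->AddSplitterLayer(MakeSplitDescriptor(), "splitter");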