ArmNN 23.02
SplitterLayer.cpp
//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "SplitterLayer.hpp"

#include "LayerCloneBase.hpp"

#include <armnn/TypesUtils.hpp>
#include <armnn/backends/WorkloadData.hpp>
#include <armnn/backends/WorkloadFactory.hpp>

namespace armnn
{

SplitterLayer::SplitterLayer(const ViewsDescriptor& param, const char* name)
    : LayerWithParameters(1, param.GetNumViews(), LayerType::Splitter, param, name)
{
}

std::unique_ptr<IWorkload> SplitterLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
    SplitterQueueDescriptor descriptor;

    // Copies the window origins to the descriptor.
    for (unsigned int i = 0; i < m_Param.GetNumViews(); ++i)
    {
        descriptor.m_ViewOrigins.emplace_back(
            std::vector<unsigned int>(m_Param.GetViewOrigin(i), m_Param.GetViewOrigin(i) + m_Param.GetNumDimensions()));
    }

    SetAdditionalInfo(descriptor);

    return factory.CreateWorkload(LayerType::Splitter, descriptor, PrepInfoAndDesc(descriptor));
}
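
// Illustrative note (not from the original file): the view origins copied above come
// from the ViewsDescriptor the layer was built with. For example, splitting a [4, 2]
// tensor into two [2, 2] halves along dimension 0 uses origins {0, 0} and {2, 0},
// configured roughly like this (SetViewSize/SetViewOriginCoord are the public
// ViewsDescriptor setters):
//
//     armnn::ViewsDescriptor views(/*numViews=*/2, /*numDimensions=*/2);
//     for (unsigned int d = 0; d < 2; ++d)
//     {
//         views.SetViewSize(0, d, 2);    // view 0: shape [2, 2] at origin {0, 0}
//         views.SetViewSize(1, d, 2);    // view 1: shape [2, 2] ...
//     }
//     views.SetViewOriginCoord(1, 0, 2); // ... at origin {2, 0}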

template<typename FactoryType>
void SplitterLayer::CreateTensors(const TensorHandleFactoryRegistry& registry,
                                  const FactoryType& factory,
                                  bool isMemoryManaged)
{
    // If sub-tensors are supported, then all the splitter needs to do is to
    // set its outputs to be appropriate sub-tensors of the input.
    bool useSubTensors = factory.SupportsSubTensors();

    if (useSubTensors)
    {
        // Get the outputHandler of the previous layer.
        const OutputHandler& outputHandler = GetInputSlots()[0].GetConnectedOutputSlot()->GetOutputHandler();
        const OutputSlot* slot = GetInputSlots()[0].GetConnectedOutputSlot();

        const TensorInfo& parentInfo = outputHandler.GetTensorInfo();

        ITensorHandle* inputData = outputHandler.GetData();

        std::vector<std::unique_ptr<ITensorHandle>> subTensors;

        // Check whether the split is along x or y (the 2 innermost dimensions).
        auto numberOfDimensions = m_Param.GetNumDimensions();

        // Compute the split axis within this class, as including the aclCommon function causes header issues.
        auto ComputeSplitAxis = [&](const armnn::SplitterDescriptor& desc, const TensorShape& input)
        {
            unsigned int numSplit = desc.GetNumViews();
            unsigned int numDimensions = desc.GetNumDimensions();
            std::set<unsigned int> splitAxis;

            // A dimension is a split axis if any view's size differs from the input size in that dimension.
            for (unsigned int i = 0; i < numSplit; ++i)
            {
                for (unsigned int dimIdx = 0; dimIdx < numDimensions; ++dimIdx)
                {
                    if (desc.GetViewSizes(i)[dimIdx] != input[dimIdx])
                    {
                        splitAxis.insert(dimIdx);
                    }
                }
            }
            return splitAxis;
        };

        std::set<unsigned int> axis = ComputeSplitAxis(m_Param, parentInfo.GetShape());
        std::set<unsigned int>::iterator axisIt = axis.begin();

        bool isOnXorY = m_Param.GetNumDimensions() >= 3 &&
                        ((*axisIt == numberOfDimensions - 1) ||
                         (*axisIt == numberOfDimensions - 2));

        // Creates the outputs as sub-tensors of the input.
        for (unsigned int i = 0; i < m_Param.GetNumViews(); ++i)
        {
            const TensorInfo& info = m_OutputHandlers[i].GetTensorInfo();

            OutputSlot& outSlot = GetOutputSlot(i);
            ITensorHandleFactory::FactoryId factoryId = outSlot.GetTensorHandleFactoryId();

            const unsigned int numOutputSlots = GetNumOutputSlots();

            // If splitting along x or y (the 2 innermost dimensions), the next layers must not require padding.
            bool canUseSubTensorOnXorY = true;
            bool isTensorHandleFactory = std::is_same<armnn::ITensorHandleFactory, FactoryType>::value;
            if (isTensorHandleFactory)
            {
                for (unsigned int it = 0; it < numOutputSlots; ++it)
                {
                    InputSlot* inputSlot = GetOutputSlot(it).GetConnection(0);
                    ITensorHandleFactory* handleFactory = registry.GetFactory(factoryId);
                    std::vector<Capability> capabilities =
                        handleFactory->GetCapabilities(&(inputSlot->GetOwningLayer()),
                                                       this,
                                                       CapabilityClass::PaddingRequired);
                    if (isOnXorY)
                    {
                        canUseSubTensorOnXorY = false;
                        if (capabilities.empty())
                        {
                            canUseSubTensorOnXorY = true;
                        }
                    }

                    if (!canUseSubTensorOnXorY)
                    {
                        break;
                    }
                }
            }

            auto CreateSubTensor = [&]()
            {
                // Make sure:
                // 1) quantization parameters are in the same space
                // 2) the same TensorHandleFactory is used for the input and the splitter output
                // 3) the output does not go to a Constant layer or an Input layer
                // 4) if split along x or y (the 2 innermost dimensions), the next layers do not require padding
                if (parentInfo.IsTypeSpaceMatch(info) && //(1)
                    factoryId == slot->GetTensorHandleFactoryId() && //(2)
                    GetOutputSlot(i).GetConnection(0)->GetOwningLayer().GetType() != LayerType::Constant && //(3)
                    GetOutputSlot(i).GetConnection(0)->GetOwningLayer().GetType() != LayerType::Input && //(3)
                    canUseSubTensorOnXorY) //(4)
                {
                    ARMNN_NO_DEPRECATE_WARN_BEGIN
                    return factory.CreateSubTensorHandle(*inputData,
                                                         info.GetShape(),
                                                         this->m_Param.GetViewOrigin(i));
                    ARMNN_NO_DEPRECATE_WARN_END
                }
                return std::unique_ptr<ITensorHandle>();
            };

            auto subTensor = CreateSubTensor();
            if (!subTensor)
            {
                useSubTensors = false;
                break; // Failed to create a valid sub-tensor, so stop trying with the rest of the views.
            }
            subTensors.push_back(std::move(subTensor));
        }

        if (useSubTensors)
        {
            unsigned int i = 0;
            for (auto& subTensor : subTensors)
            {
                m_OutputHandlers[i].SetData(std::move(subTensor));
                ++i;
            }
        }
    }

    if (!useSubTensors)
    {
        for (unsigned int i = 0; i < m_Param.GetNumViews(); ++i)
        {
            m_OutputHandlers[i].CreateTensorHandles(factory, isMemoryManaged);
        }
    }
}
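
// Worked example (illustrative, not from the original file): splitting an input of
// shape [1, 4, 16, 16] into two views of shape [1, 2, 16, 16] means only dimension 1
// differs from the input, so ComputeSplitAxis returns {1}; since 1 is neither
// numberOfDimensions - 1 nor numberOfDimensions - 2, isOnXorY is false and the
// padding-capability check above never blocks sub-tensor use. Splitting the same
// input into two [1, 4, 16, 8] views instead yields splitAxis {3}, the innermost (x)
// dimension, so isOnXorY is true and sub-tensors are only used when no consuming
// layer reports the PaddingRequired capability.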

void SplitterLayer::CreateTensorHandles(const TensorHandleFactoryRegistry& registry,
                                        const IWorkloadFactory& workloadFactory,
                                        const bool isMemoryManaged)
{
    OutputSlot& slot = GetOutputSlot(0);
    ITensorHandleFactory::FactoryId factoryId = slot.GetTensorHandleFactoryId();

    if (factoryId == ITensorHandleFactory::LegacyFactoryId)
    {
        CreateTensors(registry, workloadFactory, isMemoryManaged);
    }
    else
    {
        ITensorHandleFactory* handleFactory = registry.GetFactory(factoryId);
        ARMNN_ASSERT(handleFactory);
        CreateTensors(registry, *handleFactory, isMemoryManaged);
    }
}

SplitterLayer* SplitterLayer::Clone(Graph& graph) const
{
    return CloneBase<SplitterLayer>(graph, m_Param, GetName());
}

std::vector<TensorShape> SplitterLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
    IgnoreUnused(inputShapes);
    ARMNN_ASSERT(inputShapes.size() == m_Param.GetNumViews());
    std::vector<TensorShape> outShapes;
    // Output shapes must match the view shapes.
    for (unsigned int viewIdx = 0; viewIdx < m_Param.GetNumViews(); viewIdx++)
    {
        const uint32_t* sizes = m_Param.GetViewSizes(viewIdx);
        outShapes.push_back(TensorShape(m_Param.GetNumDimensions(), sizes));
    }
    return outShapes;
}
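
// Example (illustrative, not from the original file): for a 2-dimensional
// ViewsDescriptor holding two views of sizes {2, 2} and {2, 2}, InferOutputShapes
// returns two TensorShapes of [2, 2], one per view; the inputShapes argument is
// only used to assert that its count matches the number of views.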

void SplitterLayer::ValidateTensorShapesFromInputs()
{
    std::for_each(BeginOutputSlots(), EndOutputSlots(), [&](OutputSlot& outputSlot)
    {
        VerifyShapeInferenceType(outputSlot.GetTensorInfo().GetShape(), m_ShapeInferenceMethod);
    });

    std::vector<TensorShape> views;
    for (unsigned int viewIdx = 0; viewIdx < m_Param.GetNumViews(); viewIdx++)
    {
        const uint32_t* sizes = m_Param.GetViewSizes(viewIdx);
        views.push_back(TensorShape(m_Param.GetNumDimensions(), sizes));
    }

    auto inferredShapes = InferOutputShapes(views);

    ARMNN_ASSERT(inferredShapes.size() == m_Param.GetNumViews());

    for (unsigned int viewIdx = 0; viewIdx < m_Param.GetNumViews(); viewIdx++)
    {
        ValidateAndCopyShape(GetOutputSlot(viewIdx).GetTensorInfo().GetShape(),
                             inferredShapes[viewIdx],
                             m_ShapeInferenceMethod,
                             "SplitterLayer",
                             viewIdx);
    }
}

void SplitterLayer::ExecuteStrategy(IStrategy& strategy) const
{
    strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
}

} // namespace armnn
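
The listing above shows only the layer internals. As a usage illustration, here is a minimal sketch (not part of SplitterLayer.cpp; the shapes, names, and binding id are assumptions) of how a splitter is typically created through the public INetwork API, which is what ultimately constructs a SplitterLayer from a ViewsDescriptor:

#include <armnn/ArmNN.hpp>

int main()
{
    using namespace armnn;

    INetworkPtr network = INetwork::Create();

    // Split a [4, 2] Float32 tensor into two [2, 2] halves along dimension 0.
    ViewsDescriptor views(/*numViews=*/2, /*numDimensions=*/2);
    for (unsigned int d = 0; d < 2; ++d)
    {
        views.SetViewSize(0, d, 2);
        views.SetViewSize(1, d, 2);
    }
    views.SetViewOriginCoord(1, 0, 2); // the second view starts at row 2

    IConnectableLayer* input    = network->AddInputLayer(0, "input");
    IConnectableLayer* splitter = network->AddSplitterLayer(views, "splitter");

    input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 4, 2 }, DataType::Float32));
    input->GetOutputSlot(0).Connect(splitter->GetInputSlot(0));

    // Each view becomes one output slot on the splitter.
    splitter->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 2, 2 }, DataType::Float32));
    splitter->GetOutputSlot(1).SetTensorInfo(TensorInfo({ 2, 2 }, DataType::Float32));

    // Adding output layers, optimizing, and loading the network are omitted here.
    return 0;
}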