ArmNN
 24.02
SplitterLayer.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 #include "SplitterLayer.hpp"
6 
7 #include "LayerCloneBase.hpp"
8 
9 #include <armnn/TypesUtils.hpp>
12 
13 namespace armnn
14 {
15 
/// Constructor to create a SplitterLayer.
/// @param param ViewsDescriptor describing how the single input is carved into views;
///              the layer is created with one input slot and one output slot per view.
/// @param name  Optional name for the layer.
SplitterLayer::SplitterLayer(const ViewsDescriptor& param, const char* name)
    // 1 input, GetNumViews() outputs: each view becomes its own output slot.
    : LayerWithParameters(1, param.GetNumViews(), LayerType::Splitter, param, name)
{
}
20 
21 std::unique_ptr<IWorkload> SplitterLayer::CreateWorkload(const IWorkloadFactory& factory) const
22 {
23  SplitterQueueDescriptor descriptor;
24 
25  // Copies the window origins to the descriptor.
26  for (unsigned int i = 0; i < m_Param.GetNumViews(); ++i)
27  {
28  descriptor.m_ViewOrigins.emplace_back(
29  std::vector<unsigned int>(m_Param.GetViewOrigin(i), m_Param.GetViewOrigin(i) + m_Param.GetNumDimensions()));
30  }
31 
32  SetAdditionalInfo(descriptor);
33 
34  return factory.CreateWorkload(LayerType::Splitter, descriptor, PrepInfoAndDesc(descriptor));
35 }
36 
37 template<typename FactoryType>
38 void SplitterLayer::CreateTensors(const TensorHandleFactoryRegistry& registry,
39  const FactoryType& factory,
40  bool isMemoryManaged)
41 {
42  //If sub tensors are supported than all the "splitter" need to do is to
43  //set the outputs to be appropriate sub tensors of the input.
44  bool useSubTensors = factory.SupportsSubTensors();
45 
46  if (useSubTensors)
47  {
48  // Get outputHandler of previous layer
49  const OutputHandler& outputHandler = GetInputSlots()[0].GetConnectedOutputSlot()->GetOutputHandler();
50  const OutputSlot* slot = GetInputSlots()[0].GetConnectedOutputSlot();
51  const TensorInfo& parentInfo = GetInputSlot(0).GetTensorInfo();
52 
53  ITensorHandle* inputData = outputHandler.GetData();
54 
55  std::vector<std::unique_ptr<ITensorHandle>> subTensors;
56 
57  // check if split is along the x or y (2 innermost dimensions)
58  auto numberOfDimensions = m_Param.GetNumDimensions();
59 
60  // Compute split axis within class as aclCommon function causes header issues when included
61  auto ComputeSplitAxis = [&](const armnn::SplitterDescriptor& desc, const TensorShape& input)
62  {
63  unsigned int numSplit = desc.GetNumViews();
64  unsigned int numDimensions = desc.GetNumDimensions();
65  std::set<unsigned int> splitAxis;
66 
67  for (unsigned int i = 0; i < numSplit; ++i)
68  {
69  for (unsigned int dimIdx = 0; dimIdx < numDimensions; ++dimIdx)
70  {
71  if (desc.GetViewSizes(i)[dimIdx] != input[dimIdx])
72  {
73  splitAxis.insert(dimIdx);
74  }
75  }
76  }
77  return splitAxis;
78  };
79 
80  std::set<unsigned int> axis = ComputeSplitAxis(m_Param, parentInfo.GetShape());
81  std::set<unsigned int>::iterator axisIt = axis.begin();
82 
83  bool isOnXorY = m_Param.GetNumDimensions() >= 3 &&
84  ((*axisIt == numberOfDimensions - 1) ||
85  (*axisIt == numberOfDimensions - 2));
86 
87  //Creates the outputs as subtensors of the input.
88  for (unsigned int i = 0; i < m_Param.GetNumViews(); ++i)
89  {
90  const TensorInfo& info = m_OutputHandlers[i].GetTensorInfo();
91 
92  OutputSlot& outSlot = GetOutputSlot(i);
93  ITensorHandleFactory::FactoryId factoryId = outSlot.GetTensorHandleFactoryId();
94 
95  const unsigned int numOutputSlots = GetNumOutputSlots();
96 
97  // if split along x or y (2 innermost dimensions) and the next layers do not require padding
98  bool canUseSubTensorOnXorY = true;
99  bool isTensorHandleFactory = std::is_same<armnn::ITensorHandleFactory, FactoryType>::value;
100  if (isTensorHandleFactory)
101  {
102  for (unsigned int it = 0; it < numOutputSlots; ++it)
103  {
104  InputSlot* inputSlot = GetOutputSlot(it).GetConnection(0);
105  ITensorHandleFactory* handleFactory = registry.GetFactory(factoryId);
106  std::vector<Capability> capabilities =
107  handleFactory->GetCapabilities(&(inputSlot->GetOwningLayer()),
108  this,
110  if (isOnXorY)
111  {
112  canUseSubTensorOnXorY = false;
113  if (capabilities.empty())
114  {
115  canUseSubTensorOnXorY = true;
116  }
117  }
118 
119  if (!canUseSubTensorOnXorY)
120  {
121  break;
122  }
123  }
124  }
125 
126  auto CreateSubTensor = [&]()
127  {
128  // Make sure:
129  // 1) quantization parameters are in the same space
130  // 2) the same TensorHandleFactory is used for input and split layer output
131  // 3) the output does not go to a Constant layer or input layer
132  // 4) if split along x or y (2 innermost dimensions) and the next layers do not require padding
133  if (parentInfo.IsTypeSpaceMatch(info) && //(1)
134  factoryId == slot->GetTensorHandleFactoryId() && //(2)
137  canUseSubTensorOnXorY) //(4)
138  {
140  return factory.CreateSubTensorHandle(*inputData,
141  info.GetShape(),
142  this->m_Param.GetViewOrigin(i));
144  }
145  return std::unique_ptr<ITensorHandle>();
146  };
147 
148  auto subTensor = CreateSubTensor();
149  if (!subTensor)
150  {
151  useSubTensors = false;
152  break; //Failed to create a valid sub-tensor, so stop trying with the rest of the views.
153  }
154  subTensors.push_back(std::move(subTensor));
155  }
156 
157  if (useSubTensors)
158  {
159  unsigned int i = 0;
160  for (auto& subTensor : subTensors)
161  {
162  m_OutputHandlers[i].SetData(std::move(subTensor));
163  ++i;
164  }
165  }
166  }
167 
168  if (!useSubTensors)
169  {
170  for (unsigned int i = 0; i < m_Param.GetNumViews(); ++i)
171  {
172  m_OutputHandlers[i].CreateTensorHandles(factory, isMemoryManaged);
173  }
174  }
175 }
176 
178  const IWorkloadFactory& workloadFactory,
179  const bool isMemoryManaged)
180 {
181  OutputSlot& slot = GetOutputSlot(0);
183 
184  if (factoryId == ITensorHandleFactory::LegacyFactoryId)
185  {
186  CreateTensors(registry, workloadFactory, isMemoryManaged);
187  }
188  else
189  {
190  ITensorHandleFactory* handleFactory = registry.GetFactory(factoryId);
191  ARMNN_ASSERT(handleFactory);
192  CreateTensors(registry, *handleFactory, isMemoryManaged);
193  }
194 }
195 
197 {
198  return CloneBase<SplitterLayer>(graph, m_Param, GetName());
199 }
200 
201 std::vector<TensorShape> SplitterLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
202 {
203  IgnoreUnused(inputShapes);
204  ARMNN_ASSERT(inputShapes.size() == m_Param.GetNumViews());
205  std::vector<TensorShape> outShapes;
206  //Output shapes must match View shapes.
207  for (unsigned int viewIdx = 0; viewIdx < m_Param.GetNumViews(); viewIdx++)
208  {
209  const uint32_t* sizes = m_Param.GetViewSizes(viewIdx);
210  outShapes.push_back(TensorShape(m_Param.GetNumDimensions(), sizes));
211  }
212  return outShapes;
213 }
214 
216 {
217  std::for_each(BeginOutputSlots(), EndOutputSlots(), [&](OutputSlot& outputSlot)
218  {
220  });
221 
222  std::vector<TensorShape> views;
223  for (unsigned int viewIdx = 0; viewIdx < m_Param.GetNumViews(); viewIdx++)
224  {
225  const uint32_t* sizes = m_Param.GetViewSizes(viewIdx);
226  views.push_back(TensorShape(m_Param.GetNumDimensions(), sizes));
227  }
228 
229  auto inferredShapes = InferOutputShapes(views);
230 
231  ARMNN_ASSERT(inferredShapes.size() == m_Param.GetNumViews());
232 
233  for (unsigned int viewIdx = 0; viewIdx < m_Param.GetNumViews(); viewIdx++)
234  {
235  ValidateAndCopyShape(GetOutputSlot(viewIdx).GetTensorInfo().GetShape(),
236  inferredShapes[viewIdx],
238  "SplitterLayer",
239  viewIdx);
240  }
241 }
242 
244 {
245  strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
246 }
247 
248 } // namespace armnn
ARMNN_ASSERT
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14
armnn::ViewsDescriptor
A ViewsDescriptor for the SplitterLayer.
Definition: Descriptors.hpp:244
armnn::SplitterLayer::ExecuteStrategy
void ExecuteStrategy(IStrategy &strategy) const override
Apply a visitor to this layer.
Definition: SplitterLayer.cpp:243
armnn::LayerType::Splitter
@ Splitter
armnn::SplitterLayer
This layer represents a split operation.
Definition: SplitterLayer.hpp:13
armnn::InputSlot::GetOwningLayer
Layer & GetOwningLayer() const
Definition: Layer.hpp:53
armnn::OutputSlot::GetTensorInfo
const TensorInfo & GetTensorInfo() const override
Definition: Layer.cpp:92
armnn::Splitter
void Splitter(const SplitterQueueDescriptor &data, std::vector< ITensorHandle * > inputs, std::vector< ITensorHandle * > outputs)
Definition: Splitter.hpp:17
WorkloadData.hpp
armnn::TensorHandleFactoryRegistry
Definition: TensorHandleFactoryRegistry.hpp:23
armnn::OutputSlot
Definition: Layer.hpp:100
TypesUtils.hpp
armnn::TensorHandleFactoryRegistry::GetFactory
ITensorHandleFactory * GetFactory(ITensorHandleFactory::FactoryId id) const
Find a TensorHandleFactory by Id Returns nullptr if not found.
Definition: TensorHandleFactoryRegistry.cpp:39
armnn::TensorInfo
Definition: Tensor.hpp:152
armnn::SplitterLayer::SplitterLayer
SplitterLayer(const ViewsDescriptor &param, const char *name)
Constructor to create a SplitterLayer.
Definition: SplitterLayer.cpp:16
armnn::SplitterLayer::ValidateTensorShapesFromInputs
void ValidateTensorShapesFromInputs() override
Check if the input tensor shape(s) will lead to a valid configuration of SplitterLayer.
Definition: SplitterLayer.cpp:215
armnn::Layer::ValidateAndCopyShape
void ValidateAndCopyShape(const TensorShape &outputShape, const TensorShape &inferredShape, const ShapeInferenceMethod shapeInferenceMethod, const std::string &layerName, const unsigned int outputSlotIndex=0)
Definition: Layer.cpp:435
armnn::ITensorHandle
Definition: ITensorHandle.hpp:16
armnn::Layer::GetOutputSlot
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
Definition: Layer.hpp:339
ARMNN_NO_DEPRECATE_WARN_BEGIN
#define ARMNN_NO_DEPRECATE_WARN_BEGIN
Definition: Deprecated.hpp:33
armnn::Layer::m_OutputHandlers
std::vector< OutputHandler > m_OutputHandlers
Definition: Layer.hpp:440
armnn::Layer::GetInputSlots
const std::vector< InputSlot > & GetInputSlots() const
Definition: Layer.hpp:258
armnn::IStrategy
Definition: IStrategy.hpp:16
armnn::SplitterQueueDescriptor::m_ViewOrigins
std::vector< ViewOrigin > m_ViewOrigins
Definition: WorkloadData.hpp:124
armnn::ComputeSplitAxis
std::set< unsigned int > ComputeSplitAxis(const armnn::SplitterDescriptor &desc, const TensorShape &input)
Definition: ArmComputeUtils.hpp:246
armnn::Layer::GetInputSlot
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
Definition: Layer.hpp:337
armnn::LayerWithParameters< ViewsDescriptor >::GetParameters
const ViewsDescriptor & GetParameters() const override
Definition: LayerWithParameters.hpp:19
WorkloadFactory.hpp
armnn::OutputHandler::GetData
ITensorHandle * GetData() const
Gets the allocated tensor memory.
Definition: OutputHandler.hpp:46
armnn::LayerWithParameters
Definition: LayerWithParameters.hpp:14
armnn::SplitterLayer::CreateTensorHandles
virtual void CreateTensorHandles(const TensorHandleFactoryRegistry &registry, const IWorkloadFactory &factory, const bool IsMemoryManaged=true) override
Set the outputs to be appropriate sub tensors of the input if sub tensors are supported otherwise cre...
Definition: SplitterLayer.cpp:177
armnn::Layer::GetName
const char * GetName() const override
Returns the name of the layer.
Definition: Layer.hpp:332
armnn::ITensorHandleFactory::LegacyFactoryId
static const FactoryId LegacyFactoryId
Definition: ITensorHandleFactory.hpp:50
armnn::ITensorHandleFactory::GetCapabilities
virtual std::vector< Capability > GetCapabilities(const IConnectableLayer *layer, const IConnectableLayer *connectedLayer, CapabilityClass capabilityClass)
Definition: ITensorHandleFactory.hpp:93
armnn::InputSlot::GetTensorInfo
const TensorInfo & GetTensorInfo() const override
Gets the TensorInfo for this InputSlot.
Definition: Layer.cpp:592
armnn::ViewsDescriptor::GetViewSizes
const uint32_t * GetViewSizes(uint32_t idx) const
Get the view sizes at the int value idx.
Definition: Descriptors.cpp:346
armnn::SplitterQueueDescriptor
Definition: WorkloadData.hpp:111
armnn::TensorShape
Definition: Tensor.hpp:20
armnn::SplitterLayer::Clone
SplitterLayer * Clone(Graph &graph) const override
Creates a dynamically-allocated copy of this layer.
Definition: SplitterLayer.cpp:196
armnn::LayerWithParameters< ViewsDescriptor >::m_Param
ViewsDescriptor m_Param
The parameters for the layer (not including tensor-valued weights etc.).
Definition: LayerWithParameters.hpp:52
armnn::LayerWithParameters< ViewsDescriptor >::PrepInfoAndDesc
WorkloadInfo PrepInfoAndDesc(QueueDescriptor &descriptor) const
Helper function to reduce duplication in *Layer::CreateWorkload.
Definition: LayerWithParameters.hpp:44
armnn::TensorInfo::IsTypeSpaceMatch
bool IsTypeSpaceMatch(const TensorInfo &other) const
Check that the types are the same and, if quantize, that the quantization parameters are the same.
Definition: Tensor.cpp:432
armnn::IWorkloadFactory
Definition: WorkloadFactory.hpp:22
armnn::Layer::GetNumOutputSlots
unsigned int GetNumOutputSlots() const override
Returns the number of connectable output slots.
Definition: Layer.hpp:335
armnn::Layer::VerifyShapeInferenceType
void VerifyShapeInferenceType(const TensorShape &outputShape, ShapeInferenceMethod shapeInferenceMethod)
Definition: Layer.cpp:504
armnn::GetTensorInfo
const TensorInfo & GetTensorInfo(const ITensorHandle *tensorHandle)
float32 helpers
Definition: RefWorkloadUtils.hpp:33
armnn::ITensorHandleFactory
Definition: ITensorHandleFactory.hpp:46
armnn::Layer::SetAdditionalInfo
void SetAdditionalInfo(QueueDescriptor &descriptor) const
Definition: Layer.cpp:287
armnn::OutputHandler
Definition: OutputHandler.hpp:28
armnn::BoostLogSeverityMapping::info
@ info
armnn::CapabilityClass::PaddingRequired
@ PaddingRequired
armnn::Layer::GetType
LayerType GetType() const override
Returns the armnn::LayerType of this layer.
Definition: Layer.hpp:286
armnn::SplitterLayer::InferOutputShapes
std::vector< TensorShape > InferOutputShapes(const std::vector< TensorShape > &inputShapes) const override
By default returns inputShapes if the number of inputs are equal to number of outputs,...
Definition: SplitterLayer.cpp:201
armnn::Layer::BeginOutputSlots
std::vector< OutputSlot >::iterator BeginOutputSlots()
Definition: Layer.hpp:266
armnn::TensorInfo::GetShape
const TensorShape & GetShape() const
Definition: Tensor.hpp:193
ARMNN_NO_DEPRECATE_WARN_END
#define ARMNN_NO_DEPRECATE_WARN_END
Definition: Deprecated.hpp:34
armnn::IgnoreUnused
void IgnoreUnused(Ts &&...)
Definition: IgnoreUnused.hpp:14
armnn::ViewsDescriptor::GetNumDimensions
uint32_t GetNumDimensions() const
Get the number of dimensions.
Definition: Descriptors.cpp:306
armnn::OutputSlot::GetTensorHandleFactoryId
ITensorHandleFactory::FactoryId GetTensorHandleFactoryId() const
Definition: Layer.cpp:205
armnn
Copyright (c) 2021 ARM Limited and Contributors.
Definition: 01_00_quick_start.dox:6
armnn::ViewsDescriptor::GetNumViews
uint32_t GetNumViews() const
Get the number of views.
Definition: Descriptors.cpp:301
armnn::ITensorHandleFactory::FactoryId
std::string FactoryId
Definition: ITensorHandleFactory.hpp:49
armnn::LayerType::Input
@ Input
armnn::Layer::m_ShapeInferenceMethod
ShapeInferenceMethod m_ShapeInferenceMethod
Definition: Layer.hpp:441
armnn::LayerType
LayerType
When adding a new layer, adapt also the LastLayer enum value in the enum class LayerType below.
Definition: Types.hpp:491
armnn::OutputSlot::GetConnection
const InputSlot * GetConnection(unsigned int index) const override
Definition: Layer.cpp:75
armnn::Layer::EndOutputSlots
std::vector< OutputSlot >::iterator EndOutputSlots()
Definition: Layer.hpp:267
armnn::Graph
Definition: Graph.hpp:30
armnn::IWorkloadFactory::CreateWorkload
virtual std::unique_ptr< IWorkload > CreateWorkload(LayerType type, const QueueDescriptor &descriptor, const WorkloadInfo &info) const =0
Backends should implement their own CreateWorkload function with a switch statement.
armnn::IStrategy::ExecuteStrategy
virtual void ExecuteStrategy(const IConnectableLayer *layer, const armnn::BaseDescriptor &descriptor, const std::vector< armnn::ConstTensor > &constants, const char *name, const armnn::LayerBindingId id=0)=0
LayerCloneBase.hpp
SplitterLayer.hpp
armnn::LayerType::Constant
@ Constant
armnn::ViewsDescriptor::GetViewOrigin
const uint32_t * GetViewOrigin(uint32_t idx) const
Get the view origin at the int value idx.
Definition: Descriptors.cpp:311
armnn::SplitterLayer::CreateWorkload
virtual std::unique_ptr< IWorkload > CreateWorkload(const IWorkloadFactory &factory) const override
Makes a workload for the Splitter type.
Definition: SplitterLayer.cpp:21