ArmNN 20.05
Layer.cpp
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "Layer.hpp"

#include "Graph.hpp"
#include <ProfilingService.hpp>

#include <boost/cast.hpp>
#include <boost/format.hpp>

#include <numeric>

namespace armnn
{
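// Inserts a layer with exactly one input slot and one output slot between this input slot and
// the output slot it is currently connected to, rewiring the connections and copying the tensor info.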
void InputSlot::Insert(Layer& layer)
{
    ARMNN_ASSERT(layer.GetNumOutputSlots() == 1);

    OutputSlot* const prevSlot = GetConnectedOutputSlot();

    if (prevSlot != nullptr)
    {
        // Disconnects parent from this.
        prevSlot->Disconnect(*this);

        // Connects inserted layer to parent.
        ARMNN_ASSERT(layer.GetNumInputSlots() == 1);
        int idx = prevSlot->Connect(layer.GetInputSlot(0));
        prevSlot->SetEdgeStrategy(boost::numeric_cast<unsigned int>(idx), EdgeStrategy::Undefined);

        // Sets tensor info for inserted layer.
        const TensorInfo& tensorInfo = prevSlot->GetTensorInfo();
        layer.GetOutputHandler().SetTensorInfo(tensorInfo);
    }

    // Connects inserted layer to this.
    layer.GetOutputSlot(0).Connect(*this);
    layer.GetOutputSlot(0).SetEdgeStrategy(0, EdgeStrategy::Undefined);
}

const InputSlot* OutputSlot::GetConnection(unsigned int index) const
{
    ValidateConnectionIndex(index);
    return m_Connections[index];
}

InputSlot* OutputSlot::GetConnection(unsigned int index)
{
    ValidateConnectionIndex(index);
    return m_Connections[index];
}
void OutputSlot::SetTensorInfo(const TensorInfo& tensorInfo)
{
    GetOutputHandler().SetTensorInfo(tensorInfo);
}

const TensorInfo& OutputSlot::GetTensorInfo() const
{
    return GetOutputHandler().GetTensorInfo();
}

bool OutputSlot::IsTensorInfoSet() const
{
    return GetOutputHandler().IsTensorInfoSet();
}

bool OutputSlot::ValidateTensorShape(const TensorShape& shape) const
{
    ARMNN_ASSERT_MSG(IsTensorInfoSet(), "TensorInfo must be set in order to validate the shape.");
    return shape == m_OutputHandler.GetTensorInfo().GetShape();
}
int OutputSlot::Connect(InputSlot& destination)
{
    destination.SetConnection(this);
    m_Connections.push_back(&destination);
    m_EdgeStrategies.push_back(EdgeStrategy::Undefined);
    return boost::numeric_cast<int>(m_Connections.size() - 1);
}
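// Removes the link to the given input slot; m_Connections and m_EdgeStrategies are kept in step
// so each per-connection strategy stays aligned with its connection.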
void OutputSlot::Disconnect(InputSlot& slot)
{
    slot.SetConnection(nullptr);
    auto it = std::find(m_Connections.begin(), m_Connections.end(), &slot);

    if (it == m_Connections.end())
    {
        return;
    }

    auto idx = std::distance(m_Connections.begin(), it);
    m_Connections.erase(std::remove(m_Connections.begin(), m_Connections.end(), &slot), m_Connections.end());

    m_EdgeStrategies.erase(m_EdgeStrategies.begin() + idx);
}
void OutputSlot::DisconnectAll()
{
    while (GetNumConnections() > 0)
    {
        InputSlot& connection = *GetConnection(0);
        Disconnect(connection);
    }
}
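// Re-points every connection of this slot at 'destination' and copies the tensor info across;
// only valid while all edge strategies are still EdgeStrategy::Undefined.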
void OutputSlot::MoveAllConnections(OutputSlot& destination)
{
    while (GetNumConnections() > 0)
    {
        ARMNN_ASSERT_MSG(m_EdgeStrategies[0] == EdgeStrategy::Undefined,
                         "Cannot move connections once memory strategies have been established.");

        InputSlot& connection = *GetConnection(0);
        Disconnect(connection);
        destination.Connect(connection);
        destination.GetOutputHandler().SetTensorInfo(GetOutputHandler().GetTensorInfo());
    }
}
unsigned int OutputSlot::CalculateIndexOnOwner() const
{
    for (unsigned int i = 0; i < GetOwningLayer().GetNumOutputSlots(); i++)
    {
        if (GetOwningLayer().GetOutputSlot(i) == (*this))
        {
            return i;
        }
    }
    ARMNN_ASSERT_MSG(false, "Did not find slot on owner.");
    return 0; // Error
}

bool OutputSlot::operator==(const OutputSlot& other) const
{
    bool isSame = other.GetNumConnections() == GetNumConnections();
    if (!isSame)
    {
        return false;
    }

    for (unsigned int i = 0; i < GetNumConnections(); i++)
    {
        isSame &= other.GetConnection(i) == GetConnection(i);
    }
    return isSame;
}
void OutputSlot::ValidateConnectionIndex(unsigned int index) const
{
    if (boost::numeric_cast<std::size_t>(index) >= m_Connections.size())
    {
        throw InvalidArgumentException(
            boost::str(boost::format("GetConnection: Invalid index %1% provided") % index));
    }
}
LayerGuid OutputSlot::GetOwningLayerGuid() const
{
    return GetOwningLayer().GetGuid();
}

void OutputSlot::SetTensorHandleFactory(const ITensorHandleFactory::FactoryId& id)
{
    m_TensorHandleFactoryId = id;
}

ITensorHandleFactory::FactoryId OutputSlot::GetTensorHandleFactoryId() const
{
    return m_TensorHandleFactoryId;
}
void OutputSlot::SetEdgeStrategy(unsigned int connectionIndex, EdgeStrategy strategy)
{
    m_EdgeStrategies[connectionIndex] = strategy;
}

EdgeStrategy OutputSlot::GetEdgeStrategyForConnection(unsigned int connectionIdx) const
{
    return m_EdgeStrategies[connectionIdx];
}
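// Main constructor: creates one InputSlot per input and pairs each OutputSlot with its
// OutputHandler. The DataLayout argument is currently unused (see IgnoreUnused below).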
Layer::Layer(unsigned int numInputSlots,
             unsigned int numOutputSlots,
             LayerType type,
             DataLayout layout,
             const char* name)
: m_OutputHandlers(numOutputSlots)
, m_LayerName(name ? name : "")
, m_Type(type)
, m_BackendId()
, m_BackendHint(EmptyOptional())
, m_Guid(profiling::ProfilingService::GetNextGuid())
{
    IgnoreUnused(layout);
    m_InputSlots.reserve(numInputSlots);
    for (unsigned int i = 0; i < numInputSlots; ++i)
    {
        m_InputSlots.emplace_back(*this, i);
    }

    m_OutputSlots.reserve(numOutputSlots);
    for (unsigned int i = 0; i < numOutputSlots; ++i)
    {
        m_OutputSlots.emplace_back(*this, m_OutputHandlers[i]);
    }
}
Layer::Layer(unsigned int numInputSlots,
             unsigned int numOutputSlots,
             LayerType type,
             const char* name)
: Layer(numInputSlots, numOutputSlots, type, DataLayout::NCHW, name)
{
}
void Layer::CollectWorkloadInputs(WorkloadDataCollector& dataCollector) const
{
    for (auto&& inputSlot : GetInputSlots())
    {
        // The graph must be well-formed at this point.
        ARMNN_ASSERT(inputSlot.GetConnection());
        const OutputHandler& outputHandler = inputSlot.GetConnectedOutputSlot()->GetOutputHandler();
        dataCollector.Push(outputHandler.GetData(), outputHandler.GetTensorInfo());
    }
}
void Layer::CollectWorkloadOutputs(WorkloadDataCollector& dataCollector) const
{
    for (auto&& outputHandler : m_OutputHandlers)
    {
        outputHandler.CollectWorkloadOutputs(dataCollector);
    }
}
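// Creates the output tensor handles for this layer: slots still on the legacy factory id use the
// workload factory, otherwise the handle factory registered for the slot is looked up and used.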
void Layer::CreateTensorHandles(const TensorHandleFactoryRegistry& registry,
                                const IWorkloadFactory& workloadFactory,
                                const bool IsMemoryManaged)
{
    for (unsigned int idx=0; idx < GetNumOutputSlots(); idx++)
    {
        OutputSlot& slot = GetOutputSlot(idx);
        ITensorHandleFactory::FactoryId factoryId = slot.GetTensorHandleFactoryId();

        OutputHandler& handler = GetOutputHandler(idx);
        if (factoryId == ITensorHandleFactory::LegacyFactoryId)
        {
            handler.CreateTensorHandles(workloadFactory, IsMemoryManaged);
        }
        else
        {
            ITensorHandleFactory* handleFactory = registry.GetFactory(factoryId);
            ARMNN_ASSERT(handleFactory);
            handler.CreateTensorHandles(*handleFactory, IsMemoryManaged);
        }
    }
}
void Layer::ReleaseConstantData()
{
    // Now free up the static data.
    OperateOnConstantTensors([](std::unique_ptr<ScopedCpuTensorHandle>& handle)
    {
        handle.reset(nullptr);
    });
}
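// Returns the data type the layer operates on: taken from the first input connection, or from
// the first output slot for layers that have no inputs.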
DataType Layer::GetDataType() const
{
    if (GetNumInputSlots() > 0) // Ignore the input layer.
    {
        return GetInputSlot(0).GetConnection()->GetTensorInfo().GetDataType();
    }
    return GetOutputSlot(0).GetTensorInfo().GetDataType();
}
void Layer::ResetPriority() const
{
    m_Priority = 0;
    m_Visiting = false;
}
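// Lazily computes the layer's priority used to order the graph: Input layers get the lowest value,
// Output layers the highest, and every other layer gets max(parent priority) + 1. m_Visiting guards
// against cycles while parents are walked recursively; ResetPriority() clears the cached value.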
LayerPriority Layer::GetPriority() const
{
    constexpr LayerPriority inputPrio = std::numeric_limits<LayerPriority>::lowest();
    constexpr LayerPriority outputPrio = std::numeric_limits<LayerPriority>::max();

    if (GetType() == LayerType::Input)
    {
        m_Priority = inputPrio;
    }
    else if (GetType() == LayerType::Output)
    {
        m_Priority = outputPrio;
    }
    else if (m_Priority == 0)
    {
        if (m_Visiting)
        {
            throw GraphValidationException("Graph has circular dependencies: cannot walk");
        }

        auto maxPrio = [](const LayerPriority prio, const InputSlot& slot) -> LayerPriority
        {
            const OutputSlot* outputSlot = slot.GetConnectedOutputSlot();
            if (outputSlot)
            {
                const Layer& input = outputSlot->GetOwningLayer();
                return std::max(prio, input.GetPriority());
            }
            else
            {
                // Unconnected input slot.
                return prio;
            }
        };

        m_Visiting = true;
        LayerPriority parentPrio = std::accumulate(GetInputSlots().cbegin(), GetInputSlots().cend(), 0U, maxPrio);
        m_Visiting = false;

        if (parentPrio >= outputPrio)
        {
            throw GraphValidationException("Graph has too many edges");
        }

        m_Priority = parentPrio + 1U;
    }

    return m_Priority;
}
void Layer::VerifyLayerConnections(unsigned int expectedConnections, const CheckLocation& location) const
{
    ARMNN_ASSERT(GetNumInputSlots() == expectedConnections);

    for (unsigned int i=0; i<expectedConnections; ++i)
    {
        if (GetInputSlot(i).GetConnection() == nullptr)
        {
            throw LayerValidationException(
                boost::str(
                    boost::format(
                        "Input connection #%1% must be connected "
                        "for %2% layer %3% %4%")
                        % i
                        % GetLayerTypeAsCString(this->GetType())
                        % GetNameStr()
                        % location.AsString()));
        }
        if (!GetInputSlot(i).GetConnection()->IsTensorInfoSet())
        {
            throw LayerValidationException(
                boost::str(
                    boost::format(
                        "TensorInfo of Input connection #%1% must be set on connected OutputSlot for "
                        "%2% layer %3% %4%")
                        % i
                        % GetLayerTypeAsCString(this->GetType())
                        % GetNameStr()
                        % location.AsString()));
        }
    }
}
std::vector<TensorShape> Layer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
    ARMNN_ASSERT(GetNumInputSlots() != 0);
    ARMNN_ASSERT(GetNumOutputSlots() != 0);

    // By default we return what we got, meaning the output shape(s) are the same as the input(s).
    // This only works if the number of inputs and outputs are the same. Since we are in the Layer
    // base class, this means the implementation needs to be overridden in the specific layers for
    // the other cases. So the missing implementation justifies the UnimplementedException.
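    // For example, an element-wise layer such as Activation has one input and one output of the
    // same shape, so returning the input shapes unchanged is the right answer for it.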

    if (GetNumInputSlots() != GetNumOutputSlots())
    {
        throw UnimplementedException(
            boost::str(
                boost::format(
                    "Default implementation for InferOutputShapes can only be used for "
                    "layers with the same number of input and output slots. This doesn't "
                    "hold for %1% layer %2% (#inputs=%3% #outputs=%4%) %5%")
                    % GetLayerTypeAsCString(this->GetType())
                    % GetNameStr()
                    % GetNumInputSlots()
                    % GetNumOutputSlots()
                    % CHECK_LOCATION().AsString()));
    }
    return inputShapes;
}
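// Emits the common LayerName/LayerType/BackendID parameters; layers that carry descriptors
// override this to add their descriptor fields as well.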
void Layer::SerializeLayerParameters(ParameterStringifyFunction& fn) const
{
    std::string layerType = GetLayerTypeAsCString(m_Type);
    std::string backendId = std::string(m_BackendId);
    if (!(m_LayerName.compare("") == 0) && !m_LayerName.empty())
    {
        fn("LayerName", m_LayerName);
    }
    if (!(layerType.compare("") == 0) && !layerType.empty())
    {
        fn("LayerType", layerType);
    }
    if (!(backendId.compare("") == 0) && !backendId.empty())
    {
        fn("BackendID", backendId);
    }
}
} // namespace armnn
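The slot wiring implemented above is what the public graph-building API drives. Below is a minimal sketch (not part of Layer.cpp) of how a caller exercises it through INetwork and IConnectableLayer; the layer names, binding ids, and tensor shape are illustrative only.

#include <armnn/ArmNN.hpp>

int main()
{
    using namespace armnn;

    INetworkPtr network = INetwork::Create();

    // Each AddXxxLayer call constructs a concrete Layer with the slot counts fixed by its type.
    IConnectableLayer* input = network->AddInputLayer(0, "input");

    ActivationDescriptor reluDesc;
    reluDesc.m_Function = ActivationFunction::ReLu;
    IConnectableLayer* relu = network->AddActivationLayer(reluDesc, "relu");

    IConnectableLayer* output = network->AddOutputLayer(0, "output");

    // IOutputSlot::Connect is served by OutputSlot::Connect above: it records the destination
    // InputSlot and pushes a default EdgeStrategy::Undefined entry for the new connection.
    input->GetOutputSlot(0).Connect(relu->GetInputSlot(0));
    relu->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    // SetTensorInfo forwards to OutputSlot::SetTensorInfo / OutputHandler::SetTensorInfo;
    // VerifyLayerConnections later checks that every connected output slot has its info set.
    TensorInfo info(TensorShape({1, 1, 2, 2}), DataType::Float32);
    input->GetOutputSlot(0).SetTensorInfo(info);
    relu->GetOutputSlot(0).SetTensorInfo(info);

    return 0;
}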
Definition: Layer.cpp:126