From 6940dd720ebb6b3d1df8ca203ab696daefe58189 Mon Sep 17 00:00:00 2001 From: Jim Flynn Date: Fri, 20 Mar 2020 12:25:56 +0000 Subject: renamed Documentation folder 20.02 and added .nojekyll file Signed-off-by: Jim Flynn --- 20.02/_deserializer_8cpp_source.xhtml | 427 ++++++++++++++++++++++++++++++++++ 1 file changed, 427 insertions(+) create mode 100644 20.02/_deserializer_8cpp_source.xhtml (limited to '20.02/_deserializer_8cpp_source.xhtml') diff --git a/20.02/_deserializer_8cpp_source.xhtml b/20.02/_deserializer_8cpp_source.xhtml new file mode 100644 index 0000000000..1e5d45d095 --- /dev/null +++ b/20.02/_deserializer_8cpp_source.xhtml @@ -0,0 +1,427 @@ + + + + + + + + + + + + + +ArmNN: src/armnnDeserializer/Deserializer.cpp Source File + + + + + + + + + + + + + + + + +
+
+ + + + ArmNN + + + +
+
+  20.02 +
+
+
+ + + + + + + +
+
+ +
+
+
+ +
+ +
+
+ + +
+ +
+ +
+
+
Deserializer.cpp
+
+
+Go to the documentation of this file.
1 //
2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #include "Deserializer.hpp"
7 
8 #include <armnn/Descriptors.hpp>
9 #include <armnn/Exceptions.hpp>
10 #include <armnn/TypesUtils.hpp>
11 #include <armnn/LstmParams.hpp>
13 
14 #include <armnnUtils/Permute.hpp>
15 #include <armnnUtils/Transpose.hpp>
17 
18 #include <ParserHelper.hpp>
19 #include <VerificationHelpers.hpp>
20 
21 #include <boost/filesystem.hpp>
22 #include <boost/format.hpp>
23 #include <boost/assert.hpp>
24 #include <boost/format.hpp>
25 #include <boost/format.hpp>
26 #include <boost/numeric/conversion/cast.hpp>
27 #include <boost/polymorphic_cast.hpp>
28 
29 #include <fstream>
30 #include <algorithm>
31 #include <limits>
32 #include <numeric>
33 
35 using namespace armnn;
36 using namespace armnnSerializer;
37 
38 namespace armnnDeserializer
39 {
40 
41 namespace
42 {
43 
44 const uint32_t VIRTUAL_LAYER_ID = std::numeric_limits<uint32_t>::max();
45 
46  void CheckGraph(const Deserializer::GraphPtr& graph,
47  unsigned int layersIndex,
48  const CheckLocation& location)
49 {
50  if (graph->layers() == nullptr)
51  {
52  throw ParseException(
53  boost::str(
54  boost::format("%1% was called with invalid (null) graph. "
55  "Possible reason is that the graph is not yet loaded and Unpack(ed). "
56  "layers:%2% at %3%") %
57  location.m_Function %
58  layersIndex %
59  location.FileLine()));
60  }
61  else if (layersIndex >= graph->layers()->size())
62  {
63  throw ParseException(
64  boost::str(
65  boost::format("%1% was called with an invalid layers index. "
66  "layers:%2% at %3%") %
67  location.m_Function %
68  layersIndex %
69  location.FileLine()));
70  }
71 }
72 
73 void CheckLayers(const Deserializer::GraphPtr& graph,
74  unsigned int layersIndex,
75  unsigned int layerIndex,
76  const CheckLocation& location)
77 {
78  if (graph->layers() == nullptr)
79  {
80  throw ParseException(
81  boost::str(
82  boost::format("%1% was called with invalid (null) graph. "
83  "Possible reason is that the graph is not yet loaded and Unpack(ed). "
84  "layers:%2% at %3%") %
85  location.m_Function %
86  layersIndex %
87  location.FileLine()));
88  }
89  else if (layersIndex >= graph->layers()->size())
90  {
91  throw ParseException(
92  boost::str(
93  boost::format("%1% was called with an invalid layers index. "
94  "layers:%2% at %3%") %
95  location.m_Function %
96  layersIndex %
97  location.FileLine()));
98  }
99  else if (layerIndex >= graph->layers()[layersIndex].size()
100  && layerIndex != VIRTUAL_LAYER_ID)
101  {
102  throw ParseException(
103  boost::str(
104  boost::format("%1% was called with an invalid layer index. "
105  "layers:%2% layer:%3% at %4%") %
106  location.m_Function %
107  layersIndex %
108  layerIndex %
109  location.FileLine()));
110  }
111 }
112 
113 void CheckTensorPtr(Deserializer::TensorRawPtr rawPtr,
114  const CheckLocation& location)
115 {
116  if (rawPtr == nullptr)
117  {
118  throw ParseException(
119  boost::str(
120  boost::format("%1% was called with a null tensor pointer. "
121  "at %2%") %
122  location.m_Function %
123  location.FileLine()));
124 
125  }
126 }
127 
128 void CheckConstTensorPtr(Deserializer::ConstTensorRawPtr rawPtr,
129  const CheckLocation& location)
130 {
131  if (rawPtr == nullptr)
132  {
133  throw ParseException(boost::str(boost::format("%1% was called with a null const tensor pointer. at %2%") %
134  location.m_Function %
135  location.FileLine()));
136  }
137 }
138 
139 void CheckConstTensorSize(const unsigned int constTensorSize,
140  const unsigned int tensorSize,
141  const CheckLocation& location)
142 {
143  if (constTensorSize != tensorSize)
144  {
145  throw ParseException(boost::str(boost::format("%1% wrong number of components supplied to tensor. at:%2%") %
146  location.m_Function %
147  location.FileLine()));
148  }
149 }
150 
151 #define CHECK_TENSOR_PTR(TENSOR_PTR) \
152  CheckTensorPtr(TENSOR_PTR, CHECK_LOCATION())
153 
154 #define CHECK_CONST_TENSOR_SIZE(CONST_TENSOR_SIZE, TENSOR_SIZE) \
155  CheckConstTensorSize(CONST_TENSOR_SIZE, TENSOR_SIZE, CHECK_LOCATION())
156 
157 #define CHECK_CONST_TENSOR_PTR(TENSOR_PTR) \
158  CheckConstTensorPtr(TENSOR_PTR, CHECK_LOCATION())
159 
160 #define CHECK_LAYERS(GRAPH, LAYERS_INDEX, LAYER_INDEX) \
161  CheckLayers(GRAPH, LAYERS_INDEX, LAYER_INDEX, CHECK_LOCATION())
162 
163 #define CHECK_GRAPH(GRAPH, LAYERS_INDEX) \
164  CheckGraph(GRAPH, LAYERS_INDEX, CHECK_LOCATION())
165 }
166 
167 bool CheckShape(const armnn::TensorShape& actual, const std::vector<uint32_t>& expected)
168 {
169  const unsigned int actualSize = actual.GetNumDimensions();
170  if (actualSize != expected.size())
171  {
172  return false;
173  }
174 
175  for (unsigned int i = 0u; i < actualSize; i++)
176  {
177  if (actual[i] != static_cast<unsigned int>(expected[i]))
178  {
179  return false;
180  }
181  }
182 
183  return true;
184 }
185 
186 Deserializer::Deserializer()
187 : m_Network(nullptr, nullptr),
188 //May require LayerType_Max to be included
189 m_ParserFunctions(Layer_MAX+1, &Deserializer::ParseUnsupportedLayer)
190 {
191  // register supported layers
192  m_ParserFunctions[Layer_AbsLayer] = &Deserializer::ParseAbs;
193  m_ParserFunctions[Layer_ActivationLayer] = &Deserializer::ParseActivation;
194  m_ParserFunctions[Layer_AdditionLayer] = &Deserializer::ParseAdd;
195  m_ParserFunctions[Layer_ArgMinMaxLayer] = &Deserializer::ParseArgMinMax;
196  m_ParserFunctions[Layer_BatchToSpaceNdLayer] = &Deserializer::ParseBatchToSpaceNd;
197  m_ParserFunctions[Layer_BatchNormalizationLayer] = &Deserializer::ParseBatchNormalization;
198  m_ParserFunctions[Layer_ComparisonLayer] = &Deserializer::ParseComparison;
199  m_ParserFunctions[Layer_ConcatLayer] = &Deserializer::ParseConcat;
200  m_ParserFunctions[Layer_ConstantLayer] = &Deserializer::ParseConstant;
201  m_ParserFunctions[Layer_Convolution2dLayer] = &Deserializer::ParseConvolution2d;
202  m_ParserFunctions[Layer_DepthToSpaceLayer] = &Deserializer::ParseDepthToSpace;
203  m_ParserFunctions[Layer_DepthwiseConvolution2dLayer] = &Deserializer::ParseDepthwiseConvolution2d;
204  m_ParserFunctions[Layer_DequantizeLayer] = &Deserializer::ParseDequantize;
205  m_ParserFunctions[Layer_DetectionPostProcessLayer] = &Deserializer::ParseDetectionPostProcess;
206  m_ParserFunctions[Layer_DivisionLayer] = &Deserializer::ParseDivision;
207  m_ParserFunctions[Layer_ElementwiseUnaryLayer] = &Deserializer::ParseElementwiseUnary;
208  m_ParserFunctions[Layer_EqualLayer] = &Deserializer::ParseEqual;
209  m_ParserFunctions[Layer_FullyConnectedLayer] = &Deserializer::ParseFullyConnected;
210  m_ParserFunctions[Layer_FloorLayer] = &Deserializer::ParseFloor;
211  m_ParserFunctions[Layer_GatherLayer] = &Deserializer::ParseGather;
212  m_ParserFunctions[Layer_GreaterLayer] = &Deserializer::ParseGreater;
213  m_ParserFunctions[Layer_InstanceNormalizationLayer] = &Deserializer::ParseInstanceNormalization;
214  m_ParserFunctions[Layer_L2NormalizationLayer] = &Deserializer::ParseL2Normalization;
215  m_ParserFunctions[Layer_LogSoftmaxLayer] = &Deserializer::ParseLogSoftmax;
216  m_ParserFunctions[Layer_LstmLayer] = &Deserializer::ParseLstm;
217  m_ParserFunctions[Layer_MaximumLayer] = &Deserializer::ParseMaximum;
218  m_ParserFunctions[Layer_MeanLayer] = &Deserializer::ParseMean;
219  m_ParserFunctions[Layer_MinimumLayer] = &Deserializer::ParseMinimum;
220  m_ParserFunctions[Layer_MergeLayer] = &Deserializer::ParseMerge;
221  m_ParserFunctions[Layer_MergerLayer] = &Deserializer::ParseConcat;
222  m_ParserFunctions[Layer_MultiplicationLayer] = &Deserializer::ParseMultiplication;
223  m_ParserFunctions[Layer_NormalizationLayer] = &Deserializer::ParseNormalization;
224  m_ParserFunctions[Layer_PadLayer] = &Deserializer::ParsePad;
225  m_ParserFunctions[Layer_PermuteLayer] = &Deserializer::ParsePermute;
226  m_ParserFunctions[Layer_Pooling2dLayer] = &Deserializer::ParsePooling2d;
227  m_ParserFunctions[Layer_PreluLayer] = &Deserializer::ParsePrelu;
228  m_ParserFunctions[Layer_QuantizeLayer] = &Deserializer::ParseQuantize;
229  m_ParserFunctions[Layer_QuantizedLstmLayer] = &Deserializer::ParseQuantizedLstm;
230  m_ParserFunctions[Layer_ReshapeLayer] = &Deserializer::ParseReshape;
231  m_ParserFunctions[Layer_ResizeBilinearLayer] = &Deserializer::ParseResizeBilinear;
232  m_ParserFunctions[Layer_ResizeLayer] = &Deserializer::ParseResize;
233  m_ParserFunctions[Layer_RsqrtLayer] = &Deserializer::ParseRsqrt;
234  m_ParserFunctions[Layer_SliceLayer] = &Deserializer::ParseSlice;
235  m_ParserFunctions[Layer_SoftmaxLayer] = &Deserializer::ParseSoftmax;
236  m_ParserFunctions[Layer_SpaceToBatchNdLayer] = &Deserializer::ParseSpaceToBatchNd;
237  m_ParserFunctions[Layer_SpaceToDepthLayer] = &Deserializer::ParseSpaceToDepth;
238  m_ParserFunctions[Layer_SplitterLayer] = &Deserializer::ParseSplitter;
239  m_ParserFunctions[Layer_StackLayer] = &Deserializer::ParseStack;
240  m_ParserFunctions[Layer_StandInLayer] = &Deserializer::ParseStandIn;
241  m_ParserFunctions[Layer_StridedSliceLayer] = &Deserializer::ParseStridedSlice;
242  m_ParserFunctions[Layer_SubtractionLayer] = &Deserializer::ParseSubtraction;
243  m_ParserFunctions[Layer_SwitchLayer] = &Deserializer::ParseSwitch;
244  m_ParserFunctions[Layer_TransposeConvolution2dLayer] = &Deserializer::ParseTransposeConvolution2d;
245  m_ParserFunctions[Layer_TransposeLayer] = &Deserializer::ParseTranspose;
246 }
247 
248 Deserializer::LayerBaseRawPtr Deserializer::GetBaseLayer(const GraphPtr& graphPtr, unsigned int layerIndex)
249 {
250  auto layerType = graphPtr->layers()->Get(layerIndex)->layer_type();
251 
252  switch(layerType)
253  {
254  case Layer::Layer_AbsLayer:
255  return graphPtr->layers()->Get(layerIndex)->layer_as_AbsLayer()->base();
256  case Layer::Layer_ActivationLayer:
257  return graphPtr->layers()->Get(layerIndex)->layer_as_ActivationLayer()->base();
258  case Layer::Layer_AdditionLayer:
259  return graphPtr->layers()->Get(layerIndex)->layer_as_AdditionLayer()->base();
260  case Layer::Layer_ArgMinMaxLayer:
261  return graphPtr->layers()->Get(layerIndex)->layer_as_ArgMinMaxLayer()->base();
262  case Layer::Layer_BatchToSpaceNdLayer:
263  return graphPtr->layers()->Get(layerIndex)->layer_as_BatchToSpaceNdLayer()->base();
264  case Layer::Layer_BatchNormalizationLayer:
265  return graphPtr->layers()->Get(layerIndex)->layer_as_BatchNormalizationLayer()->base();
266  case Layer::Layer_ComparisonLayer:
267  return graphPtr->layers()->Get(layerIndex)->layer_as_ComparisonLayer()->base();
268  case Layer::Layer_ConcatLayer:
269  return graphPtr->layers()->Get(layerIndex)->layer_as_ConcatLayer()->base();
270  case Layer::Layer_ConstantLayer:
271  return graphPtr->layers()->Get(layerIndex)->layer_as_ConstantLayer()->base();
272  case Layer::Layer_Convolution2dLayer:
273  return graphPtr->layers()->Get(layerIndex)->layer_as_Convolution2dLayer()->base();
274  case Layer::Layer_DepthToSpaceLayer:
275  return graphPtr->layers()->Get(layerIndex)->layer_as_DepthToSpaceLayer()->base();
276  case Layer::Layer_DepthwiseConvolution2dLayer:
277  return graphPtr->layers()->Get(layerIndex)->layer_as_DepthwiseConvolution2dLayer()->base();
278  case Layer::Layer_DequantizeLayer:
279  return graphPtr->layers()->Get(layerIndex)->layer_as_DequantizeLayer()->base();
280  case Layer::Layer_DetectionPostProcessLayer:
281  return graphPtr->layers()->Get(layerIndex)->layer_as_DetectionPostProcessLayer()->base();
282  case Layer::Layer_DivisionLayer:
283  return graphPtr->layers()->Get(layerIndex)->layer_as_DivisionLayer()->base();
284  case Layer::Layer_EqualLayer:
285  return graphPtr->layers()->Get(layerIndex)->layer_as_EqualLayer()->base();
286  case Layer::Layer_FullyConnectedLayer:
287  return graphPtr->layers()->Get(layerIndex)->layer_as_FullyConnectedLayer()->base();
288  case Layer::Layer_FloorLayer:
289  return graphPtr->layers()->Get(layerIndex)->layer_as_FloorLayer()->base();
290  case Layer::Layer_GatherLayer:
291  return graphPtr->layers()->Get(layerIndex)->layer_as_GatherLayer()->base();
292  case Layer::Layer_GreaterLayer:
293  return graphPtr->layers()->Get(layerIndex)->layer_as_GreaterLayer()->base();
294  case Layer::Layer_InputLayer:
295  return graphPtr->layers()->Get(layerIndex)->layer_as_InputLayer()->base()->base();
296  case Layer::Layer_InstanceNormalizationLayer:
297  return graphPtr->layers()->Get(layerIndex)->layer_as_InstanceNormalizationLayer()->base();
298  case Layer::Layer_L2NormalizationLayer:
299  return graphPtr->layers()->Get(layerIndex)->layer_as_L2NormalizationLayer()->base();
300  case Layer::Layer_LogSoftmaxLayer:
301  return graphPtr->layers()->Get(layerIndex)->layer_as_LogSoftmaxLayer()->base();
302  case Layer::Layer_LstmLayer:
303  return graphPtr->layers()->Get(layerIndex)->layer_as_LstmLayer()->base();
304  case Layer::Layer_MeanLayer:
305  return graphPtr->layers()->Get(layerIndex)->layer_as_MeanLayer()->base();
306  case Layer::Layer_MinimumLayer:
307  return graphPtr->layers()->Get(layerIndex)->layer_as_MinimumLayer()->base();
308  case Layer::Layer_MaximumLayer:
309  return graphPtr->layers()->Get(layerIndex)->layer_as_MaximumLayer()->base();
310  case Layer::Layer_MergeLayer:
311  return graphPtr->layers()->Get(layerIndex)->layer_as_MergeLayer()->base();
312  case Layer::Layer_MergerLayer:
313  return graphPtr->layers()->Get(layerIndex)->layer_as_MergerLayer()->base();
314  case Layer::Layer_MultiplicationLayer:
315  return graphPtr->layers()->Get(layerIndex)->layer_as_MultiplicationLayer()->base();
316  case Layer::Layer_NormalizationLayer:
317  return graphPtr->layers()->Get(layerIndex)->layer_as_NormalizationLayer()->base();
318  case Layer::Layer_OutputLayer:
319  return graphPtr->layers()->Get(layerIndex)->layer_as_OutputLayer()->base()->base();
320  case Layer::Layer_PadLayer:
321  return graphPtr->layers()->Get(layerIndex)->layer_as_PadLayer()->base();
322  case Layer::Layer_PermuteLayer:
323  return graphPtr->layers()->Get(layerIndex)->layer_as_PermuteLayer()->base();
324  case Layer::Layer_Pooling2dLayer:
325  return graphPtr->layers()->Get(layerIndex)->layer_as_Pooling2dLayer()->base();
326  case Layer::Layer_PreluLayer:
327  return graphPtr->layers()->Get(layerIndex)->layer_as_PreluLayer()->base();
328  case Layer::Layer_QuantizeLayer:
329  return graphPtr->layers()->Get(layerIndex)->layer_as_QuantizeLayer()->base();
330  case Layer::Layer_QuantizedLstmLayer:
331  return graphPtr->layers()->Get(layerIndex)->layer_as_QuantizedLstmLayer()->base();
332  case Layer::Layer_ReshapeLayer:
333  return graphPtr->layers()->Get(layerIndex)->layer_as_ReshapeLayer()->base();
334  case Layer::Layer_ResizeBilinearLayer:
335  return graphPtr->layers()->Get(layerIndex)->layer_as_ResizeBilinearLayer()->base();
336  case Layer::Layer_ResizeLayer:
337  return graphPtr->layers()->Get(layerIndex)->layer_as_ResizeLayer()->base();
338  case Layer::Layer_RsqrtLayer:
339  return graphPtr->layers()->Get(layerIndex)->layer_as_RsqrtLayer()->base();
340  case Layer::Layer_SliceLayer:
341  return graphPtr->layers()->Get(layerIndex)->layer_as_SliceLayer()->base();
342  case Layer::Layer_SoftmaxLayer:
343  return graphPtr->layers()->Get(layerIndex)->layer_as_SoftmaxLayer()->base();
344  case Layer::Layer_SpaceToBatchNdLayer:
345  return graphPtr->layers()->Get(layerIndex)->layer_as_SpaceToBatchNdLayer()->base();
346  case Layer::Layer_SpaceToDepthLayer:
347  return graphPtr->layers()->Get(layerIndex)->layer_as_SpaceToDepthLayer()->base();
348  case Layer::Layer_SplitterLayer:
349  return graphPtr->layers()->Get(layerIndex)->layer_as_SplitterLayer()->base();
350  case Layer::Layer_StackLayer:
351  return graphPtr->layers()->Get(layerIndex)->layer_as_StackLayer()->base();
352  case Layer::Layer_StandInLayer:
353  return graphPtr->layers()->Get(layerIndex)->layer_as_StandInLayer()->base();
354  case Layer::Layer_StridedSliceLayer:
355  return graphPtr->layers()->Get(layerIndex)->layer_as_StridedSliceLayer()->base();
356  case Layer::Layer_SubtractionLayer:
357  return graphPtr->layers()->Get(layerIndex)->layer_as_SubtractionLayer()->base();
358  case Layer::Layer_SwitchLayer:
359  return graphPtr->layers()->Get(layerIndex)->layer_as_SwitchLayer()->base();
360  case Layer::Layer_TransposeConvolution2dLayer:
361  return graphPtr->layers()->Get(layerIndex)->layer_as_TransposeConvolution2dLayer()->base();
362  case Layer::Layer_TransposeLayer:
363  return graphPtr->layers()->Get(layerIndex)->layer_as_TransposeLayer()->base();
364  case Layer::Layer_NONE:
365  default:
366  throw ParseException(boost::str(
367  boost::format("Layer type %1% not recognized") %
368  layerType));
369  }
370 }
371 
372 std::string Deserializer::GetLayerName(const GraphPtr& graph, unsigned int index)
373 {
374  auto layer = GetBaseLayer(graph, index);
375  assert(layer);
376  return layer->layerName()->str();
377 }
378 
379 int32_t Deserializer::GetBindingLayerInfo(const GraphPtr& graphPtr, unsigned int layerIndex)
380 {
381  auto layerType = graphPtr->layers()->Get(layerIndex)->layer_type();
382 
383  if (layerType == Layer::Layer_InputLayer)
384  {
385  return graphPtr->layers()->Get(layerIndex)->layer_as_InputLayer()->base()->layerBindingId();
386  }
387  else if ( layerType == Layer::Layer_OutputLayer )
388  {
389  return graphPtr->layers()->Get(layerIndex)->layer_as_OutputLayer()->base()->layerBindingId();
390  }
391  return 0;
392 }
393 
395 {
396  switch (dataLayout)
397  {
398  case armnnSerializer::DataLayout::DataLayout_NHWC:
400  case armnnSerializer::DataLayout::DataLayout_NCHW:
401  default:
403  }
404 }
405 
407 {
408  switch (function)
409  {
410  case armnnSerializer::ActivationFunction_Sigmoid:
412  case armnnSerializer::ActivationFunction_TanH:
414  case armnnSerializer::ActivationFunction_Linear:
416  case armnnSerializer::ActivationFunction_ReLu:
418  case armnnSerializer::ActivationFunction_BoundedReLu:
420  case armnnSerializer::ActivationFunction_LeakyReLu:
422  case armnnSerializer::ActivationFunction_Abs:
424  case armnnSerializer::ActivationFunction_Sqrt:
426  case armnnSerializer::ActivationFunction_Square:
428  case armnnSerializer::ActivationFunction_Elu:
430  case armnnSerializer::ActivationFunction_HardSwish:
432  default:
434  }
435 }
436 
438 {
439  switch (function)
440  {
441  case armnnSerializer::ArgMinMaxFunction::ArgMinMaxFunction_Max:
443  case armnnSerializer::ArgMinMaxFunction::ArgMinMaxFunction_Min:
444  default:
446  }
447 }
448 
450 {
451  switch (operation)
452  {
453  case armnnSerializer::ComparisonOperation::ComparisonOperation_Equal:
455  case armnnSerializer::ComparisonOperation::ComparisonOperation_Greater:
457  case armnnSerializer::ComparisonOperation::ComparisonOperation_GreaterOrEqual:
459  case armnnSerializer::ComparisonOperation::ComparisonOperation_Less:
461  case armnnSerializer::ComparisonOperation::ComparisonOperation_LessOrEqual:
463  case armnnSerializer::ComparisonOperation::ComparisonOperation_NotEqual:
464  default:
466  }
467 }
468 
470 {
471  switch (operation)
472  {
473  case armnnSerializer::UnaryOperation::UnaryOperation_Abs:
475  case armnnSerializer::UnaryOperation::UnaryOperation_Rsqrt:
477  case armnnSerializer::UnaryOperation::UnaryOperation_Sqrt:
479  case armnnSerializer::UnaryOperation::UnaryOperation_Exp:
481  case armnnSerializer::UnaryOperation::UnaryOperation_Neg:
483  default:
484  throw armnn::InvalidArgumentException("Unary operation unknown");
485  }
486 }
487 
489 {
490  switch (method)
491  {
492  case armnnSerializer::ResizeMethod_NearestNeighbor:
494  case armnnSerializer::ResizeMethod_Bilinear:
496  default:
498  }
499 }
500 
502 {
503  armnn::DataType type;
504  CHECK_TENSOR_PTR(tensorPtr);
505 
506  switch (tensorPtr->dataType())
507  {
508  case DataType_QAsymmS8:
510  break;
511  case DataType_QuantisedAsymm8:
512  case DataType_QAsymmU8:
514  break;
515  case DataType_QSymmS16:
516  case DataType_QuantisedSymm16:
518  break;
519  case DataType_Signed32:
521  break;
522  case DataType_Float32:
524  break;
525  case DataType_Float16:
527  break;
528  case DataType_Boolean:
530  break;
531  default:
532  {
533  CheckLocation location = CHECK_LOCATION();
534  throw ParseException(
535  boost::str(
536  boost::format("Unsupported data type %1% = %2%. %3%") %
537  tensorPtr->dataType() %
538  EnumNameDataType(tensorPtr->dataType()) %
539  location.AsString()));
540  }
541  }
542  float quantizationScale = tensorPtr->quantizationScale();
543  int32_t quantizationOffset = tensorPtr->quantizationOffset();
544 
545  auto dimensions = tensorPtr->dimensions();
546  unsigned int size = dimensions->size();
547  std::vector<unsigned int> outputDims(dimensions->begin(), dimensions->begin() + size);
548 
549  // two statements (on purpose) for easier debugging:
550  armnn::TensorInfo result(size,
551  outputDims.data(),
552  type,
553  quantizationScale,
554  quantizationOffset);
555  return result;
556 }
557 
559 {
560  CHECK_CONST_TENSOR_PTR(constTensorPtr);
561  armnn::TensorInfo tensorInfo = ToTensorInfo(constTensorPtr->info());
562 
563  switch (constTensorPtr->data_type())
564  {
565  case ConstTensorData_ByteData:
566  {
567  auto byteData = constTensorPtr->data_as_ByteData()->data();
568  CHECK_CONST_TENSOR_SIZE(byteData->size(), tensorInfo.GetNumElements());
569  return armnn::ConstTensor(tensorInfo, byteData->data());
570  }
571  case ConstTensorData_ShortData:
572  {
573  auto shortData = constTensorPtr->data_as_ShortData()->data();
574  CHECK_CONST_TENSOR_SIZE(shortData->size(), tensorInfo.GetNumElements());
575  return armnn::ConstTensor(tensorInfo, shortData->data());
576  }
577  case ConstTensorData_IntData:
578  {
579  auto intData = constTensorPtr->data_as_IntData()->data();
580  CHECK_CONST_TENSOR_SIZE(intData->size(), tensorInfo.GetNumElements());
581  return armnn::ConstTensor(tensorInfo, intData->data());
582  }
583  case ConstTensorData_LongData:
584  {
585  auto longData = constTensorPtr->data_as_LongData()->data();
586  CHECK_CONST_TENSOR_SIZE(longData->size(), tensorInfo.GetNumElements());
587  return armnn::ConstTensor(tensorInfo, longData->data());
588  }
589  default:
590  {
591  CheckLocation location = CHECK_LOCATION();
592  throw ParseException(
593  boost::str(boost::format("Unsupported data type %1% = %2%. %3%") %
594  constTensorPtr->data_type() %
595  EnumNameConstTensorData(constTensorPtr->data_type()) %
596  location.AsString()));
597  }
598  }
599 }
600 
602  unsigned int layerIndex)
603 {
604  CHECK_LAYERS(graphPtr, 0, layerIndex);
605  auto layer = GetBaseLayer(graphPtr, layerIndex);
606  const auto& numInputs = layer->inputSlots()->size();
607 
608  TensorRawPtrVector result(numInputs);
609 
610  for (unsigned int i=0; i<numInputs; ++i)
611  {
612  auto inputId = CHECKED_NON_NEGATIVE(static_cast<int32_t>
613  (layer->inputSlots()->Get(i)->connection()->sourceLayerIndex()));
614  result[i] = GetBaseLayer(graphPtr, inputId)->outputSlots()->Get(0)->tensorInfo();
615  }
616  return result;
617 }
618 
620  unsigned int layerIndex)
621 {
622  CHECK_LAYERS(graphPtr, 0, layerIndex);
623  auto layer = GetBaseLayer(graphPtr, layerIndex);
624  const auto& numOutputs = layer->outputSlots()->size();
625 
626  TensorRawPtrVector result(numOutputs);
627 
628  for (unsigned int i=0; i<numOutputs; ++i)
629  {
630  result[i] = layer->outputSlots()->Get(i)->tensorInfo();
631  }
632  return result;
633 }
634 
635 void Deserializer::ParseUnsupportedLayer(GraphPtr graph, unsigned int layerIndex)
636 {
637  CHECK_LAYERS(graph, 0, layerIndex);
638  const auto layerName = GetBaseLayer(graph, layerIndex)->layerName()->c_str();
639  throw ParseException(
640  boost::str(
641  boost::format("Layer not supported. "
642  "layerIndex: %1% "
643  "layerName: %2% / %3%") %
644  layerIndex %
645  layerName %
646  CHECK_LOCATION().AsString()));
647 }
648 
649 void Deserializer::ResetParser()
650 {
651  m_Network = armnn::INetworkPtr(nullptr, nullptr);
652  m_InputBindings.clear();
653  m_OutputBindings.clear();
654 }
655 
657 {
658  return new Deserializer();
659 }
660 
662 {
664 }
665 
667 {
668  delete parser;
669 }
670 
671 INetworkPtr Deserializer::CreateNetworkFromBinary(const std::vector<uint8_t>& binaryContent)
672 {
673  ResetParser();
674  GraphPtr graph = LoadGraphFromBinary(binaryContent.data(), binaryContent.size());
675  return CreateNetworkFromGraph(graph);
676 }
677 
679 {
680  ResetParser();
681  std::vector<uint8_t> content((std::istreambuf_iterator<char>(binaryContent)), std::istreambuf_iterator<char>());
682  GraphPtr graph = LoadGraphFromBinary(content.data(), content.size());
683  return CreateNetworkFromGraph(graph);
684 }
685 
686 Deserializer::GraphPtr Deserializer::LoadGraphFromBinary(const uint8_t* binaryContent, size_t len)
687 {
688  if (binaryContent == nullptr)
689  {
690  throw InvalidArgumentException(boost::str(boost::format("Invalid (null) binary content %1%") %
691  CHECK_LOCATION().AsString()));
692  }
693  flatbuffers::Verifier verifier(binaryContent, len);
694  if (verifier.VerifyBuffer<SerializedGraph>() == false)
695  {
696  throw ParseException(
697  boost::str(boost::format("Buffer doesn't conform to the expected Armnn "
698  "flatbuffers format. size:%1% %2%") %
699  len %
700  CHECK_LOCATION().AsString()));
701  }
702  return GetSerializedGraph(binaryContent);
703 }
704 
705 INetworkPtr Deserializer::CreateNetworkFromGraph(GraphPtr graph)
706 {
707  m_Network = INetwork::Create();
708  BOOST_ASSERT(graph != nullptr);
709  unsigned int layerIndex = 0;
710  for (AnyLayer const* layer : *graph->layers())
711  {
712  if (layer->layer_type() != Layer_InputLayer &&
713  layer->layer_type() != Layer_OutputLayer)
714  {
715  // lookup and call the parser function
716  auto& parserFunction = m_ParserFunctions[layer->layer_type()];
717  (this->*parserFunction)(graph, layerIndex);
718  }
719  ++layerIndex;
720  }
721 
722  SetupInputLayers(graph);
723  SetupOutputLayers(graph);
724 
725  // establish the connections from the layer outputs to the inputs of the subsequent layers
726  for (auto&& graphIt : m_GraphConnections)
727  {
728  Connections& connections = graphIt.second;
729  for (auto&& outputIt : connections.outputSlots)
730  {
731  const unsigned int outputSlotIndex = outputIt.first;
732  IOutputSlot* outputSlot = outputIt.second;
733  if (connections.inputSlots.find(outputSlotIndex) != connections.inputSlots.end())
734  {
735  for (IInputSlot* inputSlot : connections.inputSlots[outputSlotIndex])
736  {
737  outputSlot->Connect(*inputSlot);
738  }
739  }
740  }
741  }
742 
743  return std::move(m_Network);
744 }
745 
747  const std::string& name) const
748 {
749  IgnoreUnused(layerIndex);
750  for (auto inputBinding : m_InputBindings)
751  {
752  if (inputBinding.first == name)
753  {
754  return inputBinding.second;
755  }
756  }
757  throw ParseException(
758  boost::str(
759  boost::format("No input binding found for layer:%1% / %2%") %
760  name %
761  CHECK_LOCATION().AsString()));
762 }
763 
765  const std::string& name) const
766 {
767  IgnoreUnused(layerIndex);
768  for (auto outputBinding : m_OutputBindings)
769  {
770  if (outputBinding.first == name)
771  {
772  return outputBinding.second;
773  }
774  }
775  throw ParseException(
776  boost::str(
777  boost::format("No output binding found for layer:%1% / %2%") %
778  name %
779  CHECK_LOCATION().AsString()));
780 }
781 
782 unsigned int Deserializer::GetInputLayerInVector(GraphPtr graph, int targetId)
783 {
784  for (unsigned int i = 0; i < graph->layers()->size(); i++)
785  {
786  auto layer = graph->layers()->Get(i);
787  if (layer->layer_type() == Layer::Layer_InputLayer)
788  {
789  auto layerBindingId = layer->layer_as_InputLayer()->base()->layerBindingId();
790  if (layerBindingId == targetId)
791  {
792  return i;
793  }
794  }
795  }
796  throw ParseException("Input layer with given layerBindingId not found");
797 }
798 
799 unsigned int Deserializer::GetOutputLayerInVector(GraphPtr graph, int targetId)
800 {
801  for (unsigned int i = 0; i < graph->layers()->size(); i++)
802  {
803  auto layer = graph->layers()->Get(i);
804  if (layer->layer_type() == Layer::Layer_OutputLayer)
805  {
806  auto layerBindingId = layer->layer_as_OutputLayer()->base()->layerBindingId();
807  if (layerBindingId == targetId)
808  {
809  return i;
810  }
811  }
812  }
813  throw ParseException("Output layer with given layerBindingId not found");
814 }
815 
816 unsigned int Deserializer::GetLayerIndexInVector(GraphPtr graph, unsigned int targetIndex)
817 {
818  for (unsigned int i = 0; i < graph->layers()->size(); i++)
819  {
820  LayerBaseRawPtr layer = GetBaseLayer(graph, i);
821  if (layer->index() == targetIndex)
822  {
823  return i;
824  }
825  }
826  throw ParseException("Layer with given index not found");
827 }
828 
829 Deserializer::FeatureVersions Deserializer::GetFeatureVersions(GraphPtr graph)
830 {
831  Deserializer::FeatureVersions versions;
832 
833  if (graph->featureVersions())
834  {
835  versions.m_BindingIdScheme = graph->featureVersions()->bindingIdsScheme();
836  }
837 
838  return versions;
839 }
840 
841 void Deserializer::SetupInputLayers(GraphPtr graph)
842 {
843  CHECK_GRAPH(graph, 0);
844  const unsigned int numInputs = graph->inputIds()->size();
845  m_InputBindings.clear();
846  m_InputBindings.reserve(numInputs);
847 
848  for (unsigned int i = 0; i < numInputs; i++)
849  {
850  unsigned int inputLayerIndex = 0xFFFFFFFF;
851  if (GetFeatureVersions(graph).m_BindingIdScheme == 0)
852  {
853  const unsigned int inputId = boost::numeric_cast<unsigned int>(graph->inputIds()->Get(i));
854  inputLayerIndex = GetLayerIndexInVector(graph, inputId);
855  }
856  else
857  {
858  const int inputId = graph->inputIds()->Get(i);
859  inputLayerIndex = GetInputLayerInVector(graph, inputId);
860  }
861 
862  LayerBaseRawPtr baseLayer = GetBaseLayer(graph, inputLayerIndex);
863 
864  // GetBindingLayerInfo expect the index to be index in the vector not index property on each layer base
865  LayerBindingId bindingId = GetBindingLayerInfo(graph, inputLayerIndex);
866  BOOST_ASSERT_MSG(baseLayer->layerName()->c_str(), "Input has no name.");
867 
868  IConnectableLayer* inputLayer =
869  m_Network->AddInputLayer(bindingId, baseLayer->layerName()->c_str());
870 
871  const armnn::TensorInfo& tensorInfo = ToTensorInfo(baseLayer->outputSlots()->Get(0)->tensorInfo());
872  inputLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
873  RegisterOutputSlots(graph, inputLayerIndex, inputLayer);
874 
875  BindingPointInfo bindingInfo = {bindingId, tensorInfo};
876  m_InputBindings.push_back(std::make_pair(baseLayer->layerName()->c_str(), bindingInfo));
877  }
878 }
879 
880 void Deserializer::SetupOutputLayers(GraphPtr graph)
881 {
882  CHECK_GRAPH(graph, 0);
883  const unsigned int numOutputs = graph->outputIds()->size();
884  m_OutputBindings.clear();
885  m_OutputBindings.reserve(numOutputs);
886 
887  for (unsigned int i = 0; i < numOutputs; i++)
888  {
889  unsigned int outputLayerIndex = 0xFFFFFFFF;
890  if (GetFeatureVersions(graph).m_BindingIdScheme == 0)
891  {
892  const unsigned int outputId = boost::numeric_cast<unsigned int>(graph->outputIds()->Get(i));
893  outputLayerIndex = GetLayerIndexInVector(graph, outputId);
894  }
895  else
896  {
897  const int outputId = graph->outputIds()->Get(i);
898  outputLayerIndex = GetOutputLayerInVector(graph, outputId);
899  }
900 
901  LayerBaseRawPtr baseLayer = GetBaseLayer(graph, outputLayerIndex);
902 
903  // GetBindingLayerInfo expect the index to be index in the vector not index property on each layer base
904  LayerBindingId bindingId = GetBindingLayerInfo(graph, outputLayerIndex);
905  BOOST_ASSERT_MSG(baseLayer->layerName()->c_str(), "Input has no name.");
906 
907  IConnectableLayer* outputLayer =
908  m_Network->AddOutputLayer(bindingId, baseLayer->layerName()->c_str());
909 
910  RegisterInputSlots(graph, outputLayerIndex, outputLayer);
911 
912  unsigned int sourceLayerIndex =
913  GetLayerIndexInVector(graph, baseLayer->inputSlots()->Get(0)->connection()->sourceLayerIndex());
914  LayerBaseRawPtr sourceBaseLayer = GetBaseLayer(graph, sourceLayerIndex);
915  const armnn::TensorInfo& tensorInfo = ToTensorInfo(sourceBaseLayer->outputSlots()->Get(0)->tensorInfo());
916 
917  BindingPointInfo bindingInfo = {bindingId, tensorInfo};
918  m_OutputBindings.push_back(std::make_pair(baseLayer->layerName()->c_str(), bindingInfo));
919  }
920 }
921 
922 void Deserializer::RegisterOutputSlots(GraphPtr graph,
923  uint32_t layerIndex,
924  IConnectableLayer* layer)
925 {
926  CHECK_LAYERS(graph, 0, layerIndex);
927  BOOST_ASSERT(layer != nullptr);
928  LayerBaseRawPtr baseLayer = GetBaseLayer(graph, layerIndex);
929  if (baseLayer->outputSlots()->size() != layer->GetNumOutputSlots())
930  {
931  throw ParseException(
932  boost::str(boost::format("The number of outputslots (%1%) does not match the number expected (%2%)"
933  " for layer index: %3% %4%") %
934  baseLayer->outputSlots()->size() %
935  layer->GetNumOutputSlots() %
936  layerIndex %
937  CHECK_LOCATION().AsString()));
938  }
939 
940  for (unsigned int i = 0; i < layer->GetNumOutputSlots(); ++i)
941  {
942  const unsigned int slotIndex = baseLayer->outputSlots()->Get(i)->index();
943  armnn::IOutputSlot* outputSlot = &(layer->GetOutputSlot(slotIndex));
944  // layerIndex is not necessarily the same as baseLayer->index(). The latter is needed here
945  RegisterOutputSlotOfConnection(baseLayer->index(), slotIndex, outputSlot);
946  }
947 }
948 
949 void Deserializer::RegisterInputSlots(GraphPtr graph,
950  uint32_t layerIndex,
952 {
953  CHECK_LAYERS(graph, 0, layerIndex);
954  BOOST_ASSERT(layer != nullptr);
955  LayerBaseRawPtr baseLayer = GetBaseLayer(graph, layerIndex);
956  if (baseLayer->inputSlots()->size() != layer->GetNumInputSlots())
957  {
958  throw ParseException(
959  boost::str(boost::format("The number of inputslots (%1%) does not match the number expected (%2%)"
960  " for layer index:%3% %4%") %
961  baseLayer->inputSlots()->size() %
962  layer->GetNumInputSlots() %
963  layerIndex %
964  CHECK_LOCATION().AsString()));
965  }
966 
967  for (unsigned int i = 0; i < layer->GetNumInputSlots(); ++i)
968  {
969  auto fbInputSlot = baseLayer->inputSlots()->Get(i);
970  auto fbConnection = fbInputSlot->connection();
971  armnn::IInputSlot* inputSlot = &(layer->GetInputSlot(fbInputSlot->index()));
972  RegisterInputSlotOfConnection(fbConnection->sourceLayerIndex(), fbConnection->outputSlotIndex(), inputSlot);
973  }
974 }
975 
976 void Deserializer::RegisterInputSlotOfConnection(uint32_t sourceLayerIndex,
977  uint32_t outputSlotIndex,
978  armnn::IInputSlot* inputSlot)
979 {
980  if (m_GraphConnections.find(sourceLayerIndex) == m_GraphConnections.end())
981  {
982  m_GraphConnections[sourceLayerIndex] = Connections();
983  }
984 
985  Connections& connections = m_GraphConnections[sourceLayerIndex];
986  if (connections.inputSlots.find(outputSlotIndex) == connections.inputSlots.end())
987  {
988  connections.inputSlots[outputSlotIndex] = {inputSlot};
989  }
990  else
991  {
992  connections.inputSlots[outputSlotIndex].push_back(inputSlot);
993  }
994 }
995 
996 void Deserializer::RegisterOutputSlotOfConnection(uint32_t sourceLayerIndex,
997  uint32_t outputSlotIndex,
998  armnn::IOutputSlot* outputSlot)
999 {
1000  if (m_GraphConnections.find(sourceLayerIndex) == m_GraphConnections.end())
1001  {
1002  m_GraphConnections[sourceLayerIndex] = Connections();
1003  }
1004 
1005  Connections& connections = m_GraphConnections[sourceLayerIndex];
1006  if (connections.outputSlots.find(outputSlotIndex) != connections.outputSlots.end())
1007  {
1008  throw ParseException("Same output slot index processed twice");
1009  }
1010 
1011  connections.outputSlots[outputSlotIndex] = outputSlot;
1012 }
1013 
1014 void Deserializer::ParseAbs(armnnDeserializer::Deserializer::GraphPtr graph, unsigned int layerIndex)
1015 {
1016  CHECK_LAYERS(graph, 0, layerIndex);
1017  auto inputs = GetInputs(graph, layerIndex);
1018  CHECK_LOCATION();
1019  CHECK_VALID_SIZE(inputs.size(), 1);
1020 
1021  auto outputs = GetOutputs(graph, layerIndex);
1022  CHECK_VALID_SIZE(outputs.size(), 1);
1023 
1024  auto layerName = GetLayerName(graph, layerIndex);
1025 
1027  IConnectableLayer* layer = m_Network->AddElementwiseUnaryLayer(descriptor, layerName.c_str());
1028  armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1029  layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1030 
1031  RegisterInputSlots(graph, layerIndex, layer);
1032  RegisterOutputSlots(graph, layerIndex, layer);
1033 }
1034 
1035 void Deserializer::ParseActivation(GraphPtr graph, unsigned int layerIndex)
1036 {
1037  CHECK_LAYERS(graph, 0, layerIndex);
1038  auto inputs = GetInputs(graph, layerIndex);
1039  CHECK_LOCATION();
1040  CHECK_VALID_SIZE(inputs.size(), 1);
1041 
1042  auto outputs = GetOutputs(graph, layerIndex);
1043  CHECK_VALID_SIZE(outputs.size(), 1);
1044 
1045  auto serializerLayer = graph->layers()->Get(layerIndex)->layer_as_ActivationLayer();
1046  auto layerName = GetLayerName(graph, layerIndex);
1047  auto serializerDescriptor = serializerLayer->descriptor();
1048 
1049  armnn::ActivationDescriptor descriptor;
1050  descriptor.m_Function = ToActivationFunction(serializerDescriptor->activationFunction());
1051  descriptor.m_A = serializerDescriptor->a();
1052  descriptor.m_B = serializerDescriptor->b();
1053 
1054  IConnectableLayer* layer = m_Network->AddActivationLayer(descriptor,
1055  layerName.c_str());
1056  armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1057  layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1058 
1059  RegisterInputSlots(graph, layerIndex, layer);
1060  RegisterOutputSlots(graph, layerIndex, layer);
1061 }
1062 
1063 void Deserializer::ParseAdd(GraphPtr graph, unsigned int layerIndex)
1064 {
1065  CHECK_LAYERS(graph, 0, layerIndex);
1066  auto inputs = GetInputs(graph, layerIndex);
1067  CHECK_LOCATION();
1068  CHECK_VALID_SIZE(inputs.size(), 2);
1069 
1070  auto outputs = GetOutputs(graph, layerIndex);
1071  CHECK_VALID_SIZE(outputs.size(), 1);
1072 
1073  auto layerName = GetLayerName(graph, layerIndex);
1074  IConnectableLayer* layer = m_Network->AddAdditionLayer(layerName.c_str());
1075 
1076  armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1077  layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1078 
1079  RegisterInputSlots(graph, layerIndex, layer);
1080  RegisterOutputSlots(graph, layerIndex, layer);
1081 }
1082 
1083 void Deserializer::ParseArgMinMax(GraphPtr graph, unsigned int layerIndex)
1084 {
1085  CHECK_LAYERS(graph, 0, layerIndex);
1086  auto inputs = GetInputs(graph, layerIndex);
1087  CHECK_LOCATION();
1088  CHECK_VALID_SIZE(inputs.size(), 1);
1089 
1090  auto outputs = GetOutputs(graph, layerIndex);
1091  CHECK_VALID_SIZE(outputs.size(), 1);
1092 
1093  auto serializerLayer = graph->layers()->Get(layerIndex)->layer_as_ArgMinMaxLayer();
1094  auto serializerDescriptor = serializerLayer->descriptor();
1095 
1096  armnn::ArgMinMaxDescriptor descriptor;
1097  descriptor.m_Function = ToArgMinMaxFunction(serializerDescriptor->argMinMaxFunction());
1098  descriptor.m_Axis = serializerDescriptor->axis();
1099  auto layerName = GetLayerName(graph, layerIndex);
1100  IConnectableLayer* layer = m_Network->AddArgMinMaxLayer(descriptor, layerName.c_str());
1101 
1102  armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1103  layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1104 
1105  RegisterInputSlots(graph, layerIndex, layer);
1106  RegisterOutputSlots(graph, layerIndex, layer);
1107 }
1108 
1109 void Deserializer::ParseBatchToSpaceNd(GraphPtr graph, unsigned int layerIndex)
1110 {
1111  CHECK_LAYERS(graph, 0, layerIndex);
1112 
1113  Deserializer::TensorRawPtrVector inputs = GetInputs(graph, layerIndex);
1114  CHECK_VALID_SIZE(inputs.size(), 1);
1115 
1116  Deserializer::TensorRawPtrVector outputs = GetOutputs(graph, layerIndex);
1117  CHECK_VALID_SIZE(outputs.size(), 1);
1118 
1119  auto flatBufferDescriptor = graph->layers()->Get(layerIndex)->layer_as_BatchToSpaceNdLayer()->descriptor();
1120  auto flatBufferCrops = flatBufferDescriptor->crops();
1121  auto flatBufferBlockShape = flatBufferDescriptor->blockShape();
1122 
1123  if (flatBufferCrops->Length() % 2 != 0)
1124  {
1125  throw ParseException(boost::str(
1126  boost::format("The size of crops must be divisible by 2 %1%") % CHECK_LOCATION().AsString()));
1127  }
1128 
1129  std::vector<std::pair<unsigned int, unsigned int>> crops;
1130  crops.reserve(flatBufferCrops->Length() / 2);
1131  for (unsigned int i = 0; i < flatBufferCrops->Length() - 1; i += 2)
1132  {
1133  crops.emplace_back(flatBufferCrops->Get(i), flatBufferCrops->Get(i+1));
1134  }
1135 
1137  descriptor.m_DataLayout = ToDataLayout(flatBufferDescriptor->dataLayout());
1138  descriptor.m_BlockShape =
1139  std::vector<unsigned int>(flatBufferBlockShape->begin(), flatBufferBlockShape->end());
1140  descriptor.m_Crops = crops;
1141 
1142  auto layerName = GetLayerName(graph, layerIndex);
1143  IConnectableLayer* layer = m_Network->AddBatchToSpaceNdLayer(descriptor, layerName.c_str());
1144 
1145  armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1146  layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1147 
1148  RegisterInputSlots(graph, layerIndex, layer);
1149  RegisterOutputSlots(graph, layerIndex, layer);
1150 }
1151 
1152 void Deserializer::ParseBatchNormalization(GraphPtr graph, unsigned int layerIndex)
1153 {
1154  CHECK_LAYERS(graph, 0, layerIndex);
1155 
1156  auto inputs = GetInputs(graph, layerIndex);
1157  CHECK_VALID_SIZE(inputs.size(), 1);
1158 
1159  auto outputs = GetOutputs(graph, layerIndex);
1160  CHECK_VALID_SIZE(outputs.size(), 1);
1161  auto outputInfo = ToTensorInfo(outputs[0]);
1162 
1163  auto layerName = GetLayerName(graph, layerIndex);
1164 
1165  auto serializerLayer = graph->layers()->Get(layerIndex)->layer_as_BatchNormalizationLayer();
1166  auto serializerDescriptor = serializerLayer->descriptor();
1167 
1169  descriptor.m_Eps = serializerDescriptor->eps();
1170  descriptor.m_DataLayout = ToDataLayout(serializerDescriptor->dataLayout());
1171 
1172  armnn::ConstTensor mean = ToConstTensor(serializerLayer->mean());
1173  armnn::ConstTensor variance = ToConstTensor(serializerLayer->variance());
1174  armnn::ConstTensor beta = ToConstTensor(serializerLayer->beta());
1175  armnn::ConstTensor gamma = ToConstTensor(serializerLayer->gamma());
1176 
1177  IConnectableLayer* layer = m_Network->AddBatchNormalizationLayer(descriptor,
1178  mean,
1179  variance,
1180  beta,
1181  gamma,
1182  layerName.c_str());
1183  layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
1184 
1185  RegisterInputSlots(graph, layerIndex, layer);
1186  RegisterOutputSlots(graph, layerIndex, layer);
1187 }
1188 
1189 void Deserializer::ParseConstant(GraphPtr graph, unsigned int layerIndex)
1190 {
1191  CHECK_LAYERS(graph, 0, layerIndex);
1192  CHECK_LOCATION();
1193 
1194  auto outputs = GetOutputs(graph, layerIndex);
1195  CHECK_VALID_SIZE(outputs.size(), 1);
1196 
1197  auto layerName = GetLayerName(graph, layerIndex);
1198 
1199  auto serializerLayer = graph->layers()->Get(layerIndex)->layer_as_ConstantLayer();
1200  auto serializerInput = serializerLayer->input();
1201 
1202  armnn::ConstTensor input = ToConstTensor(serializerInput);
1203 
1204  IConnectableLayer* layer = m_Network->AddConstantLayer(input, layerName.c_str());
1205 
1206  armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1207  layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1208 
1209  RegisterOutputSlots(graph, layerIndex, layer);
1210 }
1211 
1212 void Deserializer::ParseConvolution2d(GraphPtr graph, unsigned int layerIndex)
1213 {
1214  CHECK_LAYERS(graph, 0, layerIndex);
1215  auto inputs = GetInputs(graph, layerIndex);
1216  CHECK_LOCATION();
1217  CHECK_VALID_SIZE(inputs.size(), 1);
1218 
1219  auto outputs = GetOutputs(graph, layerIndex);
1220  CHECK_VALID_SIZE(outputs.size(), 1);
1221 
1222  auto serializerLayer = graph->layers()->Get(layerIndex)->layer_as_Convolution2dLayer();
1223  auto layerName = GetLayerName(graph, layerIndex);
1224  auto serializerDescriptor = serializerLayer->descriptor();
1225 
1226  armnn::Convolution2dDescriptor descriptor;
1227  descriptor.m_PadLeft = serializerDescriptor->padLeft();
1228  descriptor.m_PadRight = serializerDescriptor->padRight();
1229  descriptor.m_PadTop = serializerDescriptor->padTop();
1230  descriptor.m_PadBottom = serializerDescriptor->padBottom();
1231  descriptor.m_StrideX = serializerDescriptor->strideX();
1232  descriptor.m_StrideY = serializerDescriptor->strideY();;
1233  descriptor.m_DilationX = serializerDescriptor->dilationX();
1234  descriptor.m_DilationY = serializerDescriptor->dilationY();;
1235  descriptor.m_BiasEnabled = serializerDescriptor->biasEnabled();;
1236  descriptor.m_DataLayout = ToDataLayout(serializerDescriptor->dataLayout());
1237 
1238  armnn::ConstTensor weights = ToConstTensor(serializerLayer->weights());
1239  armnn::ConstTensor biases;
1240 
1242  if (descriptor.m_BiasEnabled)
1243  {
1244  biases = ToConstTensor(serializerLayer->biases());
1245  optionalBiases = armnn::Optional<armnn::ConstTensor>(biases);
1246  }
1247  IConnectableLayer* layer = m_Network->AddConvolution2dLayer(descriptor,
1248  weights,
1249  optionalBiases,
1250  layerName.c_str());
1251  armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1252  layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1253 
1254  RegisterInputSlots(graph, layerIndex, layer);
1255  RegisterOutputSlots(graph, layerIndex, layer);
1256 }
1257 
1258 void Deserializer::ParseDepthToSpace(GraphPtr graph, unsigned int layerIndex)
1259 {
1260  CHECK_LAYERS(graph, 0, layerIndex);
1261 
1262  auto inputs = GetInputs(graph, layerIndex);
1263  CHECK_VALID_SIZE(inputs.size(), 1);
1264 
1265  auto outputs = GetOutputs(graph, layerIndex);
1266  CHECK_VALID_SIZE(outputs.size(), 1);
1267 
1268  auto fbDescriptor = graph->layers()->Get(layerIndex)->layer_as_DepthToSpaceLayer()->descriptor();
1269 
1270  armnn::DepthToSpaceDescriptor descriptor;
1271  descriptor.m_BlockSize = fbDescriptor->blockSize();
1272  descriptor.m_DataLayout = ToDataLayout(fbDescriptor->dataLayout());
1273 
1274  auto layerName = GetLayerName(graph, layerIndex);
1275  IConnectableLayer* layer = m_Network->AddDepthToSpaceLayer(descriptor, layerName.c_str());
1276 
1277  armnn::TensorInfo outputInfo = ToTensorInfo(outputs[0]);
1278  layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
1279 
1280  RegisterInputSlots(graph, layerIndex, layer);
1281  RegisterOutputSlots(graph, layerIndex, layer);
1282 }
1283 
1284 void Deserializer::ParseDepthwiseConvolution2d(GraphPtr graph, unsigned int layerIndex)
1285 {
1286  CHECK_LAYERS(graph, 0, layerIndex);
1287  auto inputs = GetInputs(graph, layerIndex);
1288  CHECK_LOCATION();
1289  CHECK_VALID_SIZE(inputs.size(), 1);
1290 
1291  auto outputs = GetOutputs(graph, layerIndex);
1292  CHECK_VALID_SIZE(outputs.size(), 1);
1293 
1294  auto serializerLayer = graph->layers()->Get(layerIndex)->layer_as_DepthwiseConvolution2dLayer();
1295  auto layerName = GetLayerName(graph, layerIndex);
1296  auto serializerDescriptor = serializerLayer->descriptor();
1297 
1299  descriptor.m_PadLeft = serializerDescriptor->padLeft();
1300  descriptor.m_PadRight = serializerDescriptor->padRight();
1301  descriptor.m_PadTop = serializerDescriptor->padTop();
1302  descriptor.m_PadBottom = serializerDescriptor->padBottom();
1303  descriptor.m_StrideX = serializerDescriptor->strideX();
1304  descriptor.m_StrideY = serializerDescriptor->strideY();
1305  descriptor.m_DilationX = serializerDescriptor->dilationX();
1306  descriptor.m_DilationY = serializerDescriptor->dilationY();
1307  descriptor.m_BiasEnabled = serializerDescriptor->biasEnabled();;
1308  descriptor.m_DataLayout = ToDataLayout(serializerDescriptor->dataLayout());
1309 
1310  armnn::ConstTensor weights = ToConstTensor(serializerLayer->weights());
1311  armnn::ConstTensor biases;
1312 
1314  if (descriptor.m_BiasEnabled)
1315  {
1316  biases = ToConstTensor(serializerLayer->biases());
1317  optionalBiases = armnn::Optional<armnn::ConstTensor>(biases);
1318  }
1319  IConnectableLayer* layer = m_Network->AddDepthwiseConvolution2dLayer(descriptor,
1320  weights,
1321  optionalBiases,
1322  layerName.c_str());
1323 
1324  armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1325  layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1326 
1327  RegisterInputSlots(graph, layerIndex, layer);
1328  RegisterOutputSlots(graph, layerIndex, layer);
1329 }
1330 
1331 void Deserializer::ParseDetectionPostProcess(GraphPtr graph, unsigned int layerIndex)
1332 {
1333  CHECK_LAYERS(graph, 0, layerIndex);
1334  auto inputs = GetInputs(graph, layerIndex);
1335  CHECK_LOCATION();
1336  CHECK_VALID_SIZE(inputs.size(), 2);
1337 
1338  auto outputs = GetOutputs(graph, layerIndex);
1339  CHECK_VALID_SIZE(outputs.size(), 4);
1340 
1341  auto flatBufferLayer = graph->layers()->Get(layerIndex)->layer_as_DetectionPostProcessLayer();
1342  auto layerName = GetLayerName(graph, layerIndex);
1343  auto flatBufferDescriptor = flatBufferLayer->descriptor();
1344 
1346  descriptor.m_MaxDetections = flatBufferDescriptor->maxDetections();
1347  descriptor.m_MaxClassesPerDetection = flatBufferDescriptor->maxClassesPerDetection();
1348  descriptor.m_DetectionsPerClass = flatBufferDescriptor->detectionsPerClass();
1349  descriptor.m_NmsScoreThreshold = flatBufferDescriptor->nmsScoreThreshold();
1350  descriptor.m_NmsIouThreshold = flatBufferDescriptor->nmsIouThreshold();
1351  descriptor.m_NumClasses = flatBufferDescriptor->numClasses();
1352  descriptor.m_UseRegularNms = flatBufferDescriptor->useRegularNms();
1353  descriptor.m_ScaleX = flatBufferDescriptor->scaleX();
1354  descriptor.m_ScaleY = flatBufferDescriptor->scaleY();
1355  descriptor.m_ScaleW = flatBufferDescriptor->scaleW();
1356  descriptor.m_ScaleH = flatBufferDescriptor->scaleH();
1357 
1358  armnn::ConstTensor anchors = ToConstTensor(flatBufferLayer->anchors());
1359 
1360  IConnectableLayer* layer = m_Network->AddDetectionPostProcessLayer(descriptor,
1361  anchors,
1362  layerName.c_str());
1363 
1364  for (unsigned int i = 0; i < 4; i++)
1365  {
1366  layer->GetOutputSlot(i).SetTensorInfo(ToTensorInfo(outputs[i]));
1367  }
1368 
1369  RegisterInputSlots(graph, layerIndex, layer);
1370  RegisterOutputSlots(graph, layerIndex, layer);
1371 }
1372 
1373 void Deserializer::ParseDivision(GraphPtr graph, unsigned int layerIndex)
1374 {
1375  CHECK_LAYERS(graph, 0, layerIndex);
1376  auto inputs = GetInputs(graph, layerIndex);
1377  CHECK_LOCATION();
1378  CHECK_VALID_SIZE(inputs.size(), 2);
1379 
1380  auto outputs = GetOutputs(graph, layerIndex);
1381  CHECK_VALID_SIZE(outputs.size(), 1);
1382 
1383  auto layerName = GetLayerName(graph, layerIndex);
1384  IConnectableLayer* layer = m_Network->AddDivisionLayer(layerName.c_str());
1385 
1386  armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1387  layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1388 
1389  RegisterInputSlots(graph, layerIndex, layer);
1390  RegisterOutputSlots(graph, layerIndex, layer);
1391 }
1392 
1393 void Deserializer::ParseEqual(GraphPtr graph, unsigned int layerIndex)
1394 {
1395  CHECK_LAYERS(graph, 0, layerIndex);
1396  auto inputs = GetInputs(graph, layerIndex);
1397  CHECK_LOCATION();
1398  CHECK_VALID_SIZE(inputs.size(), 2);
1399 
1400  auto outputs = GetOutputs(graph, layerIndex);
1401  CHECK_VALID_SIZE(outputs.size(), 1);
1402 
1403  auto layerName = GetLayerName(graph, layerIndex);
1405  IConnectableLayer* layer = m_Network->AddComparisonLayer(descriptor, layerName.c_str());
1406 
1407  armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1408  layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1409 
1410  RegisterInputSlots(graph, layerIndex, layer);
1411  RegisterOutputSlots(graph, layerIndex, layer);
1412 }
1413 
1414 void Deserializer::ParseGreater(GraphPtr graph, unsigned int layerIndex)
1415 {
1416  CHECK_LAYERS(graph, 0, layerIndex);
1417  auto inputs = GetInputs(graph, layerIndex);
1418  CHECK_LOCATION();
1419  CHECK_VALID_SIZE(inputs.size(), 2);
1420 
1421  auto outputs = GetOutputs(graph, layerIndex);
1422  CHECK_VALID_SIZE(outputs.size(), 1);
1423 
1424  auto layerName = GetLayerName(graph, layerIndex);
1426  IConnectableLayer* layer = m_Network->AddComparisonLayer(descriptor, layerName.c_str());
1427 
1428  armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1429  layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1430 
1431  RegisterInputSlots(graph, layerIndex, layer);
1432  RegisterOutputSlots(graph, layerIndex, layer);
1433 }
1434 
1435 void Deserializer::ParseInstanceNormalization(GraphPtr graph, unsigned int layerIndex)
1436 {
1437  CHECK_LAYERS(graph, 0, layerIndex);
1438 
1439  auto inputs = GetInputs(graph, layerIndex);
1440  CHECK_VALID_SIZE(inputs.size(), 1);
1441 
1442  auto outputs = GetOutputs(graph, layerIndex);
1443  CHECK_VALID_SIZE(outputs.size(), 1);
1444 
1445  auto fbLayer = graph->layers()->Get(layerIndex)->layer_as_InstanceNormalizationLayer();
1446  auto fbDescriptor = fbLayer->descriptor();
1447 
1449  descriptor.m_Gamma = fbDescriptor->gamma();
1450  descriptor.m_Beta = fbDescriptor->beta();
1451  descriptor.m_Eps = fbDescriptor->eps();
1452  descriptor.m_DataLayout = ToDataLayout(fbDescriptor->dataLayout());
1453 
1454  const std::string layerName = GetLayerName(graph, layerIndex);
1455  const armnn::TensorInfo outputInfo = ToTensorInfo(outputs[0]);
1456 
1457  IConnectableLayer* layer = m_Network->AddInstanceNormalizationLayer(descriptor, layerName.c_str());
1458  layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
1459 
1460  RegisterInputSlots(graph, layerIndex, layer);
1461  RegisterOutputSlots(graph, layerIndex, layer);
1462 }
1463 
1464 void Deserializer::ParseL2Normalization(GraphPtr graph, unsigned int layerIndex)
1465 {
1466  CHECK_LAYERS(graph, 0, layerIndex);
1467 
1468  auto inputs = GetInputs(graph, layerIndex);
1469  CHECK_VALID_SIZE(inputs.size(), 1);
1470 
1471  auto outputs = GetOutputs(graph, layerIndex);
1472  CHECK_VALID_SIZE(outputs.size(), 1);
1473  auto outputInfo = ToTensorInfo(outputs[0]);
1474 
1475  auto flatBufferLayer = graph->layers()->Get(layerIndex)->layer_as_L2NormalizationLayer();
1476  auto flatBufferDescriptor = flatBufferLayer->descriptor();
1477 
1478  auto layerName = GetLayerName(graph, layerIndex);
1480  descriptor.m_DataLayout = ToDataLayout(flatBufferDescriptor->dataLayout());
1481  descriptor.m_Eps = flatBufferDescriptor->eps();
1482 
1483  IConnectableLayer* layer = m_Network->AddL2NormalizationLayer(descriptor, layerName.c_str());
1484  layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
1485 
1486  RegisterInputSlots(graph, layerIndex, layer);
1487  RegisterOutputSlots(graph, layerIndex, layer);
1488 }
1489 
1490 void Deserializer::ParseLogSoftmax(GraphPtr graph, unsigned int layerIndex)
1491 {
1492  CHECK_LAYERS(graph, 0, layerIndex);
1493 
1494  Deserializer::TensorRawPtrVector inputs = GetInputs(graph, layerIndex);
1495  CHECK_VALID_SIZE(inputs.size(), 1);
1496 
1497  Deserializer::TensorRawPtrVector outputs = GetOutputs(graph, layerIndex);
1498  CHECK_VALID_SIZE(outputs.size(), 1);
1499 
1500  armnn::LogSoftmaxDescriptor descriptor;
1501  descriptor.m_Beta = graph->layers()->Get(layerIndex)->layer_as_LogSoftmaxLayer()->descriptor()->beta();
1502  descriptor.m_Axis = graph->layers()->Get(layerIndex)->layer_as_LogSoftmaxLayer()->descriptor()->axis();
1503  auto layerName = GetLayerName(graph, layerIndex);
1504 
1505  IConnectableLayer* layer = m_Network->AddLogSoftmaxLayer(descriptor, layerName.c_str());
1506 
1507  armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1508  layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1509 
1510  RegisterInputSlots(graph, layerIndex, layer);
1511  RegisterOutputSlots(graph, layerIndex, layer);
1512 }
1513 
1514 void Deserializer::ParseMinimum(GraphPtr graph, unsigned int layerIndex)
1515 {
1516  CHECK_LAYERS(graph, 0, layerIndex);
1517  auto inputs = GetInputs(graph, layerIndex);
1518  CHECK_LOCATION();
1519  CHECK_VALID_SIZE(inputs.size(), 2);
1520 
1521  auto outputs = GetOutputs(graph, layerIndex);
1522  CHECK_VALID_SIZE(outputs.size(), 1);
1523 
1524  auto layerName = GetLayerName(graph, layerIndex);
1525  IConnectableLayer* layer = m_Network->AddMinimumLayer(layerName.c_str());
1526 
1527  armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1528  layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1529 
1530  RegisterInputSlots(graph, layerIndex, layer);
1531  RegisterOutputSlots(graph, layerIndex, layer);
1532 }
1533 
1534 void Deserializer::ParseMaximum(GraphPtr graph, unsigned int layerIndex)
1535 {
1536  CHECK_LAYERS(graph, 0, layerIndex);
1537  auto inputs = GetInputs(graph, layerIndex);
1538  CHECK_LOCATION();
1539  CHECK_VALID_SIZE(inputs.size(), 2);
1540 
1541  auto outputs = GetOutputs(graph, layerIndex);
1542  CHECK_VALID_SIZE(outputs.size(), 1);
1543 
1544  auto layerName = GetLayerName(graph, layerIndex);
1545  IConnectableLayer* layer = m_Network->AddMaximumLayer(layerName.c_str());
1546 
1547  armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1548  layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1549 
1550  RegisterInputSlots(graph, layerIndex, layer);
1551  RegisterOutputSlots(graph, layerIndex, layer);
1552 }
1553 
1554 const armnnSerializer::OriginsDescriptor* GetOriginsDescriptor(const armnnSerializer::SerializedGraph* graph,
1555  unsigned int layerIndex)
1556 {
1557  auto layerType = graph->layers()->Get(layerIndex)->layer_type();
1558 
1559  switch (layerType)
1560  {
1561  case Layer::Layer_ConcatLayer:
1562  return graph->layers()->Get(layerIndex)->layer_as_ConcatLayer()->descriptor();
1563  case Layer::Layer_MergerLayer:
1564  return graph->layers()->Get(layerIndex)->layer_as_MergerLayer()->descriptor();
1565  default:
1566  throw armnn::Exception("unknown layer type, should be concat or merger");
1567  }
1568 }
1569 
1570 void Deserializer::ParseComparison(GraphPtr graph, unsigned int layerIndex)
1571 {
1572  CHECK_LAYERS(graph, 0, layerIndex);
1573  CHECK_LOCATION();
1574 
1575  auto inputs = GetInputs(graph, layerIndex);
1576  CHECK_VALID_SIZE(inputs.size(), 2);
1577 
1578  auto outputs = GetOutputs(graph, layerIndex);
1579  CHECK_VALID_SIZE(outputs.size(), 1);
1580 
1581  auto fbLayer = graph->layers()->Get(layerIndex)->layer_as_ComparisonLayer();
1582  auto fbDescriptor = fbLayer->descriptor();
1583 
1584  armnn::ComparisonDescriptor descriptor;
1585  descriptor.m_Operation = ToComparisonOperation(fbDescriptor->operation());
1586 
1587  const std::string& layerName = GetLayerName(graph, layerIndex);
1588  IConnectableLayer* layer = m_Network->AddComparisonLayer(descriptor, layerName.c_str());
1589 
1590  armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1591  layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1592 
1593  RegisterInputSlots(graph, layerIndex, layer);
1594  RegisterOutputSlots(graph, layerIndex, layer);
1595 }
1596 
1597 void Deserializer::ParseElementwiseUnary(GraphPtr graph, unsigned int layerIndex)
1598 {
1599  CHECK_LAYERS(graph, 0, layerIndex);
1600  CHECK_LOCATION();
1601 
1602  auto inputs = GetInputs(graph, layerIndex);
1603  CHECK_VALID_SIZE(inputs.size(), 1);
1604 
1605  auto outputs = GetOutputs(graph, layerIndex);
1606  CHECK_VALID_SIZE(outputs.size(), 1);
1607 
1608  auto fbLayer = graph->layers()->Get(layerIndex)->layer_as_ElementwiseUnaryLayer();
1609  auto fbDescriptor = fbLayer->descriptor();
1610 
1612  descriptor.m_Operation = ToUnaryOperation(fbDescriptor->operation());
1613 
1614  const std::string& layerName = GetLayerName(graph, layerIndex);
1615  IConnectableLayer* layer = m_Network->AddElementwiseUnaryLayer(descriptor, layerName.c_str());
1616 
1617  armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1618  layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1619 
1620  RegisterInputSlots(graph, layerIndex, layer);
1621  RegisterOutputSlots(graph, layerIndex, layer);
1622 }
1623 
1624 void Deserializer::ParseConcat(GraphPtr graph, unsigned int layerIndex)
1625 {
1626  CHECK_LAYERS(graph, 0, layerIndex);
1627  CHECK_LOCATION();
1628 
1629  auto outputs = GetOutputs(graph, layerIndex);
1630  CHECK_VALID_SIZE(outputs.size(), 1);
1631 
1632  auto layerName = GetLayerName(graph, layerIndex);
1633  auto originsDescriptor = GetOriginsDescriptor(graph, layerIndex);
1634  unsigned int numViews = originsDescriptor->numViews();
1635  unsigned int numDimensions = originsDescriptor->numDimensions();
1636 
1637  // can now check the number of inputs == number of views
1638  auto inputs = GetInputs(graph, layerIndex);
1639  CHECK_VALID_SIZE(inputs.size(), numViews);
1640 
1641  armnn::OriginsDescriptor descriptor(numViews, numDimensions);
1642  auto originsPtr = originsDescriptor->viewOrigins();
1643  for (unsigned int v = 0; v < numViews; ++v)
1644  {
1645  auto originPtr = originsPtr->Get(v);
1646  for (unsigned int d = 0; d < numDimensions; ++d)
1647  {
1648  uint32_t value = originPtr->data()->Get(d);
1649  descriptor.SetViewOriginCoord(v, d, value);
1650  }
1651  }
1652  descriptor.SetConcatAxis(originsDescriptor->concatAxis());
1653 
1654  IConnectableLayer* layer = m_Network->AddConcatLayer(descriptor, layerName.c_str());
1655  armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1656  layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1657 
1658  RegisterInputSlots(graph, layerIndex, layer);
1659  RegisterOutputSlots(graph, layerIndex, layer);
1660 }
1661 
1662 void Deserializer::ParseMultiplication(GraphPtr graph, unsigned int layerIndex)
1663 {
1664  CHECK_LAYERS(graph, 0, layerIndex);
1665  auto inputs = GetInputs(graph, layerIndex);
1666  CHECK_LOCATION();
1667  CHECK_VALID_SIZE(inputs.size(), 2);
1668 
1669  auto outputs = GetOutputs(graph, layerIndex);
1670  CHECK_VALID_SIZE(outputs.size(), 1);
1671 
1672  auto layerName = GetLayerName(graph, layerIndex);
1673  IConnectableLayer* layer = m_Network->AddMultiplicationLayer(layerName.c_str());
1674 
1675  armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1676  layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1677 
1678  RegisterInputSlots(graph, layerIndex, layer);
1679  RegisterOutputSlots(graph, layerIndex, layer);
1680 }
1681 
1682 void Deserializer::ParseFloor(GraphPtr graph, unsigned int layerIndex)
1683 {
1684  CHECK_LAYERS(graph, 0, layerIndex);
1685  CHECK_LOCATION();
1686 
1687  auto inputs = GetInputs(graph, layerIndex);
1688  CHECK_VALID_SIZE(inputs.size(), 1);
1689 
1690  auto outputs = GetOutputs(graph, layerIndex);
1691  CHECK_VALID_SIZE(outputs.size(), 1);
1692 
1693  auto layerName = GetLayerName(graph, layerIndex);
1694 
1695  armnn::IConnectableLayer* layer;
1696 
1697  layer = m_Network->AddFloorLayer(layerName.c_str());
1698 
1699  armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1700  layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1701 
1702  RegisterInputSlots(graph, layerIndex, layer);
1703  RegisterOutputSlots(graph, layerIndex, layer);
1704 }
1705 
// Deserializes a FullyConnected layer: reads the descriptor (bias enabled,
// weight-matrix transposition), the constant weights, and the optional bias
// tensor, then adds the layer to m_Network and wires its slots.
// NOTE(review): this is a doxygen text extraction; source line 1727 (the
// declaration of 'optionalBiases', used at 1731/1735 below) was a hyperlink
// and is missing from this listing — confirm against the original file.
1706 void Deserializer::ParseFullyConnected(GraphPtr graph, unsigned int layerIndex)
1707 {
1708  CHECK_LAYERS(graph, 0, layerIndex);
1709  auto inputs = GetInputs(graph, layerIndex);
1710  CHECK_LOCATION();
1711  CHECK_VALID_SIZE(inputs.size(), 1);
1712 
1713  auto outputs = GetOutputs(graph, layerIndex);
1714  CHECK_VALID_SIZE(outputs.size(), 1);
1715 
1716  auto flatBufferLayer = graph->layers()->Get(layerIndex)->layer_as_FullyConnectedLayer();
1717  auto layerName = GetLayerName(graph, layerIndex);
1718  auto flatBufferDescriptor = flatBufferLayer->descriptor();
1719 
1720  armnn::FullyConnectedDescriptor fullyConnectedDescriptor;
1721  fullyConnectedDescriptor.m_BiasEnabled = flatBufferDescriptor->biasEnabled();
1722  fullyConnectedDescriptor.m_TransposeWeightMatrix = flatBufferDescriptor->transposeWeightsMatrix();
1723 
1724  armnn::ConstTensor weightsTensor = ToConstTensor(flatBufferLayer->weights());
1725 
1726  armnn::IConnectableLayer* layer;
// The bias tensor is only present in the flatbuffer when biasEnabled() is set.
1728  if (flatBufferDescriptor->biasEnabled())
1729  {
1730  armnn::ConstTensor biasTensorData = ToConstTensor(flatBufferLayer->biases());
1731  optionalBiases = armnn::Optional<armnn::ConstTensor>(biasTensorData);
1732  }
1733  layer = m_Network->AddFullyConnectedLayer(fullyConnectedDescriptor,
1734  weightsTensor,
1735  optionalBiases,
1736  layerName.c_str());
1737 
1738  armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1739  layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1740 
1741  RegisterInputSlots(graph, layerIndex, layer);
1742  RegisterOutputSlots(graph, layerIndex, layer);
1743 }
1744 
1745 void Deserializer::ParsePad(GraphPtr graph, unsigned int layerIndex)
1746 {
1747  CHECK_LAYERS(graph, 0, layerIndex);
1748 
1749  Deserializer::TensorRawPtrVector inputs = GetInputs(graph, layerIndex);
1750  CHECK_VALID_SIZE(inputs.size(), 1);
1751 
1752  Deserializer::TensorRawPtrVector outputs = GetOutputs(graph, layerIndex);
1753  CHECK_VALID_SIZE(outputs.size(), 1);
1754 
1755  auto flatBufferDescriptor = graph->layers()->Get(layerIndex)->layer_as_PadLayer()->descriptor();
1756  auto flatBufferPadList = flatBufferDescriptor->padList();
1757  float padValue = flatBufferDescriptor->padValue();
1758 
1759  if (flatBufferPadList->Length() % 2 != 0)
1760  {
1761  throw ParseException(boost::str(
1762  boost::format("The size of the pad list must be divisible by 2 %1%") % CHECK_LOCATION().AsString()));
1763  }
1764 
1765  std::vector<std::pair<unsigned int, unsigned int>> padList;
1766  padList.reserve(flatBufferPadList->Length() / 2);
1767  for (unsigned int i = 0; i < flatBufferPadList->Length() - 1; i += 2)
1768  {
1769  padList.emplace_back(flatBufferPadList->Get(i), flatBufferPadList->Get(i+1));
1770  }
1771 
1772  armnn::PadDescriptor descriptor(padList, padValue);
1773 
1774  auto layerName = GetLayerName(graph, layerIndex);
1775  IConnectableLayer* layer = m_Network->AddPadLayer(descriptor, layerName.c_str());
1776 
1777  armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1778  layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1779 
1780  RegisterInputSlots(graph, layerIndex, layer);
1781  RegisterOutputSlots(graph, layerIndex, layer);
1782 }
1783 
1784 void Deserializer::ParsePermute(GraphPtr graph, unsigned int layerIndex)
1785 {
1786  CHECK_LAYERS(graph, 0, layerIndex);
1787 
1788  auto dimsMapping =
1789  graph->layers()->Get(layerIndex)->layer_as_PermuteLayer()->descriptor()->dimMappings();
1790 
1791  auto inputs = GetInputs(graph, layerIndex);
1792  CHECK_VALID_SIZE(inputs.size(), 1);
1793 
1794  auto outputs = GetOutputs(graph, layerIndex);
1795  CHECK_VALID_SIZE(outputs.size(), 1);
1796  auto outputInfo = ToTensorInfo(outputs[0]);
1797 
1798  auto layerName = GetLayerName(graph, layerIndex);
1799  const armnn::PermuteDescriptor descriptor(armnn::PermutationVector(dimsMapping->data(), dimsMapping->Length()));
1800 
1801  IConnectableLayer* layer = m_Network->AddPermuteLayer(descriptor, layerName.c_str());
1802  layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
1803 
1804  RegisterInputSlots(graph, layerIndex, layer);
1805  RegisterOutputSlots(graph, layerIndex, layer);
1806 }
1807 
// Converts a serialized flatbuffer pooling descriptor into an
// armnn::Pooling2dDescriptor (pool type, rounding, padding method, layout,
// padding, stride and pool size).
// NOTE(review): this is a doxygen text extraction; the hyperlinked lines are
// missing: the signature line (1808), the descriptor declaration (1812), and
// the 'desc.m_...' assignment inside each switch case (1818, 1823, 1836,
// 1841, 1854, 1859, 1872, 1877). Each case presumably assigns the matching
// enum value before 'break' — confirm against the original Deserializer.cpp.
1809  unsigned int layerIndex)
1810 {
1811  IgnoreUnused(layerIndex);
1813 
// Pooling algorithm: Average or Max.
1814  switch (pooling2dDesc->poolType())
1815  {
1816  case PoolingAlgorithm_Average:
1817  {
1819  break;
1820  }
1821  case PoolingAlgorithm_Max:
1822  {
1824  break;
1825  }
1826  default:
1827  {
1828  BOOST_ASSERT_MSG(false, "Unsupported pooling algorithm");
1829  }
1830  }
1831 
// Output shape rounding: Floor or Ceiling.
1832  switch (pooling2dDesc->outputShapeRounding())
1833  {
1834  case OutputShapeRounding_Floor:
1835  {
1837  break;
1838  }
1839  case OutputShapeRounding_Ceiling:
1840  {
1842  break;
1843  }
1844  default:
1845  {
1846  BOOST_ASSERT_MSG(false, "Unsupported output shape rounding");
1847  }
1848  }
1849 
// Padding method: Exclude or IgnoreValue.
1850  switch (pooling2dDesc->paddingMethod())
1851  {
1852  case PaddingMethod_Exclude:
1853  {
1855  break;
1856  }
1857  case PaddingMethod_IgnoreValue:
1858  {
1860  break;
1861  }
1862  default:
1863  {
1864  BOOST_ASSERT_MSG(false, "Unsupported padding method");
1865  }
1866  }
1867 
// Data layout: NCHW or NHWC.
1868  switch (pooling2dDesc->dataLayout())
1869  {
1870  case DataLayout_NCHW:
1871  {
1873  break;
1874  }
1875  case DataLayout_NHWC:
1876  {
1878  break;
1879  }
1880  default:
1881  {
1882  BOOST_ASSERT_MSG(false, "Unsupported data layout");
1883  }
1884  }
1885 
// Scalar fields copy over directly from the flatbuffer descriptor.
1886  desc.m_PadRight = pooling2dDesc->padRight();
1887  desc.m_PadLeft = pooling2dDesc->padLeft();
1888  desc.m_PadBottom = pooling2dDesc->padBottom();
1889  desc.m_PadTop = pooling2dDesc->padTop();
1890  desc.m_StrideX = pooling2dDesc->strideX();
1891  desc.m_StrideY = pooling2dDesc->strideY();
1892  desc.m_PoolWidth = pooling2dDesc->poolWidth();
1893  desc.m_PoolHeight = pooling2dDesc->poolHeight();
1894 
1895  return desc;
1896 }
1897 
1898 void Deserializer::ParsePooling2d(GraphPtr graph, unsigned int layerIndex)
1899 {
1900  CHECK_LAYERS(graph, 0, layerIndex);
1901 
1902  auto pooling2dDes = graph->layers()->Get(layerIndex)->layer_as_Pooling2dLayer()->descriptor();
1903  auto inputs = GetInputs(graph, layerIndex);
1904  CHECK_VALID_SIZE(inputs.size(), 1);
1905 
1906  auto outputs = GetOutputs(graph, layerIndex);
1907  CHECK_VALID_SIZE(outputs.size(), 1);
1908  auto outputInfo = ToTensorInfo(outputs[0]);
1909 
1910  auto pooling2dDescriptor = GetPoolingDescriptor(pooling2dDes, layerIndex);
1911  auto layerName = GetLayerName(graph, layerIndex);
1912  IConnectableLayer* layer = m_Network->AddPooling2dLayer(pooling2dDescriptor, layerName.c_str());
1913  layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
1914 
1915  RegisterInputSlots(graph, layerIndex, layer);
1916  RegisterOutputSlots(graph, layerIndex, layer);
1917 }
1918 
1919 void Deserializer::ParseQuantize(GraphPtr graph, unsigned int layerIndex)
1920 {
1921  CHECK_LAYERS(graph, 0, layerIndex);
1922 
1923  auto inputs = GetInputs(graph, layerIndex);
1924  CHECK_VALID_SIZE(inputs.size(), 1);
1925 
1926  auto outputs = GetOutputs(graph, layerIndex);
1927  CHECK_VALID_SIZE(outputs.size(), 1);
1928  auto outputInfo = ToTensorInfo(outputs[0]);
1929 
1930  auto layerName = GetLayerName(graph, layerIndex);
1931  IConnectableLayer* layer = m_Network->AddQuantizeLayer(layerName.c_str());
1932  layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
1933 
1934  RegisterInputSlots(graph, layerIndex, layer);
1935  RegisterOutputSlots(graph, layerIndex, layer);
1936 }
1937 
// Computes the output TensorInfo of a reshape: copies targetDimsIn, and if
// exactly one entry is -1 (the "stretch" dimension) replaces it so the total
// element count matches the input tensor. Throws ParseException if more than
// one -1 entry is present.
// NOTE(review): the signature line (1938) is missing from this extraction —
// presumably it takes the input armnn::TensorInfo as 'inputTensorInfo'; confirm
// against the original file.
1939  const std::vector<uint32_t>& targetDimsIn)
1940 {
1941  std::vector<unsigned int> outputDims(targetDimsIn.begin(), targetDimsIn.end());
// Comparing uint32_t against -1 finds the 0xFFFFFFFF sentinel via the usual
// unsigned conversion.
1942  const auto stretchDim = std::find(targetDimsIn.begin(), targetDimsIn.end(), -1);
1943 
1944  if (stretchDim != targetDimsIn.end())
1945  {
// At most one stretch dimension is allowed.
1946  if (std::find(std::next(stretchDim), targetDimsIn.end(), -1) != targetDimsIn.end())
1947  {
1948  throw ParseException(boost::str(
1949  boost::format("At most one component of shape can be -1 %1%") % CHECK_LOCATION().AsString()));
1950  }
1951 
// Product of all entries with initial value -1: the -1 stretch entry cancels
// the sign, leaving the product of the concrete dimensions.
1952  auto targetNumElements =
1953  boost::numeric_cast<unsigned int>(
1954  std::accumulate(targetDimsIn.begin(), targetDimsIn.end(), -1, std::multiplies<int32_t>()));
1955 
// Solve for the stretch dimension so total element count is preserved.
1956  auto stretchIndex = static_cast<size_t>(std::distance(targetDimsIn.begin(), stretchDim));
1957  outputDims[stretchIndex] = inputTensorInfo.GetNumElements() / targetNumElements;
1958  }
1959 
1960  TensorShape outputShape = TensorShape(static_cast<unsigned int>(outputDims.size()), outputDims.data());
1961 
// Keep the input's data type/quantization; only the shape changes.
1962  armnn::TensorInfo reshapeInfo = inputTensorInfo;
1963  reshapeInfo.SetShape(outputShape);
1964 
1965  return reshapeInfo;
1966 }
1967 
1968 void Deserializer::ParseReshape(GraphPtr graph, unsigned int layerIndex)
1969 {
1970  CHECK_LAYERS(graph, 0, layerIndex);
1971  auto inputs = GetInputs(graph, layerIndex);
1972 
1973  auto outputs = GetOutputs(graph, layerIndex);
1974  CHECK_VALID_SIZE(outputs.size(), 1);
1975 
1976  armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1977  armnn::TensorInfo actualOutputTensorInfo = ToTensorInfo(outputs[0]);
1978 
1979  const auto targetDims = graph->layers()->Get(layerIndex)->layer_as_ReshapeLayer()->descriptor()->targetShape();
1980  std::vector<uint32_t> outputDims(targetDims->begin(), targetDims->begin() + targetDims->size());
1981 
1982  armnn::TensorInfo reshapeOutputTensorInfo = Deserializer::OutputShapeOfReshape(inputTensorInfo, outputDims);
1983  const armnn::TensorShape& reshapeOutputTensorShape = reshapeOutputTensorInfo.GetShape();
1984 
1985  const std::vector<uint32_t> expectedDims(outputs[0]->dimensions()->begin(),
1986  outputs[0]->dimensions()->begin() + outputs[0]->dimensions()->size());
1987 
1988  if (inputs.size() > 1 && !CheckShape(reshapeOutputTensorShape, expectedDims))
1989  {
1990  std::stringstream ss;
1991  ss << "New shape defined in reshape parameters "
1992  << reshapeOutputTensorShape
1993  << " does not equal output shape "
1994  << actualOutputTensorInfo.GetShape()
1995  << ": "
1996  << CHECK_LOCATION().AsString();
1997  throw ParseException(ss.str());
1998  }
1999 
2000  armnn::ReshapeDescriptor reshapeDesc;
2001  reshapeDesc.m_TargetShape = reshapeOutputTensorShape;
2002 
2003  auto layerName = GetLayerName(graph, layerIndex);
2004  IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
2005  layer->GetOutputSlot(0).SetTensorInfo(reshapeOutputTensorInfo);
2006 
2007  RegisterInputSlots(graph, layerIndex, layer);
2008  RegisterOutputSlots(graph, layerIndex, layer);
2009 }
2010 
2011 void Deserializer::ParseResize(GraphPtr graph, unsigned int layerIndex)
2012 {
2013  CHECK_LAYERS(graph, 0, layerIndex);
2014 
2015  Deserializer::TensorRawPtrVector inputs = GetInputs(graph, layerIndex);
2016  CHECK_VALID_SIZE(inputs.size(), 1);
2017 
2018  Deserializer::TensorRawPtrVector outputs = GetOutputs(graph, layerIndex);
2019  CHECK_VALID_SIZE(outputs.size(), 1);
2020 
2021  auto flatBufferDescriptor = graph->layers()->Get(layerIndex)->layer_as_ResizeLayer()->descriptor();
2022 
2023  armnn::ResizeDescriptor descriptor;
2024  descriptor.m_TargetWidth = flatBufferDescriptor->targetWidth();
2025  descriptor.m_TargetHeight = flatBufferDescriptor->targetHeight();
2026  descriptor.m_Method = ToResizeMethod(flatBufferDescriptor->method());
2027  descriptor.m_DataLayout = ToDataLayout(flatBufferDescriptor->dataLayout());
2028 
2029  auto layerName = GetLayerName(graph, layerIndex);
2030  IConnectableLayer* layer = m_Network->AddResizeLayer(descriptor, layerName.c_str());
2031 
2032  armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
2033  layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2034 
2035  RegisterInputSlots(graph, layerIndex, layer);
2036  RegisterOutputSlots(graph, layerIndex, layer);
2037 }
2038 
// Deserializes a legacy ResizeBilinear layer by mapping it onto the unified
// Resize layer (AddResizeLayer below).
// NOTE(review): source line 2054 is missing from this extraction — presumably
// it sets 'descriptor.m_Method' (to the bilinear method) before the data
// layout assignment; confirm against the original Deserializer.cpp.
2039 void Deserializer::ParseResizeBilinear(GraphPtr graph, unsigned int layerIndex)
2040 {
2041  CHECK_LAYERS(graph, 0, layerIndex);
2042 
2043  Deserializer::TensorRawPtrVector inputs = GetInputs(graph, layerIndex);
2044  CHECK_VALID_SIZE(inputs.size(), 1);
2045 
2046  Deserializer::TensorRawPtrVector outputs = GetOutputs(graph, layerIndex);
2047  CHECK_VALID_SIZE(outputs.size(), 1);
2048 
2049  auto flatBufferDescriptor = graph->layers()->Get(layerIndex)->layer_as_ResizeBilinearLayer()->descriptor();
2050 
2051  armnn::ResizeDescriptor descriptor;
2052  descriptor.m_TargetWidth = flatBufferDescriptor->targetWidth();
2053  descriptor.m_TargetHeight = flatBufferDescriptor->targetHeight();
2055  descriptor.m_DataLayout = ToDataLayout(flatBufferDescriptor->dataLayout());
2056 
2057  auto layerName = GetLayerName(graph, layerIndex);
2058  IConnectableLayer* layer = m_Network->AddResizeLayer(descriptor, layerName.c_str());
2059 
2060  armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
2061  layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2062 
2063  RegisterInputSlots(graph, layerIndex, layer);
2064  RegisterOutputSlots(graph, layerIndex, layer);
2065 }
2066 
2067 void Deserializer::ParseSoftmax(GraphPtr graph, unsigned int layerIndex)
2068 {
2069  CHECK_LAYERS(graph, 0, layerIndex);
2070 
2071  Deserializer::TensorRawPtrVector inputs = GetInputs(graph, layerIndex);
2072  CHECK_VALID_SIZE(inputs.size(), 1);
2073 
2074  Deserializer::TensorRawPtrVector outputs = GetOutputs(graph, layerIndex);
2075  CHECK_VALID_SIZE(outputs.size(), 1);
2076 
2077  armnn::SoftmaxDescriptor descriptor;
2078  descriptor.m_Beta = graph->layers()->Get(layerIndex)->layer_as_SoftmaxLayer()->descriptor()->beta();
2079  auto layerName = GetLayerName(graph, layerIndex);
2080 
2081  IConnectableLayer* layer = m_Network->AddSoftmaxLayer(descriptor, layerName.c_str());
2082 
2083  armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
2084  layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2085 
2086  RegisterInputSlots(graph, layerIndex, layer);
2087  RegisterOutputSlots(graph, layerIndex, layer);
2088 }
2089 
// Deserializes a SpaceToBatchNd layer: block shape and per-dimension
// (before, after) pad pairs come from the flatbuffer descriptor.
// NOTE(review): source line 2117 is missing from this extraction — presumably
// the declaration of 'descriptor' (a SpaceToBatchNd descriptor) assigned to
// below; confirm against the original Deserializer.cpp.
// NOTE(review): 'flatBufferPadList->Length() - 1' is unsigned arithmetic; an
// empty pad list (length 0, which passes the %2 check) would wrap the loop
// bound to UINT_MAX — verify whether an empty pad list can occur here.
2090 void Deserializer::ParseSpaceToBatchNd(GraphPtr graph, unsigned int layerIndex)
2091 {
2092  CHECK_LAYERS(graph, 0, layerIndex);
2093 
2094  Deserializer::TensorRawPtrVector inputs = GetInputs(graph, layerIndex);
2095  CHECK_VALID_SIZE(inputs.size(), 1);
2096 
2097  Deserializer::TensorRawPtrVector outputs = GetOutputs(graph, layerIndex);
2098  CHECK_VALID_SIZE(outputs.size(), 1);
2099 
2100  auto flatBufferDescriptor = graph->layers()->Get(layerIndex)->layer_as_SpaceToBatchNdLayer()->descriptor();
2101  auto flatBufferPadList = flatBufferDescriptor->padList();
2102  auto flatBufferBlockShape = flatBufferDescriptor->blockShape();
2103 
// Pad entries are serialized flat as (before, after) pairs, so the list
// length must be even.
2104  if (flatBufferPadList->Length() % 2 != 0)
2105  {
2106  throw ParseException(boost::str(
2107  boost::format("The size of the pad list must be divisible by 2 %1%") % CHECK_LOCATION().AsString()));
2108  }
2109 
2110  std::vector<std::pair<unsigned int, unsigned int>> padList;
2111  padList.reserve(flatBufferPadList->Length() / 2);
2112  for (unsigned int i = 0; i < flatBufferPadList->Length() - 1; i += 2)
2113  {
2114  padList.emplace_back(flatBufferPadList->Get(i), flatBufferPadList->Get(i+1));
2115  }
2116 
2118  descriptor.m_DataLayout = ToDataLayout(flatBufferDescriptor->dataLayout());
2119  descriptor.m_BlockShape =
2120  std::vector<unsigned int>(flatBufferBlockShape->begin(), flatBufferBlockShape->end());
2121  descriptor.m_PadList = padList;
2122 
2123  auto layerName = GetLayerName(graph, layerIndex);
2124  IConnectableLayer* layer = m_Network->AddSpaceToBatchNdLayer(descriptor, layerName.c_str());
2125 
2126  armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
2127  layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2128 
2129  RegisterInputSlots(graph, layerIndex, layer);
2130  RegisterOutputSlots(graph, layerIndex, layer);
2131 }
2132 
2133 void Deserializer::ParseSpaceToDepth(GraphPtr graph, unsigned int layerIndex)
2134 {
2135  CHECK_LAYERS(graph, 0, layerIndex);
2136 
2137  Deserializer::TensorRawPtrVector inputs = GetInputs(graph, layerIndex);
2138  CHECK_VALID_SIZE(inputs.size(), 1);
2139 
2140  Deserializer::TensorRawPtrVector outputs = GetOutputs(graph, layerIndex);
2141  CHECK_VALID_SIZE(outputs.size(), 1);
2142 
2143  auto flatBufferDescriptor = graph->layers()->Get(layerIndex)->layer_as_SpaceToDepthLayer()->descriptor();
2144 
2145  armnn::SpaceToDepthDescriptor descriptor;
2146  descriptor.m_BlockSize = flatBufferDescriptor->blockSize();
2147  descriptor.m_DataLayout = ToDataLayout(flatBufferDescriptor->dataLayout());
2148 
2149  auto layerName = GetLayerName(graph, layerIndex);
2150  IConnectableLayer* layer = m_Network->AddSpaceToDepthLayer(descriptor, layerName.c_str());
2151 
2152  armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
2153  layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2154 
2155  RegisterInputSlots(graph, layerIndex, layer);
2156  RegisterOutputSlots(graph, layerIndex, layer);
2157 }
2158 
// Converts a serialized flatbuffer normalization descriptor into an
// armnn::NormalizationDescriptor (channel type, method, layout, and the
// alpha/beta/k/normSize scalars).
// NOTE(review): this is a doxygen text extraction; the hyperlinked lines are
// missing: the signature's first line (2159), the descriptor declaration
// (2164), and the 'desc.m_...' assignment inside each switch case (2170,
// 2175, 2188, 2193, 2206, 2211). Each case presumably assigns the matching
// enum value before 'break' — confirm against the original Deserializer.cpp.
2160  Deserializer::NormalizationDescriptorPtr normalizationDescriptor,
2161  unsigned int layerIndex)
2162 {
2163  IgnoreUnused(layerIndex);
2165 
// Normalization channel type: Across or Within.
2166  switch (normalizationDescriptor->normChannelType())
2167  {
2168  case NormalizationAlgorithmChannel_Across:
2169  {
2171  break;
2172  }
2173  case NormalizationAlgorithmChannel_Within:
2174  {
2176  break;
2177  }
2178  default:
2179  {
2180  BOOST_ASSERT_MSG(false, "Unsupported normalization channel type");
2181  }
2182  }
2183 
// Normalization method: LocalBrightness or LocalContrast.
2184  switch (normalizationDescriptor->normMethodType())
2185  {
2186  case NormalizationAlgorithmMethod_LocalBrightness:
2187  {
2189  break;
2190  }
2191  case NormalizationAlgorithmMethod_LocalContrast:
2192  {
2194  break;
2195  }
2196  default:
2197  {
2198  BOOST_ASSERT_MSG(false, "Unsupported normalization method type");
2199  }
2200  }
2201 
// Data layout: NCHW or NHWC.
2202  switch (normalizationDescriptor->dataLayout())
2203  {
2204  case DataLayout_NCHW:
2205  {
2207  break;
2208  }
2209  case DataLayout_NHWC:
2210  {
2212  break;
2213  }
2214  default:
2215  {
2216  BOOST_ASSERT_MSG(false, "Unsupported data layout");
2217  }
2218  }
2219 
// Scalar fields copy over directly.
2220  desc.m_Alpha = normalizationDescriptor->alpha();
2221  desc.m_Beta = normalizationDescriptor->beta();
2222  desc.m_K = normalizationDescriptor->k();
2223  desc.m_NormSize = normalizationDescriptor->normSize();
2224 
2225  return desc;
2226 }
2227 
2228 void Deserializer::ParseNormalization(GraphPtr graph, unsigned int layerIndex)
2229 {
2230  CHECK_LAYERS(graph, 0, layerIndex);
2231 
2232  auto normalizationDes = graph->layers()->Get(layerIndex)->layer_as_NormalizationLayer()->descriptor();
2233 
2234  Deserializer::TensorRawPtrVector inputs = GetInputs(graph, layerIndex);
2235  CHECK_VALID_SIZE(inputs.size(), 1);
2236 
2237  Deserializer::TensorRawPtrVector outputs = GetOutputs(graph, layerIndex);
2238  CHECK_VALID_SIZE(outputs.size(), 1);
2239 
2240  auto outputInfo = ToTensorInfo(outputs[0]);
2241 
2242  auto normalizationDescriptor = GetNormalizationDescriptor(normalizationDes, layerIndex);
2243  auto layerName = GetLayerName(graph, layerIndex);
2244 
2245  IConnectableLayer* layer = m_Network->AddNormalizationLayer(normalizationDescriptor, layerName.c_str());
2246  layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
2247 
2248  RegisterInputSlots(graph, layerIndex, layer);
2249  RegisterOutputSlots(graph, layerIndex, layer);
2250 }
2251 
// Deserializes a legacy Rsqrt layer by mapping it onto the unified
// ElementwiseUnary layer (AddElementwiseUnaryLayer below).
// NOTE(review): source line 2264 is missing from this extraction — presumably
// it constructs 'descriptor' (an elementwise-unary descriptor selecting the
// rsqrt operation); confirm against the original Deserializer.cpp.
2252 void Deserializer::ParseRsqrt(GraphPtr graph, unsigned int layerIndex)
2253 {
2254  CHECK_LAYERS(graph, 0, layerIndex);
2255  auto inputs = GetInputs(graph, layerIndex);
2256  CHECK_LOCATION();
2257  CHECK_VALID_SIZE(inputs.size(), 1);
2258 
2259  auto outputs = GetOutputs(graph, layerIndex);
2260  CHECK_VALID_SIZE(outputs.size(), 1);
2261 
2262  auto layerName = GetLayerName(graph, layerIndex);
2263 
2265  IConnectableLayer* layer = m_Network->AddElementwiseUnaryLayer(descriptor, layerName.c_str());
2266  armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
2267  layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2268 
2269  RegisterInputSlots(graph, layerIndex, layer);
2270  RegisterOutputSlots(graph, layerIndex, layer);
2271 }
2272 
2273 void Deserializer::ParseSlice(GraphPtr graph, unsigned int layerIndex)
2274 {
2275  CHECK_LAYERS(graph, 0, layerIndex);
2276 
2277  auto inputs = GetInputs(graph, layerIndex);
2278  CHECK_VALID_SIZE(inputs.size(), 1);
2279 
2280  auto outputs = GetOutputs(graph, layerIndex);
2281  CHECK_VALID_SIZE(outputs.size(), 1);
2282 
2283  auto fbDescriptor = graph->layers()->Get(layerIndex)->layer_as_SliceLayer()->descriptor();
2284 
2285  auto fbBegin = fbDescriptor->begin();
2286  auto fbSize = fbDescriptor->size();
2287 
2288  if (fbBegin->Length() != fbSize->Length())
2289  {
2290  throw ParseException(boost::str(
2291  boost::format("Begin and size descriptors must have the same length %1%") % CHECK_LOCATION().AsString()));
2292  }
2293 
2294  armnn::SliceDescriptor descriptor;
2295  descriptor.m_Begin.insert(descriptor.m_Begin.end(), fbBegin->begin(), fbBegin->end());
2296  descriptor.m_Size.insert(descriptor.m_Size.end(), fbSize->begin(), fbSize->end());
2297 
2298  auto layerName = GetLayerName(graph, layerIndex);
2299  IConnectableLayer* layer = m_Network->AddSliceLayer(descriptor, layerName.c_str());
2300 
2301  armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
2302  layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2303 
2304  RegisterInputSlots(graph, layerIndex, layer);
2305  RegisterOutputSlots(graph, layerIndex, layer);
2306 }
2307 
2308 void Deserializer::ParseStridedSlice(GraphPtr graph, unsigned int layerIndex)
2309 {
2310  CHECK_LAYERS(graph, 0, layerIndex);
2311 
2312  Deserializer::TensorRawPtrVector inputs = GetInputs(graph, layerIndex);
2313  CHECK_VALID_SIZE(inputs.size(), 1);
2314 
2315  Deserializer::TensorRawPtrVector outputs = GetOutputs(graph, layerIndex);
2316  CHECK_VALID_SIZE(outputs.size(), 1);
2317 
2318  auto flatBufferDescriptor = graph->layers()->Get(layerIndex)->layer_as_StridedSliceLayer()->descriptor();
2319 
2320  auto flatBufferBegin = flatBufferDescriptor->begin();
2321  auto flatBufferEnd = flatBufferDescriptor->end();
2322  auto flatBufferStride = flatBufferDescriptor->stride();
2323 
2324  if (!(flatBufferBegin->Length() == flatBufferEnd->Length() &&
2325  flatBufferBegin->Length() == flatBufferStride->Length()))
2326  {
2327  throw ParseException(boost::str(
2328  boost::format("The size of the begin, end, and stride must be equal %1%") % CHECK_LOCATION().AsString()));
2329  }
2330 
2331  std::vector<int> begin(flatBufferBegin->begin(), flatBufferBegin->end());
2332  std::vector<int> end(flatBufferEnd->begin(), flatBufferEnd->end());
2333  std::vector<int> stride(flatBufferStride->begin(), flatBufferStride->end());
2334 
2335  armnn::StridedSliceDescriptor descriptor(begin, end, stride);
2336  descriptor.m_BeginMask = flatBufferDescriptor->beginMask();
2337  descriptor.m_EndMask = flatBufferDescriptor->endMask();
2338  descriptor.m_ShrinkAxisMask = flatBufferDescriptor->shrinkAxisMask();
2339  descriptor.m_EllipsisMask = flatBufferDescriptor->ellipsisMask();
2340  descriptor.m_NewAxisMask = flatBufferDescriptor->newAxisMask();
2341  descriptor.m_DataLayout = ToDataLayout(flatBufferDescriptor->dataLayout());
2342 
2343  auto layerName = GetLayerName(graph, layerIndex);
2344  IConnectableLayer* layer = m_Network->AddStridedSliceLayer(descriptor, layerName.c_str());
2345 
2346  armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
2347  layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2348 
2349  RegisterInputSlots(graph, layerIndex, layer);
2350  RegisterOutputSlots(graph, layerIndex, layer);
2351 }
2352 
2353 void Deserializer::ParseSubtraction(GraphPtr graph, unsigned int layerIndex)
2354 {
2355  CHECK_LAYERS(graph, 0, layerIndex);
2356  auto inputs = GetInputs(graph, layerIndex);
2357  CHECK_LOCATION();
2358  CHECK_VALID_SIZE(inputs.size(), 2);
2359 
2360  auto outputs = GetOutputs(graph, layerIndex);
2361  CHECK_VALID_SIZE(outputs.size(), 1);
2362 
2363  auto layerName = GetLayerName(graph, layerIndex);
2364  IConnectableLayer* layer = m_Network->AddSubtractionLayer(layerName.c_str());
2365 
2366  armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
2367  layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2368 
2369  RegisterInputSlots(graph, layerIndex, layer);
2370  RegisterOutputSlots(graph, layerIndex, layer);
2371 }
2372 
2373 void Deserializer::ParseGather(GraphPtr graph, unsigned int layerIndex)
2374 {
2375  CHECK_LAYERS(graph, 0, layerIndex);
2376 
2377  Deserializer::TensorRawPtrVector inputs = GetInputs(graph, layerIndex);
2378  CHECK_VALID_SIZE(inputs.size(), 2);
2379 
2380  Deserializer::TensorRawPtrVector outputs = GetOutputs(graph, layerIndex);
2381  CHECK_VALID_SIZE(outputs.size(), 1);
2382 
2383  auto layerName = GetLayerName(graph, layerIndex);
2384  IConnectableLayer* layer = m_Network->AddGatherLayer(layerName.c_str());
2385 
2386  armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
2387  layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2388 
2389  RegisterInputSlots(graph, layerIndex, layer);
2390  RegisterOutputSlots(graph, layerIndex, layer);
2391 }
2392 
2393 void Deserializer::ParseMean(GraphPtr graph, unsigned int layerIndex)
2394 {
2395  CHECK_LAYERS(graph, 0, layerIndex);
2396 
2397  Deserializer::TensorRawPtrVector inputs = GetInputs(graph, layerIndex);
2398  CHECK_VALID_SIZE(inputs.size(), 1);
2399 
2400  Deserializer::TensorRawPtrVector outputs = GetOutputs(graph, layerIndex);
2401  CHECK_VALID_SIZE(outputs.size(), 1);
2402 
2403  auto flatBufferDescriptor = graph->layers()->Get(layerIndex)->layer_as_MeanLayer()->descriptor();
2404  auto flatBufferAxis = flatBufferDescriptor->axis();
2405  auto flatBufferKeepDims = flatBufferDescriptor->keepDims();
2406 
2407  armnn::MeanDescriptor descriptor;
2408  descriptor.m_Axis = std::vector<unsigned int>(flatBufferAxis->begin(), flatBufferAxis->end());
2409  descriptor.m_KeepDims = flatBufferKeepDims;
2410 
2411  auto layerName = GetLayerName(graph, layerIndex);
2412  IConnectableLayer* layer = m_Network->AddMeanLayer(descriptor, layerName.c_str());
2413 
2414  armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
2415  layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2416 
2417  RegisterInputSlots(graph, layerIndex, layer);
2418  RegisterOutputSlots(graph, layerIndex, layer);
2419 }
2420 
2421 void Deserializer::ParseSplitter(GraphPtr graph, unsigned int layerIndex)
2422 {
2423  CHECK_LAYERS(graph, 0, layerIndex);
2424 
2425  Deserializer::TensorRawPtrVector inputs = GetInputs(graph, layerIndex);
2426  CHECK_VALID_SIZE(inputs.size(), 1);
2427 
2428  Deserializer::TensorRawPtrVector outputs = GetOutputs(graph, layerIndex);
2429 
2430  auto flatBufferViewsDescriptor = graph->layers()->Get(layerIndex)->layer_as_SplitterLayer()->descriptor();
2431  auto flatBufferViewSizes = flatBufferViewsDescriptor->viewSizes();
2432  auto flatBufferOriginsDescriptor = flatBufferViewsDescriptor->origins();
2433  auto flatBufferViewOrigins = flatBufferOriginsDescriptor->viewOrigins();
2434  uint32_t numViews = flatBufferOriginsDescriptor->numViews();
2435  uint32_t numDimensions = flatBufferOriginsDescriptor->numDimensions();
2436 
2437  // Check numViews and numDimensions corresponds to the ones already serialized ...
2438  // numViews == flatBufferViewSizes.size();
2439  // foreach: numDimensions == flatBufferViewSizes[x].size();
2440 
2441  armnn::ViewsDescriptor viewsDescriptor(numViews, numDimensions);
2442  for(unsigned int vIdx = 0; vIdx < numViews; ++vIdx)
2443  {
2444  for (unsigned int dIdx = 0; dIdx < numDimensions; ++dIdx)
2445  {
2446  viewsDescriptor.SetViewSize(vIdx, dIdx, flatBufferViewSizes->Get(vIdx)->data()->Get(dIdx));
2447  viewsDescriptor.SetViewOriginCoord(vIdx, dIdx, flatBufferViewOrigins->Get(vIdx)->data()->Get(dIdx));
2448  }
2449  }
2450 
2451  auto layerName = GetLayerName(graph, layerIndex);
2452  IConnectableLayer* layer = m_Network->AddSplitterLayer(viewsDescriptor, layerName.c_str());
2453 
2454  // I could have as many outputs as views ...
2455  for(unsigned int vIdx = 0; vIdx < numViews; ++vIdx)
2456  {
2457  armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[vIdx]);
2458  layer->GetOutputSlot(vIdx).SetTensorInfo(outputTensorInfo);
2459  }
2460 
2461  RegisterInputSlots(graph, layerIndex, layer);
2462  RegisterOutputSlots(graph, layerIndex, layer);
2463 }
2464 
// Converts a serialized flatbuffer LSTM descriptor into an
// armnn::LstmDescriptor (activation function, clipping thresholds and the
// CIFG/peephole/projection/layer-norm feature flags).
// NOTE(review): the signature line (2465) is missing from this extraction —
// presumably it takes the flatbuffer descriptor as 'lstmDescriptor'; confirm
// against the original Deserializer.cpp.
2466 {
2467  armnn::LstmDescriptor desc;
2468 
2469  desc.m_ActivationFunc = lstmDescriptor->activationFunc();
2470  desc.m_ClippingThresCell = lstmDescriptor->clippingThresCell();
2471  desc.m_ClippingThresProj = lstmDescriptor->clippingThresProj();
2472  desc.m_CifgEnabled = lstmDescriptor->cifgEnabled();
2473  desc.m_PeepholeEnabled = lstmDescriptor->peepholeEnabled();
2474  desc.m_ProjectionEnabled = lstmDescriptor->projectionEnabled();
2475  desc.m_LayerNormEnabled = lstmDescriptor->layerNormEnabled();
2476 
2477  return desc;
2478 }
2479 
2480 void Deserializer::ParseLstm(GraphPtr graph, unsigned int layerIndex)
2481 {
2482  CHECK_LAYERS(graph, 0, layerIndex);
2483 
2484  auto inputs = GetInputs(graph, layerIndex);
2485  CHECK_VALID_SIZE(inputs.size(), 3);
2486 
2487  auto outputs = GetOutputs(graph, layerIndex);
2488  CHECK_VALID_SIZE(outputs.size(), 4);
2489 
2490  auto flatBufferLayer = graph->layers()->Get(layerIndex)->layer_as_LstmLayer();
2491  auto layerName = GetLayerName(graph, layerIndex);
2492  auto flatBufferDescriptor = flatBufferLayer->descriptor();
2493  auto flatBufferInputParams = flatBufferLayer->inputParams();
2494 
2495  auto lstmDescriptor = GetLstmDescriptor(flatBufferDescriptor);
2496 
2497  armnn::LstmInputParams lstmInputParams;
2498 
2499  armnn::ConstTensor inputToForgetWeights = ToConstTensor(flatBufferInputParams->inputToForgetWeights());
2500  armnn::ConstTensor inputToCellWeights = ToConstTensor(flatBufferInputParams->inputToCellWeights());
2501  armnn::ConstTensor inputToOutputWeights = ToConstTensor(flatBufferInputParams->inputToOutputWeights());
2502  armnn::ConstTensor recurrentToForgetWeights = ToConstTensor(flatBufferInputParams->recurrentToForgetWeights());
2503  armnn::ConstTensor recurrentToCellWeights = ToConstTensor(flatBufferInputParams->recurrentToCellWeights());
2504  armnn::ConstTensor recurrentToOutputWeights = ToConstTensor(flatBufferInputParams->recurrentToOutputWeights());
2505  armnn::ConstTensor forgetGateBias = ToConstTensor(flatBufferInputParams->forgetGateBias());
2506  armnn::ConstTensor cellBias = ToConstTensor(flatBufferInputParams->cellBias());
2507  armnn::ConstTensor outputGateBias = ToConstTensor(flatBufferInputParams->outputGateBias());
2508 
2509  lstmInputParams.m_InputToForgetWeights = &inputToForgetWeights;
2510  lstmInputParams.m_InputToCellWeights = &inputToCellWeights;
2511  lstmInputParams.m_InputToOutputWeights = &inputToOutputWeights;
2512  lstmInputParams.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
2513  lstmInputParams.m_RecurrentToCellWeights = &recurrentToCellWeights;
2514  lstmInputParams.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
2515  lstmInputParams.m_ForgetGateBias = &forgetGateBias;
2516  lstmInputParams.m_CellBias = &cellBias;
2517  lstmInputParams.m_OutputGateBias = &outputGateBias;
2518 
2519  armnn::ConstTensor inputToInputWeights;
2520  armnn::ConstTensor recurrentToInputWeights;
2521  armnn::ConstTensor cellToInputWeights;
2522  armnn::ConstTensor inputGateBias;
2523  if (!lstmDescriptor.m_CifgEnabled)
2524  {
2525  inputToInputWeights = ToConstTensor(flatBufferInputParams->inputToInputWeights());
2526  recurrentToInputWeights = ToConstTensor(flatBufferInputParams->recurrentToInputWeights());
2527  cellToInputWeights = ToConstTensor(flatBufferInputParams->cellToInputWeights());
2528  inputGateBias = ToConstTensor(flatBufferInputParams->inputGateBias());
2529 
2530  lstmInputParams.m_InputToInputWeights = &inputToInputWeights;
2531  lstmInputParams.m_RecurrentToInputWeights = &recurrentToInputWeights;
2532  lstmInputParams.m_CellToInputWeights = &cellToInputWeights;
2533  lstmInputParams.m_InputGateBias = &inputGateBias;
2534  }
2535 
2536  armnn::ConstTensor projectionWeights;
2537  armnn::ConstTensor projectionBias;
2538  if (lstmDescriptor.m_ProjectionEnabled)
2539  {
2540  projectionWeights = ToConstTensor(flatBufferInputParams->projectionWeights());
2541  projectionBias = ToConstTensor(flatBufferInputParams->projectionBias());
2542 
2543  lstmInputParams.m_ProjectionWeights = &projectionWeights;
2544  lstmInputParams.m_ProjectionBias = &projectionBias;
2545  }
2546 
2547  armnn::ConstTensor cellToForgetWeights;
2548  armnn::ConstTensor cellToOutputWeights;
2549  if (lstmDescriptor.m_PeepholeEnabled)
2550  {
2551  cellToForgetWeights = ToConstTensor(flatBufferInputParams->cellToForgetWeights());
2552  cellToOutputWeights = ToConstTensor(flatBufferInputParams->cellToOutputWeights());
2553 
2554  lstmInputParams.m_CellToForgetWeights = &cellToForgetWeights;
2555  lstmInputParams.m_CellToOutputWeights = &cellToOutputWeights;
2556  }
2557 
2558  armnn::ConstTensor inputLayerNormWeights;
2559  armnn::ConstTensor forgetLayerNormWeights;
2560  armnn::ConstTensor cellLayerNormWeights;
2561  armnn::ConstTensor outputLayerNormWeights;
2562  if (lstmDescriptor.m_LayerNormEnabled)
2563  {
2564  if (!lstmDescriptor.m_CifgEnabled)
2565  {
2566  inputLayerNormWeights = ToConstTensor(flatBufferInputParams->inputLayerNormWeights());
2567  lstmInputParams.m_InputLayerNormWeights = &inputLayerNormWeights;
2568  }
2569  forgetLayerNormWeights = ToConstTensor(flatBufferInputParams->forgetLayerNormWeights());
2570  cellLayerNormWeights = ToConstTensor(flatBufferInputParams->cellLayerNormWeights());
2571  outputLayerNormWeights = ToConstTensor(flatBufferInputParams->outputLayerNormWeights());
2572 
2573  lstmInputParams.m_ForgetLayerNormWeights = &forgetLayerNormWeights;
2574  lstmInputParams.m_CellLayerNormWeights = &cellLayerNormWeights;
2575  lstmInputParams.m_OutputLayerNormWeights = &outputLayerNormWeights;
2576  }
2577 
2578  IConnectableLayer* layer = m_Network->AddLstmLayer(lstmDescriptor, lstmInputParams, layerName.c_str());
2579 
2580  armnn::TensorInfo outputTensorInfo1 = ToTensorInfo(outputs[0]);
2581  layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo1);
2582 
2583  armnn::TensorInfo outputTensorInfo2 = ToTensorInfo(outputs[1]);
2584  layer->GetOutputSlot(1).SetTensorInfo(outputTensorInfo2);
2585 
2586  armnn::TensorInfo outputTensorInfo3 = ToTensorInfo(outputs[2]);
2587  layer->GetOutputSlot(2).SetTensorInfo(outputTensorInfo3);
2588 
2589  armnn::TensorInfo outputTensorInfo4 = ToTensorInfo(outputs[3]);
2590  layer->GetOutputSlot(3).SetTensorInfo(outputTensorInfo4);
2591 
2592  RegisterInputSlots(graph, layerIndex, layer);
2593  RegisterOutputSlots(graph, layerIndex, layer);
2594 }
2595 
2596 void Deserializer::ParseQuantizedLstm(GraphPtr graph, unsigned int layerIndex)
2597 {
2598  CHECK_LAYERS(graph, 0, layerIndex);
2599 
2600  auto inputs = GetInputs(graph, layerIndex);
2601  CHECK_VALID_SIZE(inputs.size(), 3);
2602 
2603  auto outputs = GetOutputs(graph, layerIndex);
2604  CHECK_VALID_SIZE(outputs.size(), 2);
2605 
2606  auto flatBufferLayer = graph->layers()->Get(layerIndex)->layer_as_QuantizedLstmLayer();
2607  auto layerName = GetLayerName(graph, layerIndex);
2608  auto flatBufferInputParams = flatBufferLayer->inputParams();
2609 
2610  armnn::QuantizedLstmInputParams lstmInputParams;
2611 
2612  armnn::ConstTensor inputToInputWeights = ToConstTensor(flatBufferInputParams->inputToInputWeights());
2613  armnn::ConstTensor inputToForgetWeights = ToConstTensor(flatBufferInputParams->inputToForgetWeights());
2614  armnn::ConstTensor inputToCellWeights = ToConstTensor(flatBufferInputParams->inputToCellWeights());
2615  armnn::ConstTensor inputToOutputWeights = ToConstTensor(flatBufferInputParams->inputToOutputWeights());
2616  armnn::ConstTensor recurrentToInputWeights = ToConstTensor(flatBufferInputParams->recurrentToInputWeights());
2617  armnn::ConstTensor recurrentToForgetWeights = ToConstTensor(flatBufferInputParams->recurrentToForgetWeights());
2618  armnn::ConstTensor recurrentToCellWeights = ToConstTensor(flatBufferInputParams->recurrentToCellWeights());
2619  armnn::ConstTensor recurrentToOutputWeights = ToConstTensor(flatBufferInputParams->recurrentToOutputWeights());
2620  armnn::ConstTensor inputGateBias = ToConstTensor(flatBufferInputParams->inputGateBias());
2621  armnn::ConstTensor forgetGateBias = ToConstTensor(flatBufferInputParams->forgetGateBias());
2622  armnn::ConstTensor cellBias = ToConstTensor(flatBufferInputParams->cellBias());
2623  armnn::ConstTensor outputGateBias = ToConstTensor(flatBufferInputParams->outputGateBias());
2624 
2625  lstmInputParams.m_InputToInputWeights = &inputToInputWeights;
2626  lstmInputParams.m_InputToForgetWeights = &inputToForgetWeights;
2627  lstmInputParams.m_InputToCellWeights = &inputToCellWeights;
2628  lstmInputParams.m_InputToOutputWeights = &inputToOutputWeights;
2629  lstmInputParams.m_RecurrentToInputWeights = &recurrentToInputWeights;
2630  lstmInputParams.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
2631  lstmInputParams.m_RecurrentToCellWeights = &recurrentToCellWeights;
2632  lstmInputParams.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
2633  lstmInputParams.m_InputGateBias = &inputGateBias;
2634  lstmInputParams.m_ForgetGateBias = &forgetGateBias;
2635  lstmInputParams.m_CellBias = &cellBias;
2636  lstmInputParams.m_OutputGateBias = &outputGateBias;
2637 
2638  IConnectableLayer* layer = m_Network->AddQuantizedLstmLayer(lstmInputParams, layerName.c_str());
2639 
2640  armnn::TensorInfo outputTensorInfo1 = ToTensorInfo(outputs[0]);
2641  layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo1);
2642 
2643  armnn::TensorInfo outputTensorInfo2 = ToTensorInfo(outputs[1]);
2644  layer->GetOutputSlot(1).SetTensorInfo(outputTensorInfo2);
2645 
2646  RegisterInputSlots(graph, layerIndex, layer);
2647  RegisterOutputSlots(graph, layerIndex, layer);
2648 }
2649 
2650 void Deserializer::ParseDequantize(GraphPtr graph, unsigned int layerIndex)
2651 {
2652  CHECK_LAYERS(graph, 0, layerIndex);
2653 
2654  Deserializer::TensorRawPtrVector inputs = GetInputs(graph, layerIndex);
2655  CHECK_VALID_SIZE(inputs.size(), 1);
2656 
2657  Deserializer::TensorRawPtrVector outputs = GetOutputs(graph, layerIndex);
2658  CHECK_VALID_SIZE(outputs.size(), 1);
2659 
2660  const std::string layerName = GetLayerName(graph, layerIndex);
2661  IConnectableLayer* layer = m_Network->AddDequantizeLayer(layerName.c_str());
2662 
2663  armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
2664  layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2665 
2666  RegisterInputSlots(graph, layerIndex, layer);
2667  RegisterOutputSlots(graph, layerIndex, layer);
2668 }
2669 
2670 void Deserializer::ParseMerge(GraphPtr graph, unsigned int layerIndex)
2671 {
2672  CHECK_LAYERS(graph, 0, layerIndex);
2673 
2674  Deserializer::TensorRawPtrVector inputs = GetInputs(graph, layerIndex);
2675  CHECK_VALID_SIZE(inputs.size(), 2);
2676 
2677  Deserializer::TensorRawPtrVector outputs = GetOutputs(graph, layerIndex);
2678  CHECK_VALID_SIZE(outputs.size(), 1);
2679 
2680  const std::string layerName = GetLayerName(graph, layerIndex);
2681  IConnectableLayer* layer = m_Network->AddMergeLayer(layerName.c_str());
2682 
2683  armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
2684  layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2685 
2686  RegisterInputSlots(graph, layerIndex, layer);
2687  RegisterOutputSlots(graph, layerIndex, layer);
2688 }
2689 
2690 void Deserializer::ParseSwitch(GraphPtr graph, unsigned int layerIndex)
2691 {
2692  CHECK_LAYERS(graph, 0, layerIndex);
2693  auto inputs = GetInputs(graph, layerIndex);
2694  CHECK_LOCATION();
2695  CHECK_VALID_SIZE(inputs.size(), 2);
2696 
2697  auto outputs = GetOutputs(graph, layerIndex);
2698  CHECK_VALID_SIZE(outputs.size(), 2);
2699 
2700  auto layerName = GetLayerName(graph, layerIndex);
2701  IConnectableLayer* layer = m_Network->AddSwitchLayer(layerName.c_str());
2702 
2703  armnn::TensorInfo output0TensorInfo = ToTensorInfo(outputs[0]);
2704  layer->GetOutputSlot(0).SetTensorInfo(output0TensorInfo);
2705 
2706  armnn::TensorInfo output1TensorInfo = ToTensorInfo(outputs[1]);
2707  layer->GetOutputSlot(1).SetTensorInfo(output1TensorInfo);
2708 
2709  RegisterInputSlots(graph, layerIndex, layer);
2710  RegisterOutputSlots(graph, layerIndex, layer);
2711 }
2712 
2713 void Deserializer::ParsePrelu(GraphPtr graph, unsigned int layerIndex)
2714 {
2715  CHECK_LAYERS(graph, 0, layerIndex);
2716  auto inputs = GetInputs(graph, layerIndex);
2717  CHECK_LOCATION();
2718  CHECK_VALID_SIZE(inputs.size(), 2);
2719 
2720  auto outputs = GetOutputs(graph, layerIndex);
2721  CHECK_VALID_SIZE(outputs.size(), 1);
2722 
2723  auto layerName = GetLayerName(graph, layerIndex);
2724  IConnectableLayer* layer = m_Network->AddPreluLayer(layerName.c_str());
2725 
2726  armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
2727  layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2728 
2729  RegisterInputSlots(graph, layerIndex, layer);
2730  RegisterOutputSlots(graph, layerIndex, layer);
2731 }
2732 
2733 void Deserializer::ParseTranspose(GraphPtr graph, unsigned int layerIndex)
2734 {
2735  CHECK_LAYERS(graph, 0, layerIndex);
2736 
2737  auto dimsMapping = graph->layers()->Get(layerIndex)->layer_as_TransposeLayer()->descriptor()->dimMappings();
2738 
2739  auto inputs = GetInputs(graph, layerIndex);
2740  CHECK_VALID_SIZE(inputs.size(), 1);
2741 
2742  auto outputs = GetOutputs(graph, layerIndex);
2743  CHECK_VALID_SIZE(outputs.size(), 1);
2744  auto outputInfo = ToTensorInfo(outputs[0]);
2745 
2746  auto layerName = GetLayerName(graph, layerIndex);
2747  const armnn::TransposeDescriptor descriptor(armnn::PermutationVector(dimsMapping->data(), dimsMapping->Length()));
2748 
2749  IConnectableLayer* layer = m_Network->AddTransposeLayer(descriptor, layerName.c_str());
2750  layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
2751 
2752  RegisterInputSlots(graph, layerIndex, layer);
2753  RegisterOutputSlots(graph, layerIndex, layer);
2754 }
2755 
2756 void Deserializer::ParseTransposeConvolution2d(GraphPtr graph, unsigned int layerIndex)
2757 {
2758  CHECK_LAYERS(graph, 0, layerIndex);
2759 
2760  auto inputs = GetInputs(graph, layerIndex);
2761  CHECK_VALID_SIZE(inputs.size(), 1);
2762 
2763  auto outputs = GetOutputs(graph, layerIndex);
2764  CHECK_VALID_SIZE(outputs.size(), 1);
2765 
2766  auto serializerLayer = graph->layers()->Get(layerIndex)->layer_as_TransposeConvolution2dLayer();
2767  auto layerName = GetLayerName(graph, layerIndex);
2768  auto serializerDescriptor = serializerLayer->descriptor();
2769 
2771  descriptor.m_PadLeft = serializerDescriptor->padLeft();
2772  descriptor.m_PadRight = serializerDescriptor->padRight();
2773  descriptor.m_PadTop = serializerDescriptor->padTop();
2774  descriptor.m_PadBottom = serializerDescriptor->padBottom();
2775  descriptor.m_StrideX = serializerDescriptor->strideX();
2776  descriptor.m_StrideY = serializerDescriptor->strideY();;
2777  descriptor.m_BiasEnabled = serializerDescriptor->biasEnabled();;
2778  descriptor.m_DataLayout = ToDataLayout(serializerDescriptor->dataLayout());
2779 
2780  // weights & biases
2781  armnn::ConstTensor weights = ToConstTensor(serializerLayer->weights());
2782  armnn::Optional<armnn::ConstTensor> optionalBiases;
2783  if (descriptor.m_BiasEnabled)
2784  {
2785  armnn::ConstTensor biases = ToConstTensor(serializerLayer->biases());
2786  optionalBiases = armnn::MakeOptional<armnn::ConstTensor>(biases);
2787  }
2788 
2789  IConnectableLayer* layer = m_Network->AddTransposeConvolution2dLayer(descriptor,
2790  weights,
2791  optionalBiases,
2792  layerName.c_str());
2793 
2794  armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
2795  layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2796 
2797  RegisterInputSlots(graph, layerIndex, layer);
2798  RegisterOutputSlots(graph, layerIndex, layer);
2799 }
2800 
2801 void Deserializer::ParseStack(GraphPtr graph, unsigned int layerIndex)
2802 {
2803  CHECK_LAYERS(graph, 0, layerIndex);
2804  auto inputs = GetInputs(graph, layerIndex);
2805 
2806  auto outputs = GetOutputs(graph, layerIndex);
2807  CHECK_VALID_SIZE(outputs.size(), 1);
2808 
2809  auto flatBufferDescriptor = graph->layers()->Get(layerIndex)->layer_as_StackLayer()->descriptor();
2810  unsigned int axis = flatBufferDescriptor->axis();
2811  unsigned int numInputs = flatBufferDescriptor->numInputs();
2812  CHECK_VALID_SIZE(inputs.size(), numInputs);
2813 
2814  auto flatBufferInputShape = flatBufferDescriptor->inputShape();
2815  std::vector<uint32_t> vectorInputShape(flatBufferInputShape->begin(),
2816  flatBufferInputShape->begin() + flatBufferInputShape->size());
2817 
2818  TensorShape inputShape(static_cast<unsigned int>(vectorInputShape.size()), vectorInputShape.data());
2819  armnn::StackDescriptor descriptor(axis, numInputs, inputShape);
2820 
2821  for (unsigned int i=0; i<inputs.size(); ++i)
2822  {
2823  armnn::TensorShape inputShape = ToTensorInfo(inputs[i]).GetShape();
2824  if (descriptor.m_InputShape != inputShape)
2825  {
2826  std::stringstream ss;
2827  ss << "Shape of input "
2828  << i
2829  << " "
2830  << inputShape
2831  << " does not equal defined input shape "
2832  << descriptor.m_InputShape
2833  << ": "
2834  << CHECK_LOCATION().AsString();
2835  throw ParseException(ss.str());
2836  }
2837  }
2838 
2839  auto layerName = GetLayerName(graph, layerIndex);
2840  IConnectableLayer* layer = m_Network->AddStackLayer(descriptor, layerName.c_str());
2841 
2842  armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
2843  layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2844 
2845  RegisterInputSlots(graph, layerIndex, layer);
2846  RegisterOutputSlots(graph, layerIndex, layer);
2847 }
2848 
2849 void Deserializer::ParseStandIn(GraphPtr graph, unsigned int layerIndex)
2850 {
2851  CHECK_LAYERS(graph, 0, layerIndex);
2852 
2853  auto inputs = GetInputs(graph, layerIndex);
2854  auto outputs = GetOutputs(graph, layerIndex);
2855 
2856  auto fbLayer = graph->layers()->Get(layerIndex)->layer_as_StandInLayer();
2857  auto fbDescriptor = fbLayer->descriptor();
2858 
2859  armnn::StandInDescriptor descriptor;
2860  descriptor.m_NumInputs = fbDescriptor->numInputs();
2861  descriptor.m_NumOutputs = fbDescriptor->numOutputs();
2862 
2863  CHECK_VALID_SIZE(inputs.size(), descriptor.m_NumInputs);
2864  CHECK_VALID_SIZE(outputs.size(), descriptor.m_NumOutputs);
2865 
2866  const std::string layerName = GetLayerName(graph, layerIndex);
2867  armnn::IConnectableLayer* layer = m_Network->AddStandInLayer(descriptor, layerName.c_str());
2868 
2869  for (unsigned int i = 0u; i < descriptor.m_NumOutputs; ++i)
2870  {
2871  armnn::TensorInfo outputInfo = ToTensorInfo(outputs[i]);
2872  layer->GetOutputSlot(i).SetTensorInfo(outputInfo);
2873  }
2874 
2875  RegisterInputSlots(graph, layerIndex, layer);
2876  RegisterOutputSlots(graph, layerIndex, layer);
2877 }
2878 
2879 } // namespace armnnDeserializer
armnn::ConstTensor ToConstTensor(Deserializer::ConstTensorRawPtr constTensorPtr)
+
static armnn::LstmDescriptor GetLstmDescriptor(LstmDescriptorPtr lstmDescriptor)
+
uint32_t m_PadBottom
Padding bottom value in the height dimension.
+
bool m_BiasEnabled
Enable/disable bias.
+ + +
float m_Eps
Used to avoid dividing by zero.
+ +
virtual unsigned int GetNumOutputSlots() const =0
Returns the number of connectable output slots.
+
bool m_ProjectionEnabled
Enable/disable the projection layer.
+
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
+
const ConstTensor * m_ProjectionWeights
Definition: LstmParams.hpp:55
+
UnaryOperation m_Operation
Specifies the elementwiseUnary operation to execute.
Definition: Descriptors.hpp:98
+ +
A ViewsDescriptor for the SplitterLayer.
+
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:61
+
float m_ScaleW
Center size encoding scale weight.
+
#define CHECK_LAYERS(GRAPH, LAYERS_INDEX, LAYER_INDEX)
+
const ConstTensor * m_CellBias
Definition: LstmParams.hpp:53
+ +
uint32_t m_PadBottom
Padding bottom value in the height dimension.
+ +
bool m_BiasEnabled
Enable/disable bias.
+
static GraphPtr LoadGraphFromBinary(const uint8_t *binaryContent, size_t len)
+
virtual unsigned int GetNumInputSlots() const =0
Returns the number of connectable input slots.
+
DataLayout
Definition: Types.hpp:49
+
float m_K
Kappa value used for the across channel normalization equation.
+
A TransposeConvolution2dDescriptor for the TransposeConvolution2dLayer.
+ +
const TensorShape & GetShape() const
Definition: Tensor.hpp:88
+
const ConstTensor * m_RecurrentToOutputWeights
+
uint32_t m_PadBottom
Padding bottom value in the height dimension.
+ +
uint32_t m_PadLeft
Padding left value in the width dimension.
+
float m_ClippingThresProj
Clipping threshold value for the projection.
+ + +
std::string AsString() const
Definition: Exceptions.hpp:29
+
static armnn::NormalizationDescriptor GetNormalizationDescriptor(NormalizationDescriptorPtr normalizationDescriptor, unsigned int layerIndex)
+ +
A ReshapeDescriptor for the ReshapeLayer.
+
static void Destroy(IDeserializer *parser)
+ +
const ConstTensor * m_CellToOutputWeights
Definition: LstmParams.hpp:50
+
const ConstTensor * m_RecurrentToForgetWeights
+
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
+ +
A ComparisonDescriptor for the ComparisonLayer.
Definition: Descriptors.hpp:62
+
float m_ScaleX
Center size encoding scale x.
+
bool m_TransposeWeightMatrix
Enable/disable transpose weight matrix.
+
uint32_t m_PoolWidth
Pooling width value.
+
#define CHECK_TENSOR_PTR(TENSOR_PTR)
+ +
A Convolution2dDescriptor for the Convolution2dLayer.
+ +
float m_Alpha
Alpha value for the normalization equation.
+
const armnnSerializer::TensorInfo * TensorRawPtr
+ + +
const armnnSerializer::NormalizationDescriptor * NormalizationDescriptorPtr
+
uint32_t m_PadLeft
Padding left value in the width dimension.
+
bool m_BiasEnabled
Enable/disable bias.
+
const ConstTensor * m_CellToInputWeights
Definition: LstmParams.hpp:48
+ + +
ResizeMethod m_Method
The Interpolation method to use (Bilinear, NearestNeighbor).
+
float m_Gamma
Gamma, the scale scalar value applied for the normalized tensor. Defaults to 1.0. ...
+
float m_Beta
Exponentiation value.
+
std::vector< unsigned int > m_Size
Size of the slice in each dimension.
+
The padding fields don&#39;t count and are ignored.
+
float m_Eps
Value to add to the variance. Used to avoid dividing by zero.
+
const ConstTensor * m_InputGateBias
Definition: LstmParams.hpp:51
+
PaddingMethod m_PaddingMethod
The padding method to be used. (Exclude, IgnoreValue).
+
ArgMinMaxFunction m_Function
Specify if the function is to find Min or Max.
Definition: Descriptors.hpp:56
+
uint32_t m_DetectionsPerClass
Detections per classes, used in Regular NMS.
+
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
+
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
+
void CheckLayers(Graph &graph)
+ + +
static IDeserializerPtr Create()
+
uint32_t m_PadTop
Padding top value in the height dimension.
+
const ConstTensor * m_RecurrentToCellWeights
Definition: LstmParams.hpp:46
+ + + +
uint32_t m_PadRight
Padding right value in the width dimension.
+
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
+
const ConstTensor * m_ForgetLayerNormWeights
Definition: LstmParams.hpp:58
+
const ConstTensor * m_CellToForgetWeights
Definition: LstmParams.hpp:49
+
Copyright (c) 2020 ARM Limited.
+
void IgnoreUnused(Ts &&...)
+ + +
armnn::INetworkPtr CreateNetworkFromBinary(const std::vector< uint8_t > &binaryContent) override
Create an input network from binary file contents.
+ +
uint32_t m_PadBottom
Padding bottom value in the height dimension.
+ + +
#define CHECK_GRAPH(GRAPH, LAYERS_INDEX)
+
static std::string GetLayerName(const GraphPtr &graph, unsigned int index)
+
uint32_t m_DilationY
Dilation along y axis.
+
A SpaceToDepthDescriptor for the SpaceToDepthLayer.
+
std::vector< std::pair< unsigned int, unsigned int > > m_PadList
Specifies the padding values for the input dimension: heightPad{top, bottom} widthPad{left, right}.
+
uint32_t m_DilationY
Dilation factor value for height dimension.
+
A BatchToSpaceNdDescriptor for the BatchToSpaceNdLayer.
+
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
+
BindingPointInfo GetNetworkInputBindingInfo(unsigned int layerId, const std::string &name) const override
Retrieve binding info (layer id and tensor info) for the network input identified by the given layer ...
+ +
int LayerBindingId
Type of identifiers for bindable layers (inputs, outputs).
Definition: Types.hpp:171
+
const ConstTensor * m_OutputGateBias
Definition: LstmParams.hpp:54
+
armnn::ComparisonOperation ToComparisonOperation(armnnSerializer::ComparisonOperation operation)
+
virtual void SetTensorInfo(const TensorInfo &tensorInfo)=0
+
uint32_t m_NumOutputs
Number of output tensors.
+
NormalizationAlgorithmMethod m_NormMethodType
Normalization method algorithm to use (LocalBrightness, LocalContrast).
+
void SetShape(const TensorShape &newShape)
Definition: Tensor.hpp:90
+
A ResizeDescriptor for the ResizeLayer.
+
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
+
uint32_t m_MaxClassesPerDetection
Maximum numbers of classes per detection, used in Fast NMS.
+
std::vector< unsigned int > m_Axis
Values for the dimensions to reduce.
+
A StackDescriptor for the StackLayer.
+
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
+ +
TensorShape m_TargetShape
Target shape value.
+
uint32_t m_PoolHeight
Pooling height value.
+
uint32_t m_PadTop
Padding top value in the height dimension.
+
uint32_t m_MaxDetections
Maximum numbers of detections.
+
A PadDescriptor for the PadLayer.
+
static int32_t GetBindingLayerInfo(const GraphPtr &graphPtr, unsigned int layerIndex)
+
const armnnSerializer::Pooling2dDescriptor * PoolingDescriptor
+
#define CHECK_CONST_TENSOR_SIZE(CONST_TENSOR_SIZE, TENSOR_SIZE)
+
armnn::TensorInfo ToTensorInfo(Deserializer::TensorRawPtr tensorPtr)
+
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
+
const ConstTensor * m_InputLayerNormWeights
Definition: LstmParams.hpp:57
+
ComparisonOperation
Definition: Types.hpp:77
+ +
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
+ + +
armnn::DataLayout ToDataLayout(armnnSerializer::DataLayout dataLayout)
+
bool CheckShape(const armnn::TensorShape &actual, const std::vector< uint32_t > &expected)
+
DataType
Definition: Types.hpp:32
+
float m_NmsIouThreshold
Intersection over union threshold.
+
const ConstTensor * m_RecurrentToOutputWeights
Definition: LstmParams.hpp:47
+
An LstmDescriptor for the LstmLayer.
+
uint32_t m_PadRight
Padding right value in the width dimension.
+
uint32_t m_DilationX
Dilation factor value for width dimension.
+
uint32_t m_PadTop
Padding top value in the height dimension.
+
std::string FileLine() const
Definition: Exceptions.hpp:37
+
Status SetViewSize(uint32_t view, uint32_t coord, uint32_t value)
Set the size of the views.
+
std::vector< unsigned int > m_Begin
Beginning indices of the slice in each dimension.
+
bool m_KeepDims
Enable/disable keep dimensions. If true, then the reduced dimensions that are of length 1 are kept...
+
armnnSerializer::TensorInfo * TensorRawPtr
+
std::vector< unsigned int > m_BlockShape
Block shape values.
+ +
float m_Eps
Epsilon, small scalar value added to variance to avoid dividing by zero. Defaults to 1e-12f...
+
An output connection slot for a layer.
Definition: INetwork.hpp:37
+
A L2NormalizationDescriptor for the L2NormalizationLayer.
+
const ConstTensor * m_ProjectionBias
Definition: LstmParams.hpp:56
+
const ConstTensor * m_InputToForgetWeights
+
An ArgMinMaxDescriptor for ArgMinMaxLayer.
Definition: Descriptors.hpp:43
+
An OriginsDescriptor for the ConcatLayer.
+ +
static LayerBaseRawPtr GetBaseLayer(const GraphPtr &graphPtr, unsigned int layerIndex)
+
A FullyConnectedDescriptor for the FullyConnectedLayer.
+
bool m_BiasEnabled
Enable/disable bias.
+
A tensor defined by a TensorInfo (shape and data type) and an immutable backing store.
Definition: Tensor.hpp:199
+ + + +
uint32_t m_TargetWidth
Target width value.
+
#define CHECK_VALID_SIZE(ACTUAL,...)
+ +
bool m_PeepholeEnabled
Enable/disable peephole.
+
uint32_t m_NumClasses
Number of classes.
+
#define CHECKED_NON_NEGATIVE(VALUE)
+ + +
std::unique_ptr< IDeserializer, void(*)(IDeserializer *parser)> IDeserializerPtr
+
armnn::ActivationFunction ToActivationFunction(armnnSerializer::ActivationFunction function)
+
uint32_t m_PadTop
Padding top value in the height dimension.
+
armnn::UnaryOperation ToUnaryOperation(armnnSerializer::UnaryOperation operation)
+
A StandInDescriptor for the StandIn layer.
+
#define CHECK_CONST_TENSOR_PTR(TENSOR_PTR)
+
std::enable_if_t< std::is_unsigned< Source >::value &&std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
Definition: NumericCast.hpp:33
+
bool m_UseRegularNms
Use Regular NMS.
+
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
+
BindingPointInfo GetNetworkOutputBindingInfo(unsigned int layerId, const std::string &name) const override
Retrieve binding info (layer id and tensor info) for the network output identified by the given layer...
+
std::vector< unsigned int > m_BlockShape
Block shape value.
+
An ActivationDescriptor for the ActivationLayer.
Definition: Descriptors.hpp:20
+ +
min(a, max(b, input)) ReLu1 & ReLu6.
+
#define CHECK_LOCATION()
Definition: Exceptions.hpp:192
+
std::vector< TensorRawPtr > TensorRawPtrVector
+
uint32_t m_TargetHeight
Target height value.
+
uint32_t m_ActivationFunc
The activation function to use.
+
A SliceDescriptor for the SliceLayer.
+ +
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
+
const ConstTensor * m_RecurrentToInputWeights
+
float m_ClippingThresCell
Clipping threshold value for the cell state.
+
unsigned int m_BlockSize
Scalar specifying the input block size. It must be >= 1.
+
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
+
static TensorRawPtrVector GetOutputs(const GraphPtr &graph, unsigned int layerIndex)
+
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
+
float m_Beta
Beta, the offset scalar value applied for the normalized tensor. Defaults to 1.0. ...
+ +
armnn::ResizeMethod ToResizeMethod(armnnSerializer::ResizeMethod method)
+
float m_ScaleH
Center size encoding scale height.
+
ComparisonOperation m_Operation
Specifies the comparison operation to execute.
Definition: Descriptors.hpp:78
+
const ConstTensor * m_CellLayerNormWeights
Definition: LstmParams.hpp:59
+
const ConstTensor * m_ForgetGateBias
Definition: LstmParams.hpp:52
+
A SpaceToBatchNdDescriptor for the SpaceToBatchNdLayer.
+
const ConstTensor * m_InputToCellWeights
Definition: LstmParams.hpp:42
+
const ConstTensor * m_InputToOutputWeights
Definition: LstmParams.hpp:43
+
NormalizationAlgorithmChannel m_NormChannelType
Normalization channel algorithm to use (Across, Within).
+ +
float m_A
Alpha upper bound value used by the activation functions. (BoundedReLu, Linear, TanH).
Definition: Descriptors.hpp:37
+
uint32_t m_DilationX
Dilation along x axis.
+
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
+
bool m_CifgEnabled
Enable/disable cifg (coupled input & forget gate).
+
armnn::ArgMinMaxFunction ToArgMinMaxFunction(armnnSerializer::ArgMinMaxFunction function)
+
uint32_t m_PadLeft
Padding left value in the width dimension.
+
EmptyOptional is used to initialize the Optional class in case we want to have default value for an O...
Definition: Optional.hpp:32
+ +
const armnnSerializer::SerializedGraph * GraphPtr
+
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
+
A ElementwiseUnaryDescriptor for the ElementwiseUnaryLayer.
Definition: Descriptors.hpp:82
+
PoolingAlgorithm m_PoolType
The pooling algorithm to use (Max. Average, L2).
+ +
const ConstTensor * m_RecurrentToForgetWeights
Definition: LstmParams.hpp:45
+
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
+
static IDeserializer * CreateRaw()
+
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
+
The padding fields count, but are ignored.
+
std::vector< std::pair< unsigned int, unsigned int > > m_Crops
The values to crop from the input dimension.
+
Base class for all ArmNN exceptions so that users can filter to just those.
Definition: Exceptions.hpp:46
+
const armnnSerializer::ConstTensor * ConstTensorRawPtr
+ + + +
unsigned int GetNumDimensions() const
Definition: Tensor.hpp:43
+
Jarret 2009: Local Contrast Normalization.
+
ArgMinMaxFunction
Definition: Types.hpp:71
+
const armnnSerializer::LstmDescriptor * LstmDescriptorPtr
+
OutputShapeRounding m_OutputShapeRounding
The rounding method for the output shape. (Floor, Ceiling).
+
uint32_t m_NumInputs
Number of input tensors.
+
static armnn::Pooling2dDescriptor GetPoolingDescriptor(PoolingDescriptor pooling2dDescriptor, unsigned int layerIndex)
+ + +
const ConstTensor * m_RecurrentToCellWeights
+
virtual const IInputSlot & GetInputSlot(unsigned int index) const =0
Get a const input slot handle by slot index.
+
ResizeMethod
Definition: Types.hpp:103
+
const ConstTensor * m_RecurrentToInputWeights
Definition: LstmParams.hpp:44
+
A MeanDescriptor for the MeanLayer.
+
const ConstTensor * m_InputToOutputWeights
+
UnaryOperation
Definition: Types.hpp:87
+
static TensorRawPtrVector GetInputs(const GraphPtr &graph, unsigned int layerIndex)
+
bool m_LayerNormEnabled
Enable/disable layer normalization.
+ +
uint32_t m_PadRight
Padding right value in the width dimension.
+ +
A TransposeDescriptor for the TransposeLayer.
+
A StridedSliceDescriptor for the StridedSliceLayer.
+ +
virtual const IOutputSlot & GetOutputSlot(unsigned int index) const =0
Get the const output slot handle by slot index.
+ +
int m_Axis
Axis to reduce across the input tensor.
Definition: Descriptors.hpp:58
+
float m_ScaleY
Center size encoding scale y.
+
float m_NmsScoreThreshold
NMS score threshold.
+
std::unique_ptr< INetwork, void(*)(INetwork *network)> INetworkPtr
Definition: INetwork.hpp:101
+ +
virtual int Connect(IInputSlot &destination)=0
+ +
Krichevsky 2012: Local Brightness Normalization.
+ +
const char * m_Function
Definition: Exceptions.hpp:16
+
A Pooling2dDescriptor for the Pooling2dLayer.
+ + +
const ConstTensor * m_OutputLayerNormWeights
Definition: LstmParams.hpp:60
+ +
const armnnSerializer::LayerBase * LayerBaseRawPtr
+
A NormalizationDescriptor for the NormalizationLayer.
+ +
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
+ +
+
An InstanceNormalizationDescriptor for InstanceNormalizationLayer.
+ + +
static armnn::TensorInfo OutputShapeOfReshape(const armnn::TensorInfo &inputTensorInfo, const std::vector< uint32_t > &targetDimsIn)
+
float m_B
Beta lower bound value used by the activation functions. (BoundedReLu, Linear, TanH).
Definition: Descriptors.hpp:39
+
A SoftmaxDescriptor for the SoftmaxLayer.
+
float m_Beta
Beta value for the normalization equation.
+
const armnnSerializer::OriginsDescriptor * GetOriginsDescriptor(const armnnSerializer::SerializedGraph *graph, unsigned int layerIndex)
+
uint32_t m_NormSize
Depth radius value.
+
Status SetViewOriginCoord(uint32_t view, uint32_t coord, uint32_t value)
Set the view origin coordinates.
+
ActivationFunction m_Function
The activation function to use (Sigmoid, TanH, Linear, ReLu, BoundedReLu, SoftReLu, LeakyReLu, Abs, Sqrt, Square).
Definition: Descriptors.hpp:35
+
An input connection slot for a layer.
Definition: INetwork.hpp:24
+
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
+
A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
+
A BatchNormalizationDescriptor for the BatchNormalizationLayer.
+
uint32_t m_PadLeft
Padding left value in the width dimension.
+
unsigned int GetNumElements() const
Definition: Tensor.hpp:93
+ +
const ConstTensor * m_InputToForgetWeights
Definition: LstmParams.hpp:41
+
ActivationFunction
Definition: Types.hpp:55
+ +
A PermuteDescriptor for the PermuteLayer.
+
uint32_t m_PadRight
Padding right value in the width dimension.
+ +
const ConstTensor * m_InputToInputWeights
Definition: LstmParams.hpp:40
+
std::vector< float > anchors({ 0.5f, 0.5f, 1.0f, 1.0f, 0.5f, 0.5f, 1.0f, 1.0f, 0.5f, 0.5f, 1.0f, 1.0f, 0.5f, 10.5f, 1.0f, 1.0f, 0.5f, 10.5f, 1.0f, 1.0f, 0.5f, 100.5f, 1.0f, 1.0f })
+
+
+ + + + -- cgit v1.2.1