Serializer.cpp
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "Serializer.hpp"

#include <armnn/Descriptors.hpp>
#include <armnn/LstmParams.hpp>
#include <armnn/QuantizedLstmParams.hpp>

#include <iostream>

#include <boost/core/ignore_unused.hpp>
#include <boost/numeric/conversion/cast.hpp>
#include <flatbuffers/util.h>

#include "SerializerUtils.hpp"

using namespace armnn;
namespace fb = flatbuffers;
namespace serializer = armnnSerializer;

namespace armnnSerializer
{

serializer::ActivationFunction GetFlatBufferActivationFunction(armnn::ActivationFunction function)
{
    switch (function)
    {
        case armnn::ActivationFunction::Sigmoid:
            return serializer::ActivationFunction::ActivationFunction_Sigmoid;
        case armnn::ActivationFunction::TanH:
            return serializer::ActivationFunction::ActivationFunction_TanH;
        case armnn::ActivationFunction::Linear:
            return serializer::ActivationFunction::ActivationFunction_Linear;
        case armnn::ActivationFunction::ReLu:
            return serializer::ActivationFunction::ActivationFunction_ReLu;
        case armnn::ActivationFunction::BoundedReLu:
            return serializer::ActivationFunction::ActivationFunction_BoundedReLu;
        case armnn::ActivationFunction::LeakyReLu:
            return serializer::ActivationFunction::ActivationFunction_LeakyReLu;
        case armnn::ActivationFunction::Abs:
            return serializer::ActivationFunction::ActivationFunction_Abs;
        case armnn::ActivationFunction::Sqrt:
            return serializer::ActivationFunction::ActivationFunction_Sqrt;
        case armnn::ActivationFunction::Square:
            return serializer::ActivationFunction::ActivationFunction_Square;
        default:
            return serializer::ActivationFunction::ActivationFunction_Sigmoid;
    }
}

serializer::ArgMinMaxFunction GetFlatBufferArgMinMaxFunction(armnn::ArgMinMaxFunction function)
{
    switch (function)
    {
        case armnn::ArgMinMaxFunction::Max:
            return serializer::ArgMinMaxFunction::ArgMinMaxFunction_Max;
        case armnn::ArgMinMaxFunction::Min:
        default:
            return serializer::ArgMinMaxFunction::ArgMinMaxFunction_Min;
    }
}

uint32_t SerializerVisitor::GetSerializedId(armnn::LayerGuid guid)
{
    if (m_guidMap.empty())
    {
        m_guidMap.insert(std::make_pair(guid, m_layerId));
    }
    else if (m_guidMap.find(guid) == m_guidMap.end())
    {
        ++m_layerId;
        m_guidMap.insert(std::make_pair(guid, m_layerId));

        return m_layerId;
    }
    return m_guidMap[guid];
}

// Build FlatBuffer for Input Layer
void SerializerVisitor::VisitInputLayer(const armnn::IConnectableLayer* layer, LayerBindingId id, const char* name)
{
    boost::ignore_unused(name);

    // Create FlatBuffer BaseLayer
    auto flatBufferInputBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Input);

    // Create FlatBuffer BindableBaseLayer
    auto flatBufferInputBindableBaseLayer = serializer::CreateBindableLayerBase(m_flatBufferBuilder,
                                                                                flatBufferInputBaseLayer,
                                                                                id);
    // Push layer binding id to inputIds.
    m_inputIds.push_back(id);

    // Create the FlatBuffer InputLayer
    auto flatBufferInputLayer = serializer::CreateInputLayer(m_flatBufferBuilder, flatBufferInputBindableBaseLayer);

    // Add the AnyLayer to the FlatBufferLayers
    CreateAnyLayer(flatBufferInputLayer.o, serializer::Layer::Layer_InputLayer);
}

// Build FlatBuffer for Output Layer
void SerializerVisitor::VisitOutputLayer(const armnn::IConnectableLayer* layer, LayerBindingId id, const char* name)
{
    boost::ignore_unused(name);

    // Create FlatBuffer BaseLayer
    auto flatBufferOutputBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Output);

    // Create FlatBuffer BindableBaseLayer
    auto flatBufferOutputBindableBaseLayer = serializer::CreateBindableLayerBase(m_flatBufferBuilder,
                                                                                 flatBufferOutputBaseLayer,
                                                                                 id);
    // Push layer binding id to outputIds.
    m_outputIds.push_back(id);

    // Create the FlatBuffer OutputLayer
    auto flatBufferOutputLayer = serializer::CreateOutputLayer(m_flatBufferBuilder, flatBufferOutputBindableBaseLayer);
    // Add the AnyLayer to the FlatBufferLayers
    CreateAnyLayer(flatBufferOutputLayer.o, serializer::Layer::Layer_OutputLayer);
}

void SerializerVisitor::VisitAbsLayer(const armnn::IConnectableLayer* layer, const char* name)
{
    boost::ignore_unused(name);
    auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Abs);
    auto flatBufferAbsLayer  = serializer::CreateAbsLayer(m_flatBufferBuilder, flatBufferBaseLayer);

    CreateAnyLayer(flatBufferAbsLayer.o, serializer::Layer::Layer_AbsLayer);
}

// Build FlatBuffer for Activation Layer
void SerializerVisitor::VisitActivationLayer(const armnn::IConnectableLayer* layer,
                                             const armnn::ActivationDescriptor& descriptor,
                                             const char* name)
{
    boost::ignore_unused(name);

    // Create FlatBuffer BaseLayer
    auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Activation);

    // Create the FlatBuffer ActivationDescriptor
    auto flatBufferDescriptor = CreateActivationDescriptor(m_flatBufferBuilder,
                                                           GetFlatBufferActivationFunction(descriptor.m_Function),
                                                           descriptor.m_A,
                                                           descriptor.m_B);

    // Create the FlatBuffer ActivationLayer
    auto flatBufferActivationLayer = CreateActivationLayer(m_flatBufferBuilder,
                                                           flatBufferBaseLayer,
                                                           flatBufferDescriptor);

    // Add the AnyLayer to the FlatBufferLayers
    CreateAnyLayer(flatBufferActivationLayer.o, serializer::Layer::Layer_ActivationLayer);
}

// Build FlatBuffer for Addition Layer
void SerializerVisitor::VisitAdditionLayer(const armnn::IConnectableLayer* layer, const char* name)
{
    boost::ignore_unused(name);

    // Create FlatBuffer BaseLayer
    auto flatBufferAdditionBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Addition);

    // Create the FlatBuffer AdditionLayer
    auto flatBufferAdditionLayer = serializer::CreateAdditionLayer(m_flatBufferBuilder, flatBufferAdditionBaseLayer);

    // Add the AnyLayer to the FlatBufferLayers
    CreateAnyLayer(flatBufferAdditionLayer.o, serializer::Layer::Layer_AdditionLayer);
}

// Build FlatBuffer for ArgMinMax Layer
void SerializerVisitor::VisitArgMinMaxLayer(const armnn::IConnectableLayer* layer,
                                            const armnn::ArgMinMaxDescriptor& descriptor,
                                            const char* name)
{
    boost::ignore_unused(name);

    // Create FlatBuffer BaseLayer
    auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_ArgMinMax);

    // Create FlatBuffer Descriptor
    auto flatBufferDescriptor = CreateArgMinMaxDescriptor(m_flatBufferBuilder,
                                                          GetFlatBufferArgMinMaxFunction(descriptor.m_Function),
                                                          descriptor.m_Axis);

    // Create FlatBuffer ArgMinMaxLayer
    auto flatBufferLayer = CreateArgMinMaxLayer(m_flatBufferBuilder,
                                                flatBufferBaseLayer,
                                                flatBufferDescriptor);

    CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_ArgMinMaxLayer);
}

// Build FlatBuffer for BatchToSpaceNd Layer
void SerializerVisitor::VisitBatchToSpaceNdLayer(const armnn::IConnectableLayer* layer,
                                                 const armnn::BatchToSpaceNdDescriptor& descriptor,
                                                 const char* name)
{
    boost::ignore_unused(name);

    // Create FlatBuffer BaseLayer
    auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_BatchToSpaceNd);

    std::vector<unsigned int> crops;
    crops.reserve(descriptor.m_Crops.size() * 2);
    for (auto& crop : descriptor.m_Crops)
    {
        crops.push_back(crop.first);
        crops.push_back(crop.second);
    }

    auto flatBufferDescriptor =
        CreateBatchToSpaceNdDescriptor(m_flatBufferBuilder,
                                       m_flatBufferBuilder.CreateVector(descriptor.m_BlockShape),
                                       m_flatBufferBuilder.CreateVector(crops),
                                       GetFlatBufferDataLayout(descriptor.m_DataLayout));

    auto flatBufferLayer = serializer::CreateBatchToSpaceNdLayer(m_flatBufferBuilder,
                                                                 flatBufferBaseLayer,
                                                                 flatBufferDescriptor);

    CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_BatchToSpaceNdLayer);
}

void SerializerVisitor::VisitBatchNormalizationLayer(const armnn::IConnectableLayer* layer,
                                                     const armnn::BatchNormalizationDescriptor& batchNormDescriptor,
                                                     const armnn::ConstTensor& mean,
                                                     const armnn::ConstTensor& variance,
                                                     const armnn::ConstTensor& beta,
                                                     const armnn::ConstTensor& gamma,
                                                     const char* name)
{
    boost::ignore_unused(name);

    auto fbBatchNormalizationBaseLayer  = CreateLayerBase(layer, serializer::LayerType::LayerType_BatchNormalization);
    auto fbBatchNormalizationDescriptor = serializer::CreateBatchNormalizationDescriptor(
        m_flatBufferBuilder,
        batchNormDescriptor.m_Eps,
        GetFlatBufferDataLayout(batchNormDescriptor.m_DataLayout));

    auto fbMeanConstTensorInfo     = CreateConstTensorInfo(mean);
    auto fbVarianceConstTensorInfo = CreateConstTensorInfo(variance);
    auto fbBetaConstTensorInfo     = CreateConstTensorInfo(beta);
    auto fbGammaConstTensorInfo    = CreateConstTensorInfo(gamma);
    auto fbBatchNormalizationLayer = serializer::CreateBatchNormalizationLayer(m_flatBufferBuilder,
                                                                               fbBatchNormalizationBaseLayer,
                                                                               fbBatchNormalizationDescriptor,
                                                                               fbMeanConstTensorInfo,
                                                                               fbVarianceConstTensorInfo,
                                                                               fbBetaConstTensorInfo,
                                                                               fbGammaConstTensorInfo);

    CreateAnyLayer(fbBatchNormalizationLayer.o, serializer::Layer::Layer_BatchNormalizationLayer);
}

void SerializerVisitor::VisitComparisonLayer(const armnn::IConnectableLayer* layer,
                                             const armnn::ComparisonDescriptor& descriptor,
                                             const char* name)
{
    boost::ignore_unused(name);

    auto fbBaseLayer  = CreateLayerBase(layer, serializer::LayerType::LayerType_Comparison);
    auto fbDescriptor = serializer::CreateComparisonDescriptor(
        m_flatBufferBuilder,
        GetFlatBufferComparisonOperation(descriptor.m_Operation));

    auto fbLayer = serializer::CreateComparisonLayer(m_flatBufferBuilder, fbBaseLayer, fbDescriptor);
    CreateAnyLayer(fbLayer.o, serializer::Layer::Layer_ComparisonLayer);
}

// Build FlatBuffer for Constant Layer
void SerializerVisitor::VisitConstantLayer(const armnn::IConnectableLayer* layer,
                                           const armnn::ConstTensor& input,
                                           const char* name)
{
    boost::ignore_unused(name);

    // Create FlatBuffer BaseLayer
    auto flatBufferConstantBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Constant);

    auto flatBufferConstTensorInfo = CreateConstTensorInfo(input);

    // Create the FlatBuffer ConstantLayer
    auto flatBufferLayer = CreateConstantLayer(m_flatBufferBuilder,
                                               flatBufferConstantBaseLayer,
                                               flatBufferConstTensorInfo);

    // Add the AnyLayer to the FlatBufferLayers
    CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_ConstantLayer);
}

// Build FlatBuffer for Convolution2dLayer
void SerializerVisitor::VisitConvolution2dLayer(const armnn::IConnectableLayer* layer,
                                                const armnn::Convolution2dDescriptor& descriptor,
                                                const armnn::ConstTensor& weights,
                                                const armnn::Optional<armnn::ConstTensor>& biases,
                                                const char* name)
{
    boost::ignore_unused(name);

    // Create FlatBuffer BaseLayer
    auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Convolution2d);

    auto flatBufferDescriptor = CreateConvolution2dDescriptor(m_flatBufferBuilder,
                                                              descriptor.m_PadLeft,
                                                              descriptor.m_PadRight,
                                                              descriptor.m_PadTop,
                                                              descriptor.m_PadBottom,
                                                              descriptor.m_StrideX,
                                                              descriptor.m_StrideY,
                                                              descriptor.m_DilationX,
                                                              descriptor.m_DilationY,
                                                              descriptor.m_BiasEnabled,
                                                              GetFlatBufferDataLayout(descriptor.m_DataLayout));
    auto flatBufferWeightsConstTensorInfo = CreateConstTensorInfo(weights);
    flatbuffers::Offset<serializer::ConstTensor> flatBufferBiasesConstTensorInfo;

    if (biases.has_value())
    {
        flatBufferBiasesConstTensorInfo = CreateConstTensorInfo(biases.value());
    }

    // Create the FlatBuffer Convolution2dLayer
    auto flatBufferLayer = CreateConvolution2dLayer(m_flatBufferBuilder,
                                                    flatBufferBaseLayer,
                                                    flatBufferDescriptor,
                                                    flatBufferWeightsConstTensorInfo,
                                                    flatBufferBiasesConstTensorInfo);

    // Add the AnyLayer to the FlatBufferLayers
    CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_Convolution2dLayer);
}

void SerializerVisitor::VisitDepthToSpaceLayer(const armnn::IConnectableLayer* layer,
                                               const armnn::DepthToSpaceDescriptor& descriptor,
                                               const char* name)
{
    boost::ignore_unused(name);

    auto fbBaseLayer  = CreateLayerBase(layer, serializer::LayerType::LayerType_DepthToSpace);
    auto fbDescriptor = CreateDepthToSpaceDescriptor(m_flatBufferBuilder,
                                                     descriptor.m_BlockSize,
                                                     GetFlatBufferDataLayout(descriptor.m_DataLayout));

    auto fbLayer = serializer::CreateDepthToSpaceLayer(m_flatBufferBuilder, fbBaseLayer, fbDescriptor);

    CreateAnyLayer(fbLayer.o, serializer::Layer::Layer_DepthToSpaceLayer);
}

void SerializerVisitor::VisitDepthwiseConvolution2dLayer(const armnn::IConnectableLayer* layer,
                                                         const armnn::DepthwiseConvolution2dDescriptor& descriptor,
                                                         const armnn::ConstTensor& weights,
                                                         const armnn::Optional<armnn::ConstTensor>& biases,
                                                         const char* name)
{
    boost::ignore_unused(name);

    auto fbBaseLayer  = CreateLayerBase(layer, serializer::LayerType::LayerType_DepthwiseConvolution2d);
    auto fbDescriptor = CreateDepthwiseConvolution2dDescriptor(m_flatBufferBuilder,
                                                               descriptor.m_PadLeft,
                                                               descriptor.m_PadRight,
                                                               descriptor.m_PadTop,
                                                               descriptor.m_PadBottom,
                                                               descriptor.m_StrideX,
                                                               descriptor.m_StrideY,
                                                               descriptor.m_DilationX,
                                                               descriptor.m_DilationY,
                                                               descriptor.m_BiasEnabled,
                                                               GetFlatBufferDataLayout(descriptor.m_DataLayout));

    flatbuffers::Offset<serializer::ConstTensor> fbWeightsConstTensorInfo = CreateConstTensorInfo(weights);
    flatbuffers::Offset<serializer::ConstTensor> fbBiasesConstTensorInfo;
    if (biases.has_value())
    {
        fbBiasesConstTensorInfo = CreateConstTensorInfo(biases.value());
    }

    auto flatBufferLayer = CreateDepthwiseConvolution2dLayer(m_flatBufferBuilder,
                                                             fbBaseLayer,
                                                             fbDescriptor,
                                                             fbWeightsConstTensorInfo,
                                                             fbBiasesConstTensorInfo);

    CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_DepthwiseConvolution2dLayer);
}

void SerializerVisitor::VisitDequantizeLayer(const armnn::IConnectableLayer* layer,
                                             const char* name)
{
    boost::ignore_unused(name);

    auto fbDequantizeBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Dequantize);
    auto fbDequantizeLayer     = serializer::CreateDequantizeLayer(m_flatBufferBuilder, fbDequantizeBaseLayer);

    CreateAnyLayer(fbDequantizeLayer.o, serializer::Layer::Layer_DequantizeLayer);
}

void SerializerVisitor::VisitDetectionPostProcessLayer(const armnn::IConnectableLayer* layer,
                                                       const armnn::DetectionPostProcessDescriptor& descriptor,
                                                       const armnn::ConstTensor& anchors,
                                                       const char* name)
{
    boost::ignore_unused(name);

    auto fbBaseLayer  = CreateLayerBase(layer, serializer::LayerType::LayerType_DetectionPostProcess);
    auto fbDescriptor = CreateDetectionPostProcessDescriptor(m_flatBufferBuilder,
                                                             descriptor.m_MaxDetections,
                                                             descriptor.m_MaxClassesPerDetection,
                                                             descriptor.m_DetectionsPerClass,
                                                             descriptor.m_NmsScoreThreshold,
                                                             descriptor.m_NmsIouThreshold,
                                                             descriptor.m_NumClasses,
                                                             descriptor.m_UseRegularNms,
                                                             descriptor.m_ScaleX,
                                                             descriptor.m_ScaleY,
                                                             descriptor.m_ScaleW,
                                                             descriptor.m_ScaleH);

    flatbuffers::Offset<serializer::ConstTensor> fbAnchorsConstTensorInfo = CreateConstTensorInfo(anchors);

    auto flatBufferLayer = CreateDetectionPostProcessLayer(m_flatBufferBuilder,
                                                           fbBaseLayer,
                                                           fbDescriptor,
                                                           fbAnchorsConstTensorInfo);

    CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_DetectionPostProcessLayer);
}

void SerializerVisitor::VisitDivisionLayer(const armnn::IConnectableLayer* layer, const char* name)
{
    boost::ignore_unused(name);

    auto fbDivisionBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Division);
    auto fbDivisionLayer     = serializer::CreateDivisionLayer(m_flatBufferBuilder, fbDivisionBaseLayer);

    CreateAnyLayer(fbDivisionLayer.o, serializer::Layer::Layer_DivisionLayer);
}

void SerializerVisitor::VisitElementwiseUnaryLayer(const armnn::IConnectableLayer* layer,
                                                   const armnn::ElementwiseUnaryDescriptor& descriptor,
                                                   const char* name)
{
    boost::ignore_unused(name);

    auto fbBaseLayer  = CreateLayerBase(layer, serializer::LayerType::LayerType_ElementwiseUnary);
    auto fbDescriptor = serializer::CreateElementwiseUnaryDescriptor(
        m_flatBufferBuilder,
        GetFlatBufferUnaryOperation(descriptor.m_Operation));

    auto fbLayer = serializer::CreateElementwiseUnaryLayer(m_flatBufferBuilder, fbBaseLayer, fbDescriptor);
    CreateAnyLayer(fbLayer.o, serializer::Layer::Layer_ElementwiseUnaryLayer);
}

void SerializerVisitor::VisitEqualLayer(const armnn::IConnectableLayer* layer, const char* name)
{
    boost::ignore_unused(name);

    auto fbBaseLayer  = CreateLayerBase(layer, serializer::LayerType::LayerType_Equal);
    auto fbEqualLayer = serializer::CreateEqualLayer(m_flatBufferBuilder, fbBaseLayer);

    CreateAnyLayer(fbEqualLayer.o, serializer::Layer::Layer_EqualLayer);
}

void SerializerVisitor::VisitFloorLayer(const armnn::IConnectableLayer* layer, const char* name)
{
    boost::ignore_unused(name);

    auto flatBufferFloorBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Floor);
    auto flatBufferFloorLayer     = serializer::CreateFloorLayer(m_flatBufferBuilder, flatBufferFloorBaseLayer);

    CreateAnyLayer(flatBufferFloorLayer.o, serializer::Layer::Layer_FloorLayer);
}

void SerializerVisitor::VisitGatherLayer(const armnn::IConnectableLayer* layer, const char* name)
{
    boost::ignore_unused(name);

    auto fbGatherBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Gather);
    auto flatBufferLayer   = serializer::CreateGatherLayer(m_flatBufferBuilder, fbGatherBaseLayer);

    CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_GatherLayer);
}

void SerializerVisitor::VisitGreaterLayer(const armnn::IConnectableLayer* layer, const char* name)
{
    boost::ignore_unused(name);

    auto fbGreaterBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Greater);
    auto fbGreaterLayer     = serializer::CreateGreaterLayer(m_flatBufferBuilder, fbGreaterBaseLayer);

    CreateAnyLayer(fbGreaterLayer.o, serializer::Layer::Layer_GreaterLayer);
}

void SerializerVisitor::VisitInstanceNormalizationLayer(
    const armnn::IConnectableLayer* layer,
    const armnn::InstanceNormalizationDescriptor& instanceNormalizationDescriptor,
    const char* name)
{
    boost::ignore_unused(name);

    auto fbDescriptor = serializer::CreateInstanceNormalizationDescriptor(
        m_flatBufferBuilder,
        instanceNormalizationDescriptor.m_Gamma,
        instanceNormalizationDescriptor.m_Beta,
        instanceNormalizationDescriptor.m_Eps,
        GetFlatBufferDataLayout(instanceNormalizationDescriptor.m_DataLayout));

    auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_InstanceNormalization);
    auto fbLayer     = serializer::CreateInstanceNormalizationLayer(m_flatBufferBuilder, fbBaseLayer, fbDescriptor);

    CreateAnyLayer(fbLayer.o, serializer::Layer::Layer_InstanceNormalizationLayer);
}

void SerializerVisitor::VisitL2NormalizationLayer(const armnn::IConnectableLayer* layer,
                                                  const armnn::L2NormalizationDescriptor& l2NormalizationDescriptor,
                                                  const char* name)
{
    boost::ignore_unused(name);

    // Create FlatBuffer BaseLayer
    auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_L2Normalization);

    // Create the FlatBuffer L2Normalization Descriptor
    auto fbDescriptor = serializer::CreateL2NormalizationDescriptor(
        m_flatBufferBuilder,
        GetFlatBufferDataLayout(l2NormalizationDescriptor.m_DataLayout),
        l2NormalizationDescriptor.m_Eps);

    // Create FlatBuffer layer
    auto fbLayer = serializer::CreateL2NormalizationLayer(m_flatBufferBuilder, fbBaseLayer, fbDescriptor);

    CreateAnyLayer(fbLayer.o, serializer::Layer::Layer_L2NormalizationLayer);
}

void SerializerVisitor::VisitLogSoftmaxLayer(const armnn::IConnectableLayer* layer,
                                             const armnn::LogSoftmaxDescriptor& logSoftmaxDescriptor,
                                             const char* name)
{
    boost::ignore_unused(name);

    // Create FlatBuffer BaseLayer
    auto flatBufferLogSoftmaxBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_LogSoftmax);

    // Create the FlatBuffer LogSoftmaxDescriptor
    auto flatBufferLogSoftmaxDesc =
        serializer::CreateLogSoftmaxDescriptor(m_flatBufferBuilder,
                                               logSoftmaxDescriptor.m_Beta,
                                               logSoftmaxDescriptor.m_Axis);

    // Create the FlatBuffer LogSoftmaxLayer
    auto flatBufferLogSoftmaxLayer =
        serializer::CreateLogSoftmaxLayer(m_flatBufferBuilder,
                                          flatBufferLogSoftmaxBaseLayer,
                                          flatBufferLogSoftmaxDesc);

    CreateAnyLayer(flatBufferLogSoftmaxLayer.o, serializer::Layer::Layer_LogSoftmaxLayer);
}

void SerializerVisitor::VisitLstmLayer(const armnn::IConnectableLayer* layer,
                                       const armnn::LstmDescriptor& descriptor,
                                       const armnn::LstmInputParams& params,
                                       const char* name)
{
    boost::ignore_unused(name);

    auto fbLstmBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Lstm);

    auto fbLstmDescriptor = serializer::CreateLstmDescriptor(
        m_flatBufferBuilder,
        descriptor.m_ActivationFunc,
        descriptor.m_ClippingThresCell,
        descriptor.m_ClippingThresProj,
        descriptor.m_CifgEnabled,
        descriptor.m_PeepholeEnabled,
        descriptor.m_ProjectionEnabled,
        descriptor.m_LayerNormEnabled);

    // Get mandatory input parameters
    auto inputToForgetWeights     = CreateConstTensorInfo(*params.m_InputToForgetWeights);
    auto inputToCellWeights       = CreateConstTensorInfo(*params.m_InputToCellWeights);
    auto inputToOutputWeights     = CreateConstTensorInfo(*params.m_InputToOutputWeights);
    auto recurrentToForgetWeights = CreateConstTensorInfo(*params.m_RecurrentToForgetWeights);
    auto recurrentToCellWeights   = CreateConstTensorInfo(*params.m_RecurrentToCellWeights);
    auto recurrentToOutputWeights = CreateConstTensorInfo(*params.m_RecurrentToOutputWeights);
    auto forgetGateBias           = CreateConstTensorInfo(*params.m_ForgetGateBias);
    auto cellBias                 = CreateConstTensorInfo(*params.m_CellBias);
    auto outputGateBias           = CreateConstTensorInfo(*params.m_OutputGateBias);

    // Define optional parameters; these will be set depending on configuration in the Lstm descriptor
    flatbuffers::Offset<serializer::ConstTensor> inputToInputWeights;
    flatbuffers::Offset<serializer::ConstTensor> recurrentToInputWeights;
    flatbuffers::Offset<serializer::ConstTensor> cellToInputWeights;
    flatbuffers::Offset<serializer::ConstTensor> inputGateBias;
    flatbuffers::Offset<serializer::ConstTensor> projectionWeights;
    flatbuffers::Offset<serializer::ConstTensor> projectionBias;
    flatbuffers::Offset<serializer::ConstTensor> cellToForgetWeights;
    flatbuffers::Offset<serializer::ConstTensor> cellToOutputWeights;
    flatbuffers::Offset<serializer::ConstTensor> inputLayerNormWeights;
    flatbuffers::Offset<serializer::ConstTensor> forgetLayerNormWeights;
    flatbuffers::Offset<serializer::ConstTensor> cellLayerNormWeights;
    flatbuffers::Offset<serializer::ConstTensor> outputLayerNormWeights;

    if (!descriptor.m_CifgEnabled)
    {
        inputToInputWeights     = CreateConstTensorInfo(*params.m_InputToInputWeights);
        recurrentToInputWeights = CreateConstTensorInfo(*params.m_RecurrentToInputWeights);
        cellToInputWeights      = CreateConstTensorInfo(*params.m_CellToInputWeights);
        inputGateBias           = CreateConstTensorInfo(*params.m_InputGateBias);
    }

    if (descriptor.m_ProjectionEnabled)
    {
        projectionWeights = CreateConstTensorInfo(*params.m_ProjectionWeights);
        projectionBias    = CreateConstTensorInfo(*params.m_ProjectionBias);
    }

    if (descriptor.m_PeepholeEnabled)
    {
        cellToForgetWeights = CreateConstTensorInfo(*params.m_CellToForgetWeights);
        cellToOutputWeights = CreateConstTensorInfo(*params.m_CellToOutputWeights);
    }

    if (descriptor.m_LayerNormEnabled)
    {
        if (!descriptor.m_CifgEnabled)
        {
            inputLayerNormWeights = CreateConstTensorInfo(*params.m_InputLayerNormWeights);
        }
        forgetLayerNormWeights = CreateConstTensorInfo(*params.m_ForgetLayerNormWeights);
        cellLayerNormWeights   = CreateConstTensorInfo(*params.m_CellLayerNormWeights);
        outputLayerNormWeights = CreateConstTensorInfo(*params.m_OutputLayerNormWeights);
    }

    auto fbLstmParams = serializer::CreateLstmInputParams(
        m_flatBufferBuilder,
        inputToForgetWeights,
        inputToCellWeights,
        inputToOutputWeights,
        recurrentToForgetWeights,
        recurrentToCellWeights,
        recurrentToOutputWeights,
        forgetGateBias,
        cellBias,
        outputGateBias,
        inputToInputWeights,
        recurrentToInputWeights,
        cellToInputWeights,
        inputGateBias,
        projectionWeights,
        projectionBias,
        cellToForgetWeights,
        cellToOutputWeights,
        inputLayerNormWeights,
        forgetLayerNormWeights,
        cellLayerNormWeights,
        outputLayerNormWeights);

    auto fbLstmLayer = serializer::CreateLstmLayer(
        m_flatBufferBuilder,
        fbLstmBaseLayer,
        fbLstmDescriptor,
        fbLstmParams);

    CreateAnyLayer(fbLstmLayer.o, serializer::Layer::Layer_LstmLayer);
}

void SerializerVisitor::VisitMaximumLayer(const armnn::IConnectableLayer* layer, const char* name)
{
    boost::ignore_unused(name);

    auto fbMaximumBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Maximum);
    auto fbMaximumLayer     = serializer::CreateMaximumLayer(m_flatBufferBuilder, fbMaximumBaseLayer);

    CreateAnyLayer(fbMaximumLayer.o, serializer::Layer::Layer_MaximumLayer);
}

void SerializerVisitor::VisitMeanLayer(const armnn::IConnectableLayer* layer,
                                       const armnn::MeanDescriptor& descriptor,
                                       const char* name)
{
    boost::ignore_unused(name);

    auto fbMeanBaseLayer  = CreateLayerBase(layer, serializer::LayerType::LayerType_Mean);
    auto fbMeanDescriptor = serializer::CreateMeanDescriptor(m_flatBufferBuilder,
                                                             m_flatBufferBuilder.CreateVector(descriptor.m_Axis),
                                                             descriptor.m_KeepDims);

    auto fbMeanLayer = serializer::CreateMeanLayer(m_flatBufferBuilder,
                                                   fbMeanBaseLayer,
                                                   fbMeanDescriptor);

    CreateAnyLayer(fbMeanLayer.o, serializer::Layer::Layer_MeanLayer);
}

void SerializerVisitor::VisitMinimumLayer(const armnn::IConnectableLayer* layer, const char* name)
{
    boost::ignore_unused(name);

    auto fbMinimumBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Minimum);
    auto fbMinimumLayer     = serializer::CreateMinimumLayer(m_flatBufferBuilder, fbMinimumBaseLayer);

    CreateAnyLayer(fbMinimumLayer.o, serializer::Layer::Layer_MinimumLayer);
}

void SerializerVisitor::VisitMergeLayer(const armnn::IConnectableLayer* layer, const char* name)
{
    boost::ignore_unused(name);

    auto fbMergeBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Merge);
    auto fbMergeLayer     = serializer::CreateMergeLayer(m_flatBufferBuilder, fbMergeBaseLayer);

    CreateAnyLayer(fbMergeLayer.o, serializer::Layer::Layer_MergeLayer);
}

void SerializerVisitor::VisitMergerLayer(const armnn::IConnectableLayer* layer,
                                         const armnn::MergerDescriptor& mergerDescriptor,
                                         const char* name)
{
    VisitConcatLayer(layer, mergerDescriptor, name);
}

void SerializerVisitor::VisitConcatLayer(const armnn::IConnectableLayer* layer,
                                         const armnn::ConcatDescriptor& concatDescriptor,
                                         const char* name)
{
    boost::ignore_unused(name);

    auto flatBufferConcatBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Concat);

    std::vector<flatbuffers::Offset<UintVector>> views;
    for (unsigned int v = 0; v < concatDescriptor.GetNumViews(); ++v)
    {
        const uint32_t* origin = concatDescriptor.GetViewOrigin(v);
        std::vector<uint32_t> origins;
        for (unsigned int d = 0; d < concatDescriptor.GetNumDimensions(); ++d)
        {
            origins.push_back(origin[d]);
        }
        auto view = m_flatBufferBuilder.CreateVector(origins);
        auto uintVector = CreateUintVector(m_flatBufferBuilder, view);
        views.push_back(uintVector);
    }

    auto flatBufferConcatDescriptor = CreateOriginsDescriptor(m_flatBufferBuilder,
                                                              concatDescriptor.GetConcatAxis(),
                                                              concatDescriptor.GetNumViews(),
                                                              concatDescriptor.GetNumDimensions(),
                                                              m_flatBufferBuilder.CreateVector(views));

    auto flatBufferLayer = CreateConcatLayer(m_flatBufferBuilder,
                                             flatBufferConcatBaseLayer,
                                             flatBufferConcatDescriptor);

    CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_ConcatLayer);
}

void SerializerVisitor::VisitMultiplicationLayer(const armnn::IConnectableLayer* layer, const char* name)
{
    boost::ignore_unused(name);

    auto fbMultiplicationBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Multiplication);
    auto fbMultiplicationLayer     = serializer::CreateMultiplicationLayer(m_flatBufferBuilder,
                                                                           fbMultiplicationBaseLayer);

    CreateAnyLayer(fbMultiplicationLayer.o, serializer::Layer::Layer_MultiplicationLayer);
}

void SerializerVisitor::VisitPadLayer(const armnn::IConnectableLayer* layer,
                                      const armnn::PadDescriptor& padDescriptor,
                                      const char* name)
{
    boost::ignore_unused(name);

    auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Pad);

    std::vector<unsigned int> padList;
    for (auto& p : padDescriptor.m_PadList)
    {
        padList.push_back(p.first);
        padList.push_back(p.second);
    }

    auto flatBufferPadDesc = serializer::CreatePadDescriptor(m_flatBufferBuilder,
                                                             m_flatBufferBuilder.CreateVector(padList),
                                                             padDescriptor.m_PadValue);

    auto flatBufferPadLayer = serializer::CreatePadLayer(m_flatBufferBuilder,
                                                         flatBufferBaseLayer,
                                                         flatBufferPadDesc);

    CreateAnyLayer(flatBufferPadLayer.o, serializer::Layer::Layer_PadLayer);
}

void SerializerVisitor::VisitPermuteLayer(const armnn::IConnectableLayer* layer,
                                          const armnn::PermuteDescriptor& permuteDescriptor,
                                          const char* name)
{
    boost::ignore_unused(name);

    // Create FlatBuffer BaseLayer
    auto flatBufferPermuteBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Permute);

    std::vector<unsigned int> dimMappings;
    for (unsigned int i = 0; i < permuteDescriptor.m_DimMappings.GetSize(); ++i)
    {
        dimMappings.push_back(permuteDescriptor.m_DimMappings[i]);
    }

    auto flatBufferPermuteDesc = serializer::CreatePermuteDescriptor(m_flatBufferBuilder,
                                                                     m_flatBufferBuilder.CreateVector(dimMappings));

    // Create the FlatBuffer PermuteLayer
    auto flatBufferPermuteLayer = serializer::CreatePermuteLayer(m_flatBufferBuilder,
                                                                 flatBufferPermuteBaseLayer,
                                                                 flatBufferPermuteDesc);

    // Add the AnyLayer to the FlatBufferLayers
    CreateAnyLayer(flatBufferPermuteLayer.o, serializer::Layer::Layer_PermuteLayer);
}

// Build FlatBuffer for Reshape Layer
void SerializerVisitor::VisitReshapeLayer(const armnn::IConnectableLayer* layer,
                                          const armnn::ReshapeDescriptor& reshapeDescriptor,
                                          const char* name)
{
    boost::ignore_unused(name);

    // Create FlatBuffer BaseLayer
    auto flatBufferReshapeBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Reshape);

    std::vector<unsigned int> targetShape;
    for (unsigned int i = 0; i < reshapeDescriptor.m_TargetShape.GetNumDimensions(); i++)
    {
        targetShape.push_back(reshapeDescriptor.m_TargetShape[i]);
    }

    auto flatBufferReshapeDesc = serializer::CreateReshapeDescriptor(m_flatBufferBuilder,
                                                                     m_flatBufferBuilder.CreateVector(targetShape));

    // Create the FlatBuffer ReshapeLayer
    auto flatBufferReshapeLayer = serializer::CreateReshapeLayer(m_flatBufferBuilder, flatBufferReshapeBaseLayer,
                                                                 flatBufferReshapeDesc);

    // Add the AnyLayer to the FlatBufferLayers
    CreateAnyLayer(flatBufferReshapeLayer.o, serializer::Layer::Layer_ReshapeLayer);
}

void SerializerVisitor::VisitResizeBilinearLayer(const armnn::IConnectableLayer* layer,
                                                 const armnn::ResizeBilinearDescriptor& resizeDescriptor,
                                                 const char* name)
{
    boost::ignore_unused(name);

    auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_ResizeBilinear);

    auto flatBufferDescriptor =
        CreateResizeBilinearDescriptor(m_flatBufferBuilder,
                                       resizeDescriptor.m_TargetWidth,
                                       resizeDescriptor.m_TargetHeight,
                                       GetFlatBufferDataLayout(resizeDescriptor.m_DataLayout));

    auto flatBufferLayer = serializer::CreateResizeBilinearLayer(m_flatBufferBuilder,
                                                                 flatBufferBaseLayer,
                                                                 flatBufferDescriptor);

    CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_ResizeBilinearLayer);
}

void SerializerVisitor::VisitResizeLayer(const armnn::IConnectableLayer* layer,
                                         const armnn::ResizeDescriptor& resizeDescriptor,
                                         const char* name)
{
    boost::ignore_unused(name);

    auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Resize);

    auto flatBufferDescriptor =
        CreateResizeDescriptor(m_flatBufferBuilder,
                               resizeDescriptor.m_TargetHeight,
                               resizeDescriptor.m_TargetWidth,
                               GetFlatBufferResizeMethod(resizeDescriptor.m_Method),
                               GetFlatBufferDataLayout(resizeDescriptor.m_DataLayout));

    auto flatBufferLayer = serializer::CreateResizeLayer(m_flatBufferBuilder,
                                                         flatBufferBaseLayer,
                                                         flatBufferDescriptor);

    CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_ResizeLayer);
}

void SerializerVisitor::VisitRsqrtLayer(const armnn::IConnectableLayer* layer, const char* name)
{
    boost::ignore_unused(name);

    auto fbRsqrtBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Rsqrt);
    auto fbRsqrtLayer     = serializer::CreateRsqrtLayer(m_flatBufferBuilder, fbRsqrtBaseLayer);

    CreateAnyLayer(fbRsqrtLayer.o, serializer::Layer::Layer_RsqrtLayer);
}

void SerializerVisitor::VisitSliceLayer(const armnn::IConnectableLayer* layer,
                                        const armnn::SliceDescriptor& sliceDescriptor,
                                        const char* name)
{
    boost::ignore_unused(name);

    auto fbSliceBaseLayer  = CreateLayerBase(layer, serializer::LayerType::LayerType_Slice);
    auto fbSliceDescriptor = CreateSliceDescriptor(m_flatBufferBuilder,
                                                   m_flatBufferBuilder.CreateVector(sliceDescriptor.m_Begin),
                                                   m_flatBufferBuilder.CreateVector(sliceDescriptor.m_Size));

    auto fbSliceLayer = serializer::CreateSliceLayer(m_flatBufferBuilder, fbSliceBaseLayer, fbSliceDescriptor);

    CreateAnyLayer(fbSliceLayer.o, serializer::Layer::Layer_SliceLayer);
}

// Build FlatBuffer for Softmax Layer
void SerializerVisitor::VisitSoftmaxLayer(const armnn::IConnectableLayer* layer,
                                          const armnn::SoftmaxDescriptor& softmaxDescriptor,
                                          const char* name)
{
    boost::ignore_unused(name);

    // Create FlatBuffer BaseLayer
    auto flatBufferSoftmaxBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Softmax);

    // Create the FlatBuffer SoftmaxDescriptor
    auto flatBufferSoftmaxDesc =
        serializer::CreateSoftmaxDescriptor(m_flatBufferBuilder, softmaxDescriptor.m_Beta);

    // Create the FlatBuffer SoftmaxLayer
    auto flatBufferSoftmaxLayer =
        serializer::CreateSoftmaxLayer(m_flatBufferBuilder,
                                       flatBufferSoftmaxBaseLayer,
                                       flatBufferSoftmaxDesc);

    CreateAnyLayer(flatBufferSoftmaxLayer.o, serializer::Layer::Layer_SoftmaxLayer);
}

void SerializerVisitor::VisitPooling2dLayer(const armnn::IConnectableLayer* layer,
                                            const armnn::Pooling2dDescriptor& pooling2dDescriptor,
                                            const char* name)
{
    boost::ignore_unused(name);

    auto fbPooling2dBaseLayer  = CreateLayerBase(layer, serializer::LayerType::LayerType_Pooling2d);
    auto fbPooling2dDescriptor = serializer::CreatePooling2dDescriptor(
        m_flatBufferBuilder,
        GetFlatBufferPoolingAlgorithm(pooling2dDescriptor.m_PoolType),
        pooling2dDescriptor.m_PadLeft,
        pooling2dDescriptor.m_PadRight,
        pooling2dDescriptor.m_PadTop,
        pooling2dDescriptor.m_PadBottom,
        pooling2dDescriptor.m_PoolWidth,
        pooling2dDescriptor.m_PoolHeight,
        pooling2dDescriptor.m_StrideX,
        pooling2dDescriptor.m_StrideY,
        GetFlatBufferOutputShapeRounding(pooling2dDescriptor.m_OutputShapeRounding),
        GetFlatBufferPaddingMethod(pooling2dDescriptor.m_PaddingMethod),
        GetFlatBufferDataLayout(pooling2dDescriptor.m_DataLayout));

    auto fbPooling2dLayer = serializer::CreatePooling2dLayer(m_flatBufferBuilder,
                                                             fbPooling2dBaseLayer,
                                                             fbPooling2dDescriptor);

    CreateAnyLayer(fbPooling2dLayer.o, serializer::Layer::Layer_Pooling2dLayer);
}

void SerializerVisitor::VisitPreluLayer(const armnn::IConnectableLayer* layer,
                                        const char* name)
{
    boost::ignore_unused(name);

    // Create FlatBuffer BaseLayer
    auto flatBufferPreluBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Prelu);

    // Create the FlatBuffer PreluLayer
    auto flatBufferPreluLayer = serializer::CreatePreluLayer(m_flatBufferBuilder, flatBufferPreluBaseLayer);

    // Add the AnyLayer to the FlatBufferLayers
    CreateAnyLayer(flatBufferPreluLayer.o, serializer::Layer::Layer_PreluLayer);
}

void SerializerVisitor::VisitQuantizeLayer(const armnn::IConnectableLayer* layer, const char* name)
{
    boost::ignore_unused(name);

    auto fbQuantizeBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Quantize);
    auto fbQuantizeLayer     = serializer::CreateQuantizeLayer(m_flatBufferBuilder,
                                                               fbQuantizeBaseLayer);
    CreateAnyLayer(fbQuantizeLayer.o, serializer::Layer::Layer_QuantizeLayer);
}

// Build FlatBuffer for FullyConnected Layer
void SerializerVisitor::VisitFullyConnectedLayer(const armnn::IConnectableLayer* layer,
                                                 const armnn::FullyConnectedDescriptor& fullyConnectedDescriptor,
                                                 const armnn::ConstTensor& weights,
                                                 const armnn::Optional<armnn::ConstTensor>& biases,
                                                 const char* name)
{
    boost::ignore_unused(name);

    // Create FlatBuffer BaseLayer
    auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_FullyConnected);

    // Create FlatBuffer FullyConnectedDescriptor
    auto flatBufferDescriptor =
        serializer::CreateFullyConnectedDescriptor(m_flatBufferBuilder,
                                                   fullyConnectedDescriptor.m_BiasEnabled,
                                                   fullyConnectedDescriptor.m_TransposeWeightMatrix);

    // Create FlatBuffer weights data
    auto flatBufferWeights = CreateConstTensorInfo(weights);

    // Create FlatBuffer bias data
    flatbuffers::Offset<serializer::ConstTensor> flatBufferBiases;
    if (fullyConnectedDescriptor.m_BiasEnabled)
    {
        flatBufferBiases = CreateConstTensorInfo(biases.value());
    }

    // Create FlatBuffer FullyConnectedLayer
    auto flatBufferLayer = serializer::CreateFullyConnectedLayer(m_flatBufferBuilder,
                                                                 flatBufferBaseLayer,
                                                                 flatBufferDescriptor,
                                                                 flatBufferWeights,
                                                                 flatBufferBiases);

    // Add created FullyConnectedLayer to the FlatBufferLayers
    CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_FullyConnectedLayer);
}

// Build FlatBuffer for SpaceToBatchNd Layer
void SerializerVisitor::VisitSpaceToBatchNdLayer(const armnn::IConnectableLayer* layer,
                                                 const armnn::SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor,
                                                 const char* name)
{
    boost::ignore_unused(name);

    // Create FlatBuffer BaseLayer
    auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_SpaceToBatchNd);

    std::vector<unsigned int> padList;
    padList.reserve(spaceToBatchNdDescriptor.m_PadList.size() * 2);
    for (auto& pad : spaceToBatchNdDescriptor.m_PadList)
    {
        padList.push_back(pad.first);
        padList.push_back(pad.second);
    }

    auto flatBufferDescriptor =
        CreateSpaceToBatchNdDescriptor(m_flatBufferBuilder,
                                       m_flatBufferBuilder.CreateVector(spaceToBatchNdDescriptor.m_BlockShape),
                                       m_flatBufferBuilder.CreateVector(padList),
                                       GetFlatBufferDataLayout(spaceToBatchNdDescriptor.m_DataLayout));

    auto flatBufferLayer = serializer::CreateSpaceToBatchNdLayer(m_flatBufferBuilder,
                                                                 flatBufferBaseLayer,
                                                                 flatBufferDescriptor);

    CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_SpaceToBatchNdLayer);
}

// Build FlatBuffer for SpaceToDepthLayer
void SerializerVisitor::VisitSpaceToDepthLayer(const armnn::IConnectableLayer* layer,
                                               const armnn::SpaceToDepthDescriptor& spaceToDepthDescriptor,
                                               const char* name)
{
    boost::ignore_unused(name);

    auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_SpaceToDepth);
    auto flatBufferDescriptor =
        CreateSpaceToDepthDescriptor(m_flatBufferBuilder,
                                     spaceToDepthDescriptor.m_BlockSize,
                                     GetFlatBufferDataLayout(spaceToDepthDescriptor.m_DataLayout));

    auto flatBufferLayer = serializer::CreateSpaceToDepthLayer(m_flatBufferBuilder,
                                                               flatBufferBaseLayer,
                                                               flatBufferDescriptor);

    CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_SpaceToDepthLayer);
}

// Build FlatBuffer for Splitter Layer
void SerializerVisitor::VisitSplitterLayer(const armnn::IConnectableLayer* layer,
                                           const armnn::ViewsDescriptor& viewsDescriptor,
                                           const char* name)
{
    boost::ignore_unused(name);

    // Create FlatBuffer ViewOrigins
    std::vector<flatbuffers::Offset<UintVector>> flatBufferViewOrigins;
    flatBufferViewOrigins.reserve(viewsDescriptor.GetNumViews());

    for (unsigned int vIdx = 0; vIdx < viewsDescriptor.GetNumViews(); ++vIdx)
    {
        std::vector<uint32_t> viewOrigin;
        viewOrigin.reserve(viewsDescriptor.GetNumDimensions());

        // Copy vector
        for (unsigned int dIdx = 0; dIdx < viewsDescriptor.GetNumDimensions(); ++dIdx)
        {
            viewOrigin.push_back(viewsDescriptor.GetViewOrigin(vIdx)[dIdx]);
        }

        flatBufferViewOrigins.push_back(CreateUintVector(m_flatBufferBuilder,
                                                         m_flatBufferBuilder.CreateVector(viewOrigin)));
    }

    // Create FlatBuffer OriginsDescriptor
    auto flatBufferOriginDescriptor = CreateOriginsDescriptor(m_flatBufferBuilder,
                                                              viewsDescriptor.GetOrigins().GetConcatAxis(),
                                                              viewsDescriptor.GetOrigins().GetNumViews(),
                                                              viewsDescriptor.GetOrigins().GetNumDimensions(),
                                                              m_flatBufferBuilder.CreateVector(flatBufferViewOrigins));

    // Create FlatBuffer ViewSizes
    std::vector<flatbuffers::Offset<UintVector>> flatBufferViewSizes;
    flatBufferViewSizes.reserve(viewsDescriptor.GetNumViews());

    for (unsigned int vIdx = 0; vIdx < viewsDescriptor.GetNumViews(); ++vIdx)
    {
        std::vector<uint32_t> viewSize;
        viewSize.reserve(viewsDescriptor.GetNumDimensions());

        // Copy vector
        for (unsigned int dIdx = 0; dIdx < viewsDescriptor.GetNumDimensions(); ++dIdx)
        {
            viewSize.push_back(viewsDescriptor.GetViewSizes(vIdx)[dIdx]);
        }

        flatBufferViewSizes.push_back(CreateUintVector(m_flatBufferBuilder,
                                                       m_flatBufferBuilder.CreateVector(viewSize)));
    }

    // Create FlatBuffer ViewsDescriptor
    auto flatBufferViewsDescriptor = CreateViewsDescriptor(m_flatBufferBuilder,
                                                           flatBufferOriginDescriptor,
                                                           m_flatBufferBuilder.CreateVector(flatBufferViewSizes));

    // Create FlatBuffer BaseLayer
    auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Splitter);

    auto flatBufferSplitterLayer = serializer::CreateSplitterLayer(m_flatBufferBuilder,
                                                                   flatBufferBaseLayer,
                                                                   flatBufferViewsDescriptor);

    CreateAnyLayer(flatBufferSplitterLayer.o, serializer::Layer::Layer_SplitterLayer);
}

void SerializerVisitor::VisitNormalizationLayer(const armnn::IConnectableLayer* layer,
                                                const armnn::NormalizationDescriptor& descriptor,
                                                const char* name)
{
    boost::ignore_unused(name);

    auto fbNormalizationBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Normalization);

    auto fbNormalizationDescriptor = serializer::CreateNormalizationDescriptor(
        m_flatBufferBuilder,
        GetFlatBufferNormalizationAlgorithmChannel(descriptor.m_NormChannelType),
        GetFlatBufferNormalizationAlgorithmMethod(descriptor.m_NormMethodType),
        descriptor.m_NormSize,
        descriptor.m_Alpha,
        descriptor.m_Beta,
        descriptor.m_K,
        GetFlatBufferDataLayout(descriptor.m_DataLayout));

    auto flatBufferLayer = serializer::CreateNormalizationLayer(m_flatBufferBuilder,
                                                                fbNormalizationBaseLayer,
                                                                fbNormalizationDescriptor);

    CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_NormalizationLayer);
}

void SerializerVisitor::VisitStackLayer(const armnn::IConnectableLayer* layer,
                                        const armnn::StackDescriptor& stackDescriptor,
                                        const char* name)
{
    boost::ignore_unused(name);

    auto stackBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Stack);

    std::vector<unsigned int> inputShape;
    for (unsigned int i = 0; i < stackDescriptor.m_InputShape.GetNumDimensions(); i++)
    {
        inputShape.push_back(stackDescriptor.m_InputShape[i]);
    }

    auto flatBufferStackDescriptor = CreateStackDescriptor(m_flatBufferBuilder,
                                                           stackDescriptor.m_Axis,
                                                           stackDescriptor.m_NumInputs,
                                                           m_flatBufferBuilder.CreateVector(inputShape));

    auto stackLayer = serializer::CreateStackLayer(m_flatBufferBuilder, stackBaseLayer, flatBufferStackDescriptor);
    CreateAnyLayer(stackLayer.o, serializer::Layer::Layer_StackLayer);
}

void SerializerVisitor::VisitStandInLayer(const armnn::IConnectableLayer* layer,
                                          const armnn::StandInDescriptor& standInDescriptor,
                                          const char* name)
{
    boost::ignore_unused(name);

    auto fbDescriptor = serializer::CreateStandInDescriptor(m_flatBufferBuilder,
                                                            standInDescriptor.m_NumInputs,
                                                            standInDescriptor.m_NumOutputs);

    auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_StandIn);
    auto fbLayer     = serializer::CreateStandInLayer(m_flatBufferBuilder, fbBaseLayer, fbDescriptor);

    CreateAnyLayer(fbLayer.o, serializer::Layer::Layer_StandInLayer);
}

void SerializerVisitor::VisitStridedSliceLayer(const armnn::IConnectableLayer* layer,
                                               const armnn::StridedSliceDescriptor& stridedSliceDescriptor,
                                               const char* name)
{
    boost::ignore_unused(name);

    auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_StridedSlice);

    auto flatBufferDescriptor =
        CreateStridedSliceDescriptor(m_flatBufferBuilder,
                                     m_flatBufferBuilder.CreateVector(stridedSliceDescriptor.m_Begin),
                                     m_flatBufferBuilder.CreateVector(stridedSliceDescriptor.m_End),
                                     m_flatBufferBuilder.CreateVector(stridedSliceDescriptor.m_Stride),
                                     stridedSliceDescriptor.m_BeginMask,
                                     stridedSliceDescriptor.m_EndMask,
                                     stridedSliceDescriptor.m_ShrinkAxisMask,
                                     stridedSliceDescriptor.m_EllipsisMask,
                                     stridedSliceDescriptor.m_NewAxisMask,
                                     GetFlatBufferDataLayout(stridedSliceDescriptor.m_DataLayout));

    auto flatBufferLayer = serializer::CreateStridedSliceLayer(m_flatBufferBuilder,
                                                               flatBufferBaseLayer,
                                                               flatBufferDescriptor);

    CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_StridedSliceLayer);
}

void SerializerVisitor::VisitSubtractionLayer(const armnn::IConnectableLayer* layer, const char* name)
{
    boost::ignore_unused(name);

    auto fbSubtractionBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Subtraction);
    auto fbSubtractionLayer     = serializer::CreateSubtractionLayer(m_flatBufferBuilder, fbSubtractionBaseLayer);

    CreateAnyLayer(fbSubtractionLayer.o, serializer::Layer::Layer_SubtractionLayer);
}

void SerializerVisitor::VisitSwitchLayer(const armnn::IConnectableLayer* layer, const char* name)
{
    boost::ignore_unused(name);

    auto fbSwitchBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Switch);
    auto fbSwitchLayer     = serializer::CreateSwitchLayer(m_flatBufferBuilder, fbSwitchBaseLayer);

    CreateAnyLayer(fbSwitchLayer.o, serializer::Layer::Layer_SwitchLayer);
}

void SerializerVisitor::VisitTransposeConvolution2dLayer(
    const armnn::IConnectableLayer* layer,
    const armnn::TransposeConvolution2dDescriptor& descriptor,
    const armnn::ConstTensor& weights,
    const armnn::Optional<armnn::ConstTensor>& biases,
    const char* name)
{
    boost::ignore_unused(name);

    auto fbBaseLayer  = CreateLayerBase(layer, serializer::LayerType::LayerType_Convolution2d);
    auto fbDescriptor = CreateTransposeConvolution2dDescriptor(m_flatBufferBuilder,
                                                               descriptor.m_PadLeft,
                                                               descriptor.m_PadRight,
                                                               descriptor.m_PadTop,
                                                               descriptor.m_PadBottom,
                                                               descriptor.m_StrideX,
                                                               descriptor.m_StrideY,
                                                               descriptor.m_BiasEnabled,
                                                               GetFlatBufferDataLayout(descriptor.m_DataLayout));

    // weights & biases
    auto fbWeightsConstTensorInfo = CreateConstTensorInfo(weights);
    flatbuffers::Offset<serializer::ConstTensor> fbBiasesConstTensorInfo;
    if (biases.has_value())
    {
        fbBiasesConstTensorInfo = CreateConstTensorInfo(biases.value());
    }

    auto fbLayer = CreateTransposeConvolution2dLayer(m_flatBufferBuilder,
                                                     fbBaseLayer,
                                                     fbDescriptor,
                                                     fbWeightsConstTensorInfo,
                                                     fbBiasesConstTensorInfo);

    CreateAnyLayer(fbLayer.o, serializer::Layer::Layer_TransposeConvolution2dLayer);
}

void SerializerVisitor::VisitQuantizedLstmLayer(const armnn::IConnectableLayer* layer,
                                                const armnn::QuantizedLstmInputParams& params,
                                                const char* name)
{
    boost::ignore_unused(name);

    auto fbQuantizedLstmBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_QuantizedLstm);

    // Get input parameters
    auto inputToInputWeights  = CreateConstTensorInfo(params.GetInputToInputWeights());
    auto inputToForgetWeights = CreateConstTensorInfo(params.GetInputToForgetWeights());
    auto inputToCellWeights   = CreateConstTensorInfo(params.GetInputToCellWeights());
    auto inputToOutputWeights = CreateConstTensorInfo(params.GetInputToOutputWeights());

    auto recurrentToInputWeights  = CreateConstTensorInfo(params.GetRecurrentToInputWeights());
    auto recurrentToForgetWeights = CreateConstTensorInfo(params.GetRecurrentToForgetWeights());
    auto recurrentToCellWeights   = CreateConstTensorInfo(params.GetRecurrentToCellWeights());
    auto recurrentToOutputWeights = CreateConstTensorInfo(params.GetRecurrentToOutputWeights());

    auto inputGateBias  = CreateConstTensorInfo(params.GetInputGateBias());
    auto forgetGateBias = CreateConstTensorInfo(params.GetForgetGateBias());
    auto cellBias       = CreateConstTensorInfo(params.GetCellBias());
    auto outputGateBias = CreateConstTensorInfo(params.GetOutputGateBias());

    auto fbQuantizedLstmParams = serializer::CreateQuantizedLstmInputParams(
        m_flatBufferBuilder,
        inputToInputWeights,
        inputToForgetWeights,
        inputToCellWeights,
        inputToOutputWeights,
        recurrentToInputWeights,
        recurrentToForgetWeights,
        recurrentToCellWeights,
        recurrentToOutputWeights,
        inputGateBias,
        forgetGateBias,
        cellBias,
        outputGateBias);

    auto fbQuantizedLstmLayer = serializer::CreateQuantizedLstmLayer(
        m_flatBufferBuilder,
        fbQuantizedLstmBaseLayer,
        fbQuantizedLstmParams);

    CreateAnyLayer(fbQuantizedLstmLayer.o, serializer::Layer::Layer_QuantizedLstmLayer);
}

fb::Offset<serializer::LayerBase> SerializerVisitor::CreateLayerBase(const IConnectableLayer* layer,
                                                                     const serializer::LayerType layerType)
{
    uint32_t fbIndex = GetSerializedId(layer->GetGuid());

    std::vector<fb::Offset<serializer::InputSlot>>  inputSlots  = CreateInputSlots(layer);
    std::vector<fb::Offset<serializer::OutputSlot>> outputSlots = CreateOutputSlots(layer);

    return serializer::CreateLayerBase(m_flatBufferBuilder,
                                       fbIndex,
                                       m_flatBufferBuilder.CreateString(layer->GetName()),
                                       layerType,
                                       m_flatBufferBuilder.CreateVector(inputSlots),
                                       m_flatBufferBuilder.CreateVector(outputSlots));
}

void SerializerVisitor::CreateAnyLayer(const flatbuffers::Offset<void>& layer, const serializer::Layer serializerLayer)
{
    auto anyLayer = armnnSerializer::CreateAnyLayer(m_flatBufferBuilder, serializerLayer, layer);
    m_serializedLayers.push_back(anyLayer);
}

template <typename T>
flatbuffers::Offset<flatbuffers::Vector<T>> SerializerVisitor::CreateDataVector(const void* memory, unsigned int size)
{
    const T* buffer = reinterpret_cast<const T*>(memory);
    std::vector<T> vector(buffer, buffer + (size / sizeof(T)));
    auto fbVector = m_flatBufferBuilder.CreateVector(vector);
    return fbVector;
}

flatbuffers::Offset<serializer::ConstTensor>
    SerializerVisitor::CreateConstTensorInfo(const armnn::ConstTensor& constTensor)
{
    armnn::TensorInfo tensorInfo = constTensor.GetInfo();

    // Get the dimensions
    std::vector<unsigned int> shape;

    for (unsigned int dim = 0; dim < tensorInfo.GetShape().GetNumDimensions(); ++dim)
    {
        shape.push_back(tensorInfo.GetShape()[dim]);
    }

    // Create FlatBuffer TensorInfo
    auto flatBufferTensorInfo = serializer::CreateTensorInfo(m_flatBufferBuilder,
                                                             m_flatBufferBuilder.CreateVector(shape),
                                                             GetFlatBufferDataType(tensorInfo.GetDataType()),
                                                             tensorInfo.GetQuantizationScale(),
                                                             tensorInfo.GetQuantizationOffset());
    flatbuffers::Offset<void> fbPayload;

    switch (tensorInfo.GetDataType())
    {
        case armnn::DataType::Float32:
        case armnn::DataType::Signed32:
        {
            auto fbVector = CreateDataVector<int32_t>(constTensor.GetMemoryArea(), constTensor.GetNumBytes());
            flatbuffers::Offset<serializer::IntData> flatBuffersData = serializer::CreateIntData(
                m_flatBufferBuilder,
                fbVector);
            fbPayload = flatBuffersData.o;
            break;
        }
        case armnn::DataType::Float16:
        {
            auto fbVector = CreateDataVector<int16_t>(constTensor.GetMemoryArea(), constTensor.GetNumBytes());
            flatbuffers::Offset<serializer::ShortData> flatBuffersData = serializer::CreateShortData(
                m_flatBufferBuilder,
                fbVector);
            fbPayload = flatBuffersData.o;
            break;
        }
        case armnn::DataType::QSymmS16:
        {
            auto fbVector = CreateDataVector<int16_t>(constTensor.GetMemoryArea(), constTensor.GetNumBytes());
            flatbuffers::Offset<serializer::ShortData> flatBuffersData = serializer::CreateShortData(
                m_flatBufferBuilder,
                fbVector);
            fbPayload = flatBuffersData.o;
            break;
        }
        case armnn::DataType::QAsymmU8:
        case armnn::DataType::Boolean:
        default:
        {
            auto fbVector = CreateDataVector<int8_t>(constTensor.GetMemoryArea(), constTensor.GetNumBytes());
            flatbuffers::Offset<serializer::ByteData> flatBuffersData = serializer::CreateByteData(
                m_flatBufferBuilder,
                fbVector);
            fbPayload = flatBuffersData.o;
        }
    }
    flatbuffers::Offset<serializer::ConstTensor> flatBufferConstTensor = serializer::CreateConstTensor(
        m_flatBufferBuilder,
        flatBufferTensorInfo,
        GetFlatBufferConstTensorDataType(tensorInfo.GetDataType()),
        fbPayload);
    return flatBufferConstTensor;
}

flatbuffers::Offset<armnnSerializer::FeatureCompatibilityVersions> SerializerVisitor::GetVersionTable()
{
    flatbuffers::Offset<armnnSerializer::FeatureCompatibilityVersions> versionsTable =
        serializer::CreateFeatureCompatibilityVersions(
            m_flatBufferBuilder,
            1 // Binding ids scheme version
        );
    return versionsTable;
}

1462 std::vector<fb::Offset<serializer::InputSlot>>
1463  SerializerVisitor::CreateInputSlots(const armnn::IConnectableLayer* layer)
1464 {
1465  std::vector<fb::Offset<serializer::InputSlot>> inputSlots;
1466 
1467  // Get the InputSlots
1468  for (unsigned int slotIndex = 0; slotIndex<layer->GetNumInputSlots(); ++slotIndex)
1469  {
1470  const IInputSlot& inputSlot = layer->GetInputSlot(slotIndex);
1471 
1472  // Get the Connection for the InputSlot
1473  const IOutputSlot* connection = inputSlot.GetConnection();
1474 
1475  // Create FlatBuffer Connection
1476  serializer::Connection conn(GetSerializedId(connection->GetOwningLayerGuid()),
1477  connection->CalculateIndexOnOwner());
1478  // Create FlatBuffer InputSlot
1479  inputSlots.push_back(serializer::CreateInputSlot(m_flatBufferBuilder, slotIndex, &conn));
1480  }
1481  return inputSlots;
1482 }
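Each serialized InputSlot therefore records where its data comes from: a Connection pairing the serialized id of the producing layer with the index of that layer's output slot. A hedged sketch with illustrative values (source layer id 3, output slot index 0, input slot index 0):

    // Illustrative values only.
    serializer::Connection conn(3, 0);
    auto fbInputSlot = serializer::CreateInputSlot(m_flatBufferBuilder, 0, &conn);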
1483 
1484 std::vector<fb::Offset<serializer::OutputSlot>>
1485  SerializerVisitor::CreateOutputSlots(const armnn::IConnectableLayer* layer)
1486 {
1487  std::vector<fb::Offset<serializer::OutputSlot>> outputSlots;
1488 
1489  // Get the OutputSlots
1490  for (unsigned int slotIndex = 0; slotIndex < layer->GetNumOutputSlots(); ++slotIndex)
1491  {
1492  const IOutputSlot& outputSlot = layer->GetOutputSlot(slotIndex);
1493  const armnn::TensorInfo& tensorInfo = outputSlot.GetTensorInfo();
1494 
1495  // Get the dimensions
1496  std::vector<unsigned int> shape;
1497  for(unsigned int dim = 0; dim < tensorInfo.GetShape().GetNumDimensions(); ++dim)
1498  {
1499  shape.push_back(tensorInfo.GetShape()[dim]);
1500  }
1501 
1502  // Create FlatBuffer TensorInfo
1503  auto flatBufferTensorInfo = serializer::CreateTensorInfo(m_flatBufferBuilder,
1504  m_flatBufferBuilder.CreateVector(shape),
1505  GetFlatBufferDataType(tensorInfo.GetDataType()),
1506  tensorInfo.GetQuantizationScale(),
1507  tensorInfo.GetQuantizationOffset());
1508 
1509  // Create FlatBuffer OutputSlot
1510  outputSlots.push_back(serializer::CreateOutputSlot(m_flatBufferBuilder,
1511  slotIndex,
1512  flatBufferTensorInfo));
1513  }
1514  return outputSlots;
1515 }
1516 
1517 
1518 ISerializer* ISerializer::CreateRaw()
1519 {
1520  return new Serializer();
1521 }
1522 
1523 ISerializerPtr ISerializer::Create()
1524 {
1525  return ISerializerPtr(CreateRaw(), &ISerializer::Destroy);
1526 }
1527 
1528 void ISerializer::Destroy(ISerializer* serializer)
1529 {
1530  delete serializer;
1531 }
1532 
1533 void Serializer::Serialize(const INetwork& inNetwork)
1534 {
1535  // Iterate through the network
1536  inNetwork.Accept(m_SerializerVisitor);
1537  flatbuffers::FlatBufferBuilder& fbBuilder = m_SerializerVisitor.GetFlatBufferBuilder();
1538 
1539  // Create FlatBuffer SerializedGraph
1540  auto serializedGraph = serializer::CreateSerializedGraph(
1541  fbBuilder,
1542  fbBuilder.CreateVector(m_SerializerVisitor.GetSerializedLayers()),
1543  fbBuilder.CreateVector(m_SerializerVisitor.GetInputIds()),
1544  fbBuilder.CreateVector(m_SerializerVisitor.GetOutputIds()),
1545  m_SerializerVisitor.GetVersionTable());
1546 
1547  // Serialize the graph
1548  fbBuilder.Finish(serializedGraph);
1549 }
1550 
1551 bool Serializer::SaveSerializedToStream(std::ostream& stream)
1552 {
1553  flatbuffers::FlatBufferBuilder& fbBuilder = m_SerializerVisitor.GetFlatBufferBuilder();
1554 
1555  auto bytesToWrite = boost::numeric_cast<std::streamsize>(fbBuilder.GetSize());
1556  stream.write(reinterpret_cast<const char*>(fbBuilder.GetBufferPointer()), bytesToWrite);
1557  return !stream.bad();
1558 }
1559 
1560 } // namespace armnnSerializer
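Taken together, ISerializer::Create, Serialize and SaveSerializedToStream are the public entry points implemented in this file. A minimal caller-side sketch, assuming an already built armnn::INetwork; SaveNetwork, network and path are illustrative names:

    #include <armnn/INetwork.hpp>
    #include <armnnSerializer/ISerializer.hpp>

    #include <fstream>
    #include <string>

    void SaveNetwork(const armnn::INetwork& network, const std::string& path)
    {
        // Factory returns a smart pointer that calls ISerializer::Destroy on release.
        auto serializer = armnnSerializer::ISerializer::Create();

        // Visit every layer and build the FlatBuffer SerializedGraph.
        serializer->Serialize(network);

        // Write the finished buffer to disk.
        std::ofstream file(path, std::ios::binary);
        serializer->SaveSerializedToStream(file);
    }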