ArmNN
 22.02
Serializer.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 #include "Serializer.hpp"
6 #include "SerializerUtils.hpp"
7 
8 #include <armnn/Descriptors.hpp>
9 #include <armnn/LstmParams.hpp>
13 
14 #include <fmt/format.h>
15 #include <iostream>
16 
17 using namespace armnn;
18 namespace fb = flatbuffers;
19 namespace serializer = armnnSerializer;
20 
21 namespace armnnSerializer
22 {
23 
// Construct the public serializer facade; all work is delegated to the
// pimpl held in pSerializerImpl.
ISerializer::ISerializer() : pSerializerImpl(new SerializerImpl())
{
}
27 
// Defaulted out-of-line — presumably so the pimpl's complete type is visible
// at the point of destruction (standard pimpl idiom); confirm against the header.
ISerializer::~ISerializer() = default;
29 
31 {
32  return new ISerializer();
33 }
34 
36 {
38 }
39 
41 {
42  delete serializer;
43 }
44 
46 {
47  pSerializerImpl->Serialize(inNetwork);
48 }
49 
// Write the previously serialized network to the given stream, forwarding
// the implementation's success flag.
bool ISerializer::SaveSerializedToStream(std::ostream& stream)
{
    return pSerializerImpl->SaveSerializedToStream(stream);
}
54 
// NOTE(review): this definition lost its signature line and all of its case
// labels in extraction; what survives is the skeleton of a switch mapping an
// armnn enum value to its armnnSerializer flatbuffer equivalent. Restore the
// full function from the upstream file before building.
{
    switch (function)
    {
        default:
    }
}
85 
// NOTE(review): as with the fragment above, the signature and case labels of
// this enum-mapping switch were lost in extraction; restore from upstream.
{
    switch (function)
    {
        default:
    }
}
97 
98 uint32_t SerializerStrategy::GetSerializedId(armnn::LayerGuid guid)
99 {
100  if (m_guidMap.empty())
101  {
102  m_guidMap.insert(std::make_pair(guid, m_layerId));
103  }
104  else if (m_guidMap.find(guid) == m_guidMap.end())
105  {
106  ++m_layerId;
107  m_guidMap.insert(std::make_pair(guid, m_layerId));
108 
109  return m_layerId;
110  }
111  return m_guidMap[guid];
112 }
113 
114 // Build FlatBuffer for Input Layer
115 void SerializerStrategy::SerializeInputLayer(const armnn::IConnectableLayer* layer, LayerBindingId id, const char* name)
116 {
117  IgnoreUnused(name);
118 
119  // Create FlatBuffer BaseLayer
120  auto flatBufferInputBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Input);
121 
122  // Create FlatBuffer BindableBaseLayer
123  auto flatBufferInputBindableBaseLayer = serializer::CreateBindableLayerBase(m_flatBufferBuilder,
124  flatBufferInputBaseLayer,
125  id);
126  // Push layer binding id to outputIds.
127  m_inputIds.push_back(id);
128 
129  // Create the FlatBuffer InputLayer
130  auto flatBufferInputLayer = serializer::CreateInputLayer(m_flatBufferBuilder, flatBufferInputBindableBaseLayer);
131 
132  // Add the AnyLayer to the FlatBufferLayers
133  CreateAnyLayer(flatBufferInputLayer.o, serializer::Layer::Layer_InputLayer);
134 }
135 
136 // Build FlatBuffer for Output Layer
137 void SerializerStrategy::SerializeOutputLayer(const armnn::IConnectableLayer* layer,
138  LayerBindingId id, const char* name)
139 {
140  IgnoreUnused(name);
141 
142  // Create FlatBuffer BaseLayer
143  auto flatBufferOutputBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Output);
144 
145  // Create FlatBuffer BindableBaseLayer
146  auto flatBufferOutputBindableBaseLayer = serializer::CreateBindableLayerBase(m_flatBufferBuilder,
147  flatBufferOutputBaseLayer,
148  id);
149  // Push layer binding id to outputIds.
150  m_outputIds.push_back(id);
151 
152  // Create the FlatBuffer OutputLayer
153  auto flatBufferOutputLayer = serializer::CreateOutputLayer(m_flatBufferBuilder, flatBufferOutputBindableBaseLayer);
154  // Add the AnyLayer to the FlatBufferLayers
155  CreateAnyLayer(flatBufferOutputLayer.o, serializer::Layer::Layer_OutputLayer);
156 }
157 
158 // Build FlatBuffer for Activation Layer
159 void SerializerStrategy::SerializeActivationLayer(const armnn::IConnectableLayer* layer,
160  const armnn::ActivationDescriptor& descriptor,
161  const char* name)
162 {
163  IgnoreUnused(name);
164 
165  // Create FlatBuffer BaseLayer
166  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Activation);
167 
168  // Create the FlatBuffer ActivationDescriptor
169  auto flatBufferDescriptor = CreateActivationDescriptor(m_flatBufferBuilder,
171  descriptor.m_A,
172  descriptor.m_B);
173 
174  // Create the FlatBuffer ActivationLayer
175  auto flatBufferAdditionLayer = CreateActivationLayer(m_flatBufferBuilder,
176  flatBufferBaseLayer,
177  flatBufferDescriptor);
178 
179  // Add the AnyLayer to the FlatBufferLayers
180  CreateAnyLayer(flatBufferAdditionLayer.o, serializer::Layer::Layer_ActivationLayer);
181 }
182 
183 // Build FlatBuffer for Addition Layer
184 void SerializerStrategy::SerializeAdditionLayer(const armnn::IConnectableLayer* layer, const char* name)
185 {
186  IgnoreUnused(name);
187 
188  // Create FlatBuffer BaseLayer
189  auto flatBufferAdditionBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Addition);
190 
191  // Create the FlatBuffer AdditionLayer
192  auto flatBufferAdditionLayer = serializer::CreateAdditionLayer(m_flatBufferBuilder, flatBufferAdditionBaseLayer);
193 
194  // Add the AnyLayer to the FlatBufferLayers
195  CreateAnyLayer(flatBufferAdditionLayer.o, serializer::Layer::Layer_AdditionLayer);
196 }
197 
198 // Build FlatBuffer for ArgMinMax Layer
199 void SerializerStrategy::SerializeArgMinMaxLayer(const armnn::IConnectableLayer *layer,
200  const armnn::ArgMinMaxDescriptor& descriptor,
201  const char *name)
202 {
203  IgnoreUnused(name);
204 
205  // Create FlatBuffer BaseLayer
206  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_ArgMinMax);
207 
208  // Create FlatBuffer Descriptor
209  auto flatBufferDescriptor = CreateArgMinMaxDescriptor(m_flatBufferBuilder,
211  descriptor.m_Axis);
212 
213  // Create FlatBuffer ArgMinMaxLayer
214  auto flatBufferLayer = CreateArgMinMaxLayer(m_flatBufferBuilder,
215  flatBufferBaseLayer,
216  flatBufferDescriptor);
217 
219 }
220 
221 // Build FlatBuffer for BatchToSpaceNd Layer
222 void SerializerStrategy::SerializeBatchToSpaceNdLayer(const armnn::IConnectableLayer* layer,
223  const armnn::BatchToSpaceNdDescriptor& descriptor,
224  const char* name)
225 {
226  IgnoreUnused(name);
227 
228  // Create FlatBuffer BaseLayer
229  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_BatchToSpaceNd);
230 
231  std::vector<unsigned int> crops;
232  crops.reserve(descriptor.m_Crops.size() * 2);
233  for (auto& crop : descriptor.m_Crops)
234  {
235  crops.push_back(crop.first);
236  crops.push_back(crop.second);
237  }
238 
239  auto flatBufferDescriptor =
240  CreateBatchToSpaceNdDescriptor(m_flatBufferBuilder,
241  m_flatBufferBuilder.CreateVector(descriptor.m_BlockShape),
242  m_flatBufferBuilder.CreateVector(crops),
244 
245  auto flatBufferLayer = serializer::CreateBatchToSpaceNdLayer(m_flatBufferBuilder,
246  flatBufferBaseLayer,
247  flatBufferDescriptor);
248 
250 }
251 
252 void SerializerStrategy::SerializeBatchNormalizationLayer(
253  const armnn::IConnectableLayer* layer,
254  const armnn::BatchNormalizationDescriptor& batchNormDescriptor,
255  const std::vector<armnn::ConstTensor>& constants,
256  const char* name)
257 {
258  IgnoreUnused(name);
259 
260  const armnn::ConstTensor& mean = constants[0];
261  const armnn::ConstTensor& variance = constants[1];
262  const armnn::ConstTensor& beta = constants[2];
263  const armnn::ConstTensor& gamma = constants[3];
264 
265  auto fbBatchNormalizationBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_BatchNormalization);
266  auto fbBatchNormalizationDescriptor = serializer::CreateBatchNormalizationDescriptor(
267  m_flatBufferBuilder,
268  batchNormDescriptor.m_Eps,
269  GetFlatBufferDataLayout(batchNormDescriptor.m_DataLayout));
270 
271  auto fbMeanConstTensorInfo = CreateConstTensorInfo(mean);
272  auto fbVarianceConstTensorInfo = CreateConstTensorInfo(variance);
273  auto fbBetaConstTensorInfo = CreateConstTensorInfo(beta);
274  auto fbGammaConstTensorInfo = CreateConstTensorInfo(gamma);
275  auto fbBatchNormalizationLayer = serializer::CreateBatchNormalizationLayer(m_flatBufferBuilder,
276  fbBatchNormalizationBaseLayer,
277  fbBatchNormalizationDescriptor,
278  fbMeanConstTensorInfo,
279  fbVarianceConstTensorInfo,
280  fbBetaConstTensorInfo,
281  fbGammaConstTensorInfo);
282 
284 }
285 
286 void SerializerStrategy::SerializeCastLayer(const armnn::IConnectableLayer* layer,
287  const char* name)
288 {
289  IgnoreUnused(name);
290 
291  auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Cast);
292  auto fbCastLayer = serializer::CreateCastLayer(m_flatBufferBuilder, fbBaseLayer);
294 }
295 
296 void SerializerStrategy::SerializeChannelShuffleLayer(const armnn::IConnectableLayer* layer,
297  const armnn::ChannelShuffleDescriptor& descriptor,
298  const char* name)
299 {
300  IgnoreUnused(name);
301  auto fbDescriptor = CreateChannelShuffleDescriptor(m_flatBufferBuilder,
302  descriptor.m_Axis,
303  descriptor.m_NumGroups);
305  auto fbChannelShuffleLayer = serializer::CreateChannelShuffleLayer(m_flatBufferBuilder, fbBaseLayer, fbDescriptor);
307 }
308 
309 void SerializerStrategy::SerializeComparisonLayer(const armnn::IConnectableLayer* layer,
310  const armnn::ComparisonDescriptor& descriptor,
311  const char* name)
312 {
313  IgnoreUnused(name);
314 
316  auto fbDescriptor = serializer::CreateComparisonDescriptor(
317  m_flatBufferBuilder,
319 
320  auto fbLayer = serializer::CreateComparisonLayer(m_flatBufferBuilder, fbBaseLayer, fbDescriptor);
322 }
323 
324 // Build FlatBuffer for Constant Layer
325 void SerializerStrategy::SerializeConstantLayer(const armnn::IConnectableLayer* layer,
326  const std::vector<armnn::ConstTensor>& constants,
327  const char* name)
328 {
329  IgnoreUnused(name);
330 
331  armnn::ConstTensor input = constants[0];
332 
333  // Create FlatBuffer BaseLayer
334  auto flatBufferConstantBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Constant);
335 
336  auto flatBufferConstTensorInfo = CreateConstTensorInfo(input);
337 
338  // Create the FlatBuffer ConstantLayer
339  auto flatBufferLayer = CreateConstantLayer(m_flatBufferBuilder,
340  flatBufferConstantBaseLayer,
341  flatBufferConstTensorInfo);
342 
343  // Add the AnyLayer to the FlatBufferLayers
345 }
346 
347 // Build FlatBuffer for Convolution2dLayer
348 void SerializerStrategy::SerializeConvolution2dLayer(const armnn::IConnectableLayer* layer,
349  const armnn::Convolution2dDescriptor& descriptor,
350  const std::vector<armnn::ConstTensor>& constants,
351  const char* name)
352 {
353  IgnoreUnused(name);
354 
355  const armnn::ConstTensor weights = constants[0];
356 
357  // Create FlatBuffer BaseLayer
358  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Convolution2d);
359 
360  auto flatBufferDescriptor = CreateConvolution2dDescriptor(m_flatBufferBuilder,
361  descriptor.m_PadLeft,
362  descriptor.m_PadRight,
363  descriptor.m_PadTop,
364  descriptor.m_PadBottom,
365  descriptor.m_StrideX,
366  descriptor.m_StrideY,
367  descriptor.m_DilationX,
368  descriptor.m_DilationY,
369  descriptor.m_BiasEnabled,
371  auto flatBufferWeightsConstTensorInfo = CreateConstTensorInfo(weights);
372  flatbuffers::Offset<serializer::ConstTensor> flatBufferBiasesConstTensorInfo;
373 
374  if (constants.size() > 1)
375  {
376  const armnn::ConstTensor biases = constants[1];
377  flatBufferBiasesConstTensorInfo = CreateConstTensorInfo(biases);
378  }
379 
380  // Create the FlatBuffer Convolution2dLayer
381  auto flatBufferLayer = CreateConvolution2dLayer(m_flatBufferBuilder,
382  flatBufferBaseLayer,
383  flatBufferDescriptor,
384  flatBufferWeightsConstTensorInfo,
385  flatBufferBiasesConstTensorInfo);
386 
387  // Add the AnyLayer to the FlatBufferLayers
389 }
390 
391 // Build FlatBuffer for Convolution3dLayer
392 void SerializerStrategy::SerializeConvolution3dLayer(const armnn::IConnectableLayer* layer,
393  const armnn::Convolution3dDescriptor& descriptor,
394  const char* name)
395 {
396  IgnoreUnused(name);
397 
398  // Create FlatBuffer BaseLayer
399  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Convolution3d);
400 
401  auto flatBufferDescriptor = CreateConvolution3dDescriptor(m_flatBufferBuilder,
402  descriptor.m_PadLeft,
403  descriptor.m_PadRight,
404  descriptor.m_PadTop,
405  descriptor.m_PadBottom,
406  descriptor.m_PadFront,
407  descriptor.m_PadBack,
408  descriptor.m_StrideX,
409  descriptor.m_StrideY,
410  descriptor.m_StrideZ,
411  descriptor.m_DilationX,
412  descriptor.m_DilationY,
413  descriptor.m_DilationZ,
414  descriptor.m_BiasEnabled,
416 
417  // Create the FlatBuffer Convolution3dLayer
418  auto flatBufferLayer = CreateConvolution3dLayer(m_flatBufferBuilder,
419  flatBufferBaseLayer,
420  flatBufferDescriptor);
421 
422  // Add the AnyLayer to the FlatBufferLayers
424 }
425 
426 void SerializerStrategy::SerializeDepthToSpaceLayer(const armnn::IConnectableLayer* layer,
427  const armnn::DepthToSpaceDescriptor& descriptor,
428  const char* name)
429 {
430  IgnoreUnused(name);
431 
433  auto fbDescriptor = CreateDepthToSpaceDescriptor(m_flatBufferBuilder,
434  descriptor.m_BlockSize,
436 
437  auto fbLayer = serializer::CreateDepthToSpaceLayer(m_flatBufferBuilder, fbBaseLayer, fbDescriptor);
438 
440 }
441 
442 void SerializerStrategy::SerializeDepthwiseConvolution2dLayer(const armnn::IConnectableLayer* layer,
443  const armnn::DepthwiseConvolution2dDescriptor& descriptor,
444  const std::vector<armnn::ConstTensor>& constants,
445  const char* name)
446 {
447  IgnoreUnused(name);
448 
449  const armnn::ConstTensor& weights = constants[0];
450 
452  auto fbDescriptor = CreateDepthwiseConvolution2dDescriptor(m_flatBufferBuilder,
453  descriptor.m_PadLeft,
454  descriptor.m_PadRight,
455  descriptor.m_PadTop,
456  descriptor.m_PadBottom,
457  descriptor.m_StrideX,
458  descriptor.m_StrideY,
459  descriptor.m_DilationX,
460  descriptor.m_DilationY,
461  descriptor.m_BiasEnabled,
463 
464  flatbuffers::Offset<serializer::ConstTensor> fbWeightsConstTensorInfo = CreateConstTensorInfo(weights);
465  flatbuffers::Offset<serializer::ConstTensor> fbBiasesConstTensorInfo;
466 
467  if (constants.size() > 1)
468  {
469  const armnn::ConstTensor& biases = constants[1];
470  fbBiasesConstTensorInfo = CreateConstTensorInfo(biases);
471  }
472 
473  auto flatBufferLayer = CreateDepthwiseConvolution2dLayer(m_flatBufferBuilder,
474  fbBaseLayer,
475  fbDescriptor,
476  fbWeightsConstTensorInfo,
477  fbBiasesConstTensorInfo);
478 
480 }
481 
482 void SerializerStrategy::SerializeDequantizeLayer(const armnn::IConnectableLayer* layer,
483  const char* name)
484 {
485  IgnoreUnused(name);
486 
487  auto fbDequantizeBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Dequantize);
488  auto fbDequantizeLayer = serializer::CreateDequantizeLayer(m_flatBufferBuilder, fbDequantizeBaseLayer);
489 
491 }
492 
493 void SerializerStrategy::SerializeDetectionPostProcessLayer(const armnn::IConnectableLayer* layer,
494  const armnn::DetectionPostProcessDescriptor& descriptor,
495  const std::vector<armnn::ConstTensor>& constants,
496  const char* name)
497 {
498  IgnoreUnused(name);
499 
500  const armnn::ConstTensor& anchors = constants[0];
501 
503  auto fbDescriptor = CreateDetectionPostProcessDescriptor(m_flatBufferBuilder,
504  descriptor.m_MaxDetections,
505  descriptor.m_MaxClassesPerDetection,
506  descriptor.m_DetectionsPerClass,
507  descriptor.m_NmsScoreThreshold,
508  descriptor.m_NmsIouThreshold,
509  descriptor.m_NumClasses,
510  descriptor.m_UseRegularNms,
511  descriptor.m_ScaleX,
512  descriptor.m_ScaleY,
513  descriptor.m_ScaleW,
514  descriptor.m_ScaleH);
515 
516  flatbuffers::Offset<serializer::ConstTensor> fbAnchorsConstTensorInfo = CreateConstTensorInfo(anchors);
517 
518  auto flatBufferLayer = CreateDetectionPostProcessLayer(m_flatBufferBuilder,
519  fbBaseLayer,
520  fbDescriptor,
521  fbAnchorsConstTensorInfo);
522 
524 }
525 
526 void SerializerStrategy::SerializeDivisionLayer(const armnn::IConnectableLayer* layer, const char* name)
527 {
528  IgnoreUnused(name);
529 
530  auto fbDivisionBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Division);
531  auto fbDivisionLayer = serializer::CreateDivisionLayer(m_flatBufferBuilder, fbDivisionBaseLayer);
532 
534 }
535 
536 void SerializerStrategy::SerializeElementwiseUnaryLayer(const armnn::IConnectableLayer* layer,
537  const armnn::ElementwiseUnaryDescriptor& descriptor,
538  const char* name)
539 {
540  IgnoreUnused(name);
541 
544  m_flatBufferBuilder,
546 
547  auto fbLayer = serializer::CreateElementwiseUnaryLayer(m_flatBufferBuilder, fbBaseLayer, fbDescriptor);
549 }
550 
551 void SerializerStrategy::SerializeFillLayer(const armnn::IConnectableLayer* layer,
552  const armnn::FillDescriptor& fillDescriptor,
553  const char* name)
554 {
555  IgnoreUnused(name);
556 
557  auto fbFillBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Fill);
558 
559  auto fbDescriptor = serializer::CreateFillDescriptor(m_flatBufferBuilder, fillDescriptor.m_Value);
560 
561  auto fbFillLayer = serializer::CreateFillLayer(m_flatBufferBuilder, fbFillBaseLayer, fbDescriptor);
562 
564 }
565 
566 void SerializerStrategy::SerializeFloorLayer(const armnn::IConnectableLayer *layer, const char *name)
567 {
568  IgnoreUnused(name);
569 
570  auto flatBufferFloorBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Floor);
571  auto flatBufferFloorLayer = serializer::CreateFloorLayer(m_flatBufferBuilder, flatBufferFloorBaseLayer);
572 
573  CreateAnyLayer(flatBufferFloorLayer.o, serializer::Layer::Layer_FloorLayer);
574 }
575 
576 void SerializerStrategy::SerializeGatherLayer(const armnn::IConnectableLayer* layer,
577  const armnn::GatherDescriptor& gatherDescriptor,
578  const char* name)
579 {
580  IgnoreUnused(name);
581 
582  auto fbGatherDescriptor = CreateGatherDescriptor(m_flatBufferBuilder,
583  gatherDescriptor.m_Axis);
584  auto fbGatherBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Gather);
585  auto flatBufferLayer = serializer::CreateGatherLayer(m_flatBufferBuilder, fbGatherBaseLayer, fbGatherDescriptor);
586 
588 }
589 
590 void SerializerStrategy::SerializeInstanceNormalizationLayer(
591  const armnn::IConnectableLayer* layer,
592  const armnn::InstanceNormalizationDescriptor& instanceNormalizationDescriptor,
593  const char* name)
594 {
595  IgnoreUnused(name);
596 
598  m_flatBufferBuilder,
599  instanceNormalizationDescriptor.m_Gamma,
600  instanceNormalizationDescriptor.m_Beta,
601  instanceNormalizationDescriptor.m_Eps,
602  GetFlatBufferDataLayout(instanceNormalizationDescriptor.m_DataLayout));
603 
605  auto fbLayer = serializer::CreateInstanceNormalizationLayer(m_flatBufferBuilder, fbBaseLayer, fbDescriptor);
606 
608 }
609 
610 void SerializerStrategy::SerializeL2NormalizationLayer(const armnn::IConnectableLayer* layer,
611  const armnn::L2NormalizationDescriptor& l2NormalizationDescriptor,
612  const char* name)
613 {
614  IgnoreUnused(name);
615 
616  // Create FlatBuffer BaseLayer
618 
619  // Create the FlatBuffer L2Normalization Descriptor
621  m_flatBufferBuilder,
622  GetFlatBufferDataLayout(l2NormalizationDescriptor.m_DataLayout),
623  l2NormalizationDescriptor.m_Eps);
624 
625  // Create FlatBuffer layer
626  auto fbLayer = serializer::CreateL2NormalizationLayer(m_flatBufferBuilder, fbBaseLayer, fbDescriptor);
627 
629 }
630 
631 void SerializerStrategy::SerializeLogicalBinaryLayer(const armnn::IConnectableLayer* layer,
632  const armnn::LogicalBinaryDescriptor& descriptor,
633  const char* name)
634 {
635  IgnoreUnused(name);
636 
638  auto fbDescriptor = serializer::CreateLogicalBinaryDescriptor(
639  m_flatBufferBuilder,
641 
642  auto fbLayer = serializer::CreateLogicalBinaryLayer(m_flatBufferBuilder, fbBaseLayer, fbDescriptor);
644 }
645 
646 void SerializerStrategy::SerializeLogSoftmaxLayer(const armnn::IConnectableLayer* layer,
647  const armnn::LogSoftmaxDescriptor& logSoftmaxDescriptor,
648  const char* name)
649 {
650  IgnoreUnused(name);
651 
652  // Create FlatBuffer BaseLayer
653  auto flatBufferLogSoftmaxBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_LogSoftmax);
654 
655  // Create the FlatBuffer LogSoftmaxDescriptor
656  auto flatBufferLogSoftmaxDesc =
657  serializer::CreateLogSoftmaxDescriptor(m_flatBufferBuilder,
658  logSoftmaxDescriptor.m_Beta,
659  logSoftmaxDescriptor.m_Axis);
660 
661  // Create the FlatBuffer LogSoftmaxLayer
662  auto flatBufferLogSoftmaxLayer =
663  serializer::CreateLogSoftmaxLayer(m_flatBufferBuilder,
664  flatBufferLogSoftmaxBaseLayer,
665  flatBufferLogSoftmaxDesc);
666 
667  CreateAnyLayer(flatBufferLogSoftmaxLayer.o, serializer::Layer::Layer_LogSoftmaxLayer);
668 }
669 
670 void SerializerStrategy::SerializeLstmLayer(const armnn::IConnectableLayer* layer,
671  const armnn::LstmDescriptor& descriptor,
672  const std::vector<armnn::ConstTensor>& constants,
673  const char* name)
674 {
675  IgnoreUnused(name);
676 
677  auto fbLstmBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Lstm);
678 
679  auto fbLstmDescriptor = serializer::CreateLstmDescriptor(
680  m_flatBufferBuilder,
681  descriptor.m_ActivationFunc,
682  descriptor.m_ClippingThresCell,
683  descriptor.m_ClippingThresProj,
684  descriptor.m_CifgEnabled,
685  descriptor.m_PeepholeEnabled,
686  descriptor.m_ProjectionEnabled,
687  descriptor.m_LayerNormEnabled);
688 
689  // Index for constants vector
690  std::size_t i = 0;
691 
692  // Get mandatory/basic input parameters
693  auto inputToForgetWeights = CreateConstTensorInfo(constants[i++]); //InputToForgetWeights
694  auto inputToCellWeights = CreateConstTensorInfo(constants[i++]); //InputToCellWeights
695  auto inputToOutputWeights = CreateConstTensorInfo(constants[i++]); //InputToOutputWeights
696  auto recurrentToForgetWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToForgetWeights
697  auto recurrentToCellWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToCellWeights
698  auto recurrentToOutputWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToOutputWeights
699  auto forgetGateBias = CreateConstTensorInfo(constants[i++]); //ForgetGateBias
700  auto cellBias = CreateConstTensorInfo(constants[i++]); //CellBias
701  auto outputGateBias = CreateConstTensorInfo(constants[i++]); //OutputGateBias
702 
703 
704 
705  //Define optional parameters, these will be set depending on configuration in Lstm descriptor
706  flatbuffers::Offset<serializer::ConstTensor> inputToInputWeights;
707  flatbuffers::Offset<serializer::ConstTensor> recurrentToInputWeights;
708  flatbuffers::Offset<serializer::ConstTensor> cellToInputWeights;
709  flatbuffers::Offset<serializer::ConstTensor> inputGateBias;
710  flatbuffers::Offset<serializer::ConstTensor> projectionWeights;
711  flatbuffers::Offset<serializer::ConstTensor> projectionBias;
712  flatbuffers::Offset<serializer::ConstTensor> cellToForgetWeights;
713  flatbuffers::Offset<serializer::ConstTensor> cellToOutputWeights;
714  flatbuffers::Offset<serializer::ConstTensor> inputLayerNormWeights;
715  flatbuffers::Offset<serializer::ConstTensor> forgetLayerNormWeights;
716  flatbuffers::Offset<serializer::ConstTensor> cellLayerNormWeights;
717  flatbuffers::Offset<serializer::ConstTensor> outputLayerNormWeights;
718 
719  if (!descriptor.m_CifgEnabled)
720  {
721  inputToInputWeights = CreateConstTensorInfo(constants[i++]); //InputToInputWeights
722  recurrentToInputWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToInputWeights
723  inputGateBias = CreateConstTensorInfo(constants[i++]); //InputGateBias
724  }
725 
726  if (descriptor.m_PeepholeEnabled)
727  {
728  if (!descriptor.m_CifgEnabled)
729  {
730  cellToInputWeights = CreateConstTensorInfo(constants[i++]); //CellToInputWeights
731  }
732  cellToForgetWeights = CreateConstTensorInfo(constants[i++]); //CellToForgetWeights
733  cellToOutputWeights = CreateConstTensorInfo(constants[i++]); //CellToOutputWeights
734  }
735 
736  if (descriptor.m_ProjectionEnabled)
737  {
738  projectionWeights = CreateConstTensorInfo(constants[i++]); //ProjectionWeights
739  projectionBias = CreateConstTensorInfo(constants[i++]); //ProjectionBias
740  }
741 
742  if (descriptor.m_LayerNormEnabled)
743  {
744  if (!descriptor.m_CifgEnabled)
745  {
746  inputLayerNormWeights = CreateConstTensorInfo(constants[i++]); //InputLayerNormWeights
747  }
748  forgetLayerNormWeights = CreateConstTensorInfo(constants[i++]); //ForgetLayerNormWeights
749  cellLayerNormWeights = CreateConstTensorInfo(constants[i++]); //CellLayerNormWeights
750  outputLayerNormWeights = CreateConstTensorInfo(constants[i++]); //OutputLayerNormWeights
751  }
752 
753  auto fbLstmParams = serializer::CreateLstmInputParams(
754  m_flatBufferBuilder,
755  inputToForgetWeights,
756  inputToCellWeights,
757  inputToOutputWeights,
758  recurrentToForgetWeights,
759  recurrentToCellWeights,
760  recurrentToOutputWeights,
761  forgetGateBias,
762  cellBias,
763  outputGateBias,
764  inputToInputWeights,
765  recurrentToInputWeights,
766  cellToInputWeights,
767  inputGateBias,
768  projectionWeights,
769  projectionBias,
770  cellToForgetWeights,
771  cellToOutputWeights,
772  inputLayerNormWeights,
773  forgetLayerNormWeights,
774  cellLayerNormWeights,
775  outputLayerNormWeights);
776 
777  auto fbLstmLayer = serializer::CreateLstmLayer(
778  m_flatBufferBuilder,
779  fbLstmBaseLayer,
780  fbLstmDescriptor,
781  fbLstmParams);
782 
784 }
785 
786 void SerializerStrategy::SerializeMaximumLayer(const armnn::IConnectableLayer* layer, const char* name)
787 {
788  IgnoreUnused(name);
789 
790  auto fbMaximumBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Maximum);
791  auto fbMaximumLayer = serializer::CreateMaximumLayer(m_flatBufferBuilder, fbMaximumBaseLayer);
792 
794 }
795 
796 void SerializerStrategy::SerializeMeanLayer(const armnn::IConnectableLayer* layer,
797  const armnn::MeanDescriptor& descriptor,
798  const char* name)
799 {
800  IgnoreUnused(name);
801 
802  auto fbMeanBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Mean);
803  auto fbMeanDescriptor = serializer::CreateMeanDescriptor(m_flatBufferBuilder,
804  m_flatBufferBuilder.CreateVector(descriptor.m_Axis),
805  descriptor.m_KeepDims);
806 
807  auto fbMeanLayer = serializer::CreateMeanLayer(m_flatBufferBuilder,
808  fbMeanBaseLayer,
809  fbMeanDescriptor);
810 
812 }
813 
814 void SerializerStrategy::SerializeMinimumLayer(const armnn::IConnectableLayer* layer, const char* name)
815 {
816  IgnoreUnused(name);
817 
818  auto fbMinimumBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Minimum);
819  auto fbMinimumLayer = serializer::CreateMinimumLayer(m_flatBufferBuilder, fbMinimumBaseLayer);
820 
822 }
823 
824 void SerializerStrategy::SerializeMergeLayer(const armnn::IConnectableLayer* layer, const char* name)
825 {
826  IgnoreUnused(name);
827 
828  auto fbMergeBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Merge);
829  auto fbMergeLayer = serializer::CreateMergeLayer(m_flatBufferBuilder, fbMergeBaseLayer);
830 
832 }
833 
834 void SerializerStrategy::SerializeConcatLayer(const armnn::IConnectableLayer* layer,
835  const armnn::ConcatDescriptor& concatDescriptor,
836  const char* name)
837 {
838  IgnoreUnused(name);
839 
840  auto flatBufferConcatBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Concat);
841 
842  std::vector<flatbuffers::Offset<UintVector>> views;
843  for (unsigned int v = 0; v < concatDescriptor.GetNumViews(); ++v)
844  {
845  const uint32_t* origin = concatDescriptor.GetViewOrigin(v);
846  std::vector<uint32_t> origins;
847  for (unsigned int d = 0; d < concatDescriptor.GetNumDimensions(); ++d)
848  {
849  origins.push_back(origin[d]);
850  }
851  auto view = m_flatBufferBuilder.CreateVector(origins);
852  auto uintVector = CreateUintVector(m_flatBufferBuilder, view);
853  views.push_back(uintVector);
854  }
855 
856  auto flatBufferConcatDescriptor = CreateOriginsDescriptor(m_flatBufferBuilder,
857  concatDescriptor.GetConcatAxis(),
858  concatDescriptor.GetNumViews(),
859  concatDescriptor.GetNumDimensions(),
860  m_flatBufferBuilder.CreateVector(views));
861 
862  auto flatBufferLayer = CreateConcatLayer(m_flatBufferBuilder,
863  flatBufferConcatBaseLayer,
864  flatBufferConcatDescriptor);
865 
867 }
868 
869 void SerializerStrategy::SerializeMultiplicationLayer(const armnn::IConnectableLayer* layer, const char* name)
870 {
871  IgnoreUnused(name);
872 
873  auto fbMultiplicationBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Multiplication);
874  auto fbMultiplicationLayer = serializer::CreateMultiplicationLayer(m_flatBufferBuilder,
875  fbMultiplicationBaseLayer);
876 
878 }
879 
880 void SerializerStrategy::SerializePadLayer(const armnn::IConnectableLayer* layer,
881  const armnn::PadDescriptor& padDescriptor,
882  const char* name)
883 {
884  IgnoreUnused(name);
885 
886  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Pad);
887 
888  std::vector<unsigned int> padList;
889  for (auto& p: padDescriptor.m_PadList)
890  {
891  padList.push_back(p.first);
892  padList.push_back(p.second);
893  }
894 
895  auto flatBufferPadDesc = serializer::CreatePadDescriptor(m_flatBufferBuilder,
896  m_flatBufferBuilder.CreateVector(padList),
897  padDescriptor.m_PadValue,
898  GetFlatBufferPaddingMode(padDescriptor.m_PaddingMode));
899 
900  auto flatBufferPadLayer = serializer::CreatePadLayer(m_flatBufferBuilder,
901  flatBufferBaseLayer,
902  flatBufferPadDesc);
903 
904  CreateAnyLayer(flatBufferPadLayer.o, serializer::Layer::Layer_PadLayer);
905 }
906 
907 void SerializerStrategy::SerializePermuteLayer(const armnn::IConnectableLayer* layer,
908  const armnn::PermuteDescriptor& permuteDescriptor,
909  const char* name)
910 {
911  IgnoreUnused(name);
912 
913  // Create FlatBuffer BaseLayer
914  auto flatBufferPermuteBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Permute);
915 
916  std::vector<unsigned int> dimMappings;
917  for (unsigned int i=0; i<permuteDescriptor.m_DimMappings.GetSize(); ++i)
918  {
919  dimMappings.push_back(permuteDescriptor.m_DimMappings[i]);
920  }
921 
922  auto flatBufferPermuteDesc = serializer::CreatePermuteDescriptor(m_flatBufferBuilder,
923  m_flatBufferBuilder.CreateVector(dimMappings));
924 
925  // Create the FlatBuffer PermuteLayer
926  auto flatBufferPermuteLayer = serializer::CreatePermuteLayer(m_flatBufferBuilder,
927  flatBufferPermuteBaseLayer,
928  flatBufferPermuteDesc);
929 
930  // Add the AnyLayer to the FlatBufferLayers
931  CreateAnyLayer(flatBufferPermuteLayer.o, serializer::Layer::Layer_PermuteLayer);
932 }
933 
934 // Build FlatBuffer for Rank Layer
935 void SerializerStrategy::SerializeRankLayer(const armnn::IConnectableLayer* layer,
936  const char* name)
937 {
938  IgnoreUnused(name);
939  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Rank);
940  auto flatBufferRankLayer = serializer::CreateRankLayer(m_flatBufferBuilder, flatBufferBaseLayer);
941 
942  CreateAnyLayer(flatBufferRankLayer.o, serializer::Layer::Layer_RankLayer);
943 }
944 
945 void SerializerStrategy::SerializeReduceLayer(const armnn::IConnectableLayer* layer,
946  const armnn::ReduceDescriptor& reduceDescriptor,
947  const char*)
948 {
949  auto fbReduceBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Reduce);
950  auto fbDescriptor = CreateReduceDescriptor(m_flatBufferBuilder,
951  reduceDescriptor.m_KeepDims,
952  m_flatBufferBuilder.CreateVector(reduceDescriptor.m_vAxis),
954  auto fbReduceLayer = serializer::CreateReduceLayer(m_flatBufferBuilder,
955  fbReduceBaseLayer,
956  fbDescriptor);
957 
959 }
960 
961 // Build FlatBuffer for Reshape Layer
962 void SerializerStrategy::SerializeReshapeLayer(const armnn::IConnectableLayer* layer,
963  const armnn::ReshapeDescriptor& reshapeDescriptor,
964  const char* name)
965 {
966  IgnoreUnused(name);
967 
968  // Create FlatBuffer BaseLayer
969  auto flatBufferReshapeBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Reshape);
970 
971  std::vector<unsigned int> targetShape;
972  for (unsigned int i =0; i < reshapeDescriptor.m_TargetShape.GetNumDimensions(); i++)
973  {
974  targetShape.push_back(reshapeDescriptor.m_TargetShape[i]);
975  }
976 
977  auto flatBufferReshapeDesc = serializer::CreateReshapeDescriptor(m_flatBufferBuilder,
978  m_flatBufferBuilder.CreateVector(targetShape));
979 
980  // Create the FlatBuffer ReshapeLayer
981  auto flatBufferReshapeLayer = serializer::CreateReshapeLayer(m_flatBufferBuilder, flatBufferReshapeBaseLayer,
982  flatBufferReshapeDesc);
983 
984  // Add the AnyLayer to the FlatBufferLayers
985  CreateAnyLayer(flatBufferReshapeLayer.o, serializer::Layer::Layer_ReshapeLayer);
986 }
987 
988 void SerializerStrategy::SerializeResizeLayer(const armnn::IConnectableLayer* layer,
989  const armnn::ResizeDescriptor& resizeDescriptor,
990  const char* name)
991 {
992  IgnoreUnused(name);
993 
994  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Resize);
995 
996  auto flatBufferDescriptor =
997  CreateResizeDescriptor(m_flatBufferBuilder,
998  resizeDescriptor.m_TargetHeight,
999  resizeDescriptor.m_TargetWidth,
1000  GetFlatBufferResizeMethod(resizeDescriptor.m_Method),
1001  GetFlatBufferDataLayout(resizeDescriptor.m_DataLayout),
1002  resizeDescriptor.m_AlignCorners,
1003  resizeDescriptor.m_HalfPixelCenters);
1004 
1005  auto flatBufferLayer = serializer::CreateResizeLayer(m_flatBufferBuilder,
1006  flatBufferBaseLayer,
1007  flatBufferDescriptor);
1008 
1010 }
1011 
1012 void SerializerStrategy::SerializeSliceLayer(const armnn::IConnectableLayer* layer,
1013  const armnn::SliceDescriptor& sliceDescriptor,
1014  const char* name)
1015 {
1016  IgnoreUnused(name);
1017 
1018  auto fbSliceBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Slice);
1019  auto fbSliceDescriptor = CreateSliceDescriptor(m_flatBufferBuilder,
1020  m_flatBufferBuilder.CreateVector(sliceDescriptor.m_Begin),
1021  m_flatBufferBuilder.CreateVector(sliceDescriptor.m_Size));
1022 
1023  auto fbSliceLayer = serializer::CreateSliceLayer(m_flatBufferBuilder, fbSliceBaseLayer, fbSliceDescriptor);
1024 
1026 }
1027 
1028 // Build FlatBuffer for Softmax Layer
1029 void SerializerStrategy::SerializeSoftmaxLayer(const armnn::IConnectableLayer* layer,
1030  const armnn::SoftmaxDescriptor& softmaxDescriptor,
1031  const char* name)
1032 {
1033  IgnoreUnused(name);
1034 
1035  // Create FlatBuffer BaseLayer
1036  auto flatBufferSoftmaxBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Softmax);
1037 
1038  // Create the FlatBuffer SoftmaxDescriptor
1039  auto flatBufferSoftmaxDesc =
1040  serializer::CreateSoftmaxDescriptor(m_flatBufferBuilder,
1041  softmaxDescriptor.m_Beta,
1042  softmaxDescriptor.m_Axis);
1043 
1044  // Create the FlatBuffer SoftmaxLayer
1045  auto flatBufferSoftmaxLayer =
1046  serializer::CreateSoftmaxLayer(m_flatBufferBuilder,
1047  flatBufferSoftmaxBaseLayer,
1048  flatBufferSoftmaxDesc);
1049 
1050  CreateAnyLayer(flatBufferSoftmaxLayer.o, serializer::Layer::Layer_SoftmaxLayer);
1051 }
1052 
1053 void SerializerStrategy::SerializePooling2dLayer(const armnn::IConnectableLayer* layer,
1054  const armnn::Pooling2dDescriptor& pooling2dDescriptor,
1055  const char* name)
1056 {
1057  IgnoreUnused(name);
1058 
1059  auto fbPooling2dBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Pooling2d);
1060  auto fbPooling2dDescriptor = serializer::CreatePooling2dDescriptor(
1061  m_flatBufferBuilder,
1062  GetFlatBufferPoolingAlgorithm(pooling2dDescriptor.m_PoolType),
1063  pooling2dDescriptor.m_PadLeft,
1064  pooling2dDescriptor.m_PadRight,
1065  pooling2dDescriptor.m_PadTop,
1066  pooling2dDescriptor.m_PadBottom,
1067  pooling2dDescriptor.m_PoolWidth,
1068  pooling2dDescriptor.m_PoolHeight,
1069  pooling2dDescriptor.m_StrideX,
1070  pooling2dDescriptor.m_StrideY,
1072  GetFlatBufferPaddingMethod(pooling2dDescriptor.m_PaddingMethod),
1073  GetFlatBufferDataLayout(pooling2dDescriptor.m_DataLayout));
1074 
1075  auto fbPooling2dLayer = serializer::CreatePooling2dLayer(m_flatBufferBuilder,
1076  fbPooling2dBaseLayer,
1077  fbPooling2dDescriptor);
1078 
1080 }
1081 
1082 void SerializerStrategy::SerializePooling3dLayer(const armnn::IConnectableLayer* layer,
1083  const armnn::Pooling3dDescriptor& pooling3dDescriptor,
1084  const char* name)
1085 {
1086  IgnoreUnused(name);
1087 
1088  auto fbPooling3dBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Pooling3d);
1089  auto fbPooling3dDescriptor = serializer::CreatePooling3dDescriptor(
1090  m_flatBufferBuilder,
1091  GetFlatBufferPoolingAlgorithm(pooling3dDescriptor.m_PoolType),
1092  pooling3dDescriptor.m_PadLeft,
1093  pooling3dDescriptor.m_PadRight,
1094  pooling3dDescriptor.m_PadTop,
1095  pooling3dDescriptor.m_PadBottom,
1096  pooling3dDescriptor.m_PadFront,
1097  pooling3dDescriptor.m_PadBack,
1098  pooling3dDescriptor.m_PoolWidth,
1099  pooling3dDescriptor.m_PoolHeight,
1100  pooling3dDescriptor.m_PoolDepth,
1101  pooling3dDescriptor.m_StrideX,
1102  pooling3dDescriptor.m_StrideY,
1103  pooling3dDescriptor.m_StrideZ,
1105  GetFlatBufferPaddingMethod(pooling3dDescriptor.m_PaddingMethod),
1106  GetFlatBufferDataLayout(pooling3dDescriptor.m_DataLayout));
1107 
1108  auto fbPooling3dLayer = serializer::CreatePooling3dLayer(m_flatBufferBuilder,
1109  fbPooling3dBaseLayer,
1110  fbPooling3dDescriptor);
1111 
1113 }
1114 
1115 void SerializerStrategy::SerializePreluLayer(const armnn::IConnectableLayer* layer,
1116  const char* name)
1117 {
1118  IgnoreUnused(name);
1119 
1120  // Create FlatBuffer BaseLayer
1121  auto flatBufferPreluBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Prelu);
1122 
1123  // Create the FlatBuffer AdditionLayer
1124  auto flatBufferPreluLayer = serializer::CreatePreluLayer(m_flatBufferBuilder, flatBufferPreluBaseLayer);
1125 
1126  // Add the AnyLayer to the FlatBufferLayers
1127  CreateAnyLayer(flatBufferPreluLayer.o, serializer::Layer::Layer_PreluLayer);
1128 }
1129 
1130 void SerializerStrategy::SerializeQuantizeLayer(const armnn::IConnectableLayer *layer, const char *name)
1131 {
1132  IgnoreUnused(name);
1133 
1134  auto fbQuantizeBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Quantize);
1135  auto fbQuantizeLayer = serializer::CreateQuantizeLayer(m_flatBufferBuilder,
1136  fbQuantizeBaseLayer);
1138 }
1139 
1140 // Build FlatBuffer for FullyConnected Layer
1141 void SerializerStrategy::SerializeFullyConnectedLayer(const armnn::IConnectableLayer* layer,
1142  const armnn::FullyConnectedDescriptor& fullyConnectedDescriptor,
1143  const char*)
1144 {
1145  // Create FlatBuffer BaseLayer
1146  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_FullyConnected);
1147 
1148  // Create FlatBuffer FullyConnectedDescriptor
1149  auto flatBufferDescriptor =
1150  serializer::CreateFullyConnectedDescriptor(m_flatBufferBuilder,
1151  fullyConnectedDescriptor.m_BiasEnabled,
1152  fullyConnectedDescriptor.m_TransposeWeightMatrix,
1153  fullyConnectedDescriptor.m_ConstantWeights);
1154 
1155  // Create FlatBuffer FullyConnectedLayer
1156  auto flatBufferLayer = serializer::CreateFullyConnectedLayer(m_flatBufferBuilder,
1157  flatBufferBaseLayer,
1158  flatBufferDescriptor);
1159 
1160  // Add created FullyConnectedLayer to the FlatBufferLayers
1162 }
1163 
1164 // Build FlatBuffer for SpaceToBatchNd Layer
1165 void SerializerStrategy::SerializeSpaceToBatchNdLayer(const armnn::IConnectableLayer* layer,
1166  const armnn::SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor,
1167  const char* name)
1168 {
1169  IgnoreUnused(name);
1170 
1171  // Create FlatBuffer BaseLayer
1172  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_SpaceToBatchNd);
1173 
1174  std::vector<unsigned int> padList;
1175  padList.reserve(spaceToBatchNdDescriptor.m_PadList.size()*2);
1176  for (auto& pad : spaceToBatchNdDescriptor.m_PadList)
1177  {
1178  padList.push_back(pad.first);
1179  padList.push_back(pad.second);
1180  }
1181 
1182  auto flatBufferDescriptor =
1183  CreateSpaceToBatchNdDescriptor(m_flatBufferBuilder,
1184  m_flatBufferBuilder.CreateVector(spaceToBatchNdDescriptor.m_BlockShape),
1185  m_flatBufferBuilder.CreateVector(padList),
1186  GetFlatBufferDataLayout(spaceToBatchNdDescriptor.m_DataLayout));
1187 
1188  auto flatBufferLayer = serializer::CreateSpaceToBatchNdLayer(m_flatBufferBuilder,
1189  flatBufferBaseLayer,
1190  flatBufferDescriptor);
1191 
1193 }
1194 
1195 // Build FlatBuffer for SpaceToDepthLayer
1196 void SerializerStrategy::SerializeSpaceToDepthLayer(const armnn::IConnectableLayer* layer,
1197  const armnn::SpaceToDepthDescriptor& spaceToDepthDescriptor,
1198  const char* name)
1199 {
1200  IgnoreUnused(name);
1201 
1202  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_SpaceToDepth);
1203  auto flatBufferDescriptor =
1204  CreateSpaceToDepthDescriptor(m_flatBufferBuilder,
1205  spaceToDepthDescriptor.m_BlockSize,
1206  GetFlatBufferDataLayout(spaceToDepthDescriptor.m_DataLayout));
1207 
1208  auto flatBufferLayer = serializer::CreateSpaceToDepthLayer(m_flatBufferBuilder,
1209  flatBufferBaseLayer,
1210  flatBufferDescriptor);
1211 
1213 }
1214 
1215 // Build FlatBuffer for Splitter Layer
1216 void SerializerStrategy::SerializeSplitterLayer(const armnn::IConnectableLayer* layer,
1217  const armnn::ViewsDescriptor& viewsDescriptor,
1218  const char* name)
1219 {
1220  IgnoreUnused(name);
1221 
1222  // Create FlatBuffer ViewOrigins
1223  std::vector<flatbuffers::Offset<UintVector>> flatBufferViewOrigins;
1224  flatBufferViewOrigins.reserve(viewsDescriptor.GetNumViews());
1225 
1226  for(unsigned int vIdx = 0; vIdx < viewsDescriptor.GetNumViews(); ++vIdx)
1227  {
1228  std::vector<uint32_t> viewOrigin;
1229  viewOrigin.reserve(viewsDescriptor.GetNumDimensions());
1230 
1231  // Copy vector
1232  for(unsigned int dIdx = 0; dIdx < viewsDescriptor.GetNumDimensions(); ++dIdx)
1233  {
1234  viewOrigin.push_back(viewsDescriptor.GetViewOrigin(vIdx)[dIdx]);
1235  }
1236 
1237  flatBufferViewOrigins.push_back(CreateUintVector(m_flatBufferBuilder,
1238  m_flatBufferBuilder.CreateVector(viewOrigin)));
1239  }
1240 
1241  // Create FlatBuffer OriginsDescriptor
1242  auto flatBufferOriginDescriptor = CreateOriginsDescriptor(m_flatBufferBuilder,
1243  viewsDescriptor.GetOrigins().GetConcatAxis(),
1244  viewsDescriptor.GetOrigins().GetNumViews(),
1245  viewsDescriptor.GetOrigins().GetNumDimensions(),
1246  m_flatBufferBuilder.CreateVector(flatBufferViewOrigins));
1247 
1248  // Create FlatBuffer ViewOrigins
1249  std::vector<flatbuffers::Offset<UintVector>> flatBufferViewSizes;
1250  flatBufferViewSizes.reserve(viewsDescriptor.GetNumViews());
1251 
1252  for(unsigned int vIdx = 0; vIdx < viewsDescriptor.GetNumViews(); ++vIdx)
1253  {
1254  std::vector<uint32_t> viewSize;
1255  viewSize.reserve(viewsDescriptor.GetNumDimensions());
1256 
1257  // Copy vector
1258  for(unsigned int dIdx = 0; dIdx < viewsDescriptor.GetNumDimensions(); ++dIdx)
1259  {
1260  viewSize.push_back(viewsDescriptor.GetViewSizes(vIdx)[dIdx]);
1261  }
1262 
1263  flatBufferViewSizes.push_back(CreateUintVector(m_flatBufferBuilder,
1264  m_flatBufferBuilder.CreateVector(viewSize)));
1265  }
1266 
1267  // Create FlatBuffer ViewsDescriptor
1268  auto flatBufferViewsDescriptor = CreateViewsDescriptor(m_flatBufferBuilder,
1269  flatBufferOriginDescriptor,
1270  m_flatBufferBuilder.CreateVector(flatBufferViewSizes));
1271 
1272  // Create FlatBuffer BaseLayer
1273  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Splitter);
1274 
1275  auto flatBufferSplitterLayer = serializer::CreateSplitterLayer(m_flatBufferBuilder,
1276  flatBufferBaseLayer,
1277  flatBufferViewsDescriptor);
1278 
1279  CreateAnyLayer(flatBufferSplitterLayer.o, serializer::Layer::Layer_SplitterLayer);
1280 }
1281 
1282 void SerializerStrategy::SerializeNormalizationLayer(const armnn::IConnectableLayer* layer,
1283  const armnn::NormalizationDescriptor& descriptor,
1284  const char* name)
1285 {
1286  IgnoreUnused(name);
1287 
1288  auto fbNormalizationBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Normalization);
1289 
1290  auto fbNormalizationDescriptor = serializer::CreateNormalizationDescriptor(
1291  m_flatBufferBuilder,
1294  descriptor.m_NormSize,
1295  descriptor.m_Alpha,
1296  descriptor.m_Beta,
1297  descriptor.m_K,
1299 
1300  auto flatBufferLayer = serializer::CreateNormalizationLayer(m_flatBufferBuilder,
1301  fbNormalizationBaseLayer,
1302  fbNormalizationDescriptor);
1303 
1305 }
1306 
1307 void SerializerStrategy::SerializeShapeLayer(const armnn::IConnectableLayer* layer,
1308  const char* name)
1309 {
1310  IgnoreUnused(name);
1311 
1312  auto shapeBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Shape);
1313  auto shapeLayer = serializer::CreateShapeLayer(m_flatBufferBuilder, shapeBaseLayer);
1314 
1316 }
1317 
1318 void SerializerStrategy::SerializeStackLayer(const armnn::IConnectableLayer* layer,
1319  const armnn::StackDescriptor& stackDescriptor,
1320  const char* name)
1321 {
1322  IgnoreUnused(name);
1323 
1324  auto stackBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Stack);
1325 
1326  std::vector<unsigned int> inputShape;
1327  for (unsigned int i =0; i < stackDescriptor.m_InputShape.GetNumDimensions(); i++)
1328  {
1329  inputShape.push_back(stackDescriptor.m_InputShape[i]);
1330  }
1331 
1332  auto flatBufferStackDescriptor = CreateStackDescriptor(m_flatBufferBuilder,
1333  stackDescriptor.m_Axis,
1334  stackDescriptor.m_NumInputs,
1335  m_flatBufferBuilder.CreateVector(inputShape));
1336 
1337  auto stackLayer = serializer::CreateStackLayer(m_flatBufferBuilder, stackBaseLayer, flatBufferStackDescriptor);
1339 }
1340 
1341 void SerializerStrategy::SerializeStandInLayer(const armnn::IConnectableLayer *layer,
1342  const armnn::StandInDescriptor& standInDescriptor,
1343  const char *name)
1344 {
1345  IgnoreUnused(name);
1346 
1347  auto fbDescriptor = serializer::CreateStandInDescriptor(m_flatBufferBuilder,
1348  standInDescriptor.m_NumInputs,
1349  standInDescriptor.m_NumOutputs);
1350 
1351  auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_StandIn);
1352  auto fbLayer = serializer::CreateStandInLayer(m_flatBufferBuilder, fbBaseLayer, fbDescriptor);
1353 
1355 }
1356 
1357 void SerializerStrategy::SerializeStridedSliceLayer(const armnn::IConnectableLayer* layer,
1358  const armnn::StridedSliceDescriptor& stridedSliceDescriptor,
1359  const char* name)
1360 {
1361  IgnoreUnused(name);
1362 
1363  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_StridedSlice);
1364 
1365  auto flatBufferDescriptor =
1366  CreateStridedSliceDescriptor(m_flatBufferBuilder,
1367  m_flatBufferBuilder.CreateVector(stridedSliceDescriptor.m_Begin),
1368  m_flatBufferBuilder.CreateVector(stridedSliceDescriptor.m_End),
1369  m_flatBufferBuilder.CreateVector(stridedSliceDescriptor.m_Stride),
1370  stridedSliceDescriptor.m_BeginMask,
1371  stridedSliceDescriptor.m_EndMask,
1372  stridedSliceDescriptor.m_ShrinkAxisMask,
1373  stridedSliceDescriptor.m_EllipsisMask,
1374  stridedSliceDescriptor.m_NewAxisMask,
1375  GetFlatBufferDataLayout(stridedSliceDescriptor.m_DataLayout));
1376 
1377  auto flatBufferLayer = serializer::CreateStridedSliceLayer(m_flatBufferBuilder,
1378  flatBufferBaseLayer,
1379  flatBufferDescriptor);
1380 
1382 }
1383 
1384 void SerializerStrategy::SerializeSubtractionLayer(const armnn::IConnectableLayer* layer, const char* name)
1385 {
1386  IgnoreUnused(name);
1387 
1388  auto fbSubtractionBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Subtraction);
1389  auto fbSubtractionLayer = serializer::CreateSubtractionLayer(m_flatBufferBuilder, fbSubtractionBaseLayer);
1390 
1392 }
1393 
1394 void SerializerStrategy::SerializeSwitchLayer(const armnn::IConnectableLayer* layer, const char* name)
1395 {
1396  IgnoreUnused(name);
1397 
1398  auto fbSwitchBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Switch);
1399  auto fbSwitchLayer = serializer::CreateSwitchLayer(m_flatBufferBuilder, fbSwitchBaseLayer);
1400 
1402 }
1403 
1404 void SerializerStrategy::SerializeTransposeConvolution2dLayer(
1405  const armnn::IConnectableLayer* layer,
1406  const armnn::TransposeConvolution2dDescriptor& descriptor,
1407  const std::vector<armnn::ConstTensor>& constants,
1408  const char* name)
1409 {
1410  IgnoreUnused(name);
1411 
1412  const armnn::ConstTensor& weights = constants.at(0);
1413 
1415  auto fbDescriptor = CreateTransposeConvolution2dDescriptor(m_flatBufferBuilder,
1416  descriptor.m_PadLeft,
1417  descriptor.m_PadRight,
1418  descriptor.m_PadTop,
1419  descriptor.m_PadBottom,
1420  descriptor.m_StrideX,
1421  descriptor.m_StrideY,
1422  descriptor.m_BiasEnabled,
1424 
1425  // weights & biases
1426  auto fbWeightsConstTensorInfo = CreateConstTensorInfo(weights);
1427  flatbuffers::Offset<serializer::ConstTensor> fbBiasesConstTensorInfo;
1428  if (constants.size() > 1)
1429  {
1430  const armnn::ConstTensor& biases = constants.at(1);
1431  fbBiasesConstTensorInfo = CreateConstTensorInfo(biases);
1432  }
1433 
1434  auto fbLayer = CreateTransposeConvolution2dLayer(m_flatBufferBuilder,
1435  fbBaseLayer,
1436  fbDescriptor,
1437  fbWeightsConstTensorInfo,
1438  fbBiasesConstTensorInfo);
1439 
1441 }
1442 
1443 void SerializerStrategy::SerializeTransposeLayer(const armnn::IConnectableLayer* layer,
1444  const armnn::TransposeDescriptor& descriptor,
1445  const char* name)
1446 {
1447  IgnoreUnused(name);
1448 
1449  // Create FlatBuffer BaseLayer
1450  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Transpose);
1451 
1452  std::vector<unsigned int> dimMappings;
1453  for (unsigned int i=0; i<descriptor.m_DimMappings.GetSize(); ++i)
1454  {
1455  dimMappings.push_back(descriptor.m_DimMappings[i]);
1456  }
1457 
1458  auto flatBufferDesc = serializer::CreateTransposeDescriptor(m_flatBufferBuilder,
1459  m_flatBufferBuilder.CreateVector(dimMappings));
1460 
1461  // Create the FlatBuffer TransposeLayer
1462  auto flatBufferLayer = serializer::CreateTransposeLayer(m_flatBufferBuilder,
1463  flatBufferBaseLayer,
1464  flatBufferDesc);
1465 
1466  // Add the AnyLayer to the FlatBufferLayers
1468 }
1469 
1470 void SerializerStrategy::SerializeQLstmLayer(const armnn::IConnectableLayer* layer,
1471  const armnn::QLstmDescriptor& descriptor,
1472  const std::vector<armnn::ConstTensor>& constants,
1473  const char* name)
1474 {
1475  IgnoreUnused(name);
1476 
1477  auto fbQLstmBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_QLstm);
1478 
1479  auto fbQLstmDescriptor = serializer::CreateQLstmDescriptor(
1480  m_flatBufferBuilder,
1481  descriptor.m_CifgEnabled,
1482  descriptor.m_PeepholeEnabled,
1483  descriptor.m_ProjectionEnabled,
1484  descriptor.m_LayerNormEnabled,
1485  descriptor.m_CellClip,
1486  descriptor.m_ProjectionClip,
1487  descriptor.m_InputIntermediateScale,
1488  descriptor.m_ForgetIntermediateScale,
1489  descriptor.m_CellIntermediateScale,
1490  descriptor.m_OutputIntermediateScale,
1491  descriptor.m_HiddenStateZeroPoint,
1492  descriptor.m_HiddenStateScale
1493  );
1494 
1495  // Index for constants vector
1496  std::size_t i = 0;
1497 
1498  // Mandatory params
1499  auto inputToForgetWeights = CreateConstTensorInfo(constants[i++]); //InputToForgetWeights
1500  auto inputToCellWeights = CreateConstTensorInfo(constants[i++]); //InputToCellWeights
1501  auto inputToOutputWeights = CreateConstTensorInfo(constants[i++]); //InputToOutputWeights
1502  auto recurrentToForgetWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToForgetWeights
1503  auto recurrentToCellWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToCellWeights
1504  auto recurrentToOutputWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToOutputWeights
1505  auto forgetGateBias = CreateConstTensorInfo(constants[i++]); //ForgetGateBias
1506  auto cellBias = CreateConstTensorInfo(constants[i++]); //CellBias
1507  auto outputGateBias = CreateConstTensorInfo(constants[i++]); //OutputGateBias
1508 
1509  // CIFG
1510  flatbuffers::Offset<serializer::ConstTensor> inputToInputWeights;
1511  flatbuffers::Offset<serializer::ConstTensor> recurrentToInputWeights;
1512  flatbuffers::Offset<serializer::ConstTensor> inputGateBias;
1513 
1514  if (!descriptor.m_CifgEnabled)
1515  {
1516  inputToInputWeights = CreateConstTensorInfo(constants[i++]); //InputToInputWeights
1517  recurrentToInputWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToInputWeights
1518  inputGateBias = CreateConstTensorInfo(constants[i++]); //InputGateBias
1519  }
1520 
1521  // Peephole
1522  flatbuffers::Offset<serializer::ConstTensor> cellToInputWeights;
1523  flatbuffers::Offset<serializer::ConstTensor> cellToForgetWeights;
1524  flatbuffers::Offset<serializer::ConstTensor> cellToOutputWeights;
1525 
1526  if (descriptor.m_PeepholeEnabled)
1527  {
1528  if (!descriptor.m_CifgEnabled)
1529  {
1530  cellToInputWeights = CreateConstTensorInfo(constants[i++]); //CellToInputWeights
1531  }
1532  cellToForgetWeights = CreateConstTensorInfo(constants[i++]); //CellToForgetWeights
1533  cellToOutputWeights = CreateConstTensorInfo(constants[i++]); //CellToOutputWeights
1534  }
1535 
1536  // Projection
1537  flatbuffers::Offset<serializer::ConstTensor> projectionWeights;
1538  flatbuffers::Offset<serializer::ConstTensor> projectionBias;
1539 
1540  if (descriptor.m_ProjectionEnabled)
1541  {
1542  projectionWeights = CreateConstTensorInfo(constants[i++]); //ProjectionWeights
1543  projectionBias = CreateConstTensorInfo(constants[i++]); //ProjectionBias
1544  }
1545 
1546  // Layer norm
1547  flatbuffers::Offset<serializer::ConstTensor> inputLayerNormWeights;
1548  flatbuffers::Offset<serializer::ConstTensor> forgetLayerNormWeights;
1549  flatbuffers::Offset<serializer::ConstTensor> cellLayerNormWeights;
1550  flatbuffers::Offset<serializer::ConstTensor> outputLayerNormWeights;
1551 
1552  if (descriptor.m_LayerNormEnabled)
1553  {
1554  if (!descriptor.m_CifgEnabled)
1555  {
1556  inputLayerNormWeights = CreateConstTensorInfo(constants[i++]); //InputLayerNormWeights
1557  }
1558  forgetLayerNormWeights = CreateConstTensorInfo(constants[i++]); //ForgetLayerNormWeights
1559  cellLayerNormWeights = CreateConstTensorInfo(constants[i++]); //CellLayerNormWeights
1560  outputLayerNormWeights = CreateConstTensorInfo(constants[i++]); //OutputLayerNormWeights
1561  }
1562 
1563  auto fbQLstmParams = serializer::CreateQLstmInputParams(
1564  m_flatBufferBuilder,
1565  inputToForgetWeights,
1566  inputToCellWeights,
1567  inputToOutputWeights,
1568  recurrentToForgetWeights,
1569  recurrentToCellWeights,
1570  recurrentToOutputWeights,
1571  forgetGateBias,
1572  cellBias,
1573  outputGateBias,
1574  inputToInputWeights,
1575  recurrentToInputWeights,
1576  inputGateBias,
1577  projectionWeights,
1578  projectionBias,
1579  cellToInputWeights,
1580  cellToForgetWeights,
1581  cellToOutputWeights,
1582  inputLayerNormWeights,
1583  forgetLayerNormWeights,
1584  cellLayerNormWeights,
1585  outputLayerNormWeights);
1586 
1587  auto fbQLstmLayer = serializer::CreateQLstmLayer(
1588  m_flatBufferBuilder,
1589  fbQLstmBaseLayer,
1590  fbQLstmDescriptor,
1591  fbQLstmParams);
1592 
1594 }
1595 
1596 void SerializerStrategy::SerializeQuantizedLstmLayer(const armnn::IConnectableLayer* layer,
1597  const std::vector<armnn::ConstTensor>& constants,
1598  const char* name)
1599 {
1600  IgnoreUnused(name);
1601 
1602  auto fbQuantizedLstmBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_QuantizedLstm);
1603 
1604  // index for constants vector
1605  size_t i = 0;
1606 
1607  // Get input parameters
1608  auto inputToInputWeights = CreateConstTensorInfo(constants[i++]);
1609  auto inputToForgetWeights = CreateConstTensorInfo(constants[i++]);
1610  auto inputToCellWeights = CreateConstTensorInfo(constants[i++]);
1611  auto inputToOutputWeights = CreateConstTensorInfo(constants[i++]);
1612 
1613  auto recurrentToInputWeights = CreateConstTensorInfo(constants[i++]);
1614  auto recurrentToForgetWeights = CreateConstTensorInfo(constants[i++]);
1615  auto recurrentToCellWeights = CreateConstTensorInfo(constants[i++]);
1616  auto recurrentToOutputWeights = CreateConstTensorInfo(constants[i++]);
1617 
1618  auto inputGateBias = CreateConstTensorInfo(constants[i++]);
1619  auto forgetGateBias = CreateConstTensorInfo(constants[i++]);
1620  auto cellBias = CreateConstTensorInfo(constants[i++]);
1621  auto outputGateBias = CreateConstTensorInfo(constants[i++]);
1622 
1623  auto fbQuantizedLstmParams = serializer::CreateQuantizedLstmInputParams(
1624  m_flatBufferBuilder,
1625  inputToInputWeights,
1626  inputToForgetWeights,
1627  inputToCellWeights,
1628  inputToOutputWeights,
1629  recurrentToInputWeights,
1630  recurrentToForgetWeights,
1631  recurrentToCellWeights,
1632  recurrentToOutputWeights,
1633  inputGateBias,
1634  forgetGateBias,
1635  cellBias,
1636  outputGateBias);
1637 
1638  auto fbQuantizedLstmLayer = serializer::CreateQuantizedLstmLayer(
1639  m_flatBufferBuilder,
1640  fbQuantizedLstmBaseLayer,
1641  fbQuantizedLstmParams);
1642 
1644 }
1645 
1646 void SerializerStrategy::SerializeUnidirectionalSequenceLstmLayer(
1647  const armnn::IConnectableLayer* layer,
1649  const std::vector<armnn::ConstTensor>& constants,
1650  const char* name)
1651 {
1652  IgnoreUnused(name);
1653 
1654  auto fbUnidirectionalSequenceLstmBaseLayer =
1656 
1657  auto fbUnidirectionalSequenceLstmDescriptor = serializer::CreateUnidirectionalSequenceLstmDescriptor(
1658  m_flatBufferBuilder,
1659  descriptor.m_ActivationFunc,
1660  descriptor.m_ClippingThresCell,
1661  descriptor.m_ClippingThresProj,
1662  descriptor.m_CifgEnabled,
1663  descriptor.m_PeepholeEnabled,
1664  descriptor.m_ProjectionEnabled,
1665  descriptor.m_LayerNormEnabled,
1666  descriptor.m_TimeMajor);
1667 
1668  // Index for constants vector
1669  std::size_t i = 0;
1670 
1671  // Get mandatory/basic input parameters
1672  auto inputToForgetWeights = CreateConstTensorInfo(constants[i++]); //InputToForgetWeights
1673  auto inputToCellWeights = CreateConstTensorInfo(constants[i++]); //InputToCellWeights
1674  auto inputToOutputWeights = CreateConstTensorInfo(constants[i++]); //InputToOutputWeights
1675  auto recurrentToForgetWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToForgetWeights
1676  auto recurrentToCellWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToCellWeights
1677  auto recurrentToOutputWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToOutputWeights
1678  auto forgetGateBias = CreateConstTensorInfo(constants[i++]); //ForgetGateBias
1679  auto cellBias = CreateConstTensorInfo(constants[i++]); //CellBias
1680  auto outputGateBias = CreateConstTensorInfo(constants[i++]); //OutputGateBias
1681 
1682  //Define optional parameters, these will be set depending on configuration in Lstm descriptor
1683  flatbuffers::Offset<serializer::ConstTensor> inputToInputWeights;
1684  flatbuffers::Offset<serializer::ConstTensor> recurrentToInputWeights;
1685  flatbuffers::Offset<serializer::ConstTensor> cellToInputWeights;
1686  flatbuffers::Offset<serializer::ConstTensor> inputGateBias;
1687  flatbuffers::Offset<serializer::ConstTensor> projectionWeights;
1688  flatbuffers::Offset<serializer::ConstTensor> projectionBias;
1689  flatbuffers::Offset<serializer::ConstTensor> cellToForgetWeights;
1690  flatbuffers::Offset<serializer::ConstTensor> cellToOutputWeights;
1691  flatbuffers::Offset<serializer::ConstTensor> inputLayerNormWeights;
1692  flatbuffers::Offset<serializer::ConstTensor> forgetLayerNormWeights;
1693  flatbuffers::Offset<serializer::ConstTensor> cellLayerNormWeights;
1694  flatbuffers::Offset<serializer::ConstTensor> outputLayerNormWeights;
1695 
1696  if (!descriptor.m_CifgEnabled)
1697  {
1698  inputToInputWeights = CreateConstTensorInfo(constants[i++]); //InputToInputWeights
1699  recurrentToInputWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToInputWeights
1700  inputGateBias = CreateConstTensorInfo(constants[i++]); //InputGateBias
1701  }
1702 
1703  if (descriptor.m_PeepholeEnabled)
1704  {
1705  if (!descriptor.m_CifgEnabled)
1706  {
1707  cellToInputWeights = CreateConstTensorInfo(constants[i++]); //CellToInputWeights
1708  }
1709  cellToForgetWeights = CreateConstTensorInfo(constants[i++]); //CellToForgetWeights
1710  cellToOutputWeights = CreateConstTensorInfo(constants[i++]); //CellToOutputWeights
1711  }
1712 
1713  if (descriptor.m_ProjectionEnabled)
1714  {
1715  projectionWeights = CreateConstTensorInfo(constants[i++]); //ProjectionWeights
1716  projectionBias = CreateConstTensorInfo(constants[i++]); //ProjectionBias
1717  }
1718 
1719  if (descriptor.m_LayerNormEnabled)
1720  {
1721  if (!descriptor.m_CifgEnabled)
1722  {
1723  inputLayerNormWeights = CreateConstTensorInfo(constants[i++]); //InputLayerNormWeights
1724  }
1725  forgetLayerNormWeights = CreateConstTensorInfo(constants[i++]); //ForgetLayerNormWeights
1726  cellLayerNormWeights = CreateConstTensorInfo(constants[i++]); //CellLayerNormWeights
1727  outputLayerNormWeights = CreateConstTensorInfo(constants[i++]); //OutputLayerNormWeights
1728  }
1729 
1730  auto fbUnidirectionalSequenceLstmParams = serializer::CreateLstmInputParams(
1731  m_flatBufferBuilder,
1732  inputToForgetWeights,
1733  inputToCellWeights,
1734  inputToOutputWeights,
1735  recurrentToForgetWeights,
1736  recurrentToCellWeights,
1737  recurrentToOutputWeights,
1738  forgetGateBias,
1739  cellBias,
1740  outputGateBias,
1741  inputToInputWeights,
1742  recurrentToInputWeights,
1743  cellToInputWeights,
1744  inputGateBias,
1745  projectionWeights,
1746  projectionBias,
1747  cellToForgetWeights,
1748  cellToOutputWeights,
1749  inputLayerNormWeights,
1750  forgetLayerNormWeights,
1751  cellLayerNormWeights,
1752  outputLayerNormWeights);
1753 
1754  auto fbUnidirectionalSequenceLstmLayer = serializer::CreateUnidirectionalSequenceLstmLayer(
1755  m_flatBufferBuilder,
1756  fbUnidirectionalSequenceLstmBaseLayer,
1757  fbUnidirectionalSequenceLstmDescriptor,
1758  fbUnidirectionalSequenceLstmParams);
1759 
1760  CreateAnyLayer(fbUnidirectionalSequenceLstmLayer.o, serializer::Layer::Layer_UnidirectionalSequenceLstmLayer);
1761 }
1762 
1763 fb::Offset<serializer::LayerBase> SerializerStrategy::CreateLayerBase(const IConnectableLayer* layer,
1764  const serializer::LayerType layerType)
1765 {
1766 
1767  uint32_t fbIndex = GetSerializedId(layer->GetGuid());
1768 
1769  std::vector<fb::Offset<serializer::InputSlot>> inputSlots = CreateInputSlots(layer);
1770  std::vector<fb::Offset<serializer::OutputSlot>> outputSlots = CreateOutputSlots(layer);
1771 
1772  return serializer::CreateLayerBase(m_flatBufferBuilder,
1773  fbIndex,
1774  m_flatBufferBuilder.CreateString(layer->GetName()),
1775  layerType,
1776  m_flatBufferBuilder.CreateVector(inputSlots),
1777  m_flatBufferBuilder.CreateVector(outputSlots));
1778 }
1779 
1780 void SerializerStrategy::CreateAnyLayer(const flatbuffers::Offset<void>& layer, const serializer::Layer serializerLayer)
1781 {
1782 
1783  auto anyLayer = armnnSerializer::CreateAnyLayer(m_flatBufferBuilder, serializerLayer, layer);
1784  m_serializedLayers.push_back(anyLayer);
1785 }
1786 
1787 template <typename T>
1788 flatbuffers::Offset<flatbuffers::Vector<T>> SerializerStrategy::CreateDataVector(const void* memory, unsigned int size)
1789 {
1790  const T* buffer = reinterpret_cast<const T*>(memory);
1791  std::vector<T> vector(buffer, buffer + (size / sizeof(T)));
1792  auto fbVector = m_flatBufferBuilder.CreateVector(vector);
1793  return fbVector;
1794 }
1795 
1796 flatbuffers::Offset<TensorInfo> SerializerStrategy::CreateTensorInfo(const armnn::TensorInfo& tensorInfo)
1797 {
1798  // Get the dimensions
1799  std::vector<unsigned int> shape;
1800  std::vector<bool> specificity;
1801  // This assumes that the TensorShape constructors have ensured that the size of m_DimensionsSpecificity
1802  // matches the size of dimensions.
1803  for(unsigned int dim = 0; dim < tensorInfo.GetShape().GetNumDimensions(); ++dim)
1804  {
1805  specificity.push_back(tensorInfo.GetShape().GetDimensionSpecificity(dim));
1806 
1807  if (tensorInfo.GetShape().GetDimensionSpecificity(dim))
1808  {
1809  shape.push_back(tensorInfo.GetShape()[dim]);
1810  }
1811  else
1812  {
1813  shape.push_back(0);
1814  }
1815  }
1816 
1817  if (tensorInfo.HasPerAxisQuantization())
1818  {
1819  // Create FlatBuffer TensorInfo
1820  auto flatBufferTensorInfo =
1821  serializer::CreateTensorInfo(m_flatBufferBuilder,
1822  m_flatBufferBuilder.CreateVector(shape),
1823  GetFlatBufferDataType(tensorInfo.GetDataType()),
1824  tensorInfo.GetQuantizationScales()[0],
1825  tensorInfo.GetQuantizationOffset(),
1826  m_flatBufferBuilder.CreateVector(tensorInfo.GetQuantizationScales()),
1827  tensorInfo.GetQuantizationDim().value(),
1828  static_cast<unsigned int>
1829  (tensorInfo.GetShape().GetDimensionality()),
1830  m_flatBufferBuilder.CreateVector(specificity));
1831  return flatBufferTensorInfo;
1832  }
1833 
1834  // Create FlatBuffer TensorInfo
1835  auto flatBufferTensorInfo = serializer::CreateTensorInfo(m_flatBufferBuilder,
1836  m_flatBufferBuilder.CreateVector(shape),
1837  GetFlatBufferDataType(tensorInfo.GetDataType()),
1838  tensorInfo.GetQuantizationScale(),
1839  tensorInfo.GetQuantizationOffset(),
1840  0,
1841  0,
1842  static_cast<unsigned int>
1843  (tensorInfo.GetShape().GetDimensionality()),
1844  m_flatBufferBuilder.CreateVector(specificity));
1845  return flatBufferTensorInfo;
1846 }
1847 
1848 flatbuffers::Offset<serializer::ConstTensor>
1849  SerializerStrategy::CreateConstTensorInfo(const armnn::ConstTensor& constTensor)
1850 {
1851  armnn::TensorInfo tensorInfo = constTensor.GetInfo();
1852 
1853  flatbuffers::Offset<void> fbPayload;
1854 
1855  switch (tensorInfo.GetDataType())
1856  {
1858  {
1859  auto fbVector = CreateDataVector<int64_t>(constTensor.GetMemoryArea(), constTensor.GetNumBytes());
1860  flatbuffers::Offset<serializer::LongData> flatBuffersData = serializer::CreateLongData(
1861  m_flatBufferBuilder,
1862  fbVector);
1863  fbPayload = flatBuffersData.o;
1864  break;
1865  }
1868  {
1869  auto fbVector = CreateDataVector<int32_t>(constTensor.GetMemoryArea(), constTensor.GetNumBytes());
1870  flatbuffers::Offset<serializer::IntData> flatBuffersData = serializer::CreateIntData(
1871  m_flatBufferBuilder,
1872  fbVector);
1873  fbPayload = flatBuffersData.o;
1874  break;
1875  }
1879  {
1880  auto fbVector = CreateDataVector<int16_t>(constTensor.GetMemoryArea(), constTensor.GetNumBytes());
1881  flatbuffers::Offset<serializer::ShortData> flatBuffersData = serializer::CreateShortData(
1882  m_flatBufferBuilder,
1883  fbVector);
1884  fbPayload = flatBuffersData.o;
1885  break;
1886  }
1891  default:
1892  {
1893  auto fbVector = CreateDataVector<int8_t>(constTensor.GetMemoryArea(), constTensor.GetNumBytes());
1894  flatbuffers::Offset<serializer::ByteData> flatBuffersData = serializer::CreateByteData(
1895  m_flatBufferBuilder,
1896  fbVector);
1897  fbPayload = flatBuffersData.o;
1898  }
1899  }
1900  flatbuffers::Offset<serializer::ConstTensor> flatBufferConstTensor = serializer::CreateConstTensor(
1901  m_flatBufferBuilder,
1902  CreateTensorInfo(tensorInfo),
1904  fbPayload);
1905  return flatBufferConstTensor;
1906 }
1907 
1908 flatbuffers::Offset<armnnSerializer::FeatureCompatibilityVersions> SerializerStrategy::GetVersionTable()
1909 {
1910  flatbuffers::Offset<armnnSerializer::FeatureCompatibilityVersions> versionsTable =
1912  m_flatBufferBuilder,
1913  1, // Binding ids scheme version
1914  1, // Weights layout scheme version
1915  1 // Constant tensors as inputs version
1916  );
1917  return versionsTable;
1918 }
1919 
1920 std::vector<fb::Offset<serializer::InputSlot>>
1921  SerializerStrategy::CreateInputSlots(const armnn::IConnectableLayer* layer)
1922 {
1923  std::vector<fb::Offset<serializer::InputSlot>> inputSlots;
1924 
1925  // Get the InputSlots
1926  for (unsigned int slotIndex = 0; slotIndex<layer->GetNumInputSlots(); ++slotIndex)
1927  {
1928  const IInputSlot& inputSlot = layer->GetInputSlot(slotIndex);
1929 
1930  // Get the Connection for the InputSlot
1931  const IOutputSlot* connection = inputSlot.GetConnection();
1932 
1933  // Create FlatBuffer Connection
1934  serializer::Connection conn(GetSerializedId(inputSlot.GetConnection()->GetOwningLayerGuid()),
1935  connection->CalculateIndexOnOwner());
1936  // Create FlatBuffer InputSlot
1937  inputSlots.push_back(serializer::CreateInputSlot(m_flatBufferBuilder, slotIndex, &conn));
1938  }
1939  return inputSlots;
1940 }
1941 
1942 std::vector<fb::Offset<serializer::OutputSlot>>
1943  SerializerStrategy::CreateOutputSlots(const armnn::IConnectableLayer* layer)
1944 {
1945  std::vector<fb::Offset<serializer::OutputSlot>> outputSlots;
1946 
1947  // Get the OutputSlots
1948  for (unsigned int slotIndex = 0; slotIndex < layer->GetNumOutputSlots(); ++slotIndex)
1949  {
1950  const IOutputSlot& outputSlot = layer->GetOutputSlot(slotIndex);
1951  const armnn::TensorInfo& tensorInfo = outputSlot.GetTensorInfo();
1952 
1953  // Create FlatBuffer Outputslot
1954  outputSlots.push_back(serializer::CreateOutputSlot(m_flatBufferBuilder,
1955  slotIndex,
1956  CreateTensorInfo(tensorInfo)));
1957  }
1958  return outputSlots;
1959 }
1960 
1962  const BaseDescriptor& descriptor,
1963  const std::vector<armnn::ConstTensor>& constants,
1964  const char* name,
1965  const armnn::LayerBindingId id)
1966 {
1967  IgnoreUnused(constants);
1968 
1969  switch (layer->GetType())
1970  {
1972  {
1973  const armnn::ActivationDescriptor& layerDescriptor =
1974  static_cast<const armnn::ActivationDescriptor&>(descriptor);
1975  SerializeActivationLayer(layer, layerDescriptor, name);
1976  break;
1977  }
1979  {
1980  SerializeAdditionLayer(layer, name);
1981  break;
1982  }
1984  {
1985  const armnn::ArgMinMaxDescriptor& layerDescriptor =
1986  static_cast<const armnn::ArgMinMaxDescriptor&>(descriptor);
1987  SerializeArgMinMaxLayer(layer, layerDescriptor, name);
1988  break;
1989  }
1991  {
1992  const armnn::BatchNormalizationDescriptor& layerDescriptor =
1993  static_cast<const armnn::BatchNormalizationDescriptor&>(descriptor);
1994  SerializeBatchNormalizationLayer(layer,
1995  layerDescriptor,
1996  constants,
1997  name);
1998  break;
1999  }
2001  {
2002  const armnn::BatchToSpaceNdDescriptor& layerDescriptor =
2003  static_cast<const armnn::BatchToSpaceNdDescriptor&>(descriptor);
2004  SerializeBatchToSpaceNdLayer(layer,
2005  layerDescriptor,
2006  name);
2007  break;
2008  }
2009  case armnn::LayerType::Cast :
2010  {
2011  SerializeCastLayer(layer, name);
2012  break;
2013  }
2015  {
2016  const armnn::ChannelShuffleDescriptor& layerDescriptor =
2017  static_cast<const armnn::ChannelShuffleDescriptor&>(descriptor);
2018  SerializeChannelShuffleLayer(layer,
2019  layerDescriptor,
2020  name);
2021  break;
2022  }
2024  {
2025  const armnn::ComparisonDescriptor& layerDescriptor =
2026  static_cast<const armnn::ComparisonDescriptor&>(descriptor);
2027  SerializeComparisonLayer(layer,
2028  layerDescriptor,
2029  name);
2030  break;
2031  }
2033  {
2034  const armnn::ConcatDescriptor& layerDescriptor =
2035  static_cast<const armnn::ConcatDescriptor&>(descriptor);
2036  SerializeConcatLayer(layer,
2037  layerDescriptor,
2038  name);
2039  break;
2040  }
2042  {
2043  SerializeConstantLayer(layer,
2044  constants,
2045  name);
2046  break;
2047  }
2049  {
2050  const armnn::Convolution2dDescriptor& layerDescriptor =
2051  static_cast<const armnn::Convolution2dDescriptor&>(descriptor);
2052  SerializeConvolution2dLayer(layer,
2053  layerDescriptor,
2054  constants,
2055  name);
2056  break;
2057  }
2059  {
2060  const armnn::Convolution3dDescriptor& layerDescriptor =
2061  static_cast<const armnn::Convolution3dDescriptor&>(descriptor);
2062  SerializeConvolution3dLayer(layer,
2063  layerDescriptor,
2064  name);
2065  break;
2066  }
2068  {
2069  const armnn::DepthToSpaceDescriptor& layerDescriptor =
2070  static_cast<const armnn::DepthToSpaceDescriptor&>(descriptor);
2071  SerializeDepthToSpaceLayer(layer,
2072  layerDescriptor,
2073  name);
2074  break;
2075  }
2077  {
2078  const armnn::DepthwiseConvolution2dDescriptor& layerDescriptor =
2079  static_cast<const armnn::DepthwiseConvolution2dDescriptor&>(descriptor);
2080  SerializeDepthwiseConvolution2dLayer(layer,
2081  layerDescriptor,
2082  constants,
2083  name);
2084  break;
2085  }
2087  {
2088  SerializeDequantizeLayer(layer,
2089  name);
2090  break;
2091  }
2093  {
2094  const armnn::DetectionPostProcessDescriptor& layerDescriptor =
2095  static_cast<const armnn::DetectionPostProcessDescriptor&>(descriptor);
2096  SerializeDetectionPostProcessLayer(layer, layerDescriptor, constants, name);
2097  break;
2098  }
2100  {
2101  SerializeDivisionLayer(layer, name);
2102  break;
2103  }
2105  {
2106  const armnn::ElementwiseUnaryDescriptor& layerDescriptor =
2107  static_cast<const armnn::ElementwiseUnaryDescriptor&>(descriptor);
2108  SerializeElementwiseUnaryLayer(layer, layerDescriptor, name);
2109  break;
2110  }
2111  case armnn::LayerType::Fill :
2112  {
2113  const armnn::FillDescriptor& layerDescriptor =
2114  static_cast<const armnn::FillDescriptor&>(descriptor);
2115  SerializeFillLayer(layer, layerDescriptor, name);
2116  break;
2117  }
2119  {
2120  SerializeFloorLayer(layer, name);
2121  break;
2122  }
2124  {
2125  const armnn::FullyConnectedDescriptor& layerDescriptor =
2126  static_cast<const armnn::FullyConnectedDescriptor&>(descriptor);
2127  SerializeFullyConnectedLayer(layer, layerDescriptor, name);
2128  break;
2129  }
2131  {
2132  const armnn::GatherDescriptor& layerDescriptor =
2133  static_cast<const armnn::GatherDescriptor&>(descriptor);
2134  SerializeGatherLayer(layer, layerDescriptor, name);
2135  break;
2136  }
2138  {
2139  SerializeInputLayer(layer, id, name);
2140  break;
2141  }
2143  {
2144  const armnn::InstanceNormalizationDescriptor& layerDescriptor =
2145  static_cast<const armnn::InstanceNormalizationDescriptor&>(descriptor);
2146  SerializeInstanceNormalizationLayer(layer, layerDescriptor, name);
2147  break;
2148  }
2150  {
2151  const armnn::L2NormalizationDescriptor& layerDescriptor =
2152  static_cast<const armnn::L2NormalizationDescriptor&>(descriptor);
2153  SerializeL2NormalizationLayer(layer, layerDescriptor, name);
2154  break;
2155  }
2157  {
2158  const armnn::LogicalBinaryDescriptor& layerDescriptor =
2159  static_cast<const armnn::LogicalBinaryDescriptor&>(descriptor);
2160  SerializeLogicalBinaryLayer(layer, layerDescriptor, name);
2161  break;
2162  }
2164  {
2165  const armnn::LogSoftmaxDescriptor& layerDescriptor =
2166  static_cast<const armnn::LogSoftmaxDescriptor&>(descriptor);
2167  SerializeLogSoftmaxLayer(layer, layerDescriptor, name);
2168  break;
2169  }
2170  case armnn::LayerType::Lstm :
2171  {
2172  const armnn::LstmDescriptor& layerDescriptor =
2173  static_cast<const armnn::LstmDescriptor&>(descriptor);
2174  SerializeLstmLayer(layer, layerDescriptor, constants, name);
2175  break;
2176  }
2178  {
2179  const armnn::QLstmDescriptor& layerDescriptor =
2180  static_cast<const armnn::QLstmDescriptor&>(descriptor);
2181  SerializeQLstmLayer(layer, layerDescriptor, constants, name);
2182  break;
2183  }
2185  {
2186  SerializeMaximumLayer(layer, name);
2187  break;
2188  }
2189  case armnn::LayerType::Mean :
2190  {
2191  const armnn::MeanDescriptor& layerDescriptor =
2192  static_cast<const armnn::MeanDescriptor&>(descriptor);
2193  SerializeMeanLayer(layer, layerDescriptor, name);
2194  break;
2195  }
2197  {
2198  SerializeMergeLayer(layer, name);
2199  break;
2200  }
2202  {
2203  SerializeMinimumLayer(layer, name);
2204  break;
2205  }
2207  {
2208  SerializeMultiplicationLayer(layer, name);
2209  break;
2210  }
2212  {
2213  const armnn::NormalizationDescriptor& layerDescriptor =
2214  static_cast<const armnn::NormalizationDescriptor&>(descriptor);
2215  SerializeNormalizationLayer(layer, layerDescriptor, name);
2216  break;
2217  }
2219  {
2220  SerializeOutputLayer(layer, id, name);
2221  break;
2222  }
2223  case armnn::LayerType::Pad :
2224  {
2225  const armnn::PadDescriptor& layerDescriptor =
2226  static_cast<const armnn::PadDescriptor&>(descriptor);
2227  SerializePadLayer(layer, layerDescriptor, name);
2228  break;
2229  }
2231  {
2232  const armnn::PermuteDescriptor& layerDescriptor =
2233  static_cast<const armnn::PermuteDescriptor&>(descriptor);
2234  SerializePermuteLayer(layer, layerDescriptor, name);
2235  break;
2236  }
2238  {
2239  const armnn::Pooling2dDescriptor& layerDescriptor =
2240  static_cast<const armnn::Pooling2dDescriptor&>(descriptor);
2241  SerializePooling2dLayer(layer, layerDescriptor, name);
2242  break;
2243  }
2245  {
2246  const armnn::Pooling3dDescriptor& layerDescriptor =
2247  static_cast<const armnn::Pooling3dDescriptor&>(descriptor);
2248  SerializePooling3dLayer(layer, layerDescriptor, name);
2249  break;
2250  }
2252  {
2253  SerializePreluLayer(layer, name);
2254  break;
2255  }
2257  {
2258  SerializeQuantizeLayer(layer, name);
2259  break;
2260  }
2262  SerializeQuantizedLstmLayer(layer, constants, name);
2263  break;
2265  {
2266  const armnn::ReshapeDescriptor &layerDescriptor =
2267  static_cast<const armnn::ReshapeDescriptor &>(descriptor);
2268  SerializeReshapeLayer(layer, layerDescriptor, name);
2269  break;
2270  }
2272  {
2273  SerializeRankLayer(layer, name);
2274  break;
2275  }
2277  {
2278  const armnn::ReduceDescriptor& layerDescriptor =
2279  static_cast<const armnn::ReduceDescriptor&>(descriptor);
2280  SerializeReduceLayer(layer, layerDescriptor, name);
2281  break;
2282  }
2284  {
2285  const armnn::ResizeDescriptor& layerDescriptor =
2286  static_cast<const armnn::ResizeDescriptor&>(descriptor);
2287  SerializeResizeLayer(layer, layerDescriptor, name);
2288  break;
2289  }
2291  {
2292  SerializeShapeLayer(layer, name);
2293  break;
2294  }
2296  {
2297  const armnn::SliceDescriptor& layerDescriptor =
2298  static_cast<const armnn::SliceDescriptor&>(descriptor);
2299  SerializeSliceLayer(layer, layerDescriptor, name);
2300  break;
2301  }
2303  {
2304  const armnn::SoftmaxDescriptor& layerDescriptor =
2305  static_cast<const armnn::SoftmaxDescriptor&>(descriptor);
2306  SerializeSoftmaxLayer(layer, layerDescriptor, name);
2307  break;
2308  }
2310  {
2311  const armnn::SpaceToBatchNdDescriptor& layerDescriptor =
2312  static_cast<const armnn::SpaceToBatchNdDescriptor&>(descriptor);
2313  SerializeSpaceToBatchNdLayer(layer, layerDescriptor, name);
2314  break;
2315  }
2317  {
2318  const armnn::SpaceToDepthDescriptor& layerDescriptor =
2319  static_cast<const armnn::SpaceToDepthDescriptor&>(descriptor);
2320  SerializeSpaceToDepthLayer(layer, layerDescriptor, name);
2321  break;
2322  }
2324  {
2325  const armnn::SplitterDescriptor& layerDescriptor =
2326  static_cast<const armnn::SplitterDescriptor&>(descriptor);
2327  SerializeSplitterLayer(layer, layerDescriptor, name);
2328  break;
2329  }
2331  {
2332  const armnn::StackDescriptor& layerDescriptor =
2333  static_cast<const armnn::StackDescriptor&>(descriptor);
2334  SerializeStackLayer(layer, layerDescriptor, name);
2335  break;
2336  }
2338  {
2339  const armnn::StandInDescriptor& layerDescriptor =
2340  static_cast<const armnn::StandInDescriptor&>(descriptor);
2341  SerializeStandInLayer(layer, layerDescriptor, name);
2342  break;
2343  }
2345  {
2346  const armnn::StridedSliceDescriptor& layerDescriptor =
2347  static_cast<const armnn::StridedSliceDescriptor&>(descriptor);
2348  SerializeStridedSliceLayer(layer, layerDescriptor, name);
2349  break;
2350  }
2352  {
2353  SerializeSubtractionLayer(layer, name);
2354  break;
2355  }
2357  {
2358  SerializeSwitchLayer(layer, name);
2359  break;
2360  }
2362  {
2363  const armnn::TransposeDescriptor& layerDescriptor =
2364  static_cast<const armnn::TransposeDescriptor&>(descriptor);
2365  SerializeTransposeLayer(layer, layerDescriptor, name);
2366  break;
2367  }
2369  {
2370  const armnn::TransposeConvolution2dDescriptor& layerDescriptor =
2371  static_cast<const armnn::TransposeConvolution2dDescriptor&>(descriptor);
2372  SerializeTransposeConvolution2dLayer(layer, layerDescriptor, constants, name);
2373  break;
2374  }
2376  {
2377  const armnn::UnidirectionalSequenceLstmDescriptor& layerDescriptor =
2378  static_cast<const armnn::UnidirectionalSequenceLstmDescriptor&>(descriptor);
2379  SerializeUnidirectionalSequenceLstmLayer(layer, layerDescriptor, constants, name);
2380  break;
2381  }
2382  default:
2383  {
2385  fmt::format("A layer of unknown type was given to the serializer. Layer name: {}; Layer Id: {}",
2386  layer->GetName(),
2387  id));
2388  }
2389  }
2390 }
2391 
2393 {
2394  // Iterate through to network
2395  inNetwork.ExecuteStrategy(m_SerializerStrategy);
2396  flatbuffers::FlatBufferBuilder& fbBuilder = m_SerializerStrategy.GetFlatBufferBuilder();
2397 
2398  // Create FlatBuffer SerializedGraph
2399  auto serializedGraph = serializer::CreateSerializedGraph(
2400  fbBuilder,
2401  fbBuilder.CreateVector(m_SerializerStrategy.GetSerializedLayers()),
2402  fbBuilder.CreateVector(m_SerializerStrategy.GetInputIds()),
2403  fbBuilder.CreateVector(m_SerializerStrategy.GetOutputIds()),
2404  m_SerializerStrategy.GetVersionTable());
2405 
2406  // Serialize the graph
2407  fbBuilder.Finish(serializedGraph);
2408 }
2409 
2410 
2412 {
2413  flatbuffers::FlatBufferBuilder& fbBuilder = m_SerializerStrategy.GetFlatBufferBuilder();
2414 
2415  auto bytesToWrite = armnn::numeric_cast<std::streamsize>(fbBuilder.GetSize());
2416  stream.write(reinterpret_cast<const char*>(fbBuilder.GetBufferPointer()), bytesToWrite);
2417  return !stream.bad();
2418 }
2419 
2420 } // namespace armnnSerializer
uint32_t m_PadBottom
Padding bottom value in the height dimension.
bool m_BiasEnabled
Enable/disable bias.
flatbuffers::Offset< Convolution3dDescriptor > CreateConvolution3dDescriptor(flatbuffers::FlatBufferBuilder &_fbb, uint32_t padLeft=0, uint32_t padRight=0, uint32_t padTop=0, uint32_t padBottom=0, uint32_t padFront=0, uint32_t padBack=0, uint32_t strideX=0, uint32_t strideY=0, uint32_t strideZ=0, uint32_t dilationX=1, uint32_t dilationY=1, uint32_t dilationZ=1, bool biasEnabled=false, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NDHWC)
PoolingAlgorithm m_PoolType
The pooling algorithm to use (Max. Average, L2).
flatbuffers::Offset< LongData > CreateLongData(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< int64_t >> data=0)
float m_Eps
Used to avoid dividing by zero.
virtual unsigned int GetNumOutputSlots() const =0
Returns the number of connectable output slots.
armnnSerializer::UnaryOperation GetFlatBufferUnaryOperation(armnn::UnaryOperation comparisonOperation)
bool m_ProjectionEnabled
Enable/disable the projection layer.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
flatbuffers::Offset< ReshapeDescriptor > CreateReshapeDescriptor(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> targetShape=0)
ARMNN_NO_DEPRECATE_WARN_END void ExecuteStrategy(IStrategy &strategy) const
Definition: Network.cpp:482
flatbuffers::Offset< ReduceLayer > CreateReduceLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::ReduceDescriptor > descriptor=0)
UnaryOperation m_Operation
Specifies the elementwiseUnary operation to execute.
uint32_t m_Axis
0-based axis along which to stack the input tensors.
flatbuffers::Offset< OutputSlot > CreateOutputSlot(flatbuffers::FlatBufferBuilder &_fbb, uint32_t index=0, flatbuffers::Offset< armnnSerializer::TensorInfo > tensorInfo=0)
A ViewsDescriptor for the SplitterLayer.
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:66
flatbuffers::Offset< DepthwiseConvolution2dDescriptor > CreateDepthwiseConvolution2dDescriptor(flatbuffers::FlatBufferBuilder &_fbb, uint32_t padLeft=0, uint32_t padRight=0, uint32_t padTop=0, uint32_t padBottom=0, uint32_t strideX=0, uint32_t strideY=0, uint32_t dilationX=1, uint32_t dilationY=1, bool biasEnabled=false, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NCHW)
float m_ScaleW
Center size encoding scale weight.
uint32_t m_PadBottom
Padding bottom value in the height dimension.
bool m_BiasEnabled
Enable/disable bias.
virtual unsigned int GetNumInputSlots() const =0
Returns the number of connectable input slots.
float m_K
Kappa value used for the across channel normalization equation.
int m_Axis
Scalar, defaulted to the last index (-1), specifying the dimension the activation will be performed o...
A TransposeConvolution2dDescriptor for the TransposeConvolution2dLayer.
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
flatbuffers::Offset< LstmLayer > CreateLstmLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::LstmDescriptor > descriptor=0, flatbuffers::Offset< armnnSerializer::LstmInputParams > inputParams=0)
const TensorShape & GetShape() const
Definition: Tensor.hpp:191
uint32_t m_PoolWidth
Pooling width value.
flatbuffers::Offset< L2NormalizationLayer > CreateL2NormalizationLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::L2NormalizationDescriptor > descriptor=0)
uint32_t m_PadBottom
Padding bottom value in the height dimension.
uint32_t m_PadLeft
Padding left value in the width dimension.
flatbuffers::Offset< TransposeConvolution2dDescriptor > CreateTransposeConvolution2dDescriptor(flatbuffers::FlatBufferBuilder &_fbb, uint32_t padLeft=0, uint32_t padRight=0, uint32_t padTop=0, uint32_t padBottom=0, uint32_t strideX=0, uint32_t strideY=0, bool biasEnabled=false, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NCHW)
float m_ClippingThresProj
Clipping threshold value for the projection.
uint32_t m_PoolDepth
Pooling depth value.
int32_t m_ShrinkAxisMask
Shrink axis mask value. If set, the nth specification shrinks the dimensionality by 1...
A ReshapeDescriptor for the ReshapeLayer.
flatbuffers::Offset< ResizeDescriptor > CreateResizeDescriptor(flatbuffers::FlatBufferBuilder &_fbb, uint32_t targetHeight=0, uint32_t targetWidth=0, armnnSerializer::ResizeMethod method=armnnSerializer::ResizeMethod_NearestNeighbor, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NHWC, bool alignCorners=false, bool halfPixelCenters=false)
flatbuffers::Offset< FillLayer > CreateFillLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::FillDescriptor > descriptor=0)
std::vector< int > m_Begin
Begin values for the input that will be sliced.
uint32_t m_PadBack
Padding back value in the depth dimension.
float m_PadValue
Optional value to use for padding, defaults to 0.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
uint32_t GetNumDimensions() const
Get the number of dimensions.
A ComparisonDescriptor for the ComparisonLayer.
Definition: Descriptors.hpp:89
void Serialize(const armnn::INetwork &inNetwork)
Serializes the network to ArmNN SerializedGraph.
float m_ScaleX
Center size encoding scale x.
TensorShape m_InputShape
Required shape of all input tensors.
bool m_TransposeWeightMatrix
Enable/disable transpose weight matrix.
Dimensionality GetDimensionality() const
Function that returns the tensor type.
Definition: Tensor.hpp:92
flatbuffers::Offset< GatherLayer > CreateGatherLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::GatherDescriptor > descriptor=0)
flatbuffers::Offset< RankLayer > CreateRankLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
bool HasPerAxisQuantization() const
Definition: Tensor.cpp:448
uint32_t m_PoolWidth
Pooling width value.
bool m_PeepholeEnabled
Enable/disable peephole.
armnnSerializer::OutputShapeRounding GetFlatBufferOutputShapeRounding(armnn::OutputShapeRounding outputShapeRounding)
flatbuffers::Offset< TransposeLayer > CreateTransposeLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::TransposeDescriptor > descriptor=0)
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
A Convolution2dDescriptor for the Convolution2dLayer.
float m_Alpha
Alpha value for the normalization equation.
uint32_t m_PadLeft
Padding left value in the width dimension.
flatbuffers::Offset< ComparisonLayer > CreateComparisonLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::ComparisonDescriptor > descriptor=0)
bool m_KeepDims
if true then output shape has no change.
float m_HiddenStateScale
Hidden State quantization scale.
bool m_BiasEnabled
Enable/disable bias.
flatbuffers::Offset< ConstTensor > CreateConstTensor(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::TensorInfo > info=0, armnnSerializer::ConstTensorData data_type=armnnSerializer::ConstTensorData_NONE, flatbuffers::Offset< void > data=0)
Optional< unsigned int > GetQuantizationDim() const
Definition: Tensor.cpp:496
flatbuffers::Offset< QuantizeLayer > CreateQuantizeLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
float m_OutputIntermediateScale
Output intermediate quantization scale.
ResizeMethod m_Method
The Interpolation method to use (Bilinear, NearestNeighbor).
float m_Gamma
Gamma, the scale scalar value applied for the normalized tensor. Defaults to 1.0. ...
float m_Beta
Exponentiation value.
flatbuffers::Offset< InputSlot > CreateInputSlot(flatbuffers::FlatBufferBuilder &_fbb, uint32_t index=0, const armnnSerializer::Connection *connection=0)
std::vector< unsigned int > m_Size
Size of the slice in each dimension.
static ISerializer * CreateRaw()
Definition: Serializer.cpp:30
flatbuffers::Offset< SpaceToDepthDescriptor > CreateSpaceToDepthDescriptor(flatbuffers::FlatBufferBuilder &_fbb, uint32_t blockSize=0, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NHWC)
float m_Eps
Value to add to the variance. Used to avoid dividing by zero.
PaddingMethod m_PaddingMethod
The padding method to be used. (Exclude, IgnoreValue).
ArgMinMaxFunction m_Function
Specify if the function is to find Min or Max.
Definition: Descriptors.hpp:81
uint32_t m_DetectionsPerClass
Detections per classes, used in Regular NMS.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
flatbuffers::Offset< QuantizedLstmLayer > CreateQuantizedLstmLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::QuantizedLstmInputParams > inputParams=0)
flatbuffers::Offset< TransposeDescriptor > CreateTransposeDescriptor(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> dimMappings=0)
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
serializer::ActivationFunction GetFlatBufferActivationFunction(armnn::ActivationFunction function)
Definition: Serializer.cpp:55
Main network class which provides the interface for building up a neural network. ...
Definition: INetwork.hpp:249
uint32_t m_PadRight
Padding right value in the width dimension.
flatbuffers::Offset< DetectionPostProcessDescriptor > CreateDetectionPostProcessDescriptor(flatbuffers::FlatBufferBuilder &_fbb, uint32_t maxDetections=0, uint32_t maxClassesPerDetection=0, uint32_t detectionsPerClass=0, float nmsScoreThreshold=0.0f, float nmsIouThreshold=0.0f, uint32_t numClasses=0, bool useRegularNms=false, float scaleX=0.0f, float scaleY=0.0f, float scaleW=0.0f, float scaleH=0.0f)
flatbuffers::Offset< Pooling3dLayer > CreatePooling3dLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::Pooling3dDescriptor > descriptor=0)
armnnSerializer::NormalizationAlgorithmMethod GetFlatBufferNormalizationAlgorithmMethod(armnn::NormalizationAlgorithmMethod normalizationAlgorithmMethod)
uint32_t m_PadTop
Padding top value in the height dimension.
flatbuffers::Offset< AnyLayer > CreateAnyLayer(flatbuffers::FlatBufferBuilder &_fbb, armnnSerializer::Layer layer_type=armnnSerializer::Layer_NONE, flatbuffers::Offset< void > layer=0)
uint32_t m_PadBottom
Padding bottom value in the height dimension.
flatbuffers::Offset< DepthwiseConvolution2dLayer > CreateDepthwiseConvolution2dLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::DepthwiseConvolution2dDescriptor > descriptor=0, flatbuffers::Offset< armnnSerializer::ConstTensor > weights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > biases=0)
bool m_BiasEnabled
Enable/disable bias.
A LogicalBinaryDescriptor for the LogicalBinaryLayer.
flatbuffers::Offset< MergeLayer > CreateMergeLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
uint32_t m_PadRight
Padding right value in the width dimension.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
MemoryType GetMemoryArea() const
Definition: Tensor.hpp:305
std::vector< std::pair< unsigned int, unsigned int > > m_PadList
Specifies the padding for input dimension.
uint32_t GetNumViews() const
Get the number of views.
ReduceOperation m_ReduceOperation
Specifies the reduction operation to execute.
flatbuffers::Offset< QLstmInputParams > CreateQLstmInputParams(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::ConstTensor > inputToForgetWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > inputToCellWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > inputToOutputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > recurrentToForgetWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > recurrentToCellWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > recurrentToOutputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > forgetGateBias=0, flatbuffers::Offset< armnnSerializer::ConstTensor > cellBias=0, flatbuffers::Offset< armnnSerializer::ConstTensor > outputGateBias=0, flatbuffers::Offset< armnnSerializer::ConstTensor > inputToInputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > recurrentToInputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > inputGateBias=0, flatbuffers::Offset< armnnSerializer::ConstTensor > projectionWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > projectionBias=0, flatbuffers::Offset< armnnSerializer::ConstTensor > cellToInputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > cellToForgetWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > cellToOutputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > inputLayerNormWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > forgetLayerNormWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > cellLayerNormWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > outputLayerNormWeights=0)
bool m_TimeMajor
Enable/disable time major.
Copyright (c) 2021 ARM Limited and Contributors.
DataLayout m_DataLayout
The data layout to be used (NCDHW, NDHWC).
void IgnoreUnused(Ts &&...)
flatbuffers::Offset< TensorInfo > CreateTensorInfo(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> dimensions=0, armnnSerializer::DataType dataType=armnnSerializer::DataType_Float16, float quantizationScale=1.0f, int32_t quantizationOffset=0, flatbuffers::Offset< flatbuffers::Vector< float >> quantizationScales=0, uint32_t quantizationDim=0, uint32_t dimensionality=1, flatbuffers::Offset< flatbuffers::Vector< uint8_t >> dimensionSpecificity=0, bool isConstant=false)
uint32_t m_PadBottom
Padding bottom value in the height dimension.
int32_t m_BeginMask
Begin mask value.
uint32_t m_PadFront
Padding front value in the depth dimension.
flatbuffers::Offset< FullyConnectedDescriptor > CreateFullyConnectedDescriptor(flatbuffers::FlatBufferBuilder &_fbb, bool biasEnabled=false, bool transposeWeightsMatrix=false, bool constantWeights=true)
flatbuffers::Offset< TransposeConvolution2dLayer > CreateTransposeConvolution2dLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::TransposeConvolution2dDescriptor > descriptor=0, flatbuffers::Offset< armnnSerializer::ConstTensor > weights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > biases=0)
SizeType GetSize() const
Definition: Types.hpp:325
uint32_t m_DilationY
Dilation along y axis.
int32_t m_EndMask
End mask value.
A SpaceToDepthDescriptor for the SpaceToDepthLayer.
flatbuffers::Offset< PreluLayer > CreatePreluLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
std::vector< std::pair< unsigned int, unsigned int > > m_PadList
Specifies the padding values for the input dimension: heightPad{top, bottom} widthPad{left, right}.
uint32_t m_PoolHeight
Pooling height value.
std::vector< float > GetQuantizationScales() const
Definition: Tensor.cpp:453
uint32_t m_DilationX
Dilation along x axis.
flatbuffers::Offset< StandInDescriptor > CreateStandInDescriptor(flatbuffers::FlatBufferBuilder &_fbb, uint32_t numInputs=0, uint32_t numOutputs=0)
bool SaveSerializedToStream(std::ostream &stream)
Serializes the SerializedGraph to the stream.
Definition: Serializer.cpp:50
uint32_t m_DilationY
Dilation factor value for height dimension.
armnnSerializer::ConstTensorData GetFlatBufferConstTensorData(armnn::DataType dataType)
bool GetDimensionSpecificity(unsigned int i) const
Gets information about if the dimension size has been specified or not.
Definition: Tensor.cpp:211
LogicalBinaryOperation m_Operation
Specifies the logical operation to execute.
A BatchToSpaceNdDescriptor for the BatchToSpaceNdLayer.
void ExecuteStrategy(const armnn::IConnectableLayer *layer, const armnn::BaseDescriptor &descriptor, const std::vector< armnn::ConstTensor > &constants, const char *name, const armnn::LayerBindingId id) override
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
PermutationVector m_DimMappings
Indicates how to translate tensor elements from a given source into the target destination, when source and target potentially have different memory layouts e.g.
flatbuffers::Offset< MultiplicationLayer > CreateMultiplicationLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
flatbuffers::Offset< DepthToSpaceLayer > CreateDepthToSpaceLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::DepthToSpaceDescriptor > descriptor=0)
flatbuffers::Offset< InstanceNormalizationLayer > CreateInstanceNormalizationLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::InstanceNormalizationDescriptor > descriptor=0)
armnnSerializer::ReduceOperation GetFlatBufferReduceOperation(armnn::ReduceOperation reduceOperation)
int LayerBindingId
Type of identifiers for bindable layers (inputs, outputs).
Definition: Types.hpp:277
flatbuffers::Offset< SliceLayer > CreateSliceLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::SliceDescriptor > descriptor=0)
armnnSerializer::DataType GetFlatBufferDataType(armnn::DataType dataType)
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
uint32_t m_NumOutputs
Number of output tensors.
NormalizationAlgorithmMethod m_NormMethodType
Normalization method algorithm to use (LocalBrightness, LocalContrast).
flatbuffers::Offset< Convolution2dDescriptor > CreateConvolution2dDescriptor(flatbuffers::FlatBufferBuilder &_fbb, uint32_t padLeft=0, uint32_t padRight=0, uint32_t padTop=0, uint32_t padBottom=0, uint32_t strideX=0, uint32_t strideY=0, uint32_t dilationX=1, uint32_t dilationY=1, bool biasEnabled=false, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NCHW)
flatbuffers::Offset< Pooling3dDescriptor > CreatePooling3dDescriptor(flatbuffers::FlatBufferBuilder &_fbb, armnnSerializer::PoolingAlgorithm poolType=armnnSerializer::PoolingAlgorithm_Max, uint32_t padLeft=0, uint32_t padRight=0, uint32_t padTop=0, uint32_t padBottom=0, uint32_t padFront=0, uint32_t padBack=0, uint32_t poolWidth=0, uint32_t poolHeight=0, uint32_t poolDepth=0, uint32_t strideX=0, uint32_t strideY=0, uint32_t strideZ=0, armnnSerializer::OutputShapeRounding outputShapeRounding=armnnSerializer::OutputShapeRounding_Floor, armnnSerializer::PaddingMethod paddingMethod=armnnSerializer::PaddingMethod_IgnoreValue, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NHWC)
flatbuffers::Offset< InputLayer > CreateInputLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::BindableLayerBase > base=0)
A ResizeBilinearDescriptor for the ResizeBilinearLayer.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
uint32_t m_MaxClassesPerDetection
Maximum numbers of classes per detection, used in Fast NMS.
Base class for all descriptors.
Definition: Descriptors.hpp:22
std::vector< unsigned int > m_Axis
Values for the dimensions to reduce.
A StackDescriptor for the StackLayer.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
flatbuffers::Offset< ShortData > CreateShortData(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< int16_t >> data=0)
serializer::ArgMinMaxFunction GetFlatBufferArgMinMaxFunction(armnn::ArgMinMaxFunction function)
Definition: Serializer.cpp:86
TensorShape m_TargetShape
Target shape value.
bool SaveSerializedToStream(std::ostream &stream)
Serializes the SerializedGraph to the stream.
flatbuffers::Offset< ConcatLayer > CreateConcatLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::OriginsDescriptor > descriptor=0)
uint32_t m_PoolHeight
Pooling height value.
uint32_t m_PadTop
Padding top value in the height dimension.
uint32_t m_MaxDetections
Maximum numbers of detections.
A PadDescriptor for the PadLayer.
flatbuffers::Offset< SubtractionLayer > CreateSubtractionLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
flatbuffers::Offset< BindableLayerBase > CreateBindableLayerBase(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, int32_t layerBindingId=0)
const uint32_t * GetViewOrigin(uint32_t idx) const
Return the view origin at the int value idx.
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
uint32_t m_PadBack
Padding back value in the depth dimension.
flatbuffers::Offset< ArgMinMaxLayer > CreateArgMinMaxLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::ArgMinMaxDescriptor > descriptor=0)
armnnSerializer::NormalizationAlgorithmChannel GetFlatBufferNormalizationAlgorithmChannel(armnn::NormalizationAlgorithmChannel normalizationAlgorithmChannel)
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
flatbuffers::Offset< QLstmDescriptor > CreateQLstmDescriptor(flatbuffers::FlatBufferBuilder &_fbb, bool cifgEnabled=true, bool peepholeEnabled=false, bool projectionEnabled=false, bool layerNormEnabled=false, float cellClip=0.0f, float projectionClip=0.0f, float inputIntermediateScale=0.0f, float forgetIntermediateScale=0.0f, float cellIntermediateScale=0.0f, float outputIntermediateScale=0.0f, int32_t hiddenStateZeroPoint=0, float hiddenStateScale=0.0f)
bool m_LayerNormEnabled
Enable/disable layer normalization.
float m_NmsIouThreshold
Intersection over union threshold.
flatbuffers::Offset< ReshapeLayer > CreateReshapeLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::ReshapeDescriptor > descriptor=0)
armnnSerializer::LogicalBinaryOperation GetFlatBufferLogicalBinaryOperation(armnn::LogicalBinaryOperation logicalBinaryOperation)
flatbuffers::Offset< ArgMinMaxDescriptor > CreateArgMinMaxDescriptor(flatbuffers::FlatBufferBuilder &_fbb, armnnSerializer::ArgMinMaxFunction argMinMaxFunction=armnnSerializer::ArgMinMaxFunction_Min, int32_t axis=0)
flatbuffers::Offset< SoftmaxDescriptor > CreateSoftmaxDescriptor(flatbuffers::FlatBufferBuilder &_fbb, float beta=0.0f, int32_t axis=-1)
An LstmDescriptor for the LstmLayer.
uint32_t m_PadRight
Padding right value in the width dimension.
flatbuffers::Offset< AdditionLayer > CreateAdditionLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
uint32_t m_DilationX
Dilation factor value for width dimension.
uint32_t m_PadTop
Padding top value in the height dimension.
flatbuffers::Offset< L2NormalizationDescriptor > CreateL2NormalizationDescriptor(flatbuffers::FlatBufferBuilder &_fbb, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NCHW, float eps=1e-12f)
std::vector< unsigned int > m_Begin
Beginning indices of the slice in each dimension.
int32_t m_NewAxisMask
New axis mask value.
flatbuffers::Offset< MinimumLayer > CreateMinimumLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
bool m_KeepDims
Enable/disable keep dimensions. If true, then the reduced dimensions that are of length 1 are kept...
flatbuffers::Offset< ByteData > CreateByteData(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< int8_t >> data=0)
std::vector< unsigned int > m_BlockShape
Block shape values.
flatbuffers::Offset< FeatureCompatibilityVersions > CreateFeatureCompatibilityVersions(flatbuffers::FlatBufferBuilder &_fbb, uint32_t bindingIdsScheme=0, uint32_t weightsLayoutScheme=0, uint32_t constantTensorsAsInputs=0)
float m_Eps
Epsilon, small scalar value added to variance to avoid dividing by zero. Defaults to 1e-12f...
An output connection slot for a layer.
Definition: INetwork.hpp:40
flatbuffers::Offset< DepthToSpaceDescriptor > CreateDepthToSpaceDescriptor(flatbuffers::FlatBufferBuilder &_fbb, uint32_t blockSize=0, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NHWC)
A L2NormalizationDescriptor for the L2NormalizationLayer.
int32_t GetQuantizationOffset() const
Definition: Tensor.cpp:480
An ArgMinMaxDescriptor for ArgMinMaxLayer.
Definition: Descriptors.hpp:67
armnnSerializer::PaddingMode GetFlatBufferPaddingMode(armnn::PaddingMode paddingMode)
float GetQuantizationScale() const
Definition: Tensor.cpp:463
flatbuffers::Offset< LstmInputParams > CreateLstmInputParams(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::ConstTensor > inputToForgetWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > inputToCellWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > inputToOutputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > recurrentToForgetWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > recurrentToCellWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > recurrentToOutputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > forgetGateBias=0, flatbuffers::Offset< armnnSerializer::ConstTensor > cellBias=0, flatbuffers::Offset< armnnSerializer::ConstTensor > outputGateBias=0, flatbuffers::Offset< armnnSerializer::ConstTensor > inputToInputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > recurrentToInputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > cellToInputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > inputGateBias=0, flatbuffers::Offset< armnnSerializer::ConstTensor > projectionWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > projectionBias=0, flatbuffers::Offset< armnnSerializer::ConstTensor > cellToForgetWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > cellToOutputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > inputLayerNormWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > forgetLayerNormWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > cellLayerNormWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > outputLayerNormWeights=0)
DataType GetDataType() const
Definition: Tensor.hpp:198
An OriginsDescriptor for the ConcatLayer.
A ReduceDescriptor for the REDUCE operators.
float m_ProjectionClip
Clipping threshold value for the projection.
flatbuffers::Offset< CastLayer > CreateCastLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
flatbuffers::Offset< LayerBase > CreateLayerBase(flatbuffers::FlatBufferBuilder &_fbb, uint32_t index=0, flatbuffers::Offset< flatbuffers::String > layerName=0, armnnSerializer::LayerType layerType=armnnSerializer::LayerType_Addition, flatbuffers::Offset< flatbuffers::Vector< flatbuffers::Offset< armnnSerializer::InputSlot >>> inputSlots=0, flatbuffers::Offset< flatbuffers::Vector< flatbuffers::Offset< armnnSerializer::OutputSlot >>> outputSlots=0)
flatbuffers::Offset< ShapeLayer > CreateShapeLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
A FullyConnectedDescriptor for the FullyConnectedLayer.
int32_t m_EllipsisMask
Ellipsis mask value.
virtual LayerGuid GetGuid() const =0
Returns the unique id of the layer.
bool m_BiasEnabled
Enable/disable bias.
A tensor defined by a TensorInfo (shape and data type) and an immutable backing store.
Definition: Tensor.hpp:327
flatbuffers::Offset< QuantizedLstmInputParams > CreateQuantizedLstmInputParams(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::ConstTensor > inputToInputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > inputToForgetWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > inputToCellWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > inputToOutputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > recurrentToInputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > recurrentToForgetWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > recurrentToCellWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > recurrentToOutputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > inputGateBias=0, flatbuffers::Offset< armnnSerializer::ConstTensor > forgetGateBias=0, flatbuffers::Offset< armnnSerializer::ConstTensor > cellBias=0, flatbuffers::Offset< armnnSerializer::ConstTensor > outputGateBias=0)
flatbuffers::Offset< ReduceDescriptor > CreateReduceDescriptor(flatbuffers::FlatBufferBuilder &_fbb, bool keepDims=false, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> axis=0, armnnSerializer::ReduceOperation reduceOperation=armnnSerializer::ReduceOperation_Sum)
flatbuffers::Offset< StackDescriptor > CreateStackDescriptor(flatbuffers::FlatBufferBuilder &_fbb, uint32_t axis=0, uint32_t numInputs=0, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> inputShape=0)
flatbuffers::Offset< BatchToSpaceNdDescriptor > CreateBatchToSpaceNdDescriptor(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> blockShape=0, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> crops=0, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NHWC)
float m_InputIntermediateScale
Input intermediate quantization scale.
OutputShapeRounding m_OutputShapeRounding
The rounding method for the output shape. (Floor, Ceiling).
uint32_t m_TargetWidth
Target width value.
flatbuffers::Offset< SplitterLayer > CreateSplitterLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::ViewsDescriptor > descriptor=0)
A GatherDescriptor for the GatherLayer.
uint32_t m_PadBottom
Padding bottom value in the height dimension.
bool m_PeepholeEnabled
Enable/disable peephole.
uint32_t m_NumClasses
Number of classes.
bool m_HalfPixelCenters
Half Pixel Centers.
flatbuffers::Offset< OutputLayer > CreateOutputLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::BindableLayerBase > base=0)
void Serialize(const armnn::INetwork &inNetwork)
Serializes the network to ArmNN SerializedGraph.
Definition: Serializer.cpp:45
flatbuffers::Offset< SoftmaxLayer > CreateSoftmaxLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::SoftmaxDescriptor > descriptor=0)
flatbuffers::Offset< FillDescriptor > CreateFillDescriptor(flatbuffers::FlatBufferBuilder &_fbb, float value=0.0f)
uint32_t m_PadTop
Padding top value in the height dimension.
A StandInDescriptor for the StandIn layer.
A QLstmDescriptor for the QLstmLayer.
flatbuffers::Offset< StridedSliceLayer > CreateStridedSliceLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::StridedSliceDescriptor > descriptor=0)
virtual unsigned int CalculateIndexOnOwner() const =0
flatbuffers::Offset< LogSoftmaxDescriptor > CreateLogSoftmaxDescriptor(flatbuffers::FlatBufferBuilder &_fbb, float beta=1.0f, int32_t axis=-1)
bool m_UseRegularNms
Use Regular NMS.
uint32_t m_PadFront
Padding front value in the depth dimension.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
flatbuffers::Offset< MeanLayer > CreateMeanLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::MeanDescriptor > descriptor=0)
std::vector< unsigned int > m_BlockShape
Block shape value.
std::vector< int > m_Stride
Stride values for the input that will be sliced.
An ActivationDescriptor for the ActivationLayer.
Definition: Descriptors.hpp:36
flatbuffers::Offset< ActivationLayer > CreateActivationLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::ActivationDescriptor > descriptor=0)
flatbuffers::Offset< SpaceToDepthLayer > CreateSpaceToDepthLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::SpaceToDepthDescriptor > descriptor=0)
flatbuffers::Offset< SliceDescriptor > CreateSliceDescriptor(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> begin=0, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> size=0)
const TensorInfo & GetInfo() const
Definition: Tensor.hpp:295
min(a, max(b, input)) ReLu1 & ReLu6.
flatbuffers::Offset< BatchNormalizationLayer > CreateBatchNormalizationLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::BatchNormalizationDescriptor > descriptor=0, flatbuffers::Offset< armnnSerializer::ConstTensor > mean=0, flatbuffers::Offset< armnnSerializer::ConstTensor > variance=0, flatbuffers::Offset< armnnSerializer::ConstTensor > beta=0, flatbuffers::Offset< armnnSerializer::ConstTensor > gamma=0)
flatbuffers::Offset< BatchNormalizationDescriptor > CreateBatchNormalizationDescriptor(flatbuffers::FlatBufferBuilder &_fbb, float eps=0.0f, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NHWC)
uint32_t m_NumInputs
Number of input tensors.
flatbuffers::Offset< GatherDescriptor > CreateGatherDescriptor(flatbuffers::FlatBufferBuilder &_fbb, int32_t axis=0)
flatbuffers::Offset< Convolution3dLayer > CreateConvolution3dLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::Convolution3dDescriptor > descriptor=0)
uint32_t m_PadLeft
Padding left value in the width dimension.
flatbuffers::Offset< ActivationDescriptor > CreateActivationDescriptor(flatbuffers::FlatBufferBuilder &_fbb, armnnSerializer::ActivationFunction activationFunction=armnnSerializer::ActivationFunction_Sigmoid, float a=0.0f, float b=0.0f)
uint32_t m_TargetHeight
Target height value.
uint32_t m_ActivationFunc
The activation function to use.
A SliceDescriptor for the SliceLayer.
flatbuffers::Offset< NormalizationLayer > CreateNormalizationLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::NormalizationDescriptor > descriptor=0)
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
A Convolution3dDescriptor for the Convolution3dLayer.
uint32_t m_PadRight
Padding right value in the width dimension.
flatbuffers::Offset< ViewsDescriptor > CreateViewsDescriptor(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::OriginsDescriptor > origins=0, flatbuffers::Offset< flatbuffers::Vector< flatbuffers::Offset< armnnSerializer::UintVector >>> viewSizes=0)
virtual LayerType GetType() const =0
Returns the armnn::LayerType of this layer.
flatbuffers::Offset< PermuteDescriptor > CreatePermuteDescriptor(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> dimMappings=0)
float m_ClippingThresCell
Clipping threshold value for the cell state.
unsigned int m_BlockSize
Scalar specifying the input block size. It must be >= 1.
uint32_t m_NumGroups
Number of groups for the channel shuffle operation.
flatbuffers::Offset< MeanDescriptor > CreateMeanDescriptor(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> axis=0, bool keepDims=false)
const uint32_t * GetViewOrigin(uint32_t idx) const
Get the view origin at the int value idx.
PaddingMode m_PaddingMode
Specifies the Padding mode (Constant, Reflect or Symmetric)
flatbuffers::Offset< StandInLayer > CreateStandInLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::StandInDescriptor > descriptor=0)
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
float m_ForgetIntermediateScale
Forget intermediate quantization scale.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
float m_Beta
Beta, the offset scalar value applied for the normalized tensor. Defaults to 1.0. ...
armnnSerializer::DataLayout GetFlatBufferDataLayout(armnn::DataLayout dataLayout)
A Pooling3dDescriptor for the Pooling3dLayer.
uint32_t m_StrideZ
Stride value when proceeding through input for the depth dimension.
std::vector< uint32_t > m_vAxis
The indices of the dimensions to reduce.
float m_ScaleH
Center size encoding scale height.
ComparisonOperation m_Operation
Specifies the comparison operation to execute.
std::vector< int > m_End
End values for the input that will be sliced.
flatbuffers::Offset< SwitchLayer > CreateSwitchLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
A SpaceToBatchNdDescriptor for the SpaceToBatchNdLayer.
DataLayout m_DataLayout
The data layout to be used (NDHWC, NCDHW).
NormalizationAlgorithmChannel m_NormChannelType
Normalization channel algorithm to use (Across, Within).
const uint32_t * GetViewSizes(uint32_t idx) const
Get the view sizes at the int value idx.
float m_CellClip
Clipping threshold value for the cell state.
flatbuffers::Offset< ElementwiseUnaryDescriptor > CreateElementwiseUnaryDescriptor(flatbuffers::FlatBufferBuilder &_fbb, armnnSerializer::UnaryOperation operation=armnnSerializer::UnaryOperation_Abs)
float m_A
Alpha upper bound value used by the activation functions. (BoundedReLu, Linear, TanH, Elu).
Definition: Descriptors.hpp:61
uint32_t m_DilationX
Dilation along x axis.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
flatbuffers::Offset< PadDescriptor > CreatePadDescriptor(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> padList=0, float padValue=0.0f, armnnSerializer::PaddingMode paddingMode=armnnSerializer::PaddingMode_Constant)
bool m_CifgEnabled
Enable/disable cifg (coupled input & forget gate).
flatbuffers::Offset< PadLayer > CreatePadLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::PadDescriptor > descriptor=0)
flatbuffers::Offset< FloorLayer > CreateFloorLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
std::unique_ptr< ISerializer, void(*)(ISerializer *serializer)> ISerializerPtr
Definition: ISerializer.hpp:15
flatbuffers::Offset< NormalizationDescriptor > CreateNormalizationDescriptor(flatbuffers::FlatBufferBuilder &_fbb, armnnSerializer::NormalizationAlgorithmChannel normChannelType=armnnSerializer::NormalizationAlgorithmChannel_Across, armnnSerializer::NormalizationAlgorithmMethod normMethodType=armnnSerializer::NormalizationAlgorithmMethod_LocalBrightness, uint32_t normSize=0, float alpha=0.0f, float beta=0.0f, float k=0.0f, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NCHW)
uint32_t m_PadLeft
Padding left value in the width dimension.
armnnSerializer::ComparisonOperation GetFlatBufferComparisonOperation(armnn::ComparisonOperation comparisonOperation)
flatbuffers::Offset< Pooling2dDescriptor > CreatePooling2dDescriptor(flatbuffers::FlatBufferBuilder &_fbb, armnnSerializer::PoolingAlgorithm poolType=armnnSerializer::PoolingAlgorithm_Max, uint32_t padLeft=0, uint32_t padRight=0, uint32_t padTop=0, uint32_t padBottom=0, uint32_t poolWidth=0, uint32_t poolHeight=0, uint32_t strideX=0, uint32_t strideY=0, armnnSerializer::OutputShapeRounding outputShapeRounding=armnnSerializer::OutputShapeRounding_Floor, armnnSerializer::PaddingMethod paddingMethod=armnnSerializer::PaddingMethod_IgnoreValue, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NHWC)
bool m_AlignCorners
Aligned corners.
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
flatbuffers::Offset< ConstantLayer > CreateConstantLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::ConstTensor > input=0)
int32_t m_Axis
The axis in params to gather indices from.
A ElementwiseUnaryDescriptor for the ElementwiseUnaryLayer.
flatbuffers::Offset< ChannelShuffleLayer > CreateChannelShuffleLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::ChannelShuffleDescriptor > descriptor=0)
PoolingAlgorithm m_PoolType
The pooling algorithm to use (Max. Average, L2).
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
flatbuffers::Offset< UintVector > CreateUintVector(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> data=0)
flatbuffers::Offset< StackLayer > CreateStackLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::StackDescriptor > descriptor=0)
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
uint32_t m_PadLeft
Padding left value in the width dimension.
std::vector< std::pair< unsigned int, unsigned int > > m_Crops
The values to crop from the input dimension.
flatbuffers::Offset< Convolution2dLayer > CreateConvolution2dLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::Convolution2dDescriptor > descriptor=0, flatbuffers::Offset< armnnSerializer::ConstTensor > weights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > biases=0)
flatbuffers::Offset< UnidirectionalSequenceLstmLayer > CreateUnidirectionalSequenceLstmLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::UnidirectionalSequenceLstmDescriptor > descriptor=0, flatbuffers::Offset< armnnSerializer::LstmInputParams > inputParams=0)
uint32_t m_PadTop
Padding top value in the height dimension.
unsigned int GetNumDimensions() const
Function that returns the tensor rank.
Definition: Tensor.cpp:174
flatbuffers::Offset< Pooling2dLayer > CreatePooling2dLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::Pooling2dDescriptor > descriptor=0)
uint32_t m_PadTop
Padding top value in the height dimension.
bool m_ProjectionEnabled
Enable/disable the projection layer.
ArgMinMaxFunction
Definition: Types.hpp:89
flatbuffers::Offset< SpaceToBatchNdLayer > CreateSpaceToBatchNdLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::SpaceToBatchNdDescriptor > descriptor=0)
OutputShapeRounding m_OutputShapeRounding
The rounding method for the output shape. (Floor, Ceiling).
armnnSerializer::ResizeMethod GetFlatBufferResizeMethod(armnn::ResizeMethod method)
uint32_t m_NumInputs
Number of input tensors.
flatbuffers::Offset< UnidirectionalSequenceLstmDescriptor > CreateUnidirectionalSequenceLstmDescriptor(flatbuffers::FlatBufferBuilder &_fbb, uint32_t activationFunc=0, float clippingThresCell=0.0f, float clippingThresProj=0.0f, bool cifgEnabled=true, bool peepholeEnabled=false, bool projectionEnabled=false, bool layerNormEnabled=false, bool timeMajor=false)
profiling::ProfilingGuid LayerGuid
Define LayerGuid type.
Definition: Types.hpp:363
uint32_t GetNumDimensions() const
Get the number of dimensions.
flatbuffers::Offset< ComparisonDescriptor > CreateComparisonDescriptor(flatbuffers::FlatBufferBuilder &_fbb, armnnSerializer::ComparisonOperation operation=armnnSerializer::ComparisonOperation_Equal)
virtual const IInputSlot & GetInputSlot(unsigned int index) const =0
Get a const input slot handle by slot index.
A MeanDescriptor for the MeanLayer.
flatbuffers::Offset< MaximumLayer > CreateMaximumLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
virtual const IOutputSlot * GetConnection() const =0
static ISerializerPtr Create()
Definition: Serializer.cpp:35
armnnSerializer::PaddingMethod GetFlatBufferPaddingMethod(armnn::PaddingMethod paddingMethod)
bool m_LayerNormEnabled
Enable/disable layer normalization.
std::enable_if_t< std::is_unsigned< Source >::value &&std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
Definition: NumericCast.hpp:35
uint32_t m_PadRight
Padding right value in the width dimension.
flatbuffers::Offset< InstanceNormalizationDescriptor > CreateInstanceNormalizationDescriptor(flatbuffers::FlatBufferBuilder &_fbb, float gamma=0.0f, float beta=0.0f, float eps=0.0f, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NHWC)
A TransposeDescriptor for the TransposeLayer.
A StridedSliceDescriptor for the StridedSliceLayer.
virtual const TensorInfo & GetTensorInfo() const =0
uint32_t m_Axis
Axis to apply channel shuffle operation on.
virtual const IOutputSlot & GetOutputSlot(unsigned int index) const =0
Get the const output slot handle by slot index.
int m_Axis
Axis to reduce across the input tensor.
Definition: Descriptors.hpp:83
flatbuffers::Offset< IntData > CreateIntData(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< int32_t >> data=0)
virtual const char * GetName() const =0
Returns the name of the layer.
float m_ScaleY
Center size encoding scale y.
flatbuffers::Offset< ResizeLayer > CreateResizeLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::ResizeDescriptor > descriptor=0)
uint32_t GetNumViews() const
Get the number of views.
flatbuffers::Offset< FullyConnectedLayer > CreateFullyConnectedLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::FullyConnectedDescriptor > descriptor=0, flatbuffers::Offset< armnnSerializer::ConstTensor > weights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > biases=0)
float m_NmsScoreThreshold
NMS score threshold.
flatbuffers::Offset< DequantizeLayer > CreateDequantizeLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
flatbuffers::Offset< ChannelShuffleDescriptor > CreateChannelShuffleDescriptor(flatbuffers::FlatBufferBuilder &_fbb, uint32_t axis=0, uint32_t numGroups=0)
virtual LayerGuid GetOwningLayerGuid() const =0
A Pooling2dDescriptor for the Pooling2dLayer.
flatbuffers::Offset< DetectionPostProcessLayer > CreateDetectionPostProcessLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::DetectionPostProcessDescriptor > descriptor=0, flatbuffers::Offset< armnnSerializer::ConstTensor > anchors=0)
A NormalizationDescriptor for the NormalizationLayer.
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
flatbuffers::Offset< BatchToSpaceNdLayer > CreateBatchToSpaceNdLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::BatchToSpaceNdDescriptor > descriptor=0)
flatbuffers::Offset< armnnSerializer::FeatureCompatibilityVersions > GetVersionTable()
An InstanceNormalizationDescriptor for InstanceNormalizationLayer.
PaddingMethod m_PaddingMethod
The padding method to be used. (Exclude, IgnoreValue).
unsigned int GetConcatAxis() const
Get the concatenation axis value.
A ChannelShuffleDescriptor for the ChannelShuffle operator.
flatbuffers::Offset< LogSoftmaxLayer > CreateLogSoftmaxLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::LogSoftmaxDescriptor > descriptor=0)
float m_CellIntermediateScale
Cell intermediate quantization scale.
flatbuffers::Offset< StridedSliceDescriptor > CreateStridedSliceDescriptor(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< int32_t >> begin=0, flatbuffers::Offset< flatbuffers::Vector< int32_t >> end=0, flatbuffers::Offset< flatbuffers::Vector< int32_t >> stride=0, int32_t beginMask=0, int32_t endMask=0, int32_t shrinkAxisMask=0, int32_t ellipsisMask=0, int32_t newAxisMask=0, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NHWC)
flatbuffers::Offset< OriginsDescriptor > CreateOriginsDescriptor(flatbuffers::FlatBufferBuilder &_fbb, uint32_t concatAxis=0, uint32_t numViews=0, uint32_t numDimensions=0, flatbuffers::Offset< flatbuffers::Vector< flatbuffers::Offset< armnnSerializer::UintVector >>> viewOrigins=0)
flatbuffers::Offset< QLstmLayer > CreateQLstmLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::QLstmDescriptor > descriptor=0, flatbuffers::Offset< armnnSerializer::QLstmInputParams > inputParams=0)
uint32_t m_DilationZ
Dilation along z axis.
flatbuffers::Offset< LstmDescriptor > CreateLstmDescriptor(flatbuffers::FlatBufferBuilder &_fbb, uint32_t activationFunc=0, float clippingThresCell=0.0f, float clippingThresProj=0.0f, bool cifgEnabled=true, bool peepholeEnabled=false, bool projectionEnabled=false, bool layerNormEnabled=false)
float m_B
Beta lower bound value used by the activation functions. (BoundedReLu, Linear, TanH).
Definition: Descriptors.hpp:63
static void Destroy(ISerializer *serializer)
Definition: Serializer.cpp:40
A SoftmaxDescriptor for the SoftmaxLayer.
float m_Beta
Beta value for the normalization equation.
flatbuffers::Offset< ElementwiseUnaryLayer > CreateElementwiseUnaryLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::ElementwiseUnaryDescriptor > descriptor=0)
flatbuffers::Offset< PermuteLayer > CreatePermuteLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::PermuteDescriptor > descriptor=0)
uint32_t m_StrideZ
Stride value when proceeding through input for the depth dimension.
const OriginsDescriptor & GetOrigins() const
Get the View Origins.
bool m_CifgEnabled
Enable/disable CIFG (coupled input & forget gate).
PermutationVector m_DimMappings
Indicates how to translate tensor elements from a given source into the target destination, when source and target potentially have different memory layouts e.g.
uint32_t m_NormSize
Depth radius value.
flatbuffers::Offset< SpaceToBatchNdDescriptor > CreateSpaceToBatchNdDescriptor(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> blockShape=0, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> padList=0, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NHWC)
ActivationFunction m_Function
The activation function to use (Sigmoid, TanH, Linear, ReLu, BoundedReLu, SoftReLu, LeakyReLu, Abs, Sqrt, Square, Elu).
Definition: Descriptors.hpp:59
An input connection slot for a layer.
Definition: INetwork.hpp:26
flatbuffers::Offset< SerializedGraph > CreateSerializedGraph(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< flatbuffers::Offset< armnnSerializer::AnyLayer >>> layers=0, flatbuffers::Offset< flatbuffers::Vector< int32_t >> inputIds=0, flatbuffers::Offset< flatbuffers::Vector< int32_t >> outputIds=0, flatbuffers::Offset< armnnSerializer::FeatureCompatibilityVersions > featureVersions=0)
armnnSerializer::PoolingAlgorithm GetFlatBufferPoolingAlgorithm(armnn::PoolingAlgorithm poolingAlgorithm)
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
uint32_t m_DilationY
Dilation along y axis.
A FillDescriptor for the FillLayer.
A BatchNormalizationDescriptor for the BatchNormalizationLayer.
uint32_t m_PadLeft
Padding left value in the width dimension.
unsigned int GetNumBytes() const
Definition: Tensor.hpp:302
ActivationFunction
Definition: Types.hpp:73
flatbuffers::Offset< LogicalBinaryDescriptor > CreateLogicalBinaryDescriptor(flatbuffers::FlatBufferBuilder &_fbb, armnnSerializer::LogicalBinaryOperation operation=armnnSerializer::LogicalBinaryOperation_LogicalAnd)
flatbuffers::Offset< DivisionLayer > CreateDivisionLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
A PermuteDescriptor for the PermuteLayer.
flatbuffers::Offset< LogicalBinaryLayer > CreateLogicalBinaryLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::LogicalBinaryDescriptor > descriptor=0)
uint32_t m_PadRight
Padding right value in the width dimension.
int32_t m_HiddenStateZeroPoint
Hidden State zero point.
bool m_ConstantWeights
Enable/disable constant weights and biases.