ArmNN 22.05.01
Serializer.cpp
1 //
2 // Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 #include "Serializer.hpp"
6 #include "SerializerUtils.hpp"
7 
8 #include <armnn/Descriptors.hpp>
9 #include <armnn/LstmParams.hpp>
10 #include <armnn/QuantizedLstmParams.hpp>
11 #include <armnn/utility/IgnoreUnused.hpp>
12 #include <armnn/utility/NumericCast.hpp>
13 
14 #include <fmt/format.h>
15 #include <iostream>
16 
17 using namespace armnn;
18 namespace fb = flatbuffers;
19 namespace serializer = armnnSerializer;
20 
21 namespace armnnSerializer
22 {
23 
24 ISerializer::ISerializer() : pSerializerImpl(new SerializerImpl())
25 {
26 }
27 
28 ISerializer::~ISerializer() = default;
29 
30 ISerializer* ISerializer::CreateRaw()
31 {
32  return new ISerializer();
33 }
34 
35 ISerializerPtr ISerializer::Create()
36 {
37  return ISerializerPtr(CreateRaw(), &ISerializer::Destroy);
38 }
39 
40 void ISerializer::Destroy(ISerializer* serializer)
41 {
42  delete serializer;
43 }
44 
45 void ISerializer::Serialize(const armnn::INetwork& inNetwork)
46 {
47  pSerializerImpl->Serialize(inNetwork);
48 }
49 
50 bool ISerializer::SaveSerializedToStream(std::ostream& stream)
51 {
52  return pSerializerImpl->SaveSerializedToStream(stream);
53 }
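// Illustrative usage sketch (not part of this file): a client creates a serializer
// through the factory above, serializes a finalized network and writes the FlatBuffer
// to a binary stream. The file name and the 'network' variable are assumptions for
// the example, not values taken from this source.
//
//     armnnSerializer::ISerializerPtr serializer = armnnSerializer::ISerializer::Create();
//     serializer->Serialize(*network);                       // network is an armnn::INetworkPtr
//     std::ofstream stream("model.armnn", std::ios::binary); // requires <fstream>
//     serializer->SaveSerializedToStream(stream);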
54 
55 serializer::ActivationFunction GetFlatBufferActivationFunction(armnn::ActivationFunction function)
56 {
57  switch (function)
58  {
81  default:
82  return serializer::ActivationFunction::ActivationFunction_Sigmoid;
83  }
84 }
85 
86 serializer::ArgMinMaxFunction GetFlatBufferArgMinMaxFunction(armnn::ArgMinMaxFunction function)
87 {
88  switch (function)
89  {
90  case armnn::ArgMinMaxFunction::Max:
91  return serializer::ArgMinMaxFunction::ArgMinMaxFunction_Max;
92  case armnn::ArgMinMaxFunction::Min:
93  default:
94  return serializer::ArgMinMaxFunction::ArgMinMaxFunction_Min;
95  }
96 }
97 
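// Maps an ArmNN layer GUID to the compact id used in the serialized graph: the first
// GUID seen keeps the current m_layerId, each subsequently unseen GUID increments
// m_layerId, and repeated lookups return the id already recorded in m_guidMap.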
98 uint32_t SerializerStrategy::GetSerializedId(LayerGuid guid)
99 {
100  if (m_guidMap.empty())
101  {
102  m_guidMap.insert(std::make_pair(guid, m_layerId));
103  }
104  else if (m_guidMap.find(guid) == m_guidMap.end())
105  {
106  ++m_layerId;
107  m_guidMap.insert(std::make_pair(guid, m_layerId));
108 
109  return m_layerId;
110  }
111  return m_guidMap[guid];
112 }
113 
114 // Build FlatBuffer for Input Layer
115 void SerializerStrategy::SerializeInputLayer(const armnn::IConnectableLayer* layer, LayerBindingId id, const char* name)
116 {
117  IgnoreUnused(name);
118 
119  // Create FlatBuffer BaseLayer
120  auto flatBufferInputBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Input);
121 
122  // Create FlatBuffer BindableBaseLayer
123  auto flatBufferInputBindableBaseLayer = serializer::CreateBindableLayerBase(m_flatBufferBuilder,
124  flatBufferInputBaseLayer,
125  id);
126  // Push layer binding id to inputIds.
127  m_inputIds.push_back(id);
128 
129  // Create the FlatBuffer InputLayer
130  auto flatBufferInputLayer = serializer::CreateInputLayer(m_flatBufferBuilder, flatBufferInputBindableBaseLayer);
131 
132  // Add the AnyLayer to the FlatBufferLayers
133  CreateAnyLayer(flatBufferInputLayer.o, serializer::Layer::Layer_InputLayer);
134 }
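// Every Serialize*Layer method below follows the same pattern as this one: build a
// BaseLayer (plus a descriptor and any constant tensors where needed), wrap them in
// the layer-specific FlatBuffer table, then register the result with CreateAnyLayer,
// passing the raw FlatBuffer offset (the .o member) together with the Layer union tag.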
135 
136 // Build FlatBuffer for Output Layer
137 void SerializerStrategy::SerializeOutputLayer(const armnn::IConnectableLayer* layer,
138  LayerBindingId id, const char* name)
139 {
140  IgnoreUnused(name);
141 
142  // Create FlatBuffer BaseLayer
143  auto flatBufferOutputBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Output);
144 
145  // Create FlatBuffer BindableBaseLayer
146  auto flatBufferOutputBindableBaseLayer = serializer::CreateBindableLayerBase(m_flatBufferBuilder,
147  flatBufferOutputBaseLayer,
148  id);
149  // Push layer binding id to outputIds.
150  m_outputIds.push_back(id);
151 
152  // Create the FlatBuffer OutputLayer
153  auto flatBufferOutputLayer = serializer::CreateOutputLayer(m_flatBufferBuilder, flatBufferOutputBindableBaseLayer);
154  // Add the AnyLayer to the FlatBufferLayers
155  CreateAnyLayer(flatBufferOutputLayer.o, serializer::Layer::Layer_OutputLayer);
156 }
157 
158 // Build FlatBuffer for Activation Layer
159 void SerializerStrategy::SerializeActivationLayer(const armnn::IConnectableLayer* layer,
160  const armnn::ActivationDescriptor& descriptor,
161  const char* name)
162 {
163  IgnoreUnused(name);
164 
165  // Create FlatBuffer BaseLayer
166  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Activation);
167 
168  // Create the FlatBuffer ActivationDescriptor
169  auto flatBufferDescriptor = CreateActivationDescriptor(m_flatBufferBuilder,
170  GetFlatBufferActivationFunction(descriptor.m_Function),
171  descriptor.m_A,
172  descriptor.m_B);
173 
174  // Create the FlatBuffer ActivationLayer
175  auto flatBufferActivationLayer = CreateActivationLayer(m_flatBufferBuilder,
176  flatBufferBaseLayer,
177  flatBufferDescriptor);
178 
179  // Add the AnyLayer to the FlatBufferLayers
180  CreateAnyLayer(flatBufferActivationLayer.o, serializer::Layer::Layer_ActivationLayer);
181 }
182 
183 // Build FlatBuffer for Addition Layer
184 void SerializerStrategy::SerializeAdditionLayer(const armnn::IConnectableLayer* layer, const char* name)
185 {
186  IgnoreUnused(name);
187 
188  // Create FlatBuffer BaseLayer
189  auto flatBufferAdditionBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Addition);
190 
191  // Create the FlatBuffer AdditionLayer
192  auto flatBufferAdditionLayer = serializer::CreateAdditionLayer(m_flatBufferBuilder, flatBufferAdditionBaseLayer);
193 
194  // Add the AnyLayer to the FlatBufferLayers
195  CreateAnyLayer(flatBufferAdditionLayer.o, serializer::Layer::Layer_AdditionLayer);
196 }
197 
198 // Build FlatBuffer for ArgMinMax Layer
199 void SerializerStrategy::SerializeArgMinMaxLayer(const armnn::IConnectableLayer *layer,
200  const armnn::ArgMinMaxDescriptor& descriptor,
201  const char *name)
202 {
203  IgnoreUnused(name);
204 
205  // Create FlatBuffer BaseLayer
206  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_ArgMinMax);
207 
208  // Create FlatBuffer Descriptor
209  auto flatBufferDescriptor = CreateArgMinMaxDescriptor(m_flatBufferBuilder,
210  GetFlatBufferArgMinMaxFunction(descriptor.m_Function),
211  descriptor.m_Axis);
212 
213  // Create FlatBuffer ArgMinMaxLayer
214  auto flatBufferLayer = CreateArgMinMaxLayer(m_flatBufferBuilder,
215  flatBufferBaseLayer,
216  flatBufferDescriptor);
217 
218  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_ArgMinMaxLayer);
219 }
220 
221 // Build FlatBuffer for BatchToSpaceNd Layer
222 void SerializerStrategy::SerializeBatchToSpaceNdLayer(const armnn::IConnectableLayer* layer,
223  const armnn::BatchToSpaceNdDescriptor& descriptor,
224  const char* name)
225 {
226  IgnoreUnused(name);
227 
228  // Create FlatBuffer BaseLayer
229  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_BatchToSpaceNd);
230 
231  std::vector<unsigned int> crops;
232  crops.reserve(descriptor.m_Crops.size() * 2);
233  for (auto& crop : descriptor.m_Crops)
234  {
235  crops.push_back(crop.first);
236  crops.push_back(crop.second);
237  }
238 
239  auto flatBufferDescriptor =
240  CreateBatchToSpaceNdDescriptor(m_flatBufferBuilder,
241  m_flatBufferBuilder.CreateVector(descriptor.m_BlockShape),
242  m_flatBufferBuilder.CreateVector(crops),
243  GetFlatBufferDataLayout(descriptor.m_DataLayout));
244 
245  auto flatBufferLayer = serializer::CreateBatchToSpaceNdLayer(m_flatBufferBuilder,
246  flatBufferBaseLayer,
247  flatBufferDescriptor);
248 
249  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_BatchToSpaceNdLayer);
250 }
251 
252 void SerializerStrategy::SerializeBatchNormalizationLayer(
253  const armnn::IConnectableLayer* layer,
254  const armnn::BatchNormalizationDescriptor& batchNormDescriptor,
255  const std::vector<armnn::ConstTensor>& constants,
256  const char* name)
257 {
258  IgnoreUnused(name);
259 
260  const armnn::ConstTensor& mean = constants[0];
261  const armnn::ConstTensor& variance = constants[1];
262  const armnn::ConstTensor& beta = constants[2];
263  const armnn::ConstTensor& gamma = constants[3];
264 
265  auto fbBatchNormalizationBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_BatchNormalization);
266  auto fbBatchNormalizationDescriptor = serializer::CreateBatchNormalizationDescriptor(
267  m_flatBufferBuilder,
268  batchNormDescriptor.m_Eps,
269  GetFlatBufferDataLayout(batchNormDescriptor.m_DataLayout));
270 
271  auto fbMeanConstTensorInfo = CreateConstTensorInfo(mean);
272  auto fbVarianceConstTensorInfo = CreateConstTensorInfo(variance);
273  auto fbBetaConstTensorInfo = CreateConstTensorInfo(beta);
274  auto fbGammaConstTensorInfo = CreateConstTensorInfo(gamma);
275  auto fbBatchNormalizationLayer = serializer::CreateBatchNormalizationLayer(m_flatBufferBuilder,
276  fbBatchNormalizationBaseLayer,
277  fbBatchNormalizationDescriptor,
278  fbMeanConstTensorInfo,
279  fbVarianceConstTensorInfo,
280  fbBetaConstTensorInfo,
281  fbGammaConstTensorInfo);
282 
283  CreateAnyLayer(fbBatchNormalizationLayer.o, serializer::Layer::Layer_BatchNormalizationLayer);
284 }
285 
286 void SerializerStrategy::SerializeCastLayer(const armnn::IConnectableLayer* layer,
287  const char* name)
288 {
289  IgnoreUnused(name);
290 
291  auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Cast);
292  auto fbCastLayer = serializer::CreateCastLayer(m_flatBufferBuilder, fbBaseLayer);
293  CreateAnyLayer(fbCastLayer.o, serializer::Layer::Layer_CastLayer);
294 }
295 
296 void SerializerStrategy::SerializeChannelShuffleLayer(const armnn::IConnectableLayer* layer,
297  const armnn::ChannelShuffleDescriptor& descriptor,
298  const char* name)
299 {
300  IgnoreUnused(name);
301  auto fbDescriptor = CreateChannelShuffleDescriptor(m_flatBufferBuilder,
302  descriptor.m_Axis,
303  descriptor.m_NumGroups);
304  auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_ChannelShuffle);
305  auto fbChannelShuffleLayer = serializer::CreateChannelShuffleLayer(m_flatBufferBuilder, fbBaseLayer, fbDescriptor);
306  CreateAnyLayer(fbChannelShuffleLayer.o, serializer::Layer::Layer_ChannelShuffleLayer);
307 }
308 
309 void SerializerStrategy::SerializeComparisonLayer(const armnn::IConnectableLayer* layer,
310  const armnn::ComparisonDescriptor& descriptor,
311  const char* name)
312 {
313  IgnoreUnused(name);
314 
315  auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Comparison);
316  auto fbDescriptor = serializer::CreateComparisonDescriptor(
317  m_flatBufferBuilder,
318  GetFlatBufferComparisonOperation(descriptor.m_Operation));
319 
320  auto fbLayer = serializer::CreateComparisonLayer(m_flatBufferBuilder, fbBaseLayer, fbDescriptor);
321  CreateAnyLayer(fbLayer.o, serializer::Layer::Layer_ComparisonLayer);
322 }
323 
324 // Build FlatBuffer for Constant Layer
325 void SerializerStrategy::SerializeConstantLayer(const armnn::IConnectableLayer* layer,
326  const std::vector<armnn::ConstTensor>& constants,
327  const char* name)
328 {
329  IgnoreUnused(name);
330 
331  armnn::ConstTensor input = constants[0];
332 
333  // Create FlatBuffer BaseLayer
334  auto flatBufferConstantBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Constant);
335 
336  auto flatBufferConstTensorInfo = CreateConstTensorInfo(input);
337 
338  // Create the FlatBuffer ConstantLayer
339  auto flatBufferLayer = CreateConstantLayer(m_flatBufferBuilder,
340  flatBufferConstantBaseLayer,
341  flatBufferConstTensorInfo);
342 
343  // Add the AnyLayer to the FlatBufferLayers
344  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_ConstantLayer);
345 }
346 
347 // Build FlatBuffer for Convolution2dLayer
348 void SerializerStrategy::SerializeConvolution2dLayer(const armnn::IConnectableLayer* layer,
349  const armnn::Convolution2dDescriptor& descriptor,
350  const char* name)
351 {
352  IgnoreUnused(name);
353 
354  // Create FlatBuffer BaseLayer
355  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Convolution2d);
356 
357  auto flatBufferDescriptor = CreateConvolution2dDescriptor(m_flatBufferBuilder,
358  descriptor.m_PadLeft,
359  descriptor.m_PadRight,
360  descriptor.m_PadTop,
361  descriptor.m_PadBottom,
362  descriptor.m_StrideX,
363  descriptor.m_StrideY,
364  descriptor.m_DilationX,
365  descriptor.m_DilationY,
366  descriptor.m_BiasEnabled,
367  GetFlatBufferDataLayout(descriptor.m_DataLayout));
368 
369  // Create the FlatBuffer Convolution2dLayer
370  auto flatBufferLayer = CreateConvolution2dLayer(m_flatBufferBuilder,
371  flatBufferBaseLayer,
372  flatBufferDescriptor);
373 
374  // Add the AnyLayer to the FlatBufferLayers
375  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_Convolution2dLayer);
376 }
377 
378 // Build FlatBuffer for Convolution3dLayer
379 void SerializerStrategy::SerializeConvolution3dLayer(const armnn::IConnectableLayer* layer,
380  const armnn::Convolution3dDescriptor& descriptor,
381  const char* name)
382 {
383  IgnoreUnused(name);
384 
385  // Create FlatBuffer BaseLayer
386  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Convolution3d);
387 
388  auto flatBufferDescriptor = CreateConvolution3dDescriptor(m_flatBufferBuilder,
389  descriptor.m_PadLeft,
390  descriptor.m_PadRight,
391  descriptor.m_PadTop,
392  descriptor.m_PadBottom,
393  descriptor.m_PadFront,
394  descriptor.m_PadBack,
395  descriptor.m_StrideX,
396  descriptor.m_StrideY,
397  descriptor.m_StrideZ,
398  descriptor.m_DilationX,
399  descriptor.m_DilationY,
400  descriptor.m_DilationZ,
401  descriptor.m_BiasEnabled,
402  GetFlatBufferDataLayout(descriptor.m_DataLayout));
403 
404  // Create the FlatBuffer Convolution3dLayer
405  auto flatBufferLayer = CreateConvolution3dLayer(m_flatBufferBuilder,
406  flatBufferBaseLayer,
407  flatBufferDescriptor);
408 
409  // Add the AnyLayer to the FlatBufferLayers
410  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_Convolution3dLayer);
411 }
412 
413 void SerializerStrategy::SerializeDepthToSpaceLayer(const armnn::IConnectableLayer* layer,
414  const armnn::DepthToSpaceDescriptor& descriptor,
415  const char* name)
416 {
417  IgnoreUnused(name);
418 
419  auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_DepthToSpace);
420  auto fbDescriptor = CreateDepthToSpaceDescriptor(m_flatBufferBuilder,
421  descriptor.m_BlockSize,
422  GetFlatBufferDataLayout(descriptor.m_DataLayout));
423 
424  auto fbLayer = serializer::CreateDepthToSpaceLayer(m_flatBufferBuilder, fbBaseLayer, fbDescriptor);
425 
426  CreateAnyLayer(fbLayer.o, serializer::Layer::Layer_DepthToSpaceLayer);
427 }
428 
429 void SerializerStrategy::SerializeDepthwiseConvolution2dLayer(const armnn::IConnectableLayer* layer,
430  const armnn::DepthwiseConvolution2dDescriptor& descriptor,
431  const char* name)
432 {
433  IgnoreUnused(name);
434 
435  auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_DepthwiseConvolution2d);
436  auto fbDescriptor = CreateDepthwiseConvolution2dDescriptor(m_flatBufferBuilder,
437  descriptor.m_PadLeft,
438  descriptor.m_PadRight,
439  descriptor.m_PadTop,
440  descriptor.m_PadBottom,
441  descriptor.m_StrideX,
442  descriptor.m_StrideY,
443  descriptor.m_DilationX,
444  descriptor.m_DilationY,
445  descriptor.m_BiasEnabled,
446  GetFlatBufferDataLayout(descriptor.m_DataLayout));
447 
448  auto flatBufferLayer = CreateDepthwiseConvolution2dLayer(m_flatBufferBuilder,
449  fbBaseLayer,
450  fbDescriptor);
451 
452  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_DepthwiseConvolution2dLayer);
453 }
454 
455 void SerializerStrategy::SerializeDequantizeLayer(const armnn::IConnectableLayer* layer,
456  const char* name)
457 {
458  IgnoreUnused(name);
459 
460  auto fbDequantizeBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Dequantize);
461  auto fbDequantizeLayer = serializer::CreateDequantizeLayer(m_flatBufferBuilder, fbDequantizeBaseLayer);
462 
463  CreateAnyLayer(fbDequantizeLayer.o, serializer::Layer::Layer_DequantizeLayer);
464 }
465 
466 void SerializerStrategy::SerializeDetectionPostProcessLayer(const armnn::IConnectableLayer* layer,
467  const armnn::DetectionPostProcessDescriptor& descriptor,
468  const std::vector<armnn::ConstTensor>& constants,
469  const char* name)
470 {
471  IgnoreUnused(name);
472 
473  const armnn::ConstTensor& anchors = constants[0];
474 
475  auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_DetectionPostProcess);
476  auto fbDescriptor = CreateDetectionPostProcessDescriptor(m_flatBufferBuilder,
477  descriptor.m_MaxDetections,
478  descriptor.m_MaxClassesPerDetection,
479  descriptor.m_DetectionsPerClass,
480  descriptor.m_NmsScoreThreshold,
481  descriptor.m_NmsIouThreshold,
482  descriptor.m_NumClasses,
483  descriptor.m_UseRegularNms,
484  descriptor.m_ScaleX,
485  descriptor.m_ScaleY,
486  descriptor.m_ScaleW,
487  descriptor.m_ScaleH);
488 
489  flatbuffers::Offset<serializer::ConstTensor> fbAnchorsConstTensorInfo = CreateConstTensorInfo(anchors);
490 
491  auto flatBufferLayer = CreateDetectionPostProcessLayer(m_flatBufferBuilder,
492  fbBaseLayer,
493  fbDescriptor,
494  fbAnchorsConstTensorInfo);
495 
496  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_DetectionPostProcessLayer);
497 }
498 
499 void SerializerStrategy::SerializeDivisionLayer(const armnn::IConnectableLayer* layer, const char* name)
500 {
501  IgnoreUnused(name);
502 
503  auto fbDivisionBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Division);
504  auto fbDivisionLayer = serializer::CreateDivisionLayer(m_flatBufferBuilder, fbDivisionBaseLayer);
505 
506  CreateAnyLayer(fbDivisionLayer.o, serializer::Layer::Layer_DivisionLayer);
507 }
508 
509 void SerializerStrategy::SerializeElementwiseUnaryLayer(const armnn::IConnectableLayer* layer,
510  const armnn::ElementwiseUnaryDescriptor& descriptor,
511  const char* name)
512 {
513  IgnoreUnused(name);
514 
515  auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_ElementwiseUnary);
516  auto fbDescriptor = serializer::CreateElementwiseUnaryDescriptor(
517  m_flatBufferBuilder,
518  GetFlatBufferUnaryOperation(descriptor.m_Operation));
519 
520  auto fbLayer = serializer::CreateElementwiseUnaryLayer(m_flatBufferBuilder, fbBaseLayer, fbDescriptor);
521  CreateAnyLayer(fbLayer.o, serializer::Layer::Layer_ElementwiseUnaryLayer);
522 }
523 
524 void SerializerStrategy::SerializeFillLayer(const armnn::IConnectableLayer* layer,
525  const armnn::FillDescriptor& fillDescriptor,
526  const char* name)
527 {
528  IgnoreUnused(name);
529 
530  auto fbFillBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Fill);
531 
532  auto fbDescriptor = serializer::CreateFillDescriptor(m_flatBufferBuilder, fillDescriptor.m_Value);
533 
534  auto fbFillLayer = serializer::CreateFillLayer(m_flatBufferBuilder, fbFillBaseLayer, fbDescriptor);
535 
536  CreateAnyLayer(fbFillLayer.o, serializer::Layer::Layer_FillLayer);
537 }
538 
539 void SerializerStrategy::SerializeFloorLayer(const armnn::IConnectableLayer *layer, const char *name)
540 {
541  IgnoreUnused(name);
542 
543  auto flatBufferFloorBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Floor);
544  auto flatBufferFloorLayer = serializer::CreateFloorLayer(m_flatBufferBuilder, flatBufferFloorBaseLayer);
545 
546  CreateAnyLayer(flatBufferFloorLayer.o, serializer::Layer::Layer_FloorLayer);
547 }
548 
549 void SerializerStrategy::SerializeGatherLayer(const armnn::IConnectableLayer* layer,
550  const armnn::GatherDescriptor& gatherDescriptor,
551  const char* name)
552 {
553  IgnoreUnused(name);
554 
555  auto fbGatherDescriptor = CreateGatherDescriptor(m_flatBufferBuilder,
556  gatherDescriptor.m_Axis);
557  auto fbGatherBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Gather);
558  auto flatBufferLayer = serializer::CreateGatherLayer(m_flatBufferBuilder, fbGatherBaseLayer, fbGatherDescriptor);
559 
560  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_GatherLayer);
561 }
562 
563 void SerializerStrategy::SerializeGatherNdLayer(const armnn::IConnectableLayer* layer,
564  const char* name)
565 {
566  IgnoreUnused(name);
567 
568  auto fbGatherNdBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_GatherNd);
569  auto flatBufferLayer = serializer::CreateGatherNdLayer(m_flatBufferBuilder, fbGatherNdBaseLayer);
570 
571  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_GatherNdLayer);
572 }
573 
574 void SerializerStrategy::SerializeInstanceNormalizationLayer(
575  const armnn::IConnectableLayer* layer,
576  const armnn::InstanceNormalizationDescriptor& instanceNormalizationDescriptor,
577  const char* name)
578 {
579  IgnoreUnused(name);
580 
581  auto fbDescriptor = serializer::CreateInstanceNormalizationDescriptor(
582  m_flatBufferBuilder,
583  instanceNormalizationDescriptor.m_Gamma,
584  instanceNormalizationDescriptor.m_Beta,
585  instanceNormalizationDescriptor.m_Eps,
586  GetFlatBufferDataLayout(instanceNormalizationDescriptor.m_DataLayout));
587 
588  auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_InstanceNormalization);
589  auto fbLayer = serializer::CreateInstanceNormalizationLayer(m_flatBufferBuilder, fbBaseLayer, fbDescriptor);
590 
591  CreateAnyLayer(fbLayer.o, serializer::Layer::Layer_InstanceNormalizationLayer);
592 }
593 
594 void SerializerStrategy::SerializeL2NormalizationLayer(const armnn::IConnectableLayer* layer,
595  const armnn::L2NormalizationDescriptor& l2NormalizationDescriptor,
596  const char* name)
597 {
598  IgnoreUnused(name);
599 
600  // Create FlatBuffer BaseLayer
601  auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_L2Normalization);
602 
603  // Create the FlatBuffer L2Normalization Descriptor
604  auto fbDescriptor = serializer::CreateL2NormalizationDescriptor(
605  m_flatBufferBuilder,
606  GetFlatBufferDataLayout(l2NormalizationDescriptor.m_DataLayout),
607  l2NormalizationDescriptor.m_Eps);
608 
609  // Create FlatBuffer layer
610  auto fbLayer = serializer::CreateL2NormalizationLayer(m_flatBufferBuilder, fbBaseLayer, fbDescriptor);
611 
612  CreateAnyLayer(fbLayer.o, serializer::Layer::Layer_L2NormalizationLayer);
613 }
614 
615 void SerializerStrategy::SerializeLogicalBinaryLayer(const armnn::IConnectableLayer* layer,
616  const armnn::LogicalBinaryDescriptor& descriptor,
617  const char* name)
618 {
619  IgnoreUnused(name);
620 
621  auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_LogicalBinary);
622  auto fbDescriptor = serializer::CreateLogicalBinaryDescriptor(
623  m_flatBufferBuilder,
624  GetFlatBufferLogicalBinaryOperation(descriptor.m_Operation));
625 
626  auto fbLayer = serializer::CreateLogicalBinaryLayer(m_flatBufferBuilder, fbBaseLayer, fbDescriptor);
627  CreateAnyLayer(fbLayer.o, serializer::Layer::Layer_LogicalBinaryLayer);
628 }
629 
630 void SerializerStrategy::SerializeLogSoftmaxLayer(const armnn::IConnectableLayer* layer,
631  const armnn::LogSoftmaxDescriptor& logSoftmaxDescriptor,
632  const char* name)
633 {
634  IgnoreUnused(name);
635 
636  // Create FlatBuffer BaseLayer
637  auto flatBufferLogSoftmaxBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_LogSoftmax);
638 
639  // Create the FlatBuffer LogSoftmaxDescriptor
640  auto flatBufferLogSoftmaxDesc =
641  serializer::CreateLogSoftmaxDescriptor(m_flatBufferBuilder,
642  logSoftmaxDescriptor.m_Beta,
643  logSoftmaxDescriptor.m_Axis);
644 
645  // Create the FlatBuffer LogSoftmaxLayer
646  auto flatBufferLogSoftmaxLayer =
647  serializer::CreateLogSoftmaxLayer(m_flatBufferBuilder,
648  flatBufferLogSoftmaxBaseLayer,
649  flatBufferLogSoftmaxDesc);
650 
651  CreateAnyLayer(flatBufferLogSoftmaxLayer.o, serializer::Layer::Layer_LogSoftmaxLayer);
652 }
653 
654 void SerializerStrategy::SerializeLstmLayer(const armnn::IConnectableLayer* layer,
655  const armnn::LstmDescriptor& descriptor,
656  const std::vector<armnn::ConstTensor>& constants,
657  const char* name)
658 {
659  IgnoreUnused(name);
660 
661  auto fbLstmBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Lstm);
662 
663  auto fbLstmDescriptor = serializer::CreateLstmDescriptor(
664  m_flatBufferBuilder,
665  descriptor.m_ActivationFunc,
666  descriptor.m_ClippingThresCell,
667  descriptor.m_ClippingThresProj,
668  descriptor.m_CifgEnabled,
669  descriptor.m_PeepholeEnabled,
670  descriptor.m_ProjectionEnabled,
671  descriptor.m_LayerNormEnabled);
672 
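// The constants vector is read in the order below: the nine mandatory weights and
// biases first, followed by the optional CIFG, peephole, projection and
// layer-normalisation tensors, each group present only when the corresponding flag
// in the descriptor enables it.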
673  // Index for constants vector
674  std::size_t i = 0;
675 
676  // Get mandatory/basic input parameters
677  auto inputToForgetWeights = CreateConstTensorInfo(constants[i++]); //InputToForgetWeights
678  auto inputToCellWeights = CreateConstTensorInfo(constants[i++]); //InputToCellWeights
679  auto inputToOutputWeights = CreateConstTensorInfo(constants[i++]); //InputToOutputWeights
680  auto recurrentToForgetWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToForgetWeights
681  auto recurrentToCellWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToCellWeights
682  auto recurrentToOutputWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToOutputWeights
683  auto forgetGateBias = CreateConstTensorInfo(constants[i++]); //ForgetGateBias
684  auto cellBias = CreateConstTensorInfo(constants[i++]); //CellBias
685  auto outputGateBias = CreateConstTensorInfo(constants[i++]); //OutputGateBias
686 
687 
688 
689  //Define optional parameters, these will be set depending on configuration in Lstm descriptor
690  flatbuffers::Offset<serializer::ConstTensor> inputToInputWeights;
691  flatbuffers::Offset<serializer::ConstTensor> recurrentToInputWeights;
692  flatbuffers::Offset<serializer::ConstTensor> cellToInputWeights;
693  flatbuffers::Offset<serializer::ConstTensor> inputGateBias;
694  flatbuffers::Offset<serializer::ConstTensor> projectionWeights;
695  flatbuffers::Offset<serializer::ConstTensor> projectionBias;
696  flatbuffers::Offset<serializer::ConstTensor> cellToForgetWeights;
697  flatbuffers::Offset<serializer::ConstTensor> cellToOutputWeights;
698  flatbuffers::Offset<serializer::ConstTensor> inputLayerNormWeights;
699  flatbuffers::Offset<serializer::ConstTensor> forgetLayerNormWeights;
700  flatbuffers::Offset<serializer::ConstTensor> cellLayerNormWeights;
701  flatbuffers::Offset<serializer::ConstTensor> outputLayerNormWeights;
702 
703  if (!descriptor.m_CifgEnabled)
704  {
705  inputToInputWeights = CreateConstTensorInfo(constants[i++]); //InputToInputWeights
706  recurrentToInputWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToInputWeights
707  inputGateBias = CreateConstTensorInfo(constants[i++]); //InputGateBias
708  }
709 
710  if (descriptor.m_PeepholeEnabled)
711  {
712  if (!descriptor.m_CifgEnabled)
713  {
714  cellToInputWeights = CreateConstTensorInfo(constants[i++]); //CellToInputWeights
715  }
716  cellToForgetWeights = CreateConstTensorInfo(constants[i++]); //CellToForgetWeights
717  cellToOutputWeights = CreateConstTensorInfo(constants[i++]); //CellToOutputWeights
718  }
719 
720  if (descriptor.m_ProjectionEnabled)
721  {
722  projectionWeights = CreateConstTensorInfo(constants[i++]); //ProjectionWeights
723  projectionBias = CreateConstTensorInfo(constants[i++]); //ProjectionBias
724  }
725 
726  if (descriptor.m_LayerNormEnabled)
727  {
728  if (!descriptor.m_CifgEnabled)
729  {
730  inputLayerNormWeights = CreateConstTensorInfo(constants[i++]); //InputLayerNormWeights
731  }
732  forgetLayerNormWeights = CreateConstTensorInfo(constants[i++]); //ForgetLayerNormWeights
733  cellLayerNormWeights = CreateConstTensorInfo(constants[i++]); //CellLayerNormWeights
734  outputLayerNormWeights = CreateConstTensorInfo(constants[i++]); //OutputLayerNormWeights
735  }
736 
737  auto fbLstmParams = serializer::CreateLstmInputParams(
738  m_flatBufferBuilder,
739  inputToForgetWeights,
740  inputToCellWeights,
741  inputToOutputWeights,
742  recurrentToForgetWeights,
743  recurrentToCellWeights,
744  recurrentToOutputWeights,
745  forgetGateBias,
746  cellBias,
747  outputGateBias,
748  inputToInputWeights,
749  recurrentToInputWeights,
750  cellToInputWeights,
751  inputGateBias,
752  projectionWeights,
753  projectionBias,
754  cellToForgetWeights,
755  cellToOutputWeights,
756  inputLayerNormWeights,
757  forgetLayerNormWeights,
758  cellLayerNormWeights,
759  outputLayerNormWeights);
760 
761  auto fbLstmLayer = serializer::CreateLstmLayer(
762  m_flatBufferBuilder,
763  fbLstmBaseLayer,
764  fbLstmDescriptor,
765  fbLstmParams);
766 
767  CreateAnyLayer(fbLstmLayer.o, serializer::Layer::Layer_LstmLayer);
768 }
769 
770 void SerializerStrategy::SerializeMaximumLayer(const armnn::IConnectableLayer* layer, const char* name)
771 {
772  IgnoreUnused(name);
773 
774  auto fbMaximumBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Maximum);
775  auto fbMaximumLayer = serializer::CreateMaximumLayer(m_flatBufferBuilder, fbMaximumBaseLayer);
776 
777  CreateAnyLayer(fbMaximumLayer.o, serializer::Layer::Layer_MaximumLayer);
778 }
779 
780 void SerializerStrategy::SerializeMeanLayer(const armnn::IConnectableLayer* layer,
781  const armnn::MeanDescriptor& descriptor,
782  const char* name)
783 {
784  IgnoreUnused(name);
785 
786  auto fbMeanBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Mean);
787  auto fbMeanDescriptor = serializer::CreateMeanDescriptor(m_flatBufferBuilder,
788  m_flatBufferBuilder.CreateVector(descriptor.m_Axis),
789  descriptor.m_KeepDims);
790 
791  auto fbMeanLayer = serializer::CreateMeanLayer(m_flatBufferBuilder,
792  fbMeanBaseLayer,
793  fbMeanDescriptor);
794 
795  CreateAnyLayer(fbMeanLayer.o, serializer::Layer::Layer_MeanLayer);
796 }
797 
798 void SerializerStrategy::SerializeMinimumLayer(const armnn::IConnectableLayer* layer, const char* name)
799 {
800  IgnoreUnused(name);
801 
802  auto fbMinimumBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Minimum);
803  auto fbMinimumLayer = serializer::CreateMinimumLayer(m_flatBufferBuilder, fbMinimumBaseLayer);
804 
805  CreateAnyLayer(fbMinimumLayer.o, serializer::Layer::Layer_MinimumLayer);
806 }
807 
808 void SerializerStrategy::SerializeMergeLayer(const armnn::IConnectableLayer* layer, const char* name)
809 {
810  IgnoreUnused(name);
811 
812  auto fbMergeBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Merge);
813  auto fbMergeLayer = serializer::CreateMergeLayer(m_flatBufferBuilder, fbMergeBaseLayer);
814 
815  CreateAnyLayer(fbMergeLayer.o, serializer::Layer::Layer_MergeLayer);
816 }
817 
818 void SerializerStrategy::SerializeConcatLayer(const armnn::IConnectableLayer* layer,
819  const armnn::ConcatDescriptor& concatDescriptor,
820  const char* name)
821 {
822  IgnoreUnused(name);
823 
824  auto flatBufferConcatBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Concat);
825 
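// Flatten each view's origin coordinates into a UintVector so the OriginsDescriptor
// below can carry one origin vector per concatenated view.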
826  std::vector<flatbuffers::Offset<UintVector>> views;
827  for (unsigned int v = 0; v < concatDescriptor.GetNumViews(); ++v)
828  {
829  const uint32_t* origin = concatDescriptor.GetViewOrigin(v);
830  std::vector<uint32_t> origins;
831  for (unsigned int d = 0; d < concatDescriptor.GetNumDimensions(); ++d)
832  {
833  origins.push_back(origin[d]);
834  }
835  auto view = m_flatBufferBuilder.CreateVector(origins);
836  auto uintVector = CreateUintVector(m_flatBufferBuilder, view);
837  views.push_back(uintVector);
838  }
839 
840  auto flatBufferConcatDescriptor = CreateOriginsDescriptor(m_flatBufferBuilder,
841  concatDescriptor.GetConcatAxis(),
842  concatDescriptor.GetNumViews(),
843  concatDescriptor.GetNumDimensions(),
844  m_flatBufferBuilder.CreateVector(views));
845 
846  auto flatBufferLayer = CreateConcatLayer(m_flatBufferBuilder,
847  flatBufferConcatBaseLayer,
848  flatBufferConcatDescriptor);
849 
850  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_ConcatLayer);
851 }
852 
853 void SerializerStrategy::SerializeMultiplicationLayer(const armnn::IConnectableLayer* layer, const char* name)
854 {
855  IgnoreUnused(name);
856 
857  auto fbMultiplicationBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Multiplication);
858  auto fbMultiplicationLayer = serializer::CreateMultiplicationLayer(m_flatBufferBuilder,
859  fbMultiplicationBaseLayer);
860 
861  CreateAnyLayer(fbMultiplicationLayer.o, serializer::Layer::Layer_MultiplicationLayer);
862 }
863 
864 void SerializerStrategy::SerializePadLayer(const armnn::IConnectableLayer* layer,
865  const armnn::PadDescriptor& padDescriptor,
866  const char* name)
867 {
868  IgnoreUnused(name);
869 
870  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Pad);
871 
872  std::vector<unsigned int> padList;
873  for (auto& p: padDescriptor.m_PadList)
874  {
875  padList.push_back(p.first);
876  padList.push_back(p.second);
877  }
878 
879  auto flatBufferPadDesc = serializer::CreatePadDescriptor(m_flatBufferBuilder,
880  m_flatBufferBuilder.CreateVector(padList),
881  padDescriptor.m_PadValue,
882  GetFlatBufferPaddingMode(padDescriptor.m_PaddingMode));
883 
884  auto flatBufferPadLayer = serializer::CreatePadLayer(m_flatBufferBuilder,
885  flatBufferBaseLayer,
886  flatBufferPadDesc);
887 
888  CreateAnyLayer(flatBufferPadLayer.o, serializer::Layer::Layer_PadLayer);
889 }
890 
891 void SerializerStrategy::SerializePermuteLayer(const armnn::IConnectableLayer* layer,
892  const armnn::PermuteDescriptor& permuteDescriptor,
893  const char* name)
894 {
895  IgnoreUnused(name);
896 
897  // Create FlatBuffer BaseLayer
898  auto flatBufferPermuteBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Permute);
899 
900  std::vector<unsigned int> dimMappings;
901  for (unsigned int i=0; i<permuteDescriptor.m_DimMappings.GetSize(); ++i)
902  {
903  dimMappings.push_back(permuteDescriptor.m_DimMappings[i]);
904  }
905 
906  auto flatBufferPermuteDesc = serializer::CreatePermuteDescriptor(m_flatBufferBuilder,
907  m_flatBufferBuilder.CreateVector(dimMappings));
908 
909  // Create the FlatBuffer PermuteLayer
910  auto flatBufferPermuteLayer = serializer::CreatePermuteLayer(m_flatBufferBuilder,
911  flatBufferPermuteBaseLayer,
912  flatBufferPermuteDesc);
913 
914  // Add the AnyLayer to the FlatBufferLayers
915  CreateAnyLayer(flatBufferPermuteLayer.o, serializer::Layer::Layer_PermuteLayer);
916 }
917 
918 // Build FlatBuffer for Rank Layer
919 void SerializerStrategy::SerializeRankLayer(const armnn::IConnectableLayer* layer,
920  const char* name)
921 {
922  IgnoreUnused(name);
923  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Rank);
924  auto flatBufferRankLayer = serializer::CreateRankLayer(m_flatBufferBuilder, flatBufferBaseLayer);
925 
926  CreateAnyLayer(flatBufferRankLayer.o, serializer::Layer::Layer_RankLayer);
927 }
928 
929 void SerializerStrategy::SerializeReduceLayer(const armnn::IConnectableLayer* layer,
930  const armnn::ReduceDescriptor& reduceDescriptor,
931  const char*)
932 {
933  auto fbReduceBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Reduce);
934  auto fbDescriptor = CreateReduceDescriptor(m_flatBufferBuilder,
935  reduceDescriptor.m_KeepDims,
936  m_flatBufferBuilder.CreateVector(reduceDescriptor.m_vAxis),
937  GetFlatBufferReduceOperation(reduceDescriptor.m_ReduceOperation));
938  auto fbReduceLayer = serializer::CreateReduceLayer(m_flatBufferBuilder,
939  fbReduceBaseLayer,
940  fbDescriptor);
941 
942  CreateAnyLayer(fbReduceLayer.o, serializer::Layer::Layer_ReduceLayer);
943 }
944 
945 // Build FlatBuffer for Reshape Layer
946 void SerializerStrategy::SerializeReshapeLayer(const armnn::IConnectableLayer* layer,
947  const armnn::ReshapeDescriptor& reshapeDescriptor,
948  const char* name)
949 {
950  IgnoreUnused(name);
951 
952  // Create FlatBuffer BaseLayer
953  auto flatBufferReshapeBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Reshape);
954 
955  std::vector<unsigned int> targetShape;
956  for (unsigned int i =0; i < reshapeDescriptor.m_TargetShape.GetNumDimensions(); i++)
957  {
958  targetShape.push_back(reshapeDescriptor.m_TargetShape[i]);
959  }
960 
961  auto flatBufferReshapeDesc = serializer::CreateReshapeDescriptor(m_flatBufferBuilder,
962  m_flatBufferBuilder.CreateVector(targetShape));
963 
964  // Create the FlatBuffer ReshapeLayer
965  auto flatBufferReshapeLayer = serializer::CreateReshapeLayer(m_flatBufferBuilder, flatBufferReshapeBaseLayer,
966  flatBufferReshapeDesc);
967 
968  // Add the AnyLayer to the FlatBufferLayers
969  CreateAnyLayer(flatBufferReshapeLayer.o, serializer::Layer::Layer_ReshapeLayer);
970 }
971 
972 void SerializerStrategy::SerializeResizeLayer(const armnn::IConnectableLayer* layer,
973  const armnn::ResizeDescriptor& resizeDescriptor,
974  const char* name)
975 {
976  IgnoreUnused(name);
977 
978  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Resize);
979 
980  auto flatBufferDescriptor =
981  CreateResizeDescriptor(m_flatBufferBuilder,
982  resizeDescriptor.m_TargetHeight,
983  resizeDescriptor.m_TargetWidth,
984  GetFlatBufferResizeMethod(resizeDescriptor.m_Method),
985  GetFlatBufferDataLayout(resizeDescriptor.m_DataLayout),
986  resizeDescriptor.m_AlignCorners,
987  resizeDescriptor.m_HalfPixelCenters);
988 
989  auto flatBufferLayer = serializer::CreateResizeLayer(m_flatBufferBuilder,
990  flatBufferBaseLayer,
991  flatBufferDescriptor);
992 
993  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_ResizeLayer);
994 }
995 
996 void SerializerStrategy::SerializeSliceLayer(const armnn::IConnectableLayer* layer,
997  const armnn::SliceDescriptor& sliceDescriptor,
998  const char* name)
999 {
1000  IgnoreUnused(name);
1001 
1002  auto fbSliceBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Slice);
1003  auto fbSliceDescriptor = CreateSliceDescriptor(m_flatBufferBuilder,
1004  m_flatBufferBuilder.CreateVector(sliceDescriptor.m_Begin),
1005  m_flatBufferBuilder.CreateVector(sliceDescriptor.m_Size));
1006 
1007  auto fbSliceLayer = serializer::CreateSliceLayer(m_flatBufferBuilder, fbSliceBaseLayer, fbSliceDescriptor);
1008 
1009  CreateAnyLayer(fbSliceLayer.o, serializer::Layer::Layer_SliceLayer);
1010 }
1011 
1012 // Build FlatBuffer for Softmax Layer
1013 void SerializerStrategy::SerializeSoftmaxLayer(const armnn::IConnectableLayer* layer,
1014  const armnn::SoftmaxDescriptor& softmaxDescriptor,
1015  const char* name)
1016 {
1017  IgnoreUnused(name);
1018 
1019  // Create FlatBuffer BaseLayer
1020  auto flatBufferSoftmaxBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Softmax);
1021 
1022  // Create the FlatBuffer SoftmaxDescriptor
1023  auto flatBufferSoftmaxDesc =
1024  serializer::CreateSoftmaxDescriptor(m_flatBufferBuilder,
1025  softmaxDescriptor.m_Beta,
1026  softmaxDescriptor.m_Axis);
1027 
1028  // Create the FlatBuffer SoftmaxLayer
1029  auto flatBufferSoftmaxLayer =
1030  serializer::CreateSoftmaxLayer(m_flatBufferBuilder,
1031  flatBufferSoftmaxBaseLayer,
1032  flatBufferSoftmaxDesc);
1033 
1034  CreateAnyLayer(flatBufferSoftmaxLayer.o, serializer::Layer::Layer_SoftmaxLayer);
1035 }
1036 
1037 void SerializerStrategy::SerializePooling2dLayer(const armnn::IConnectableLayer* layer,
1038  const armnn::Pooling2dDescriptor& pooling2dDescriptor,
1039  const char* name)
1040 {
1041  IgnoreUnused(name);
1042 
1043  auto fbPooling2dBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Pooling2d);
1044  auto fbPooling2dDescriptor = serializer::CreatePooling2dDescriptor(
1045  m_flatBufferBuilder,
1046  GetFlatBufferPoolingAlgorithm(pooling2dDescriptor.m_PoolType),
1047  pooling2dDescriptor.m_PadLeft,
1048  pooling2dDescriptor.m_PadRight,
1049  pooling2dDescriptor.m_PadTop,
1050  pooling2dDescriptor.m_PadBottom,
1051  pooling2dDescriptor.m_PoolWidth,
1052  pooling2dDescriptor.m_PoolHeight,
1053  pooling2dDescriptor.m_StrideX,
1054  pooling2dDescriptor.m_StrideY,
1055  GetFlatBufferOutputShapeRounding(pooling2dDescriptor.m_OutputShapeRounding),
1056  GetFlatBufferPaddingMethod(pooling2dDescriptor.m_PaddingMethod),
1057  GetFlatBufferDataLayout(pooling2dDescriptor.m_DataLayout));
1058 
1059  auto fbPooling2dLayer = serializer::CreatePooling2dLayer(m_flatBufferBuilder,
1060  fbPooling2dBaseLayer,
1061  fbPooling2dDescriptor);
1062 
1063  CreateAnyLayer(fbPooling2dLayer.o, serializer::Layer::Layer_Pooling2dLayer);
1064 }
1065 
1066 void SerializerStrategy::SerializePooling3dLayer(const armnn::IConnectableLayer* layer,
1067  const armnn::Pooling3dDescriptor& pooling3dDescriptor,
1068  const char* name)
1069 {
1070  IgnoreUnused(name);
1071 
1072  auto fbPooling3dBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Pooling3d);
1073  auto fbPooling3dDescriptor = serializer::CreatePooling3dDescriptor(
1074  m_flatBufferBuilder,
1075  GetFlatBufferPoolingAlgorithm(pooling3dDescriptor.m_PoolType),
1076  pooling3dDescriptor.m_PadLeft,
1077  pooling3dDescriptor.m_PadRight,
1078  pooling3dDescriptor.m_PadTop,
1079  pooling3dDescriptor.m_PadBottom,
1080  pooling3dDescriptor.m_PadFront,
1081  pooling3dDescriptor.m_PadBack,
1082  pooling3dDescriptor.m_PoolWidth,
1083  pooling3dDescriptor.m_PoolHeight,
1084  pooling3dDescriptor.m_PoolDepth,
1085  pooling3dDescriptor.m_StrideX,
1086  pooling3dDescriptor.m_StrideY,
1087  pooling3dDescriptor.m_StrideZ,
1088  GetFlatBufferOutputShapeRounding(pooling3dDescriptor.m_OutputShapeRounding),
1089  GetFlatBufferPaddingMethod(pooling3dDescriptor.m_PaddingMethod),
1090  GetFlatBufferDataLayout(pooling3dDescriptor.m_DataLayout));
1091 
1092  auto fbPooling3dLayer = serializer::CreatePooling3dLayer(m_flatBufferBuilder,
1093  fbPooling3dBaseLayer,
1094  fbPooling3dDescriptor);
1095 
1096  CreateAnyLayer(fbPooling3dLayer.o, serializer::Layer::Layer_Pooling3dLayer);
1097 }
1098 
1099 void SerializerStrategy::SerializePreluLayer(const armnn::IConnectableLayer* layer,
1100  const char* name)
1101 {
1102  IgnoreUnused(name);
1103 
1104  // Create FlatBuffer BaseLayer
1105  auto flatBufferPreluBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Prelu);
1106 
1107  // Create the FlatBuffer PreluLayer
1108  auto flatBufferPreluLayer = serializer::CreatePreluLayer(m_flatBufferBuilder, flatBufferPreluBaseLayer);
1109 
1110  // Add the AnyLayer to the FlatBufferLayers
1111  CreateAnyLayer(flatBufferPreluLayer.o, serializer::Layer::Layer_PreluLayer);
1112 }
1113 
1114 void SerializerStrategy::SerializeQuantizeLayer(const armnn::IConnectableLayer *layer, const char *name)
1115 {
1116  IgnoreUnused(name);
1117 
1118  auto fbQuantizeBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Quantize);
1119  auto fbQuantizeLayer = serializer::CreateQuantizeLayer(m_flatBufferBuilder,
1120  fbQuantizeBaseLayer);
1121  CreateAnyLayer(fbQuantizeLayer.o, serializer::Layer::Layer_QuantizeLayer);
1122 }
1123 
1124 // Build FlatBuffer for FullyConnected Layer
1125 void SerializerStrategy::SerializeFullyConnectedLayer(const armnn::IConnectableLayer* layer,
1126  const armnn::FullyConnectedDescriptor& fullyConnectedDescriptor,
1127  const char*)
1128 {
1129  // Create FlatBuffer BaseLayer
1130  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_FullyConnected);
1131 
1132  // Create FlatBuffer FullyConnectedDescriptor
1133  auto flatBufferDescriptor =
1134  serializer::CreateFullyConnectedDescriptor(m_flatBufferBuilder,
1135  fullyConnectedDescriptor.m_BiasEnabled,
1136  fullyConnectedDescriptor.m_TransposeWeightMatrix,
1137  fullyConnectedDescriptor.m_ConstantWeights);
1138 
1139  // Create FlatBuffer FullyConnectedLayer
1140  auto flatBufferLayer = serializer::CreateFullyConnectedLayer(m_flatBufferBuilder,
1141  flatBufferBaseLayer,
1142  flatBufferDescriptor);
1143 
1144  // Add created FullyConnectedLayer to the FlatBufferLayers
1145  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_FullyConnectedLayer);
1146 }
1147 
1148 // Build FlatBuffer for SpaceToBatchNd Layer
1149 void SerializerStrategy::SerializeSpaceToBatchNdLayer(const armnn::IConnectableLayer* layer,
1150  const armnn::SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor,
1151  const char* name)
1152 {
1153  IgnoreUnused(name);
1154 
1155  // Create FlatBuffer BaseLayer
1156  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_SpaceToBatchNd);
1157 
1158  std::vector<unsigned int> padList;
1159  padList.reserve(spaceToBatchNdDescriptor.m_PadList.size()*2);
1160  for (auto& pad : spaceToBatchNdDescriptor.m_PadList)
1161  {
1162  padList.push_back(pad.first);
1163  padList.push_back(pad.second);
1164  }
1165 
1166  auto flatBufferDescriptor =
1167  CreateSpaceToBatchNdDescriptor(m_flatBufferBuilder,
1168  m_flatBufferBuilder.CreateVector(spaceToBatchNdDescriptor.m_BlockShape),
1169  m_flatBufferBuilder.CreateVector(padList),
1170  GetFlatBufferDataLayout(spaceToBatchNdDescriptor.m_DataLayout));
1171 
1172  auto flatBufferLayer = serializer::CreateSpaceToBatchNdLayer(m_flatBufferBuilder,
1173  flatBufferBaseLayer,
1174  flatBufferDescriptor);
1175 
1176  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_SpaceToBatchNdLayer);
1177 }
1178 
1179 // Build FlatBuffer for SpaceToDepthLayer
1180 void SerializerStrategy::SerializeSpaceToDepthLayer(const armnn::IConnectableLayer* layer,
1181  const armnn::SpaceToDepthDescriptor& spaceToDepthDescriptor,
1182  const char* name)
1183 {
1184  IgnoreUnused(name);
1185 
1186  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_SpaceToDepth);
1187  auto flatBufferDescriptor =
1188  CreateSpaceToDepthDescriptor(m_flatBufferBuilder,
1189  spaceToDepthDescriptor.m_BlockSize,
1190  GetFlatBufferDataLayout(spaceToDepthDescriptor.m_DataLayout));
1191 
1192  auto flatBufferLayer = serializer::CreateSpaceToDepthLayer(m_flatBufferBuilder,
1193  flatBufferBaseLayer,
1194  flatBufferDescriptor);
1195 
1196  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_SpaceToDepthLayer);
1197 }
1198 
1199 // Build FlatBuffer for Splitter Layer
1200 void SerializerStrategy::SerializeSplitterLayer(const armnn::IConnectableLayer* layer,
1201  const armnn::ViewsDescriptor& viewsDescriptor,
1202  const char* name)
1203 {
1204  IgnoreUnused(name);
1205 
1206  // Create FlatBuffer ViewOrigins
1207  std::vector<flatbuffers::Offset<UintVector>> flatBufferViewOrigins;
1208  flatBufferViewOrigins.reserve(viewsDescriptor.GetNumViews());
1209 
1210  for(unsigned int vIdx = 0; vIdx < viewsDescriptor.GetNumViews(); ++vIdx)
1211  {
1212  std::vector<uint32_t> viewOrigin;
1213  viewOrigin.reserve(viewsDescriptor.GetNumDimensions());
1214 
1215  // Copy vector
1216  for(unsigned int dIdx = 0; dIdx < viewsDescriptor.GetNumDimensions(); ++dIdx)
1217  {
1218  viewOrigin.push_back(viewsDescriptor.GetViewOrigin(vIdx)[dIdx]);
1219  }
1220 
1221  flatBufferViewOrigins.push_back(CreateUintVector(m_flatBufferBuilder,
1222  m_flatBufferBuilder.CreateVector(viewOrigin)));
1223  }
1224 
1225  // Create FlatBuffer OriginsDescriptor
1226  auto flatBufferOriginDescriptor = CreateOriginsDescriptor(m_flatBufferBuilder,
1227  viewsDescriptor.GetOrigins().GetConcatAxis(),
1228  viewsDescriptor.GetOrigins().GetNumViews(),
1229  viewsDescriptor.GetOrigins().GetNumDimensions(),
1230  m_flatBufferBuilder.CreateVector(flatBufferViewOrigins));
1231 
1232  // Create FlatBuffer ViewOrigins
1233  std::vector<flatbuffers::Offset<UintVector>> flatBufferViewSizes;
1234  flatBufferViewSizes.reserve(viewsDescriptor.GetNumViews());
1235 
1236  for(unsigned int vIdx = 0; vIdx < viewsDescriptor.GetNumViews(); ++vIdx)
1237  {
1238  std::vector<uint32_t> viewSize;
1239  viewSize.reserve(viewsDescriptor.GetNumDimensions());
1240 
1241  // Copy vector
1242  for(unsigned int dIdx = 0; dIdx < viewsDescriptor.GetNumDimensions(); ++dIdx)
1243  {
1244  viewSize.push_back(viewsDescriptor.GetViewSizes(vIdx)[dIdx]);
1245  }
1246 
1247  flatBufferViewSizes.push_back(CreateUintVector(m_flatBufferBuilder,
1248  m_flatBufferBuilder.CreateVector(viewSize)));
1249  }
1250 
1251  // Create FlatBuffer ViewsDescriptor
1252  auto flatBufferViewsDescriptor = CreateViewsDescriptor(m_flatBufferBuilder,
1253  flatBufferOriginDescriptor,
1254  m_flatBufferBuilder.CreateVector(flatBufferViewSizes));
1255 
1256  // Create FlatBuffer BaseLayer
1257  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Splitter);
1258 
1259  auto flatBufferSplitterLayer = serializer::CreateSplitterLayer(m_flatBufferBuilder,
1260  flatBufferBaseLayer,
1261  flatBufferViewsDescriptor);
1262 
1263  CreateAnyLayer(flatBufferSplitterLayer.o, serializer::Layer::Layer_SplitterLayer);
1264 }
1265 
1266 void SerializerStrategy::SerializeNormalizationLayer(const armnn::IConnectableLayer* layer,
1267  const armnn::NormalizationDescriptor& descriptor,
1268  const char* name)
1269 {
1270  IgnoreUnused(name);
1271 
1272  auto fbNormalizationBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Normalization);
1273 
1274  auto fbNormalizationDescriptor = serializer::CreateNormalizationDescriptor(
1275  m_flatBufferBuilder,
1276  GetFlatBufferNormalizationAlgorithmChannel(descriptor.m_NormChannelType),
1277  GetFlatBufferNormalizationAlgorithmMethod(descriptor.m_NormMethodType),
1278  descriptor.m_NormSize,
1279  descriptor.m_Alpha,
1280  descriptor.m_Beta,
1281  descriptor.m_K,
1282  GetFlatBufferDataLayout(descriptor.m_DataLayout));
1283 
1284  auto flatBufferLayer = serializer::CreateNormalizationLayer(m_flatBufferBuilder,
1285  fbNormalizationBaseLayer,
1286  fbNormalizationDescriptor);
1287 
1288  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_NormalizationLayer);
1289 }
1290 
1291 void SerializerStrategy::SerializeShapeLayer(const armnn::IConnectableLayer* layer,
1292  const char* name)
1293 {
1294  IgnoreUnused(name);
1295 
1296  auto shapeBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Shape);
1297  auto shapeLayer = serializer::CreateShapeLayer(m_flatBufferBuilder, shapeBaseLayer);
1298 
1299  CreateAnyLayer(shapeLayer.o, serializer::Layer::Layer_ShapeLayer);
1300 }
1301 
1302 void SerializerStrategy::SerializeStackLayer(const armnn::IConnectableLayer* layer,
1303  const armnn::StackDescriptor& stackDescriptor,
1304  const char* name)
1305 {
1306  IgnoreUnused(name);
1307 
1308  auto stackBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Stack);
1309 
1310  std::vector<unsigned int> inputShape;
1311  for (unsigned int i =0; i < stackDescriptor.m_InputShape.GetNumDimensions(); i++)
1312  {
1313  inputShape.push_back(stackDescriptor.m_InputShape[i]);
1314  }
1315 
1316  auto flatBufferStackDescriptor = CreateStackDescriptor(m_flatBufferBuilder,
1317  stackDescriptor.m_Axis,
1318  stackDescriptor.m_NumInputs,
1319  m_flatBufferBuilder.CreateVector(inputShape));
1320 
1321  auto stackLayer = serializer::CreateStackLayer(m_flatBufferBuilder, stackBaseLayer, flatBufferStackDescriptor);
1322  CreateAnyLayer(stackLayer.o, serializer::Layer::Layer_StackLayer);
1323 }
1324 
1325 void SerializerStrategy::SerializeStandInLayer(const armnn::IConnectableLayer *layer,
1326  const armnn::StandInDescriptor& standInDescriptor,
1327  const char *name)
1328 {
1329  IgnoreUnused(name);
1330 
1331  auto fbDescriptor = serializer::CreateStandInDescriptor(m_flatBufferBuilder,
1332  standInDescriptor.m_NumInputs,
1333  standInDescriptor.m_NumOutputs);
1334 
1335  auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_StandIn);
1336  auto fbLayer = serializer::CreateStandInLayer(m_flatBufferBuilder, fbBaseLayer, fbDescriptor);
1337 
1338  CreateAnyLayer(fbLayer.o, serializer::Layer::Layer_StandInLayer);
1339 }
1340 
1341 void SerializerStrategy::SerializeStridedSliceLayer(const armnn::IConnectableLayer* layer,
1342  const armnn::StridedSliceDescriptor& stridedSliceDescriptor,
1343  const char* name)
1344 {
1345  IgnoreUnused(name);
1346 
1347  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_StridedSlice);
1348 
1349  auto flatBufferDescriptor =
1350  CreateStridedSliceDescriptor(m_flatBufferBuilder,
1351  m_flatBufferBuilder.CreateVector(stridedSliceDescriptor.m_Begin),
1352  m_flatBufferBuilder.CreateVector(stridedSliceDescriptor.m_End),
1353  m_flatBufferBuilder.CreateVector(stridedSliceDescriptor.m_Stride),
1354  stridedSliceDescriptor.m_BeginMask,
1355  stridedSliceDescriptor.m_EndMask,
1356  stridedSliceDescriptor.m_ShrinkAxisMask,
1357  stridedSliceDescriptor.m_EllipsisMask,
1358  stridedSliceDescriptor.m_NewAxisMask,
1359  GetFlatBufferDataLayout(stridedSliceDescriptor.m_DataLayout));
1360 
1361  auto flatBufferLayer = serializer::CreateStridedSliceLayer(m_flatBufferBuilder,
1362  flatBufferBaseLayer,
1363  flatBufferDescriptor);
1364 
1365  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_StridedSliceLayer);
1366 }
1367 
1368 void SerializerStrategy::SerializeSubtractionLayer(const armnn::IConnectableLayer* layer, const char* name)
1369 {
1370  IgnoreUnused(name);
1371 
1372  auto fbSubtractionBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Subtraction);
1373  auto fbSubtractionLayer = serializer::CreateSubtractionLayer(m_flatBufferBuilder, fbSubtractionBaseLayer);
1374 
1375  CreateAnyLayer(fbSubtractionLayer.o, serializer::Layer::Layer_SubtractionLayer);
1376 }
1377 
1378 void SerializerStrategy::SerializeSwitchLayer(const armnn::IConnectableLayer* layer, const char* name)
1379 {
1380  IgnoreUnused(name);
1381 
1382  auto fbSwitchBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Switch);
1383  auto fbSwitchLayer = serializer::CreateSwitchLayer(m_flatBufferBuilder, fbSwitchBaseLayer);
1384 
1385  CreateAnyLayer(fbSwitchLayer.o, serializer::Layer::Layer_SwitchLayer);
1386 }
1387 
1388 void SerializerStrategy::SerializeTransposeConvolution2dLayer(
1389  const armnn::IConnectableLayer* layer,
1390  const armnn::TransposeConvolution2dDescriptor& descriptor,
1391  const std::vector<armnn::ConstTensor>& constants,
1392  const char* name)
1393 {
1394  IgnoreUnused(name);
1395 
1396  const armnn::ConstTensor& weights = constants.at(0);
1397 
1398  auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_TransposeConvolution2d);
1399  auto fbDescriptor = CreateTransposeConvolution2dDescriptor(m_flatBufferBuilder,
1400  descriptor.m_PadLeft,
1401  descriptor.m_PadRight,
1402  descriptor.m_PadTop,
1403  descriptor.m_PadBottom,
1404  descriptor.m_StrideX,
1405  descriptor.m_StrideY,
1406  descriptor.m_BiasEnabled,
1407  GetFlatBufferDataLayout(descriptor.m_DataLayout));
1408 
1409  // weights & biases
1410  auto fbWeightsConstTensorInfo = CreateConstTensorInfo(weights);
1411  flatbuffers::Offset<serializer::ConstTensor> fbBiasesConstTensorInfo;
1412  if (constants.size() > 1)
1413  {
1414  const armnn::ConstTensor& biases = constants.at(1);
1415  fbBiasesConstTensorInfo = CreateConstTensorInfo(biases);
1416  }
1417 
1418  auto fbLayer = CreateTransposeConvolution2dLayer(m_flatBufferBuilder,
1419  fbBaseLayer,
1420  fbDescriptor,
1421  fbWeightsConstTensorInfo,
1422  fbBiasesConstTensorInfo);
1423 
1424  CreateAnyLayer(fbLayer.o, serializer::Layer::Layer_TransposeConvolution2dLayer);
1425 }
1426 
1427 void SerializerStrategy::SerializeTransposeLayer(const armnn::IConnectableLayer* layer,
1428  const armnn::TransposeDescriptor& descriptor,
1429  const char* name)
1430 {
1431  IgnoreUnused(name);
1432 
1433  // Create FlatBuffer BaseLayer
1434  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Transpose);
1435 
1436  std::vector<unsigned int> dimMappings;
1437  for (unsigned int i=0; i<descriptor.m_DimMappings.GetSize(); ++i)
1438  {
1439  dimMappings.push_back(descriptor.m_DimMappings[i]);
1440  }
1441 
1442  auto flatBufferDesc = serializer::CreateTransposeDescriptor(m_flatBufferBuilder,
1443  m_flatBufferBuilder.CreateVector(dimMappings));
1444 
1445  // Create the FlatBuffer TransposeLayer
1446  auto flatBufferLayer = serializer::CreateTransposeLayer(m_flatBufferBuilder,
1447  flatBufferBaseLayer,
1448  flatBufferDesc);
1449 
1450  // Add the AnyLayer to the FlatBufferLayers
1451  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_TransposeLayer);
1452 }
1453 
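// QLstm is the quantised LSTM variant: unlike the float LSTM descriptor above, its
// descriptor carries cell/projection clipping values plus the intermediate scales and
// the hidden-state zero point and scale needed by the quantised kernels.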
1454 void SerializerStrategy::SerializeQLstmLayer(const armnn::IConnectableLayer* layer,
1455  const armnn::QLstmDescriptor& descriptor,
1456  const std::vector<armnn::ConstTensor>& constants,
1457  const char* name)
1458 {
1459  IgnoreUnused(name);
1460 
1461  auto fbQLstmBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_QLstm);
1462 
1463  auto fbQLstmDescriptor = serializer::CreateQLstmDescriptor(
1464  m_flatBufferBuilder,
1465  descriptor.m_CifgEnabled,
1466  descriptor.m_PeepholeEnabled,
1467  descriptor.m_ProjectionEnabled,
1468  descriptor.m_LayerNormEnabled,
1469  descriptor.m_CellClip,
1470  descriptor.m_ProjectionClip,
1471  descriptor.m_InputIntermediateScale,
1472  descriptor.m_ForgetIntermediateScale,
1473  descriptor.m_CellIntermediateScale,
1474  descriptor.m_OutputIntermediateScale,
1475  descriptor.m_HiddenStateZeroPoint,
1476  descriptor.m_HiddenStateScale
1477  );
1478 
1479  // Index for constants vector
1480  std::size_t i = 0;
1481 
1482  // Mandatory params
1483  auto inputToForgetWeights = CreateConstTensorInfo(constants[i++]); //InputToForgetWeights
1484  auto inputToCellWeights = CreateConstTensorInfo(constants[i++]); //InputToCellWeights
1485  auto inputToOutputWeights = CreateConstTensorInfo(constants[i++]); //InputToOutputWeights
1486  auto recurrentToForgetWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToForgetWeights
1487  auto recurrentToCellWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToCellWeights
1488  auto recurrentToOutputWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToOutputWeights
1489  auto forgetGateBias = CreateConstTensorInfo(constants[i++]); //ForgetGateBias
1490  auto cellBias = CreateConstTensorInfo(constants[i++]); //CellBias
1491  auto outputGateBias = CreateConstTensorInfo(constants[i++]); //OutputGateBias
1492 
1493  // CIFG
1494  flatbuffers::Offset<serializer::ConstTensor> inputToInputWeights;
1495  flatbuffers::Offset<serializer::ConstTensor> recurrentToInputWeights;
1496  flatbuffers::Offset<serializer::ConstTensor> inputGateBias;
1497 
1498  if (!descriptor.m_CifgEnabled)
1499  {
1500  inputToInputWeights = CreateConstTensorInfo(constants[i++]); //InputToInputWeights
1501  recurrentToInputWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToInputWeights
1502  inputGateBias = CreateConstTensorInfo(constants[i++]); //InputGateBias
1503  }
1504 
1505  // Peephole
1506  flatbuffers::Offset<serializer::ConstTensor> cellToInputWeights;
1507  flatbuffers::Offset<serializer::ConstTensor> cellToForgetWeights;
1508  flatbuffers::Offset<serializer::ConstTensor> cellToOutputWeights;
1509 
1510  if (descriptor.m_PeepholeEnabled)
1511  {
1512  if (!descriptor.m_CifgEnabled)
1513  {
1514  cellToInputWeights = CreateConstTensorInfo(constants[i++]); //CellToInputWeights
1515  }
1516  cellToForgetWeights = CreateConstTensorInfo(constants[i++]); //CellToForgetWeights
1517  cellToOutputWeights = CreateConstTensorInfo(constants[i++]); //CellToOutputWeights
1518  }
1519 
1520  // Projection
1521  flatbuffers::Offset<serializer::ConstTensor> projectionWeights;
1522  flatbuffers::Offset<serializer::ConstTensor> projectionBias;
1523 
1524  if (descriptor.m_ProjectionEnabled)
1525  {
1526  projectionWeights = CreateConstTensorInfo(constants[i++]); //ProjectionWeights
1527  projectionBias = CreateConstTensorInfo(constants[i++]); //ProjectionBias
1528  }
1529 
1530  // Layer norm
1531  flatbuffers::Offset<serializer::ConstTensor> inputLayerNormWeights;
1532  flatbuffers::Offset<serializer::ConstTensor> forgetLayerNormWeights;
1533  flatbuffers::Offset<serializer::ConstTensor> cellLayerNormWeights;
1534  flatbuffers::Offset<serializer::ConstTensor> outputLayerNormWeights;
1535 
1536  if (descriptor.m_LayerNormEnabled)
1537  {
1538  if (!descriptor.m_CifgEnabled)
1539  {
1540  inputLayerNormWeights = CreateConstTensorInfo(constants[i++]); //InputLayerNormWeights
1541  }
1542  forgetLayerNormWeights = CreateConstTensorInfo(constants[i++]); //ForgetLayerNormWeights
1543  cellLayerNormWeights = CreateConstTensorInfo(constants[i++]); //CellLayerNormWeights
1544  outputLayerNormWeights = CreateConstTensorInfo(constants[i++]); //OutputLayerNormWeights
1545  }
1546 
1547  auto fbQLstmParams = serializer::CreateQLstmInputParams(
1548  m_flatBufferBuilder,
1549  inputToForgetWeights,
1550  inputToCellWeights,
1551  inputToOutputWeights,
1552  recurrentToForgetWeights,
1553  recurrentToCellWeights,
1554  recurrentToOutputWeights,
1555  forgetGateBias,
1556  cellBias,
1557  outputGateBias,
1558  inputToInputWeights,
1559  recurrentToInputWeights,
1560  inputGateBias,
1561  projectionWeights,
1562  projectionBias,
1563  cellToInputWeights,
1564  cellToForgetWeights,
1565  cellToOutputWeights,
1566  inputLayerNormWeights,
1567  forgetLayerNormWeights,
1568  cellLayerNormWeights,
1569  outputLayerNormWeights);
1570 
1571  auto fbQLstmLayer = serializer::CreateQLstmLayer(
1572  m_flatBufferBuilder,
1573  fbQLstmBaseLayer,
1574  fbQLstmDescriptor,
1575  fbQLstmParams);
1576 
1577  CreateAnyLayer(fbQLstmLayer.o, serializer::Layer::Layer_QLstmLayer);
1578 }
1579 
1580 void SerializerStrategy::SerializeQuantizedLstmLayer(const armnn::IConnectableLayer* layer,
1581  const std::vector<armnn::ConstTensor>& constants,
1582  const char* name)
1583 {
1584  IgnoreUnused(name);
1585 
1586  auto fbQuantizedLstmBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_QuantizedLstm);
1587 
1588  // index for constants vector
1589  size_t i = 0;
1590 
1591  // Get input parameters
1592  auto inputToInputWeights = CreateConstTensorInfo(constants[i++]);
1593  auto inputToForgetWeights = CreateConstTensorInfo(constants[i++]);
1594  auto inputToCellWeights = CreateConstTensorInfo(constants[i++]);
1595  auto inputToOutputWeights = CreateConstTensorInfo(constants[i++]);
1596 
1597  auto recurrentToInputWeights = CreateConstTensorInfo(constants[i++]);
1598  auto recurrentToForgetWeights = CreateConstTensorInfo(constants[i++]);
1599  auto recurrentToCellWeights = CreateConstTensorInfo(constants[i++]);
1600  auto recurrentToOutputWeights = CreateConstTensorInfo(constants[i++]);
1601 
1602  auto inputGateBias = CreateConstTensorInfo(constants[i++]);
1603  auto forgetGateBias = CreateConstTensorInfo(constants[i++]);
1604  auto cellBias = CreateConstTensorInfo(constants[i++]);
1605  auto outputGateBias = CreateConstTensorInfo(constants[i++]);
1606 
1607  auto fbQuantizedLstmParams = serializer::CreateQuantizedLstmInputParams(
1608  m_flatBufferBuilder,
1609  inputToInputWeights,
1610  inputToForgetWeights,
1611  inputToCellWeights,
1612  inputToOutputWeights,
1613  recurrentToInputWeights,
1614  recurrentToForgetWeights,
1615  recurrentToCellWeights,
1616  recurrentToOutputWeights,
1617  inputGateBias,
1618  forgetGateBias,
1619  cellBias,
1620  outputGateBias);
1621 
1622  auto fbQuantizedLstmLayer = serializer::CreateQuantizedLstmLayer(
1623  m_flatBufferBuilder,
1624  fbQuantizedLstmBaseLayer,
1625  fbQuantizedLstmParams);
1626 
1627  CreateAnyLayer(fbQuantizedLstmLayer.o, serializer::Layer::Layer_QuantizedLstmLayer);
1628 }
1629 
1630 void SerializerStrategy::SerializeUnidirectionalSequenceLstmLayer(
1631  const armnn::IConnectableLayer* layer,
1632  const armnn::UnidirectionalSequenceLstmDescriptor& descriptor,
1633  const std::vector<armnn::ConstTensor>& constants,
1634  const char* name)
1635 {
1636  IgnoreUnused(name);
1637 
1638  auto fbUnidirectionalSequenceLstmBaseLayer =
1639  CreateLayerBase(layer, serializer::LayerType::LayerType_UnidirectionalSequenceLstm);
1640 
1641  auto fbUnidirectionalSequenceLstmDescriptor = serializer::CreateUnidirectionalSequenceLstmDescriptor(
1642  m_flatBufferBuilder,
1643  descriptor.m_ActivationFunc,
1644  descriptor.m_ClippingThresCell,
1645  descriptor.m_ClippingThresProj,
1646  descriptor.m_CifgEnabled,
1647  descriptor.m_PeepholeEnabled,
1648  descriptor.m_ProjectionEnabled,
1649  descriptor.m_LayerNormEnabled,
1650  descriptor.m_TimeMajor);
1651 
1652  // Index for constants vector
1653  std::size_t i = 0;
1654 
1655  // Get mandatory/basic input parameters
1656  auto inputToForgetWeights = CreateConstTensorInfo(constants[i++]); //InputToForgetWeights
1657  auto inputToCellWeights = CreateConstTensorInfo(constants[i++]); //InputToCellWeights
1658  auto inputToOutputWeights = CreateConstTensorInfo(constants[i++]); //InputToOutputWeights
1659  auto recurrentToForgetWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToForgetWeights
1660  auto recurrentToCellWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToCellWeights
1661  auto recurrentToOutputWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToOutputWeights
1662  auto forgetGateBias = CreateConstTensorInfo(constants[i++]); //ForgetGateBias
1663  auto cellBias = CreateConstTensorInfo(constants[i++]); //CellBias
1664  auto outputGateBias = CreateConstTensorInfo(constants[i++]); //OutputGateBias
1665 
1666  // Define optional parameters; these will be set depending on the configuration in the LSTM descriptor
1667  flatbuffers::Offset<serializer::ConstTensor> inputToInputWeights;
1668  flatbuffers::Offset<serializer::ConstTensor> recurrentToInputWeights;
1669  flatbuffers::Offset<serializer::ConstTensor> cellToInputWeights;
1670  flatbuffers::Offset<serializer::ConstTensor> inputGateBias;
1671  flatbuffers::Offset<serializer::ConstTensor> projectionWeights;
1672  flatbuffers::Offset<serializer::ConstTensor> projectionBias;
1673  flatbuffers::Offset<serializer::ConstTensor> cellToForgetWeights;
1674  flatbuffers::Offset<serializer::ConstTensor> cellToOutputWeights;
1675  flatbuffers::Offset<serializer::ConstTensor> inputLayerNormWeights;
1676  flatbuffers::Offset<serializer::ConstTensor> forgetLayerNormWeights;
1677  flatbuffers::Offset<serializer::ConstTensor> cellLayerNormWeights;
1678  flatbuffers::Offset<serializer::ConstTensor> outputLayerNormWeights;
1679 
1680  if (!descriptor.m_CifgEnabled)
1681  {
1682  inputToInputWeights = CreateConstTensorInfo(constants[i++]); //InputToInputWeights
1683  recurrentToInputWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToInputWeights
1684  inputGateBias = CreateConstTensorInfo(constants[i++]); //InputGateBias
1685  }
1686 
1687  if (descriptor.m_PeepholeEnabled)
1688  {
1689  if (!descriptor.m_CifgEnabled)
1690  {
1691  cellToInputWeights = CreateConstTensorInfo(constants[i++]); //CellToInputWeights
1692  }
1693  cellToForgetWeights = CreateConstTensorInfo(constants[i++]); //CellToForgetWeights
1694  cellToOutputWeights = CreateConstTensorInfo(constants[i++]); //CellToOutputWeights
1695  }
1696 
1697  if (descriptor.m_ProjectionEnabled)
1698  {
1699  projectionWeights = CreateConstTensorInfo(constants[i++]); //ProjectionWeights
1700  projectionBias = CreateConstTensorInfo(constants[i++]); //ProjectionBias
1701  }
1702 
1703  if (descriptor.m_LayerNormEnabled)
1704  {
1705  if (!descriptor.m_CifgEnabled)
1706  {
1707  inputLayerNormWeights = CreateConstTensorInfo(constants[i++]); //InputLayerNormWeights
1708  }
1709  forgetLayerNormWeights = CreateConstTensorInfo(constants[i++]); //ForgetLayerNormWeights
1710  cellLayerNormWeights = CreateConstTensorInfo(constants[i++]); //CellLayerNormWeights
1711  outputLayerNormWeights = CreateConstTensorInfo(constants[i++]); //OutputLayerNormWeights
1712  }
1713 
1714  auto fbUnidirectionalSequenceLstmParams = serializer::CreateLstmInputParams(
1715  m_flatBufferBuilder,
1716  inputToForgetWeights,
1717  inputToCellWeights,
1718  inputToOutputWeights,
1719  recurrentToForgetWeights,
1720  recurrentToCellWeights,
1721  recurrentToOutputWeights,
1722  forgetGateBias,
1723  cellBias,
1724  outputGateBias,
1725  inputToInputWeights,
1726  recurrentToInputWeights,
1727  cellToInputWeights,
1728  inputGateBias,
1729  projectionWeights,
1730  projectionBias,
1731  cellToForgetWeights,
1732  cellToOutputWeights,
1733  inputLayerNormWeights,
1734  forgetLayerNormWeights,
1735  cellLayerNormWeights,
1736  outputLayerNormWeights);
1737 
1738  auto fbUnidirectionalSequenceLstmLayer = serializer::CreateUnidirectionalSequenceLstmLayer(
1739  m_flatBufferBuilder,
1740  fbUnidirectionalSequenceLstmBaseLayer,
1741  fbUnidirectionalSequenceLstmDescriptor,
1742  fbUnidirectionalSequenceLstmParams);
1743 
1744  CreateAnyLayer(fbUnidirectionalSequenceLstmLayer.o, serializer::Layer::Layer_UnidirectionalSequenceLstmLayer);
1745 }
1746 
1747 fb::Offset<serializer::LayerBase> SerializerStrategy::CreateLayerBase(const IConnectableLayer* layer,
1748  const serializer::LayerType layerType)
1749 {
1750 
1751  uint32_t fbIndex = GetSerializedId(layer->GetGuid());
1752 
1753  std::vector<fb::Offset<serializer::InputSlot>> inputSlots = CreateInputSlots(layer);
1754  std::vector<fb::Offset<serializer::OutputSlot>> outputSlots = CreateOutputSlots(layer);
1755 
1756  return serializer::CreateLayerBase(m_flatBufferBuilder,
1757  fbIndex,
1758  m_flatBufferBuilder.CreateString(layer->GetName()),
1759  layerType,
1760  m_flatBufferBuilder.CreateVector(inputSlots),
1761  m_flatBufferBuilder.CreateVector(outputSlots));
1762 }
1763 
1764 void SerializerStrategy::CreateAnyLayer(const flatbuffers::Offset<void>& layer, const serializer::Layer serializerLayer)
1765 {
1766 
1767  auto anyLayer = armnnSerializer::CreateAnyLayer(m_flatBufferBuilder, serializerLayer, layer);
1768  m_serializedLayers.push_back(anyLayer);
1769 }
1770 
1771 template <typename T>
1772 flatbuffers::Offset<flatbuffers::Vector<T>> SerializerStrategy::CreateDataVector(const void* memory, unsigned int size)
1773 {
1774  const T* buffer = reinterpret_cast<const T*>(memory);
1775  std::vector<T> vector(buffer, buffer + (size / sizeof(T)));
1776  auto fbVector = m_flatBufferBuilder.CreateVector(vector);
1777  return fbVector;
1778 }
1779 
1780 flatbuffers::Offset<TensorInfo> SerializerStrategy::CreateTensorInfo(const armnn::TensorInfo& tensorInfo)
1781 {
1782  // Get the dimensions
1783  std::vector<unsigned int> shape;
1784  std::vector<bool> specificity;
1785  // This assumes that the TensorShape constructors have ensured that the size of m_DimensionsSpecificity
1786  // matches the size of dimensions.
1787  for(unsigned int dim = 0; dim < tensorInfo.GetShape().GetNumDimensions(); ++dim)
1788  {
1789  specificity.push_back(tensorInfo.GetShape().GetDimensionSpecificity(dim));
1790 
1791  if (tensorInfo.GetShape().GetDimensionSpecificity(dim))
1792  {
1793  shape.push_back(tensorInfo.GetShape()[dim]);
1794  }
1795  else
1796  {
1797  shape.push_back(0);
1798  }
1799  }
1800 
1801  if (tensorInfo.HasPerAxisQuantization())
1802  {
1803  // Create FlatBuffer TensorInfo
1804  auto flatBufferTensorInfo =
1805  serializer::CreateTensorInfo(m_flatBufferBuilder,
1806  m_flatBufferBuilder.CreateVector(shape),
1807  GetFlatBufferDataType(tensorInfo.GetDataType()),
1808  tensorInfo.GetQuantizationScales()[0],
1809  tensorInfo.GetQuantizationOffset(),
1810  m_flatBufferBuilder.CreateVector(tensorInfo.GetQuantizationScales()),
1811  tensorInfo.GetQuantizationDim().value(),
1812  static_cast<unsigned int>
1813  (tensorInfo.GetShape().GetDimensionality()),
1814  m_flatBufferBuilder.CreateVector(specificity));
1815  return flatBufferTensorInfo;
1816  }
1817 
1818  // Create FlatBuffer TensorInfo
1819  auto flatBufferTensorInfo = serializer::CreateTensorInfo(m_flatBufferBuilder,
1820  m_flatBufferBuilder.CreateVector(shape),
1821  GetFlatBufferDataType(tensorInfo.GetDataType()),
1822  tensorInfo.GetQuantizationScale(),
1823  tensorInfo.GetQuantizationOffset(),
1824  0,
1825  0,
1826  static_cast<unsigned int>
1827  (tensorInfo.GetShape().GetDimensionality()),
1828  m_flatBufferBuilder.CreateVector(specificity));
1829  return flatBufferTensorInfo;
1830 }
1831 
1832 flatbuffers::Offset<serializer::ConstTensor>
1833  SerializerStrategy::CreateConstTensorInfo(const armnn::ConstTensor& constTensor)
1834 {
1835  armnn::TensorInfo tensorInfo = constTensor.GetInfo();
1836 
1837  flatbuffers::Offset<void> fbPayload;
1838 
1839  switch (tensorInfo.GetDataType())
1840  {
1841  case armnn::DataType::Signed64:
1842  {
1843  auto fbVector = CreateDataVector<int64_t>(constTensor.GetMemoryArea(), constTensor.GetNumBytes());
1844  flatbuffers::Offset<serializer::LongData> flatBuffersData = serializer::CreateLongData(
1845  m_flatBufferBuilder,
1846  fbVector);
1847  fbPayload = flatBuffersData.o;
1848  break;
1849  }
1850  case armnn::DataType::Float32:
1851  case armnn::DataType::Signed32:
1852  {
1853  auto fbVector = CreateDataVector<int32_t>(constTensor.GetMemoryArea(), constTensor.GetNumBytes());
1854  flatbuffers::Offset<serializer::IntData> flatBuffersData = serializer::CreateIntData(
1855  m_flatBufferBuilder,
1856  fbVector);
1857  fbPayload = flatBuffersData.o;
1858  break;
1859  }
1860  case armnn::DataType::Float16:
1861  case armnn::DataType::BFloat16:
1862  case armnn::DataType::QSymmS16:
1863  {
1864  auto fbVector = CreateDataVector<int16_t>(constTensor.GetMemoryArea(), constTensor.GetNumBytes());
1865  flatbuffers::Offset<serializer::ShortData> flatBuffersData = serializer::CreateShortData(
1866  m_flatBufferBuilder,
1867  fbVector);
1868  fbPayload = flatBuffersData.o;
1869  break;
1870  }
1871  case armnn::DataType::QSymmS8:
1872  case armnn::DataType::QAsymmS8:
1873  case armnn::DataType::QAsymmU8:
1874  case armnn::DataType::Boolean:
1875  default:
1876  {
1877  auto fbVector = CreateDataVector<int8_t>(constTensor.GetMemoryArea(), constTensor.GetNumBytes());
1878  flatbuffers::Offset<serializer::ByteData> flatBuffersData = serializer::CreateByteData(
1879  m_flatBufferBuilder,
1880  fbVector);
1881  fbPayload = flatBuffersData.o;
1882  }
1883  }
1884  flatbuffers::Offset<serializer::ConstTensor> flatBufferConstTensor = serializer::CreateConstTensor(
1885  m_flatBufferBuilder,
1886  CreateTensorInfo(tensorInfo),
1887  GetFlatBufferConstTensorData(tensorInfo.GetDataType()),
1888  fbPayload);
1889  return flatBufferConstTensor;
1890 }
1891 
1892 flatbuffers::Offset<armnnSerializer::FeatureCompatibilityVersions> SerializerStrategy::GetVersionTable()
1893 {
1894  flatbuffers::Offset<armnnSerializer::FeatureCompatibilityVersions> versionsTable =
1895  serializer::CreateFeatureCompatibilityVersions(
1896  m_flatBufferBuilder,
1897  1, // Binding ids scheme version
1898  1, // Weights layout scheme version
1899  1 // Constant tensors as inputs version
1900  );
1901  return versionsTable;
1902 }
1903 
1904 std::vector<fb::Offset<serializer::InputSlot>>
1905  SerializerStrategy::CreateInputSlots(const armnn::IConnectableLayer* layer)
1906 {
1907  std::vector<fb::Offset<serializer::InputSlot>> inputSlots;
1908 
1909  // Get the InputSlots
1910  for (unsigned int slotIndex = 0; slotIndex<layer->GetNumInputSlots(); ++slotIndex)
1911  {
1912  const IInputSlot& inputSlot = layer->GetInputSlot(slotIndex);
1913 
1914  // Get the Connection for the InputSlot
1915  const IOutputSlot* connection = inputSlot.GetConnection();
1916 
1917  // Create FlatBuffer Connection
1918  serializer::Connection conn(GetSerializedId(inputSlot.GetConnection()->GetOwningLayerGuid()),
1919  connection->CalculateIndexOnOwner());
1920  // Create FlatBuffer InputSlot
1921  inputSlots.push_back(serializer::CreateInputSlot(m_flatBufferBuilder, slotIndex, &conn));
1922  }
1923  return inputSlots;
1924 }
1925 
1926 std::vector<fb::Offset<serializer::OutputSlot>>
1927  SerializerStrategy::CreateOutputSlots(const armnn::IConnectableLayer* layer)
1928 {
1929  std::vector<fb::Offset<serializer::OutputSlot>> outputSlots;
1930 
1931  // Get the OutputSlots
1932  for (unsigned int slotIndex = 0; slotIndex < layer->GetNumOutputSlots(); ++slotIndex)
1933  {
1934  const IOutputSlot& outputSlot = layer->GetOutputSlot(slotIndex);
1935  const armnn::TensorInfo& tensorInfo = outputSlot.GetTensorInfo();
1936 
1937  // Create FlatBuffer Outputslot
1938  outputSlots.push_back(serializer::CreateOutputSlot(m_flatBufferBuilder,
1939  slotIndex,
1940  CreateTensorInfo(tensorInfo)));
1941  }
1942  return outputSlots;
1943 }
1944 
1945 void SerializerStrategy::ExecuteStrategy(const armnn::IConnectableLayer* layer,
1946  const BaseDescriptor& descriptor,
1947  const std::vector<armnn::ConstTensor>& constants,
1948  const char* name,
1949  const armnn::LayerBindingId id)
1950 {
1951  IgnoreUnused(constants);
1952 
1953  switch (layer->GetType())
1954  {
1955  case armnn::LayerType::Activation :
1956  {
1957  const armnn::ActivationDescriptor& layerDescriptor =
1958  static_cast<const armnn::ActivationDescriptor&>(descriptor);
1959  SerializeActivationLayer(layer, layerDescriptor, name);
1960  break;
1961  }
1962  case armnn::LayerType::Addition :
1963  {
1964  SerializeAdditionLayer(layer, name);
1965  break;
1966  }
1967  case armnn::LayerType::ArgMinMax :
1968  {
1969  const armnn::ArgMinMaxDescriptor& layerDescriptor =
1970  static_cast<const armnn::ArgMinMaxDescriptor&>(descriptor);
1971  SerializeArgMinMaxLayer(layer, layerDescriptor, name);
1972  break;
1973  }
1974  case armnn::LayerType::BatchNormalization :
1975  {
1976  const armnn::BatchNormalizationDescriptor& layerDescriptor =
1977  static_cast<const armnn::BatchNormalizationDescriptor&>(descriptor);
1978  SerializeBatchNormalizationLayer(layer,
1979  layerDescriptor,
1980  constants,
1981  name);
1982  break;
1983  }
1984  case armnn::LayerType::BatchToSpaceNd :
1985  {
1986  const armnn::BatchToSpaceNdDescriptor& layerDescriptor =
1987  static_cast<const armnn::BatchToSpaceNdDescriptor&>(descriptor);
1988  SerializeBatchToSpaceNdLayer(layer,
1989  layerDescriptor,
1990  name);
1991  break;
1992  }
1993  case armnn::LayerType::Cast :
1994  {
1995  SerializeCastLayer(layer, name);
1996  break;
1997  }
1998  case armnn::LayerType::ChannelShuffle :
1999  {
2000  const armnn::ChannelShuffleDescriptor& layerDescriptor =
2001  static_cast<const armnn::ChannelShuffleDescriptor&>(descriptor);
2002  SerializeChannelShuffleLayer(layer,
2003  layerDescriptor,
2004  name);
2005  break;
2006  }
2007  case armnn::LayerType::Comparison :
2008  {
2009  const armnn::ComparisonDescriptor& layerDescriptor =
2010  static_cast<const armnn::ComparisonDescriptor&>(descriptor);
2011  SerializeComparisonLayer(layer,
2012  layerDescriptor,
2013  name);
2014  break;
2015  }
2016  case armnn::LayerType::Concat :
2017  {
2018  const armnn::ConcatDescriptor& layerDescriptor =
2019  static_cast<const armnn::ConcatDescriptor&>(descriptor);
2020  SerializeConcatLayer(layer,
2021  layerDescriptor,
2022  name);
2023  break;
2024  }
2025  case armnn::LayerType::Constant :
2026  {
2027  SerializeConstantLayer(layer,
2028  constants,
2029  name);
2030  break;
2031  }
2032  case armnn::LayerType::Convolution2d :
2033  {
2034  const armnn::Convolution2dDescriptor& layerDescriptor =
2035  static_cast<const armnn::Convolution2dDescriptor&>(descriptor);
2036  SerializeConvolution2dLayer(layer,
2037  layerDescriptor,
2038  name);
2039  break;
2040  }
2041  case armnn::LayerType::Convolution3d :
2042  {
2043  const armnn::Convolution3dDescriptor& layerDescriptor =
2044  static_cast<const armnn::Convolution3dDescriptor&>(descriptor);
2045  SerializeConvolution3dLayer(layer,
2046  layerDescriptor,
2047  name);
2048  break;
2049  }
2050  case armnn::LayerType::DepthToSpace :
2051  {
2052  const armnn::DepthToSpaceDescriptor& layerDescriptor =
2053  static_cast<const armnn::DepthToSpaceDescriptor&>(descriptor);
2054  SerializeDepthToSpaceLayer(layer,
2055  layerDescriptor,
2056  name);
2057  break;
2058  }
2059  case armnn::LayerType::DepthwiseConvolution2d :
2060  {
2061  const armnn::DepthwiseConvolution2dDescriptor& layerDescriptor =
2062  static_cast<const armnn::DepthwiseConvolution2dDescriptor&>(descriptor);
2063  SerializeDepthwiseConvolution2dLayer(layer,
2064  layerDescriptor,
2065  name);
2066  break;
2067  }
2068  case armnn::LayerType::Dequantize :
2069  {
2070  SerializeDequantizeLayer(layer,
2071  name);
2072  break;
2073  }
2074  case armnn::LayerType::DetectionPostProcess :
2075  {
2076  const armnn::DetectionPostProcessDescriptor& layerDescriptor =
2077  static_cast<const armnn::DetectionPostProcessDescriptor&>(descriptor);
2078  SerializeDetectionPostProcessLayer(layer, layerDescriptor, constants, name);
2079  break;
2080  }
2081  case armnn::LayerType::Division :
2082  {
2083  SerializeDivisionLayer(layer, name);
2084  break;
2085  }
2086  case armnn::LayerType::ElementwiseUnary :
2087  {
2088  const armnn::ElementwiseUnaryDescriptor& layerDescriptor =
2089  static_cast<const armnn::ElementwiseUnaryDescriptor&>(descriptor);
2090  SerializeElementwiseUnaryLayer(layer, layerDescriptor, name);
2091  break;
2092  }
2093  case armnn::LayerType::Fill :
2094  {
2095  const armnn::FillDescriptor& layerDescriptor =
2096  static_cast<const armnn::FillDescriptor&>(descriptor);
2097  SerializeFillLayer(layer, layerDescriptor, name);
2098  break;
2099  }
2100  case armnn::LayerType::Floor :
2101  {
2102  SerializeFloorLayer(layer, name);
2103  break;
2104  }
2105  case armnn::LayerType::FullyConnected :
2106  {
2107  const armnn::FullyConnectedDescriptor& layerDescriptor =
2108  static_cast<const armnn::FullyConnectedDescriptor&>(descriptor);
2109  SerializeFullyConnectedLayer(layer, layerDescriptor, name);
2110  break;
2111  }
2112  case armnn::LayerType::Gather :
2113  {
2114  const armnn::GatherDescriptor& layerDescriptor =
2115  static_cast<const armnn::GatherDescriptor&>(descriptor);
2116  SerializeGatherLayer(layer, layerDescriptor, name);
2117  break;
2118  }
2119  case armnn::LayerType::GatherNd :
2120  {
2121  SerializeGatherNdLayer(layer, name);
2122  break;
2123  }
2124  case armnn::LayerType::Input:
2125  {
2126  SerializeInputLayer(layer, id, name);
2127  break;
2128  }
2129  case armnn::LayerType::InstanceNormalization :
2130  {
2131  const armnn::InstanceNormalizationDescriptor& layerDescriptor =
2132  static_cast<const armnn::InstanceNormalizationDescriptor&>(descriptor);
2133  SerializeInstanceNormalizationLayer(layer, layerDescriptor, name);
2134  break;
2135  }
2136  case armnn::LayerType::L2Normalization :
2137  {
2138  const armnn::L2NormalizationDescriptor& layerDescriptor =
2139  static_cast<const armnn::L2NormalizationDescriptor&>(descriptor);
2140  SerializeL2NormalizationLayer(layer, layerDescriptor, name);
2141  break;
2142  }
2143  case armnn::LayerType::LogicalBinary :
2144  {
2145  const armnn::LogicalBinaryDescriptor& layerDescriptor =
2146  static_cast<const armnn::LogicalBinaryDescriptor&>(descriptor);
2147  SerializeLogicalBinaryLayer(layer, layerDescriptor, name);
2148  break;
2149  }
2150  case armnn::LayerType::LogSoftmax :
2151  {
2152  const armnn::LogSoftmaxDescriptor& layerDescriptor =
2153  static_cast<const armnn::LogSoftmaxDescriptor&>(descriptor);
2154  SerializeLogSoftmaxLayer(layer, layerDescriptor, name);
2155  break;
2156  }
2157  case armnn::LayerType::Lstm :
2158  {
2159  const armnn::LstmDescriptor& layerDescriptor =
2160  static_cast<const armnn::LstmDescriptor&>(descriptor);
2161  SerializeLstmLayer(layer, layerDescriptor, constants, name);
2162  break;
2163  }
2164  case armnn::LayerType::QLstm :
2165  {
2166  const armnn::QLstmDescriptor& layerDescriptor =
2167  static_cast<const armnn::QLstmDescriptor&>(descriptor);
2168  SerializeQLstmLayer(layer, layerDescriptor, constants, name);
2169  break;
2170  }
2171  case armnn::LayerType::Maximum :
2172  {
2173  SerializeMaximumLayer(layer, name);
2174  break;
2175  }
2176  case armnn::LayerType::Mean :
2177  {
2178  const armnn::MeanDescriptor& layerDescriptor =
2179  static_cast<const armnn::MeanDescriptor&>(descriptor);
2180  SerializeMeanLayer(layer, layerDescriptor, name);
2181  break;
2182  }
2183  case armnn::LayerType::Merge :
2184  {
2185  SerializeMergeLayer(layer, name);
2186  break;
2187  }
2188  case armnn::LayerType::Minimum :
2189  {
2190  SerializeMinimumLayer(layer, name);
2191  break;
2192  }
2193  case armnn::LayerType::Multiplication :
2194  {
2195  SerializeMultiplicationLayer(layer, name);
2196  break;
2197  }
2198  case armnn::LayerType::Normalization :
2199  {
2200  const armnn::NormalizationDescriptor& layerDescriptor =
2201  static_cast<const armnn::NormalizationDescriptor&>(descriptor);
2202  SerializeNormalizationLayer(layer, layerDescriptor, name);
2203  break;
2204  }
2205  case armnn::LayerType::Output:
2206  {
2207  SerializeOutputLayer(layer, id, name);
2208  break;
2209  }
2210  case armnn::LayerType::Pad :
2211  {
2212  const armnn::PadDescriptor& layerDescriptor =
2213  static_cast<const armnn::PadDescriptor&>(descriptor);
2214  SerializePadLayer(layer, layerDescriptor, name);
2215  break;
2216  }
2217  case armnn::LayerType::Permute :
2218  {
2219  const armnn::PermuteDescriptor& layerDescriptor =
2220  static_cast<const armnn::PermuteDescriptor&>(descriptor);
2221  SerializePermuteLayer(layer, layerDescriptor, name);
2222  break;
2223  }
2224  case armnn::LayerType::Pooling2d :
2225  {
2226  const armnn::Pooling2dDescriptor& layerDescriptor =
2227  static_cast<const armnn::Pooling2dDescriptor&>(descriptor);
2228  SerializePooling2dLayer(layer, layerDescriptor, name);
2229  break;
2230  }
2231  case armnn::LayerType::Pooling3d :
2232  {
2233  const armnn::Pooling3dDescriptor& layerDescriptor =
2234  static_cast<const armnn::Pooling3dDescriptor&>(descriptor);
2235  SerializePooling3dLayer(layer, layerDescriptor, name);
2236  break;
2237  }
2238  case armnn::LayerType::Prelu :
2239  {
2240  SerializePreluLayer(layer, name);
2241  break;
2242  }
2243  case armnn::LayerType::Quantize :
2244  {
2245  SerializeQuantizeLayer(layer, name);
2246  break;
2247  }
2248  case armnn::LayerType::QuantizedLstm:
2249  SerializeQuantizedLstmLayer(layer, constants, name);
2250  break;
2251  case armnn::LayerType::Reshape:
2252  {
2253  const armnn::ReshapeDescriptor &layerDescriptor =
2254  static_cast<const armnn::ReshapeDescriptor &>(descriptor);
2255  SerializeReshapeLayer(layer, layerDescriptor, name);
2256  break;
2257  }
2258  case armnn::LayerType::Rank:
2259  {
2260  SerializeRankLayer(layer, name);
2261  break;
2262  }
2263  case armnn::LayerType::Reduce:
2264  {
2265  const armnn::ReduceDescriptor& layerDescriptor =
2266  static_cast<const armnn::ReduceDescriptor&>(descriptor);
2267  SerializeReduceLayer(layer, layerDescriptor, name);
2268  break;
2269  }
2270  case armnn::LayerType::Resize :
2271  {
2272  const armnn::ResizeDescriptor& layerDescriptor =
2273  static_cast<const armnn::ResizeDescriptor&>(descriptor);
2274  SerializeResizeLayer(layer, layerDescriptor, name);
2275  break;
2276  }
2277  case armnn::LayerType::Shape:
2278  {
2279  SerializeShapeLayer(layer, name);
2280  break;
2281  }
2282  case armnn::LayerType::Slice :
2283  {
2284  const armnn::SliceDescriptor& layerDescriptor =
2285  static_cast<const armnn::SliceDescriptor&>(descriptor);
2286  SerializeSliceLayer(layer, layerDescriptor, name);
2287  break;
2288  }
2289  case armnn::LayerType::Softmax :
2290  {
2291  const armnn::SoftmaxDescriptor& layerDescriptor =
2292  static_cast<const armnn::SoftmaxDescriptor&>(descriptor);
2293  SerializeSoftmaxLayer(layer, layerDescriptor, name);
2294  break;
2295  }
2296  case armnn::LayerType::SpaceToBatchNd :
2297  {
2298  const armnn::SpaceToBatchNdDescriptor& layerDescriptor =
2299  static_cast<const armnn::SpaceToBatchNdDescriptor&>(descriptor);
2300  SerializeSpaceToBatchNdLayer(layer, layerDescriptor, name);
2301  break;
2302  }
2303  case armnn::LayerType::SpaceToDepth :
2304  {
2305  const armnn::SpaceToDepthDescriptor& layerDescriptor =
2306  static_cast<const armnn::SpaceToDepthDescriptor&>(descriptor);
2307  SerializeSpaceToDepthLayer(layer, layerDescriptor, name);
2308  break;
2309  }
2310  case armnn::LayerType::Splitter :
2311  {
2312  const armnn::SplitterDescriptor& layerDescriptor =
2313  static_cast<const armnn::SplitterDescriptor&>(descriptor);
2314  SerializeSplitterLayer(layer, layerDescriptor, name);
2315  break;
2316  }
2317  case armnn::LayerType::Stack :
2318  {
2319  const armnn::StackDescriptor& layerDescriptor =
2320  static_cast<const armnn::StackDescriptor&>(descriptor);
2321  SerializeStackLayer(layer, layerDescriptor, name);
2322  break;
2323  }
2324  case armnn::LayerType::StandIn :
2325  {
2326  const armnn::StandInDescriptor& layerDescriptor =
2327  static_cast<const armnn::StandInDescriptor&>(descriptor);
2328  SerializeStandInLayer(layer, layerDescriptor, name);
2329  break;
2330  }
2331  case armnn::LayerType::StridedSlice :
2332  {
2333  const armnn::StridedSliceDescriptor& layerDescriptor =
2334  static_cast<const armnn::StridedSliceDescriptor&>(descriptor);
2335  SerializeStridedSliceLayer(layer, layerDescriptor, name);
2336  break;
2337  }
2338  case armnn::LayerType::Subtraction :
2339  {
2340  SerializeSubtractionLayer(layer, name);
2341  break;
2342  }
2343  case armnn::LayerType::Switch :
2344  {
2345  SerializeSwitchLayer(layer, name);
2346  break;
2347  }
2348  case armnn::LayerType::Transpose :
2349  {
2350  const armnn::TransposeDescriptor& layerDescriptor =
2351  static_cast<const armnn::TransposeDescriptor&>(descriptor);
2352  SerializeTransposeLayer(layer, layerDescriptor, name);
2353  break;
2354  }
2355  case armnn::LayerType::TransposeConvolution2d :
2356  {
2357  const armnn::TransposeConvolution2dDescriptor& layerDescriptor =
2358  static_cast<const armnn::TransposeConvolution2dDescriptor&>(descriptor);
2359  SerializeTransposeConvolution2dLayer(layer, layerDescriptor, constants, name);
2360  break;
2361  }
2362  case armnn::LayerType::UnidirectionalSequenceLstm :
2363  {
2364  const armnn::UnidirectionalSequenceLstmDescriptor& layerDescriptor =
2365  static_cast<const armnn::UnidirectionalSequenceLstmDescriptor&>(descriptor);
2366  SerializeUnidirectionalSequenceLstmLayer(layer, layerDescriptor, constants, name);
2367  break;
2368  }
2369  default:
2370  {
2371  throw armnn::Exception(
2372  fmt::format("A layer of unknown type was given to the serializer. Layer name: {}; Layer Id: {}",
2373  layer->GetName(),
2374  id));
2375  }
2376  }
2377 }
2378 
2379 void ISerializer::SerializerImpl::Serialize(const armnn::INetwork& inNetwork)
2380 {
2381  // Iterate through the network
2382  inNetwork.ExecuteStrategy(m_SerializerStrategy);
2383  flatbuffers::FlatBufferBuilder& fbBuilder = m_SerializerStrategy.GetFlatBufferBuilder();
2384 
2385  // Create FlatBuffer SerializedGraph
2386  auto serializedGraph = serializer::CreateSerializedGraph(
2387  fbBuilder,
2388  fbBuilder.CreateVector(m_SerializerStrategy.GetSerializedLayers()),
2389  fbBuilder.CreateVector(m_SerializerStrategy.GetInputIds()),
2390  fbBuilder.CreateVector(m_SerializerStrategy.GetOutputIds()),
2391  m_SerializerStrategy.GetVersionTable());
2392 
2393  // Serialize the graph
2394  fbBuilder.Finish(serializedGraph);
2395 }
2396 
2397 
2398 bool ISerializer::SerializerImpl::SaveSerializedToStream(std::ostream& stream)
2399 {
2400  flatbuffers::FlatBufferBuilder& fbBuilder = m_SerializerStrategy.GetFlatBufferBuilder();
2401 
2402  auto bytesToWrite = armnn::numeric_cast<std::streamsize>(fbBuilder.GetSize());
2403  stream.write(reinterpret_cast<const char*>(fbBuilder.GetBufferPointer()), bytesToWrite);
2404  return !stream.bad();
2405 }
2406 
2407 } // namespace armnnSerializer
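
The listing above exposes two public entry points, Serialize and SaveSerializedToStream, behind the ISerializer interface. The snippet below is a minimal usage sketch, not part of Serializer.cpp: it assumes the public ISerializer factory declared in armnnSerializer/ISerializer.hpp, and the helper name SaveNetworkToFile is hypothetical, used only for illustration.

// Minimal usage sketch (illustrative only): serialize an already-built
// armnn::INetwork and write the resulting FlatBuffer to a binary file.
#include <armnn/INetwork.hpp>
#include <armnnSerializer/ISerializer.hpp>
#include <fstream>
#include <string>

bool SaveNetworkToFile(const armnn::INetwork& network, const std::string& path)
{
    // Create() returns a smart pointer that owns the SerializerImpl shown above.
    auto serializer = armnnSerializer::ISerializer::Create();

    // Walk the network with the SerializerStrategy and build the SerializedGraph.
    serializer->Serialize(network);

    // Write the finished FlatBuffer to disk; false is returned if the stream went bad.
    std::ofstream file(path, std::ios::binary);
    return serializer->SaveSerializedToStream(file);
}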
uint32_t m_PadBottom
Padding bottom value in the height dimension.
bool m_BiasEnabled
Enable/disable bias.
flatbuffers::Offset< Convolution3dDescriptor > CreateConvolution3dDescriptor(flatbuffers::FlatBufferBuilder &_fbb, uint32_t padLeft=0, uint32_t padRight=0, uint32_t padTop=0, uint32_t padBottom=0, uint32_t padFront=0, uint32_t padBack=0, uint32_t strideX=0, uint32_t strideY=0, uint32_t strideZ=0, uint32_t dilationX=1, uint32_t dilationY=1, uint32_t dilationZ=1, bool biasEnabled=false, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NDHWC)
PoolingAlgorithm m_PoolType
The pooling algorithm to use (Max. Average, L2).
flatbuffers::Offset< LongData > CreateLongData(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< int64_t >> data=0)
float m_Eps
Used to avoid dividing by zero.
virtual unsigned int GetNumOutputSlots() const =0
Returns the number of connectable output slots.
armnnSerializer::UnaryOperation GetFlatBufferUnaryOperation(armnn::UnaryOperation comparisonOperation)
bool m_ProjectionEnabled
Enable/disable the projection layer.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
flatbuffers::Offset< ReshapeDescriptor > CreateReshapeDescriptor(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> targetShape=0)
ARMNN_NO_DEPRECATE_WARN_END void ExecuteStrategy(IStrategy &strategy) const
Definition: Network.cpp:466
flatbuffers::Offset< ReduceLayer > CreateReduceLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::ReduceDescriptor > descriptor=0)
UnaryOperation m_Operation
Specifies the elementwiseUnary operation to execute.
uint32_t m_Axis
0-based axis along which to stack the input tensors.
flatbuffers::Offset< OutputSlot > CreateOutputSlot(flatbuffers::FlatBufferBuilder &_fbb, uint32_t index=0, flatbuffers::Offset< armnnSerializer::TensorInfo > tensorInfo=0)
A ViewsDescriptor for the SplitterLayer.
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:66
flatbuffers::Offset< DepthwiseConvolution2dDescriptor > CreateDepthwiseConvolution2dDescriptor(flatbuffers::FlatBufferBuilder &_fbb, uint32_t padLeft=0, uint32_t padRight=0, uint32_t padTop=0, uint32_t padBottom=0, uint32_t strideX=0, uint32_t strideY=0, uint32_t dilationX=1, uint32_t dilationY=1, bool biasEnabled=false, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NCHW)
float m_ScaleW
Center size encoding scale weight.
uint32_t m_PadBottom
Padding bottom value in the height dimension.
bool m_BiasEnabled
Enable/disable bias.
virtual unsigned int GetNumInputSlots() const =0
Returns the number of connectable input slots.
float m_K
Kappa value used for the across channel normalization equation.
int m_Axis
Scalar, defaulted to the last index (-1), specifying the dimension the activation will be performed o...
A TransposeConvolution2dDescriptor for the TransposeConvolution2dLayer.
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
flatbuffers::Offset< LstmLayer > CreateLstmLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::LstmDescriptor > descriptor=0, flatbuffers::Offset< armnnSerializer::LstmInputParams > inputParams=0)
const TensorShape & GetShape() const
Definition: Tensor.hpp:191
uint32_t m_PoolWidth
Pooling width value.
flatbuffers::Offset< L2NormalizationLayer > CreateL2NormalizationLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::L2NormalizationDescriptor > descriptor=0)
uint32_t m_PadBottom
Padding bottom value in the height dimension.
uint32_t m_PadLeft
Padding left value in the width dimension.
flatbuffers::Offset< TransposeConvolution2dDescriptor > CreateTransposeConvolution2dDescriptor(flatbuffers::FlatBufferBuilder &_fbb, uint32_t padLeft=0, uint32_t padRight=0, uint32_t padTop=0, uint32_t padBottom=0, uint32_t strideX=0, uint32_t strideY=0, bool biasEnabled=false, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NCHW)
float m_ClippingThresProj
Clipping threshold value for the projection.
uint32_t m_PoolDepth
Pooling depth value.
int32_t m_ShrinkAxisMask
Shrink axis mask value. If set, the nth specification shrinks the dimensionality by 1...
A ReshapeDescriptor for the ReshapeLayer.
flatbuffers::Offset< ResizeDescriptor > CreateResizeDescriptor(flatbuffers::FlatBufferBuilder &_fbb, uint32_t targetHeight=0, uint32_t targetWidth=0, armnnSerializer::ResizeMethod method=armnnSerializer::ResizeMethod_NearestNeighbor, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NHWC, bool alignCorners=false, bool halfPixelCenters=false)
flatbuffers::Offset< FillLayer > CreateFillLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::FillDescriptor > descriptor=0)
std::vector< int > m_Begin
Begin values for the input that will be sliced.
uint32_t m_PadBack
Padding back value in the depth dimension.
float m_PadValue
Optional value to use for padding, defaults to 0.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
uint32_t GetNumDimensions() const
Get the number of dimensions.
A ComparisonDescriptor for the ComparisonLayer.
Definition: Descriptors.hpp:89
void Serialize(const armnn::INetwork &inNetwork)
Serializes the network to ArmNN SerializedGraph.
float m_ScaleX
Center size encoding scale x.
TensorShape m_InputShape
Required shape of all input tensors.
bool m_TransposeWeightMatrix
Enable/disable transpose weight matrix.
Dimensionality GetDimensionality() const
Function that returns the tensor type.
Definition: Tensor.hpp:92
flatbuffers::Offset< GatherLayer > CreateGatherLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::GatherDescriptor > descriptor=0)
flatbuffers::Offset< RankLayer > CreateRankLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
bool HasPerAxisQuantization() const
Definition: Tensor.cpp:446
uint32_t m_PoolWidth
Pooling width value.
bool m_PeepholeEnabled
Enable/disable peephole.
armnnSerializer::OutputShapeRounding GetFlatBufferOutputShapeRounding(armnn::OutputShapeRounding outputShapeRounding)
flatbuffers::Offset< TransposeLayer > CreateTransposeLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::TransposeDescriptor > descriptor=0)
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
A Convolution2dDescriptor for the Convolution2dLayer.
float m_Alpha
Alpha value for the normalization equation.
uint32_t m_PadLeft
Padding left value in the width dimension.
flatbuffers::Offset< ComparisonLayer > CreateComparisonLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::ComparisonDescriptor > descriptor=0)
bool m_KeepDims
if true then output shape has no change.
float m_HiddenStateScale
Hidden State quantization scale.
bool m_BiasEnabled
Enable/disable bias.
flatbuffers::Offset< ConstTensor > CreateConstTensor(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::TensorInfo > info=0, armnnSerializer::ConstTensorData data_type=armnnSerializer::ConstTensorData_NONE, flatbuffers::Offset< void > data=0)
Optional< unsigned int > GetQuantizationDim() const
Definition: Tensor.cpp:494
flatbuffers::Offset< QuantizeLayer > CreateQuantizeLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
float m_OutputIntermediateScale
Output intermediate quantization scale.
ResizeMethod m_Method
The Interpolation method to use (Bilinear, NearestNeighbor).
float m_Gamma
Gamma, the scale scalar value applied for the normalized tensor. Defaults to 1.0. ...
float m_Beta
Exponentiation value.
flatbuffers::Offset< InputSlot > CreateInputSlot(flatbuffers::FlatBufferBuilder &_fbb, uint32_t index=0, const armnnSerializer::Connection *connection=0)
std::vector< unsigned int > m_Size
Size of the slice in each dimension.
static ISerializer * CreateRaw()
Definition: Serializer.cpp:30
flatbuffers::Offset< SpaceToDepthDescriptor > CreateSpaceToDepthDescriptor(flatbuffers::FlatBufferBuilder &_fbb, uint32_t blockSize=0, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NHWC)
float m_Eps
Value to add to the variance. Used to avoid dividing by zero.
PaddingMethod m_PaddingMethod
The padding method to be used. (Exclude, IgnoreValue).
ArgMinMaxFunction m_Function
Specify if the function is to find Min or Max.
Definition: Descriptors.hpp:81
uint32_t m_DetectionsPerClass
Detections per classes, used in Regular NMS.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
flatbuffers::Offset< GatherNdLayer > CreateGatherNdLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
flatbuffers::Offset< QuantizedLstmLayer > CreateQuantizedLstmLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::QuantizedLstmInputParams > inputParams=0)
flatbuffers::Offset< TransposeDescriptor > CreateTransposeDescriptor(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> dimMappings=0)
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
serializer::ActivationFunction GetFlatBufferActivationFunction(armnn::ActivationFunction function)
Definition: Serializer.cpp:55
Main network class which provides the interface for building up a neural network. ...
Definition: INetwork.hpp:249
uint32_t m_PadRight
Padding right value in the width dimension.
flatbuffers::Offset< DetectionPostProcessDescriptor > CreateDetectionPostProcessDescriptor(flatbuffers::FlatBufferBuilder &_fbb, uint32_t maxDetections=0, uint32_t maxClassesPerDetection=0, uint32_t detectionsPerClass=0, float nmsScoreThreshold=0.0f, float nmsIouThreshold=0.0f, uint32_t numClasses=0, bool useRegularNms=false, float scaleX=0.0f, float scaleY=0.0f, float scaleW=0.0f, float scaleH=0.0f)
flatbuffers::Offset< Pooling3dLayer > CreatePooling3dLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::Pooling3dDescriptor > descriptor=0)
armnnSerializer::NormalizationAlgorithmMethod GetFlatBufferNormalizationAlgorithmMethod(armnn::NormalizationAlgorithmMethod normalizationAlgorithmMethod)
uint32_t m_PadTop
Padding top value in the height dimension.
flatbuffers::Offset< AnyLayer > CreateAnyLayer(flatbuffers::FlatBufferBuilder &_fbb, armnnSerializer::Layer layer_type=armnnSerializer::Layer_NONE, flatbuffers::Offset< void > layer=0)
uint32_t m_PadBottom
Padding bottom value in the height dimension.
flatbuffers::Offset< DepthwiseConvolution2dLayer > CreateDepthwiseConvolution2dLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::DepthwiseConvolution2dDescriptor > descriptor=0, flatbuffers::Offset< armnnSerializer::ConstTensor > weights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > biases=0)
bool m_BiasEnabled
Enable/disable bias.
A LogicalBinaryDescriptor for the LogicalBinaryLayer.
flatbuffers::Offset< MergeLayer > CreateMergeLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
uint32_t m_PadRight
Padding right value in the width dimension.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
MemoryType GetMemoryArea() const
Definition: Tensor.hpp:305
std::vector< std::pair< unsigned int, unsigned int > > m_PadList
Specifies the padding for input dimension.
uint32_t GetNumViews() const
Get the number of views.
ReduceOperation m_ReduceOperation
Specifies the reduction operation to execute.
flatbuffers::Offset< QLstmInputParams > CreateQLstmInputParams(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::ConstTensor > inputToForgetWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > inputToCellWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > inputToOutputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > recurrentToForgetWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > recurrentToCellWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > recurrentToOutputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > forgetGateBias=0, flatbuffers::Offset< armnnSerializer::ConstTensor > cellBias=0, flatbuffers::Offset< armnnSerializer::ConstTensor > outputGateBias=0, flatbuffers::Offset< armnnSerializer::ConstTensor > inputToInputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > recurrentToInputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > inputGateBias=0, flatbuffers::Offset< armnnSerializer::ConstTensor > projectionWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > projectionBias=0, flatbuffers::Offset< armnnSerializer::ConstTensor > cellToInputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > cellToForgetWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > cellToOutputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > inputLayerNormWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > forgetLayerNormWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > cellLayerNormWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > outputLayerNormWeights=0)
bool m_TimeMajor
Enable/disable time major.
Copyright (c) 2021 ARM Limited and Contributors.
DataLayout m_DataLayout
The data layout to be used (NCDHW, NDHWC).
void IgnoreUnused(Ts &&...)
flatbuffers::Offset< TensorInfo > CreateTensorInfo(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> dimensions=0, armnnSerializer::DataType dataType=armnnSerializer::DataType_Float16, float quantizationScale=1.0f, int32_t quantizationOffset=0, flatbuffers::Offset< flatbuffers::Vector< float >> quantizationScales=0, uint32_t quantizationDim=0, uint32_t dimensionality=1, flatbuffers::Offset< flatbuffers::Vector< uint8_t >> dimensionSpecificity=0, bool isConstant=false)
uint32_t m_PadBottom
Padding bottom value in the height dimension.
int32_t m_BeginMask
Begin mask value.
uint32_t m_PadFront
Padding front value in the depth dimension.
flatbuffers::Offset< FullyConnectedDescriptor > CreateFullyConnectedDescriptor(flatbuffers::FlatBufferBuilder &_fbb, bool biasEnabled=false, bool transposeWeightsMatrix=false, bool constantWeights=true)
flatbuffers::Offset< TransposeConvolution2dLayer > CreateTransposeConvolution2dLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::TransposeConvolution2dDescriptor > descriptor=0, flatbuffers::Offset< armnnSerializer::ConstTensor > weights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > biases=0)
SizeType GetSize() const
Definition: Types.hpp:338
uint32_t m_DilationY
Dilation along y axis.
int32_t m_EndMask
End mask value.
A SpaceToDepthDescriptor for the SpaceToDepthLayer.
flatbuffers::Offset< PreluLayer > CreatePreluLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
std::vector< std::pair< unsigned int, unsigned int > > m_PadList
Specifies the padding values for the input dimension: heightPad{top, bottom} widthPad{left, right}.
uint32_t m_PoolHeight
Pooling height value.
std::vector< float > GetQuantizationScales() const
Definition: Tensor.cpp:451
uint32_t m_DilationX
Dilation along x axis.
flatbuffers::Offset< StandInDescriptor > CreateStandInDescriptor(flatbuffers::FlatBufferBuilder &_fbb, uint32_t numInputs=0, uint32_t numOutputs=0)
bool SaveSerializedToStream(std::ostream &stream)
Serializes the SerializedGraph to the stream.
Definition: Serializer.cpp:50
uint32_t m_DilationY
Dilation factor value for height dimension.
armnnSerializer::ConstTensorData GetFlatBufferConstTensorData(armnn::DataType dataType)
bool GetDimensionSpecificity(unsigned int i) const
Gets information about if the dimension size has been specified or not.
Definition: Tensor.cpp:211
LogicalBinaryOperation m_Operation
Specifies the logical operation to execute.
A BatchToSpaceNdDescriptor for the BatchToSpaceNdLayer.
void ExecuteStrategy(const armnn::IConnectableLayer *layer, const armnn::BaseDescriptor &descriptor, const std::vector< armnn::ConstTensor > &constants, const char *name, const armnn::LayerBindingId id) override
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
PermutationVector m_DimMappings
Indicates how to translate tensor elements from a given source into the target destination, when source and target potentially have different memory layouts e.g.
flatbuffers::Offset< MultiplicationLayer > CreateMultiplicationLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
flatbuffers::Offset< DepthToSpaceLayer > CreateDepthToSpaceLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::DepthToSpaceDescriptor > descriptor=0)
flatbuffers::Offset< InstanceNormalizationLayer > CreateInstanceNormalizationLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::InstanceNormalizationDescriptor > descriptor=0)
armnnSerializer::ReduceOperation GetFlatBufferReduceOperation(armnn::ReduceOperation reduceOperation)
int LayerBindingId
Type of identifiers for bindable layers (inputs, outputs).
Definition: Types.hpp:290
flatbuffers::Offset< SliceLayer > CreateSliceLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::SliceDescriptor > descriptor=0)
armnnSerializer::DataType GetFlatBufferDataType(armnn::DataType dataType)
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
uint32_t m_NumOutputs
Number of output tensors.
NormalizationAlgorithmMethod m_NormMethodType
Normalization method algorithm to use (LocalBrightness, LocalContrast).
flatbuffers::Offset< Convolution2dDescriptor > CreateConvolution2dDescriptor(flatbuffers::FlatBufferBuilder &_fbb, uint32_t padLeft=0, uint32_t padRight=0, uint32_t padTop=0, uint32_t padBottom=0, uint32_t strideX=0, uint32_t strideY=0, uint32_t dilationX=1, uint32_t dilationY=1, bool biasEnabled=false, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NCHW)
flatbuffers::Offset< Pooling3dDescriptor > CreatePooling3dDescriptor(flatbuffers::FlatBufferBuilder &_fbb, armnnSerializer::PoolingAlgorithm poolType=armnnSerializer::PoolingAlgorithm_Max, uint32_t padLeft=0, uint32_t padRight=0, uint32_t padTop=0, uint32_t padBottom=0, uint32_t padFront=0, uint32_t padBack=0, uint32_t poolWidth=0, uint32_t poolHeight=0, uint32_t poolDepth=0, uint32_t strideX=0, uint32_t strideY=0, uint32_t strideZ=0, armnnSerializer::OutputShapeRounding outputShapeRounding=armnnSerializer::OutputShapeRounding_Floor, armnnSerializer::PaddingMethod paddingMethod=armnnSerializer::PaddingMethod_IgnoreValue, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NHWC)
flatbuffers::Offset< InputLayer > CreateInputLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::BindableLayerBase > base=0)
A ResizeBilinearDescriptor for the ResizeBilinearLayer.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
uint32_t m_MaxClassesPerDetection
Maximum numbers of classes per detection, used in Fast NMS.
Base class for all descriptors.
Definition: Descriptors.hpp:22
std::vector< unsigned int > m_Axis
Values for the dimensions to reduce.
A StackDescriptor for the StackLayer.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
flatbuffers::Offset< ShortData > CreateShortData(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< int16_t >> data=0)
serializer::ArgMinMaxFunction GetFlatBufferArgMinMaxFunction(armnn::ArgMinMaxFunction function)
Definition: Serializer.cpp:86
TensorShape m_TargetShape
Target shape value.
bool SaveSerializedToStream(std::ostream &stream)
Serializes the SerializedGraph to the stream.
flatbuffers::Offset< ConcatLayer > CreateConcatLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::OriginsDescriptor > descriptor=0)
uint32_t m_PoolHeight
Pooling height value.
uint32_t m_PadTop
Padding top value in the height dimension.
uint32_t m_MaxDetections
Maximum numbers of detections.
A PadDescriptor for the PadLayer.
flatbuffers::Offset< SubtractionLayer > CreateSubtractionLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
flatbuffers::Offset< BindableLayerBase > CreateBindableLayerBase(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, int32_t layerBindingId=0)
const uint32_t * GetViewOrigin(uint32_t idx) const
Return the view origin at the int value idx.
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
uint32_t m_PadBack
Padding back value in the depth dimension.
flatbuffers::Offset< ArgMinMaxLayer > CreateArgMinMaxLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::ArgMinMaxDescriptor > descriptor=0)
armnnSerializer::NormalizationAlgorithmChannel GetFlatBufferNormalizationAlgorithmChannel(armnn::NormalizationAlgorithmChannel normalizationAlgorithmChannel)
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
flatbuffers::Offset< QLstmDescriptor > CreateQLstmDescriptor(flatbuffers::FlatBufferBuilder &_fbb, bool cifgEnabled=true, bool peepholeEnabled=false, bool projectionEnabled=false, bool layerNormEnabled=false, float cellClip=0.0f, float projectionClip=0.0f, float inputIntermediateScale=0.0f, float forgetIntermediateScale=0.0f, float cellIntermediateScale=0.0f, float outputIntermediateScale=0.0f, int32_t hiddenStateZeroPoint=0, float hiddenStateScale=0.0f)
bool m_LayerNormEnabled
Enable/disable layer normalization.
float m_NmsIouThreshold
Intersection over union threshold.
flatbuffers::Offset< ReshapeLayer > CreateReshapeLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::ReshapeDescriptor > descriptor=0)
armnnSerializer::LogicalBinaryOperation GetFlatBufferLogicalBinaryOperation(armnn::LogicalBinaryOperation logicalBinaryOperation)
flatbuffers::Offset< ArgMinMaxDescriptor > CreateArgMinMaxDescriptor(flatbuffers::FlatBufferBuilder &_fbb, armnnSerializer::ArgMinMaxFunction argMinMaxFunction=armnnSerializer::ArgMinMaxFunction_Min, int32_t axis=0)
flatbuffers::Offset< SoftmaxDescriptor > CreateSoftmaxDescriptor(flatbuffers::FlatBufferBuilder &_fbb, float beta=0.0f, int32_t axis=-1)
An LstmDescriptor for the LstmLayer.
uint32_t m_PadRight
Padding right value in the width dimension.
flatbuffers::Offset< AdditionLayer > CreateAdditionLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
uint32_t m_DilationX
Dilation factor value for width dimension.
uint32_t m_PadTop
Padding top value in the height dimension.
flatbuffers::Offset< L2NormalizationDescriptor > CreateL2NormalizationDescriptor(flatbuffers::FlatBufferBuilder &_fbb, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NCHW, float eps=1e-12f)
std::vector< unsigned int > m_Begin
Beginning indices of the slice in each dimension.
int32_t m_NewAxisMask
New axis mask value.
flatbuffers::Offset< MinimumLayer > CreateMinimumLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
bool m_KeepDims
Enable/disable keep dimensions. If true, then the reduced dimensions that are of length 1 are kept...
flatbuffers::Offset< ByteData > CreateByteData(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< int8_t >> data=0)
std::vector< unsigned int > m_BlockShape
Block shape values.
flatbuffers::Offset< FeatureCompatibilityVersions > CreateFeatureCompatibilityVersions(flatbuffers::FlatBufferBuilder &_fbb, uint32_t bindingIdsScheme=0, uint32_t weightsLayoutScheme=0, uint32_t constantTensorsAsInputs=0)
float m_Eps
Epsilon, small scalar value added to variance to avoid dividing by zero. Defaults to 1e-12f...
An output connection slot for a layer.
Definition: INetwork.hpp:40
flatbuffers::Offset< DepthToSpaceDescriptor > CreateDepthToSpaceDescriptor(flatbuffers::FlatBufferBuilder &_fbb, uint32_t blockSize=0, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NHWC)
A L2NormalizationDescriptor for the L2NormalizationLayer.
int32_t GetQuantizationOffset() const
Definition: Tensor.cpp:478
An ArgMinMaxDescriptor for ArgMinMaxLayer.
Definition: Descriptors.hpp:67
armnnSerializer::PaddingMode GetFlatBufferPaddingMode(armnn::PaddingMode paddingMode)
float GetQuantizationScale() const
Definition: Tensor.cpp:461
flatbuffers::Offset< LstmInputParams > CreateLstmInputParams(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::ConstTensor > inputToForgetWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > inputToCellWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > inputToOutputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > recurrentToForgetWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > recurrentToCellWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > recurrentToOutputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > forgetGateBias=0, flatbuffers::Offset< armnnSerializer::ConstTensor > cellBias=0, flatbuffers::Offset< armnnSerializer::ConstTensor > outputGateBias=0, flatbuffers::Offset< armnnSerializer::ConstTensor > inputToInputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > recurrentToInputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > cellToInputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > inputGateBias=0, flatbuffers::Offset< armnnSerializer::ConstTensor > projectionWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > projectionBias=0, flatbuffers::Offset< armnnSerializer::ConstTensor > cellToForgetWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > cellToOutputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > inputLayerNormWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > forgetLayerNormWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > cellLayerNormWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > outputLayerNormWeights=0)
DataType GetDataType() const
Definition: Tensor.hpp:198
An OriginsDescriptor for the ConcatLayer.
A ReduceDescriptor for the REDUCE operators.
float m_ProjectionClip
Clipping threshold value for the projection.
flatbuffers::Offset< CastLayer > CreateCastLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
flatbuffers::Offset< LayerBase > CreateLayerBase(flatbuffers::FlatBufferBuilder &_fbb, uint32_t index=0, flatbuffers::Offset< flatbuffers::String > layerName=0, armnnSerializer::LayerType layerType=armnnSerializer::LayerType_Addition, flatbuffers::Offset< flatbuffers::Vector< flatbuffers::Offset< armnnSerializer::InputSlot >>> inputSlots=0, flatbuffers::Offset< flatbuffers::Vector< flatbuffers::Offset< armnnSerializer::OutputSlot >>> outputSlots=0)
flatbuffers::Offset< ShapeLayer > CreateShapeLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
A FullyConnectedDescriptor for the FullyConnectedLayer.
int32_t m_EllipsisMask
Ellipsis mask value.
virtual LayerGuid GetGuid() const =0
Returns the unique id of the layer.
bool m_BiasEnabled
Enable/disable bias.
A tensor defined by a TensorInfo (shape and data type) and an immutable backing store.
Definition: Tensor.hpp:327
flatbuffers::Offset< QuantizedLstmInputParams > CreateQuantizedLstmInputParams(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::ConstTensor > inputToInputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > inputToForgetWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > inputToCellWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > inputToOutputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > recurrentToInputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > recurrentToForgetWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > recurrentToCellWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > recurrentToOutputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > inputGateBias=0, flatbuffers::Offset< armnnSerializer::ConstTensor > forgetGateBias=0, flatbuffers::Offset< armnnSerializer::ConstTensor > cellBias=0, flatbuffers::Offset< armnnSerializer::ConstTensor > outputGateBias=0)
flatbuffers::Offset< ReduceDescriptor > CreateReduceDescriptor(flatbuffers::FlatBufferBuilder &_fbb, bool keepDims=false, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> axis=0, armnnSerializer::ReduceOperation reduceOperation=armnnSerializer::ReduceOperation_Sum)
flatbuffers::Offset< StackDescriptor > CreateStackDescriptor(flatbuffers::FlatBufferBuilder &_fbb, uint32_t axis=0, uint32_t numInputs=0, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> inputShape=0)
flatbuffers::Offset< BatchToSpaceNdDescriptor > CreateBatchToSpaceNdDescriptor(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> blockShape=0, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> crops=0, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NHWC)
float m_InputIntermediateScale
Input intermediate quantization scale.
OutputShapeRounding m_OutputShapeRounding
The rounding method for the output shape. (Floor, Ceiling).
uint32_t m_TargetWidth
Target width value.
flatbuffers::Offset< SplitterLayer > CreateSplitterLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::ViewsDescriptor > descriptor=0)
A GatherDescriptor for the GatherLayer.
uint32_t m_PadBottom
Padding bottom value in the height dimension.
bool m_PeepholeEnabled
Enable/disable peephole.
uint32_t m_NumClasses
Number of classes.
bool m_HalfPixelCenters
Half Pixel Centers.
flatbuffers::Offset< OutputLayer > CreateOutputLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::BindableLayerBase > base=0)
void Serialize(const armnn::INetwork &inNetwork)
Serializes the network to ArmNN SerializedGraph.
Definition: Serializer.cpp:45
flatbuffers::Offset< SoftmaxLayer > CreateSoftmaxLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::SoftmaxDescriptor > descriptor=0)
flatbuffers::Offset< FillDescriptor > CreateFillDescriptor(flatbuffers::FlatBufferBuilder &_fbb, float value=0.0f)
uint32_t m_PadTop
Padding top value in the height dimension.
A StandInDescriptor for the StandIn layer.
A QLstmDescriptor for the QLstmLayer.
flatbuffers::Offset< StridedSliceLayer > CreateStridedSliceLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::StridedSliceDescriptor > descriptor=0)
virtual unsigned int CalculateIndexOnOwner() const =0
flatbuffers::Offset< LogSoftmaxDescriptor > CreateLogSoftmaxDescriptor(flatbuffers::FlatBufferBuilder &_fbb, float beta=1.0f, int32_t axis=-1)
bool m_UseRegularNms
Use Regular NMS.
uint32_t m_PadFront
Padding front value in the depth dimension.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
flatbuffers::Offset< MeanLayer > CreateMeanLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::MeanDescriptor > descriptor=0)
std::vector< unsigned int > m_BlockShape
Block shape value.
std::vector< int > m_Stride
Stride values for the input that will be sliced.
An ActivationDescriptor for the ActivationLayer.
Definition: Descriptors.hpp:36
flatbuffers::Offset< ActivationLayer > CreateActivationLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::ActivationDescriptor > descriptor=0)
flatbuffers::Offset< SpaceToDepthLayer > CreateSpaceToDepthLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::SpaceToDepthDescriptor > descriptor=0)
flatbuffers::Offset< SliceDescriptor > CreateSliceDescriptor(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> begin=0, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> size=0)
const TensorInfo & GetInfo() const
Definition: Tensor.hpp:295
Bounded ReLu: min(a, max(b, input)); covers ReLu1 and ReLu6.
flatbuffers::Offset< BatchNormalizationLayer > CreateBatchNormalizationLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::BatchNormalizationDescriptor > descriptor=0, flatbuffers::Offset< armnnSerializer::ConstTensor > mean=0, flatbuffers::Offset< armnnSerializer::ConstTensor > variance=0, flatbuffers::Offset< armnnSerializer::ConstTensor > beta=0, flatbuffers::Offset< armnnSerializer::ConstTensor > gamma=0)
flatbuffers::Offset< BatchNormalizationDescriptor > CreateBatchNormalizationDescriptor(flatbuffers::FlatBufferBuilder &_fbb, float eps=0.0f, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NHWC)
uint32_t m_NumInputs
Number of input tensors.
flatbuffers::Offset< GatherDescriptor > CreateGatherDescriptor(flatbuffers::FlatBufferBuilder &_fbb, int32_t axis=0)
flatbuffers::Offset< Convolution3dLayer > CreateConvolution3dLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::Convolution3dDescriptor > descriptor=0)
uint32_t m_PadLeft
Padding left value in the width dimension.
flatbuffers::Offset< ActivationDescriptor > CreateActivationDescriptor(flatbuffers::FlatBufferBuilder &_fbb, armnnSerializer::ActivationFunction activationFunction=armnnSerializer::ActivationFunction_Sigmoid, float a=0.0f, float b=0.0f)
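CreateActivationDescriptor and CreateActivationLayer (listed above) are the generated FlatBuffer helpers for the activation case. A small sketch of how they compose, assuming the generated schema header is on the include path; the LayerBase offset is left as a placeholder, since building it requires the layer's name, GUID and slots:

// Assumes the generated ArmNN schema header (which pulls in flatbuffers) is included.
namespace serializer = armnnSerializer;

void SketchActivation(flatbuffers::FlatBufferBuilder& fbb)
{
    // Descriptor table: activation function plus the a/b parameters (unused for plain ReLu).
    auto fbDescriptor = serializer::CreateActivationDescriptor(
        fbb, serializer::ActivationFunction_ReLu, 0.0f, 0.0f);

    // Layer table: a real implementation would pass a serialized LayerBase here instead of 0.
    auto fbLayer = serializer::CreateActivationLayer(fbb, /*base=*/0, fbDescriptor);
    (void)fbLayer;
}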
uint32_t m_TargetHeight
Target height value.
uint32_t m_ActivationFunc
The activation function to use.
A SliceDescriptor for the SliceLayer.
flatbuffers::Offset< NormalizationLayer > CreateNormalizationLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::NormalizationDescriptor > descriptor=0)
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
A Convolution3dDescriptor for the Convolution3dLayer.
uint32_t m_PadRight
Padding right value in the width dimension.
flatbuffers::Offset< ViewsDescriptor > CreateViewsDescriptor(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::OriginsDescriptor > origins=0, flatbuffers::Offset< flatbuffers::Vector< flatbuffers::Offset< armnnSerializer::UintVector >>> viewSizes=0)
virtual LayerType GetType() const =0
Returns the armnn::LayerType of this layer.
flatbuffers::Offset< PermuteDescriptor > CreatePermuteDescriptor(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> dimMappings=0)
float m_ClippingThresCell
Clipping threshold value for the cell state.
unsigned int m_BlockSize
Scalar specifying the input block size. It must be >= 1.
uint32_t m_NumGroups
Number of groups for the channel shuffle operation.
flatbuffers::Offset< MeanDescriptor > CreateMeanDescriptor(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> axis=0, bool keepDims=false)
const uint32_t * GetViewOrigin(uint32_t idx) const
Get the view origin at index idx.
PaddingMode m_PaddingMode
Specifies the padding mode (Constant, Reflect or Symmetric).
flatbuffers::Offset< StandInLayer > CreateStandInLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::StandInDescriptor > descriptor=0)
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
float m_ForgetIntermediateScale
Forget intermediate quantization scale.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
float m_Beta
Beta, the offset scalar value applied for the normalized tensor. Defaults to 1.0. ...
armnnSerializer::DataLayout GetFlatBufferDataLayout(armnn::DataLayout dataLayout)
A Pooling3dDescriptor for the Pooling3dLayer.
uint32_t m_StrideZ
Stride value when proceeding through input for the depth dimension.
std::vector< uint32_t > m_vAxis
The indices of the dimensions to reduce.
float m_ScaleH
Center size encoding scale height.
ComparisonOperation m_Operation
Specifies the comparison operation to execute.
std::vector< int > m_End
End values for the input that will be sliced.
flatbuffers::Offset< SwitchLayer > CreateSwitchLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
A SpaceToBatchNdDescriptor for the SpaceToBatchNdLayer.
DataLayout m_DataLayout
The data layout to be used (NDHWC, NCDHW).
NormalizationAlgorithmChannel m_NormChannelType
Normalization channel algorithm to use (Across, Within).
const uint32_t * GetViewSizes(uint32_t idx) const
Get the view sizes at index idx.
float m_CellClip
Clipping threshold value for the cell state.
flatbuffers::Offset< ElementwiseUnaryDescriptor > CreateElementwiseUnaryDescriptor(flatbuffers::FlatBufferBuilder &_fbb, armnnSerializer::UnaryOperation operation=armnnSerializer::UnaryOperation_Abs)
float m_A
Alpha upper bound value used by the activation functions (BoundedReLu, Linear, TanH, Elu).
Definition: Descriptors.hpp:61
uint32_t m_DilationX
Dilation along x axis.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
arm::pipe::ProfilingGuid LayerGuid
Define LayerGuid type.
Definition: Types.hpp:26
flatbuffers::Offset< PadDescriptor > CreatePadDescriptor(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> padList=0, float padValue=0.0f, armnnSerializer::PaddingMode paddingMode=armnnSerializer::PaddingMode_Constant)
bool m_CifgEnabled
Enable/disable CIFG (coupled input & forget gate).
flatbuffers::Offset< PadLayer > CreatePadLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::PadDescriptor > descriptor=0)
flatbuffers::Offset< FloorLayer > CreateFloorLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
std::unique_ptr< ISerializer, void(*)(ISerializer *serializer)> ISerializerPtr
Definition: ISerializer.hpp:15
flatbuffers::Offset< NormalizationDescriptor > CreateNormalizationDescriptor(flatbuffers::FlatBufferBuilder &_fbb, armnnSerializer::NormalizationAlgorithmChannel normChannelType=armnnSerializer::NormalizationAlgorithmChannel_Across, armnnSerializer::NormalizationAlgorithmMethod normMethodType=armnnSerializer::NormalizationAlgorithmMethod_LocalBrightness, uint32_t normSize=0, float alpha=0.0f, float beta=0.0f, float k=0.0f, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NCHW)
uint32_t m_PadLeft
Padding left value in the width dimension.
armnnSerializer::ComparisonOperation GetFlatBufferComparisonOperation(armnn::ComparisonOperation comparisonOperation)
flatbuffers::Offset< Pooling2dDescriptor > CreatePooling2dDescriptor(flatbuffers::FlatBufferBuilder &_fbb, armnnSerializer::PoolingAlgorithm poolType=armnnSerializer::PoolingAlgorithm_Max, uint32_t padLeft=0, uint32_t padRight=0, uint32_t padTop=0, uint32_t padBottom=0, uint32_t poolWidth=0, uint32_t poolHeight=0, uint32_t strideX=0, uint32_t strideY=0, armnnSerializer::OutputShapeRounding outputShapeRounding=armnnSerializer::OutputShapeRounding_Floor, armnnSerializer::PaddingMethod paddingMethod=armnnSerializer::PaddingMethod_IgnoreValue, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NHWC)
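CreatePooling2dDescriptor takes already-converted enum values, so the conversion helpers listed on this page (GetFlatBufferPoolingAlgorithm, GetFlatBufferPaddingMethod, GetFlatBufferDataLayout) slot in directly. A sketch, assuming the generated schema header and <armnn/Descriptors.hpp> are included; the output-shape-rounding argument is left at its schema default for brevity:

namespace serializer = armnnSerializer;

flatbuffers::Offset<serializer::Pooling2dDescriptor> SketchPooling2dDescriptor(
    flatbuffers::FlatBufferBuilder& fbb, const armnn::Pooling2dDescriptor& desc)
{
    return serializer::CreatePooling2dDescriptor(
        fbb,
        serializer::GetFlatBufferPoolingAlgorithm(desc.m_PoolType),
        desc.m_PadLeft, desc.m_PadRight, desc.m_PadTop, desc.m_PadBottom,
        desc.m_PoolWidth, desc.m_PoolHeight,
        desc.m_StrideX, desc.m_StrideY,
        serializer::OutputShapeRounding_Floor, // placeholder; real code converts desc.m_OutputShapeRounding
        serializer::GetFlatBufferPaddingMethod(desc.m_PaddingMethod),
        serializer::GetFlatBufferDataLayout(desc.m_DataLayout));
}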
bool m_AlignCorners
Aligned corners.
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
flatbuffers::Offset< ConstantLayer > CreateConstantLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::ConstTensor > input=0)
int32_t m_Axis
The axis in params to gather indices from.
A ElementwiseUnaryDescriptor for the ElementwiseUnaryLayer.
flatbuffers::Offset< ChannelShuffleLayer > CreateChannelShuffleLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::ChannelShuffleDescriptor > descriptor=0)
PoolingAlgorithm m_PoolType
The pooling algorithm to use (Max, Average, L2).
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
flatbuffers::Offset< UintVector > CreateUintVector(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> data=0)
flatbuffers::Offset< StackLayer > CreateStackLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::StackDescriptor > descriptor=0)
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
uint32_t m_PadLeft
Padding left value in the width dimension.
std::vector< std::pair< unsigned int, unsigned int > > m_Crops
The values to crop from the input dimension.
flatbuffers::Offset< Convolution2dLayer > CreateConvolution2dLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::Convolution2dDescriptor > descriptor=0, flatbuffers::Offset< armnnSerializer::ConstTensor > weights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > biases=0)
flatbuffers::Offset< UnidirectionalSequenceLstmLayer > CreateUnidirectionalSequenceLstmLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::UnidirectionalSequenceLstmDescriptor > descriptor=0, flatbuffers::Offset< armnnSerializer::LstmInputParams > inputParams=0)
uint32_t m_PadTop
Padding top value in the height dimension.
unsigned int GetNumDimensions() const
Function that returns the tensor rank.
Definition: Tensor.cpp:174
flatbuffers::Offset< Pooling2dLayer > CreatePooling2dLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::Pooling2dDescriptor > descriptor=0)
uint32_t m_PadTop
Padding top value in the height dimension.
bool m_ProjectionEnabled
Enable/disable the projection layer.
ArgMinMaxFunction
Definition: Types.hpp:102
flatbuffers::Offset< SpaceToBatchNdLayer > CreateSpaceToBatchNdLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::SpaceToBatchNdDescriptor > descriptor=0)
OutputShapeRounding m_OutputShapeRounding
The rounding method for the output shape (Floor, Ceiling).
armnnSerializer::ResizeMethod GetFlatBufferResizeMethod(armnn::ResizeMethod method)
uint32_t m_NumInputs
Number of input tensors.
flatbuffers::Offset< UnidirectionalSequenceLstmDescriptor > CreateUnidirectionalSequenceLstmDescriptor(flatbuffers::FlatBufferBuilder &_fbb, uint32_t activationFunc=0, float clippingThresCell=0.0f, float clippingThresProj=0.0f, bool cifgEnabled=true, bool peepholeEnabled=false, bool projectionEnabled=false, bool layerNormEnabled=false, bool timeMajor=false)
uint32_t GetNumDimensions() const
Get the number of dimensions.
flatbuffers::Offset< ComparisonDescriptor > CreateComparisonDescriptor(flatbuffers::FlatBufferBuilder &_fbb, armnnSerializer::ComparisonOperation operation=armnnSerializer::ComparisonOperation_Equal)
virtual const IInputSlot & GetInputSlot(unsigned int index) const =0
Get a const input slot handle by slot index.
A MeanDescriptor for the MeanLayer.
flatbuffers::Offset< MaximumLayer > CreateMaximumLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
virtual const IOutputSlot * GetConnection() const =0
static ISerializerPtr Create()
Definition: Serializer.cpp:35
armnnSerializer::PaddingMethod GetFlatBufferPaddingMethod(armnn::PaddingMethod paddingMethod)
bool m_LayerNormEnabled
Enable/disable layer normalization.
std::enable_if_t< std::is_unsigned< Source >::value && std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
Definition: NumericCast.hpp:35
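numeric_cast is the range-checked counterpart of static_cast that the serializer uses when narrowing sizes and counts; whether an out-of-range value asserts or throws depends on how ArmNN is built, so this only illustrates the call shape:

#include <armnn/utility/NumericCast.hpp>
#include <vector>

uint32_t SketchRank(const std::vector<uint32_t>& shape)
{
    // size_t -> uint32_t, checked rather than silently truncated.
    return armnn::numeric_cast<uint32_t>(shape.size());
}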
uint32_t m_PadRight
Padding right value in the width dimension.
flatbuffers::Offset< InstanceNormalizationDescriptor > CreateInstanceNormalizationDescriptor(flatbuffers::FlatBufferBuilder &_fbb, float gamma=0.0f, float beta=0.0f, float eps=0.0f, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NHWC)
A TransposeDescriptor for the TransposeLayer.
A StridedSliceDescriptor for the StridedSliceLayer.
virtual const TensorInfo & GetTensorInfo() const =0
uint32_t m_Axis
Axis on which to apply the channel shuffle operation.
virtual const IOutputSlot & GetOutputSlot(unsigned int index) const =0
Get the const output slot handle by slot index.
int m_Axis
Axis to reduce across the input tensor.
Definition: Descriptors.hpp:83
flatbuffers::Offset< IntData > CreateIntData(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< int32_t >> data=0)
virtual const char * GetName() const =0
Returns the name of the layer.
float m_ScaleY
Center size encoding scale y.
flatbuffers::Offset< ResizeLayer > CreateResizeLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::ResizeDescriptor > descriptor=0)
uint32_t GetNumViews() const
Get the number of views.
flatbuffers::Offset< FullyConnectedLayer > CreateFullyConnectedLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::FullyConnectedDescriptor > descriptor=0, flatbuffers::Offset< armnnSerializer::ConstTensor > weights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > biases=0)
float m_NmsScoreThreshold
NMS score threshold.
flatbuffers::Offset< DequantizeLayer > CreateDequantizeLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
flatbuffers::Offset< ChannelShuffleDescriptor > CreateChannelShuffleDescriptor(flatbuffers::FlatBufferBuilder &_fbb, uint32_t axis=0, uint32_t numGroups=0)
virtual LayerGuid GetOwningLayerGuid() const =0
A Pooling2dDescriptor for the Pooling2dLayer.
flatbuffers::Offset< DetectionPostProcessLayer > CreateDetectionPostProcessLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::DetectionPostProcessDescriptor > descriptor=0, flatbuffers::Offset< armnnSerializer::ConstTensor > anchors=0)
A NormalizationDescriptor for the NormalizationLayer.
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
flatbuffers::Offset< BatchToSpaceNdLayer > CreateBatchToSpaceNdLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::BatchToSpaceNdDescriptor > descriptor=0)
flatbuffers::Offset< armnnSerializer::FeatureCompatibilityVersions > GetVersionTable()
An InstanceNormalizationDescriptor for InstanceNormalizationLayer.
PaddingMethod m_PaddingMethod
The padding method to be used (Exclude, IgnoreValue).
unsigned int GetConcatAxis() const
Get the concatenation axis value.
A ChannelShuffleDescriptor for the ChannelShuffle operator.
flatbuffers::Offset< LogSoftmaxLayer > CreateLogSoftmaxLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::LogSoftmaxDescriptor > descriptor=0)
float m_CellIntermediateScale
Cell intermediate quantization scale.
flatbuffers::Offset< StridedSliceDescriptor > CreateStridedSliceDescriptor(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< int32_t >> begin=0, flatbuffers::Offset< flatbuffers::Vector< int32_t >> end=0, flatbuffers::Offset< flatbuffers::Vector< int32_t >> stride=0, int32_t beginMask=0, int32_t endMask=0, int32_t shrinkAxisMask=0, int32_t ellipsisMask=0, int32_t newAxisMask=0, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NHWC)
flatbuffers::Offset< OriginsDescriptor > CreateOriginsDescriptor(flatbuffers::FlatBufferBuilder &_fbb, uint32_t concatAxis=0, uint32_t numViews=0, uint32_t numDimensions=0, flatbuffers::Offset< flatbuffers::Vector< flatbuffers::Offset< armnnSerializer::UintVector >>> viewOrigins=0)
flatbuffers::Offset< QLstmLayer > CreateQLstmLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::QLstmDescriptor > descriptor=0, flatbuffers::Offset< armnnSerializer::QLstmInputParams > inputParams=0)
uint32_t m_DilationZ
Dilation along z axis.
flatbuffers::Offset< LstmDescriptor > CreateLstmDescriptor(flatbuffers::FlatBufferBuilder &_fbb, uint32_t activationFunc=0, float clippingThresCell=0.0f, float clippingThresProj=0.0f, bool cifgEnabled=true, bool peepholeEnabled=false, bool projectionEnabled=false, bool layerNormEnabled=false)
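The LSTM descriptor members listed on this page (m_ActivationFunc, m_ClippingThresCell, m_CifgEnabled, and so on) map one-to-one onto CreateLstmDescriptor's parameters. A sketch of that mapping, assuming the generated schema header and <armnn/Descriptors.hpp> are included:

namespace serializer = armnnSerializer;

flatbuffers::Offset<serializer::LstmDescriptor> SketchLstmDescriptor(
    flatbuffers::FlatBufferBuilder& fbb, const armnn::LstmDescriptor& desc)
{
    return serializer::CreateLstmDescriptor(
        fbb,
        desc.m_ActivationFunc,      // fused activation function id
        desc.m_ClippingThresCell,
        desc.m_ClippingThresProj,
        desc.m_CifgEnabled,
        desc.m_PeepholeEnabled,
        desc.m_ProjectionEnabled,
        desc.m_LayerNormEnabled);
}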
float m_B
Beta lower bound value used by the activation functions (BoundedReLu, Linear, TanH).
Definition: Descriptors.hpp:63
static void Destroy(ISerializer *serializer)
Definition: Serializer.cpp:40
A SoftmaxDescriptor for the SoftmaxLayer.
float m_Beta
Beta value for the normalization equation.
flatbuffers::Offset< ElementwiseUnaryLayer > CreateElementwiseUnaryLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::ElementwiseUnaryDescriptor > descriptor=0)
flatbuffers::Offset< PermuteLayer > CreatePermuteLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::PermuteDescriptor > descriptor=0)
uint32_t m_StrideZ
Stride value when proceeding through input for the depth dimension.
const OriginsDescriptor & GetOrigins() const
Get the View Origins.
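The splitter-related accessors above (GetNumViews, GetViewOrigin, GetViewSizes, GetOrigins, GetConcatAxis) provide everything needed to fill the ViewsDescriptor and OriginsDescriptor tables. A sketch of that wiring, assuming the generated schema header and <armnn/Descriptors.hpp> are included:

namespace serializer = armnnSerializer;

flatbuffers::Offset<serializer::ViewsDescriptor> SketchViewsDescriptor(
    flatbuffers::FlatBufferBuilder& fbb, const armnn::ViewsDescriptor& views)
{
    const armnn::OriginsDescriptor& origins = views.GetOrigins();

    std::vector<flatbuffers::Offset<serializer::UintVector>> fbOrigins;
    std::vector<flatbuffers::Offset<serializer::UintVector>> fbSizes;

    // Each view contributes one origin vector and one size vector.
    for (uint32_t v = 0; v < views.GetNumViews(); ++v)
    {
        std::vector<uint32_t> origin(views.GetViewOrigin(v),
                                     views.GetViewOrigin(v) + views.GetNumDimensions());
        std::vector<uint32_t> size(views.GetViewSizes(v),
                                   views.GetViewSizes(v) + views.GetNumDimensions());

        fbOrigins.push_back(serializer::CreateUintVector(fbb, fbb.CreateVector(origin)));
        fbSizes.push_back(serializer::CreateUintVector(fbb, fbb.CreateVector(size)));
    }

    auto fbOriginsDescriptor = serializer::CreateOriginsDescriptor(
        fbb, origins.GetConcatAxis(), origins.GetNumViews(), origins.GetNumDimensions(),
        fbb.CreateVector(fbOrigins));

    return serializer::CreateViewsDescriptor(fbb, fbOriginsDescriptor, fbb.CreateVector(fbSizes));
}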
bool m_CifgEnabled
Enable/disable CIFG (coupled input & forget gate).
PermutationVector m_DimMappings
Indicates how to translate tensor elements from a given source into the target destination, when source and target potentially have different memory layouts e.g.
uint32_t m_NormSize
Depth radius value.
flatbuffers::Offset< SpaceToBatchNdDescriptor > CreateSpaceToBatchNdDescriptor(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> blockShape=0, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> padList=0, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NHWC)
ActivationFunction m_Function
The activation function to use (Sigmoid, TanH, Linear, ReLu, BoundedReLu, SoftReLu, LeakyReLu, Abs, Sqrt, Square, Elu).
Definition: Descriptors.hpp:59
An input connection slot for a layer.
Definition: INetwork.hpp:26
flatbuffers::Offset< SerializedGraph > CreateSerializedGraph(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< flatbuffers::Offset< armnnSerializer::AnyLayer >>> layers=0, flatbuffers::Offset< flatbuffers::Vector< int32_t >> inputIds=0, flatbuffers::Offset< flatbuffers::Vector< int32_t >> outputIds=0, flatbuffers::Offset< armnnSerializer::FeatureCompatibilityVersions > featureVersions=0)
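CreateSerializedGraph is the final assembly step: the per-layer AnyLayer offsets and the input/output binding ids collected while visiting the network are packed into a single root table, together with the feature-version table from GetVersionTable. A sketch, assuming the generated schema header is included and the collected vectors are passed in as parameters:

namespace serializer = armnnSerializer;

void SketchFinishSerializedGraph(
    flatbuffers::FlatBufferBuilder& fbb,
    const std::vector<flatbuffers::Offset<serializer::AnyLayer>>& layers,
    const std::vector<int32_t>& inputIds,
    const std::vector<int32_t>& outputIds,
    flatbuffers::Offset<serializer::FeatureCompatibilityVersions> versions)
{
    auto fbGraph = serializer::CreateSerializedGraph(
        fbb,
        fbb.CreateVector(layers),    // all serialized layers, in visit order
        fbb.CreateVector(inputIds),  // input LayerBindingIds
        fbb.CreateVector(outputIds), // output LayerBindingIds
        versions);                   // e.g. the result of GetVersionTable()

    // Mark the graph as the FlatBuffer root so it can be written out.
    fbb.Finish(fbGraph);
}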
armnnSerializer::PoolingAlgorithm GetFlatBufferPoolingAlgorithm(armnn::PoolingAlgorithm poolingAlgorithm)
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
uint32_t m_DilationY
Dilation along y axis.
A FillDescriptor for the FillLayer.
A BatchNormalizationDescriptor for the BatchNormalizationLayer.
uint32_t m_PadLeft
Padding left value in the width dimension.
unsigned int GetNumBytes() const
Definition: Tensor.hpp:302
ActivationFunction
Definition: Types.hpp:86
flatbuffers::Offset< LogicalBinaryDescriptor > CreateLogicalBinaryDescriptor(flatbuffers::FlatBufferBuilder &_fbb, armnnSerializer::LogicalBinaryOperation operation=armnnSerializer::LogicalBinaryOperation_LogicalAnd)
flatbuffers::Offset< DivisionLayer > CreateDivisionLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
A PermuteDescriptor for the PermuteLayer.
flatbuffers::Offset< LogicalBinaryLayer > CreateLogicalBinaryLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::LogicalBinaryDescriptor > descriptor=0)
uint32_t m_PadRight
Padding right value in the width dimension.
int32_t m_HiddenStateZeroPoint
Hidden State zero point.
bool m_ConstantWeights
Enable/disable constant weights and biases.