ArmNN
 21.11
Serializer.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 #include "Serializer.hpp"
6 #include "SerializerUtils.hpp"
7 
8 #include <armnn/Descriptors.hpp>
9 #include <armnn/LstmParams.hpp>
13 
14 #include <fmt/format.h>
15 #include <iostream>
16 
17 using namespace armnn;
18 namespace fb = flatbuffers;
19 namespace serializer = armnnSerializer;
20 
21 namespace armnnSerializer
22 {
23 
// Construct the public serializer facade; all work is delegated to the
// pimpl SerializerImpl instance created here.
ISerializer::ISerializer() : pSerializerImpl(new SerializerImpl())
{
}
27 
// Out-of-line defaulted destructor (defined in the .cpp, where the pimpl's
// type is fully declared).
ISerializer::~ISerializer() = default;
29 
// NOTE(review): the signature line for this definition (original line 30) was
// lost in extraction; given the body it is presumably the raw factory
// ISerializer::CreateRaw() — confirm against upstream before editing.
{
    return new ISerializer();
}
34 
// NOTE(review): original lines 35 and 37 (the signature and the return
// statement) are missing from this extraction — presumably
// ISerializer::Create() returning a managed pointer that wraps CreateRaw()
// with Destroy() as deleter; confirm upstream.
{
}
39 
// NOTE(review): the signature line (original line 40) is missing from this
// extraction — presumably ISerializer::Destroy(ISerializer* serializer);
// confirm upstream. Body deletes the instance created by the raw factory.
{
    delete serializer;
}
44 
// NOTE(review): the signature line (original line 45) is missing from this
// extraction — presumably ISerializer::Serialize(const armnn::INetwork&);
// confirm upstream. Forwards to the pimpl implementation.
{
    pSerializerImpl->Serialize(inNetwork);
}
49 
// Write the previously serialized network to the given output stream.
// Returns the pimpl's success flag.
bool ISerializer::SaveSerializedToStream(std::ostream& stream)
{
    return pSerializerImpl->SaveSerializedToStream(stream);
}
54 
// NOTE(review): this extraction dropped the function signature (original line
// 55), every case label (lines 59-80) and the default-branch statement (line
// 82) of this switch — it is presumably an armnn -> flatbuffer enum
// conversion helper (e.g. GetFlatBufferActivationFunction); the fragment
// below is not compilable as-is. Confirm against upstream before editing.
{
    switch (function)
    {
        default:
    }
}
85 
// NOTE(review): this extraction dropped the function signature (original line
// 86), the case labels (lines 90-92) and the default-branch statement (line
// 94) of this switch — presumably another armnn -> flatbuffer enum
// conversion helper. The fragment below is not compilable as-is; confirm
// against upstream before editing.
{
    switch (function)
    {
        default:
    }
}
97 
98 uint32_t SerializerStrategy::GetSerializedId(armnn::LayerGuid guid)
99 {
100  if (m_guidMap.empty())
101  {
102  m_guidMap.insert(std::make_pair(guid, m_layerId));
103  }
104  else if (m_guidMap.find(guid) == m_guidMap.end())
105  {
106  ++m_layerId;
107  m_guidMap.insert(std::make_pair(guid, m_layerId));
108 
109  return m_layerId;
110  }
111  return m_guidMap[guid];
112 }
113 
114 // Build FlatBuffer for Input Layer
115 void SerializerStrategy::SerializeInputLayer(const armnn::IConnectableLayer* layer, LayerBindingId id, const char* name)
116 {
117  IgnoreUnused(name);
118 
119  // Create FlatBuffer BaseLayer
120  auto flatBufferInputBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Input);
121 
122  // Create FlatBuffer BindableBaseLayer
123  auto flatBufferInputBindableBaseLayer = serializer::CreateBindableLayerBase(m_flatBufferBuilder,
124  flatBufferInputBaseLayer,
125  id);
126  // Push layer binding id to outputIds.
127  m_inputIds.push_back(id);
128 
129  // Create the FlatBuffer InputLayer
130  auto flatBufferInputLayer = serializer::CreateInputLayer(m_flatBufferBuilder, flatBufferInputBindableBaseLayer);
131 
132  // Add the AnyLayer to the FlatBufferLayers
133  CreateAnyLayer(flatBufferInputLayer.o, serializer::Layer::Layer_InputLayer);
134 }
135 
136 // Build FlatBuffer for Output Layer
137 void SerializerStrategy::SerializeOutputLayer(const armnn::IConnectableLayer* layer,
138  LayerBindingId id, const char* name)
139 {
140  IgnoreUnused(name);
141 
142  // Create FlatBuffer BaseLayer
143  auto flatBufferOutputBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Output);
144 
145  // Create FlatBuffer BindableBaseLayer
146  auto flatBufferOutputBindableBaseLayer = serializer::CreateBindableLayerBase(m_flatBufferBuilder,
147  flatBufferOutputBaseLayer,
148  id);
149  // Push layer binding id to outputIds.
150  m_outputIds.push_back(id);
151 
152  // Create the FlatBuffer OutputLayer
153  auto flatBufferOutputLayer = serializer::CreateOutputLayer(m_flatBufferBuilder, flatBufferOutputBindableBaseLayer);
154  // Add the AnyLayer to the FlatBufferLayers
155  CreateAnyLayer(flatBufferOutputLayer.o, serializer::Layer::Layer_OutputLayer);
156 }
157 
// Build FlatBuffer for Activation Layer
void SerializerStrategy::SerializeActivationLayer(const armnn::IConnectableLayer* layer,
                                                  const armnn::ActivationDescriptor& descriptor,
                                                  const char* name)
{
    IgnoreUnused(name);

    // Create FlatBuffer BaseLayer
    auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Activation);

    // Create the FlatBuffer ActivationDescriptor
    // NOTE(review): original line 170 is missing from this extraction —
    // presumably the serialized activation function argument
    // (descriptor.m_Function converted to the flatbuffer enum); confirm upstream.
    auto flatBufferDescriptor = CreateActivationDescriptor(m_flatBufferBuilder,
                                                           descriptor.m_A,
                                                           descriptor.m_B);

    // Create the FlatBuffer ActivationLayer
    // NOTE(review): local is named "Addition" but actually holds the Activation layer.
    auto flatBufferAdditionLayer = CreateActivationLayer(m_flatBufferBuilder,
                                                         flatBufferBaseLayer,
                                                         flatBufferDescriptor);

    // Add the AnyLayer to the FlatBufferLayers
    CreateAnyLayer(flatBufferAdditionLayer.o, serializer::Layer::Layer_ActivationLayer);
}
182 
183 // Build FlatBuffer for Addition Layer
184 void SerializerStrategy::SerializeAdditionLayer(const armnn::IConnectableLayer* layer, const char* name)
185 {
186  IgnoreUnused(name);
187 
188  // Create FlatBuffer BaseLayer
189  auto flatBufferAdditionBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Addition);
190 
191  // Create the FlatBuffer AdditionLayer
192  auto flatBufferAdditionLayer = serializer::CreateAdditionLayer(m_flatBufferBuilder, flatBufferAdditionBaseLayer);
193 
194  // Add the AnyLayer to the FlatBufferLayers
195  CreateAnyLayer(flatBufferAdditionLayer.o, serializer::Layer::Layer_AdditionLayer);
196 }
197 
// Build FlatBuffer for ArgMinMax Layer
void SerializerStrategy::SerializeArgMinMaxLayer(const armnn::IConnectableLayer *layer,
                                                 const armnn::ArgMinMaxDescriptor& descriptor,
                                                 const char *name)
{
    IgnoreUnused(name);

    // Create FlatBuffer BaseLayer
    auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_ArgMinMax);

    // Create FlatBuffer Descriptor
    // NOTE(review): original line 210 is missing from this extraction —
    // presumably the serialized arg-min/max function argument
    // (descriptor.m_Function converted to the flatbuffer enum); confirm upstream.
    auto flatBufferDescriptor = CreateArgMinMaxDescriptor(m_flatBufferBuilder,
                                                          descriptor.m_Axis);

    // Create FlatBuffer ArgMinMaxLayer
    auto flatBufferLayer = CreateArgMinMaxLayer(m_flatBufferBuilder,
                                                flatBufferBaseLayer,
                                                flatBufferDescriptor);

    // NOTE(review): original line 218 (presumably the CreateAnyLayer
    // registration of flatBufferLayer) is missing from this extraction.
}
220 
// Build FlatBuffer for BatchToSpaceNd Layer
void SerializerStrategy::SerializeBatchToSpaceNdLayer(const armnn::IConnectableLayer* layer,
                                                      const armnn::BatchToSpaceNdDescriptor& descriptor,
                                                      const char* name)
{
    IgnoreUnused(name);

    // Create FlatBuffer BaseLayer
    auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_BatchToSpaceNd);

    // Flatten the (begin, end) crop pairs into a single uint vector.
    std::vector<unsigned int> crops;
    crops.reserve(descriptor.m_Crops.size() * 2);
    for (auto& crop : descriptor.m_Crops)
    {
        crops.push_back(crop.first);
        crops.push_back(crop.second);
    }

    // NOTE(review): original line 243 is missing from this extraction —
    // presumably the serialized data layout argument closing this call
    // (GetFlatBufferDataLayout(descriptor.m_DataLayout)); confirm upstream.
    auto flatBufferDescriptor =
        CreateBatchToSpaceNdDescriptor(m_flatBufferBuilder,
                                       m_flatBufferBuilder.CreateVector(descriptor.m_BlockShape),
                                       m_flatBufferBuilder.CreateVector(crops),

    auto flatBufferLayer = serializer::CreateBatchToSpaceNdLayer(m_flatBufferBuilder,
                                                                 flatBufferBaseLayer,
                                                                 flatBufferDescriptor);

    // NOTE(review): original line 249 (presumably the CreateAnyLayer
    // registration) is missing from this extraction.
}
251 
// Serialize a BatchNormalization layer: descriptor (epsilon, data layout)
// plus the four constant tensors in fixed order: mean, variance, beta, gamma.
void SerializerStrategy::SerializeBatchNormalizationLayer(
    const armnn::IConnectableLayer* layer,
    const armnn::BatchNormalizationDescriptor& batchNormDescriptor,
    const std::vector<armnn::ConstTensor>& constants,
    const char* name)
{
    IgnoreUnused(name);

    // Fixed ordering of the constants vector.
    const armnn::ConstTensor& mean = constants[0];
    const armnn::ConstTensor& variance = constants[1];
    const armnn::ConstTensor& beta = constants[2];
    const armnn::ConstTensor& gamma = constants[3];

    auto fbBatchNormalizationBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_BatchNormalization);
    auto fbBatchNormalizationDescriptor = serializer::CreateBatchNormalizationDescriptor(
        m_flatBufferBuilder,
        batchNormDescriptor.m_Eps,
        GetFlatBufferDataLayout(batchNormDescriptor.m_DataLayout));

    auto fbMeanConstTensorInfo = CreateConstTensorInfo(mean);
    auto fbVarianceConstTensorInfo = CreateConstTensorInfo(variance);
    auto fbBetaConstTensorInfo = CreateConstTensorInfo(beta);
    auto fbGammaConstTensorInfo = CreateConstTensorInfo(gamma);
    auto fbBatchNormalizationLayer = serializer::CreateBatchNormalizationLayer(m_flatBufferBuilder,
                                                                               fbBatchNormalizationBaseLayer,
                                                                               fbBatchNormalizationDescriptor,
                                                                               fbMeanConstTensorInfo,
                                                                               fbVarianceConstTensorInfo,
                                                                               fbBetaConstTensorInfo,
                                                                               fbGammaConstTensorInfo);

    // NOTE(review): original line 283 (presumably the CreateAnyLayer
    // registration) is missing from this extraction.
}
285 
// Serialize a Cast layer (descriptor-less; the target type lives in the
// output tensor info of the base layer).
void SerializerStrategy::SerializeCastLayer(const armnn::IConnectableLayer* layer,
                                            const char* name)
{
    IgnoreUnused(name);

    auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Cast);
    auto fbCastLayer = serializer::CreateCastLayer(m_flatBufferBuilder, fbBaseLayer);
    // NOTE(review): original line 293 (presumably the CreateAnyLayer
    // registration of fbCastLayer) is missing from this extraction.
}
295 
// Serialize a ChannelShuffle layer: axis and group count.
void SerializerStrategy::SerializeChannelShuffleLayer(const armnn::IConnectableLayer* layer,
                                                      const armnn::ChannelShuffleDescriptor& descriptor,
                                                      const char* name)
{
    IgnoreUnused(name);
    auto fbDescriptor = CreateChannelShuffleDescriptor(m_flatBufferBuilder,
                                                       descriptor.m_Axis,
                                                       descriptor.m_NumGroups);
    // NOTE(review): original line 304 (creation of fbBaseLayer, referenced
    // below) and line 306 (the CreateAnyLayer registration) are missing from
    // this extraction — the fragment does not compile as-is; confirm upstream.
    auto fbChannelShuffleLayer = serializer::CreateChannelShuffleLayer(m_flatBufferBuilder, fbBaseLayer, fbDescriptor);
}
308 
// Serialize a Comparison layer (Equal/Greater/etc.).
void SerializerStrategy::SerializeComparisonLayer(const armnn::IConnectableLayer* layer,
                                                  const armnn::ComparisonDescriptor& descriptor,
                                                  const char* name)
{
    IgnoreUnused(name);

    // NOTE(review): original lines 315 (creation of fbBaseLayer), 318 (the
    // serialized comparison operation argument) and 321 (the CreateAnyLayer
    // registration) are missing from this extraction — the fragment does not
    // compile as-is; confirm upstream.
    auto fbDescriptor = serializer::CreateComparisonDescriptor(
        m_flatBufferBuilder,

    auto fbLayer = serializer::CreateComparisonLayer(m_flatBufferBuilder, fbBaseLayer, fbDescriptor);
}
323 
// Build FlatBuffer for Constant Layer
void SerializerStrategy::SerializeConstantLayer(const armnn::IConnectableLayer* layer,
                                                const std::vector<armnn::ConstTensor>& constants,
                                                const char* name)
{
    IgnoreUnused(name);

    // The single constant tensor holding the layer's value.
    armnn::ConstTensor input = constants[0];

    // Create FlatBuffer BaseLayer
    auto flatBufferConstantBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Constant);

    auto flatBufferConstTensorInfo = CreateConstTensorInfo(input);

    // Create the FlatBuffer ConstantLayer
    auto flatBufferLayer = CreateConstantLayer(m_flatBufferBuilder,
                                               flatBufferConstantBaseLayer,
                                               flatBufferConstTensorInfo);

    // Add the AnyLayer to the FlatBufferLayers
    // NOTE(review): original line 344 (the CreateAnyLayer call itself) is
    // missing from this extraction.
}
346 
// Build FlatBuffer for Convolution2dLayer
void SerializerStrategy::SerializeConvolution2dLayer(const armnn::IConnectableLayer* layer,
                                                     const armnn::Convolution2dDescriptor& descriptor,
                                                     const std::vector<armnn::ConstTensor>& constants,
                                                     const char* name)
{
    IgnoreUnused(name);

    // constants[0] is always the weights tensor; constants[1], if present, is the bias.
    const armnn::ConstTensor weights = constants[0];

    // Create FlatBuffer BaseLayer
    auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Convolution2d);

    // NOTE(review): original line 370 is missing from this extraction —
    // presumably the serialized data layout argument closing this call
    // (GetFlatBufferDataLayout(descriptor.m_DataLayout)); confirm upstream.
    auto flatBufferDescriptor = CreateConvolution2dDescriptor(m_flatBufferBuilder,
                                                              descriptor.m_PadLeft,
                                                              descriptor.m_PadRight,
                                                              descriptor.m_PadTop,
                                                              descriptor.m_PadBottom,
                                                              descriptor.m_StrideX,
                                                              descriptor.m_StrideY,
                                                              descriptor.m_DilationX,
                                                              descriptor.m_DilationY,
                                                              descriptor.m_BiasEnabled,
    auto flatBufferWeightsConstTensorInfo = CreateConstTensorInfo(weights);
    flatbuffers::Offset<serializer::ConstTensor> flatBufferBiasesConstTensorInfo;

    // Bias is optional: only serialized when a second constant is supplied.
    if (constants.size() > 1)
    {
        const armnn::ConstTensor biases = constants[1];
        flatBufferBiasesConstTensorInfo = CreateConstTensorInfo(biases);
    }

    // Create the FlatBuffer Convolution2dLayer
    auto flatBufferLayer = CreateConvolution2dLayer(m_flatBufferBuilder,
                                                    flatBufferBaseLayer,
                                                    flatBufferDescriptor,
                                                    flatBufferWeightsConstTensorInfo,
                                                    flatBufferBiasesConstTensorInfo);

    // Add the AnyLayer to the FlatBufferLayers
    // NOTE(review): original line 388 (the CreateAnyLayer call itself) is
    // missing from this extraction.
}
390 
// Build FlatBuffer for Convolution3dLayer
void SerializerStrategy::SerializeConvolution3dLayer(const armnn::IConnectableLayer* layer,
                                                     const armnn::Convolution3dDescriptor& descriptor,
                                                     const char* name)
{
    IgnoreUnused(name);

    // Create FlatBuffer BaseLayer
    auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Convolution3d);

    // NOTE(review): original line 415 is missing from this extraction —
    // presumably the serialized data layout argument closing this call
    // (GetFlatBufferDataLayout(descriptor.m_DataLayout)); confirm upstream.
    auto flatBufferDescriptor = CreateConvolution3dDescriptor(m_flatBufferBuilder,
                                                              descriptor.m_PadLeft,
                                                              descriptor.m_PadRight,
                                                              descriptor.m_PadTop,
                                                              descriptor.m_PadBottom,
                                                              descriptor.m_PadFront,
                                                              descriptor.m_PadBack,
                                                              descriptor.m_StrideX,
                                                              descriptor.m_StrideY,
                                                              descriptor.m_StrideZ,
                                                              descriptor.m_DilationX,
                                                              descriptor.m_DilationY,
                                                              descriptor.m_DilationZ,
                                                              descriptor.m_BiasEnabled,

    // Create the FlatBuffer Convolution3dLayer
    auto flatBufferLayer = CreateConvolution3dLayer(m_flatBufferBuilder,
                                                    flatBufferBaseLayer,
                                                    flatBufferDescriptor);

    // Add the AnyLayer to the FlatBufferLayers
    // NOTE(review): original line 423 (the CreateAnyLayer call itself) is
    // missing from this extraction.
}
425 
// Serialize a DepthToSpace layer: block size (and, upstream, data layout).
void SerializerStrategy::SerializeDepthToSpaceLayer(const armnn::IConnectableLayer* layer,
                                                    const armnn::DepthToSpaceDescriptor& descriptor,
                                                    const char* name)
{
    IgnoreUnused(name);

    // NOTE(review): original lines 432 (creation of fbBaseLayer), 435 (the
    // serialized data layout argument) and 439 (the CreateAnyLayer
    // registration) are missing from this extraction — the fragment does not
    // compile as-is; confirm upstream.
    auto fbDescriptor = CreateDepthToSpaceDescriptor(m_flatBufferBuilder,
                                                     descriptor.m_BlockSize,

    auto fbLayer = serializer::CreateDepthToSpaceLayer(m_flatBufferBuilder, fbBaseLayer, fbDescriptor);

}
441 
// Serialize a DepthwiseConvolution2d layer: descriptor, weights and optional bias.
void SerializerStrategy::SerializeDepthwiseConvolution2dLayer(const armnn::IConnectableLayer* layer,
                                                              const armnn::DepthwiseConvolution2dDescriptor& descriptor,
                                                              const std::vector<armnn::ConstTensor>& constants,
                                                              const char* name)
{
    IgnoreUnused(name);

    // constants[0] is always the weights tensor; constants[1], if present, is the bias.
    const armnn::ConstTensor& weights = constants[0];

    // NOTE(review): original lines 451 (creation of fbBaseLayer), 462 (the
    // serialized data layout argument closing the descriptor call) and 479
    // (the CreateAnyLayer registration) are missing from this extraction —
    // the fragment does not compile as-is; confirm upstream.
    auto fbDescriptor = CreateDepthwiseConvolution2dDescriptor(m_flatBufferBuilder,
                                                               descriptor.m_PadLeft,
                                                               descriptor.m_PadRight,
                                                               descriptor.m_PadTop,
                                                               descriptor.m_PadBottom,
                                                               descriptor.m_StrideX,
                                                               descriptor.m_StrideY,
                                                               descriptor.m_DilationX,
                                                               descriptor.m_DilationY,
                                                               descriptor.m_BiasEnabled,

    flatbuffers::Offset<serializer::ConstTensor> fbWeightsConstTensorInfo = CreateConstTensorInfo(weights);
    flatbuffers::Offset<serializer::ConstTensor> fbBiasesConstTensorInfo;

    // Bias is optional: only serialized when a second constant is supplied.
    if (constants.size() > 1)
    {
        const armnn::ConstTensor& biases = constants[1];
        fbBiasesConstTensorInfo = CreateConstTensorInfo(biases);
    }

    auto flatBufferLayer = CreateDepthwiseConvolution2dLayer(m_flatBufferBuilder,
                                                             fbBaseLayer,
                                                             fbDescriptor,
                                                             fbWeightsConstTensorInfo,
                                                             fbBiasesConstTensorInfo);

}
481 
// Serialize a Dequantize layer (descriptor-less).
void SerializerStrategy::SerializeDequantizeLayer(const armnn::IConnectableLayer* layer,
                                                  const char* name)
{
    IgnoreUnused(name);

    auto fbDequantizeBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Dequantize);
    auto fbDequantizeLayer = serializer::CreateDequantizeLayer(m_flatBufferBuilder, fbDequantizeBaseLayer);

    // NOTE(review): original line 490 (presumably the CreateAnyLayer
    // registration of fbDequantizeLayer) is missing from this extraction.
}
492 
// Serialize a DetectionPostProcess layer: NMS parameters plus the constant
// anchors tensor (constants[0]).
void SerializerStrategy::SerializeDetectionPostProcessLayer(const armnn::IConnectableLayer* layer,
                                                            const armnn::DetectionPostProcessDescriptor& descriptor,
                                                            const std::vector<armnn::ConstTensor>& constants,
                                                            const char* name)
{
    IgnoreUnused(name);

    const armnn::ConstTensor& anchors = constants[0];

    // NOTE(review): original line 502 (creation of fbBaseLayer, referenced
    // below) and line 523 (the CreateAnyLayer registration) are missing from
    // this extraction — the fragment does not compile as-is; confirm upstream.
    auto fbDescriptor = CreateDetectionPostProcessDescriptor(m_flatBufferBuilder,
                                                             descriptor.m_MaxDetections,
                                                             descriptor.m_MaxClassesPerDetection,
                                                             descriptor.m_DetectionsPerClass,
                                                             descriptor.m_NmsScoreThreshold,
                                                             descriptor.m_NmsIouThreshold,
                                                             descriptor.m_NumClasses,
                                                             descriptor.m_UseRegularNms,
                                                             descriptor.m_ScaleX,
                                                             descriptor.m_ScaleY,
                                                             descriptor.m_ScaleW,
                                                             descriptor.m_ScaleH);

    flatbuffers::Offset<serializer::ConstTensor> fbAnchorsConstTensorInfo = CreateConstTensorInfo(anchors);

    auto flatBufferLayer = CreateDetectionPostProcessLayer(m_flatBufferBuilder,
                                                           fbBaseLayer,
                                                           fbDescriptor,
                                                           fbAnchorsConstTensorInfo);

}
525 
// Serialize a Division layer (descriptor-less).
void SerializerStrategy::SerializeDivisionLayer(const armnn::IConnectableLayer* layer, const char* name)
{
    IgnoreUnused(name);

    auto fbDivisionBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Division);
    auto fbDivisionLayer = serializer::CreateDivisionLayer(m_flatBufferBuilder, fbDivisionBaseLayer);

    // NOTE(review): original line 533 (presumably the CreateAnyLayer
    // registration of fbDivisionLayer) is missing from this extraction.
}
535 
// Serialize an ElementwiseUnary layer (Abs/Neg/Rsqrt/etc.).
void SerializerStrategy::SerializeElementwiseUnaryLayer(const armnn::IConnectableLayer* layer,
                                                        const armnn::ElementwiseUnaryDescriptor& descriptor,
                                                        const char* name)
{
    IgnoreUnused(name);

    // NOTE(review): original lines 542-543 (creation of fbBaseLayer and the
    // opening of the fbDescriptor call), 545 (the serialized unary operation
    // argument) and 548 (the CreateAnyLayer registration) are missing from
    // this extraction — the fragment does not compile as-is; confirm upstream.
        m_flatBufferBuilder,

    auto fbLayer = serializer::CreateElementwiseUnaryLayer(m_flatBufferBuilder, fbBaseLayer, fbDescriptor);
}
550 
// Serialize a Fill layer: the scalar fill value.
void SerializerStrategy::SerializeFillLayer(const armnn::IConnectableLayer* layer,
                                            const armnn::FillDescriptor& fillDescriptor,
                                            const char* name)
{
    IgnoreUnused(name);

    auto fbFillBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Fill);

    auto fbDescriptor = serializer::CreateFillDescriptor(m_flatBufferBuilder, fillDescriptor.m_Value);

    auto fbFillLayer = serializer::CreateFillLayer(m_flatBufferBuilder, fbFillBaseLayer, fbDescriptor);

    // NOTE(review): original line 563 (presumably the CreateAnyLayer
    // registration of fbFillLayer) is missing from this extraction.
}
565 
566 void SerializerStrategy::SerializeFloorLayer(const armnn::IConnectableLayer *layer, const char *name)
567 {
568  IgnoreUnused(name);
569 
570  auto flatBufferFloorBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Floor);
571  auto flatBufferFloorLayer = serializer::CreateFloorLayer(m_flatBufferBuilder, flatBufferFloorBaseLayer);
572 
573  CreateAnyLayer(flatBufferFloorLayer.o, serializer::Layer::Layer_FloorLayer);
574 }
575 
// Serialize a Gather layer: the gather axis.
void SerializerStrategy::SerializeGatherLayer(const armnn::IConnectableLayer* layer,
                                              const armnn::GatherDescriptor& gatherDescriptor,
                                              const char* name)
{
    IgnoreUnused(name);

    auto fbGatherDescriptor = CreateGatherDescriptor(m_flatBufferBuilder,
                                                     gatherDescriptor.m_Axis);
    auto fbGatherBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Gather);
    auto flatBufferLayer = serializer::CreateGatherLayer(m_flatBufferBuilder, fbGatherBaseLayer, fbGatherDescriptor);

    // NOTE(review): original line 587 (presumably the CreateAnyLayer
    // registration of flatBufferLayer) is missing from this extraction.
}
589 
// Serialize an InstanceNormalization layer: gamma, beta, epsilon and data layout.
void SerializerStrategy::SerializeInstanceNormalizationLayer(
    const armnn::IConnectableLayer* layer,
    const armnn::InstanceNormalizationDescriptor& instanceNormalizationDescriptor,
    const char* name)
{
    IgnoreUnused(name);

    // NOTE(review): original lines 597 (the opening of the fbDescriptor
    // creation call), 604 (creation of fbBaseLayer) and 607 (the
    // CreateAnyLayer registration) are missing from this extraction — the
    // fragment does not compile as-is; confirm upstream.
        m_flatBufferBuilder,
        instanceNormalizationDescriptor.m_Gamma,
        instanceNormalizationDescriptor.m_Beta,
        instanceNormalizationDescriptor.m_Eps,
        GetFlatBufferDataLayout(instanceNormalizationDescriptor.m_DataLayout));

    auto fbLayer = serializer::CreateInstanceNormalizationLayer(m_flatBufferBuilder, fbBaseLayer, fbDescriptor);

}
609 
// Serialize an L2Normalization layer: data layout and epsilon.
void SerializerStrategy::SerializeL2NormalizationLayer(const armnn::IConnectableLayer* layer,
                                                       const armnn::L2NormalizationDescriptor& l2NormalizationDescriptor,
                                                       const char* name)
{
    IgnoreUnused(name);

    // Create FlatBuffer BaseLayer
    // NOTE(review): original lines 617 (creation of fbBaseLayer), 620 (the
    // opening of the fbDescriptor creation call) and 628 (the CreateAnyLayer
    // registration) are missing from this extraction — the fragment does not
    // compile as-is; confirm upstream.

    // Create the FlatBuffer L2Normalization Descriptor
        m_flatBufferBuilder,
        GetFlatBufferDataLayout(l2NormalizationDescriptor.m_DataLayout),
        l2NormalizationDescriptor.m_Eps);

    // Create FlatBuffer layer
    auto fbLayer = serializer::CreateL2NormalizationLayer(m_flatBufferBuilder, fbBaseLayer, fbDescriptor);

}
630 
// Serialize a LogicalBinary layer (LogicalAnd/LogicalOr).
void SerializerStrategy::SerializeLogicalBinaryLayer(const armnn::IConnectableLayer* layer,
                                                     const armnn::LogicalBinaryDescriptor& descriptor,
                                                     const char* name)
{
    IgnoreUnused(name);

    // NOTE(review): original lines 637 (creation of fbBaseLayer), 640 (the
    // serialized logical operation argument) and 643 (the CreateAnyLayer
    // registration) are missing from this extraction — the fragment does not
    // compile as-is; confirm upstream.
    auto fbDescriptor = serializer::CreateLogicalBinaryDescriptor(
        m_flatBufferBuilder,

    auto fbLayer = serializer::CreateLogicalBinaryLayer(m_flatBufferBuilder, fbBaseLayer, fbDescriptor);
}
645 
646 void SerializerStrategy::SerializeLogSoftmaxLayer(const armnn::IConnectableLayer* layer,
647  const armnn::LogSoftmaxDescriptor& logSoftmaxDescriptor,
648  const char* name)
649 {
650  IgnoreUnused(name);
651 
652  // Create FlatBuffer BaseLayer
653  auto flatBufferLogSoftmaxBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_LogSoftmax);
654 
655  // Create the FlatBuffer LogSoftmaxDescriptor
656  auto flatBufferLogSoftmaxDesc =
657  serializer::CreateLogSoftmaxDescriptor(m_flatBufferBuilder,
658  logSoftmaxDescriptor.m_Beta,
659  logSoftmaxDescriptor.m_Axis);
660 
661  // Create the FlatBuffer LogSoftmaxLayer
662  auto flatBufferLogSoftmaxLayer =
663  serializer::CreateLogSoftmaxLayer(m_flatBufferBuilder,
664  flatBufferLogSoftmaxBaseLayer,
665  flatBufferLogSoftmaxDesc);
666 
667  CreateAnyLayer(flatBufferLogSoftmaxLayer.o, serializer::Layer::Layer_LogSoftmaxLayer);
668 }
669 
// Serialize an Lstm layer: descriptor flags plus the constants vector, whose
// layout depends on the descriptor — nine mandatory tensors first, then
// optional groups gated by CIFG / peephole / projection / layer-norm flags.
void SerializerStrategy::SerializeLstmLayer(const armnn::IConnectableLayer* layer,
                                            const armnn::LstmDescriptor& descriptor,
                                            const std::vector<armnn::ConstTensor>& constants,
                                            const char* name)
{
    IgnoreUnused(name);

    auto fbLstmBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Lstm);

    auto fbLstmDescriptor = serializer::CreateLstmDescriptor(
        m_flatBufferBuilder,
        descriptor.m_ActivationFunc,
        descriptor.m_ClippingThresCell,
        descriptor.m_ClippingThresProj,
        descriptor.m_CifgEnabled,
        descriptor.m_PeepholeEnabled,
        descriptor.m_ProjectionEnabled,
        descriptor.m_LayerNormEnabled);

    // Index for constants vector
    std::size_t i = 0;

    // Get mandatory/basic input parameters
    auto inputToForgetWeights = CreateConstTensorInfo(constants[i++]); //InputToForgetWeights
    auto inputToCellWeights = CreateConstTensorInfo(constants[i++]); //InputToCellWeights
    auto inputToOutputWeights = CreateConstTensorInfo(constants[i++]); //InputToOutputWeights
    auto recurrentToForgetWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToForgetWeights
    auto recurrentToCellWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToCellWeights
    auto recurrentToOutputWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToOutputWeights
    auto forgetGateBias = CreateConstTensorInfo(constants[i++]); //ForgetGateBias
    auto cellBias = CreateConstTensorInfo(constants[i++]); //CellBias
    auto outputGateBias = CreateConstTensorInfo(constants[i++]); //OutputGateBias

    //Define optional parameters, these will be set depending on configuration in Lstm descriptor
    flatbuffers::Offset<serializer::ConstTensor> inputToInputWeights;
    flatbuffers::Offset<serializer::ConstTensor> recurrentToInputWeights;
    flatbuffers::Offset<serializer::ConstTensor> cellToInputWeights;
    flatbuffers::Offset<serializer::ConstTensor> inputGateBias;
    flatbuffers::Offset<serializer::ConstTensor> projectionWeights;
    flatbuffers::Offset<serializer::ConstTensor> projectionBias;
    flatbuffers::Offset<serializer::ConstTensor> cellToForgetWeights;
    flatbuffers::Offset<serializer::ConstTensor> cellToOutputWeights;
    flatbuffers::Offset<serializer::ConstTensor> inputLayerNormWeights;
    flatbuffers::Offset<serializer::ConstTensor> forgetLayerNormWeights;
    flatbuffers::Offset<serializer::ConstTensor> cellLayerNormWeights;
    flatbuffers::Offset<serializer::ConstTensor> outputLayerNormWeights;

    // Input-gate tensors only exist when CIFG (coupled input-forget gate) is off.
    if (!descriptor.m_CifgEnabled)
    {
        inputToInputWeights = CreateConstTensorInfo(constants[i++]); //InputToInputWeights
        recurrentToInputWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToInputWeights
        inputGateBias = CreateConstTensorInfo(constants[i++]); //InputGateBias
    }

    if (descriptor.m_PeepholeEnabled)
    {
        if (!descriptor.m_CifgEnabled)
        {
            cellToInputWeights = CreateConstTensorInfo(constants[i++]); //CellToInputWeights
        }
        cellToForgetWeights = CreateConstTensorInfo(constants[i++]); //CellToForgetWeights
        cellToOutputWeights = CreateConstTensorInfo(constants[i++]); //CellToOutputWeights
    }

    if (descriptor.m_ProjectionEnabled)
    {
        projectionWeights = CreateConstTensorInfo(constants[i++]); //ProjectionWeights
        projectionBias = CreateConstTensorInfo(constants[i++]); //ProjectionBias
    }

    if (descriptor.m_LayerNormEnabled)
    {
        if (!descriptor.m_CifgEnabled)
        {
            inputLayerNormWeights = CreateConstTensorInfo(constants[i++]); //InputLayerNormWeights
        }
        forgetLayerNormWeights = CreateConstTensorInfo(constants[i++]); //ForgetLayerNormWeights
        cellLayerNormWeights = CreateConstTensorInfo(constants[i++]); //CellLayerNormWeights
        outputLayerNormWeights = CreateConstTensorInfo(constants[i++]); //OutputLayerNormWeights
    }

    auto fbLstmParams = serializer::CreateLstmInputParams(
        m_flatBufferBuilder,
        inputToForgetWeights,
        inputToCellWeights,
        inputToOutputWeights,
        recurrentToForgetWeights,
        recurrentToCellWeights,
        recurrentToOutputWeights,
        forgetGateBias,
        cellBias,
        outputGateBias,
        inputToInputWeights,
        recurrentToInputWeights,
        cellToInputWeights,
        inputGateBias,
        projectionWeights,
        projectionBias,
        cellToForgetWeights,
        cellToOutputWeights,
        inputLayerNormWeights,
        forgetLayerNormWeights,
        cellLayerNormWeights,
        outputLayerNormWeights);

    auto fbLstmLayer = serializer::CreateLstmLayer(
        m_flatBufferBuilder,
        fbLstmBaseLayer,
        fbLstmDescriptor,
        fbLstmParams);

    // NOTE(review): original line 783 (presumably the CreateAnyLayer
    // registration of fbLstmLayer) is missing from this extraction.
}
785 
// Serialize a Maximum layer (descriptor-less).
void SerializerStrategy::SerializeMaximumLayer(const armnn::IConnectableLayer* layer, const char* name)
{
    IgnoreUnused(name);

    auto fbMaximumBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Maximum);
    auto fbMaximumLayer = serializer::CreateMaximumLayer(m_flatBufferBuilder, fbMaximumBaseLayer);

    // NOTE(review): original line 793 (presumably the CreateAnyLayer
    // registration of fbMaximumLayer) is missing from this extraction.
}
795 
// Serialize a Mean layer: reduction axes and the keep-dims flag.
void SerializerStrategy::SerializeMeanLayer(const armnn::IConnectableLayer* layer,
                                            const armnn::MeanDescriptor& descriptor,
                                            const char* name)
{
    IgnoreUnused(name);

    auto fbMeanBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Mean);
    auto fbMeanDescriptor = serializer::CreateMeanDescriptor(m_flatBufferBuilder,
                                                             m_flatBufferBuilder.CreateVector(descriptor.m_Axis),
                                                             descriptor.m_KeepDims);

    auto fbMeanLayer = serializer::CreateMeanLayer(m_flatBufferBuilder,
                                                   fbMeanBaseLayer,
                                                   fbMeanDescriptor);

    // NOTE(review): original line 811 (presumably the CreateAnyLayer
    // registration of fbMeanLayer) is missing from this extraction.
}
813 
// Serialize a Minimum layer (descriptor-less).
void SerializerStrategy::SerializeMinimumLayer(const armnn::IConnectableLayer* layer, const char* name)
{
    IgnoreUnused(name);

    auto fbMinimumBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Minimum);
    auto fbMinimumLayer = serializer::CreateMinimumLayer(m_flatBufferBuilder, fbMinimumBaseLayer);

    // NOTE(review): original line 821 (presumably the CreateAnyLayer
    // registration of fbMinimumLayer) is missing from this extraction.
}
823 
// Serialize a Merge layer (descriptor-less).
void SerializerStrategy::SerializeMergeLayer(const armnn::IConnectableLayer* layer, const char* name)
{
    IgnoreUnused(name);

    auto fbMergeBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Merge);
    auto fbMergeLayer = serializer::CreateMergeLayer(m_flatBufferBuilder, fbMergeBaseLayer);

    // NOTE(review): original line 831 (presumably the CreateAnyLayer
    // registration of fbMergeLayer) is missing from this extraction.
}
833 
// Serialize a Concat layer: each view's origin coordinates plus axis/shape metadata.
void SerializerStrategy::SerializeConcatLayer(const armnn::IConnectableLayer* layer,
                                              const armnn::ConcatDescriptor& concatDescriptor,
                                              const char* name)
{
    IgnoreUnused(name);

    auto flatBufferConcatBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Concat);

    // One UintVector per view, holding that view's origin in each dimension.
    std::vector<flatbuffers::Offset<UintVector>> views;
    for (unsigned int v = 0; v < concatDescriptor.GetNumViews(); ++v)
    {
        const uint32_t* origin = concatDescriptor.GetViewOrigin(v);
        std::vector<uint32_t> origins;
        for (unsigned int d = 0; d < concatDescriptor.GetNumDimensions(); ++d)
        {
            origins.push_back(origin[d]);
        }
        auto view = m_flatBufferBuilder.CreateVector(origins);
        auto uintVector = CreateUintVector(m_flatBufferBuilder, view);
        views.push_back(uintVector);
    }

    auto flatBufferConcatDescriptor = CreateOriginsDescriptor(m_flatBufferBuilder,
                                                              concatDescriptor.GetConcatAxis(),
                                                              concatDescriptor.GetNumViews(),
                                                              concatDescriptor.GetNumDimensions(),
                                                              m_flatBufferBuilder.CreateVector(views));

    auto flatBufferLayer = CreateConcatLayer(m_flatBufferBuilder,
                                             flatBufferConcatBaseLayer,
                                             flatBufferConcatDescriptor);

    // NOTE(review): original line 866 (presumably the CreateAnyLayer
    // registration of flatBufferLayer) is missing from this extraction.
}
868 
// Serialize a Multiplication layer (descriptor-less).
void SerializerStrategy::SerializeMultiplicationLayer(const armnn::IConnectableLayer* layer, const char* name)
{
    IgnoreUnused(name);

    auto fbMultiplicationBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Multiplication);
    auto fbMultiplicationLayer = serializer::CreateMultiplicationLayer(m_flatBufferBuilder,
                                                                       fbMultiplicationBaseLayer);

    // NOTE(review): original line 877 (presumably the CreateAnyLayer
    // registration of fbMultiplicationLayer) is missing from this extraction.
}
879 
880 void SerializerStrategy::SerializePadLayer(const armnn::IConnectableLayer* layer,
881  const armnn::PadDescriptor& padDescriptor,
882  const char* name)
883 {
884  IgnoreUnused(name);
885 
886  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Pad);
887 
888  std::vector<unsigned int> padList;
889  for (auto& p: padDescriptor.m_PadList)
890  {
891  padList.push_back(p.first);
892  padList.push_back(p.second);
893  }
894 
895  auto flatBufferPadDesc = serializer::CreatePadDescriptor(m_flatBufferBuilder,
896  m_flatBufferBuilder.CreateVector(padList),
897  padDescriptor.m_PadValue,
898  GetFlatBufferPaddingMode(padDescriptor.m_PaddingMode));
899 
900  auto flatBufferPadLayer = serializer::CreatePadLayer(m_flatBufferBuilder,
901  flatBufferBaseLayer,
902  flatBufferPadDesc);
903 
904  CreateAnyLayer(flatBufferPadLayer.o, serializer::Layer::Layer_PadLayer);
905 }
906 
907 void SerializerStrategy::SerializePermuteLayer(const armnn::IConnectableLayer* layer,
908  const armnn::PermuteDescriptor& permuteDescriptor,
909  const char* name)
910 {
911  IgnoreUnused(name);
912 
913  // Create FlatBuffer BaseLayer
914  auto flatBufferPermuteBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Permute);
915 
916  std::vector<unsigned int> dimMappings;
917  for (unsigned int i=0; i<permuteDescriptor.m_DimMappings.GetSize(); ++i)
918  {
919  dimMappings.push_back(permuteDescriptor.m_DimMappings[i]);
920  }
921 
922  auto flatBufferPermuteDesc = serializer::CreatePermuteDescriptor(m_flatBufferBuilder,
923  m_flatBufferBuilder.CreateVector(dimMappings));
924 
925  // Create the FlatBuffer PermuteLayer
926  auto flatBufferPermuteLayer = serializer::CreatePermuteLayer(m_flatBufferBuilder,
927  flatBufferPermuteBaseLayer,
928  flatBufferPermuteDesc);
929 
930  // Add the AnyLayer to the FlatBufferLayers
931  CreateAnyLayer(flatBufferPermuteLayer.o, serializer::Layer::Layer_PermuteLayer);
932 }
933 
934 // Build FlatBuffer for Rank Layer
935 void SerializerStrategy::SerializeRankLayer(const armnn::IConnectableLayer* layer,
936  const char* name)
937 {
938  IgnoreUnused(name);
939  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Rank);
940  auto flatBufferRankLayer = serializer::CreateRankLayer(m_flatBufferBuilder, flatBufferBaseLayer);
941 
942  CreateAnyLayer(flatBufferRankLayer.o, serializer::Layer::Layer_RankLayer);
943 }
944 
// Serialize a Reduce layer: keep-dims flag, reduction axes and (upstream) the
// reduce operation. The name parameter is intentionally unnamed/unused.
void SerializerStrategy::SerializeReduceLayer(const armnn::IConnectableLayer* layer,
                                              const armnn::ReduceDescriptor& reduceDescriptor,
                                              const char*)
{
    auto fbReduceBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Reduce);
    // NOTE(review): original line 953 is missing from this extraction —
    // presumably the serialized reduce operation argument closing this call
    // (GetFlatBufferReduceOperation(reduceDescriptor.m_ReduceOperation));
    // confirm upstream.
    auto fbDescriptor = CreateReduceDescriptor(m_flatBufferBuilder,
                                               reduceDescriptor.m_KeepDims,
                                               m_flatBufferBuilder.CreateVector(reduceDescriptor.m_vAxis),
    auto fbReduceLayer = serializer::CreateReduceLayer(m_flatBufferBuilder,
                                                       fbReduceBaseLayer,
                                                       fbDescriptor);

    // NOTE(review): original line 958 (presumably the CreateAnyLayer
    // registration of fbReduceLayer) is missing from this extraction.
}
960 
961 // Build FlatBuffer for Reshape Layer
962 void SerializerStrategy::SerializeReshapeLayer(const armnn::IConnectableLayer* layer,
963  const armnn::ReshapeDescriptor& reshapeDescriptor,
964  const char* name)
965 {
966  IgnoreUnused(name);
967 
968  // Create FlatBuffer BaseLayer
969  auto flatBufferReshapeBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Reshape);
970 
971  std::vector<unsigned int> targetShape;
972  for (unsigned int i =0; i < reshapeDescriptor.m_TargetShape.GetNumDimensions(); i++)
973  {
974  targetShape.push_back(reshapeDescriptor.m_TargetShape[i]);
975  }
976 
977  auto flatBufferReshapeDesc = serializer::CreateReshapeDescriptor(m_flatBufferBuilder,
978  m_flatBufferBuilder.CreateVector(targetShape));
979 
980  // Create the FlatBuffer ReshapeLayer
981  auto flatBufferReshapeLayer = serializer::CreateReshapeLayer(m_flatBufferBuilder, flatBufferReshapeBaseLayer,
982  flatBufferReshapeDesc);
983 
984  // Add the AnyLayer to the FlatBufferLayers
985  CreateAnyLayer(flatBufferReshapeLayer.o, serializer::Layer::Layer_ReshapeLayer);
986 }
987 
988 void SerializerStrategy::SerializeResizeLayer(const armnn::IConnectableLayer* layer,
989  const armnn::ResizeDescriptor& resizeDescriptor,
990  const char* name)
991 {
992  IgnoreUnused(name);
993 
994  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Resize);
995 
996  auto flatBufferDescriptor =
997  CreateResizeDescriptor(m_flatBufferBuilder,
998  resizeDescriptor.m_TargetHeight,
999  resizeDescriptor.m_TargetWidth,
1000  GetFlatBufferResizeMethod(resizeDescriptor.m_Method),
1001  GetFlatBufferDataLayout(resizeDescriptor.m_DataLayout),
1002  resizeDescriptor.m_AlignCorners,
1003  resizeDescriptor.m_HalfPixelCenters);
1004 
1005  auto flatBufferLayer = serializer::CreateResizeLayer(m_flatBufferBuilder,
1006  flatBufferBaseLayer,
1007  flatBufferDescriptor);
1008 
1010 }
1011 
1012 void SerializerStrategy::SerializeSliceLayer(const armnn::IConnectableLayer* layer,
1013  const armnn::SliceDescriptor& sliceDescriptor,
1014  const char* name)
1015 {
1016  IgnoreUnused(name);
1017 
1018  auto fbSliceBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Slice);
1019  auto fbSliceDescriptor = CreateSliceDescriptor(m_flatBufferBuilder,
1020  m_flatBufferBuilder.CreateVector(sliceDescriptor.m_Begin),
1021  m_flatBufferBuilder.CreateVector(sliceDescriptor.m_Size));
1022 
1023  auto fbSliceLayer = serializer::CreateSliceLayer(m_flatBufferBuilder, fbSliceBaseLayer, fbSliceDescriptor);
1024 
1026 }
1027 
1028 // Build FlatBuffer for Softmax Layer
1029 void SerializerStrategy::SerializeSoftmaxLayer(const armnn::IConnectableLayer* layer,
1030  const armnn::SoftmaxDescriptor& softmaxDescriptor,
1031  const char* name)
1032 {
1033  IgnoreUnused(name);
1034 
1035  // Create FlatBuffer BaseLayer
1036  auto flatBufferSoftmaxBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Softmax);
1037 
1038  // Create the FlatBuffer SoftmaxDescriptor
1039  auto flatBufferSoftmaxDesc =
1040  serializer::CreateSoftmaxDescriptor(m_flatBufferBuilder,
1041  softmaxDescriptor.m_Beta,
1042  softmaxDescriptor.m_Axis);
1043 
1044  // Create the FlatBuffer SoftmaxLayer
1045  auto flatBufferSoftmaxLayer =
1046  serializer::CreateSoftmaxLayer(m_flatBufferBuilder,
1047  flatBufferSoftmaxBaseLayer,
1048  flatBufferSoftmaxDesc);
1049 
1050  CreateAnyLayer(flatBufferSoftmaxLayer.o, serializer::Layer::Layer_SoftmaxLayer);
1051 }
1052 
1053 void SerializerStrategy::SerializePooling2dLayer(const armnn::IConnectableLayer* layer,
1054  const armnn::Pooling2dDescriptor& pooling2dDescriptor,
1055  const char* name)
1056 {
1057  IgnoreUnused(name);
1058 
1059  auto fbPooling2dBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Pooling2d);
1060  auto fbPooling2dDescriptor = serializer::CreatePooling2dDescriptor(
1061  m_flatBufferBuilder,
1062  GetFlatBufferPoolingAlgorithm(pooling2dDescriptor.m_PoolType),
1063  pooling2dDescriptor.m_PadLeft,
1064  pooling2dDescriptor.m_PadRight,
1065  pooling2dDescriptor.m_PadTop,
1066  pooling2dDescriptor.m_PadBottom,
1067  pooling2dDescriptor.m_PoolWidth,
1068  pooling2dDescriptor.m_PoolHeight,
1069  pooling2dDescriptor.m_StrideX,
1070  pooling2dDescriptor.m_StrideY,
1072  GetFlatBufferPaddingMethod(pooling2dDescriptor.m_PaddingMethod),
1073  GetFlatBufferDataLayout(pooling2dDescriptor.m_DataLayout));
1074 
1075  auto fbPooling2dLayer = serializer::CreatePooling2dLayer(m_flatBufferBuilder,
1076  fbPooling2dBaseLayer,
1077  fbPooling2dDescriptor);
1078 
1080 }
1081 
// Build FlatBuffer for Prelu Layer (no descriptor; only the base layer is serialized)
void SerializerStrategy::SerializePreluLayer(const armnn::IConnectableLayer* layer,
                                             const char* name)
{
    IgnoreUnused(name);

    // Create FlatBuffer BaseLayer
    auto flatBufferPreluBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Prelu);

    // Create the FlatBuffer PreluLayer
    auto flatBufferPreluLayer = serializer::CreatePreluLayer(m_flatBufferBuilder, flatBufferPreluBaseLayer);

    // Add the AnyLayer to the FlatBufferLayers
    CreateAnyLayer(flatBufferPreluLayer.o, serializer::Layer::Layer_PreluLayer);
}
1096 
1097 void SerializerStrategy::SerializeQuantizeLayer(const armnn::IConnectableLayer *layer, const char *name)
1098 {
1099  IgnoreUnused(name);
1100 
1101  auto fbQuantizeBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Quantize);
1102  auto fbQuantizeLayer = serializer::CreateQuantizeLayer(m_flatBufferBuilder,
1103  fbQuantizeBaseLayer);
1105 }
1106 
1107 // Build FlatBuffer for FullyConnected Layer
1108 void SerializerStrategy::SerializeFullyConnectedLayer(const armnn::IConnectableLayer* layer,
1109  const armnn::FullyConnectedDescriptor& fullyConnectedDescriptor,
1110  const char*)
1111 {
1112  // Create FlatBuffer BaseLayer
1113  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_FullyConnected);
1114 
1115  // Create FlatBuffer FullyConnectedDescriptor
1116  auto flatBufferDescriptor =
1117  serializer::CreateFullyConnectedDescriptor(m_flatBufferBuilder,
1118  fullyConnectedDescriptor.m_BiasEnabled,
1119  fullyConnectedDescriptor.m_TransposeWeightMatrix,
1120  fullyConnectedDescriptor.m_ConstantWeights);
1121 
1122  // Create FlatBuffer FullyConnectedLayer
1123  auto flatBufferLayer = serializer::CreateFullyConnectedLayer(m_flatBufferBuilder,
1124  flatBufferBaseLayer,
1125  flatBufferDescriptor);
1126 
1127  // Add created FullyConnectedLayer to the FlatBufferLayers
1129 }
1130 
1131 // Build FlatBuffer for SpaceToBatchNd Layer
1132 void SerializerStrategy::SerializeSpaceToBatchNdLayer(const armnn::IConnectableLayer* layer,
1133  const armnn::SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor,
1134  const char* name)
1135 {
1136  IgnoreUnused(name);
1137 
1138  // Create FlatBuffer BaseLayer
1139  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_SpaceToBatchNd);
1140 
1141  std::vector<unsigned int> padList;
1142  padList.reserve(spaceToBatchNdDescriptor.m_PadList.size()*2);
1143  for (auto& pad : spaceToBatchNdDescriptor.m_PadList)
1144  {
1145  padList.push_back(pad.first);
1146  padList.push_back(pad.second);
1147  }
1148 
1149  auto flatBufferDescriptor =
1150  CreateSpaceToBatchNdDescriptor(m_flatBufferBuilder,
1151  m_flatBufferBuilder.CreateVector(spaceToBatchNdDescriptor.m_BlockShape),
1152  m_flatBufferBuilder.CreateVector(padList),
1153  GetFlatBufferDataLayout(spaceToBatchNdDescriptor.m_DataLayout));
1154 
1155  auto flatBufferLayer = serializer::CreateSpaceToBatchNdLayer(m_flatBufferBuilder,
1156  flatBufferBaseLayer,
1157  flatBufferDescriptor);
1158 
1160 }
1161 
1162 // Build FlatBuffer for SpaceToDepthLayer
1163 void SerializerStrategy::SerializeSpaceToDepthLayer(const armnn::IConnectableLayer* layer,
1164  const armnn::SpaceToDepthDescriptor& spaceToDepthDescriptor,
1165  const char* name)
1166 {
1167  IgnoreUnused(name);
1168 
1169  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_SpaceToDepth);
1170  auto flatBufferDescriptor =
1171  CreateSpaceToDepthDescriptor(m_flatBufferBuilder,
1172  spaceToDepthDescriptor.m_BlockSize,
1173  GetFlatBufferDataLayout(spaceToDepthDescriptor.m_DataLayout));
1174 
1175  auto flatBufferLayer = serializer::CreateSpaceToDepthLayer(m_flatBufferBuilder,
1176  flatBufferBaseLayer,
1177  flatBufferDescriptor);
1178 
1180 }
1181 
// Build FlatBuffer for Splitter Layer.
// Serializes the ViewsDescriptor as two parallel collections: per-view origins
// (wrapped in an OriginsDescriptor) and per-view sizes.
void SerializerStrategy::SerializeSplitterLayer(const armnn::IConnectableLayer* layer,
                                                const armnn::ViewsDescriptor& viewsDescriptor,
                                                const char* name)
{
    IgnoreUnused(name);

    // Create FlatBuffer ViewOrigins: one UintVector per view, each holding the
    // view's starting coordinate in every dimension.
    std::vector<flatbuffers::Offset<UintVector>> flatBufferViewOrigins;
    flatBufferViewOrigins.reserve(viewsDescriptor.GetNumViews());

    for(unsigned int vIdx = 0; vIdx < viewsDescriptor.GetNumViews(); ++vIdx)
    {
        std::vector<uint32_t> viewOrigin;
        viewOrigin.reserve(viewsDescriptor.GetNumDimensions());

        // Copy vector
        for(unsigned int dIdx = 0; dIdx < viewsDescriptor.GetNumDimensions(); ++dIdx)
        {
            viewOrigin.push_back(viewsDescriptor.GetViewOrigin(vIdx)[dIdx]);
        }

        flatBufferViewOrigins.push_back(CreateUintVector(m_flatBufferBuilder,
                                                         m_flatBufferBuilder.CreateVector(viewOrigin)));
    }

    // Create FlatBuffer OriginsDescriptor wrapping the origins plus the
    // concat axis and view/dimension counts.
    auto flatBufferOriginDescriptor = CreateOriginsDescriptor(m_flatBufferBuilder,
                                                              viewsDescriptor.GetOrigins().GetConcatAxis(),
                                                              viewsDescriptor.GetOrigins().GetNumViews(),
                                                              viewsDescriptor.GetOrigins().GetNumDimensions(),
                                                              m_flatBufferBuilder.CreateVector(flatBufferViewOrigins));

    // Create FlatBuffer ViewSizes: one UintVector per view, each holding the
    // view's extent in every dimension.
    std::vector<flatbuffers::Offset<UintVector>> flatBufferViewSizes;
    flatBufferViewSizes.reserve(viewsDescriptor.GetNumViews());

    for(unsigned int vIdx = 0; vIdx < viewsDescriptor.GetNumViews(); ++vIdx)
    {
        std::vector<uint32_t> viewSize;
        viewSize.reserve(viewsDescriptor.GetNumDimensions());

        // Copy vector
        for(unsigned int dIdx = 0; dIdx < viewsDescriptor.GetNumDimensions(); ++dIdx)
        {
            viewSize.push_back(viewsDescriptor.GetViewSizes(vIdx)[dIdx]);
        }

        flatBufferViewSizes.push_back(CreateUintVector(m_flatBufferBuilder,
                                                       m_flatBufferBuilder.CreateVector(viewSize)));
    }

    // Create FlatBuffer ViewsDescriptor combining origins and sizes.
    auto flatBufferViewsDescriptor = CreateViewsDescriptor(m_flatBufferBuilder,
                                                           flatBufferOriginDescriptor,
                                                           m_flatBufferBuilder.CreateVector(flatBufferViewSizes));

    // Create FlatBuffer BaseLayer
    auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Splitter);

    auto flatBufferSplitterLayer = serializer::CreateSplitterLayer(m_flatBufferBuilder,
                                                                   flatBufferBaseLayer,
                                                                   flatBufferViewsDescriptor);

    // Register the layer in the serialized network.
    CreateAnyLayer(flatBufferSplitterLayer.o, serializer::Layer::Layer_SplitterLayer);
}
1248 
1249 void SerializerStrategy::SerializeNormalizationLayer(const armnn::IConnectableLayer* layer,
1250  const armnn::NormalizationDescriptor& descriptor,
1251  const char* name)
1252 {
1253  IgnoreUnused(name);
1254 
1255  auto fbNormalizationBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Normalization);
1256 
1257  auto fbNormalizationDescriptor = serializer::CreateNormalizationDescriptor(
1258  m_flatBufferBuilder,
1261  descriptor.m_NormSize,
1262  descriptor.m_Alpha,
1263  descriptor.m_Beta,
1264  descriptor.m_K,
1266 
1267  auto flatBufferLayer = serializer::CreateNormalizationLayer(m_flatBufferBuilder,
1268  fbNormalizationBaseLayer,
1269  fbNormalizationDescriptor);
1270 
1272 }
1273 
1274 void SerializerStrategy::SerializeShapeLayer(const armnn::IConnectableLayer* layer,
1275  const char* name)
1276 {
1277  IgnoreUnused(name);
1278 
1279  auto shapeBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Shape);
1280  auto shapeLayer = serializer::CreateShapeLayer(m_flatBufferBuilder, shapeBaseLayer);
1281 
1283 }
1284 
1285 void SerializerStrategy::SerializeStackLayer(const armnn::IConnectableLayer* layer,
1286  const armnn::StackDescriptor& stackDescriptor,
1287  const char* name)
1288 {
1289  IgnoreUnused(name);
1290 
1291  auto stackBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Stack);
1292 
1293  std::vector<unsigned int> inputShape;
1294  for (unsigned int i =0; i < stackDescriptor.m_InputShape.GetNumDimensions(); i++)
1295  {
1296  inputShape.push_back(stackDescriptor.m_InputShape[i]);
1297  }
1298 
1299  auto flatBufferStackDescriptor = CreateStackDescriptor(m_flatBufferBuilder,
1300  stackDescriptor.m_Axis,
1301  stackDescriptor.m_NumInputs,
1302  m_flatBufferBuilder.CreateVector(inputShape));
1303 
1304  auto stackLayer = serializer::CreateStackLayer(m_flatBufferBuilder, stackBaseLayer, flatBufferStackDescriptor);
1306 }
1307 
1308 void SerializerStrategy::SerializeStandInLayer(const armnn::IConnectableLayer *layer,
1309  const armnn::StandInDescriptor& standInDescriptor,
1310  const char *name)
1311 {
1312  IgnoreUnused(name);
1313 
1314  auto fbDescriptor = serializer::CreateStandInDescriptor(m_flatBufferBuilder,
1315  standInDescriptor.m_NumInputs,
1316  standInDescriptor.m_NumOutputs);
1317 
1318  auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_StandIn);
1319  auto fbLayer = serializer::CreateStandInLayer(m_flatBufferBuilder, fbBaseLayer, fbDescriptor);
1320 
1322 }
1323 
1324 void SerializerStrategy::SerializeStridedSliceLayer(const armnn::IConnectableLayer* layer,
1325  const armnn::StridedSliceDescriptor& stridedSliceDescriptor,
1326  const char* name)
1327 {
1328  IgnoreUnused(name);
1329 
1330  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_StridedSlice);
1331 
1332  auto flatBufferDescriptor =
1333  CreateStridedSliceDescriptor(m_flatBufferBuilder,
1334  m_flatBufferBuilder.CreateVector(stridedSliceDescriptor.m_Begin),
1335  m_flatBufferBuilder.CreateVector(stridedSliceDescriptor.m_End),
1336  m_flatBufferBuilder.CreateVector(stridedSliceDescriptor.m_Stride),
1337  stridedSliceDescriptor.m_BeginMask,
1338  stridedSliceDescriptor.m_EndMask,
1339  stridedSliceDescriptor.m_ShrinkAxisMask,
1340  stridedSliceDescriptor.m_EllipsisMask,
1341  stridedSliceDescriptor.m_NewAxisMask,
1342  GetFlatBufferDataLayout(stridedSliceDescriptor.m_DataLayout));
1343 
1344  auto flatBufferLayer = serializer::CreateStridedSliceLayer(m_flatBufferBuilder,
1345  flatBufferBaseLayer,
1346  flatBufferDescriptor);
1347 
1349 }
1350 
1351 void SerializerStrategy::SerializeSubtractionLayer(const armnn::IConnectableLayer* layer, const char* name)
1352 {
1353  IgnoreUnused(name);
1354 
1355  auto fbSubtractionBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Subtraction);
1356  auto fbSubtractionLayer = serializer::CreateSubtractionLayer(m_flatBufferBuilder, fbSubtractionBaseLayer);
1357 
1359 }
1360 
1361 void SerializerStrategy::SerializeSwitchLayer(const armnn::IConnectableLayer* layer, const char* name)
1362 {
1363  IgnoreUnused(name);
1364 
1365  auto fbSwitchBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Switch);
1366  auto fbSwitchLayer = serializer::CreateSwitchLayer(m_flatBufferBuilder, fbSwitchBaseLayer);
1367 
1369 }
1370 
1371 void SerializerStrategy::SerializeTransposeConvolution2dLayer(
1372  const armnn::IConnectableLayer* layer,
1373  const armnn::TransposeConvolution2dDescriptor& descriptor,
1374  const std::vector<armnn::ConstTensor>& constants,
1375  const char* name)
1376 {
1377  IgnoreUnused(name);
1378 
1379  const armnn::ConstTensor& weights = constants.at(0);
1380 
1382  auto fbDescriptor = CreateTransposeConvolution2dDescriptor(m_flatBufferBuilder,
1383  descriptor.m_PadLeft,
1384  descriptor.m_PadRight,
1385  descriptor.m_PadTop,
1386  descriptor.m_PadBottom,
1387  descriptor.m_StrideX,
1388  descriptor.m_StrideY,
1389  descriptor.m_BiasEnabled,
1391 
1392  // weights & biases
1393  auto fbWeightsConstTensorInfo = CreateConstTensorInfo(weights);
1394  flatbuffers::Offset<serializer::ConstTensor> fbBiasesConstTensorInfo;
1395  if (constants.size() > 1)
1396  {
1397  const armnn::ConstTensor& biases = constants.at(1);
1398  fbBiasesConstTensorInfo = CreateConstTensorInfo(biases);
1399  }
1400 
1401  auto fbLayer = CreateTransposeConvolution2dLayer(m_flatBufferBuilder,
1402  fbBaseLayer,
1403  fbDescriptor,
1404  fbWeightsConstTensorInfo,
1405  fbBiasesConstTensorInfo);
1406 
1408 }
1409 
1410 void SerializerStrategy::SerializeTransposeLayer(const armnn::IConnectableLayer* layer,
1411  const armnn::TransposeDescriptor& descriptor,
1412  const char* name)
1413 {
1414  IgnoreUnused(name);
1415 
1416  // Create FlatBuffer BaseLayer
1417  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Transpose);
1418 
1419  std::vector<unsigned int> dimMappings;
1420  for (unsigned int i=0; i<descriptor.m_DimMappings.GetSize(); ++i)
1421  {
1422  dimMappings.push_back(descriptor.m_DimMappings[i]);
1423  }
1424 
1425  auto flatBufferDesc = serializer::CreateTransposeDescriptor(m_flatBufferBuilder,
1426  m_flatBufferBuilder.CreateVector(dimMappings));
1427 
1428  // Create the FlatBuffer TransposeLayer
1429  auto flatBufferLayer = serializer::CreateTransposeLayer(m_flatBufferBuilder,
1430  flatBufferBaseLayer,
1431  flatBufferDesc);
1432 
1433  // Add the AnyLayer to the FlatBufferLayers
1435 }
1436 
1437 void SerializerStrategy::SerializeQLstmLayer(const armnn::IConnectableLayer* layer,
1438  const armnn::QLstmDescriptor& descriptor,
1439  const std::vector<armnn::ConstTensor>& constants,
1440  const char* name)
1441 {
1442  IgnoreUnused(name);
1443 
1444  auto fbQLstmBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_QLstm);
1445 
1446  auto fbQLstmDescriptor = serializer::CreateQLstmDescriptor(
1447  m_flatBufferBuilder,
1448  descriptor.m_CifgEnabled,
1449  descriptor.m_PeepholeEnabled,
1450  descriptor.m_ProjectionEnabled,
1451  descriptor.m_LayerNormEnabled,
1452  descriptor.m_CellClip,
1453  descriptor.m_ProjectionClip,
1454  descriptor.m_InputIntermediateScale,
1455  descriptor.m_ForgetIntermediateScale,
1456  descriptor.m_CellIntermediateScale,
1457  descriptor.m_OutputIntermediateScale,
1458  descriptor.m_HiddenStateZeroPoint,
1459  descriptor.m_HiddenStateScale
1460  );
1461 
1462  // Index for constants vector
1463  std::size_t i = 0;
1464 
1465  // Mandatory params
1466  auto inputToForgetWeights = CreateConstTensorInfo(constants[i++]); //InputToForgetWeights
1467  auto inputToCellWeights = CreateConstTensorInfo(constants[i++]); //InputToCellWeights
1468  auto inputToOutputWeights = CreateConstTensorInfo(constants[i++]); //InputToOutputWeights
1469  auto recurrentToForgetWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToForgetWeights
1470  auto recurrentToCellWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToCellWeights
1471  auto recurrentToOutputWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToOutputWeights
1472  auto forgetGateBias = CreateConstTensorInfo(constants[i++]); //ForgetGateBias
1473  auto cellBias = CreateConstTensorInfo(constants[i++]); //CellBias
1474  auto outputGateBias = CreateConstTensorInfo(constants[i++]); //OutputGateBias
1475 
1476  // CIFG
1477  flatbuffers::Offset<serializer::ConstTensor> inputToInputWeights;
1478  flatbuffers::Offset<serializer::ConstTensor> recurrentToInputWeights;
1479  flatbuffers::Offset<serializer::ConstTensor> inputGateBias;
1480 
1481  if (!descriptor.m_CifgEnabled)
1482  {
1483  inputToInputWeights = CreateConstTensorInfo(constants[i++]); //InputToInputWeights
1484  recurrentToInputWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToInputWeights
1485  inputGateBias = CreateConstTensorInfo(constants[i++]); //InputGateBias
1486  }
1487 
1488  // Peephole
1489  flatbuffers::Offset<serializer::ConstTensor> cellToInputWeights;
1490  flatbuffers::Offset<serializer::ConstTensor> cellToForgetWeights;
1491  flatbuffers::Offset<serializer::ConstTensor> cellToOutputWeights;
1492 
1493  if (descriptor.m_PeepholeEnabled)
1494  {
1495  if (!descriptor.m_CifgEnabled)
1496  {
1497  cellToInputWeights = CreateConstTensorInfo(constants[i++]); //CellToInputWeights
1498  }
1499  cellToForgetWeights = CreateConstTensorInfo(constants[i++]); //CellToForgetWeights
1500  cellToOutputWeights = CreateConstTensorInfo(constants[i++]); //CellToOutputWeights
1501  }
1502 
1503  // Projection
1504  flatbuffers::Offset<serializer::ConstTensor> projectionWeights;
1505  flatbuffers::Offset<serializer::ConstTensor> projectionBias;
1506 
1507  if (descriptor.m_ProjectionEnabled)
1508  {
1509  projectionWeights = CreateConstTensorInfo(constants[i++]); //ProjectionWeights
1510  projectionBias = CreateConstTensorInfo(constants[i++]); //ProjectionBias
1511  }
1512 
1513  // Layer norm
1514  flatbuffers::Offset<serializer::ConstTensor> inputLayerNormWeights;
1515  flatbuffers::Offset<serializer::ConstTensor> forgetLayerNormWeights;
1516  flatbuffers::Offset<serializer::ConstTensor> cellLayerNormWeights;
1517  flatbuffers::Offset<serializer::ConstTensor> outputLayerNormWeights;
1518 
1519  if (descriptor.m_LayerNormEnabled)
1520  {
1521  if (!descriptor.m_CifgEnabled)
1522  {
1523  inputLayerNormWeights = CreateConstTensorInfo(constants[i++]); //InputLayerNormWeights
1524  }
1525  forgetLayerNormWeights = CreateConstTensorInfo(constants[i++]); //ForgetLayerNormWeights
1526  cellLayerNormWeights = CreateConstTensorInfo(constants[i++]); //CellLayerNormWeights
1527  outputLayerNormWeights = CreateConstTensorInfo(constants[i++]); //OutputLayerNormWeights
1528  }
1529 
1530  auto fbQLstmParams = serializer::CreateQLstmInputParams(
1531  m_flatBufferBuilder,
1532  inputToForgetWeights,
1533  inputToCellWeights,
1534  inputToOutputWeights,
1535  recurrentToForgetWeights,
1536  recurrentToCellWeights,
1537  recurrentToOutputWeights,
1538  forgetGateBias,
1539  cellBias,
1540  outputGateBias,
1541  inputToInputWeights,
1542  recurrentToInputWeights,
1543  inputGateBias,
1544  projectionWeights,
1545  projectionBias,
1546  cellToInputWeights,
1547  cellToForgetWeights,
1548  cellToOutputWeights,
1549  inputLayerNormWeights,
1550  forgetLayerNormWeights,
1551  cellLayerNormWeights,
1552  outputLayerNormWeights);
1553 
1554  auto fbQLstmLayer = serializer::CreateQLstmLayer(
1555  m_flatBufferBuilder,
1556  fbQLstmBaseLayer,
1557  fbQLstmDescriptor,
1558  fbQLstmParams);
1559 
1561 }
1562 
1563 void SerializerStrategy::SerializeQuantizedLstmLayer(const armnn::IConnectableLayer* layer,
1564  const std::vector<armnn::ConstTensor>& constants,
1565  const char* name)
1566 {
1567  IgnoreUnused(name);
1568 
1569  auto fbQuantizedLstmBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_QuantizedLstm);
1570 
1571  // index for constants vector
1572  size_t i = 0;
1573 
1574  // Get input parameters
1575  auto inputToInputWeights = CreateConstTensorInfo(constants[i++]);
1576  auto inputToForgetWeights = CreateConstTensorInfo(constants[i++]);
1577  auto inputToCellWeights = CreateConstTensorInfo(constants[i++]);
1578  auto inputToOutputWeights = CreateConstTensorInfo(constants[i++]);
1579 
1580  auto recurrentToInputWeights = CreateConstTensorInfo(constants[i++]);
1581  auto recurrentToForgetWeights = CreateConstTensorInfo(constants[i++]);
1582  auto recurrentToCellWeights = CreateConstTensorInfo(constants[i++]);
1583  auto recurrentToOutputWeights = CreateConstTensorInfo(constants[i++]);
1584 
1585  auto inputGateBias = CreateConstTensorInfo(constants[i++]);
1586  auto forgetGateBias = CreateConstTensorInfo(constants[i++]);
1587  auto cellBias = CreateConstTensorInfo(constants[i++]);
1588  auto outputGateBias = CreateConstTensorInfo(constants[i++]);
1589 
1590  auto fbQuantizedLstmParams = serializer::CreateQuantizedLstmInputParams(
1591  m_flatBufferBuilder,
1592  inputToInputWeights,
1593  inputToForgetWeights,
1594  inputToCellWeights,
1595  inputToOutputWeights,
1596  recurrentToInputWeights,
1597  recurrentToForgetWeights,
1598  recurrentToCellWeights,
1599  recurrentToOutputWeights,
1600  inputGateBias,
1601  forgetGateBias,
1602  cellBias,
1603  outputGateBias);
1604 
1605  auto fbQuantizedLstmLayer = serializer::CreateQuantizedLstmLayer(
1606  m_flatBufferBuilder,
1607  fbQuantizedLstmBaseLayer,
1608  fbQuantizedLstmParams);
1609 
1611 }
1612 
1613 void SerializerStrategy::SerializeUnidirectionalSequenceLstmLayer(
1614  const armnn::IConnectableLayer* layer,
1616  const std::vector<armnn::ConstTensor>& constants,
1617  const char* name)
1618 {
1619  IgnoreUnused(name);
1620 
1621  auto fbUnidirectionalSequenceLstmBaseLayer =
1623 
1624  auto fbUnidirectionalSequenceLstmDescriptor = serializer::CreateUnidirectionalSequenceLstmDescriptor(
1625  m_flatBufferBuilder,
1626  descriptor.m_ActivationFunc,
1627  descriptor.m_ClippingThresCell,
1628  descriptor.m_ClippingThresProj,
1629  descriptor.m_CifgEnabled,
1630  descriptor.m_PeepholeEnabled,
1631  descriptor.m_ProjectionEnabled,
1632  descriptor.m_LayerNormEnabled,
1633  descriptor.m_TimeMajor);
1634 
1635  // Index for constants vector
1636  std::size_t i = 0;
1637 
1638  // Get mandatory/basic input parameters
1639  auto inputToForgetWeights = CreateConstTensorInfo(constants[i++]); //InputToForgetWeights
1640  auto inputToCellWeights = CreateConstTensorInfo(constants[i++]); //InputToCellWeights
1641  auto inputToOutputWeights = CreateConstTensorInfo(constants[i++]); //InputToOutputWeights
1642  auto recurrentToForgetWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToForgetWeights
1643  auto recurrentToCellWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToCellWeights
1644  auto recurrentToOutputWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToOutputWeights
1645  auto forgetGateBias = CreateConstTensorInfo(constants[i++]); //ForgetGateBias
1646  auto cellBias = CreateConstTensorInfo(constants[i++]); //CellBias
1647  auto outputGateBias = CreateConstTensorInfo(constants[i++]); //OutputGateBias
1648 
1649  //Define optional parameters, these will be set depending on configuration in Lstm descriptor
1650  flatbuffers::Offset<serializer::ConstTensor> inputToInputWeights;
1651  flatbuffers::Offset<serializer::ConstTensor> recurrentToInputWeights;
1652  flatbuffers::Offset<serializer::ConstTensor> cellToInputWeights;
1653  flatbuffers::Offset<serializer::ConstTensor> inputGateBias;
1654  flatbuffers::Offset<serializer::ConstTensor> projectionWeights;
1655  flatbuffers::Offset<serializer::ConstTensor> projectionBias;
1656  flatbuffers::Offset<serializer::ConstTensor> cellToForgetWeights;
1657  flatbuffers::Offset<serializer::ConstTensor> cellToOutputWeights;
1658  flatbuffers::Offset<serializer::ConstTensor> inputLayerNormWeights;
1659  flatbuffers::Offset<serializer::ConstTensor> forgetLayerNormWeights;
1660  flatbuffers::Offset<serializer::ConstTensor> cellLayerNormWeights;
1661  flatbuffers::Offset<serializer::ConstTensor> outputLayerNormWeights;
1662 
1663  if (!descriptor.m_CifgEnabled)
1664  {
1665  inputToInputWeights = CreateConstTensorInfo(constants[i++]); //InputToInputWeights
1666  recurrentToInputWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToInputWeights
1667  inputGateBias = CreateConstTensorInfo(constants[i++]); //InputGateBias
1668  }
1669 
1670  if (descriptor.m_PeepholeEnabled)
1671  {
1672  if (!descriptor.m_CifgEnabled)
1673  {
1674  cellToInputWeights = CreateConstTensorInfo(constants[i++]); //CellToInputWeights
1675  }
1676  cellToForgetWeights = CreateConstTensorInfo(constants[i++]); //CellToForgetWeights
1677  cellToOutputWeights = CreateConstTensorInfo(constants[i++]); //CellToOutputWeights
1678  }
1679 
1680  if (descriptor.m_ProjectionEnabled)
1681  {
1682  projectionWeights = CreateConstTensorInfo(constants[i++]); //ProjectionWeights
1683  projectionBias = CreateConstTensorInfo(constants[i++]); //ProjectionBias
1684  }
1685 
1686  if (descriptor.m_LayerNormEnabled)
1687  {
1688  if (!descriptor.m_CifgEnabled)
1689  {
1690  inputLayerNormWeights = CreateConstTensorInfo(constants[i++]); //InputLayerNormWeights
1691  }
1692  forgetLayerNormWeights = CreateConstTensorInfo(constants[i++]); //ForgetLayerNormWeights
1693  cellLayerNormWeights = CreateConstTensorInfo(constants[i++]); //CellLayerNormWeights
1694  outputLayerNormWeights = CreateConstTensorInfo(constants[i++]); //OutputLayerNormWeights
1695  }
1696 
1697  auto fbUnidirectionalSequenceLstmParams = serializer::CreateLstmInputParams(
1698  m_flatBufferBuilder,
1699  inputToForgetWeights,
1700  inputToCellWeights,
1701  inputToOutputWeights,
1702  recurrentToForgetWeights,
1703  recurrentToCellWeights,
1704  recurrentToOutputWeights,
1705  forgetGateBias,
1706  cellBias,
1707  outputGateBias,
1708  inputToInputWeights,
1709  recurrentToInputWeights,
1710  cellToInputWeights,
1711  inputGateBias,
1712  projectionWeights,
1713  projectionBias,
1714  cellToForgetWeights,
1715  cellToOutputWeights,
1716  inputLayerNormWeights,
1717  forgetLayerNormWeights,
1718  cellLayerNormWeights,
1719  outputLayerNormWeights);
1720 
1721  auto fbUnidirectionalSequenceLstmLayer = serializer::CreateUnidirectionalSequenceLstmLayer(
1722  m_flatBufferBuilder,
1723  fbUnidirectionalSequenceLstmBaseLayer,
1724  fbUnidirectionalSequenceLstmDescriptor,
1725  fbUnidirectionalSequenceLstmParams);
1726 
1727  CreateAnyLayer(fbUnidirectionalSequenceLstmLayer.o, serializer::Layer::Layer_UnidirectionalSequenceLstmLayer);
1728 }
1729 
1730 fb::Offset<serializer::LayerBase> SerializerStrategy::CreateLayerBase(const IConnectableLayer* layer,
1731  const serializer::LayerType layerType)
1732 {
1733 
1734  uint32_t fbIndex = GetSerializedId(layer->GetGuid());
1735 
1736  std::vector<fb::Offset<serializer::InputSlot>> inputSlots = CreateInputSlots(layer);
1737  std::vector<fb::Offset<serializer::OutputSlot>> outputSlots = CreateOutputSlots(layer);
1738 
1739  return serializer::CreateLayerBase(m_flatBufferBuilder,
1740  fbIndex,
1741  m_flatBufferBuilder.CreateString(layer->GetName()),
1742  layerType,
1743  m_flatBufferBuilder.CreateVector(inputSlots),
1744  m_flatBufferBuilder.CreateVector(outputSlots));
1745 }
1746 
1747 void SerializerStrategy::CreateAnyLayer(const flatbuffers::Offset<void>& layer, const serializer::Layer serializerLayer)
1748 {
1749 
1750  auto anyLayer = armnnSerializer::CreateAnyLayer(m_flatBufferBuilder, serializerLayer, layer);
1751  m_serializedLayers.push_back(anyLayer);
1752 }
1753 
1754 template <typename T>
1755 flatbuffers::Offset<flatbuffers::Vector<T>> SerializerStrategy::CreateDataVector(const void* memory, unsigned int size)
1756 {
1757  const T* buffer = reinterpret_cast<const T*>(memory);
1758  std::vector<T> vector(buffer, buffer + (size / sizeof(T)));
1759  auto fbVector = m_flatBufferBuilder.CreateVector(vector);
1760  return fbVector;
1761 }
1762 
1763 flatbuffers::Offset<TensorInfo> SerializerStrategy::CreateTensorInfo(const armnn::TensorInfo& tensorInfo)
1764 {
1765  // Get the dimensions
1766  std::vector<unsigned int> shape;
1767  std::vector<bool> specificity;
1768  // This assumes that the TensorShape constructors have ensured that the size of m_DimensionsSpecificity
1769  // matches the size of dimensions.
1770  for(unsigned int dim = 0; dim < tensorInfo.GetShape().GetNumDimensions(); ++dim)
1771  {
1772  specificity.push_back(tensorInfo.GetShape().GetDimensionSpecificity(dim));
1773 
1774  if (tensorInfo.GetShape().GetDimensionSpecificity(dim))
1775  {
1776  shape.push_back(tensorInfo.GetShape()[dim]);
1777  }
1778  else
1779  {
1780  shape.push_back(0);
1781  }
1782  }
1783 
1784  if (tensorInfo.HasPerAxisQuantization())
1785  {
1786  // Create FlatBuffer TensorInfo
1787  auto flatBufferTensorInfo =
1788  serializer::CreateTensorInfo(m_flatBufferBuilder,
1789  m_flatBufferBuilder.CreateVector(shape),
1790  GetFlatBufferDataType(tensorInfo.GetDataType()),
1791  tensorInfo.GetQuantizationScales()[0],
1792  tensorInfo.GetQuantizationOffset(),
1793  m_flatBufferBuilder.CreateVector(tensorInfo.GetQuantizationScales()),
1794  tensorInfo.GetQuantizationDim().value(),
1795  static_cast<unsigned int>
1796  (tensorInfo.GetShape().GetDimensionality()),
1797  m_flatBufferBuilder.CreateVector(specificity));
1798  return flatBufferTensorInfo;
1799  }
1800 
1801  // Create FlatBuffer TensorInfo
1802  auto flatBufferTensorInfo = serializer::CreateTensorInfo(m_flatBufferBuilder,
1803  m_flatBufferBuilder.CreateVector(shape),
1804  GetFlatBufferDataType(tensorInfo.GetDataType()),
1805  tensorInfo.GetQuantizationScale(),
1806  tensorInfo.GetQuantizationOffset(),
1807  0,
1808  0,
1809  static_cast<unsigned int>
1810  (tensorInfo.GetShape().GetDimensionality()),
1811  m_flatBufferBuilder.CreateVector(specificity));
1812  return flatBufferTensorInfo;
1813 }
1814 
1815 flatbuffers::Offset<serializer::ConstTensor>
1816  SerializerStrategy::CreateConstTensorInfo(const armnn::ConstTensor& constTensor)
1817 {
1818  armnn::TensorInfo tensorInfo = constTensor.GetInfo();
1819 
1820  flatbuffers::Offset<void> fbPayload;
1821 
1822  switch (tensorInfo.GetDataType())
1823  {
1825  {
1826  auto fbVector = CreateDataVector<int64_t>(constTensor.GetMemoryArea(), constTensor.GetNumBytes());
1827  flatbuffers::Offset<serializer::LongData> flatBuffersData = serializer::CreateLongData(
1828  m_flatBufferBuilder,
1829  fbVector);
1830  fbPayload = flatBuffersData.o;
1831  break;
1832  }
1835  {
1836  auto fbVector = CreateDataVector<int32_t>(constTensor.GetMemoryArea(), constTensor.GetNumBytes());
1837  flatbuffers::Offset<serializer::IntData> flatBuffersData = serializer::CreateIntData(
1838  m_flatBufferBuilder,
1839  fbVector);
1840  fbPayload = flatBuffersData.o;
1841  break;
1842  }
1846  {
1847  auto fbVector = CreateDataVector<int16_t>(constTensor.GetMemoryArea(), constTensor.GetNumBytes());
1848  flatbuffers::Offset<serializer::ShortData> flatBuffersData = serializer::CreateShortData(
1849  m_flatBufferBuilder,
1850  fbVector);
1851  fbPayload = flatBuffersData.o;
1852  break;
1853  }
1858  default:
1859  {
1860  auto fbVector = CreateDataVector<int8_t>(constTensor.GetMemoryArea(), constTensor.GetNumBytes());
1861  flatbuffers::Offset<serializer::ByteData> flatBuffersData = serializer::CreateByteData(
1862  m_flatBufferBuilder,
1863  fbVector);
1864  fbPayload = flatBuffersData.o;
1865  }
1866  }
1867  flatbuffers::Offset<serializer::ConstTensor> flatBufferConstTensor = serializer::CreateConstTensor(
1868  m_flatBufferBuilder,
1869  CreateTensorInfo(tensorInfo),
1871  fbPayload);
1872  return flatBufferConstTensor;
1873 }
1874 
1875 flatbuffers::Offset<armnnSerializer::FeatureCompatibilityVersions> SerializerStrategy::GetVersionTable()
1876 {
1877  flatbuffers::Offset<armnnSerializer::FeatureCompatibilityVersions> versionsTable =
1879  m_flatBufferBuilder,
1880  1, // Binding ids scheme version
1881  1, // Weights layout scheme version
1882  1 // Constant tensors as inputs version
1883  );
1884  return versionsTable;
1885 }
1886 
1887 std::vector<fb::Offset<serializer::InputSlot>>
1888  SerializerStrategy::CreateInputSlots(const armnn::IConnectableLayer* layer)
1889 {
1890  std::vector<fb::Offset<serializer::InputSlot>> inputSlots;
1891 
1892  // Get the InputSlots
1893  for (unsigned int slotIndex = 0; slotIndex<layer->GetNumInputSlots(); ++slotIndex)
1894  {
1895  const IInputSlot& inputSlot = layer->GetInputSlot(slotIndex);
1896 
1897  // Get the Connection for the InputSlot
1898  const IOutputSlot* connection = inputSlot.GetConnection();
1899 
1900  // Create FlatBuffer Connection
1901  serializer::Connection conn(GetSerializedId(inputSlot.GetConnection()->GetOwningLayerGuid()),
1902  connection->CalculateIndexOnOwner());
1903  // Create FlatBuffer InputSlot
1904  inputSlots.push_back(serializer::CreateInputSlot(m_flatBufferBuilder, slotIndex, &conn));
1905  }
1906  return inputSlots;
1907 }
1908 
1909 std::vector<fb::Offset<serializer::OutputSlot>>
1910  SerializerStrategy::CreateOutputSlots(const armnn::IConnectableLayer* layer)
1911 {
1912  std::vector<fb::Offset<serializer::OutputSlot>> outputSlots;
1913 
1914  // Get the OutputSlots
1915  for (unsigned int slotIndex = 0; slotIndex < layer->GetNumOutputSlots(); ++slotIndex)
1916  {
1917  const IOutputSlot& outputSlot = layer->GetOutputSlot(slotIndex);
1918  const armnn::TensorInfo& tensorInfo = outputSlot.GetTensorInfo();
1919 
1920  // Create FlatBuffer Outputslot
1921  outputSlots.push_back(serializer::CreateOutputSlot(m_flatBufferBuilder,
1922  slotIndex,
1923  CreateTensorInfo(tensorInfo)));
1924  }
1925  return outputSlots;
1926 }
1927 
1929  const BaseDescriptor& descriptor,
1930  const std::vector<armnn::ConstTensor>& constants,
1931  const char* name,
1932  const armnn::LayerBindingId id)
1933 {
1934  IgnoreUnused(constants);
1935 
1936  switch (layer->GetType())
1937  {
1939  {
1940  const armnn::ActivationDescriptor& layerDescriptor =
1941  static_cast<const armnn::ActivationDescriptor&>(descriptor);
1942  SerializeActivationLayer(layer, layerDescriptor, name);
1943  break;
1944  }
1946  {
1947  SerializeAdditionLayer(layer, name);
1948  break;
1949  }
1951  {
1952  const armnn::ArgMinMaxDescriptor& layerDescriptor =
1953  static_cast<const armnn::ArgMinMaxDescriptor&>(descriptor);
1954  SerializeArgMinMaxLayer(layer, layerDescriptor, name);
1955  break;
1956  }
1958  {
1959  const armnn::BatchNormalizationDescriptor& layerDescriptor =
1960  static_cast<const armnn::BatchNormalizationDescriptor&>(descriptor);
1961  SerializeBatchNormalizationLayer(layer,
1962  layerDescriptor,
1963  constants,
1964  name);
1965  break;
1966  }
1968  {
1969  const armnn::BatchToSpaceNdDescriptor& layerDescriptor =
1970  static_cast<const armnn::BatchToSpaceNdDescriptor&>(descriptor);
1971  SerializeBatchToSpaceNdLayer(layer,
1972  layerDescriptor,
1973  name);
1974  break;
1975  }
1976  case armnn::LayerType::Cast :
1977  {
1978  SerializeCastLayer(layer, name);
1979  break;
1980  }
1982  {
1983  const armnn::ChannelShuffleDescriptor& layerDescriptor =
1984  static_cast<const armnn::ChannelShuffleDescriptor&>(descriptor);
1985  SerializeChannelShuffleLayer(layer,
1986  layerDescriptor,
1987  name);
1988  break;
1989  }
1991  {
1992  const armnn::ComparisonDescriptor& layerDescriptor =
1993  static_cast<const armnn::ComparisonDescriptor&>(descriptor);
1994  SerializeComparisonLayer(layer,
1995  layerDescriptor,
1996  name);
1997  break;
1998  }
2000  {
2001  const armnn::ConcatDescriptor& layerDescriptor =
2002  static_cast<const armnn::ConcatDescriptor&>(descriptor);
2003  SerializeConcatLayer(layer,
2004  layerDescriptor,
2005  name);
2006  break;
2007  }
2009  {
2010  SerializeConstantLayer(layer,
2011  constants,
2012  name);
2013  break;
2014  }
2016  {
2017  const armnn::Convolution2dDescriptor& layerDescriptor =
2018  static_cast<const armnn::Convolution2dDescriptor&>(descriptor);
2019  SerializeConvolution2dLayer(layer,
2020  layerDescriptor,
2021  constants,
2022  name);
2023  break;
2024  }
2026  {
2027  const armnn::Convolution3dDescriptor& layerDescriptor =
2028  static_cast<const armnn::Convolution3dDescriptor&>(descriptor);
2029  SerializeConvolution3dLayer(layer,
2030  layerDescriptor,
2031  name);
2032  break;
2033  }
2035  {
2036  const armnn::DepthToSpaceDescriptor& layerDescriptor =
2037  static_cast<const armnn::DepthToSpaceDescriptor&>(descriptor);
2038  SerializeDepthToSpaceLayer(layer,
2039  layerDescriptor,
2040  name);
2041  break;
2042  }
2044  {
2045  const armnn::DepthwiseConvolution2dDescriptor& layerDescriptor =
2046  static_cast<const armnn::DepthwiseConvolution2dDescriptor&>(descriptor);
2047  SerializeDepthwiseConvolution2dLayer(layer,
2048  layerDescriptor,
2049  constants,
2050  name);
2051  break;
2052  }
2054  {
2055  SerializeDequantizeLayer(layer,
2056  name);
2057  break;
2058  }
2060  {
2061  const armnn::DetectionPostProcessDescriptor& layerDescriptor =
2062  static_cast<const armnn::DetectionPostProcessDescriptor&>(descriptor);
2063  SerializeDetectionPostProcessLayer(layer, layerDescriptor, constants, name);
2064  break;
2065  }
2067  {
2068  SerializeDivisionLayer(layer, name);
2069  break;
2070  }
2072  {
2073  const armnn::ElementwiseUnaryDescriptor& layerDescriptor =
2074  static_cast<const armnn::ElementwiseUnaryDescriptor&>(descriptor);
2075  SerializeElementwiseUnaryLayer(layer, layerDescriptor, name);
2076  break;
2077  }
2078  case armnn::LayerType::Fill :
2079  {
2080  const armnn::FillDescriptor& layerDescriptor =
2081  static_cast<const armnn::FillDescriptor&>(descriptor);
2082  SerializeFillLayer(layer, layerDescriptor, name);
2083  break;
2084  }
2086  {
2087  SerializeFloorLayer(layer, name);
2088  break;
2089  }
2091  {
2092  const armnn::FullyConnectedDescriptor& layerDescriptor =
2093  static_cast<const armnn::FullyConnectedDescriptor&>(descriptor);
2094  SerializeFullyConnectedLayer(layer, layerDescriptor, name);
2095  break;
2096  }
2098  {
2099  const armnn::GatherDescriptor& layerDescriptor =
2100  static_cast<const armnn::GatherDescriptor&>(descriptor);
2101  SerializeGatherLayer(layer, layerDescriptor, name);
2102  break;
2103  }
2105  {
2106  SerializeInputLayer(layer, id, name);
2107  break;
2108  }
2110  {
2111  const armnn::InstanceNormalizationDescriptor& layerDescriptor =
2112  static_cast<const armnn::InstanceNormalizationDescriptor&>(descriptor);
2113  SerializeInstanceNormalizationLayer(layer, layerDescriptor, name);
2114  break;
2115  }
2117  {
2118  const armnn::L2NormalizationDescriptor& layerDescriptor =
2119  static_cast<const armnn::L2NormalizationDescriptor&>(descriptor);
2120  SerializeL2NormalizationLayer(layer, layerDescriptor, name);
2121  break;
2122  }
2124  {
2125  const armnn::LogicalBinaryDescriptor& layerDescriptor =
2126  static_cast<const armnn::LogicalBinaryDescriptor&>(descriptor);
2127  SerializeLogicalBinaryLayer(layer, layerDescriptor, name);
2128  break;
2129  }
2131  {
2132  const armnn::LogSoftmaxDescriptor& layerDescriptor =
2133  static_cast<const armnn::LogSoftmaxDescriptor&>(descriptor);
2134  SerializeLogSoftmaxLayer(layer, layerDescriptor, name);
2135  break;
2136  }
2137  case armnn::LayerType::Lstm :
2138  {
2139  const armnn::LstmDescriptor& layerDescriptor =
2140  static_cast<const armnn::LstmDescriptor&>(descriptor);
2141  SerializeLstmLayer(layer, layerDescriptor, constants, name);
2142  break;
2143  }
2145  {
2146  const armnn::QLstmDescriptor& layerDescriptor =
2147  static_cast<const armnn::QLstmDescriptor&>(descriptor);
2148  SerializeQLstmLayer(layer, layerDescriptor, constants, name);
2149  break;
2150  }
2152  {
2153  SerializeMaximumLayer(layer, name);
2154  break;
2155  }
2156  case armnn::LayerType::Mean :
2157  {
2158  const armnn::MeanDescriptor& layerDescriptor =
2159  static_cast<const armnn::MeanDescriptor&>(descriptor);
2160  SerializeMeanLayer(layer, layerDescriptor, name);
2161  break;
2162  }
2164  {
2165  SerializeMergeLayer(layer, name);
2166  break;
2167  }
2169  {
2170  SerializeMinimumLayer(layer, name);
2171  break;
2172  }
2174  {
2175  SerializeMultiplicationLayer(layer, name);
2176  break;
2177  }
2179  {
2180  const armnn::NormalizationDescriptor& layerDescriptor =
2181  static_cast<const armnn::NormalizationDescriptor&>(descriptor);
2182  SerializeNormalizationLayer(layer, layerDescriptor, name);
2183  break;
2184  }
2186  {
2187  SerializeOutputLayer(layer, id, name);
2188  break;
2189  }
2190  case armnn::LayerType::Pad :
2191  {
2192  const armnn::PadDescriptor& layerDescriptor =
2193  static_cast<const armnn::PadDescriptor&>(descriptor);
2194  SerializePadLayer(layer, layerDescriptor, name);
2195  break;
2196  }
2198  {
2199  const armnn::PermuteDescriptor& layerDescriptor =
2200  static_cast<const armnn::PermuteDescriptor&>(descriptor);
2201  SerializePermuteLayer(layer, layerDescriptor, name);
2202  break;
2203  }
2205  {
2206  const armnn::Pooling2dDescriptor& layerDescriptor =
2207  static_cast<const armnn::Pooling2dDescriptor&>(descriptor);
2208  SerializePooling2dLayer(layer, layerDescriptor, name);
2209  break;
2210  }
2212  {
2213  SerializePreluLayer(layer, name);
2214  break;
2215  }
2217  {
2218  SerializeQuantizeLayer(layer, name);
2219  break;
2220  }
2222  SerializeQuantizedLstmLayer(layer, constants, name);
2223  break;
2225  {
2226  const armnn::ReshapeDescriptor &layerDescriptor =
2227  static_cast<const armnn::ReshapeDescriptor &>(descriptor);
2228  SerializeReshapeLayer(layer, layerDescriptor, name);
2229  break;
2230  }
2232  {
2233  SerializeRankLayer(layer, name);
2234  break;
2235  }
2237  {
2238  const armnn::ReduceDescriptor& layerDescriptor =
2239  static_cast<const armnn::ReduceDescriptor&>(descriptor);
2240  SerializeReduceLayer(layer, layerDescriptor, name);
2241  break;
2242  }
2244  {
2245  const armnn::ResizeDescriptor& layerDescriptor =
2246  static_cast<const armnn::ResizeDescriptor&>(descriptor);
2247  SerializeResizeLayer(layer, layerDescriptor, name);
2248  break;
2249  }
2251  {
2252  SerializeShapeLayer(layer, name);
2253  break;
2254  }
2256  {
2257  const armnn::SliceDescriptor& layerDescriptor =
2258  static_cast<const armnn::SliceDescriptor&>(descriptor);
2259  SerializeSliceLayer(layer, layerDescriptor, name);
2260  break;
2261  }
2263  {
2264  const armnn::SoftmaxDescriptor& layerDescriptor =
2265  static_cast<const armnn::SoftmaxDescriptor&>(descriptor);
2266  SerializeSoftmaxLayer(layer, layerDescriptor, name);
2267  break;
2268  }
2270  {
2271  const armnn::SpaceToBatchNdDescriptor& layerDescriptor =
2272  static_cast<const armnn::SpaceToBatchNdDescriptor&>(descriptor);
2273  SerializeSpaceToBatchNdLayer(layer, layerDescriptor, name);
2274  break;
2275  }
2277  {
2278  const armnn::SpaceToDepthDescriptor& layerDescriptor =
2279  static_cast<const armnn::SpaceToDepthDescriptor&>(descriptor);
2280  SerializeSpaceToDepthLayer(layer, layerDescriptor, name);
2281  break;
2282  }
2284  {
2285  const armnn::SplitterDescriptor& layerDescriptor =
2286  static_cast<const armnn::SplitterDescriptor&>(descriptor);
2287  SerializeSplitterLayer(layer, layerDescriptor, name);
2288  break;
2289  }
2291  {
2292  const armnn::StackDescriptor& layerDescriptor =
2293  static_cast<const armnn::StackDescriptor&>(descriptor);
2294  SerializeStackLayer(layer, layerDescriptor, name);
2295  break;
2296  }
2298  {
2299  const armnn::StandInDescriptor& layerDescriptor =
2300  static_cast<const armnn::StandInDescriptor&>(descriptor);
2301  SerializeStandInLayer(layer, layerDescriptor, name);
2302  break;
2303  }
2305  {
2306  const armnn::StridedSliceDescriptor& layerDescriptor =
2307  static_cast<const armnn::StridedSliceDescriptor&>(descriptor);
2308  SerializeStridedSliceLayer(layer, layerDescriptor, name);
2309  break;
2310  }
2312  {
2313  SerializeSubtractionLayer(layer, name);
2314  break;
2315  }
2317  {
2318  SerializeSwitchLayer(layer, name);
2319  break;
2320  }
2322  {
2323  const armnn::TransposeDescriptor& layerDescriptor =
2324  static_cast<const armnn::TransposeDescriptor&>(descriptor);
2325  SerializeTransposeLayer(layer, layerDescriptor, name);
2326  break;
2327  }
2329  {
2330  const armnn::TransposeConvolution2dDescriptor& layerDescriptor =
2331  static_cast<const armnn::TransposeConvolution2dDescriptor&>(descriptor);
2332  SerializeTransposeConvolution2dLayer(layer, layerDescriptor, constants, name);
2333  break;
2334  }
2336  {
2337  const armnn::UnidirectionalSequenceLstmDescriptor& layerDescriptor =
2338  static_cast<const armnn::UnidirectionalSequenceLstmDescriptor&>(descriptor);
2339  SerializeUnidirectionalSequenceLstmLayer(layer, layerDescriptor, constants, name);
2340  break;
2341  }
2342  default:
2343  {
2345  fmt::format("A layer of unknown type was given to the serializer. Layer name: {}; Layer Id: {}",
2346  layer->GetName(),
2347  id));
2348  }
2349  }
2350 }
2351 
2353 {
2354  // Iterate through to network
2355  inNetwork.ExecuteStrategy(m_SerializerStrategy);
2356  flatbuffers::FlatBufferBuilder& fbBuilder = m_SerializerStrategy.GetFlatBufferBuilder();
2357 
2358  // Create FlatBuffer SerializedGraph
2359  auto serializedGraph = serializer::CreateSerializedGraph(
2360  fbBuilder,
2361  fbBuilder.CreateVector(m_SerializerStrategy.GetSerializedLayers()),
2362  fbBuilder.CreateVector(m_SerializerStrategy.GetInputIds()),
2363  fbBuilder.CreateVector(m_SerializerStrategy.GetOutputIds()),
2364  m_SerializerStrategy.GetVersionTable());
2365 
2366  // Serialize the graph
2367  fbBuilder.Finish(serializedGraph);
2368 }
2369 
2370 
2372 {
2373  flatbuffers::FlatBufferBuilder& fbBuilder = m_SerializerStrategy.GetFlatBufferBuilder();
2374 
2375  auto bytesToWrite = armnn::numeric_cast<std::streamsize>(fbBuilder.GetSize());
2376  stream.write(reinterpret_cast<const char*>(fbBuilder.GetBufferPointer()), bytesToWrite);
2377  return !stream.bad();
2378 }
2379 
2380 } // namespace armnnSerializer
uint32_t m_PadBottom
Padding bottom value in the height dimension.
bool m_BiasEnabled
Enable/disable bias.
flatbuffers::Offset< Convolution3dDescriptor > CreateConvolution3dDescriptor(flatbuffers::FlatBufferBuilder &_fbb, uint32_t padLeft=0, uint32_t padRight=0, uint32_t padTop=0, uint32_t padBottom=0, uint32_t padFront=0, uint32_t padBack=0, uint32_t strideX=0, uint32_t strideY=0, uint32_t strideZ=0, uint32_t dilationX=1, uint32_t dilationY=1, uint32_t dilationZ=1, bool biasEnabled=false, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NDHWC)
flatbuffers::Offset< LongData > CreateLongData(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< int64_t >> data=0)
float m_Eps
Used to avoid dividing by zero.
virtual unsigned int GetNumOutputSlots() const =0
Returns the number of connectable output slots.
armnnSerializer::UnaryOperation GetFlatBufferUnaryOperation(armnn::UnaryOperation comparisonOperation)
bool m_ProjectionEnabled
Enable/disable the projection layer.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
flatbuffers::Offset< ReshapeDescriptor > CreateReshapeDescriptor(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> targetShape=0)
ARMNN_NO_DEPRECATE_WARN_END void ExecuteStrategy(IStrategy &strategy) const
Definition: Network.cpp:468
flatbuffers::Offset< ReduceLayer > CreateReduceLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::ReduceDescriptor > descriptor=0)
UnaryOperation m_Operation
Specifies the elementwiseUnary operation to execute.
uint32_t m_Axis
0-based axis along which to stack the input tensors.
flatbuffers::Offset< OutputSlot > CreateOutputSlot(flatbuffers::FlatBufferBuilder &_fbb, uint32_t index=0, flatbuffers::Offset< armnnSerializer::TensorInfo > tensorInfo=0)
A ViewsDescriptor for the SplitterLayer.
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:61
flatbuffers::Offset< DepthwiseConvolution2dDescriptor > CreateDepthwiseConvolution2dDescriptor(flatbuffers::FlatBufferBuilder &_fbb, uint32_t padLeft=0, uint32_t padRight=0, uint32_t padTop=0, uint32_t padBottom=0, uint32_t strideX=0, uint32_t strideY=0, uint32_t dilationX=1, uint32_t dilationY=1, bool biasEnabled=false, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NCHW)
float m_ScaleW
Center size encoding scale weight.
uint32_t m_PadBottom
Padding bottom value in the height dimension.
bool m_BiasEnabled
Enable/disable bias.
virtual unsigned int GetNumInputSlots() const =0
Returns the number of connectable input slots.
float m_K
Kappa value used for the across channel normalization equation.
int m_Axis
Scalar, defaulted to the last index (-1), specifying the dimension the activation will be performed on.
A TransposeConvolution2dDescriptor for the TransposeConvolution2dLayer.
flatbuffers::Offset< LstmLayer > CreateLstmLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::LstmDescriptor > descriptor=0, flatbuffers::Offset< armnnSerializer::LstmInputParams > inputParams=0)
const TensorShape & GetShape() const
Definition: Tensor.hpp:191
flatbuffers::Offset< L2NormalizationLayer > CreateL2NormalizationLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::L2NormalizationDescriptor > descriptor=0)
uint32_t m_PadBottom
Padding bottom value in the height dimension.
uint32_t m_PadLeft
Padding left value in the width dimension.
flatbuffers::Offset< TransposeConvolution2dDescriptor > CreateTransposeConvolution2dDescriptor(flatbuffers::FlatBufferBuilder &_fbb, uint32_t padLeft=0, uint32_t padRight=0, uint32_t padTop=0, uint32_t padBottom=0, uint32_t strideX=0, uint32_t strideY=0, bool biasEnabled=false, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NCHW)
float m_ClippingThresProj
Clipping threshold value for the projection.
int32_t m_ShrinkAxisMask
Shrink axis mask value. If set, the nth specification shrinks the dimensionality by 1...
A ReshapeDescriptor for the ReshapeLayer.
flatbuffers::Offset< ResizeDescriptor > CreateResizeDescriptor(flatbuffers::FlatBufferBuilder &_fbb, uint32_t targetHeight=0, uint32_t targetWidth=0, armnnSerializer::ResizeMethod method=armnnSerializer::ResizeMethod_NearestNeighbor, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NHWC, bool alignCorners=false, bool halfPixelCenters=false)
flatbuffers::Offset< FillLayer > CreateFillLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::FillDescriptor > descriptor=0)
std::vector< int > m_Begin
Begin values for the input that will be sliced.
uint32_t m_PadBack
Padding back value in the depth dimension.
float m_PadValue
Optional value to use for padding, defaults to 0.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
uint32_t GetNumDimensions() const
Get the number of dimensions.
A ComparisonDescriptor for the ComparisonLayer.
Definition: Descriptors.hpp:78
void Serialize(const armnn::INetwork &inNetwork)
Serializes the network to ArmNN SerializedGraph.
float m_ScaleX
Center size encoding scale x.
TensorShape m_InputShape
Required shape of all input tensors.
bool m_TransposeWeightMatrix
Enable/disable transpose weight matrix.
Dimensionality GetDimensionality() const
Function that returns the tensor type.
Definition: Tensor.hpp:92
flatbuffers::Offset< GatherLayer > CreateGatherLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::GatherDescriptor > descriptor=0)
flatbuffers::Offset< RankLayer > CreateRankLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
bool HasPerAxisQuantization() const
Definition: Tensor.cpp:448
uint32_t m_PoolWidth
Pooling width value.
bool m_PeepholeEnabled
Enable/disable peephole.
armnnSerializer::OutputShapeRounding GetFlatBufferOutputShapeRounding(armnn::OutputShapeRounding outputShapeRounding)
flatbuffers::Offset< TransposeLayer > CreateTransposeLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::TransposeDescriptor > descriptor=0)
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
A Convolution2dDescriptor for the Convolution2dLayer.
float m_Alpha
Alpha value for the normalization equation.
uint32_t m_PadLeft
Padding left value in the width dimension.
flatbuffers::Offset< ComparisonLayer > CreateComparisonLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::ComparisonDescriptor > descriptor=0)
bool m_KeepDims
if true then output shape has no change.
float m_HiddenStateScale
Hidden State quantization scale.
bool m_BiasEnabled
Enable/disable bias.
flatbuffers::Offset< ConstTensor > CreateConstTensor(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::TensorInfo > info=0, armnnSerializer::ConstTensorData data_type=armnnSerializer::ConstTensorData_NONE, flatbuffers::Offset< void > data=0)
Optional< unsigned int > GetQuantizationDim() const
Definition: Tensor.cpp:496
flatbuffers::Offset< QuantizeLayer > CreateQuantizeLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
float m_OutputIntermediateScale
Output intermediate quantization scale.
ResizeMethod m_Method
The Interpolation method to use (Bilinear, NearestNeighbor).
float m_Gamma
Gamma, the scale scalar value applied for the normalized tensor. Defaults to 1.0. ...
float m_Beta
Exponentiation value.
flatbuffers::Offset< InputSlot > CreateInputSlot(flatbuffers::FlatBufferBuilder &_fbb, uint32_t index=0, const armnnSerializer::Connection *connection=0)
std::vector< unsigned int > m_Size
Size of the slice in each dimension.
static ISerializer * CreateRaw()
Definition: Serializer.cpp:30
flatbuffers::Offset< SpaceToDepthDescriptor > CreateSpaceToDepthDescriptor(flatbuffers::FlatBufferBuilder &_fbb, uint32_t blockSize=0, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NHWC)
float m_Eps
Value to add to the variance. Used to avoid dividing by zero.
PaddingMethod m_PaddingMethod
The padding method to be used. (Exclude, IgnoreValue).
ArgMinMaxFunction m_Function
Specify if the function is to find Min or Max.
Definition: Descriptors.hpp:70
uint32_t m_DetectionsPerClass
Detections per classes, used in Regular NMS.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
flatbuffers::Offset< QuantizedLstmLayer > CreateQuantizedLstmLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::QuantizedLstmInputParams > inputParams=0)
flatbuffers::Offset< TransposeDescriptor > CreateTransposeDescriptor(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> dimMappings=0)
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
serializer::ActivationFunction GetFlatBufferActivationFunction(armnn::ActivationFunction function)
Definition: Serializer.cpp:55
Main network class which provides the interface for building up a neural network. ...
Definition: INetwork.hpp:202
flatbuffers::Offset< DetectionPostProcessDescriptor > CreateDetectionPostProcessDescriptor(flatbuffers::FlatBufferBuilder &_fbb, uint32_t maxDetections=0, uint32_t maxClassesPerDetection=0, uint32_t detectionsPerClass=0, float nmsScoreThreshold=0.0f, float nmsIouThreshold=0.0f, uint32_t numClasses=0, bool useRegularNms=false, float scaleX=0.0f, float scaleY=0.0f, float scaleW=0.0f, float scaleH=0.0f)
armnnSerializer::NormalizationAlgorithmMethod GetFlatBufferNormalizationAlgorithmMethod(armnn::NormalizationAlgorithmMethod normalizationAlgorithmMethod)
uint32_t m_PadTop
Padding top value in the height dimension.
flatbuffers::Offset< AnyLayer > CreateAnyLayer(flatbuffers::FlatBufferBuilder &_fbb, armnnSerializer::Layer layer_type=armnnSerializer::Layer_NONE, flatbuffers::Offset< void > layer=0)
uint32_t m_PadBottom
Padding bottom value in the height dimension.
flatbuffers::Offset< DepthwiseConvolution2dLayer > CreateDepthwiseConvolution2dLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::DepthwiseConvolution2dDescriptor > descriptor=0, flatbuffers::Offset< armnnSerializer::ConstTensor > weights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > biases=0)
bool m_BiasEnabled
Enable/disable bias.
A LogicalBinaryDescriptor for the LogicalBinaryLayer.
flatbuffers::Offset< MergeLayer > CreateMergeLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
uint32_t m_PadRight
Padding right value in the width dimension.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
MemoryType GetMemoryArea() const
Definition: Tensor.hpp:305
std::vector< std::pair< unsigned int, unsigned int > > m_PadList
Specifies the padding for input dimension.
uint32_t GetNumViews() const
Get the number of views.
ReduceOperation m_ReduceOperation
Specifies the reduction operation to execute.
flatbuffers::Offset< QLstmInputParams > CreateQLstmInputParams(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::ConstTensor > inputToForgetWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > inputToCellWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > inputToOutputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > recurrentToForgetWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > recurrentToCellWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > recurrentToOutputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > forgetGateBias=0, flatbuffers::Offset< armnnSerializer::ConstTensor > cellBias=0, flatbuffers::Offset< armnnSerializer::ConstTensor > outputGateBias=0, flatbuffers::Offset< armnnSerializer::ConstTensor > inputToInputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > recurrentToInputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > inputGateBias=0, flatbuffers::Offset< armnnSerializer::ConstTensor > projectionWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > projectionBias=0, flatbuffers::Offset< armnnSerializer::ConstTensor > cellToInputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > cellToForgetWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > cellToOutputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > inputLayerNormWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > forgetLayerNormWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > cellLayerNormWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > outputLayerNormWeights=0)
bool m_TimeMajor
Enable/disable time major.
Copyright (c) 2021 ARM Limited and Contributors.
void IgnoreUnused(Ts &&...)
flatbuffers::Offset< TensorInfo > CreateTensorInfo(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> dimensions=0, armnnSerializer::DataType dataType=armnnSerializer::DataType_Float16, float quantizationScale=1.0f, int32_t quantizationOffset=0, flatbuffers::Offset< flatbuffers::Vector< float >> quantizationScales=0, uint32_t quantizationDim=0, uint32_t dimensionality=1, flatbuffers::Offset< flatbuffers::Vector< uint8_t >> dimensionSpecificity=0, bool isConstant=false)
uint32_t m_PadBottom
Padding bottom value in the height dimension.
int32_t m_BeginMask
Begin mask value.
flatbuffers::Offset< FullyConnectedDescriptor > CreateFullyConnectedDescriptor(flatbuffers::FlatBufferBuilder &_fbb, bool biasEnabled=false, bool transposeWeightsMatrix=false, bool constantWeights=true)
flatbuffers::Offset< TransposeConvolution2dLayer > CreateTransposeConvolution2dLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::TransposeConvolution2dDescriptor > descriptor=0, flatbuffers::Offset< armnnSerializer::ConstTensor > weights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > biases=0)
SizeType GetSize() const
Definition: Types.hpp:311
uint32_t m_DilationY
Dilation along y axis.
int32_t m_EndMask
End mask value.
A SpaceToDepthDescriptor for the SpaceToDepthLayer.
flatbuffers::Offset< PreluLayer > CreatePreluLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
std::vector< std::pair< unsigned int, unsigned int > > m_PadList
Specifies the padding values for the input dimension: heightPad{top, bottom} widthPad{left, right}.
std::vector< float > GetQuantizationScales() const
Definition: Tensor.cpp:453
uint32_t m_DilationX
Dilation along x axis.
flatbuffers::Offset< StandInDescriptor > CreateStandInDescriptor(flatbuffers::FlatBufferBuilder &_fbb, uint32_t numInputs=0, uint32_t numOutputs=0)
bool SaveSerializedToStream(std::ostream &stream)
Serializes the SerializedGraph to the stream.
Definition: Serializer.cpp:50
uint32_t m_DilationY
Dilation factor value for height dimension.
armnnSerializer::ConstTensorData GetFlatBufferConstTensorData(armnn::DataType dataType)
bool GetDimensionSpecificity(unsigned int i) const
Gets information about if the dimension size has been specified or not.
Definition: Tensor.cpp:211
LogicalBinaryOperation m_Operation
Specifies the logical operation to execute.
A BatchToSpaceNdDescriptor for the BatchToSpaceNdLayer.
void ExecuteStrategy(const armnn::IConnectableLayer *layer, const armnn::BaseDescriptor &descriptor, const std::vector< armnn::ConstTensor > &constants, const char *name, const armnn::LayerBindingId id) override
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
PermutationVector m_DimMappings
Indicates how to translate tensor elements from a given source into the target destination, when source and target potentially have different memory layouts e.g.
flatbuffers::Offset< MultiplicationLayer > CreateMultiplicationLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
flatbuffers::Offset< DepthToSpaceLayer > CreateDepthToSpaceLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::DepthToSpaceDescriptor > descriptor=0)
flatbuffers::Offset< InstanceNormalizationLayer > CreateInstanceNormalizationLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::InstanceNormalizationDescriptor > descriptor=0)
armnnSerializer::ReduceOperation GetFlatBufferReduceOperation(armnn::ReduceOperation reduceOperation)
int LayerBindingId
Type of identifiers for bindable layers (inputs, outputs).
Definition: Types.hpp:277
flatbuffers::Offset< SliceLayer > CreateSliceLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::SliceDescriptor > descriptor=0)
armnnSerializer::DataType GetFlatBufferDataType(armnn::DataType dataType)
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
uint32_t m_NumOutputs
Number of output tensors.
NormalizationAlgorithmMethod m_NormMethodType
Normalization method algorithm to use (LocalBrightness, LocalContrast).
flatbuffers::Offset< Convolution2dDescriptor > CreateConvolution2dDescriptor(flatbuffers::FlatBufferBuilder &_fbb, uint32_t padLeft=0, uint32_t padRight=0, uint32_t padTop=0, uint32_t padBottom=0, uint32_t strideX=0, uint32_t strideY=0, uint32_t dilationX=1, uint32_t dilationY=1, bool biasEnabled=false, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NCHW)
flatbuffers::Offset< InputLayer > CreateInputLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::BindableLayerBase > base=0)
A ResizeBilinearDescriptor for the ResizeBilinearLayer.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
uint32_t m_MaxClassesPerDetection
Maximum numbers of classes per detection, used in Fast NMS.
Base class for all descriptors.
Definition: Descriptors.hpp:22
std::vector< unsigned int > m_Axis
Values for the dimensions to reduce.
A StackDescriptor for the StackLayer.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
flatbuffers::Offset< ShortData > CreateShortData(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< int16_t >> data=0)
serializer::ArgMinMaxFunction GetFlatBufferArgMinMaxFunction(armnn::ArgMinMaxFunction function)
Definition: Serializer.cpp:86
TensorShape m_TargetShape
Target shape value.
bool SaveSerializedToStream(std::ostream &stream)
Serializes the SerializedGraph to the stream.
flatbuffers::Offset< ConcatLayer > CreateConcatLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::OriginsDescriptor > descriptor=0)
uint32_t m_PoolHeight
Pooling height value.
uint32_t m_PadTop
Padding top value in the height dimension.
uint32_t m_MaxDetections
Maximum numbers of detections.
A PadDescriptor for the PadLayer.
flatbuffers::Offset< SubtractionLayer > CreateSubtractionLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
flatbuffers::Offset< BindableLayerBase > CreateBindableLayerBase(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, int32_t layerBindingId=0)
const uint32_t * GetViewOrigin(uint32_t idx) const
Return the view origin at the int value idx.
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
flatbuffers::Offset< ArgMinMaxLayer > CreateArgMinMaxLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::ArgMinMaxDescriptor > descriptor=0)
armnnSerializer::NormalizationAlgorithmChannel GetFlatBufferNormalizationAlgorithmChannel(armnn::NormalizationAlgorithmChannel normalizationAlgorithmChannel)
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
flatbuffers::Offset< QLstmDescriptor > CreateQLstmDescriptor(flatbuffers::FlatBufferBuilder &_fbb, bool cifgEnabled=true, bool peepholeEnabled=false, bool projectionEnabled=false, bool layerNormEnabled=false, float cellClip=0.0f, float projectionClip=0.0f, float inputIntermediateScale=0.0f, float forgetIntermediateScale=0.0f, float cellIntermediateScale=0.0f, float outputIntermediateScale=0.0f, int32_t hiddenStateZeroPoint=0, float hiddenStateScale=0.0f)
bool m_LayerNormEnabled
Enable/disable layer normalization.
float m_NmsIouThreshold
Intersection over union threshold.
flatbuffers::Offset< ReshapeLayer > CreateReshapeLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::ReshapeDescriptor > descriptor=0)
armnnSerializer::LogicalBinaryOperation GetFlatBufferLogicalBinaryOperation(armnn::LogicalBinaryOperation logicalBinaryOperation)
flatbuffers::Offset< ArgMinMaxDescriptor > CreateArgMinMaxDescriptor(flatbuffers::FlatBufferBuilder &_fbb, armnnSerializer::ArgMinMaxFunction argMinMaxFunction=armnnSerializer::ArgMinMaxFunction_Min, int32_t axis=0)
flatbuffers::Offset< SoftmaxDescriptor > CreateSoftmaxDescriptor(flatbuffers::FlatBufferBuilder &_fbb, float beta=0.0f, int32_t axis=-1)
An LstmDescriptor for the LstmLayer.
uint32_t m_PadRight
Padding right value in the width dimension.
flatbuffers::Offset< AdditionLayer > CreateAdditionLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
uint32_t m_DilationX
Dilation factor value for width dimension.
uint32_t m_PadTop
Padding top value in the height dimension.
flatbuffers::Offset< L2NormalizationDescriptor > CreateL2NormalizationDescriptor(flatbuffers::FlatBufferBuilder &_fbb, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NCHW, float eps=1e-12f)
std::vector< unsigned int > m_Begin
Beginning indices of the slice in each dimension.
int32_t m_NewAxisMask
New axis mask value.
flatbuffers::Offset< MinimumLayer > CreateMinimumLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
bool m_KeepDims
Enable/disable keep dimensions. If true, then the reduced dimensions that are of length 1 are kept...
flatbuffers::Offset< ByteData > CreateByteData(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< int8_t >> data=0)
std::vector< unsigned int > m_BlockShape
Block shape values.
flatbuffers::Offset< FeatureCompatibilityVersions > CreateFeatureCompatibilityVersions(flatbuffers::FlatBufferBuilder &_fbb, uint32_t bindingIdsScheme=0, uint32_t weightsLayoutScheme=0, uint32_t constantTensorsAsInputs=0)
float m_Eps
Epsilon, small scalar value added to variance to avoid dividing by zero. Defaults to 1e-12f...
An output connection slot for a layer.
Definition: INetwork.hpp:37
flatbuffers::Offset< DepthToSpaceDescriptor > CreateDepthToSpaceDescriptor(flatbuffers::FlatBufferBuilder &_fbb, uint32_t blockSize=0, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NHWC)
A L2NormalizationDescriptor for the L2NormalizationLayer.
int32_t GetQuantizationOffset() const
Definition: Tensor.cpp:480
An ArgMinMaxDescriptor for ArgMinMaxLayer.
Definition: Descriptors.hpp:56
armnnSerializer::PaddingMode GetFlatBufferPaddingMode(armnn::PaddingMode paddingMode)
float GetQuantizationScale() const
Definition: Tensor.cpp:463
flatbuffers::Offset< LstmInputParams > CreateLstmInputParams(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::ConstTensor > inputToForgetWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > inputToCellWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > inputToOutputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > recurrentToForgetWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > recurrentToCellWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > recurrentToOutputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > forgetGateBias=0, flatbuffers::Offset< armnnSerializer::ConstTensor > cellBias=0, flatbuffers::Offset< armnnSerializer::ConstTensor > outputGateBias=0, flatbuffers::Offset< armnnSerializer::ConstTensor > inputToInputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > recurrentToInputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > cellToInputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > inputGateBias=0, flatbuffers::Offset< armnnSerializer::ConstTensor > projectionWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > projectionBias=0, flatbuffers::Offset< armnnSerializer::ConstTensor > cellToForgetWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > cellToOutputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > inputLayerNormWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > forgetLayerNormWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > cellLayerNormWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > outputLayerNormWeights=0)
DataType GetDataType() const
Definition: Tensor.hpp:198
An OriginsDescriptor for the ConcatLayer.
A ReduceDescriptor for the REDUCE operators.
float m_ProjectionClip
Clipping threshold value for the projection.
flatbuffers::Offset< CastLayer > CreateCastLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
flatbuffers::Offset< LayerBase > CreateLayerBase(flatbuffers::FlatBufferBuilder &_fbb, uint32_t index=0, flatbuffers::Offset< flatbuffers::String > layerName=0, armnnSerializer::LayerType layerType=armnnSerializer::LayerType_Addition, flatbuffers::Offset< flatbuffers::Vector< flatbuffers::Offset< armnnSerializer::InputSlot >>> inputSlots=0, flatbuffers::Offset< flatbuffers::Vector< flatbuffers::Offset< armnnSerializer::OutputSlot >>> outputSlots=0)
flatbuffers::Offset< ShapeLayer > CreateShapeLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
A FullyConnectedDescriptor for the FullyConnectedLayer.
int32_t m_EllipsisMask
Ellipsis mask value.
virtual LayerGuid GetGuid() const =0
Returns the unique id of the layer.
bool m_BiasEnabled
Enable/disable bias.
A tensor defined by a TensorInfo (shape and data type) and an immutable backing store.
Definition: Tensor.hpp:327
flatbuffers::Offset< QuantizedLstmInputParams > CreateQuantizedLstmInputParams(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::ConstTensor > inputToInputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > inputToForgetWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > inputToCellWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > inputToOutputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > recurrentToInputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > recurrentToForgetWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > recurrentToCellWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > recurrentToOutputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > inputGateBias=0, flatbuffers::Offset< armnnSerializer::ConstTensor > forgetGateBias=0, flatbuffers::Offset< armnnSerializer::ConstTensor > cellBias=0, flatbuffers::Offset< armnnSerializer::ConstTensor > outputGateBias=0)
flatbuffers::Offset< ReduceDescriptor > CreateReduceDescriptor(flatbuffers::FlatBufferBuilder &_fbb, bool keepDims=false, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> axis=0, armnnSerializer::ReduceOperation reduceOperation=armnnSerializer::ReduceOperation_Sum)
flatbuffers::Offset< StackDescriptor > CreateStackDescriptor(flatbuffers::FlatBufferBuilder &_fbb, uint32_t axis=0, uint32_t numInputs=0, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> inputShape=0)
flatbuffers::Offset< BatchToSpaceNdDescriptor > CreateBatchToSpaceNdDescriptor(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> blockShape=0, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> crops=0, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NHWC)
float m_InputIntermediateScale
Input intermediate quantization scale.
uint32_t m_TargetWidth
Target width value.
flatbuffers::Offset< SplitterLayer > CreateSplitterLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::ViewsDescriptor > descriptor=0)
A GatherDescriptor for the GatherLayer.
bool m_PeepholeEnabled
Enable/disable peephole.
uint32_t m_NumClasses
Number of classes.
bool m_HalfPixelCenters
Half Pixel Centers.
flatbuffers::Offset< OutputLayer > CreateOutputLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::BindableLayerBase > base=0)
void Serialize(const armnn::INetwork &inNetwork)
Serializes the network to ArmNN SerializedGraph.
Definition: Serializer.cpp:45
flatbuffers::Offset< SoftmaxLayer > CreateSoftmaxLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::SoftmaxDescriptor > descriptor=0)
flatbuffers::Offset< FillDescriptor > CreateFillDescriptor(flatbuffers::FlatBufferBuilder &_fbb, float value=0.0f)
uint32_t m_PadTop
Padding top value in the height dimension.
A StandInDescriptor for the StandIn layer.
A QLstmDescriptor for the QLstmLayer.
flatbuffers::Offset< StridedSliceLayer > CreateStridedSliceLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::StridedSliceDescriptor > descriptor=0)
virtual unsigned int CalculateIndexOnOwner() const =0
flatbuffers::Offset< LogSoftmaxDescriptor > CreateLogSoftmaxDescriptor(flatbuffers::FlatBufferBuilder &_fbb, float beta=1.0f, int32_t axis=-1)
bool m_UseRegularNms
Use Regular NMS.
uint32_t m_PadFront
Padding front value in the depth dimension.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
flatbuffers::Offset< MeanLayer > CreateMeanLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::MeanDescriptor > descriptor=0)
std::vector< unsigned int > m_BlockShape
Block shape value.
std::vector< int > m_Stride
Stride values for the input that will be sliced.
An ActivationDescriptor for the ActivationLayer.
Definition: Descriptors.hpp:25
flatbuffers::Offset< ActivationLayer > CreateActivationLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::ActivationDescriptor > descriptor=0)
flatbuffers::Offset< SpaceToDepthLayer > CreateSpaceToDepthLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::SpaceToDepthDescriptor > descriptor=0)
flatbuffers::Offset< SliceDescriptor > CreateSliceDescriptor(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> begin=0, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> size=0)
const TensorInfo & GetInfo() const
Definition: Tensor.hpp:295
min(a, max(b, input)) ReLu1 & ReLu6.
flatbuffers::Offset< BatchNormalizationLayer > CreateBatchNormalizationLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::BatchNormalizationDescriptor > descriptor=0, flatbuffers::Offset< armnnSerializer::ConstTensor > mean=0, flatbuffers::Offset< armnnSerializer::ConstTensor > variance=0, flatbuffers::Offset< armnnSerializer::ConstTensor > beta=0, flatbuffers::Offset< armnnSerializer::ConstTensor > gamma=0)
flatbuffers::Offset< BatchNormalizationDescriptor > CreateBatchNormalizationDescriptor(flatbuffers::FlatBufferBuilder &_fbb, float eps=0.0f, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NHWC)
uint32_t m_NumInputs
Number of input tensors.
flatbuffers::Offset< GatherDescriptor > CreateGatherDescriptor(flatbuffers::FlatBufferBuilder &_fbb, int32_t axis=0)
flatbuffers::Offset< Convolution3dLayer > CreateConvolution3dLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::Convolution3dDescriptor > descriptor=0)
uint32_t m_PadLeft
Padding left value in the width dimension.
flatbuffers::Offset< ActivationDescriptor > CreateActivationDescriptor(flatbuffers::FlatBufferBuilder &_fbb, armnnSerializer::ActivationFunction activationFunction=armnnSerializer::ActivationFunction_Sigmoid, float a=0.0f, float b=0.0f)
uint32_t m_TargetHeight
Target height value.
uint32_t m_ActivationFunc
The activation function to use.
A SliceDescriptor for the SliceLayer.
flatbuffers::Offset< NormalizationLayer > CreateNormalizationLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::NormalizationDescriptor > descriptor=0)
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
A Convolution3dDescriptor for the Convolution3dLayer.
uint32_t m_PadRight
Padding right value in the width dimension.
flatbuffers::Offset< ViewsDescriptor > CreateViewsDescriptor(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::OriginsDescriptor > origins=0, flatbuffers::Offset< flatbuffers::Vector< flatbuffers::Offset< armnnSerializer::UintVector >>> viewSizes=0)
virtual LayerType GetType() const =0
Returns the armnn::LayerType of this layer.
flatbuffers::Offset< PermuteDescriptor > CreatePermuteDescriptor(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> dimMappings=0)
float m_ClippingThresCell
Clipping threshold value for the cell state.
unsigned int m_BlockSize
Scalar specifying the input block size. It must be >= 1.
uint32_t m_NumGroups
Number of groups for the channel shuffle operation.
flatbuffers::Offset< MeanDescriptor > CreateMeanDescriptor(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> axis=0, bool keepDims=false)
const uint32_t * GetViewOrigin(uint32_t idx) const
Get the view origin at the int value idx.
PaddingMode m_PaddingMode
Specifies the Padding mode (Constant, Reflect or Symmetric).
flatbuffers::Offset< StandInLayer > CreateStandInLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::StandInDescriptor > descriptor=0)
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
float m_ForgetIntermediateScale
Forget intermediate quantization scale.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
float m_Beta
Beta, the offset scalar value applied for the normalized tensor. Defaults to 1.0. ...
armnnSerializer::DataLayout GetFlatBufferDataLayout(armnn::DataLayout dataLayout)
std::vector< uint32_t > m_vAxis
The indices of the dimensions to reduce.
float m_ScaleH
Center size encoding scale height.
ComparisonOperation m_Operation
Specifies the comparison operation to execute.
Definition: Descriptors.hpp:94
std::vector< int > m_End
End values for the input that will be sliced.
flatbuffers::Offset< SwitchLayer > CreateSwitchLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
A SpaceToBatchNdDescriptor for the SpaceToBatchNdLayer.
DataLayout m_DataLayout
The data layout to be used (NDHWC, NCDHW).
NormalizationAlgorithmChannel m_NormChannelType
Normalization channel algorithm to use (Across, Within).
const uint32_t * GetViewSizes(uint32_t idx) const
Get the view sizes at the int value idx.
float m_CellClip
Clipping threshold value for the cell state.
flatbuffers::Offset< ElementwiseUnaryDescriptor > CreateElementwiseUnaryDescriptor(flatbuffers::FlatBufferBuilder &_fbb, armnnSerializer::UnaryOperation operation=armnnSerializer::UnaryOperation_Abs)
float m_A
Alpha upper bound value used by the activation functions. (BoundedReLu, Linear, TanH, Elu).
Definition: Descriptors.hpp:50
uint32_t m_DilationX
Dilation along x axis.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
flatbuffers::Offset< PadDescriptor > CreatePadDescriptor(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> padList=0, float padValue=0.0f, armnnSerializer::PaddingMode paddingMode=armnnSerializer::PaddingMode_Constant)
bool m_CifgEnabled
Enable/disable cifg (coupled input & forget gate).
flatbuffers::Offset< PadLayer > CreatePadLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::PadDescriptor > descriptor=0)
flatbuffers::Offset< FloorLayer > CreateFloorLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
std::unique_ptr< ISerializer, void(*)(ISerializer *serializer)> ISerializerPtr
Definition: ISerializer.hpp:15
flatbuffers::Offset< NormalizationDescriptor > CreateNormalizationDescriptor(flatbuffers::FlatBufferBuilder &_fbb, armnnSerializer::NormalizationAlgorithmChannel normChannelType=armnnSerializer::NormalizationAlgorithmChannel_Across, armnnSerializer::NormalizationAlgorithmMethod normMethodType=armnnSerializer::NormalizationAlgorithmMethod_LocalBrightness, uint32_t normSize=0, float alpha=0.0f, float beta=0.0f, float k=0.0f, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NCHW)
uint32_t m_PadLeft
Padding left value in the width dimension.
armnnSerializer::ComparisonOperation GetFlatBufferComparisonOperation(armnn::ComparisonOperation comparisonOperation)
flatbuffers::Offset< Pooling2dDescriptor > CreatePooling2dDescriptor(flatbuffers::FlatBufferBuilder &_fbb, armnnSerializer::PoolingAlgorithm poolType=armnnSerializer::PoolingAlgorithm_Max, uint32_t padLeft=0, uint32_t padRight=0, uint32_t padTop=0, uint32_t padBottom=0, uint32_t poolWidth=0, uint32_t poolHeight=0, uint32_t strideX=0, uint32_t strideY=0, armnnSerializer::OutputShapeRounding outputShapeRounding=armnnSerializer::OutputShapeRounding_Floor, armnnSerializer::PaddingMethod paddingMethod=armnnSerializer::PaddingMethod_IgnoreValue, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NHWC)
bool m_AlignCorners
Aligned corners.
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
flatbuffers::Offset< ConstantLayer > CreateConstantLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::ConstTensor > input=0)
int32_t m_Axis
The axis in params to gather indices from.
A ElementwiseUnaryDescriptor for the ElementwiseUnaryLayer.
Definition: Descriptors.hpp:98
flatbuffers::Offset< ChannelShuffleLayer > CreateChannelShuffleLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::ChannelShuffleDescriptor > descriptor=0)
PoolingAlgorithm m_PoolType
The pooling algorithm to use (Max, Average, L2).
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
flatbuffers::Offset< UintVector > CreateUintVector(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> data=0)
flatbuffers::Offset< StackLayer > CreateStackLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::StackDescriptor > descriptor=0)
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
std::vector< std::pair< unsigned int, unsigned int > > m_Crops
The values to crop from the input dimension.
flatbuffers::Offset< Convolution2dLayer > CreateConvolution2dLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::Convolution2dDescriptor > descriptor=0, flatbuffers::Offset< armnnSerializer::ConstTensor > weights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > biases=0)
flatbuffers::Offset< UnidirectionalSequenceLstmLayer > CreateUnidirectionalSequenceLstmLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::UnidirectionalSequenceLstmDescriptor > descriptor=0, flatbuffers::Offset< armnnSerializer::LstmInputParams > inputParams=0)
uint32_t m_PadTop
Padding top value in the height dimension.
unsigned int GetNumDimensions() const
Function that returns the tensor rank.
Definition: Tensor.cpp:174
flatbuffers::Offset< Pooling2dLayer > CreatePooling2dLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::Pooling2dDescriptor > descriptor=0)
bool m_ProjectionEnabled
Enable/disable the projection layer.
ArgMinMaxFunction
Definition: Types.hpp:89
flatbuffers::Offset< SpaceToBatchNdLayer > CreateSpaceToBatchNdLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::SpaceToBatchNdDescriptor > descriptor=0)
OutputShapeRounding m_OutputShapeRounding
The rounding method for the output shape (Floor, Ceiling).
armnnSerializer::ResizeMethod GetFlatBufferResizeMethod(armnn::ResizeMethod method)
uint32_t m_NumInputs
Number of input tensors.
flatbuffers::Offset< UnidirectionalSequenceLstmDescriptor > CreateUnidirectionalSequenceLstmDescriptor(flatbuffers::FlatBufferBuilder &_fbb, uint32_t activationFunc=0, float clippingThresCell=0.0f, float clippingThresProj=0.0f, bool cifgEnabled=true, bool peepholeEnabled=false, bool projectionEnabled=false, bool layerNormEnabled=false, bool timeMajor=false)
profiling::ProfilingGuid LayerGuid
Define LayerGuid type.
Definition: Types.hpp:349
uint32_t GetNumDimensions() const
Get the number of dimensions.
flatbuffers::Offset< ComparisonDescriptor > CreateComparisonDescriptor(flatbuffers::FlatBufferBuilder &_fbb, armnnSerializer::ComparisonOperation operation=armnnSerializer::ComparisonOperation_Equal)
virtual const IInputSlot & GetInputSlot(unsigned int index) const =0
Get a const input slot handle by slot index.
A MeanDescriptor for the MeanLayer.
flatbuffers::Offset< MaximumLayer > CreateMaximumLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
virtual const IOutputSlot * GetConnection() const =0
static ISerializerPtr Create()
Definition: Serializer.cpp:35
armnnSerializer::PaddingMethod GetFlatBufferPaddingMethod(armnn::PaddingMethod paddingMethod)
bool m_LayerNormEnabled
Enable/disable layer normalization.
std::enable_if_t< std::is_unsigned< Source >::value &&std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
Definition: NumericCast.hpp:35
uint32_t m_PadRight
Padding right value in the width dimension.
flatbuffers::Offset< InstanceNormalizationDescriptor > CreateInstanceNormalizationDescriptor(flatbuffers::FlatBufferBuilder &_fbb, float gamma=0.0f, float beta=0.0f, float eps=0.0f, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NHWC)
A TransposeDescriptor for the TransposeLayer.
A StridedSliceDescriptor for the StridedSliceLayer.
virtual const TensorInfo & GetTensorInfo() const =0
uint32_t m_Axis
Axis to apply channel shuffle operation on.
virtual const IOutputSlot & GetOutputSlot(unsigned int index) const =0
Get the const output slot handle by slot index.
int m_Axis
Axis to reduce across the input tensor.
Definition: Descriptors.hpp:72
flatbuffers::Offset< IntData > CreateIntData(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< int32_t >> data=0)
virtual const char * GetName() const =0
Returns the name of the layer.
float m_ScaleY
Center size encoding scale y.
flatbuffers::Offset< ResizeLayer > CreateResizeLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::ResizeDescriptor > descriptor=0)
uint32_t GetNumViews() const
Get the number of views.
flatbuffers::Offset< FullyConnectedLayer > CreateFullyConnectedLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::FullyConnectedDescriptor > descriptor=0, flatbuffers::Offset< armnnSerializer::ConstTensor > weights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > biases=0)
float m_NmsScoreThreshold
NMS score threshold.
flatbuffers::Offset< DequantizeLayer > CreateDequantizeLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
flatbuffers::Offset< ChannelShuffleDescriptor > CreateChannelShuffleDescriptor(flatbuffers::FlatBufferBuilder &_fbb, uint32_t axis=0, uint32_t numGroups=0)
virtual LayerGuid GetOwningLayerGuid() const =0
A Pooling2dDescriptor for the Pooling2dLayer.
flatbuffers::Offset< DetectionPostProcessLayer > CreateDetectionPostProcessLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::DetectionPostProcessDescriptor > descriptor=0, flatbuffers::Offset< armnnSerializer::ConstTensor > anchors=0)
A NormalizationDescriptor for the NormalizationLayer.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
flatbuffers::Offset< BatchToSpaceNdLayer > CreateBatchToSpaceNdLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::BatchToSpaceNdDescriptor > descriptor=0)
flatbuffers::Offset< armnnSerializer::FeatureCompatibilityVersions > GetVersionTable()
An InstanceNormalizationDescriptor for InstanceNormalizationLayer.
unsigned int GetConcatAxis() const
Get the concatenation axis value.
A ChannelShuffleDescriptor for the ChannelShuffle operator.
flatbuffers::Offset< LogSoftmaxLayer > CreateLogSoftmaxLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::LogSoftmaxDescriptor > descriptor=0)
float m_CellIntermediateScale
Cell intermediate quantization scale.
flatbuffers::Offset< StridedSliceDescriptor > CreateStridedSliceDescriptor(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< int32_t >> begin=0, flatbuffers::Offset< flatbuffers::Vector< int32_t >> end=0, flatbuffers::Offset< flatbuffers::Vector< int32_t >> stride=0, int32_t beginMask=0, int32_t endMask=0, int32_t shrinkAxisMask=0, int32_t ellipsisMask=0, int32_t newAxisMask=0, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NHWC)
flatbuffers::Offset< OriginsDescriptor > CreateOriginsDescriptor(flatbuffers::FlatBufferBuilder &_fbb, uint32_t concatAxis=0, uint32_t numViews=0, uint32_t numDimensions=0, flatbuffers::Offset< flatbuffers::Vector< flatbuffers::Offset< armnnSerializer::UintVector >>> viewOrigins=0)
flatbuffers::Offset< QLstmLayer > CreateQLstmLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::QLstmDescriptor > descriptor=0, flatbuffers::Offset< armnnSerializer::QLstmInputParams > inputParams=0)
uint32_t m_DilationZ
Dilation along z axis.
flatbuffers::Offset< LstmDescriptor > CreateLstmDescriptor(flatbuffers::FlatBufferBuilder &_fbb, uint32_t activationFunc=0, float clippingThresCell=0.0f, float clippingThresProj=0.0f, bool cifgEnabled=true, bool peepholeEnabled=false, bool projectionEnabled=false, bool layerNormEnabled=false)
float m_B
Beta lower bound value used by the activation functions. (BoundedReLu, Linear, TanH).
Definition: Descriptors.hpp:52
static void Destroy(ISerializer *serializer)
Definition: Serializer.cpp:40
A SoftmaxDescriptor for the SoftmaxLayer.
float m_Beta
Beta value for the normalization equation.
flatbuffers::Offset< ElementwiseUnaryLayer > CreateElementwiseUnaryLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::ElementwiseUnaryDescriptor > descriptor=0)
flatbuffers::Offset< PermuteLayer > CreatePermuteLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::PermuteDescriptor > descriptor=0)
uint32_t m_StrideZ
Stride value when proceeding through input for the depth dimension.
const OriginsDescriptor & GetOrigins() const
Get the View Origins.
bool m_CifgEnabled
Enable/disable CIFG (coupled input & forget gate).
PermutationVector m_DimMappings
Indicates how to translate tensor elements from a given source into the target destination, when source and target potentially have different memory layouts e.g.
uint32_t m_NormSize
Depth radius value.
flatbuffers::Offset< SpaceToBatchNdDescriptor > CreateSpaceToBatchNdDescriptor(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> blockShape=0, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> padList=0, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NHWC)
ActivationFunction m_Function
The activation function to use (Sigmoid, TanH, Linear, ReLu, BoundedReLu, SoftReLu, LeakyReLu, Abs, Sqrt, Square, Elu).
Definition: Descriptors.hpp:48
An input connection slot for a layer.
Definition: INetwork.hpp:24
flatbuffers::Offset< SerializedGraph > CreateSerializedGraph(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< flatbuffers::Offset< armnnSerializer::AnyLayer >>> layers=0, flatbuffers::Offset< flatbuffers::Vector< int32_t >> inputIds=0, flatbuffers::Offset< flatbuffers::Vector< int32_t >> outputIds=0, flatbuffers::Offset< armnnSerializer::FeatureCompatibilityVersions > featureVersions=0)
armnnSerializer::PoolingAlgorithm GetFlatBufferPoolingAlgorithm(armnn::PoolingAlgorithm poolingAlgorithm)
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
uint32_t m_DilationY
Dilation along y axis.
A FillDescriptor for the FillLayer.
A BatchNormalizationDescriptor for the BatchNormalizationLayer.
uint32_t m_PadLeft
Padding left value in the width dimension.
unsigned int GetNumBytes() const
Definition: Tensor.hpp:302
ActivationFunction
Definition: Types.hpp:73
flatbuffers::Offset< LogicalBinaryDescriptor > CreateLogicalBinaryDescriptor(flatbuffers::FlatBufferBuilder &_fbb, armnnSerializer::LogicalBinaryOperation operation=armnnSerializer::LogicalBinaryOperation_LogicalAnd)
flatbuffers::Offset< DivisionLayer > CreateDivisionLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
A PermuteDescriptor for the PermuteLayer.
flatbuffers::Offset< LogicalBinaryLayer > CreateLogicalBinaryLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::LogicalBinaryDescriptor > descriptor=0)
uint32_t m_PadRight
Padding right value in the width dimension.
int32_t m_HiddenStateZeroPoint
Hidden State zero point.
bool m_ConstantWeights
Enable/disable constant weights and biases.