ArmNN 21.08
Serializer.cpp
1 //
2 // Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 #include "Serializer.hpp"
6 #include "SerializerUtils.hpp"
7 
8 #include <armnn/Descriptors.hpp>
9 #include <armnn/LstmParams.hpp>
10 #include <armnn/QuantizedLstmParams.hpp>
11 #include <armnn/utility/IgnoreUnused.hpp>
12 #include <armnn/utility/NumericCast.hpp>
13 
14 #include <fmt/format.h>
15 #include <iostream>
16 
17 using namespace armnn;
18 namespace fb = flatbuffers;
19 namespace serializer = armnnSerializer;
20 
21 namespace armnnSerializer
22 {
23 
24 ISerializer::ISerializer() : pSerializerImpl(new SerializerImpl())
25 {
26 }
27 
28 ISerializer::~ISerializer() = default;
29 
30 ISerializer* ISerializer::CreateRaw()
31 {
32  return new ISerializer();
33 }
34 
35 ISerializerPtr ISerializer::Create()
36 {
37  return ISerializerPtr(CreateRaw(), &ISerializer::Destroy);
38 }
39 
40 void ISerializer::Destroy(ISerializer* serializer)
41 {
42  delete serializer;
43 }
44 
45 void ISerializer::Serialize(const armnn::INetwork& inNetwork)
46 {
47  pSerializerImpl->Serialize(inNetwork);
48 }
49 
50 bool ISerializer::SaveSerializedToStream(std::ostream& stream)
51 {
52  return pSerializerImpl->SaveSerializedToStream(stream);
53 }
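The ISerializer entry points above (Create/CreateRaw, Destroy, Serialize, SaveSerializedToStream) are the whole client-facing API of this file. A minimal usage sketch, assuming an already-built armnn::INetworkPtr named network and an illustrative output path:

 #include <armnnSerializer/ISerializer.hpp>
 #include <fstream>

 // Serialize the network and write the resulting FlatBuffer to disk.
 armnnSerializer::ISerializerPtr serializer = armnnSerializer::ISerializer::Create();
 serializer->Serialize(*network);                      // 'network' is an assumed armnn::INetworkPtr
 std::ofstream file("model.armnn", std::ios::binary);  // file name is illustrative
 serializer->SaveSerializedToStream(file);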
54 
55 serializer::ActivationFunction GetFlatBufferActivationFunction(armnn::ActivationFunction function)
56 {
57  switch (function)
58  {
81  default:
82  return serializer::ActivationFunction::ActivationFunction_Sigmoid;
83  }
84 }
85 
86 serializer::ArgMinMaxFunction GetFlatBufferArgMinMaxFunction(armnn::ArgMinMaxFunction function)
87 {
88  switch (function)
89  {
90  case armnn::ArgMinMaxFunction::Max:
91  return serializer::ArgMinMaxFunction::ArgMinMaxFunction_Max;
92  case armnn::ArgMinMaxFunction::Min:
93  default:
94  return serializer::ArgMinMaxFunction::ArgMinMaxFunction_Min;
95  }
96 }
97 
98 uint32_t SerializerStrategy::GetSerializedId(armnn::LayerGuid guid)
99 {
100  if (m_guidMap.empty())
101  {
102  m_guidMap.insert(std::make_pair(guid, m_layerId));
103  }
104  else if (m_guidMap.find(guid) == m_guidMap.end())
105  {
106  ++m_layerId;
107  m_guidMap.insert(std::make_pair(guid, m_layerId));
108 
109  return m_layerId;
110  }
111  return m_guidMap[guid];
112 }
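// Illustration of the id assignment above: the first guid passed in maps to id 0,
// every previously unseen guid is given the next sequential id (1, 2, ...), and a
// guid already present in m_guidMap returns the id it was assigned earlier.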
113 
114 // Build FlatBuffer for Input Layer
115 void SerializerStrategy::SerializeInputLayer(const armnn::IConnectableLayer* layer, LayerBindingId id, const char* name)
116 {
117  IgnoreUnused(name);
118 
119  // Create FlatBuffer BaseLayer
120  auto flatBufferInputBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Input);
121 
122  // Create FlatBuffer BindableBaseLayer
123  auto flatBufferInputBindableBaseLayer = serializer::CreateBindableLayerBase(m_flatBufferBuilder,
124  flatBufferInputBaseLayer,
125  id);
126  // Push layer binding id to inputIds.
127  m_inputIds.push_back(id);
128 
129  // Create the FlatBuffer InputLayer
130  auto flatBufferInputLayer = serializer::CreateInputLayer(m_flatBufferBuilder, flatBufferInputBindableBaseLayer);
131 
132  // Add the AnyLayer to the FlatBufferLayers
133  CreateAnyLayer(flatBufferInputLayer.o, serializer::Layer::Layer_InputLayer);
134 }
135 
136 // Build FlatBuffer for Output Layer
137 void SerializerStrategy::SerializeOutputLayer(const armnn::IConnectableLayer* layer,
138  LayerBindingId id, const char* name)
139 {
140  IgnoreUnused(name);
141 
142  // Create FlatBuffer BaseLayer
143  auto flatBufferOutputBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Output);
144 
145  // Create FlatBuffer BindableBaseLayer
146  auto flatBufferOutputBindableBaseLayer = serializer::CreateBindableLayerBase(m_flatBufferBuilder,
147  flatBufferOutputBaseLayer,
148  id);
149  // Push layer binding id to outputIds.
150  m_outputIds.push_back(id);
151 
152  // Create the FlatBuffer OutputLayer
153  auto flatBufferOutputLayer = serializer::CreateOutputLayer(m_flatBufferBuilder, flatBufferOutputBindableBaseLayer);
154  // Add the AnyLayer to the FlatBufferLayers
155  CreateAnyLayer(flatBufferOutputLayer.o, serializer::Layer::Layer_OutputLayer);
156 }
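// The Serialize*Layer methods below all follow the same pattern: create a FlatBuffer
// BaseLayer via CreateLayerBase, convert the armnn descriptor (and any constant
// tensors) to its FlatBuffer equivalent, build the layer-specific table, and register
// the result with CreateAnyLayer so it ends up in the serialized layer list.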
157 
158 void SerializerStrategy::SerializeAbsLayer(const armnn::IConnectableLayer* layer, const char* name)
159 {
160  IgnoreUnused(name);
161  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Abs);
162  auto flatBufferAbsLayer = serializer::CreateAbsLayer(m_flatBufferBuilder, flatBufferBaseLayer);
163 
164  CreateAnyLayer(flatBufferAbsLayer.o, serializer::Layer::Layer_AbsLayer);
165 }
166 
167 // Build FlatBuffer for Activation Layer
168 void SerializerStrategy::SerializeActivationLayer(const armnn::IConnectableLayer* layer,
169  const armnn::ActivationDescriptor& descriptor,
170  const char* name)
171 {
172  IgnoreUnused(name);
173 
174  // Create FlatBuffer BaseLayer
175  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Activation);
176 
177  // Create the FlatBuffer ActivationDescriptor
178  auto flatBufferDescriptor = CreateActivationDescriptor(m_flatBufferBuilder,
179  GetFlatBufferActivationFunction(descriptor.m_Function),
180  descriptor.m_A,
181  descriptor.m_B);
182 
183  // Create the FlatBuffer ActivationLayer
184  auto flatBufferAdditionLayer = CreateActivationLayer(m_flatBufferBuilder,
185  flatBufferBaseLayer,
186  flatBufferDescriptor);
187 
188  // Add the AnyLayer to the FlatBufferLayers
189  CreateAnyLayer(flatBufferAdditionLayer.o, serializer::Layer::Layer_ActivationLayer);
190 }
191 
192 // Build FlatBuffer for Addition Layer
193 void SerializerStrategy::SerializeAdditionLayer(const armnn::IConnectableLayer* layer, const char* name)
194 {
195  IgnoreUnused(name);
196 
197  // Create FlatBuffer BaseLayer
198  auto flatBufferAdditionBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Addition);
199 
200  // Create the FlatBuffer AdditionLayer
201  auto flatBufferAdditionLayer = serializer::CreateAdditionLayer(m_flatBufferBuilder, flatBufferAdditionBaseLayer);
202 
203  // Add the AnyLayer to the FlatBufferLayers
204  CreateAnyLayer(flatBufferAdditionLayer.o, serializer::Layer::Layer_AdditionLayer);
205 }
206 
207 // Build FlatBuffer for ArgMinMax Layer
208 void SerializerStrategy::SerializeArgMinMaxLayer(const armnn::IConnectableLayer *layer,
209  const armnn::ArgMinMaxDescriptor& descriptor,
210  const char *name)
211 {
212  IgnoreUnused(name);
213 
214  // Create FlatBuffer BaseLayer
215  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_ArgMinMax);
216 
217  // Create FlatBuffer Descriptor
218  auto flatBufferDescriptor = CreateArgMinMaxDescriptor(m_flatBufferBuilder,
219  GetFlatBufferArgMinMaxFunction(descriptor.m_Function),
220  descriptor.m_Axis);
221 
222  // Create FlatBuffer ArgMinMaxLayer
223  auto flatBufferLayer = CreateArgMinMaxLayer(m_flatBufferBuilder,
224  flatBufferBaseLayer,
225  flatBufferDescriptor);
226 
227  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_ArgMinMaxLayer);
228 }
229 
230 // Build FlatBuffer for BatchToSpaceNd Layer
231 void SerializerStrategy::SerializeBatchToSpaceNdLayer(const armnn::IConnectableLayer* layer,
232  const armnn::BatchToSpaceNdDescriptor& descriptor,
233  const char* name)
234 {
235  IgnoreUnused(name);
236 
237  // Create FlatBuffer BaseLayer
238  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_BatchToSpaceNd);
239 
240  std::vector<unsigned int> crops;
241  crops.reserve(descriptor.m_Crops.size() * 2);
242  for (auto& crop : descriptor.m_Crops)
243  {
244  crops.push_back(crop.first);
245  crops.push_back(crop.second);
246  }
247 
248  auto flatBufferDescriptor =
249  CreateBatchToSpaceNdDescriptor(m_flatBufferBuilder,
250  m_flatBufferBuilder.CreateVector(descriptor.m_BlockShape),
251  m_flatBufferBuilder.CreateVector(crops),
252  GetFlatBufferDataLayout(descriptor.m_DataLayout));
253 
254  auto flatBufferLayer = serializer::CreateBatchToSpaceNdLayer(m_flatBufferBuilder,
255  flatBufferBaseLayer,
256  flatBufferDescriptor);
257 
258  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_BatchToSpaceNdLayer);
259 }
260 
261 void SerializerStrategy::SerializeBatchNormalizationLayer(
262  const armnn::IConnectableLayer* layer,
263  const armnn::BatchNormalizationDescriptor& batchNormDescriptor,
264  const std::vector<armnn::ConstTensor>& constants,
265  const char* name)
266 {
267  IgnoreUnused(name);
268 
269  const armnn::ConstTensor& mean = constants[0];
270  const armnn::ConstTensor& variance = constants[1];
271  const armnn::ConstTensor& beta = constants[2];
272  const armnn::ConstTensor& gamma = constants[3];
273 
274  auto fbBatchNormalizationBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_BatchNormalization);
275  auto fbBatchNormalizationDescriptor = serializer::CreateBatchNormalizationDescriptor(
276  m_flatBufferBuilder,
277  batchNormDescriptor.m_Eps,
278  GetFlatBufferDataLayout(batchNormDescriptor.m_DataLayout));
279 
280  auto fbMeanConstTensorInfo = CreateConstTensorInfo(mean);
281  auto fbVarianceConstTensorInfo = CreateConstTensorInfo(variance);
282  auto fbBetaConstTensorInfo = CreateConstTensorInfo(beta);
283  auto fbGammaConstTensorInfo = CreateConstTensorInfo(gamma);
284  auto fbBatchNormalizationLayer = serializer::CreateBatchNormalizationLayer(m_flatBufferBuilder,
285  fbBatchNormalizationBaseLayer,
286  fbBatchNormalizationDescriptor,
287  fbMeanConstTensorInfo,
288  fbVarianceConstTensorInfo,
289  fbBetaConstTensorInfo,
290  fbGammaConstTensorInfo);
291 
292  CreateAnyLayer(fbBatchNormalizationLayer.o, serializer::Layer::Layer_BatchNormalizationLayer);
293 }
294 
295 void SerializerStrategy::SerializeCastLayer(const armnn::IConnectableLayer* layer,
296  const char* name)
297 {
298  IgnoreUnused(name);
299 
300  auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Cast);
301  auto fbCastLayer = serializer::CreateCastLayer(m_flatBufferBuilder, fbBaseLayer);
302  CreateAnyLayer(fbCastLayer.o, serializer::Layer::Layer_CastLayer);
303 }
304 
305 void SerializerStrategy::SerializeComparisonLayer(const armnn::IConnectableLayer* layer,
306  const armnn::ComparisonDescriptor& descriptor,
307  const char* name)
308 {
309  IgnoreUnused(name);
310 
311  auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Comparison);
312  auto fbDescriptor = serializer::CreateComparisonDescriptor(
313  m_flatBufferBuilder,
314  GetFlatBufferComparisonOperation(descriptor.m_Operation));
315 
316  auto fbLayer = serializer::CreateComparisonLayer(m_flatBufferBuilder, fbBaseLayer, fbDescriptor);
317  CreateAnyLayer(fbLayer.o, serializer::Layer::Layer_ComparisonLayer);
318 }
319 
320 // Build FlatBuffer for Constant Layer
321 void SerializerStrategy::SerializeConstantLayer(const armnn::IConnectableLayer* layer,
322  const std::vector<armnn::ConstTensor>& constants,
323  const char* name)
324 {
325  IgnoreUnused(name);
326 
327  armnn::ConstTensor input = constants[0];
328 
329  // Create FlatBuffer BaseLayer
330  auto flatBufferConstantBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Constant);
331 
332  auto flatBufferConstTensorInfo = CreateConstTensorInfo(input);
333 
334  // Create the FlatBuffer ConstantLayer
335  auto flatBufferLayer = CreateConstantLayer(m_flatBufferBuilder,
336  flatBufferConstantBaseLayer,
337  flatBufferConstTensorInfo);
338 
339  // Add the AnyLayer to the FlatBufferLayers
340  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_ConstantLayer);
341 }
342 
343 // Build FlatBuffer for Convolution2dLayer
344 void SerializerStrategy::SerializeConvolution2dLayer(const armnn::IConnectableLayer* layer,
345  const armnn::Convolution2dDescriptor& descriptor,
346  const std::vector<armnn::ConstTensor>& constants,
347  const char* name)
348 {
349  IgnoreUnused(name);
350 
351  const armnn::ConstTensor weights = constants[0];
352 
353  // Create FlatBuffer BaseLayer
354  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Convolution2d);
355 
356  auto flatBufferDescriptor = CreateConvolution2dDescriptor(m_flatBufferBuilder,
357  descriptor.m_PadLeft,
358  descriptor.m_PadRight,
359  descriptor.m_PadTop,
360  descriptor.m_PadBottom,
361  descriptor.m_StrideX,
362  descriptor.m_StrideY,
363  descriptor.m_DilationX,
364  descriptor.m_DilationY,
365  descriptor.m_BiasEnabled,
366  GetFlatBufferDataLayout(descriptor.m_DataLayout));
367  auto flatBufferWeightsConstTensorInfo = CreateConstTensorInfo(weights);
368  flatbuffers::Offset<serializer::ConstTensor> flatBufferBiasesConstTensorInfo;
369 
370  if (constants.size() > 1)
371  {
372  const armnn::ConstTensor biases = constants[1];
373  flatBufferBiasesConstTensorInfo = CreateConstTensorInfo(biases);
374  }
375 
376  // Create the FlatBuffer Convolution2dLayer
377  auto flatBufferLayer = CreateConvolution2dLayer(m_flatBufferBuilder,
378  flatBufferBaseLayer,
379  flatBufferDescriptor,
380  flatBufferWeightsConstTensorInfo,
381  flatBufferBiasesConstTensorInfo);
382 
383  // Add the AnyLayer to the FlatBufferLayers
384  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_Convolution2dLayer);
385 }
386 
387 void SerializerStrategy::SerializeDepthToSpaceLayer(const armnn::IConnectableLayer* layer,
388  const armnn::DepthToSpaceDescriptor& descriptor,
389  const char* name)
390 {
391  IgnoreUnused(name);
392 
393  auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_DepthToSpace);
394  auto fbDescriptor = CreateDepthToSpaceDescriptor(m_flatBufferBuilder,
395  descriptor.m_BlockSize,
396  GetFlatBufferDataLayout(descriptor.m_DataLayout));
397 
398  auto fbLayer = serializer::CreateDepthToSpaceLayer(m_flatBufferBuilder, fbBaseLayer, fbDescriptor);
399 
400  CreateAnyLayer(fbLayer.o, serializer::Layer::Layer_DepthToSpaceLayer);
401 }
402 
403 void SerializerStrategy::SerializeDepthwiseConvolution2dLayer(const armnn::IConnectableLayer* layer,
404  const armnn::DepthwiseConvolution2dDescriptor& descriptor,
405  const std::vector<armnn::ConstTensor>& constants,
406  const char* name)
407 {
408  IgnoreUnused(name);
409 
410  const armnn::ConstTensor& weights = constants[0];
411 
412  auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_DepthwiseConvolution2d);
413  auto fbDescriptor = CreateDepthwiseConvolution2dDescriptor(m_flatBufferBuilder,
414  descriptor.m_PadLeft,
415  descriptor.m_PadRight,
416  descriptor.m_PadTop,
417  descriptor.m_PadBottom,
418  descriptor.m_StrideX,
419  descriptor.m_StrideY,
420  descriptor.m_DilationX,
421  descriptor.m_DilationY,
422  descriptor.m_BiasEnabled,
423  GetFlatBufferDataLayout(descriptor.m_DataLayout));
424 
425  flatbuffers::Offset<serializer::ConstTensor> fbWeightsConstTensorInfo = CreateConstTensorInfo(weights);
426  flatbuffers::Offset<serializer::ConstTensor> fbBiasesConstTensorInfo;
427 
428  if (constants.size() > 1)
429  {
430  const armnn::ConstTensor& biases = constants[1];
431  fbBiasesConstTensorInfo = CreateConstTensorInfo(biases);
432  }
433 
434  auto flatBufferLayer = CreateDepthwiseConvolution2dLayer(m_flatBufferBuilder,
435  fbBaseLayer,
436  fbDescriptor,
437  fbWeightsConstTensorInfo,
438  fbBiasesConstTensorInfo);
439 
440  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_DepthwiseConvolution2dLayer);
441 }
442 
443 void SerializerStrategy::SerializeDequantizeLayer(const armnn::IConnectableLayer* layer,
444  const char* name)
445 {
446  IgnoreUnused(name);
447 
448  auto fbDequantizeBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Dequantize);
449  auto fbDequantizeLayer = serializer::CreateDequantizeLayer(m_flatBufferBuilder, fbDequantizeBaseLayer);
450 
451  CreateAnyLayer(fbDequantizeLayer.o, serializer::Layer::Layer_DequantizeLayer);
452 }
453 
454 void SerializerStrategy::SerializeDetectionPostProcessLayer(const armnn::IConnectableLayer* layer,
455  const armnn::DetectionPostProcessDescriptor& descriptor,
456  const std::vector<armnn::ConstTensor>& constants,
457  const char* name)
458 {
459  IgnoreUnused(name);
460 
461  const armnn::ConstTensor& anchors = constants[0];
462 
463  auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_DetectionPostProcess);
464  auto fbDescriptor = CreateDetectionPostProcessDescriptor(m_flatBufferBuilder,
465  descriptor.m_MaxDetections,
466  descriptor.m_MaxClassesPerDetection,
467  descriptor.m_DetectionsPerClass,
468  descriptor.m_NmsScoreThreshold,
469  descriptor.m_NmsIouThreshold,
470  descriptor.m_NumClasses,
471  descriptor.m_UseRegularNms,
472  descriptor.m_ScaleX,
473  descriptor.m_ScaleY,
474  descriptor.m_ScaleW,
475  descriptor.m_ScaleH);
476 
477  flatbuffers::Offset<serializer::ConstTensor> fbAnchorsConstTensorInfo = CreateConstTensorInfo(anchors);
478 
479  auto flatBufferLayer = CreateDetectionPostProcessLayer(m_flatBufferBuilder,
480  fbBaseLayer,
481  fbDescriptor,
482  fbAnchorsConstTensorInfo);
483 
484  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_DetectionPostProcessLayer);
485 }
486 
487 void SerializerStrategy::SerializeDivisionLayer(const armnn::IConnectableLayer* layer, const char* name)
488 {
489  IgnoreUnused(name);
490 
491  auto fbDivisionBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Division);
492  auto fbDivisionLayer = serializer::CreateDivisionLayer(m_flatBufferBuilder, fbDivisionBaseLayer);
493 
494  CreateAnyLayer(fbDivisionLayer.o, serializer::Layer::Layer_DivisionLayer);
495 }
496 
497 void SerializerStrategy::SerializeElementwiseUnaryLayer(const armnn::IConnectableLayer* layer,
498  const armnn::ElementwiseUnaryDescriptor& descriptor,
499  const char* name)
500 {
501  IgnoreUnused(name);
502 
503  auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_ElementwiseUnary);
504  auto fbDescriptor = serializer::CreateElementwiseUnaryDescriptor(
505  m_flatBufferBuilder,
506  GetFlatBufferUnaryOperation(descriptor.m_Operation));
507 
508  auto fbLayer = serializer::CreateElementwiseUnaryLayer(m_flatBufferBuilder, fbBaseLayer, fbDescriptor);
509  CreateAnyLayer(fbLayer.o, serializer::Layer::Layer_ElementwiseUnaryLayer);
510 }
511 
512 void SerializerStrategy::SerializeEqualLayer(const armnn::IConnectableLayer* layer, const char* name)
513 {
514  IgnoreUnused(name);
515 
516  auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Equal);
517  auto fbEqualLayer = serializer::CreateEqualLayer(m_flatBufferBuilder, fbBaseLayer);
518 
519  CreateAnyLayer(fbEqualLayer.o, serializer::Layer::Layer_EqualLayer);
520 }
521 
522 void SerializerStrategy::SerializeFillLayer(const armnn::IConnectableLayer* layer,
523  const armnn::FillDescriptor& fillDescriptor,
524  const char* name)
525 {
526  IgnoreUnused(name);
527 
528  auto fbFillBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Fill);
529 
530  auto fbDescriptor = serializer::CreateFillDescriptor(m_flatBufferBuilder, fillDescriptor.m_Value);
531 
532  auto fbFillLayer = serializer::CreateFillLayer(m_flatBufferBuilder, fbFillBaseLayer, fbDescriptor);
533 
534  CreateAnyLayer(fbFillLayer.o, serializer::Layer::Layer_FillLayer);
535 }
536 
537 void SerializerStrategy::SerializeFloorLayer(const armnn::IConnectableLayer *layer, const char *name)
538 {
539  IgnoreUnused(name);
540 
541  auto flatBufferFloorBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Floor);
542  auto flatBufferFloorLayer = serializer::CreateFloorLayer(m_flatBufferBuilder, flatBufferFloorBaseLayer);
543 
544  CreateAnyLayer(flatBufferFloorLayer.o, serializer::Layer::Layer_FloorLayer);
545 }
546 
547 void SerializerStrategy::SerializeGatherLayer(const armnn::IConnectableLayer* layer,
548  const armnn::GatherDescriptor& gatherDescriptor,
549  const char* name)
550 {
551  IgnoreUnused(name);
552 
553  auto fbGatherDescriptor = CreateGatherDescriptor(m_flatBufferBuilder,
554  gatherDescriptor.m_Axis);
555  auto fbGatherBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Gather);
556  auto flatBufferLayer = serializer::CreateGatherLayer(m_flatBufferBuilder, fbGatherBaseLayer, fbGatherDescriptor);
557 
558  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_GatherLayer);
559 }
560 
561 
562 void SerializerStrategy::SerializeGreaterLayer(const armnn::IConnectableLayer* layer, const char* name)
563 {
564  IgnoreUnused(name);
565 
566  auto fbGreaterBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Greater);
567  auto fbGreaterLayer = serializer::CreateGreaterLayer(m_flatBufferBuilder, fbGreaterBaseLayer);
568 
569  CreateAnyLayer(fbGreaterLayer.o, serializer::Layer::Layer_GreaterLayer);
570 }
571 
572 void SerializerStrategy::SerializeInstanceNormalizationLayer(
573  const armnn::IConnectableLayer* layer,
574  const armnn::InstanceNormalizationDescriptor& instanceNormalizationDescriptor,
575  const char* name)
576 {
577  IgnoreUnused(name);
578 
579  auto fbDescriptor = serializer::CreateInstanceNormalizationDescriptor(
580  m_flatBufferBuilder,
581  instanceNormalizationDescriptor.m_Gamma,
582  instanceNormalizationDescriptor.m_Beta,
583  instanceNormalizationDescriptor.m_Eps,
584  GetFlatBufferDataLayout(instanceNormalizationDescriptor.m_DataLayout));
585 
586  auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_InstanceNormalization);
587  auto fbLayer = serializer::CreateInstanceNormalizationLayer(m_flatBufferBuilder, fbBaseLayer, fbDescriptor);
588 
589  CreateAnyLayer(fbLayer.o, serializer::Layer::Layer_InstanceNormalizationLayer);
590 }
591 
592 void SerializerStrategy::SerializeL2NormalizationLayer(const armnn::IConnectableLayer* layer,
593  const armnn::L2NormalizationDescriptor& l2NormalizationDescriptor,
594  const char* name)
595 {
596  IgnoreUnused(name);
597 
598  // Create FlatBuffer BaseLayer
599  auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_L2Normalization);
600 
601  // Create the FlatBuffer L2Normalization Descriptor
602  auto fbDescriptor = serializer::CreateL2NormalizationDescriptor(
603  m_flatBufferBuilder,
604  GetFlatBufferDataLayout(l2NormalizationDescriptor.m_DataLayout),
605  l2NormalizationDescriptor.m_Eps);
606 
607  // Create FlatBuffer layer
608  auto fbLayer = serializer::CreateL2NormalizationLayer(m_flatBufferBuilder, fbBaseLayer, fbDescriptor);
609 
610  CreateAnyLayer(fbLayer.o, serializer::Layer::Layer_L2NormalizationLayer);
611 }
612 
613 void SerializerStrategy::SerializeLogicalBinaryLayer(const armnn::IConnectableLayer* layer,
614  const armnn::LogicalBinaryDescriptor& descriptor,
615  const char* name)
616 {
617  IgnoreUnused(name);
618 
619  auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_LogicalBinary);
620  auto fbDescriptor = serializer::CreateLogicalBinaryDescriptor(
621  m_flatBufferBuilder,
622  GetFlatBufferLogicalBinaryOperation(descriptor.m_Operation));
623 
624  auto fbLayer = serializer::CreateLogicalBinaryLayer(m_flatBufferBuilder, fbBaseLayer, fbDescriptor);
625  CreateAnyLayer(fbLayer.o, serializer::Layer::Layer_LogicalBinaryLayer);
626 }
627 
628 void SerializerStrategy::SerializeLogSoftmaxLayer(const armnn::IConnectableLayer* layer,
629  const armnn::LogSoftmaxDescriptor& logSoftmaxDescriptor,
630  const char* name)
631 {
632  IgnoreUnused(name);
633 
634  // Create FlatBuffer BaseLayer
635  auto flatBufferLogSoftmaxBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_LogSoftmax);
636 
637  // Create the FlatBuffer LogSoftmaxDescriptor
638  auto flatBufferLogSoftmaxDesc =
639  serializer::CreateLogSoftmaxDescriptor(m_flatBufferBuilder,
640  logSoftmaxDescriptor.m_Beta,
641  logSoftmaxDescriptor.m_Axis);
642 
643  // Create the FlatBuffer LogSoftmaxLayer
644  auto flatBufferLogSoftmaxLayer =
645  serializer::CreateLogSoftmaxLayer(m_flatBufferBuilder,
646  flatBufferLogSoftmaxBaseLayer,
647  flatBufferLogSoftmaxDesc);
648 
649  CreateAnyLayer(flatBufferLogSoftmaxLayer.o, serializer::Layer::Layer_LogSoftmaxLayer);
650 }
651 
652 void SerializerStrategy::SerializeLstmLayer(const armnn::IConnectableLayer* layer,
653  const armnn::LstmDescriptor& descriptor,
654  const std::vector<armnn::ConstTensor>& constants,
655  const char* name)
656 {
657  IgnoreUnused(name);
658 
659  auto fbLstmBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Lstm);
660 
661  auto fbLstmDescriptor = serializer::CreateLstmDescriptor(
662  m_flatBufferBuilder,
663  descriptor.m_ActivationFunc,
664  descriptor.m_ClippingThresCell,
665  descriptor.m_ClippingThresProj,
666  descriptor.m_CifgEnabled,
667  descriptor.m_PeepholeEnabled,
668  descriptor.m_ProjectionEnabled,
669  descriptor.m_LayerNormEnabled);
670 
671  // Index for constants vector
672  std::size_t i = 0;
673 
674  // Get mandatory/basic input parameters
675  auto inputToForgetWeights = CreateConstTensorInfo(constants[i++]); //InputToForgetWeights
676  auto inputToCellWeights = CreateConstTensorInfo(constants[i++]); //InputToCellWeights
677  auto inputToOutputWeights = CreateConstTensorInfo(constants[i++]); //InputToOutputWeights
678  auto recurrentToForgetWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToForgetWeights
679  auto recurrentToCellWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToCellWeights
680  auto recurrentToOutputWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToOutputWeights
681  auto forgetGateBias = CreateConstTensorInfo(constants[i++]); //ForgetGateBias
682  auto cellBias = CreateConstTensorInfo(constants[i++]); //CellBias
683  auto outputGateBias = CreateConstTensorInfo(constants[i++]); //OutputGateBias
684 
685 
686 
687  //Define optional parameters, these will be set depending on configuration in Lstm descriptor
688  flatbuffers::Offset<serializer::ConstTensor> inputToInputWeights;
689  flatbuffers::Offset<serializer::ConstTensor> recurrentToInputWeights;
690  flatbuffers::Offset<serializer::ConstTensor> cellToInputWeights;
691  flatbuffers::Offset<serializer::ConstTensor> inputGateBias;
692  flatbuffers::Offset<serializer::ConstTensor> projectionWeights;
693  flatbuffers::Offset<serializer::ConstTensor> projectionBias;
694  flatbuffers::Offset<serializer::ConstTensor> cellToForgetWeights;
695  flatbuffers::Offset<serializer::ConstTensor> cellToOutputWeights;
696  flatbuffers::Offset<serializer::ConstTensor> inputLayerNormWeights;
697  flatbuffers::Offset<serializer::ConstTensor> forgetLayerNormWeights;
698  flatbuffers::Offset<serializer::ConstTensor> cellLayerNormWeights;
699  flatbuffers::Offset<serializer::ConstTensor> outputLayerNormWeights;
700 
701  if (!descriptor.m_CifgEnabled)
702  {
703  inputToInputWeights = CreateConstTensorInfo(constants[i++]); //InputToInputWeights
704  recurrentToInputWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToInputWeights
705  inputGateBias = CreateConstTensorInfo(constants[i++]); //InputGateBias
706  }
707 
708  if (descriptor.m_PeepholeEnabled)
709  {
710  if (!descriptor.m_CifgEnabled)
711  {
712  cellToInputWeights = CreateConstTensorInfo(constants[i++]); //CellToInputWeights
713  }
714  cellToForgetWeights = CreateConstTensorInfo(constants[i++]); //CellToForgetWeights
715  cellToOutputWeights = CreateConstTensorInfo(constants[i++]); //CellToOutputWeights
716  }
717 
718  if (descriptor.m_ProjectionEnabled)
719  {
720  projectionWeights = CreateConstTensorInfo(constants[i++]); //ProjectionWeights
721  projectionBias = CreateConstTensorInfo(constants[i++]); //ProjectionBias
722  }
723 
724  if (descriptor.m_LayerNormEnabled)
725  {
726  if (!descriptor.m_CifgEnabled)
727  {
728  inputLayerNormWeights = CreateConstTensorInfo(constants[i++]); //InputLayerNormWeights
729  }
730  forgetLayerNormWeights = CreateConstTensorInfo(constants[i++]); //ForgetLayerNormWeights
731  cellLayerNormWeights = CreateConstTensorInfo(constants[i++]); //CellLayerNormWeights
732  outputLayerNormWeights = CreateConstTensorInfo(constants[i++]); //OutputLayerNormWeights
733  }
734 
735  auto fbLstmParams = serializer::CreateLstmInputParams(
736  m_flatBufferBuilder,
737  inputToForgetWeights,
738  inputToCellWeights,
739  inputToOutputWeights,
740  recurrentToForgetWeights,
741  recurrentToCellWeights,
742  recurrentToOutputWeights,
743  forgetGateBias,
744  cellBias,
745  outputGateBias,
746  inputToInputWeights,
747  recurrentToInputWeights,
748  cellToInputWeights,
749  inputGateBias,
750  projectionWeights,
751  projectionBias,
752  cellToForgetWeights,
753  cellToOutputWeights,
754  inputLayerNormWeights,
755  forgetLayerNormWeights,
756  cellLayerNormWeights,
757  outputLayerNormWeights);
758 
759  auto fbLstmLayer = serializer::CreateLstmLayer(
760  m_flatBufferBuilder,
761  fbLstmBaseLayer,
762  fbLstmDescriptor,
763  fbLstmParams);
764 
765  CreateAnyLayer(fbLstmLayer.o, serializer::Layer::Layer_LstmLayer);
766 }
767 
768 void SerializerStrategy::SerializeMaximumLayer(const armnn::IConnectableLayer* layer, const char* name)
769 {
770  IgnoreUnused(name);
771 
772  auto fbMaximumBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Maximum);
773  auto fbMaximumLayer = serializer::CreateMaximumLayer(m_flatBufferBuilder, fbMaximumBaseLayer);
774 
775  CreateAnyLayer(fbMaximumLayer.o, serializer::Layer::Layer_MaximumLayer);
776 }
777 
778 void SerializerStrategy::SerializeMeanLayer(const armnn::IConnectableLayer* layer,
779  const armnn::MeanDescriptor& descriptor,
780  const char* name)
781 {
782  IgnoreUnused(name);
783 
784  auto fbMeanBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Mean);
785  auto fbMeanDescriptor = serializer::CreateMeanDescriptor(m_flatBufferBuilder,
786  m_flatBufferBuilder.CreateVector(descriptor.m_Axis),
787  descriptor.m_KeepDims);
788 
789  auto fbMeanLayer = serializer::CreateMeanLayer(m_flatBufferBuilder,
790  fbMeanBaseLayer,
791  fbMeanDescriptor);
792 
793  CreateAnyLayer(fbMeanLayer.o, serializer::Layer::Layer_MeanLayer);
794 }
795 
796 void SerializerStrategy::SerializeMinimumLayer(const armnn::IConnectableLayer* layer, const char* name)
797 {
798  IgnoreUnused(name);
799 
800  auto fbMinimumBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Minimum);
801  auto fbMinimumLayer = serializer::CreateMinimumLayer(m_flatBufferBuilder, fbMinimumBaseLayer);
802 
803  CreateAnyLayer(fbMinimumLayer.o, serializer::Layer::Layer_MinimumLayer);
804 }
805 
806 void SerializerStrategy::SerializeMergeLayer(const armnn::IConnectableLayer* layer, const char* name)
807 {
808  IgnoreUnused(name);
809 
810  auto fbMergeBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Merge);
811  auto fbMergeLayer = serializer::CreateMergeLayer(m_flatBufferBuilder, fbMergeBaseLayer);
812 
813  CreateAnyLayer(fbMergeLayer.o, serializer::Layer::Layer_MergeLayer);
814 }
815 
816 void SerializerStrategy::SerializeMergerLayer(const armnn::IConnectableLayer* layer,
817  const armnn::MergerDescriptor& mergerDescriptor,
818  const char* name)
819 {
820  SerializeConcatLayer(layer, mergerDescriptor, name);
821 }
822 
823 void SerializerStrategy::SerializeConcatLayer(const armnn::IConnectableLayer* layer,
824  const armnn::ConcatDescriptor& concatDescriptor,
825  const char* name)
826 {
827  IgnoreUnused(name);
828 
829  auto flatBufferConcatBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Concat);
830 
831  std::vector<flatbuffers::Offset<UintVector>> views;
832  for (unsigned int v = 0; v < concatDescriptor.GetNumViews(); ++v)
833  {
834  const uint32_t* origin = concatDescriptor.GetViewOrigin(v);
835  std::vector<uint32_t> origins;
836  for (unsigned int d = 0; d < concatDescriptor.GetNumDimensions(); ++d)
837  {
838  origins.push_back(origin[d]);
839  }
840  auto view = m_flatBufferBuilder.CreateVector(origins);
841  auto uintVector = CreateUintVector(m_flatBufferBuilder, view);
842  views.push_back(uintVector);
843  }
844 
845  auto flatBufferConcatDescriptor = CreateOriginsDescriptor(m_flatBufferBuilder,
846  concatDescriptor.GetConcatAxis(),
847  concatDescriptor.GetNumViews(),
848  concatDescriptor.GetNumDimensions(),
849  m_flatBufferBuilder.CreateVector(views));
850 
851  auto flatBufferLayer = CreateConcatLayer(m_flatBufferBuilder,
852  flatBufferConcatBaseLayer,
853  flatBufferConcatDescriptor);
854 
855  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_ConcatLayer);
856 }
857 
858 void SerializerStrategy::SerializeMultiplicationLayer(const armnn::IConnectableLayer* layer, const char* name)
859 {
860  IgnoreUnused(name);
861 
862  auto fbMultiplicationBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Multiplication);
863  auto fbMultiplicationLayer = serializer::CreateMultiplicationLayer(m_flatBufferBuilder,
864  fbMultiplicationBaseLayer);
865 
866  CreateAnyLayer(fbMultiplicationLayer.o, serializer::Layer::Layer_MultiplicationLayer);
867 }
868 
869 void SerializerStrategy::SerializePadLayer(const armnn::IConnectableLayer* layer,
870  const armnn::PadDescriptor& padDescriptor,
871  const char* name)
872 {
873  IgnoreUnused(name);
874 
875  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Pad);
876 
877  std::vector<unsigned int> padList;
878  for (auto& p: padDescriptor.m_PadList)
879  {
880  padList.push_back(p.first);
881  padList.push_back(p.second);
882  }
883 
884  auto flatBufferPadDesc = serializer::CreatePadDescriptor(m_flatBufferBuilder,
885  m_flatBufferBuilder.CreateVector(padList),
886  padDescriptor.m_PadValue);
887 
888  auto flatBufferPadLayer = serializer::CreatePadLayer(m_flatBufferBuilder,
889  flatBufferBaseLayer,
890  flatBufferPadDesc);
891 
892  CreateAnyLayer(flatBufferPadLayer.o, serializer::Layer::Layer_PadLayer);
893 }
894 
895 void SerializerStrategy::SerializePermuteLayer(const armnn::IConnectableLayer* layer,
896  const armnn::PermuteDescriptor& permuteDescriptor,
897  const char* name)
898 {
899  IgnoreUnused(name);
900 
901  // Create FlatBuffer BaseLayer
902  auto flatBufferPermuteBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Permute);
903 
904  std::vector<unsigned int> dimMappings;
905  for (unsigned int i=0; i<permuteDescriptor.m_DimMappings.GetSize(); ++i)
906  {
907  dimMappings.push_back(permuteDescriptor.m_DimMappings[i]);
908  }
909 
910  auto flatBufferPermuteDesc = serializer::CreatePermuteDescriptor(m_flatBufferBuilder,
911  m_flatBufferBuilder.CreateVector(dimMappings));
912 
913  // Create the FlatBuffer PermuteLayer
914  auto flatBufferPermuteLayer = serializer::CreatePermuteLayer(m_flatBufferBuilder,
915  flatBufferPermuteBaseLayer,
916  flatBufferPermuteDesc);
917 
918  // Add the AnyLayer to the FlatBufferLayers
919  CreateAnyLayer(flatBufferPermuteLayer.o, serializer::Layer::Layer_PermuteLayer);
920 }
921 
922 // Build FlatBuffer for Rank Layer
923 void SerializerStrategy::SerializeRankLayer(const armnn::IConnectableLayer* layer,
924  const char* name)
925 {
926  IgnoreUnused(name);
927  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Rank);
928  auto flatBufferRankLayer = serializer::CreateRankLayer(m_flatBufferBuilder, flatBufferBaseLayer);
929 
930  CreateAnyLayer(flatBufferRankLayer.o, serializer::Layer::Layer_RankLayer);
931 }
932 
933 void SerializerStrategy::SerializeReduceLayer(const armnn::IConnectableLayer* layer,
934  const armnn::ReduceDescriptor& reduceDescriptor,
935  const char*)
936 {
937  auto fbReduceBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Reduce);
938  auto fbDescriptor = CreateReduceDescriptor(m_flatBufferBuilder,
939  reduceDescriptor.m_KeepDims,
940  m_flatBufferBuilder.CreateVector(reduceDescriptor.m_vAxis),
941  GetFlatBufferReduceOperation(reduceDescriptor.m_ReduceOperation));
942  auto fbReduceLayer = serializer::CreateReduceLayer(m_flatBufferBuilder,
943  fbReduceBaseLayer,
944  fbDescriptor);
945 
946  CreateAnyLayer(fbReduceLayer.o, serializer::Layer::Layer_ReduceLayer);
947 }
948 
949 // Build FlatBuffer for Reshape Layer
950 void SerializerStrategy::SerializeReshapeLayer(const armnn::IConnectableLayer* layer,
951  const armnn::ReshapeDescriptor& reshapeDescriptor,
952  const char* name)
953 {
954  IgnoreUnused(name);
955 
956  // Create FlatBuffer BaseLayer
957  auto flatBufferReshapeBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Reshape);
958 
959  std::vector<unsigned int> targetShape;
960  for (unsigned int i =0; i < reshapeDescriptor.m_TargetShape.GetNumDimensions(); i++)
961  {
962  targetShape.push_back(reshapeDescriptor.m_TargetShape[i]);
963  }
964 
965  auto flatBufferReshapeDesc = serializer::CreateReshapeDescriptor(m_flatBufferBuilder,
966  m_flatBufferBuilder.CreateVector(targetShape));
967 
968  // Create the FlatBuffer ReshapeLayer
969  auto flatBufferReshapeLayer = serializer::CreateReshapeLayer(m_flatBufferBuilder, flatBufferReshapeBaseLayer,
970  flatBufferReshapeDesc);
971 
972  // Add the AnyLayer to the FlatBufferLayers
973  CreateAnyLayer(flatBufferReshapeLayer.o, serializer::Layer::Layer_ReshapeLayer);
974 }
975 
976 void SerializerStrategy::SerializeResizeBilinearLayer(const armnn::IConnectableLayer* layer,
977  const armnn::ResizeBilinearDescriptor& resizeDescriptor,
978  const char* name)
979 {
980  IgnoreUnused(name);
981 
982  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_ResizeBilinear);
983 
984  auto flatBufferDescriptor =
985  CreateResizeBilinearDescriptor(m_flatBufferBuilder,
986  resizeDescriptor.m_TargetWidth,
987  resizeDescriptor.m_TargetHeight,
988  GetFlatBufferDataLayout(resizeDescriptor.m_DataLayout),
989  resizeDescriptor.m_AlignCorners,
990  resizeDescriptor.m_HalfPixelCenters);
991 
992  auto flatBufferLayer = serializer::CreateResizeBilinearLayer(m_flatBufferBuilder,
993  flatBufferBaseLayer,
994  flatBufferDescriptor);
995 
996  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_ResizeBilinearLayer);
997 }
998 
999 void SerializerStrategy::SerializeResizeLayer(const armnn::IConnectableLayer* layer,
1000  const armnn::ResizeDescriptor& resizeDescriptor,
1001  const char* name)
1002 {
1003  IgnoreUnused(name);
1004 
1005  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Resize);
1006 
1007  auto flatBufferDescriptor =
1008  CreateResizeDescriptor(m_flatBufferBuilder,
1009  resizeDescriptor.m_TargetHeight,
1010  resizeDescriptor.m_TargetWidth,
1011  GetFlatBufferResizeMethod(resizeDescriptor.m_Method),
1012  GetFlatBufferDataLayout(resizeDescriptor.m_DataLayout),
1013  resizeDescriptor.m_AlignCorners,
1014  resizeDescriptor.m_HalfPixelCenters);
1015 
1016  auto flatBufferLayer = serializer::CreateResizeLayer(m_flatBufferBuilder,
1017  flatBufferBaseLayer,
1018  flatBufferDescriptor);
1019 
1020  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_ResizeLayer);
1021 }
1022 
1023 void SerializerStrategy::SerializeRsqrtLayer(const armnn::IConnectableLayer* layer, const char* name)
1024 {
1025  IgnoreUnused(name);
1026 
1027  auto fbRsqrtBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Rsqrt);
1028  auto fbRsqrtLayer = serializer::CreateRsqrtLayer(m_flatBufferBuilder, fbRsqrtBaseLayer);
1029 
1030  CreateAnyLayer(fbRsqrtLayer.o, serializer::Layer::Layer_RsqrtLayer);
1031 }
1032 
1033 void SerializerStrategy::SerializeSliceLayer(const armnn::IConnectableLayer* layer,
1034  const armnn::SliceDescriptor& sliceDescriptor,
1035  const char* name)
1036 {
1037  IgnoreUnused(name);
1038 
1039  auto fbSliceBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Slice);
1040  auto fbSliceDescriptor = CreateSliceDescriptor(m_flatBufferBuilder,
1041  m_flatBufferBuilder.CreateVector(sliceDescriptor.m_Begin),
1042  m_flatBufferBuilder.CreateVector(sliceDescriptor.m_Size));
1043 
1044  auto fbSliceLayer = serializer::CreateSliceLayer(m_flatBufferBuilder, fbSliceBaseLayer, fbSliceDescriptor);
1045 
1046  CreateAnyLayer(fbSliceLayer.o, serializer::Layer::Layer_SliceLayer);
1047 }
1048 
1049 // Build FlatBuffer for Softmax Layer
1050 void SerializerStrategy::SerializeSoftmaxLayer(const armnn::IConnectableLayer* layer,
1051  const armnn::SoftmaxDescriptor& softmaxDescriptor,
1052  const char* name)
1053 {
1054  IgnoreUnused(name);
1055 
1056  // Create FlatBuffer BaseLayer
1057  auto flatBufferSoftmaxBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Softmax);
1058 
1059  // Create the FlatBuffer SoftmaxDescriptor
1060  auto flatBufferSoftmaxDesc =
1061  serializer::CreateSoftmaxDescriptor(m_flatBufferBuilder, softmaxDescriptor.m_Beta);
1062 
1063  // Create the FlatBuffer SoftmaxLayer
1064  auto flatBufferSoftmaxLayer =
1065  serializer::CreateSoftmaxLayer(m_flatBufferBuilder,
1066  flatBufferSoftmaxBaseLayer,
1067  flatBufferSoftmaxDesc);
1068 
1069  CreateAnyLayer(flatBufferSoftmaxLayer.o, serializer::Layer::Layer_SoftmaxLayer);
1070 }
1071 
1072 void SerializerStrategy::SerializePooling2dLayer(const armnn::IConnectableLayer* layer,
1073  const armnn::Pooling2dDescriptor& pooling2dDescriptor,
1074  const char* name)
1075 {
1076  IgnoreUnused(name);
1077 
1078  auto fbPooling2dBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Pooling2d);
1079  auto fbPooling2dDescriptor = serializer::CreatePooling2dDescriptor(
1080  m_flatBufferBuilder,
1081  GetFlatBufferPoolingAlgorithm(pooling2dDescriptor.m_PoolType),
1082  pooling2dDescriptor.m_PadLeft,
1083  pooling2dDescriptor.m_PadRight,
1084  pooling2dDescriptor.m_PadTop,
1085  pooling2dDescriptor.m_PadBottom,
1086  pooling2dDescriptor.m_PoolWidth,
1087  pooling2dDescriptor.m_PoolHeight,
1088  pooling2dDescriptor.m_StrideX,
1089  pooling2dDescriptor.m_StrideY,
1090  GetFlatBufferOutputShapeRounding(pooling2dDescriptor.m_OutputShapeRounding),
1091  GetFlatBufferPaddingMethod(pooling2dDescriptor.m_PaddingMethod),
1092  GetFlatBufferDataLayout(pooling2dDescriptor.m_DataLayout));
1093 
1094  auto fbPooling2dLayer = serializer::CreatePooling2dLayer(m_flatBufferBuilder,
1095  fbPooling2dBaseLayer,
1096  fbPooling2dDescriptor);
1097 
1098  CreateAnyLayer(fbPooling2dLayer.o, serializer::Layer::Layer_Pooling2dLayer);
1099 }
1100 
1101 void SerializerStrategy::SerializePreluLayer(const armnn::IConnectableLayer* layer,
1102  const char* name)
1103 {
1104  IgnoreUnused(name);
1105 
1106  // Create FlatBuffer BaseLayer
1107  auto flatBufferPreluBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Prelu);
1108 
1109  // Create the FlatBuffer AdditionLayer
1110  auto flatBufferPreluLayer = serializer::CreatePreluLayer(m_flatBufferBuilder, flatBufferPreluBaseLayer);
1111 
1112  // Add the AnyLayer to the FlatBufferLayers
1113  CreateAnyLayer(flatBufferPreluLayer.o, serializer::Layer::Layer_PreluLayer);
1114 }
1115 
1116 void SerializerStrategy::SerializeQuantizeLayer(const armnn::IConnectableLayer *layer, const char *name)
1117 {
1118  IgnoreUnused(name);
1119 
1120  auto fbQuantizeBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Quantize);
1121  auto fbQuantizeLayer = serializer::CreateQuantizeLayer(m_flatBufferBuilder,
1122  fbQuantizeBaseLayer);
1123  CreateAnyLayer(fbQuantizeLayer.o, serializer::Layer::Layer_QuantizeLayer);
1124 }
1125 
1126 // Build FlatBuffer for FullyConnected Layer
1127 void SerializerStrategy::SerializeFullyConnectedLayer(const armnn::IConnectableLayer* layer,
1128  const armnn::FullyConnectedDescriptor& fullyConnectedDescriptor,
1129  const char*)
1130 {
1131  // Create FlatBuffer BaseLayer
1132  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_FullyConnected);
1133 
1134  // Create FlatBuffer FullyConnectedDescriptor
1135  auto flatBufferDescriptor =
1136  serializer::CreateFullyConnectedDescriptor(m_flatBufferBuilder,
1137  fullyConnectedDescriptor.m_BiasEnabled,
1138  fullyConnectedDescriptor.m_TransposeWeightMatrix,
1139  fullyConnectedDescriptor.m_ConstantWeights);
1140 
1141  // Create FlatBuffer FullyConnectedLayer
1142  auto flatBufferLayer = serializer::CreateFullyConnectedLayer(m_flatBufferBuilder,
1143  flatBufferBaseLayer,
1144  flatBufferDescriptor);
1145 
1146  // Add created FullyConnectedLayer to the FlatBufferLayers
1147  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_FullyConnectedLayer);
1148 }
1149 
1150 // Build FlatBuffer for SpaceToBatchNd Layer
1151 void SerializerStrategy::SerializeSpaceToBatchNdLayer(const armnn::IConnectableLayer* layer,
1152  const armnn::SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor,
1153  const char* name)
1154 {
1155  IgnoreUnused(name);
1156 
1157  // Create FlatBuffer BaseLayer
1158  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_SpaceToBatchNd);
1159 
1160  std::vector<unsigned int> padList;
1161  padList.reserve(spaceToBatchNdDescriptor.m_PadList.size()*2);
1162  for (auto& pad : spaceToBatchNdDescriptor.m_PadList)
1163  {
1164  padList.push_back(pad.first);
1165  padList.push_back(pad.second);
1166  }
1167 
1168  auto flatBufferDescriptor =
1169  CreateSpaceToBatchNdDescriptor(m_flatBufferBuilder,
1170  m_flatBufferBuilder.CreateVector(spaceToBatchNdDescriptor.m_BlockShape),
1171  m_flatBufferBuilder.CreateVector(padList),
1172  GetFlatBufferDataLayout(spaceToBatchNdDescriptor.m_DataLayout));
1173 
1174  auto flatBufferLayer = serializer::CreateSpaceToBatchNdLayer(m_flatBufferBuilder,
1175  flatBufferBaseLayer,
1176  flatBufferDescriptor);
1177 
1178  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_SpaceToBatchNdLayer);
1179 }
1180 
1181 // Build FlatBuffer for SpaceToDepthLayer
1182 void SerializerStrategy::SerializeSpaceToDepthLayer(const armnn::IConnectableLayer* layer,
1183  const armnn::SpaceToDepthDescriptor& spaceToDepthDescriptor,
1184  const char* name)
1185 {
1186  IgnoreUnused(name);
1187 
1188  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_SpaceToDepth);
1189  auto flatBufferDescriptor =
1190  CreateSpaceToDepthDescriptor(m_flatBufferBuilder,
1191  spaceToDepthDescriptor.m_BlockSize,
1192  GetFlatBufferDataLayout(spaceToDepthDescriptor.m_DataLayout));
1193 
1194  auto flatBufferLayer = serializer::CreateSpaceToDepthLayer(m_flatBufferBuilder,
1195  flatBufferBaseLayer,
1196  flatBufferDescriptor);
1197 
1198  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_SpaceToDepthLayer);
1199 }
1200 
1201 // Build FlatBuffer for Splitter Layer
1202 void SerializerStrategy::SerializeSplitterLayer(const armnn::IConnectableLayer* layer,
1203  const armnn::ViewsDescriptor& viewsDescriptor,
1204  const char* name)
1205 {
1206  IgnoreUnused(name);
1207 
1208  // Create FlatBuffer ViewOrigins
1209  std::vector<flatbuffers::Offset<UintVector>> flatBufferViewOrigins;
1210  flatBufferViewOrigins.reserve(viewsDescriptor.GetNumViews());
1211 
1212  for(unsigned int vIdx = 0; vIdx < viewsDescriptor.GetNumViews(); ++vIdx)
1213  {
1214  std::vector<uint32_t> viewOrigin;
1215  viewOrigin.reserve(viewsDescriptor.GetNumDimensions());
1216 
1217  // Copy vector
1218  for(unsigned int dIdx = 0; dIdx < viewsDescriptor.GetNumDimensions(); ++dIdx)
1219  {
1220  viewOrigin.push_back(viewsDescriptor.GetViewOrigin(vIdx)[dIdx]);
1221  }
1222 
1223  flatBufferViewOrigins.push_back(CreateUintVector(m_flatBufferBuilder,
1224  m_flatBufferBuilder.CreateVector(viewOrigin)));
1225  }
1226 
1227  // Create FlatBuffer OriginsDescriptor
1228  auto flatBufferOriginDescriptor = CreateOriginsDescriptor(m_flatBufferBuilder,
1229  viewsDescriptor.GetOrigins().GetConcatAxis(),
1230  viewsDescriptor.GetOrigins().GetNumViews(),
1231  viewsDescriptor.GetOrigins().GetNumDimensions(),
1232  m_flatBufferBuilder.CreateVector(flatBufferViewOrigins));
1233 
1234  // Create FlatBuffer ViewOrigins
1235  std::vector<flatbuffers::Offset<UintVector>> flatBufferViewSizes;
1236  flatBufferViewSizes.reserve(viewsDescriptor.GetNumViews());
1237 
1238  for(unsigned int vIdx = 0; vIdx < viewsDescriptor.GetNumViews(); ++vIdx)
1239  {
1240  std::vector<uint32_t> viewSize;
1241  viewSize.reserve(viewsDescriptor.GetNumDimensions());
1242 
1243  // Copy vector
1244  for(unsigned int dIdx = 0; dIdx < viewsDescriptor.GetNumDimensions(); ++dIdx)
1245  {
1246  viewSize.push_back(viewsDescriptor.GetViewSizes(vIdx)[dIdx]);
1247  }
1248 
1249  flatBufferViewSizes.push_back(CreateUintVector(m_flatBufferBuilder,
1250  m_flatBufferBuilder.CreateVector(viewSize)));
1251  }
1252 
1253  // Create FlatBuffer ViewsDescriptor
1254  auto flatBufferViewsDescriptor = CreateViewsDescriptor(m_flatBufferBuilder,
1255  flatBufferOriginDescriptor,
1256  m_flatBufferBuilder.CreateVector(flatBufferViewSizes));
1257 
1258  // Create FlatBuffer BaseLayer
1259  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Splitter);
1260 
1261  auto flatBufferSplitterLayer = serializer::CreateSplitterLayer(m_flatBufferBuilder,
1262  flatBufferBaseLayer,
1263  flatBufferViewsDescriptor);
1264 
1265  CreateAnyLayer(flatBufferSplitterLayer.o, serializer::Layer::Layer_SplitterLayer);
1266 }
1267 
1268 void SerializerStrategy::SerializeNormalizationLayer(const armnn::IConnectableLayer* layer,
1269  const armnn::NormalizationDescriptor& descriptor,
1270  const char* name)
1271 {
1272  IgnoreUnused(name);
1273 
1274  auto fbNormalizationBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Normalization);
1275 
1276  auto fbNormalizationDescriptor = serializer::CreateNormalizationDescriptor(
1277  m_flatBufferBuilder,
1278  GetFlatBufferNormalizationAlgorithmChannel(descriptor.m_NormChannelType),
1279  GetFlatBufferNormalizationAlgorithmMethod(descriptor.m_NormMethodType),
1280  descriptor.m_NormSize,
1281  descriptor.m_Alpha,
1282  descriptor.m_Beta,
1283  descriptor.m_K,
1284  GetFlatBufferDataLayout(descriptor.m_DataLayout));
1285 
1286  auto flatBufferLayer = serializer::CreateNormalizationLayer(m_flatBufferBuilder,
1287  fbNormalizationBaseLayer,
1288  fbNormalizationDescriptor);
1289 
1290  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_NormalizationLayer);
1291 }
1292 
1293 void SerializerStrategy::SerializeShapeLayer(const armnn::IConnectableLayer* layer,
1294  const char* name)
1295 {
1296  IgnoreUnused(name);
1297 
1298  auto shapeBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Shape);
1299  auto shapeLayer = serializer::CreateShapeLayer(m_flatBufferBuilder, shapeBaseLayer);
1300 
1301  CreateAnyLayer(shapeLayer.o, serializer::Layer::Layer_ShapeLayer);
1302 }
1303 
1304 void SerializerStrategy::SerializeStackLayer(const armnn::IConnectableLayer* layer,
1305  const armnn::StackDescriptor& stackDescriptor,
1306  const char* name)
1307 {
1308  IgnoreUnused(name);
1309 
1310  auto stackBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Stack);
1311 
1312  std::vector<unsigned int> inputShape;
1313  for (unsigned int i =0; i < stackDescriptor.m_InputShape.GetNumDimensions(); i++)
1314  {
1315  inputShape.push_back(stackDescriptor.m_InputShape[i]);
1316  }
1317 
1318  auto flatBufferStackDescriptor = CreateStackDescriptor(m_flatBufferBuilder,
1319  stackDescriptor.m_Axis,
1320  stackDescriptor.m_NumInputs,
1321  m_flatBufferBuilder.CreateVector(inputShape));
1322 
1323  auto stackLayer = serializer::CreateStackLayer(m_flatBufferBuilder, stackBaseLayer, flatBufferStackDescriptor);
1324  CreateAnyLayer(stackLayer.o, serializer::Layer::Layer_StackLayer);
1325 }
1326 
1327 void SerializerStrategy::SerializeStandInLayer(const armnn::IConnectableLayer *layer,
1328  const armnn::StandInDescriptor& standInDescriptor,
1329  const char *name)
1330 {
1331  IgnoreUnused(name);
1332 
1333  auto fbDescriptor = serializer::CreateStandInDescriptor(m_flatBufferBuilder,
1334  standInDescriptor.m_NumInputs,
1335  standInDescriptor.m_NumOutputs);
1336 
1337  auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_StandIn);
1338  auto fbLayer = serializer::CreateStandInLayer(m_flatBufferBuilder, fbBaseLayer, fbDescriptor);
1339 
1340  CreateAnyLayer(fbLayer.o, serializer::Layer::Layer_StandInLayer);
1341 }
1342 
1343 void SerializerStrategy::SerializeStridedSliceLayer(const armnn::IConnectableLayer* layer,
1344  const armnn::StridedSliceDescriptor& stridedSliceDescriptor,
1345  const char* name)
1346 {
1347  IgnoreUnused(name);
1348 
1349  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_StridedSlice);
1350 
1351  auto flatBufferDescriptor =
1352  CreateStridedSliceDescriptor(m_flatBufferBuilder,
1353  m_flatBufferBuilder.CreateVector(stridedSliceDescriptor.m_Begin),
1354  m_flatBufferBuilder.CreateVector(stridedSliceDescriptor.m_End),
1355  m_flatBufferBuilder.CreateVector(stridedSliceDescriptor.m_Stride),
1356  stridedSliceDescriptor.m_BeginMask,
1357  stridedSliceDescriptor.m_EndMask,
1358  stridedSliceDescriptor.m_ShrinkAxisMask,
1359  stridedSliceDescriptor.m_EllipsisMask,
1360  stridedSliceDescriptor.m_NewAxisMask,
1361  GetFlatBufferDataLayout(stridedSliceDescriptor.m_DataLayout));
1362 
1363  auto flatBufferLayer = serializer::CreateStridedSliceLayer(m_flatBufferBuilder,
1364  flatBufferBaseLayer,
1365  flatBufferDescriptor);
1366 
1367  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_StridedSliceLayer);
1368 }
1369 
1370 void SerializerStrategy::SerializeSubtractionLayer(const armnn::IConnectableLayer* layer, const char* name)
1371 {
1372  IgnoreUnused(name);
1373 
1374  auto fbSubtractionBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Subtraction);
1375  auto fbSubtractionLayer = serializer::CreateSubtractionLayer(m_flatBufferBuilder, fbSubtractionBaseLayer);
1376 
1377  CreateAnyLayer(fbSubtractionLayer.o, serializer::Layer::Layer_SubtractionLayer);
1378 }
1379 
1380 void SerializerStrategy::SerializeSwitchLayer(const armnn::IConnectableLayer* layer, const char* name)
1381 {
1382  IgnoreUnused(name);
1383 
1384  auto fbSwitchBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Switch);
1385  auto fbSwitchLayer = serializer::CreateSwitchLayer(m_flatBufferBuilder, fbSwitchBaseLayer);
1386 
1387  CreateAnyLayer(fbSwitchLayer.o, serializer::Layer::Layer_SwitchLayer);
1388 }
1389 
1390 void SerializerStrategy::SerializeTransposeConvolution2dLayer(
1391  const armnn::IConnectableLayer* layer,
1392  const armnn::TransposeConvolution2dDescriptor& descriptor,
1393  const std::vector<armnn::ConstTensor>& constants,
1394  const char* name)
1395 {
1396  IgnoreUnused(name);
1397 
1398  const armnn::ConstTensor& weights = constants.at(0);
1399 
1400  auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_TransposeConvolution2d);
1401  auto fbDescriptor = CreateTransposeConvolution2dDescriptor(m_flatBufferBuilder,
1402  descriptor.m_PadLeft,
1403  descriptor.m_PadRight,
1404  descriptor.m_PadTop,
1405  descriptor.m_PadBottom,
1406  descriptor.m_StrideX,
1407  descriptor.m_StrideY,
1408  descriptor.m_BiasEnabled,
1409  GetFlatBufferDataLayout(descriptor.m_DataLayout));
1410 
1411  // weights & biases
1412  auto fbWeightsConstTensorInfo = CreateConstTensorInfo(weights);
1413  flatbuffers::Offset<serializer::ConstTensor> fbBiasesConstTensorInfo;
1414  if (constants.size() > 1)
1415  {
1416  const armnn::ConstTensor& biases = constants.at(1);
1417  fbBiasesConstTensorInfo = CreateConstTensorInfo(biases);
1418  }
1419 
1420  auto fbLayer = CreateTransposeConvolution2dLayer(m_flatBufferBuilder,
1421  fbBaseLayer,
1422  fbDescriptor,
1423  fbWeightsConstTensorInfo,
1424  fbBiasesConstTensorInfo);
1425 
1426  CreateAnyLayer(fbLayer.o, serializer::Layer::Layer_TransposeConvolution2dLayer);
1427 }
1428 
1429 void SerializerStrategy::SerializeTransposeLayer(const armnn::IConnectableLayer* layer,
1430  const armnn::TransposeDescriptor& descriptor,
1431  const char* name)
1432 {
1433  IgnoreUnused(name);
1434 
1435  // Create FlatBuffer BaseLayer
1436  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Transpose);
1437 
1438  std::vector<unsigned int> dimMappings;
1439  for (unsigned int i=0; i<descriptor.m_DimMappings.GetSize(); ++i)
1440  {
1441  dimMappings.push_back(descriptor.m_DimMappings[i]);
1442  }
1443 
1444  auto flatBufferDesc = serializer::CreateTransposeDescriptor(m_flatBufferBuilder,
1445  m_flatBufferBuilder.CreateVector(dimMappings));
1446 
1447  // Create the FlatBuffer TransposeLayer
1448  auto flatBufferLayer = serializer::CreateTransposeLayer(m_flatBufferBuilder,
1449  flatBufferBaseLayer,
1450  flatBufferDesc);
1451 
1452  // Add the AnyLayer to the FlatBufferLayers
1453  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_TransposeLayer);
1454 }
1455 
1456 void SerializerStrategy::SerializeQLstmLayer(const armnn::IConnectableLayer* layer,
1457  const armnn::QLstmDescriptor& descriptor,
1458  const std::vector<armnn::ConstTensor>& constants,
1459  const char* name)
1460 {
1461  IgnoreUnused(name);
1462 
1463  auto fbQLstmBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_QLstm);
1464 
1465  auto fbQLstmDescriptor = serializer::CreateQLstmDescriptor(
1466  m_flatBufferBuilder,
1467  descriptor.m_CifgEnabled,
1468  descriptor.m_PeepholeEnabled,
1469  descriptor.m_ProjectionEnabled,
1470  descriptor.m_LayerNormEnabled,
1471  descriptor.m_CellClip,
1472  descriptor.m_ProjectionClip,
1473  descriptor.m_InputIntermediateScale,
1474  descriptor.m_ForgetIntermediateScale,
1475  descriptor.m_CellIntermediateScale,
1476  descriptor.m_OutputIntermediateScale,
1477  descriptor.m_HiddenStateZeroPoint,
1478  descriptor.m_HiddenStateScale
1479  );
1480 
1481  // Index for constants vector
1482  std::size_t i = 0;
1483 
1484  // Mandatory params
1485  auto inputToForgetWeights = CreateConstTensorInfo(constants[i++]); //InputToForgetWeights
1486  auto inputToCellWeights = CreateConstTensorInfo(constants[i++]); //InputToCellWeights
1487  auto inputToOutputWeights = CreateConstTensorInfo(constants[i++]); //InputToOutputWeights
1488  auto recurrentToForgetWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToForgetWeights
1489  auto recurrentToCellWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToCellWeights
1490  auto recurrentToOutputWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToOutputWeights
1491  auto forgetGateBias = CreateConstTensorInfo(constants[i++]); //ForgetGateBias
1492  auto cellBias = CreateConstTensorInfo(constants[i++]); //CellBias
1493  auto outputGateBias = CreateConstTensorInfo(constants[i++]); //OutputGateBias
1494 
1495  // CIFG
1496  flatbuffers::Offset<serializer::ConstTensor> inputToInputWeights;
1497  flatbuffers::Offset<serializer::ConstTensor> recurrentToInputWeights;
1498  flatbuffers::Offset<serializer::ConstTensor> inputGateBias;
1499 
1500  if (!descriptor.m_CifgEnabled)
1501  {
1502  inputToInputWeights = CreateConstTensorInfo(constants[i++]); //InputToInputWeights
1503  recurrentToInputWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToInputWeights
1504  inputGateBias = CreateConstTensorInfo(constants[i++]); //InputGateBias
1505  }
1506 
1507  // Peephole
1508  flatbuffers::Offset<serializer::ConstTensor> cellToInputWeights;
1509  flatbuffers::Offset<serializer::ConstTensor> cellToForgetWeights;
1510  flatbuffers::Offset<serializer::ConstTensor> cellToOutputWeights;
1511 
1512  if (descriptor.m_PeepholeEnabled)
1513  {
1514  if (!descriptor.m_CifgEnabled)
1515  {
1516  cellToInputWeights = CreateConstTensorInfo(constants[i++]); //CellToInputWeights
1517  }
1518  cellToForgetWeights = CreateConstTensorInfo(constants[i++]); //CellToForgetWeights
1519  cellToOutputWeights = CreateConstTensorInfo(constants[i++]); //CellToOutputWeights
1520  }
1521 
1522  // Projection
1523  flatbuffers::Offset<serializer::ConstTensor> projectionWeights;
1524  flatbuffers::Offset<serializer::ConstTensor> projectionBias;
1525 
1526  if (descriptor.m_ProjectionEnabled)
1527  {
1528  projectionWeights = CreateConstTensorInfo(constants[i++]); //ProjectionWeights
1529  projectionBias = CreateConstTensorInfo(constants[i++]); //ProjectionBias
1530  }
1531 
1532  // Layer norm
1533  flatbuffers::Offset<serializer::ConstTensor> inputLayerNormWeights;
1534  flatbuffers::Offset<serializer::ConstTensor> forgetLayerNormWeights;
1535  flatbuffers::Offset<serializer::ConstTensor> cellLayerNormWeights;
1536  flatbuffers::Offset<serializer::ConstTensor> outputLayerNormWeights;
1537 
1538  if (descriptor.m_LayerNormEnabled)
1539  {
1540  if (!descriptor.m_CifgEnabled)
1541  {
1542  inputLayerNormWeights = CreateConstTensorInfo(constants[i++]); //InputLayerNormWeights
1543  }
1544  forgetLayerNormWeights = CreateConstTensorInfo(constants[i++]); //ForgetLayerNormWeights
1545  cellLayerNormWeights = CreateConstTensorInfo(constants[i++]); //CellLayerNormWeights
1546  outputLayerNormWeights = CreateConstTensorInfo(constants[i++]); //OutputLayerNormWeights
1547  }
1548 
1549  auto fbQLstmParams = serializer::CreateQLstmInputParams(
1550  m_flatBufferBuilder,
1551  inputToForgetWeights,
1552  inputToCellWeights,
1553  inputToOutputWeights,
1554  recurrentToForgetWeights,
1555  recurrentToCellWeights,
1556  recurrentToOutputWeights,
1557  forgetGateBias,
1558  cellBias,
1559  outputGateBias,
1560  inputToInputWeights,
1561  recurrentToInputWeights,
1562  inputGateBias,
1563  projectionWeights,
1564  projectionBias,
1565  cellToInputWeights,
1566  cellToForgetWeights,
1567  cellToOutputWeights,
1568  inputLayerNormWeights,
1569  forgetLayerNormWeights,
1570  cellLayerNormWeights,
1571  outputLayerNormWeights);
1572 
1573  auto fbQLstmLayer = serializer::CreateQLstmLayer(
1574  m_flatBufferBuilder,
1575  fbQLstmBaseLayer,
1576  fbQLstmDescriptor,
1577  fbQLstmParams);
1578 
1579  CreateAnyLayer(fbQLstmLayer.o, serializer::Layer::Layer_QLstmLayer);
1580 }
1581 
1582 void SerializerStrategy::SerializeQuantizedLstmLayer(const armnn::IConnectableLayer* layer,
1583  const std::vector<armnn::ConstTensor>& constants,
1584  const char* name)
1585 {
1586  IgnoreUnused(name);
1587 
1588  auto fbQuantizedLstmBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_QuantizedLstm);
1589 
1590  // index for constants vector
1591  size_t i = 0;
1592 
1593  // Get input parameters
1594  auto inputToInputWeights = CreateConstTensorInfo(constants[i++]);
1595  auto inputToForgetWeights = CreateConstTensorInfo(constants[i++]);
1596  auto inputToCellWeights = CreateConstTensorInfo(constants[i++]);
1597  auto inputToOutputWeights = CreateConstTensorInfo(constants[i++]);
1598 
1599  auto recurrentToInputWeights = CreateConstTensorInfo(constants[i++]);
1600  auto recurrentToForgetWeights = CreateConstTensorInfo(constants[i++]);
1601  auto recurrentToCellWeights = CreateConstTensorInfo(constants[i++]);
1602  auto recurrentToOutputWeights = CreateConstTensorInfo(constants[i++]);
1603 
1604  auto inputGateBias = CreateConstTensorInfo(constants[i++]);
1605  auto forgetGateBias = CreateConstTensorInfo(constants[i++]);
1606  auto cellBias = CreateConstTensorInfo(constants[i++]);
1607  auto outputGateBias = CreateConstTensorInfo(constants[i++]);
1608 
1609  auto fbQuantizedLstmParams = serializer::CreateQuantizedLstmInputParams(
1610  m_flatBufferBuilder,
1611  inputToInputWeights,
1612  inputToForgetWeights,
1613  inputToCellWeights,
1614  inputToOutputWeights,
1615  recurrentToInputWeights,
1616  recurrentToForgetWeights,
1617  recurrentToCellWeights,
1618  recurrentToOutputWeights,
1619  inputGateBias,
1620  forgetGateBias,
1621  cellBias,
1622  outputGateBias);
1623 
1624  auto fbQuantizedLstmLayer = serializer::CreateQuantizedLstmLayer(
1625  m_flatBufferBuilder,
1626  fbQuantizedLstmBaseLayer,
1627  fbQuantizedLstmParams);
1628 
 1629  CreateAnyLayer(fbQuantizedLstmLayer.o, serializer::Layer::Layer_QuantizedLstmLayer);
 1630 }
1631 
1632 void SerializerStrategy::SerializeUnidirectionalSequenceLstmLayer(
1633  const armnn::IConnectableLayer* layer,
 1634  const armnn::UnidirectionalSequenceLstmDescriptor& descriptor,
 1635  const std::vector<armnn::ConstTensor>& constants,
1636  const char* name)
1637 {
1638  IgnoreUnused(name);
1639 
1640  auto fbUnidirectionalSequenceLstmBaseLayer =
 1641  CreateLayerBase(layer, serializer::LayerType::LayerType_UnidirectionalSequenceLstm);
 1642 
1643  auto fbUnidirectionalSequenceLstmDescriptor = serializer::CreateUnidirectionalSequenceLstmDescriptor(
1644  m_flatBufferBuilder,
1645  descriptor.m_ActivationFunc,
1646  descriptor.m_ClippingThresCell,
1647  descriptor.m_ClippingThresProj,
1648  descriptor.m_CifgEnabled,
1649  descriptor.m_PeepholeEnabled,
1650  descriptor.m_ProjectionEnabled,
1651  descriptor.m_LayerNormEnabled,
1652  descriptor.m_TimeMajor);
1653 
1654  // Index for constants vector
1655  std::size_t i = 0;
1656 
1657  // Get mandatory/basic input parameters
1658  auto inputToForgetWeights = CreateConstTensorInfo(constants[i++]); //InputToForgetWeights
1659  auto inputToCellWeights = CreateConstTensorInfo(constants[i++]); //InputToCellWeights
1660  auto inputToOutputWeights = CreateConstTensorInfo(constants[i++]); //InputToOutputWeights
1661  auto recurrentToForgetWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToForgetWeights
1662  auto recurrentToCellWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToCellWeights
1663  auto recurrentToOutputWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToOutputWeights
1664  auto forgetGateBias = CreateConstTensorInfo(constants[i++]); //ForgetGateBias
1665  auto cellBias = CreateConstTensorInfo(constants[i++]); //CellBias
1666  auto outputGateBias = CreateConstTensorInfo(constants[i++]); //OutputGateBias
1667 
1668  //Define optional parameters, these will be set depending on configuration in Lstm descriptor
1669  flatbuffers::Offset<serializer::ConstTensor> inputToInputWeights;
1670  flatbuffers::Offset<serializer::ConstTensor> recurrentToInputWeights;
1671  flatbuffers::Offset<serializer::ConstTensor> cellToInputWeights;
1672  flatbuffers::Offset<serializer::ConstTensor> inputGateBias;
1673  flatbuffers::Offset<serializer::ConstTensor> projectionWeights;
1674  flatbuffers::Offset<serializer::ConstTensor> projectionBias;
1675  flatbuffers::Offset<serializer::ConstTensor> cellToForgetWeights;
1676  flatbuffers::Offset<serializer::ConstTensor> cellToOutputWeights;
1677  flatbuffers::Offset<serializer::ConstTensor> inputLayerNormWeights;
1678  flatbuffers::Offset<serializer::ConstTensor> forgetLayerNormWeights;
1679  flatbuffers::Offset<serializer::ConstTensor> cellLayerNormWeights;
1680  flatbuffers::Offset<serializer::ConstTensor> outputLayerNormWeights;
1681 
1682  if (!descriptor.m_CifgEnabled)
1683  {
1684  inputToInputWeights = CreateConstTensorInfo(constants[i++]); //InputToInputWeights
1685  recurrentToInputWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToInputWeights
1686  inputGateBias = CreateConstTensorInfo(constants[i++]); //InputGateBias
1687  }
1688 
1689  if (descriptor.m_PeepholeEnabled)
1690  {
1691  if (!descriptor.m_CifgEnabled)
1692  {
1693  cellToInputWeights = CreateConstTensorInfo(constants[i++]); //CellToInputWeights
1694  }
1695  cellToForgetWeights = CreateConstTensorInfo(constants[i++]); //CellToForgetWeights
1696  cellToOutputWeights = CreateConstTensorInfo(constants[i++]); //CellToOutputWeights
1697  }
1698 
1699  if (descriptor.m_ProjectionEnabled)
1700  {
1701  projectionWeights = CreateConstTensorInfo(constants[i++]); //ProjectionWeights
1702  projectionBias = CreateConstTensorInfo(constants[i++]); //ProjectionBias
1703  }
1704 
1705  if (descriptor.m_LayerNormEnabled)
1706  {
1707  if (!descriptor.m_CifgEnabled)
1708  {
1709  inputLayerNormWeights = CreateConstTensorInfo(constants[i++]); //InputLayerNormWeights
1710  }
1711  forgetLayerNormWeights = CreateConstTensorInfo(constants[i++]); //ForgetLayerNormWeights
1712  cellLayerNormWeights = CreateConstTensorInfo(constants[i++]); //CellLayerNormWeights
1713  outputLayerNormWeights = CreateConstTensorInfo(constants[i++]); //OutputLayerNormWeights
1714  }
1715 
1716  auto fbUnidirectionalSequenceLstmParams = serializer::CreateLstmInputParams(
1717  m_flatBufferBuilder,
1718  inputToForgetWeights,
1719  inputToCellWeights,
1720  inputToOutputWeights,
1721  recurrentToForgetWeights,
1722  recurrentToCellWeights,
1723  recurrentToOutputWeights,
1724  forgetGateBias,
1725  cellBias,
1726  outputGateBias,
1727  inputToInputWeights,
1728  recurrentToInputWeights,
1729  cellToInputWeights,
1730  inputGateBias,
1731  projectionWeights,
1732  projectionBias,
1733  cellToForgetWeights,
1734  cellToOutputWeights,
1735  inputLayerNormWeights,
1736  forgetLayerNormWeights,
1737  cellLayerNormWeights,
1738  outputLayerNormWeights);
1739 
1740  auto fbUnidirectionalSequenceLstmLayer = serializer::CreateUnidirectionalSequenceLstmLayer(
1741  m_flatBufferBuilder,
1742  fbUnidirectionalSequenceLstmBaseLayer,
1743  fbUnidirectionalSequenceLstmDescriptor,
1744  fbUnidirectionalSequenceLstmParams);
1745 
1746  CreateAnyLayer(fbUnidirectionalSequenceLstmLayer.o, serializer::Layer::Layer_UnidirectionalSequenceLstmLayer);
1747 }
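
 // Note: the order and number of entries expected in the `constants` vector above is implicit in the
 // i++ increments. As a reading aid only, here is a hypothetical helper (not part of Serializer.cpp)
 // that derives the expected count from the same descriptor flags; it assumes armnn::LstmDescriptor
 // exposes the m_CifgEnabled/m_PeepholeEnabled/m_ProjectionEnabled/m_LayerNormEnabled flags used above.

 #include <armnn/Descriptors.hpp>
 #include <cstddef>

 std::size_t ExpectedUnidirectionalSequenceLstmConstantCount(const armnn::LstmDescriptor& desc)
 {
     std::size_t count = 9;                         // mandatory input/recurrent weights and gate biases
     if (!desc.m_CifgEnabled)
     {
         count += 3;                                // inputToInput, recurrentToInput, inputGateBias
     }
     if (desc.m_PeepholeEnabled)
     {
         count += desc.m_CifgEnabled ? 2u : 3u;     // cellToForget, cellToOutput (+ cellToInput without CIFG)
     }
     if (desc.m_ProjectionEnabled)
     {
         count += 2;                                // projectionWeights, projectionBias
     }
     if (desc.m_LayerNormEnabled)
     {
         count += desc.m_CifgEnabled ? 3u : 4u;     // forget/cell/output (+ input without CIFG) layer norm weights
     }
     return count;
 }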
1748 
1749 fb::Offset<serializer::LayerBase> SerializerStrategy::CreateLayerBase(const IConnectableLayer* layer,
1750  const serializer::LayerType layerType)
1751 {
1752 
1753  uint32_t fbIndex = GetSerializedId(layer->GetGuid());
1754 
1755  std::vector<fb::Offset<serializer::InputSlot>> inputSlots = CreateInputSlots(layer);
1756  std::vector<fb::Offset<serializer::OutputSlot>> outputSlots = CreateOutputSlots(layer);
1757 
1758  return serializer::CreateLayerBase(m_flatBufferBuilder,
1759  fbIndex,
1760  m_flatBufferBuilder.CreateString(layer->GetName()),
1761  layerType,
1762  m_flatBufferBuilder.CreateVector(inputSlots),
1763  m_flatBufferBuilder.CreateVector(outputSlots));
1764 }
1765 
1766 void SerializerStrategy::CreateAnyLayer(const flatbuffers::Offset<void>& layer, const serializer::Layer serializerLayer)
1767 {
1768 
1769  auto anyLayer = armnnSerializer::CreateAnyLayer(m_flatBufferBuilder, serializerLayer, layer);
1770  m_serializedLayers.push_back(anyLayer);
1771 }
1772 
1773 template <typename T>
1774 flatbuffers::Offset<flatbuffers::Vector<T>> SerializerStrategy::CreateDataVector(const void* memory, unsigned int size)
1775 {
1776  const T* buffer = reinterpret_cast<const T*>(memory);
1777  std::vector<T> vector(buffer, buffer + (size / sizeof(T)));
1778  auto fbVector = m_flatBufferBuilder.CreateVector(vector);
1779  return fbVector;
1780 }
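
 // Note: CreateDataVector<T> reinterprets the constant tensor's backing memory as size / sizeof(T)
 // elements of T, so the serialized payload keeps the original bit patterns rather than converting
 // values. A hypothetical illustration (not part of this file) of that size arithmetic:

 #include <cstdint>

 // A 6-element Float32 tensor occupies 24 bytes; packed through a 32-bit payload it becomes
 // 24 / sizeof(int32_t) == 6 words whose bit patterns match the original floats.
 static const float kExampleWeights[6] = { 0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.6f };
 static_assert(sizeof(kExampleWeights) / sizeof(int32_t) == 6, "24 bytes pack into six 32-bit words");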
1781 
1782 flatbuffers::Offset<TensorInfo> SerializerStrategy::CreateTensorInfo(const armnn::TensorInfo& tensorInfo)
1783 {
1784  // Get the dimensions
1785  std::vector<unsigned int> shape;
1786  std::vector<bool> specificity;
1787  // This assumes that the TensorShape constructors have ensured that the size of m_DimensionsSpecificity
1788  // matches the size of dimensions.
1789  for(unsigned int dim = 0; dim < tensorInfo.GetShape().GetNumDimensions(); ++dim)
1790  {
1791  specificity.push_back(tensorInfo.GetShape().GetDimensionSpecificity(dim));
1792 
1793  if (tensorInfo.GetShape().GetDimensionSpecificity(dim))
1794  {
1795  shape.push_back(tensorInfo.GetShape()[dim]);
1796  }
1797  else
1798  {
1799  shape.push_back(0);
1800  }
1801  }
1802 
1803  if (tensorInfo.HasPerAxisQuantization())
1804  {
1805  // Create FlatBuffer TensorInfo
1806  auto flatBufferTensorInfo =
1807  serializer::CreateTensorInfo(m_flatBufferBuilder,
1808  m_flatBufferBuilder.CreateVector(shape),
1809  GetFlatBufferDataType(tensorInfo.GetDataType()),
1810  tensorInfo.GetQuantizationScales()[0],
1811  tensorInfo.GetQuantizationOffset(),
1812  m_flatBufferBuilder.CreateVector(tensorInfo.GetQuantizationScales()),
1813  tensorInfo.GetQuantizationDim().value(),
1814  static_cast<unsigned int>
1815  (tensorInfo.GetShape().GetDimensionality()),
1816  m_flatBufferBuilder.CreateVector(specificity));
1817  return flatBufferTensorInfo;
1818  }
1819 
1820  // Create FlatBuffer TensorInfo
1821  auto flatBufferTensorInfo = serializer::CreateTensorInfo(m_flatBufferBuilder,
1822  m_flatBufferBuilder.CreateVector(shape),
1823  GetFlatBufferDataType(tensorInfo.GetDataType()),
1824  tensorInfo.GetQuantizationScale(),
1825  tensorInfo.GetQuantizationOffset(),
1826  0,
1827  0,
1828  static_cast<unsigned int>
1829  (tensorInfo.GetShape().GetDimensionality()),
1830  m_flatBufferBuilder.CreateVector(specificity));
1831  return flatBufferTensorInfo;
1832 }
1833 
1834 flatbuffers::Offset<serializer::ConstTensor>
1835  SerializerStrategy::CreateConstTensorInfo(const armnn::ConstTensor& constTensor)
1836 {
1837  armnn::TensorInfo tensorInfo = constTensor.GetInfo();
1838 
1839  flatbuffers::Offset<void> fbPayload;
1840 
1841  switch (tensorInfo.GetDataType())
1842  {
 1843  case armnn::DataType::Signed64:
 1844  {
1845  auto fbVector = CreateDataVector<int64_t>(constTensor.GetMemoryArea(), constTensor.GetNumBytes());
1846  flatbuffers::Offset<serializer::LongData> flatBuffersData = serializer::CreateLongData(
1847  m_flatBufferBuilder,
1848  fbVector);
1849  fbPayload = flatBuffersData.o;
1850  break;
1851  }
 1852  case armnn::DataType::Float32:
 1853  case armnn::DataType::Signed32:
 1854  {
1855  auto fbVector = CreateDataVector<int32_t>(constTensor.GetMemoryArea(), constTensor.GetNumBytes());
1856  flatbuffers::Offset<serializer::IntData> flatBuffersData = serializer::CreateIntData(
1857  m_flatBufferBuilder,
1858  fbVector);
1859  fbPayload = flatBuffersData.o;
1860  break;
1861  }
 1862  case armnn::DataType::Float16:
 1863  case armnn::DataType::BFloat16:
 1864  case armnn::DataType::QSymmS16:
 1865  {
1866  auto fbVector = CreateDataVector<int16_t>(constTensor.GetMemoryArea(), constTensor.GetNumBytes());
1867  flatbuffers::Offset<serializer::ShortData> flatBuffersData = serializer::CreateShortData(
1868  m_flatBufferBuilder,
1869  fbVector);
1870  fbPayload = flatBuffersData.o;
1871  break;
1872  }
 1873  case armnn::DataType::QSymmS8:
 1874  case armnn::DataType::QAsymmS8:
 1875  case armnn::DataType::QAsymmU8:
 1876  case armnn::DataType::Boolean:
 1877  default:
1878  {
1879  auto fbVector = CreateDataVector<int8_t>(constTensor.GetMemoryArea(), constTensor.GetNumBytes());
1880  flatbuffers::Offset<serializer::ByteData> flatBuffersData = serializer::CreateByteData(
1881  m_flatBufferBuilder,
1882  fbVector);
1883  fbPayload = flatBuffersData.o;
1884  }
1885  }
1886  flatbuffers::Offset<serializer::ConstTensor> flatBufferConstTensor = serializer::CreateConstTensor(
1887  m_flatBufferBuilder,
1888  CreateTensorInfo(tensorInfo),
 1889  GetFlatBufferConstTensorData(tensorInfo.GetDataType()),
 1890  fbPayload);
1891  return flatBufferConstTensor;
1892 }
1893 
1894 flatbuffers::Offset<armnnSerializer::FeatureCompatibilityVersions> SerializerStrategy::GetVersionTable()
1895 {
1896  flatbuffers::Offset<armnnSerializer::FeatureCompatibilityVersions> versionsTable =
 1897  serializer::CreateFeatureCompatibilityVersions(
 1898  m_flatBufferBuilder,
1899  1, // Binding ids scheme version
1900  1, // Weights layout scheme version
1901  1 // Constant tensors as inputs version
1902  );
1903  return versionsTable;
1904 }
1905 
1906 std::vector<fb::Offset<serializer::InputSlot>>
1907  SerializerStrategy::CreateInputSlots(const armnn::IConnectableLayer* layer)
1908 {
1909  std::vector<fb::Offset<serializer::InputSlot>> inputSlots;
1910 
1911  // Get the InputSlots
1912  for (unsigned int slotIndex = 0; slotIndex<layer->GetNumInputSlots(); ++slotIndex)
1913  {
1914  const IInputSlot& inputSlot = layer->GetInputSlot(slotIndex);
1915 
1916  // Get the Connection for the InputSlot
1917  const IOutputSlot* connection = inputSlot.GetConnection();
1918 
1919  // Create FlatBuffer Connection
1920  serializer::Connection conn(GetSerializedId(inputSlot.GetConnection()->GetOwningLayerGuid()),
1921  connection->CalculateIndexOnOwner());
1922  // Create FlatBuffer InputSlot
1923  inputSlots.push_back(serializer::CreateInputSlot(m_flatBufferBuilder, slotIndex, &conn));
1924  }
1925  return inputSlots;
1926 }
1927 
1928 std::vector<fb::Offset<serializer::OutputSlot>>
1929  SerializerStrategy::CreateOutputSlots(const armnn::IConnectableLayer* layer)
1930 {
1931  std::vector<fb::Offset<serializer::OutputSlot>> outputSlots;
1932 
1933  // Get the OutputSlots
1934  for (unsigned int slotIndex = 0; slotIndex < layer->GetNumOutputSlots(); ++slotIndex)
1935  {
1936  const IOutputSlot& outputSlot = layer->GetOutputSlot(slotIndex);
1937  const armnn::TensorInfo& tensorInfo = outputSlot.GetTensorInfo();
1938 
1939  // Create FlatBuffer Outputslot
1940  outputSlots.push_back(serializer::CreateOutputSlot(m_flatBufferBuilder,
1941  slotIndex,
1942  CreateTensorInfo(tensorInfo)));
1943  }
1944  return outputSlots;
1945 }
1946 
 1947 void SerializerStrategy::ExecuteStrategy(const armnn::IConnectableLayer* layer,
 1948  const BaseDescriptor& descriptor,
1949  const std::vector<armnn::ConstTensor>& constants,
1950  const char* name,
1951  const armnn::LayerBindingId id)
1952 {
1953  IgnoreUnused(constants);
1954 
1955  switch (layer->GetType())
1956  {
 1957  case armnn::LayerType::Activation :
 1958  {
1959  const armnn::ActivationDescriptor& layerDescriptor =
1960  static_cast<const armnn::ActivationDescriptor&>(descriptor);
1961  SerializeActivationLayer(layer, layerDescriptor, name);
1962  break;
1963  }
 1964  case armnn::LayerType::Addition :
 1965  {
1966  SerializeAdditionLayer(layer, name);
1967  break;
1968  }
 1969  case armnn::LayerType::ArgMinMax :
 1970  {
1971  const armnn::ArgMinMaxDescriptor& layerDescriptor =
1972  static_cast<const armnn::ArgMinMaxDescriptor&>(descriptor);
1973  SerializeArgMinMaxLayer(layer, layerDescriptor, name);
1974  break;
1975  }
 1976  case armnn::LayerType::BatchNormalization :
 1977  {
1978  const armnn::BatchNormalizationDescriptor& layerDescriptor =
1979  static_cast<const armnn::BatchNormalizationDescriptor&>(descriptor);
1980  SerializeBatchNormalizationLayer(layer,
1981  layerDescriptor,
1982  constants,
1983  name);
1984  break;
1985  }
 1986  case armnn::LayerType::BatchToSpaceNd :
 1987  {
1988  const armnn::BatchToSpaceNdDescriptor& layerDescriptor =
1989  static_cast<const armnn::BatchToSpaceNdDescriptor&>(descriptor);
1990  SerializeBatchToSpaceNdLayer(layer,
1991  layerDescriptor,
1992  name);
1993  break;
1994  }
1995  case armnn::LayerType::Cast :
1996  {
1997  SerializeCastLayer(layer, name);
1998  break;
1999  }
 2000  case armnn::LayerType::Comparison :
 2001  {
2002  const armnn::ComparisonDescriptor& layerDescriptor =
2003  static_cast<const armnn::ComparisonDescriptor&>(descriptor);
2004  SerializeComparisonLayer(layer,
2005  layerDescriptor,
2006  name);
2007  break;
2008  }
 2009  case armnn::LayerType::Concat :
 2010  {
2011  const armnn::ConcatDescriptor& layerDescriptor =
2012  static_cast<const armnn::ConcatDescriptor&>(descriptor);
2013  SerializeConcatLayer(layer,
2014  layerDescriptor,
2015  name);
2016  break;
2017  }
 2018  case armnn::LayerType::Constant :
 2019  {
2020  SerializeConstantLayer(layer,
2021  constants,
2022  name);
2023  break;
2024  }
 2025  case armnn::LayerType::Convolution2d :
 2026  {
2027  const armnn::Convolution2dDescriptor& layerDescriptor =
2028  static_cast<const armnn::Convolution2dDescriptor&>(descriptor);
2029  SerializeConvolution2dLayer(layer,
2030  layerDescriptor,
2031  constants,
2032  name);
2033  break;
2034  }
 2035  case armnn::LayerType::DepthToSpace :
 2036  {
2037  const armnn::DepthToSpaceDescriptor& layerDescriptor =
2038  static_cast<const armnn::DepthToSpaceDescriptor&>(descriptor);
2039  SerializeDepthToSpaceLayer(layer,
2040  layerDescriptor,
2041  name);
2042  break;
2043  }
 2044  case armnn::LayerType::DepthwiseConvolution2d :
 2045  {
2046  const armnn::DepthwiseConvolution2dDescriptor& layerDescriptor =
2047  static_cast<const armnn::DepthwiseConvolution2dDescriptor&>(descriptor);
2048  SerializeDepthwiseConvolution2dLayer(layer,
2049  layerDescriptor,
2050  constants,
2051  name);
2052  break;
2053  }
 2054  case armnn::LayerType::Dequantize :
 2055  {
2056  SerializeDequantizeLayer(layer,
2057  name);
2058  break;
2059  }
 2060  case armnn::LayerType::DetectionPostProcess :
 2061  {
2062  const armnn::DetectionPostProcessDescriptor& layerDescriptor =
2063  static_cast<const armnn::DetectionPostProcessDescriptor&>(descriptor);
2064  SerializeDetectionPostProcessLayer(layer, layerDescriptor, constants, name);
2065  break;
2066  }
 2067  case armnn::LayerType::Division :
 2068  {
2069  SerializeDivisionLayer(layer, name);
2070  break;
2071  }
 2072  case armnn::LayerType::ElementwiseUnary :
 2073  {
2074  const armnn::ElementwiseUnaryDescriptor& layerDescriptor =
2075  static_cast<const armnn::ElementwiseUnaryDescriptor&>(descriptor);
2076  SerializeElementwiseUnaryLayer(layer, layerDescriptor, name);
2077  break;
2078  }
2079  case armnn::LayerType::Fill :
2080  {
2081  const armnn::FillDescriptor& layerDescriptor =
2082  static_cast<const armnn::FillDescriptor&>(descriptor);
2083  SerializeFillLayer(layer, layerDescriptor, name);
2084  break;
2085  }
 2086  case armnn::LayerType::Floor :
 2087  {
2088  SerializeFloorLayer(layer, name);
2089  break;
2090  }
 2091  case armnn::LayerType::FullyConnected :
 2092  {
2093  const armnn::FullyConnectedDescriptor& layerDescriptor =
2094  static_cast<const armnn::FullyConnectedDescriptor&>(descriptor);
2095  SerializeFullyConnectedLayer(layer, layerDescriptor, name);
2096  break;
2097  }
 2098  case armnn::LayerType::Gather :
 2099  {
2100  const armnn::GatherDescriptor& layerDescriptor =
2101  static_cast<const armnn::GatherDescriptor&>(descriptor);
2102  SerializeGatherLayer(layer, layerDescriptor, name);
2103  break;
2104  }
 2105  case armnn::LayerType::Input :
 2106  {
2107  SerializeInputLayer(layer, id, name);
2108  break;
2109  }
 2110  case armnn::LayerType::InstanceNormalization :
 2111  {
2112  const armnn::InstanceNormalizationDescriptor& layerDescriptor =
2113  static_cast<const armnn::InstanceNormalizationDescriptor&>(descriptor);
2114  SerializeInstanceNormalizationLayer(layer, layerDescriptor, name);
2115  break;
2116  }
 2117  case armnn::LayerType::L2Normalization :
 2118  {
2119  const armnn::L2NormalizationDescriptor& layerDescriptor =
2120  static_cast<const armnn::L2NormalizationDescriptor&>(descriptor);
2121  SerializeL2NormalizationLayer(layer, layerDescriptor, name);
2122  break;
2123  }
 2124  case armnn::LayerType::LogicalBinary :
 2125  {
2126  const armnn::LogicalBinaryDescriptor& layerDescriptor =
2127  static_cast<const armnn::LogicalBinaryDescriptor&>(descriptor);
2128  SerializeLogicalBinaryLayer(layer, layerDescriptor, name);
2129  break;
2130  }
 2131  case armnn::LayerType::LogSoftmax :
 2132  {
2133  const armnn::LogSoftmaxDescriptor& layerDescriptor =
2134  static_cast<const armnn::LogSoftmaxDescriptor&>(descriptor);
2135  SerializeLogSoftmaxLayer(layer, layerDescriptor, name);
2136  break;
2137  }
2138  case armnn::LayerType::Lstm :
2139  {
2140  const armnn::LstmDescriptor& layerDescriptor =
2141  static_cast<const armnn::LstmDescriptor&>(descriptor);
2142  SerializeLstmLayer(layer, layerDescriptor, constants, name);
2143  break;
2144  }
 2145  case armnn::LayerType::QLstm :
 2146  {
2147  const armnn::QLstmDescriptor& layerDescriptor =
2148  static_cast<const armnn::QLstmDescriptor&>(descriptor);
2149  SerializeQLstmLayer(layer, layerDescriptor, constants, name);
2150  break;
2151  }
 2152  case armnn::LayerType::Maximum :
 2153  {
2154  SerializeMaximumLayer(layer, name);
2155  break;
2156  }
2157  case armnn::LayerType::Mean :
2158  {
2159  const armnn::MeanDescriptor& layerDescriptor =
2160  static_cast<const armnn::MeanDescriptor&>(descriptor);
2161  SerializeMeanLayer(layer, layerDescriptor, name);
2162  break;
2163  }
 2164  case armnn::LayerType::Merge :
 2165  {
2166  SerializeMergeLayer(layer, name);
2167  break;
2168  }
 2169  case armnn::LayerType::Minimum :
 2170  {
2171  SerializeMinimumLayer(layer, name);
2172  break;
2173  }
 2174  case armnn::LayerType::Multiplication :
 2175  {
2176  SerializeMultiplicationLayer(layer, name);
2177  break;
2178  }
 2179  case armnn::LayerType::Normalization :
 2180  {
2181  const armnn::NormalizationDescriptor& layerDescriptor =
2182  static_cast<const armnn::NormalizationDescriptor&>(descriptor);
2183  SerializeNormalizationLayer(layer, layerDescriptor, name);
2184  break;
2185  }
 2186  case armnn::LayerType::Output :
 2187  {
2188  SerializeOutputLayer(layer, id, name);
2189  break;
2190  }
2191  case armnn::LayerType::Pad :
2192  {
2193  const armnn::PadDescriptor& layerDescriptor =
2194  static_cast<const armnn::PadDescriptor&>(descriptor);
2195  SerializePadLayer(layer, layerDescriptor, name);
2196  break;
2197  }
 2198  case armnn::LayerType::Permute :
 2199  {
2200  const armnn::PermuteDescriptor& layerDescriptor =
2201  static_cast<const armnn::PermuteDescriptor&>(descriptor);
2202  SerializePermuteLayer(layer, layerDescriptor, name);
2203  break;
2204  }
 2205  case armnn::LayerType::Pooling2d :
 2206  {
2207  const armnn::Pooling2dDescriptor& layerDescriptor =
2208  static_cast<const armnn::Pooling2dDescriptor&>(descriptor);
2209  SerializePooling2dLayer(layer, layerDescriptor, name);
2210  break;
2211  }
 2212  case armnn::LayerType::Prelu :
 2213  {
2214  SerializePreluLayer(layer, name);
2215  break;
2216  }
 2217  case armnn::LayerType::Quantize :
 2218  {
2219  SerializeQuantizeLayer(layer, name);
2220  break;
2221  }
 2222  case armnn::LayerType::QuantizedLstm :
 2223  SerializeQuantizedLstmLayer(layer, constants, name);
2224  break;
 2225  case armnn::LayerType::Reshape :
 2226  {
2227  const armnn::ReshapeDescriptor &layerDescriptor =
2228  static_cast<const armnn::ReshapeDescriptor &>(descriptor);
2229  SerializeReshapeLayer(layer, layerDescriptor, name);
2230  break;
2231  }
 2232  case armnn::LayerType::Rank :
 2233  {
2234  SerializeRankLayer(layer, name);
2235  break;
2236  }
 2237  case armnn::LayerType::Reduce :
 2238  {
2239  const armnn::ReduceDescriptor& layerDescriptor =
2240  static_cast<const armnn::ReduceDescriptor&>(descriptor);
2241  SerializeReduceLayer(layer, layerDescriptor, name);
2242  break;
2243  }
 2244  case armnn::LayerType::Resize :
 2245  {
2246  const armnn::ResizeDescriptor& layerDescriptor =
2247  static_cast<const armnn::ResizeDescriptor&>(descriptor);
2248  SerializeResizeLayer(layer, layerDescriptor, name);
2249  break;
2250  }
 2251  case armnn::LayerType::Shape :
 2252  {
2253  SerializeShapeLayer(layer, name);
2254  break;
2255  }
 2256  case armnn::LayerType::Slice :
 2257  {
2258  const armnn::SliceDescriptor& layerDescriptor =
2259  static_cast<const armnn::SliceDescriptor&>(descriptor);
2260  SerializeSliceLayer(layer, layerDescriptor, name);
2261  break;
2262  }
 2263  case armnn::LayerType::Softmax :
 2264  {
2265  const armnn::SoftmaxDescriptor& layerDescriptor =
2266  static_cast<const armnn::SoftmaxDescriptor&>(descriptor);
2267  SerializeSoftmaxLayer(layer, layerDescriptor, name);
2268  break;
2269  }
 2270  case armnn::LayerType::SpaceToBatchNd :
 2271  {
2272  const armnn::SpaceToBatchNdDescriptor& layerDescriptor =
2273  static_cast<const armnn::SpaceToBatchNdDescriptor&>(descriptor);
2274  SerializeSpaceToBatchNdLayer(layer, layerDescriptor, name);
2275  break;
2276  }
 2277  case armnn::LayerType::SpaceToDepth :
 2278  {
2279  const armnn::SpaceToDepthDescriptor& layerDescriptor =
2280  static_cast<const armnn::SpaceToDepthDescriptor&>(descriptor);
2281  SerializeSpaceToDepthLayer(layer, layerDescriptor, name);
2282  break;
2283  }
 2284  case armnn::LayerType::Splitter :
 2285  {
2286  const armnn::SplitterDescriptor& layerDescriptor =
2287  static_cast<const armnn::SplitterDescriptor&>(descriptor);
2288  SerializeSplitterLayer(layer, layerDescriptor, name);
2289  break;
2290  }
 2291  case armnn::LayerType::Stack :
 2292  {
2293  const armnn::StackDescriptor& layerDescriptor =
2294  static_cast<const armnn::StackDescriptor&>(descriptor);
2295  SerializeStackLayer(layer, layerDescriptor, name);
2296  break;
2297  }
 2298  case armnn::LayerType::StandIn :
 2299  {
2300  const armnn::StandInDescriptor& layerDescriptor =
2301  static_cast<const armnn::StandInDescriptor&>(descriptor);
2302  SerializeStandInLayer(layer, layerDescriptor, name);
2303  break;
2304  }
 2305  case armnn::LayerType::StridedSlice :
 2306  {
2307  const armnn::StridedSliceDescriptor& layerDescriptor =
2308  static_cast<const armnn::StridedSliceDescriptor&>(descriptor);
2309  SerializeStridedSliceLayer(layer, layerDescriptor, name);
2310  break;
2311  }
 2312  case armnn::LayerType::Subtraction :
 2313  {
2314  SerializeSubtractionLayer(layer, name);
2315  break;
2316  }
 2317  case armnn::LayerType::Switch :
 2318  {
2319  SerializeSwitchLayer(layer, name);
2320  break;
2321  }
 2322  case armnn::LayerType::Transpose :
 2323  {
2324  const armnn::TransposeDescriptor& layerDescriptor =
2325  static_cast<const armnn::TransposeDescriptor&>(descriptor);
2326  SerializeTransposeLayer(layer, layerDescriptor, name);
2327  break;
2328  }
 2329  case armnn::LayerType::TransposeConvolution2d :
 2330  {
2331  const armnn::TransposeConvolution2dDescriptor& layerDescriptor =
2332  static_cast<const armnn::TransposeConvolution2dDescriptor&>(descriptor);
2333  SerializeTransposeConvolution2dLayer(layer, layerDescriptor, constants, name);
2334  break;
2335  }
 2336  case armnn::LayerType::UnidirectionalSequenceLstm :
 2337  {
2338  const armnn::UnidirectionalSequenceLstmDescriptor& layerDescriptor =
2339  static_cast<const armnn::UnidirectionalSequenceLstmDescriptor&>(descriptor);
2340  SerializeUnidirectionalSequenceLstmLayer(layer, layerDescriptor, constants, name);
2341  break;
2342  }
2343  default:
2344  {
 2345  throw InvalidArgumentException(
 2346  fmt::format("A layer of unknown type was given to the serializer. Layer name: {}; Layer Id: {}",
2347  layer->GetName(),
2348  id));
2349  }
2350  }
2351 }
2352 
 2353 void ISerializer::SerializerImpl::Serialize(const armnn::INetwork& inNetwork)
 2354 {
2355  // Iterate through to network
2356  inNetwork.ExecuteStrategy(m_SerializerStrategy);
2357  flatbuffers::FlatBufferBuilder& fbBuilder = m_SerializerStrategy.GetFlatBufferBuilder();
2358 
2359  // Create FlatBuffer SerializedGraph
2360  auto serializedGraph = serializer::CreateSerializedGraph(
2361  fbBuilder,
2362  fbBuilder.CreateVector(m_SerializerStrategy.GetSerializedLayers()),
2363  fbBuilder.CreateVector(m_SerializerStrategy.GetInputIds()),
2364  fbBuilder.CreateVector(m_SerializerStrategy.GetOutputIds()),
2365  m_SerializerStrategy.GetVersionTable());
2366 
2367  // Serialize the graph
2368  fbBuilder.Finish(serializedGraph);
2369 }
2370 
2371 
 2372 bool ISerializer::SerializerImpl::SaveSerializedToStream(std::ostream& stream)
 2373 {
2374  flatbuffers::FlatBufferBuilder& fbBuilder = m_SerializerStrategy.GetFlatBufferBuilder();
2375 
2376  auto bytesToWrite = armnn::numeric_cast<std::streamsize>(fbBuilder.GetSize());
2377  stream.write(reinterpret_cast<const char*>(fbBuilder.GetBufferPointer()), bytesToWrite);
2378  return !stream.bad();
2379 }
2380 
2381 } // namespace armnnSerializer
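
 // For reference, a minimal usage sketch of the public API implemented in this file. It assumes the
 // usual armnnSerializer/ISerializer.hpp public header and an already-constructed armnn::INetwork;
 // SaveNetworkToFile is a hypothetical client-side helper, not part of ArmNN.

 #include <armnnSerializer/ISerializer.hpp>
 #include <fstream>
 #include <string>

 bool SaveNetworkToFile(const armnn::INetwork& network, const std::string& path)
 {
     auto serializer = armnnSerializer::ISerializer::Create(); // ISerializerPtr with custom deleter
     serializer->Serialize(network);                           // builds the SerializedGraph in memory
     std::ofstream file(path, std::ios::binary);
     return serializer->SaveSerializedToStream(file);          // writes the FlatBuffer; false on stream error
 }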
uint32_t m_PadBottom
Padding bottom value in the height dimension.
bool m_BiasEnabled
Enable/disable bias.
flatbuffers::Offset< LongData > CreateLongData(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< int64_t >> data=0)
float m_Eps
Used to avoid dividing by zero.
virtual unsigned int GetNumOutputSlots() const =0
Returns the number of connectable output slots.
bool m_HalfPixelCenters
Half Pixel Centers.
armnnSerializer::UnaryOperation GetFlatBufferUnaryOperation(armnn::UnaryOperation comparisonOperation)
bool m_ProjectionEnabled
Enable/disable the projection layer.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
flatbuffers::Offset< ReshapeDescriptor > CreateReshapeDescriptor(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> targetShape=0)
bool m_AlignCorners
Aligned corners.
flatbuffers::Offset< ReduceLayer > CreateReduceLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::ReduceDescriptor > descriptor=0)
UnaryOperation m_Operation
Specifies the elementwiseUnary operation to execute.
uint32_t m_Axis
0-based axis along which to stack the input tensors.
flatbuffers::Offset< OutputSlot > CreateOutputSlot(flatbuffers::FlatBufferBuilder &_fbb, uint32_t index=0, flatbuffers::Offset< armnnSerializer::TensorInfo > tensorInfo=0)
A ViewsDescriptor for the SplitterLayer.
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:61
flatbuffers::Offset< DepthwiseConvolution2dDescriptor > CreateDepthwiseConvolution2dDescriptor(flatbuffers::FlatBufferBuilder &_fbb, uint32_t padLeft=0, uint32_t padRight=0, uint32_t padTop=0, uint32_t padBottom=0, uint32_t strideX=0, uint32_t strideY=0, uint32_t dilationX=1, uint32_t dilationY=1, bool biasEnabled=false, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NCHW)
float m_ScaleW
Center size encoding scale weight.
uint32_t m_PadBottom
Padding bottom value in the height dimension.
bool m_BiasEnabled
Enable/disable bias.
void ExecuteStrategy(IStrategy &strategy) const
Definition: Network.cpp:520
virtual unsigned int GetNumInputSlots() const =0
Returns the number of connectable input slots.
float m_K
Kappa value used for the across channel normalization equation.
int m_Axis
Scalar, defaulted to the last index (-1), specifying the dimension the activation will be performed o...
A TransposeConvolution2dDescriptor for the TransposeConvolution2dLayer.
flatbuffers::Offset< AbsLayer > CreateAbsLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
flatbuffers::Offset< LstmLayer > CreateLstmLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::LstmDescriptor > descriptor=0, flatbuffers::Offset< armnnSerializer::LstmInputParams > inputParams=0)
const TensorShape & GetShape() const
Definition: Tensor.hpp:191
flatbuffers::Offset< L2NormalizationLayer > CreateL2NormalizationLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::L2NormalizationDescriptor > descriptor=0)
uint32_t m_PadBottom
Padding bottom value in the height dimension.
uint32_t m_PadLeft
Padding left value in the width dimension.
flatbuffers::Offset< TransposeConvolution2dDescriptor > CreateTransposeConvolution2dDescriptor(flatbuffers::FlatBufferBuilder &_fbb, uint32_t padLeft=0, uint32_t padRight=0, uint32_t padTop=0, uint32_t padBottom=0, uint32_t strideX=0, uint32_t strideY=0, bool biasEnabled=false, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NCHW)
float m_ClippingThresProj
Clipping threshold value for the projection.
int32_t m_ShrinkAxisMask
Shrink axis mask value. If set, the nth specification shrinks the dimensionality by 1...
A ReshapeDescriptor for the ReshapeLayer.
flatbuffers::Offset< ResizeDescriptor > CreateResizeDescriptor(flatbuffers::FlatBufferBuilder &_fbb, uint32_t targetHeight=0, uint32_t targetWidth=0, armnnSerializer::ResizeMethod method=armnnSerializer::ResizeMethod_NearestNeighbor, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NHWC, bool alignCorners=false, bool halfPixelCenters=false)
flatbuffers::Offset< FillLayer > CreateFillLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::FillDescriptor > descriptor=0)
std::vector< int > m_Begin
Begin values for the input that will be sliced.
flatbuffers::Offset< SoftmaxDescriptor > CreateSoftmaxDescriptor(flatbuffers::FlatBufferBuilder &_fbb, float beta=0.0f)
float m_PadValue
Optional value to use for padding, defaults to 0.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
uint32_t GetNumDimensions() const
Get the number of dimensions.
A ComparisonDescriptor for the ComparisonLayer.
Definition: Descriptors.hpp:78
void Serialize(const armnn::INetwork &inNetwork)
Serializes the network to ArmNN SerializedGraph.
float m_ScaleX
Center size encoding scale x.
TensorShape m_InputShape
Required shape of all input tensors.
uint32_t m_TargetWidth
Target width value.
bool m_TransposeWeightMatrix
Enable/disable transpose weight matrix.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Dimensionality GetDimensionality() const
Function that returns the tensor type.
Definition: Tensor.hpp:92
flatbuffers::Offset< GatherLayer > CreateGatherLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::GatherDescriptor > descriptor=0)
flatbuffers::Offset< RankLayer > CreateRankLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
bool HasPerAxisQuantization() const
Definition: Tensor.cpp:448
uint32_t m_PoolWidth
Pooling width value.
bool m_PeepholeEnabled
Enable/disable peephole.
armnnSerializer::OutputShapeRounding GetFlatBufferOutputShapeRounding(armnn::OutputShapeRounding outputShapeRounding)
flatbuffers::Offset< TransposeLayer > CreateTransposeLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::TransposeDescriptor > descriptor=0)
A Convolution2dDescriptor for the Convolution2dLayer.
float m_Alpha
Alpha value for the normalization equation.
uint32_t m_PadLeft
Padding left value in the width dimension.
flatbuffers::Offset< ComparisonLayer > CreateComparisonLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::ComparisonDescriptor > descriptor=0)
bool m_KeepDims
if true then output shape has no change.
float m_HiddenStateScale
Hidden State quantization scale.
bool m_BiasEnabled
Enable/disable bias.
flatbuffers::Offset< ConstTensor > CreateConstTensor(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::TensorInfo > info=0, armnnSerializer::ConstTensorData data_type=armnnSerializer::ConstTensorData_NONE, flatbuffers::Offset< void > data=0)
Optional< unsigned int > GetQuantizationDim() const
Definition: Tensor.cpp:496
flatbuffers::Offset< QuantizeLayer > CreateQuantizeLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
float m_OutputIntermediateScale
Output intermediate quantization scale.
ResizeMethod m_Method
The Interpolation method to use (Bilinear, NearestNeighbor).
float m_Gamma
Gamma, the scale scalar value applied for the normalized tensor. Defaults to 1.0. ...
float m_Beta
Exponentiation value.
flatbuffers::Offset< InputSlot > CreateInputSlot(flatbuffers::FlatBufferBuilder &_fbb, uint32_t index=0, const armnnSerializer::Connection *connection=0)
std::vector< unsigned int > m_Size
Size of the slice in each dimension.
static ISerializer * CreateRaw()
Definition: Serializer.cpp:30
flatbuffers::Offset< SpaceToDepthDescriptor > CreateSpaceToDepthDescriptor(flatbuffers::FlatBufferBuilder &_fbb, uint32_t blockSize=0, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NHWC)
float m_Eps
Value to add to the variance. Used to avoid dividing by zero.
PaddingMethod m_PaddingMethod
The padding method to be used. (Exclude, IgnoreValue).
ArgMinMaxFunction m_Function
Specify if the function is to find Min or Max.
Definition: Descriptors.hpp:70
uint32_t m_DetectionsPerClass
Detections per classes, used in Regular NMS.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
flatbuffers::Offset< QuantizedLstmLayer > CreateQuantizedLstmLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::QuantizedLstmInputParams > inputParams=0)
flatbuffers::Offset< TransposeDescriptor > CreateTransposeDescriptor(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> dimMappings=0)
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
serializer::ActivationFunction GetFlatBufferActivationFunction(armnn::ActivationFunction function)
Definition: Serializer.cpp:55
Main network class which provides the interface for building up a neural network. ...
Definition: INetwork.hpp:177
flatbuffers::Offset< DetectionPostProcessDescriptor > CreateDetectionPostProcessDescriptor(flatbuffers::FlatBufferBuilder &_fbb, uint32_t maxDetections=0, uint32_t maxClassesPerDetection=0, uint32_t detectionsPerClass=0, float nmsScoreThreshold=0.0f, float nmsIouThreshold=0.0f, uint32_t numClasses=0, bool useRegularNms=false, float scaleX=0.0f, float scaleY=0.0f, float scaleW=0.0f, float scaleH=0.0f)
armnnSerializer::NormalizationAlgorithmMethod GetFlatBufferNormalizationAlgorithmMethod(armnn::NormalizationAlgorithmMethod normalizationAlgorithmMethod)
uint32_t m_PadTop
Padding top value in the height dimension.
flatbuffers::Offset< AnyLayer > CreateAnyLayer(flatbuffers::FlatBufferBuilder &_fbb, armnnSerializer::Layer layer_type=armnnSerializer::Layer_NONE, flatbuffers::Offset< void > layer=0)
flatbuffers::Offset< DepthwiseConvolution2dLayer > CreateDepthwiseConvolution2dLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::DepthwiseConvolution2dDescriptor > descriptor=0, flatbuffers::Offset< armnnSerializer::ConstTensor > weights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > biases=0)
A LogicalBinaryDescriptor for the LogicalBinaryLayer.
flatbuffers::Offset< MergeLayer > CreateMergeLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
uint32_t m_PadRight
Padding right value in the width dimension.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
MemoryType GetMemoryArea() const
Definition: Tensor.hpp:305
std::vector< std::pair< unsigned int, unsigned int > > m_PadList
Specifies the padding for input dimension.
uint32_t GetNumViews() const
Get the number of views.
ReduceOperation m_ReduceOperation
Specifies the reduction operation to execute.
flatbuffers::Offset< QLstmInputParams > CreateQLstmInputParams(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::ConstTensor > inputToForgetWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > inputToCellWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > inputToOutputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > recurrentToForgetWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > recurrentToCellWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > recurrentToOutputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > forgetGateBias=0, flatbuffers::Offset< armnnSerializer::ConstTensor > cellBias=0, flatbuffers::Offset< armnnSerializer::ConstTensor > outputGateBias=0, flatbuffers::Offset< armnnSerializer::ConstTensor > inputToInputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > recurrentToInputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > inputGateBias=0, flatbuffers::Offset< armnnSerializer::ConstTensor > projectionWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > projectionBias=0, flatbuffers::Offset< armnnSerializer::ConstTensor > cellToInputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > cellToForgetWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > cellToOutputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > inputLayerNormWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > forgetLayerNormWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > cellLayerNormWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > outputLayerNormWeights=0)
bool m_TimeMajor
Enable/disable time major.
Copyright (c) 2021 ARM Limited and Contributors.
void IgnoreUnused(Ts &&...)
flatbuffers::Offset< TensorInfo > CreateTensorInfo(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> dimensions=0, armnnSerializer::DataType dataType=armnnSerializer::DataType_Float16, float quantizationScale=1.0f, int32_t quantizationOffset=0, flatbuffers::Offset< flatbuffers::Vector< float >> quantizationScales=0, uint32_t quantizationDim=0, uint32_t dimensionality=1, flatbuffers::Offset< flatbuffers::Vector< uint8_t >> dimensionSpecificity=0, bool isConstant=false)
uint32_t m_PadBottom
Padding bottom value in the height dimension.
int32_t m_BeginMask
Begin mask value.
flatbuffers::Offset< FullyConnectedDescriptor > CreateFullyConnectedDescriptor(flatbuffers::FlatBufferBuilder &_fbb, bool biasEnabled=false, bool transposeWeightsMatrix=false, bool constantWeights=true)
flatbuffers::Offset< TransposeConvolution2dLayer > CreateTransposeConvolution2dLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::TransposeConvolution2dDescriptor > descriptor=0, flatbuffers::Offset< armnnSerializer::ConstTensor > weights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > biases=0)
SizeType GetSize() const
Definition: Types.hpp:275
uint32_t m_DilationY
Dilation along y axis.
int32_t m_EndMask
End mask value.
A SpaceToDepthDescriptor for the SpaceToDepthLayer.
flatbuffers::Offset< PreluLayer > CreatePreluLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
std::vector< std::pair< unsigned int, unsigned int > > m_PadList
Specifies the padding values for the input dimension: heightPad{top, bottom} widthPad{left, right}.
std::vector< float > GetQuantizationScales() const
Definition: Tensor.cpp:453
flatbuffers::Offset< StandInDescriptor > CreateStandInDescriptor(flatbuffers::FlatBufferBuilder &_fbb, uint32_t numInputs=0, uint32_t numOutputs=0)
bool SaveSerializedToStream(std::ostream &stream)
Serializes the SerializedGraph to the stream.
Definition: Serializer.cpp:50
uint32_t m_DilationY
Dilation factor value for height dimension.
armnnSerializer::ConstTensorData GetFlatBufferConstTensorData(armnn::DataType dataType)
bool GetDimensionSpecificity(unsigned int i) const
Gets information about if the dimension size has been specified or not.
Definition: Tensor.cpp:211
LogicalBinaryOperation m_Operation
Specifies the logical operation to execute.
A BatchToSpaceNdDescriptor for the BatchToSpaceNdLayer.
void ExecuteStrategy(const armnn::IConnectableLayer *layer, const armnn::BaseDescriptor &descriptor, const std::vector< armnn::ConstTensor > &constants, const char *name, const armnn::LayerBindingId id) override
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
PermutationVector m_DimMappings
Indicates how to translate tensor elements from a given source into the target destination, when source and target potentially have different memory layouts e.g.
flatbuffers::Offset< MultiplicationLayer > CreateMultiplicationLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
flatbuffers::Offset< DepthToSpaceLayer > CreateDepthToSpaceLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::DepthToSpaceDescriptor > descriptor=0)
flatbuffers::Offset< InstanceNormalizationLayer > CreateInstanceNormalizationLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::InstanceNormalizationDescriptor > descriptor=0)
armnnSerializer::ReduceOperation GetFlatBufferReduceOperation(armnn::ReduceOperation reduceOperation)
int LayerBindingId
Type of identifiers for bindable layers (inputs, outputs).
Definition: Types.hpp:244
flatbuffers::Offset< SliceLayer > CreateSliceLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::SliceDescriptor > descriptor=0)
armnnSerializer::DataType GetFlatBufferDataType(armnn::DataType dataType)
uint32_t m_NumOutputs
Number of output tensors.
NormalizationAlgorithmMethod m_NormMethodType
Normalization method algorithm to use (LocalBrightness, LocalContrast).
flatbuffers::Offset< Convolution2dDescriptor > CreateConvolution2dDescriptor(flatbuffers::FlatBufferBuilder &_fbb, uint32_t padLeft=0, uint32_t padRight=0, uint32_t padTop=0, uint32_t padBottom=0, uint32_t strideX=0, uint32_t strideY=0, uint32_t dilationX=1, uint32_t dilationY=1, bool biasEnabled=false, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NCHW)
flatbuffers::Offset< InputLayer > CreateInputLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::BindableLayerBase > base=0)
A ResizeDescriptor for the ResizeLayer.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
uint32_t m_MaxClassesPerDetection
Maximum numbers of classes per detection, used in Fast NMS.
Base class for all descriptors.
Definition: Descriptors.hpp:22
std::vector< unsigned int > m_Axis
Values for the dimensions to reduce.
A StackDescriptor for the StackLayer.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
flatbuffers::Offset< ShortData > CreateShortData(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< int16_t >> data=0)
serializer::ArgMinMaxFunction GetFlatBufferArgMinMaxFunction(armnn::ArgMinMaxFunction function)
Definition: Serializer.cpp:86
TensorShape m_TargetShape
Target shape value.
bool SaveSerializedToStream(std::ostream &stream)
Serializes the SerializedGraph to the stream.
flatbuffers::Offset< ConcatLayer > CreateConcatLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::OriginsDescriptor > descriptor=0)
uint32_t m_PoolHeight
Pooling height value.
uint32_t m_PadTop
Padding top value in the height dimension.
uint32_t m_MaxDetections
Maximum numbers of detections.
A PadDescriptor for the PadLayer.
flatbuffers::Offset< SubtractionLayer > CreateSubtractionLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
flatbuffers::Offset< BindableLayerBase > CreateBindableLayerBase(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, int32_t layerBindingId=0)
const uint32_t * GetViewOrigin(uint32_t idx) const
Return the view origin at the int value idx.
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
flatbuffers::Offset< ArgMinMaxLayer > CreateArgMinMaxLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::ArgMinMaxDescriptor > descriptor=0)
armnnSerializer::NormalizationAlgorithmChannel GetFlatBufferNormalizationAlgorithmChannel(armnn::NormalizationAlgorithmChannel normalizationAlgorithmChannel)
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
flatbuffers::Offset< QLstmDescriptor > CreateQLstmDescriptor(flatbuffers::FlatBufferBuilder &_fbb, bool cifgEnabled=true, bool peepholeEnabled=false, bool projectionEnabled=false, bool layerNormEnabled=false, float cellClip=0.0f, float projectionClip=0.0f, float inputIntermediateScale=0.0f, float forgetIntermediateScale=0.0f, float cellIntermediateScale=0.0f, float outputIntermediateScale=0.0f, int32_t hiddenStateZeroPoint=0, float hiddenStateScale=0.0f)
bool m_LayerNormEnabled
Enable/disable layer normalization.
flatbuffers::Offset< GreaterLayer > CreateGreaterLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
float m_NmsIouThreshold
Intersection over union threshold.
flatbuffers::Offset< ReshapeLayer > CreateReshapeLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::ReshapeDescriptor > descriptor=0)
armnnSerializer::LogicalBinaryOperation GetFlatBufferLogicalBinaryOperation(armnn::LogicalBinaryOperation logicalBinaryOperation)
flatbuffers::Offset< ArgMinMaxDescriptor > CreateArgMinMaxDescriptor(flatbuffers::FlatBufferBuilder &_fbb, armnnSerializer::ArgMinMaxFunction argMinMaxFunction=armnnSerializer::ArgMinMaxFunction_Min, int32_t axis=0)
An LstmDescriptor for the LstmLayer.
uint32_t m_PadRight
Padding right value in the width dimension.
flatbuffers::Offset< AdditionLayer > CreateAdditionLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
uint32_t m_DilationX
Dilation factor value for width dimension.
uint32_t m_PadTop
Padding top value in the height dimension.
flatbuffers::Offset< L2NormalizationDescriptor > CreateL2NormalizationDescriptor(flatbuffers::FlatBufferBuilder &_fbb, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NCHW, float eps=1e-12f)
std::vector< unsigned int > m_Begin
Beginning indices of the slice in each dimension.
int32_t m_NewAxisMask
New axis mask value.
flatbuffers::Offset< MinimumLayer > CreateMinimumLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
bool m_KeepDims
Enable/disable keep dimensions. If true, then the reduced dimensions that are of length 1 are kept...
flatbuffers::Offset< ByteData > CreateByteData(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< int8_t >> data=0)
std::vector< unsigned int > m_BlockShape
Block shape values.
flatbuffers::Offset< FeatureCompatibilityVersions > CreateFeatureCompatibilityVersions(flatbuffers::FlatBufferBuilder &_fbb, uint32_t bindingIdsScheme=0, uint32_t weightsLayoutScheme=0, uint32_t constantTensorsAsInputs=0)
float m_Eps
Epsilon, small scalar value added to variance to avoid dividing by zero. Defaults to 1e-12f...
An output connection slot for a layer.
Definition: INetwork.hpp:37
flatbuffers::Offset< DepthToSpaceDescriptor > CreateDepthToSpaceDescriptor(flatbuffers::FlatBufferBuilder &_fbb, uint32_t blockSize=0, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NHWC)
A L2NormalizationDescriptor for the L2NormalizationLayer.
int32_t GetQuantizationOffset() const
Definition: Tensor.cpp:480
An ArgMinMaxDescriptor for ArgMinMaxLayer.
Definition: Descriptors.hpp:56
float GetQuantizationScale() const
Definition: Tensor.cpp:463
flatbuffers::Offset< LstmInputParams > CreateLstmInputParams(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::ConstTensor > inputToForgetWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > inputToCellWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > inputToOutputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > recurrentToForgetWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > recurrentToCellWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > recurrentToOutputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > forgetGateBias=0, flatbuffers::Offset< armnnSerializer::ConstTensor > cellBias=0, flatbuffers::Offset< armnnSerializer::ConstTensor > outputGateBias=0, flatbuffers::Offset< armnnSerializer::ConstTensor > inputToInputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > recurrentToInputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > cellToInputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > inputGateBias=0, flatbuffers::Offset< armnnSerializer::ConstTensor > projectionWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > projectionBias=0, flatbuffers::Offset< armnnSerializer::ConstTensor > cellToForgetWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > cellToOutputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > inputLayerNormWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > forgetLayerNormWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > cellLayerNormWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > outputLayerNormWeights=0)
DataType GetDataType() const
Definition: Tensor.hpp:198
An OriginsDescriptor for the ConcatLayer.
A ReduceDescriptor for the REDUCE operators.
float m_ProjectionClip
Clipping threshold value for the projection.
flatbuffers::Offset< CastLayer > CreateCastLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
flatbuffers::Offset< LayerBase > CreateLayerBase(flatbuffers::FlatBufferBuilder &_fbb, uint32_t index=0, flatbuffers::Offset< flatbuffers::String > layerName=0, armnnSerializer::LayerType layerType=armnnSerializer::LayerType_Addition, flatbuffers::Offset< flatbuffers::Vector< flatbuffers::Offset< armnnSerializer::InputSlot >>> inputSlots=0, flatbuffers::Offset< flatbuffers::Vector< flatbuffers::Offset< armnnSerializer::OutputSlot >>> outputSlots=0)
flatbuffers::Offset< ShapeLayer > CreateShapeLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
A FullyConnectedDescriptor for the FullyConnectedLayer.
int32_t m_EllipsisMask
Ellipsis mask value.
virtual LayerGuid GetGuid() const =0
Returns the unique id of the layer.
bool m_BiasEnabled
Enable/disable bias.
A tensor defined by a TensorInfo (shape and data type) and an immutable backing store.
Definition: Tensor.hpp:327
flatbuffers::Offset< QuantizedLstmInputParams > CreateQuantizedLstmInputParams(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::ConstTensor > inputToInputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > inputToForgetWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > inputToCellWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > inputToOutputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > recurrentToInputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > recurrentToForgetWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > recurrentToCellWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > recurrentToOutputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > inputGateBias=0, flatbuffers::Offset< armnnSerializer::ConstTensor > forgetGateBias=0, flatbuffers::Offset< armnnSerializer::ConstTensor > cellBias=0, flatbuffers::Offset< armnnSerializer::ConstTensor > outputGateBias=0)
flatbuffers::Offset< ReduceDescriptor > CreateReduceDescriptor(flatbuffers::FlatBufferBuilder &_fbb, bool keepDims=false, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> axis=0, armnnSerializer::ReduceOperation reduceOperation=armnnSerializer::ReduceOperation_Sum)
flatbuffers::Offset< StackDescriptor > CreateStackDescriptor(flatbuffers::FlatBufferBuilder &_fbb, uint32_t axis=0, uint32_t numInputs=0, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> inputShape=0)
flatbuffers::Offset< BatchToSpaceNdDescriptor > CreateBatchToSpaceNdDescriptor(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> blockShape=0, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> crops=0, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NHWC)
float m_InputIntermediateScale
Input intermediate quantization scale.
flatbuffers::Offset< PadDescriptor > CreatePadDescriptor(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> padList=0, float padValue=0.0f)
uint32_t m_TargetWidth
Target width value.
flatbuffers::Offset< SplitterLayer > CreateSplitterLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::ViewsDescriptor > descriptor=0)
A GatherDescriptor for the GatherLayer.
bool m_PeepholeEnabled
Enable/disable peephole.
uint32_t m_NumClasses
Number of classes.
bool m_HalfPixelCenters
Half Pixel Centers.
flatbuffers::Offset< OutputLayer > CreateOutputLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::BindableLayerBase > base=0)
void Serialize(const armnn::INetwork &inNetwork)
Serializes the network to ArmNN SerializedGraph.
Definition: Serializer.cpp:45
flatbuffers::Offset< SoftmaxLayer > CreateSoftmaxLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::SoftmaxDescriptor > descriptor=0)
flatbuffers::Offset< FillDescriptor > CreateFillDescriptor(flatbuffers::FlatBufferBuilder &_fbb, float value=0.0f)
uint32_t m_PadTop
Padding top value in the height dimension.
A StandInDescriptor for the StandIn layer.
A QLstmDescriptor for the QLstmLayer.
flatbuffers::Offset< StridedSliceLayer > CreateStridedSliceLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::StridedSliceDescriptor > descriptor=0)
virtual unsigned int CalculateIndexOnOwner() const =0
flatbuffers::Offset< LogSoftmaxDescriptor > CreateLogSoftmaxDescriptor(flatbuffers::FlatBufferBuilder &_fbb, float beta=1.0f, int32_t axis=-1)
bool m_UseRegularNms
Use Regular NMS.
flatbuffers::Offset< RsqrtLayer > CreateRsqrtLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
flatbuffers::Offset< MeanLayer > CreateMeanLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::MeanDescriptor > descriptor=0)
std::vector< unsigned int > m_BlockShape
Block shape value.
std::vector< int > m_Stride
Stride values for the input that will be sliced.
An ActivationDescriptor for the ActivationLayer.
Definition: Descriptors.hpp:25
flatbuffers::Offset< ActivationLayer > CreateActivationLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::ActivationDescriptor > descriptor=0)
flatbuffers::Offset< SpaceToDepthLayer > CreateSpaceToDepthLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::SpaceToDepthDescriptor > descriptor=0)
flatbuffers::Offset< SliceDescriptor > CreateSliceDescriptor(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> begin=0, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> size=0)
const TensorInfo & GetInfo() const
Definition: Tensor.hpp:295
min(a, max(b, input)) ReLu1 & ReLu6.
flatbuffers::Offset< BatchNormalizationLayer > CreateBatchNormalizationLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::BatchNormalizationDescriptor > descriptor=0, flatbuffers::Offset< armnnSerializer::ConstTensor > mean=0, flatbuffers::Offset< armnnSerializer::ConstTensor > variance=0, flatbuffers::Offset< armnnSerializer::ConstTensor > beta=0, flatbuffers::Offset< armnnSerializer::ConstTensor > gamma=0)
flatbuffers::Offset< BatchNormalizationDescriptor > CreateBatchNormalizationDescriptor(flatbuffers::FlatBufferBuilder &_fbb, float eps=0.0f, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NHWC)
uint32_t m_TargetHeight
Target height value.
uint32_t m_NumInputs
Number of input tensors.
flatbuffers::Offset< GatherDescriptor > CreateGatherDescriptor(flatbuffers::FlatBufferBuilder &_fbb, int32_t axis=0)
flatbuffers::Offset< ActivationDescriptor > CreateActivationDescriptor(flatbuffers::FlatBufferBuilder &_fbb, armnnSerializer::ActivationFunction activationFunction=armnnSerializer::ActivationFunction_Sigmoid, float a=0.0f, float b=0.0f)
uint32_t m_TargetHeight
Target height value.
uint32_t m_ActivationFunc
The activation function to use.
A SliceDescriptor for the SliceLayer.
flatbuffers::Offset< NormalizationLayer > CreateNormalizationLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::NormalizationDescriptor > descriptor=0)
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
flatbuffers::Offset< ViewsDescriptor > CreateViewsDescriptor(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::OriginsDescriptor > origins=0, flatbuffers::Offset< flatbuffers::Vector< flatbuffers::Offset< armnnSerializer::UintVector >>> viewSizes=0)
virtual LayerType GetType() const =0
Returns the armnn::LayerType of this layer.
flatbuffers::Offset< PermuteDescriptor > CreatePermuteDescriptor(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> dimMappings=0)
float m_ClippingThresCell
Clipping threshold value for the cell state.
unsigned int m_BlockSize
Scalar specifying the input block size. It must be >= 1.
flatbuffers::Offset< MeanDescriptor > CreateMeanDescriptor(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> axis=0, bool keepDims=false)
const uint32_t * GetViewOrigin(uint32_t idx) const
Get the view origin at index idx.
flatbuffers::Offset< StandInLayer > CreateStandInLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::StandInDescriptor > descriptor=0)
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
float m_ForgetIntermediateScale
Forget intermediate quantization scale.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
float m_Beta
Beta, the offset scalar value applied for the normalized tensor. Defaults to 1.0. ...
armnnSerializer::DataLayout GetFlatBufferDataLayout(armnn::DataLayout dataLayout)
std::vector< uint32_t > m_vAxis
The indices of the dimensions to reduce.
float m_ScaleH
Center size encoding scale height.
ComparisonOperation m_Operation
Specifies the comparison operation to execute.
Definition: Descriptors.hpp:94
std::vector< int > m_End
End values for the input that will be sliced.
flatbuffers::Offset< SwitchLayer > CreateSwitchLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
A SpaceToBatchNdDescriptor for the SpaceToBatchNdLayer.
flatbuffers::Offset< ResizeBilinearDescriptor > CreateResizeBilinearDescriptor(flatbuffers::FlatBufferBuilder &_fbb, uint32_t targetWidth=0, uint32_t targetHeight=0, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NHWC, bool alignCorners=false, bool halfPixelCenters=false)
NormalizationAlgorithmChannel m_NormChannelType
Normalization channel algorithm to use (Across, Within).
const uint32_t * GetViewSizes(uint32_t idx) const
Get the view sizes at index idx.
float m_CellClip
Clipping threshold value for the cell state.
flatbuffers::Offset< ElementwiseUnaryDescriptor > CreateElementwiseUnaryDescriptor(flatbuffers::FlatBufferBuilder &_fbb, armnnSerializer::UnaryOperation operation=armnnSerializer::UnaryOperation_Abs)
float m_A
Alpha upper bound value used by the activation functions (BoundedReLu, Linear, TanH, Elu).
Definition: Descriptors.hpp:50
uint32_t m_DilationX
Dilation along x axis.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
bool m_CifgEnabled
Enable/disable CIFG (coupled input & forget gate).
flatbuffers::Offset< PadLayer > CreatePadLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::PadDescriptor > descriptor=0)
flatbuffers::Offset< FloorLayer > CreateFloorLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
std::unique_ptr< ISerializer, void(*)(ISerializer *serializer)> ISerializerPtr
Definition: ISerializer.hpp:15
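For orientation, a minimal usage sketch of this smart-pointer API follows. It assumes the public header path <armnnSerializer/ISerializer.hpp>, an already-constructed armnn::INetwork, and an illustrative output file name.

#include <armnnSerializer/ISerializer.hpp> // assumed public header path
#include <armnn/INetwork.hpp>
#include <fstream>

void SaveNetwork(const armnn::INetwork& network)
{
    // Create() returns an ISerializerPtr whose custom deleter calls Destroy().
    armnnSerializer::ISerializerPtr serializer = armnnSerializer::ISerializer::Create();

    // Build the FlatBuffer representation of the network.
    serializer->Serialize(network);

    // Write the serialized graph out as a binary stream.
    std::ofstream file("model.armnn", std::ios::binary);
    serializer->SaveSerializedToStream(file);
}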
flatbuffers::Offset< NormalizationDescriptor > CreateNormalizationDescriptor(flatbuffers::FlatBufferBuilder &_fbb, armnnSerializer::NormalizationAlgorithmChannel normChannelType=armnnSerializer::NormalizationAlgorithmChannel_Across, armnnSerializer::NormalizationAlgorithmMethod normMethodType=armnnSerializer::NormalizationAlgorithmMethod_LocalBrightness, uint32_t normSize=0, float alpha=0.0f, float beta=0.0f, float k=0.0f, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NCHW)
uint32_t m_PadLeft
Padding left value in the width dimension.
armnnSerializer::ComparisonOperation GetFlatBufferComparisonOperation(armnn::ComparisonOperation comparisonOperation)
flatbuffers::Offset< Pooling2dDescriptor > CreatePooling2dDescriptor(flatbuffers::FlatBufferBuilder &_fbb, armnnSerializer::PoolingAlgorithm poolType=armnnSerializer::PoolingAlgorithm_Max, uint32_t padLeft=0, uint32_t padRight=0, uint32_t padTop=0, uint32_t padBottom=0, uint32_t poolWidth=0, uint32_t poolHeight=0, uint32_t strideX=0, uint32_t strideY=0, armnnSerializer::OutputShapeRounding outputShapeRounding=armnnSerializer::OutputShapeRounding_Floor, armnnSerializer::PaddingMethod paddingMethod=armnnSerializer::PaddingMethod_IgnoreValue, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NHWC)
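A hedged sketch of filling this descriptor follows; the argument order matches the generated signature above, the schema header name "ArmnnSchema_generated.h" is assumed, and the 2x2 max-pool values are illustrative.

#include <flatbuffers/flatbuffers.h>
#include "ArmnnSchema_generated.h" // assumed name of the generated schema header

flatbuffers::Offset<armnnSerializer::Pooling2dDescriptor>
BuildMaxPool2x2(flatbuffers::FlatBufferBuilder& fbb)
{
    return armnnSerializer::CreatePooling2dDescriptor(
        fbb,
        armnnSerializer::PoolingAlgorithm_Max,      // poolType
        0, 0, 0, 0,                                 // padLeft, padRight, padTop, padBottom
        2, 2,                                       // poolWidth, poolHeight
        2, 2,                                       // strideX, strideY
        armnnSerializer::OutputShapeRounding_Floor,
        armnnSerializer::PaddingMethod_IgnoreValue,
        armnnSerializer::DataLayout_NHWC);
}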
flatbuffers::Offset< EqualLayer > CreateEqualLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
bool m_AlignCorners
Aligned corners.
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
flatbuffers::Offset< ConstantLayer > CreateConstantLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::ConstTensor > input=0)
int32_t m_Axis
The axis in params to gather indices from.
A ElementwiseUnaryDescriptor for the ElementwiseUnaryLayer.
Definition: Descriptors.hpp:98
PoolingAlgorithm m_PoolType
The pooling algorithm to use (Max, Average, L2).
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
flatbuffers::Offset< UintVector > CreateUintVector(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> data=0)
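Since several descriptors (view origins, view sizes, axes) are stored through this wrapper, here is a minimal sketch of converting a std::vector<uint32_t> into a UintVector table; the schema header name is an assumption.

#include <flatbuffers/flatbuffers.h>
#include <vector>
#include "ArmnnSchema_generated.h" // assumed name of the generated schema header

flatbuffers::Offset<armnnSerializer::UintVector>
BuildUintVector(flatbuffers::FlatBufferBuilder& fbb, const std::vector<uint32_t>& data)
{
    // CreateVector copies the raw values into the buffer; CreateUintVector
    // then wraps that flatbuffers vector in the schema's UintVector table.
    return armnnSerializer::CreateUintVector(fbb, fbb.CreateVector(data));
}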
flatbuffers::Offset< StackLayer > CreateStackLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::StackDescriptor > descriptor=0)
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
std::vector< std::pair< unsigned int, unsigned int > > m_Crops
The values to crop from the input dimension.
flatbuffers::Offset< Convolution2dLayer > CreateConvolution2dLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::Convolution2dDescriptor > descriptor=0, flatbuffers::Offset< armnnSerializer::ConstTensor > weights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > biases=0)
flatbuffers::Offset< UnidirectionalSequenceLstmLayer > CreateUnidirectionalSequenceLstmLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::UnidirectionalSequenceLstmDescriptor > descriptor=0, flatbuffers::Offset< armnnSerializer::LstmInputParams > inputParams=0)
unsigned int GetNumDimensions() const
Function that returns the tensor rank.
Definition: Tensor.cpp:174
flatbuffers::Offset< Pooling2dLayer > CreatePooling2dLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::Pooling2dDescriptor > descriptor=0)
bool m_ProjectionEnabled
Enable/disable the projection layer.
ArgMinMaxFunction
Definition: Types.hpp:82
flatbuffers::Offset< SpaceToBatchNdLayer > CreateSpaceToBatchNdLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::SpaceToBatchNdDescriptor > descriptor=0)
OutputShapeRounding m_OutputShapeRounding
The rounding method for the output shape (Floor, Ceiling).
armnnSerializer::ResizeMethod GetFlatBufferResizeMethod(armnn::ResizeMethod method)
uint32_t m_NumInputs
Number of input tensors.
flatbuffers::Offset< UnidirectionalSequenceLstmDescriptor > CreateUnidirectionalSequenceLstmDescriptor(flatbuffers::FlatBufferBuilder &_fbb, uint32_t activationFunc=0, float clippingThresCell=0.0f, float clippingThresProj=0.0f, bool cifgEnabled=true, bool peepholeEnabled=false, bool projectionEnabled=false, bool layerNormEnabled=false, bool timeMajor=false)
profiling::ProfilingGuid LayerGuid
Define LayerGuid type.
Definition: Types.hpp:313
uint32_t GetNumDimensions() const
Get the number of dimensions.
flatbuffers::Offset< ComparisonDescriptor > CreateComparisonDescriptor(flatbuffers::FlatBufferBuilder &_fbb, armnnSerializer::ComparisonOperation operation=armnnSerializer::ComparisonOperation_Equal)
virtual const IInputSlot & GetInputSlot(unsigned int index) const =0
Get a const input slot handle by slot index.
A MeanDescriptor for the MeanLayer.
flatbuffers::Offset< MaximumLayer > CreateMaximumLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
virtual const IOutputSlot * GetConnection() const =0
static ISerializerPtr Create()
Definition: Serializer.cpp:35
armnnSerializer::PaddingMethod GetFlatBufferPaddingMethod(armnn::PaddingMethod paddingMethod)
bool m_LayerNormEnabled
Enable/disable layer normalization.
std::enable_if_t< std::is_unsigned< Source >::value &&std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
Definition: NumericCast.hpp:35
uint32_t m_PadRight
Padding right value in the width dimension.
flatbuffers::Offset< InstanceNormalizationDescriptor > CreateInstanceNormalizationDescriptor(flatbuffers::FlatBufferBuilder &_fbb, float gamma=0.0f, float beta=0.0f, float eps=0.0f, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NHWC)
A TransposeDescriptor for the TransposeLayer.
A StridedSliceDescriptor for the StridedSliceLayer.
virtual const TensorInfo & GetTensorInfo() const =0
virtual const IOutputSlot & GetOutputSlot(unsigned int index) const =0
Get the const output slot handle by slot index.
int m_Axis
Axis to reduce across the input tensor.
Definition: Descriptors.hpp:72
flatbuffers::Offset< IntData > CreateIntData(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< int32_t >> data=0)
virtual const char * GetName() const =0
Returns the name of the layer.
float m_ScaleY
Center size encoding scale y.
flatbuffers::Offset< ResizeLayer > CreateResizeLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::ResizeDescriptor > descriptor=0)
uint32_t GetNumViews() const
Get the number of views.
flatbuffers::Offset< FullyConnectedLayer > CreateFullyConnectedLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::FullyConnectedDescriptor > descriptor=0, flatbuffers::Offset< armnnSerializer::ConstTensor > weights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > biases=0)
float m_NmsScoreThreshold
NMS score threshold.
flatbuffers::Offset< DequantizeLayer > CreateDequantizeLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
flatbuffers::Offset< ResizeBilinearLayer > CreateResizeBilinearLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::ResizeBilinearDescriptor > descriptor=0)
virtual LayerGuid GetOwningLayerGuid() const =0
A Pooling2dDescriptor for the Pooling2dLayer.
flatbuffers::Offset< DetectionPostProcessLayer > CreateDetectionPostProcessLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::DetectionPostProcessDescriptor > descriptor=0, flatbuffers::Offset< armnnSerializer::ConstTensor > anchors=0)
A NormalizationDescriptor for the NormalizationLayer.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
flatbuffers::Offset< BatchToSpaceNdLayer > CreateBatchToSpaceNdLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::BatchToSpaceNdDescriptor > descriptor=0)
flatbuffers::Offset< armnnSerializer::FeatureCompatibilityVersions > GetVersionTable()
An InstanceNormalizationDescriptor for InstanceNormalizationLayer.
unsigned int GetConcatAxis() const
Get the concatenation axis value.
A ResizeBilinearDescriptor for the ResizeBilinearLayer.
flatbuffers::Offset< LogSoftmaxLayer > CreateLogSoftmaxLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::LogSoftmaxDescriptor > descriptor=0)
float m_CellIntermediateScale
Cell intermediate quantization scale.
flatbuffers::Offset< StridedSliceDescriptor > CreateStridedSliceDescriptor(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< int32_t >> begin=0, flatbuffers::Offset< flatbuffers::Vector< int32_t >> end=0, flatbuffers::Offset< flatbuffers::Vector< int32_t >> stride=0, int32_t beginMask=0, int32_t endMask=0, int32_t shrinkAxisMask=0, int32_t ellipsisMask=0, int32_t newAxisMask=0, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NHWC)
flatbuffers::Offset< OriginsDescriptor > CreateOriginsDescriptor(flatbuffers::FlatBufferBuilder &_fbb, uint32_t concatAxis=0, uint32_t numViews=0, uint32_t numDimensions=0, flatbuffers::Offset< flatbuffers::Vector< flatbuffers::Offset< armnnSerializer::UintVector >>> viewOrigins=0)
flatbuffers::Offset< QLstmLayer > CreateQLstmLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::QLstmDescriptor > descriptor=0, flatbuffers::Offset< armnnSerializer::QLstmInputParams > inputParams=0)
flatbuffers::Offset< LstmDescriptor > CreateLstmDescriptor(flatbuffers::FlatBufferBuilder &_fbb, uint32_t activationFunc=0, float clippingThresCell=0.0f, float clippingThresProj=0.0f, bool cifgEnabled=true, bool peepholeEnabled=false, bool projectionEnabled=false, bool layerNormEnabled=false)
float m_B
Beta lower bound value used by the activation functions (BoundedReLu, Linear, TanH).
Definition: Descriptors.hpp:52
static void Destroy(ISerializer *serializer)
Definition: Serializer.cpp:40
A SoftmaxDescriptor for the SoftmaxLayer.
float m_Beta
Beta value for the normalization equation.
flatbuffers::Offset< ElementwiseUnaryLayer > CreateElementwiseUnaryLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::ElementwiseUnaryDescriptor > descriptor=0)
flatbuffers::Offset< PermuteLayer > CreatePermuteLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::PermuteDescriptor > descriptor=0)
const OriginsDescriptor & GetOrigins() const
Get the View Origins.
bool m_CifgEnabled
Enable/disable CIFG (coupled input & forget gate).
PermutationVector m_DimMappings
Indicates how to translate tensor elements from a given source into the target destination, when source and target potentially have different memory layouts.
uint32_t m_NormSize
Depth radius value.
flatbuffers::Offset< SpaceToBatchNdDescriptor > CreateSpaceToBatchNdDescriptor(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> blockShape=0, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> padList=0, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NHWC)
ActivationFunction m_Function
The activation function to use (Sigmoid, TanH, Linear, ReLu, BoundedReLu, SoftReLu, LeakyReLu, Abs, Sqrt, Square, Elu).
Definition: Descriptors.hpp:48
An input connection slot for a layer.
Definition: INetwork.hpp:24
flatbuffers::Offset< SerializedGraph > CreateSerializedGraph(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< flatbuffers::Offset< armnnSerializer::AnyLayer >>> layers=0, flatbuffers::Offset< flatbuffers::Vector< int32_t >> inputIds=0, flatbuffers::Offset< flatbuffers::Vector< int32_t >> outputIds=0, flatbuffers::Offset< armnnSerializer::FeatureCompatibilityVersions > featureVersions=0)
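To show how the pieces above come together at the top level, the following sketch assembles a SerializedGraph from already-built AnyLayer offsets and the collected input/output binding ids. It assumes the generated schema header name and that the versions offset comes from a helper such as GetVersionTable().

#include <flatbuffers/flatbuffers.h>
#include <vector>
#include "ArmnnSchema_generated.h" // assumed name of the generated schema header

flatbuffers::Offset<armnnSerializer::SerializedGraph>
BuildGraph(flatbuffers::FlatBufferBuilder& fbb,
           const std::vector<flatbuffers::Offset<armnnSerializer::AnyLayer>>& layers,
           const std::vector<int32_t>& inputIds,
           const std::vector<int32_t>& outputIds,
           flatbuffers::Offset<armnnSerializer::FeatureCompatibilityVersions> versions)
{
    // Each argument mirrors a field of the SerializedGraph table in the schema.
    return armnnSerializer::CreateSerializedGraph(fbb,
                                                  fbb.CreateVector(layers),
                                                  fbb.CreateVector(inputIds),
                                                  fbb.CreateVector(outputIds),
                                                  versions);
}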
armnnSerializer::PoolingAlgorithm GetFlatBufferPoolingAlgorithm(armnn::PoolingAlgorithm poolingAlgorithm)
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
A FillDescriptor for the FillLayer.
A BatchNormalizationDescriptor for the BatchNormalizationLayer.
uint32_t m_PadLeft
Padding left value in the width dimension.
unsigned int GetNumBytes() const
Definition: Tensor.hpp:302
ActivationFunction
Definition: Types.hpp:66
flatbuffers::Offset< LogicalBinaryDescriptor > CreateLogicalBinaryDescriptor(flatbuffers::FlatBufferBuilder &_fbb, armnnSerializer::LogicalBinaryOperation operation=armnnSerializer::LogicalBinaryOperation_LogicalAnd)
flatbuffers::Offset< DivisionLayer > CreateDivisionLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
A PermuteDescriptor for the PermuteLayer.
flatbuffers::Offset< LogicalBinaryLayer > CreateLogicalBinaryLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::LogicalBinaryDescriptor > descriptor=0)
uint32_t m_PadRight
Padding right value in the width dimension.
int32_t m_HiddenStateZeroPoint
Hidden State zero point.
bool m_ConstantWeights
Enable/disable constant weights and biases.