ArmNN
 21.05
Serializer.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 #include "Serializer.hpp"
6 #include "SerializerUtils.hpp"
7 
8 #include <armnn/Descriptors.hpp>
9 #include <armnn/LstmParams.hpp>
13 
14 #include <fmt/format.h>
15 #include <iostream>
16 
17 using namespace armnn;
18 namespace fb = flatbuffers;
19 namespace serializer = armnnSerializer;
20 
21 namespace armnnSerializer
22 {
23 
// Construct the public serializer facade; all work is delegated to the
// pimpl SerializerImpl instance created here.
24 ISerializer::ISerializer() : pSerializerImpl(new SerializerImpl())
25 {
26 }
27 
// Defaulted in the .cpp (not the header) so SerializerImpl is a complete type
// where the pimpl member is destroyed — presumably a smart pointer; TODO confirm.
28 ISerializer::~ISerializer() = default;
29 
// NOTE(review): doxygen-listing gap — the signature line (line 30, presumably
// ISerializer* ISerializer::CreateRaw()) is missing from this scrape.
// Returns a heap-allocated serializer; caller must pair with Destroy().
31 {
32  return new ISerializer();
33 }
34 
// NOTE(review): doxygen-listing gap — the signature (line 35) and body
// (line 37) of this factory are missing from this scrape; presumably it
// returns an ISerializerPtr wrapping CreateRaw() with Destroy as deleter — verify.
36 {
38 }
39 
// NOTE(review): doxygen-listing gap — the signature line (line 40, presumably
// void ISerializer::Destroy(ISerializer* serializer)) is missing from this scrape.
// Frees a serializer obtained from CreateRaw().
41 {
42  delete serializer;
43 }
44 
// NOTE(review): doxygen-listing gap — the signature line (line 45, presumably
// void ISerializer::Serialize(const armnn::INetwork& inNetwork)) is missing
// from this scrape. Forwards to the pimpl implementation.
46 {
47  pSerializerImpl->Serialize(inNetwork);
48 }
49 
// Write the previously serialized network to the given stream.
// Forwards to the pimpl; returns whatever the implementation reports.
50 bool ISerializer::SaveSerializedToStream(std::ostream& stream)
51 {
52  return pSerializerImpl->SaveSerializedToStream(stream);
53 }
54 
// NOTE(review): doxygen-listing gap — the signature (line 55) and all case
// labels (lines 59-80) plus the default return (line 82) are missing from this
// scrape. Presumably a helper mapping an armnn enum value ("function") to the
// corresponding FlatBuffer schema enum — verify against the full source.
56 {
57  switch (function)
58  {
81  default:
83  }
84 }
85 
// NOTE(review): doxygen-listing gap — the signature (line 86), case labels
// (lines 90-92) and the default return (line 94) are missing from this scrape.
// Presumably a second armnn-enum → FlatBuffer-schema-enum mapping helper — verify.
87 {
88  switch (function)
89  {
93  default:
95  }
96 }
97 
98 uint32_t SerializerStrategy::GetSerializedId(armnn::LayerGuid guid)
99 {
100  if (m_guidMap.empty())
101  {
102  m_guidMap.insert(std::make_pair(guid, m_layerId));
103  }
104  else if (m_guidMap.find(guid) == m_guidMap.end())
105  {
106  ++m_layerId;
107  m_guidMap.insert(std::make_pair(guid, m_layerId));
108 
109  return m_layerId;
110  }
111  return m_guidMap[guid];
112 }
113 
114 // Build FlatBuffer for Input Layer
115 void SerializerStrategy::SerializeInputLayer(const armnn::IConnectableLayer* layer, LayerBindingId id, const char* name)
116 {
117  IgnoreUnused(name);
118 
119  // Create FlatBuffer BaseLayer
120  auto flatBufferInputBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Input);
121 
122  // Create FlatBuffer BindableBaseLayer
123  auto flatBufferInputBindableBaseLayer = serializer::CreateBindableLayerBase(m_flatBufferBuilder,
124  flatBufferInputBaseLayer,
125  id);
126  // Push layer binding id to outputIds.
127  m_inputIds.push_back(id);
128 
129  // Create the FlatBuffer InputLayer
130  auto flatBufferInputLayer = serializer::CreateInputLayer(m_flatBufferBuilder, flatBufferInputBindableBaseLayer);
131 
132  // Add the AnyLayer to the FlatBufferLayers
133  CreateAnyLayer(flatBufferInputLayer.o, serializer::Layer::Layer_InputLayer);
134 }
135 
136 // Build FlatBuffer for Output Layer
137 void SerializerStrategy::SerializeOutputLayer(const armnn::IConnectableLayer* layer,
138  LayerBindingId id, const char* name)
139 {
140  IgnoreUnused(name);
141 
142  // Create FlatBuffer BaseLayer
143  auto flatBufferOutputBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Output);
144 
145  // Create FlatBuffer BindableBaseLayer
146  auto flatBufferOutputBindableBaseLayer = serializer::CreateBindableLayerBase(m_flatBufferBuilder,
147  flatBufferOutputBaseLayer,
148  id);
149  // Push layer binding id to outputIds.
150  m_outputIds.push_back(id);
151 
152  // Create the FlatBuffer OutputLayer
153  auto flatBufferOutputLayer = serializer::CreateOutputLayer(m_flatBufferBuilder, flatBufferOutputBindableBaseLayer);
154  // Add the AnyLayer to the FlatBufferLayers
155  CreateAnyLayer(flatBufferOutputLayer.o, serializer::Layer::Layer_OutputLayer);
156 }
157 
158 void SerializerStrategy::SerializeAbsLayer(const armnn::IConnectableLayer* layer, const char* name)
159 {
160  IgnoreUnused(name);
161  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Abs);
162  auto flatBufferAbsLayer = serializer::CreateAbsLayer(m_flatBufferBuilder, flatBufferBaseLayer);
163 
164  CreateAnyLayer(flatBufferAbsLayer.o, serializer::Layer::Layer_AbsLayer);
165 }
166 
167 // Build FlatBuffer for Activation Layer
// Serialize an Activation layer together with its descriptor (A/B parameters
// and, presumably, the activation function enum — see gap note below).
168 void SerializerStrategy::SerializeActivationLayer(const armnn::IConnectableLayer* layer,
169  const armnn::ActivationDescriptor& descriptor,
170  const char* name)
171 {
172  IgnoreUnused(name);
173 
174  // Create FlatBuffer BaseLayer
175  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Activation);
176 
177  // Create the FlatBuffer ActivationDescriptor
178  auto flatBufferDescriptor = CreateActivationDescriptor(m_flatBufferBuilder,
// NOTE(review): doxygen-listing gap — line 179 (presumably the converted
// descriptor.m_Function argument) is missing from this scrape.
180  descriptor.m_A,
181  descriptor.m_B);
182 
183  // Create the FlatBuffer ActivationLayer
// NOTE(review): local is mis-named "Addition" (copy-paste) but holds the ActivationLayer offset.
184  auto flatBufferAdditionLayer = CreateActivationLayer(m_flatBufferBuilder,
185  flatBufferBaseLayer,
186  flatBufferDescriptor);
187 
188  // Add the AnyLayer to the FlatBufferLayers
189  CreateAnyLayer(flatBufferAdditionLayer.o, serializer::Layer::Layer_ActivationLayer);
190 }
191 
192 // Build FlatBuffer for Addition Layer
193 void SerializerStrategy::SerializeAdditionLayer(const armnn::IConnectableLayer* layer, const char* name)
194 {
195  IgnoreUnused(name);
196 
197  // Create FlatBuffer BaseLayer
198  auto flatBufferAdditionBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Addition);
199 
200  // Create the FlatBuffer AdditionLayer
201  auto flatBufferAdditionLayer = serializer::CreateAdditionLayer(m_flatBufferBuilder, flatBufferAdditionBaseLayer);
202 
203  // Add the AnyLayer to the FlatBufferLayers
204  CreateAnyLayer(flatBufferAdditionLayer.o, serializer::Layer::Layer_AdditionLayer);
205 }
206 
207 // Build FlatBuffer for ArgMinMax Layer
// Serialize an ArgMinMax layer together with its descriptor (axis and,
// presumably, the min/max function enum — see gap notes below).
208 void SerializerStrategy::SerializeArgMinMaxLayer(const armnn::IConnectableLayer *layer,
209  const armnn::ArgMinMaxDescriptor& descriptor,
210  const char *name)
211 {
212  IgnoreUnused(name);
213 
214  // Create FlatBuffer BaseLayer
215  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_ArgMinMax);
216 
217  // Create FlatBuffer Descriptor
218  auto flatBufferDescriptor = CreateArgMinMaxDescriptor(m_flatBufferBuilder,
// NOTE(review): doxygen-listing gap — line 219 (presumably the converted
// descriptor.m_Function argument) is missing from this scrape.
220  descriptor.m_Axis);
221 
222  // Create FlatBuffer ArgMinMaxLayer
223  auto flatBufferLayer = CreateArgMinMaxLayer(m_flatBufferBuilder,
224  flatBufferBaseLayer,
225  flatBufferDescriptor);
226 
// NOTE(review): doxygen-listing gap — line 227 (the CreateAnyLayer registration
// call) is missing from this scrape.
228 }
229 
230 // Build FlatBuffer for BatchToSpaceNd Layer
// Serialize a BatchToSpaceNd layer: block shape plus the per-dimension
// (begin, end) crop pairs flattened into one uint vector.
231 void SerializerStrategy::SerializeBatchToSpaceNdLayer(const armnn::IConnectableLayer* layer,
232  const armnn::BatchToSpaceNdDescriptor& descriptor,
233  const char* name)
234 {
235  IgnoreUnused(name);
236 
237  // Create FlatBuffer BaseLayer
238  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_BatchToSpaceNd);
239 
240  std::vector<unsigned int> crops;
241  crops.reserve(descriptor.m_Crops.size() * 2);
242  for (auto& crop : descriptor.m_Crops)
243  {
244  crops.push_back(crop.first);
245  crops.push_back(crop.second);
246  }
247 
248  auto flatBufferDescriptor =
249  CreateBatchToSpaceNdDescriptor(m_flatBufferBuilder,
250  m_flatBufferBuilder.CreateVector(descriptor.m_BlockShape),
251  m_flatBufferBuilder.CreateVector(crops),
// NOTE(review): doxygen-listing gap — line 252 (presumably the converted
// descriptor.m_DataLayout argument closing the call) is missing from this scrape.
253 
254  auto flatBufferLayer = serializer::CreateBatchToSpaceNdLayer(m_flatBufferBuilder,
255  flatBufferBaseLayer,
256  flatBufferDescriptor);
257 
// NOTE(review): doxygen-listing gap — line 258 (the CreateAnyLayer registration
// call) is missing from this scrape.
259 }
260 
// Serialize a BatchNormalization layer: eps/data-layout descriptor plus the
// four constant tensors (mean, variance, beta, gamma) in that fixed order.
261 void SerializerStrategy::SerializeBatchNormalizationLayer(
262  const armnn::IConnectableLayer* layer,
263  const armnn::BatchNormalizationDescriptor& batchNormDescriptor,
264  const std::vector<armnn::ConstTensor>& constants,
265  const char* name)
266 {
267  IgnoreUnused(name);
268 
269  const armnn::ConstTensor& mean = constants[0];
270  const armnn::ConstTensor& variance = constants[1];
271  const armnn::ConstTensor& beta = constants[2];
272  const armnn::ConstTensor& gamma = constants[3];
273 
274  auto fbBatchNormalizationBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_BatchNormalization);
275  auto fbBatchNormalizationDescriptor = serializer::CreateBatchNormalizationDescriptor(
276  m_flatBufferBuilder,
277  batchNormDescriptor.m_Eps,
278  GetFlatBufferDataLayout(batchNormDescriptor.m_DataLayout));
279 
280  auto fbMeanConstTensorInfo = CreateConstTensorInfo(mean);
281  auto fbVarianceConstTensorInfo = CreateConstTensorInfo(variance);
282  auto fbBetaConstTensorInfo = CreateConstTensorInfo(beta);
283  auto fbGammaConstTensorInfo = CreateConstTensorInfo(gamma);
284  auto fbBatchNormalizationLayer = serializer::CreateBatchNormalizationLayer(m_flatBufferBuilder,
285  fbBatchNormalizationBaseLayer,
286  fbBatchNormalizationDescriptor,
287  fbMeanConstTensorInfo,
288  fbVarianceConstTensorInfo,
289  fbBetaConstTensorInfo,
290  fbGammaConstTensorInfo);
291 
// NOTE(review): doxygen-listing gap — line 292 (the CreateAnyLayer registration
// call) is missing from this scrape.
293 }
294 
// Serialize a Cast layer (type conversion; no descriptor or constants).
295 void SerializerStrategy::SerializeCastLayer(const armnn::IConnectableLayer* layer,
296  const char* name)
297 {
298  IgnoreUnused(name);
299 
300  auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Cast);
301  auto fbCastLayer = serializer::CreateCastLayer(m_flatBufferBuilder, fbBaseLayer);
// NOTE(review): doxygen-listing gap — line 302 (the CreateAnyLayer registration
// call) is missing from this scrape.
303 }
304 
// Serialize a Comparison layer with its operation descriptor.
305 void SerializerStrategy::SerializeComparisonLayer(const armnn::IConnectableLayer* layer,
306  const armnn::ComparisonDescriptor& descriptor,
307  const char* name)
308 {
309  IgnoreUnused(name);
310 
// NOTE(review): doxygen-listing gap — line 311 (presumably the fbBaseLayer =
// CreateLayerBase(...) statement) is missing from this scrape.
312  auto fbDescriptor = serializer::CreateComparisonDescriptor(
313  m_flatBufferBuilder,
// NOTE(review): doxygen-listing gap — line 314 (presumably the converted
// descriptor.m_Operation argument) is missing from this scrape.
315 
316  auto fbLayer = serializer::CreateComparisonLayer(m_flatBufferBuilder, fbBaseLayer, fbDescriptor);
// NOTE(review): doxygen-listing gap — line 317 (the CreateAnyLayer registration
// call) is missing from this scrape.
318 }
319 
320 // Build FlatBuffer for Constant Layer
// Serialize a Constant layer: its single constant tensor is embedded in the
// FlatBuffer as a ConstTensor table.
321 void SerializerStrategy::SerializeConstantLayer(const armnn::IConnectableLayer* layer,
322  const std::vector<armnn::ConstTensor>& constants,
323  const char* name)
324 {
325  IgnoreUnused(name);
326 
327  armnn::ConstTensor input = constants[0];
328 
329  // Create FlatBuffer BaseLayer
330  auto flatBufferConstantBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Constant);
331 
332  auto flatBufferConstTensorInfo = CreateConstTensorInfo(input);
333 
334  // Create the FlatBuffer ConstantLayer
335  auto flatBufferLayer = CreateConstantLayer(m_flatBufferBuilder,
336  flatBufferConstantBaseLayer,
337  flatBufferConstTensorInfo);
338 
339  // Add the AnyLayer to the FlatBufferLayers
// NOTE(review): doxygen-listing gap — line 340 (the CreateAnyLayer registration
// call the comment above refers to) is missing from this scrape.
341 }
342 
343 // Build FlatBuffer for Convolution2dLayer
// Serialize a Convolution2d layer: padding/stride/dilation descriptor plus the
// weights constant tensor and, when present (constants.size() > 1), the biases.
344 void SerializerStrategy::SerializeConvolution2dLayer(const armnn::IConnectableLayer* layer,
345  const armnn::Convolution2dDescriptor& descriptor,
346  const std::vector<armnn::ConstTensor>& constants,
347  const char* name)
348 {
349  IgnoreUnused(name);
350 
351  const armnn::ConstTensor weights = constants[0];
352 
353  // Create FlatBuffer BaseLayer
354  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Convolution2d);
355 
356  auto flatBufferDescriptor = CreateConvolution2dDescriptor(m_flatBufferBuilder,
357  descriptor.m_PadLeft,
358  descriptor.m_PadRight,
359  descriptor.m_PadTop,
360  descriptor.m_PadBottom,
361  descriptor.m_StrideX,
362  descriptor.m_StrideY,
363  descriptor.m_DilationX,
364  descriptor.m_DilationY,
365  descriptor.m_BiasEnabled,
// NOTE(review): doxygen-listing gap — line 366 (presumably the converted
// descriptor.m_DataLayout argument closing the call) is missing from this scrape.
367  auto flatBufferWeightsConstTensorInfo = CreateConstTensorInfo(weights);
368  flatbuffers::Offset<serializer::ConstTensor> flatBufferBiasesConstTensorInfo;
369 
370  if (constants.size() > 1)
371  {
372  const armnn::ConstTensor biases = constants[1];
373  flatBufferBiasesConstTensorInfo = CreateConstTensorInfo(biases);
374  }
375 
376  // Create the FlatBuffer Convolution2dLayer
377  auto flatBufferLayer = CreateConvolution2dLayer(m_flatBufferBuilder,
378  flatBufferBaseLayer,
379  flatBufferDescriptor,
380  flatBufferWeightsConstTensorInfo,
381  flatBufferBiasesConstTensorInfo);
382 
383  // Add the AnyLayer to the FlatBufferLayers
// NOTE(review): doxygen-listing gap — line 384 (the CreateAnyLayer registration
// call the comment above refers to) is missing from this scrape.
385 }
386 
// Serialize a DepthToSpace layer with its block-size descriptor.
387 void SerializerStrategy::SerializeDepthToSpaceLayer(const armnn::IConnectableLayer* layer,
388  const armnn::DepthToSpaceDescriptor& descriptor,
389  const char* name)
390 {
391  IgnoreUnused(name);
392 
// NOTE(review): doxygen-listing gap — line 393 (presumably the fbBaseLayer =
// CreateLayerBase(...) statement) is missing from this scrape.
394  auto fbDescriptor = CreateDepthToSpaceDescriptor(m_flatBufferBuilder,
395  descriptor.m_BlockSize,
// NOTE(review): doxygen-listing gap — line 396 (presumably the converted
// descriptor.m_DataLayout argument closing the call) is missing from this scrape.
397 
398  auto fbLayer = serializer::CreateDepthToSpaceLayer(m_flatBufferBuilder, fbBaseLayer, fbDescriptor);
399 
// NOTE(review): doxygen-listing gap — line 400 (the CreateAnyLayer registration
// call) is missing from this scrape.
401 }
402 
// Serialize a DepthwiseConvolution2d layer: descriptor plus the weights
// constant tensor and, when present (constants.size() > 1), the biases.
403 void SerializerStrategy::SerializeDepthwiseConvolution2dLayer(const armnn::IConnectableLayer* layer,
404  const armnn::DepthwiseConvolution2dDescriptor& descriptor,
405  const std::vector<armnn::ConstTensor>& constants,
406  const char* name)
407 {
408  IgnoreUnused(name);
409 
410  const armnn::ConstTensor& weights = constants[0];
411 
// NOTE(review): doxygen-listing gap — line 412 (presumably the fbBaseLayer =
// CreateLayerBase(...) statement) is missing from this scrape.
413  auto fbDescriptor = CreateDepthwiseConvolution2dDescriptor(m_flatBufferBuilder,
414  descriptor.m_PadLeft,
415  descriptor.m_PadRight,
416  descriptor.m_PadTop,
417  descriptor.m_PadBottom,
418  descriptor.m_StrideX,
419  descriptor.m_StrideY,
420  descriptor.m_DilationX,
421  descriptor.m_DilationY,
422  descriptor.m_BiasEnabled,
// NOTE(review): doxygen-listing gap — line 423 (presumably the converted
// descriptor.m_DataLayout argument closing the call) is missing from this scrape.
424 
425  flatbuffers::Offset<serializer::ConstTensor> fbWeightsConstTensorInfo = CreateConstTensorInfo(weights);
426  flatbuffers::Offset<serializer::ConstTensor> fbBiasesConstTensorInfo;
427 
428  if (constants.size() > 1)
429  {
430  const armnn::ConstTensor& biases = constants[1];
431  fbBiasesConstTensorInfo = CreateConstTensorInfo(biases);
432  }
433 
434  auto flatBufferLayer = CreateDepthwiseConvolution2dLayer(m_flatBufferBuilder,
435  fbBaseLayer,
436  fbDescriptor,
437  fbWeightsConstTensorInfo,
438  fbBiasesConstTensorInfo);
439 
// NOTE(review): doxygen-listing gap — line 440 (the CreateAnyLayer registration
// call) is missing from this scrape.
441 }
442 
// Serialize a Dequantize layer (no descriptor or constants).
443 void SerializerStrategy::SerializeDequantizeLayer(const armnn::IConnectableLayer* layer,
444  const char* name)
445 {
446  IgnoreUnused(name);
447 
448  auto fbDequantizeBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Dequantize);
449  auto fbDequantizeLayer = serializer::CreateDequantizeLayer(m_flatBufferBuilder, fbDequantizeBaseLayer);
450 
// NOTE(review): doxygen-listing gap — line 451 (the CreateAnyLayer registration
// call) is missing from this scrape.
452 }
453 
// Serialize a DetectionPostProcess layer: NMS/scale descriptor plus the
// anchors constant tensor (constants[0]).
454 void SerializerStrategy::SerializeDetectionPostProcessLayer(const armnn::IConnectableLayer* layer,
455  const armnn::DetectionPostProcessDescriptor& descriptor,
456  const std::vector<armnn::ConstTensor>& constants,
457  const char* name)
458 {
459  IgnoreUnused(name);
460 
461  const armnn::ConstTensor& anchors = constants[0];
462 
// NOTE(review): doxygen-listing gap — line 463 (presumably the fbBaseLayer =
// CreateLayerBase(...) statement) is missing from this scrape.
464  auto fbDescriptor = CreateDetectionPostProcessDescriptor(m_flatBufferBuilder,
465  descriptor.m_MaxDetections,
466  descriptor.m_MaxClassesPerDetection,
467  descriptor.m_DetectionsPerClass,
468  descriptor.m_NmsScoreThreshold,
469  descriptor.m_NmsIouThreshold,
470  descriptor.m_NumClasses,
471  descriptor.m_UseRegularNms,
472  descriptor.m_ScaleX,
473  descriptor.m_ScaleY,
474  descriptor.m_ScaleW,
475  descriptor.m_ScaleH);
476 
477  flatbuffers::Offset<serializer::ConstTensor> fbAnchorsConstTensorInfo = CreateConstTensorInfo(anchors);
478 
479  auto flatBufferLayer = CreateDetectionPostProcessLayer(m_flatBufferBuilder,
480  fbBaseLayer,
481  fbDescriptor,
482  fbAnchorsConstTensorInfo);
483 
// NOTE(review): doxygen-listing gap — line 484 (the CreateAnyLayer registration
// call) is missing from this scrape.
485 }
486 
// Serialize a Division layer (element-wise divide; no descriptor or constants).
487 void SerializerStrategy::SerializeDivisionLayer(const armnn::IConnectableLayer* layer, const char* name)
488 {
489  IgnoreUnused(name);
490 
491  auto fbDivisionBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Division);
492  auto fbDivisionLayer = serializer::CreateDivisionLayer(m_flatBufferBuilder, fbDivisionBaseLayer);
493 
// NOTE(review): doxygen-listing gap — line 494 (the CreateAnyLayer registration
// call) is missing from this scrape.
495 }
496 
// Serialize an ElementwiseUnary layer with its operation descriptor.
497 void SerializerStrategy::SerializeElementwiseUnaryLayer(const armnn::IConnectableLayer* layer,
498  const armnn::ElementwiseUnaryDescriptor& descriptor,
499  const char* name)
500 {
501  IgnoreUnused(name);
502 
// NOTE(review): doxygen-listing gap — lines 503-504 (presumably the fbBaseLayer
// statement and the start of the fbDescriptor = Create...Descriptor( call) are
// missing from this scrape.
505  m_flatBufferBuilder,
// NOTE(review): doxygen-listing gap — line 506 (presumably the converted
// descriptor.m_Operation argument closing the call) is missing from this scrape.
507 
508  auto fbLayer = serializer::CreateElementwiseUnaryLayer(m_flatBufferBuilder, fbBaseLayer, fbDescriptor);
// NOTE(review): doxygen-listing gap — line 509 (the CreateAnyLayer registration
// call) is missing from this scrape.
510 }
511 
// Serialize an Equal layer (element-wise equality; no descriptor or constants).
512 void SerializerStrategy::SerializeEqualLayer(const armnn::IConnectableLayer* layer, const char* name)
513 {
514  IgnoreUnused(name);
515 
516  auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Equal);
517  auto fbEqualLayer = serializer::CreateEqualLayer(m_flatBufferBuilder, fbBaseLayer);
518 
// NOTE(review): doxygen-listing gap — line 519 (the CreateAnyLayer registration
// call) is missing from this scrape.
520 }
521 
// Serialize a Fill layer with its fill-value descriptor.
522 void SerializerStrategy::SerializeFillLayer(const armnn::IConnectableLayer* layer,
523  const armnn::FillDescriptor& fillDescriptor,
524  const char* name)
525 {
526  IgnoreUnused(name);
527 
528  auto fbFillBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Fill);
529 
530  auto fbDescriptor = serializer::CreateFillDescriptor(m_flatBufferBuilder, fillDescriptor.m_Value);
531 
532  auto fbFillLayer = serializer::CreateFillLayer(m_flatBufferBuilder, fbFillBaseLayer, fbDescriptor);
533 
// NOTE(review): doxygen-listing gap — line 534 (the CreateAnyLayer registration
// call) is missing from this scrape.
535 }
536 
537 void SerializerStrategy::SerializeFloorLayer(const armnn::IConnectableLayer *layer, const char *name)
538 {
539  IgnoreUnused(name);
540 
541  auto flatBufferFloorBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Floor);
542  auto flatBufferFloorLayer = serializer::CreateFloorLayer(m_flatBufferBuilder, flatBufferFloorBaseLayer);
543 
544  CreateAnyLayer(flatBufferFloorLayer.o, serializer::Layer::Layer_FloorLayer);
545 }
546 
// Serialize a Gather layer with its axis descriptor.
547 void SerializerStrategy::SerializeGatherLayer(const armnn::IConnectableLayer* layer,
548  const armnn::GatherDescriptor& gatherDescriptor,
549  const char* name)
550 {
551  IgnoreUnused(name);
552 
553  auto fbGatherDescriptor = CreateGatherDescriptor(m_flatBufferBuilder,
554  gatherDescriptor.m_Axis);
555  auto fbGatherBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Gather);
556  auto flatBufferLayer = serializer::CreateGatherLayer(m_flatBufferBuilder, fbGatherBaseLayer, fbGatherDescriptor);
557 
// NOTE(review): doxygen-listing gap — line 558 (the CreateAnyLayer registration
// call) is missing from this scrape.
559 }
560 
561 
// Serialize a Greater layer (element-wise greater-than; no descriptor or constants).
562 void SerializerStrategy::SerializeGreaterLayer(const armnn::IConnectableLayer* layer, const char* name)
563 {
564  IgnoreUnused(name);
565 
566  auto fbGreaterBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Greater);
567  auto fbGreaterLayer = serializer::CreateGreaterLayer(m_flatBufferBuilder, fbGreaterBaseLayer);
568 
// NOTE(review): doxygen-listing gap — line 569 (the CreateAnyLayer registration
// call) is missing from this scrape.
570 }
571 
// Serialize an InstanceNormalization layer with its gamma/beta/eps/data-layout descriptor.
572 void SerializerStrategy::SerializeInstanceNormalizationLayer(
573  const armnn::IConnectableLayer* layer,
574  const armnn::InstanceNormalizationDescriptor& instanceNormalizationDescriptor,
575  const char* name)
576 {
577  IgnoreUnused(name);
578 
// NOTE(review): doxygen-listing gap — line 579 (presumably the start of the
// fbDescriptor = CreateInstanceNormalizationDescriptor( call) is missing from this scrape.
580  m_flatBufferBuilder,
581  instanceNormalizationDescriptor.m_Gamma,
582  instanceNormalizationDescriptor.m_Beta,
583  instanceNormalizationDescriptor.m_Eps,
584  GetFlatBufferDataLayout(instanceNormalizationDescriptor.m_DataLayout));
585 
// NOTE(review): doxygen-listing gap — line 586 (presumably the fbBaseLayer =
// CreateLayerBase(...) statement) is missing from this scrape.
587  auto fbLayer = serializer::CreateInstanceNormalizationLayer(m_flatBufferBuilder, fbBaseLayer, fbDescriptor);
588 
// NOTE(review): doxygen-listing gap — line 589 (the CreateAnyLayer registration
// call) is missing from this scrape.
590 }
591 
// Serialize an L2Normalization layer with its data-layout/eps descriptor.
592 void SerializerStrategy::SerializeL2NormalizationLayer(const armnn::IConnectableLayer* layer,
593  const armnn::L2NormalizationDescriptor& l2NormalizationDescriptor,
594  const char* name)
595 {
596  IgnoreUnused(name);
597 
598  // Create FlatBuffer BaseLayer
// NOTE(review): doxygen-listing gap — line 599 (the fbBaseLayer statement the
// comment above refers to) is missing from this scrape.
600 
601  // Create the FlatBuffer L2Normalization Descriptor
// NOTE(review): doxygen-listing gap — line 602 (presumably the start of the
// fbDescriptor = CreateL2NormalizationDescriptor( call) is missing from this scrape.
603  m_flatBufferBuilder,
604  GetFlatBufferDataLayout(l2NormalizationDescriptor.m_DataLayout),
605  l2NormalizationDescriptor.m_Eps);
606 
607  // Create FlatBuffer layer
608  auto fbLayer = serializer::CreateL2NormalizationLayer(m_flatBufferBuilder, fbBaseLayer, fbDescriptor);
609 
// NOTE(review): doxygen-listing gap — line 610 (the CreateAnyLayer registration
// call) is missing from this scrape.
611 }
612 
// Serialize a LogicalBinary layer with its operation descriptor.
613 void SerializerStrategy::SerializeLogicalBinaryLayer(const armnn::IConnectableLayer* layer,
614  const armnn::LogicalBinaryDescriptor& descriptor,
615  const char* name)
616 {
617  IgnoreUnused(name);
618 
// NOTE(review): doxygen-listing gap — line 619 (presumably the fbBaseLayer =
// CreateLayerBase(...) statement) is missing from this scrape.
620  auto fbDescriptor = serializer::CreateLogicalBinaryDescriptor(
621  m_flatBufferBuilder,
// NOTE(review): doxygen-listing gap — line 622 (presumably the converted
// descriptor.m_Operation argument closing the call) is missing from this scrape.
623 
624  auto fbLayer = serializer::CreateLogicalBinaryLayer(m_flatBufferBuilder, fbBaseLayer, fbDescriptor);
// NOTE(review): doxygen-listing gap — line 625 (the CreateAnyLayer registration
// call) is missing from this scrape.
626 }
627 
628 void SerializerStrategy::SerializeLogSoftmaxLayer(const armnn::IConnectableLayer* layer,
629  const armnn::LogSoftmaxDescriptor& logSoftmaxDescriptor,
630  const char* name)
631 {
632  IgnoreUnused(name);
633 
634  // Create FlatBuffer BaseLayer
635  auto flatBufferLogSoftmaxBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_LogSoftmax);
636 
637  // Create the FlatBuffer LogSoftmaxDescriptor
638  auto flatBufferLogSoftmaxDesc =
639  serializer::CreateLogSoftmaxDescriptor(m_flatBufferBuilder,
640  logSoftmaxDescriptor.m_Beta,
641  logSoftmaxDescriptor.m_Axis);
642 
643  // Create the FlatBuffer LogSoftmaxLayer
644  auto flatBufferLogSoftmaxLayer =
645  serializer::CreateLogSoftmaxLayer(m_flatBufferBuilder,
646  flatBufferLogSoftmaxBaseLayer,
647  flatBufferLogSoftmaxDesc);
648 
649  CreateAnyLayer(flatBufferLogSoftmaxLayer.o, serializer::Layer::Layer_LogSoftmaxLayer);
650 }
651 
// Serialize an Lstm layer: descriptor flags plus the constant tensors, which
// are consumed from `constants` in a fixed order — nine mandatory tensors
// first, then the optional groups gated by the CIFG / peephole / projection /
// layer-norm flags, in exactly the order the if-blocks below read them.
652 void SerializerStrategy::SerializeLstmLayer(const armnn::IConnectableLayer* layer,
653  const armnn::LstmDescriptor& descriptor,
654  const std::vector<armnn::ConstTensor>& constants,
655  const char* name)
656 {
657  IgnoreUnused(name);
658 
659  auto fbLstmBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Lstm);
660 
661  auto fbLstmDescriptor = serializer::CreateLstmDescriptor(
662  m_flatBufferBuilder,
663  descriptor.m_ActivationFunc,
664  descriptor.m_ClippingThresCell,
665  descriptor.m_ClippingThresProj,
666  descriptor.m_CifgEnabled,
667  descriptor.m_PeepholeEnabled,
668  descriptor.m_ProjectionEnabled,
669  descriptor.m_LayerNormEnabled);
670 
671  // Index for constants vector
672  std::size_t i = 0;
673 
674  // Get mandatory/basic input parameters
675  auto inputToForgetWeights = CreateConstTensorInfo(constants[i++]); //InputToForgetWeights
676  auto inputToCellWeights = CreateConstTensorInfo(constants[i++]); //InputToCellWeights
677  auto inputToOutputWeights = CreateConstTensorInfo(constants[i++]); //InputToOutputWeights
678  auto recurrentToForgetWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToForgetWeights
679  auto recurrentToCellWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToCellWeights
680  auto recurrentToOutputWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToOutputWeights
681  auto forgetGateBias = CreateConstTensorInfo(constants[i++]); //ForgetGateBias
682  auto cellBias = CreateConstTensorInfo(constants[i++]); //CellBias
683  auto outputGateBias = CreateConstTensorInfo(constants[i++]); //OutputGateBias
684 
685 
686 
687  //Define optional parameters, these will be set depending on configuration in Lstm descriptor
688  flatbuffers::Offset<serializer::ConstTensor> inputToInputWeights;
689  flatbuffers::Offset<serializer::ConstTensor> recurrentToInputWeights;
690  flatbuffers::Offset<serializer::ConstTensor> cellToInputWeights;
691  flatbuffers::Offset<serializer::ConstTensor> inputGateBias;
692  flatbuffers::Offset<serializer::ConstTensor> projectionWeights;
693  flatbuffers::Offset<serializer::ConstTensor> projectionBias;
694  flatbuffers::Offset<serializer::ConstTensor> cellToForgetWeights;
695  flatbuffers::Offset<serializer::ConstTensor> cellToOutputWeights;
696  flatbuffers::Offset<serializer::ConstTensor> inputLayerNormWeights;
697  flatbuffers::Offset<serializer::ConstTensor> forgetLayerNormWeights;
698  flatbuffers::Offset<serializer::ConstTensor> cellLayerNormWeights;
699  flatbuffers::Offset<serializer::ConstTensor> outputLayerNormWeights;
700 
701  if (!descriptor.m_CifgEnabled)
702  {
703  inputToInputWeights = CreateConstTensorInfo(constants[i++]); //InputToInputWeights
704  recurrentToInputWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToInputWeights
705  inputGateBias = CreateConstTensorInfo(constants[i++]); //InputGateBias
706  }
707 
708  if (descriptor.m_PeepholeEnabled)
709  {
710  if (!descriptor.m_CifgEnabled)
711  {
712  cellToInputWeights = CreateConstTensorInfo(constants[i++]); //CellToInputWeights
713  }
714  cellToForgetWeights = CreateConstTensorInfo(constants[i++]); //CellToForgetWeights
715  cellToOutputWeights = CreateConstTensorInfo(constants[i++]); //CellToOutputWeights
716  }
717 
718  if (descriptor.m_ProjectionEnabled)
719  {
720  projectionWeights = CreateConstTensorInfo(constants[i++]); //ProjectionWeights
721  projectionBias = CreateConstTensorInfo(constants[i++]); //ProjectionBias
722  }
723 
724  if (descriptor.m_LayerNormEnabled)
725  {
726  if (!descriptor.m_CifgEnabled)
727  {
728  inputLayerNormWeights = CreateConstTensorInfo(constants[i++]); //InputLayerNormWeights
729  }
730  forgetLayerNormWeights = CreateConstTensorInfo(constants[i++]); //ForgetLayerNormWeights
731  cellLayerNormWeights = CreateConstTensorInfo(constants[i++]); //CellLayerNormWeights
732  outputLayerNormWeights = CreateConstTensorInfo(constants[i++]); //OutputLayerNormWeights
733  }
734 
735  auto fbLstmParams = serializer::CreateLstmInputParams(
736  m_flatBufferBuilder,
737  inputToForgetWeights,
738  inputToCellWeights,
739  inputToOutputWeights,
740  recurrentToForgetWeights,
741  recurrentToCellWeights,
742  recurrentToOutputWeights,
743  forgetGateBias,
744  cellBias,
745  outputGateBias,
746  inputToInputWeights,
747  recurrentToInputWeights,
748  cellToInputWeights,
749  inputGateBias,
750  projectionWeights,
751  projectionBias,
752  cellToForgetWeights,
753  cellToOutputWeights,
754  inputLayerNormWeights,
755  forgetLayerNormWeights,
756  cellLayerNormWeights,
757  outputLayerNormWeights);
758 
759  auto fbLstmLayer = serializer::CreateLstmLayer(
760  m_flatBufferBuilder,
761  fbLstmBaseLayer,
762  fbLstmDescriptor,
763  fbLstmParams);
764 
// NOTE(review): doxygen-listing gap — line 765 (the CreateAnyLayer registration
// call) is missing from this scrape.
766 }
767 
// Serialize a Maximum layer (element-wise max; no descriptor or constants).
768 void SerializerStrategy::SerializeMaximumLayer(const armnn::IConnectableLayer* layer, const char* name)
769 {
770  IgnoreUnused(name);
771 
772  auto fbMaximumBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Maximum);
773  auto fbMaximumLayer = serializer::CreateMaximumLayer(m_flatBufferBuilder, fbMaximumBaseLayer);
774 
// NOTE(review): doxygen-listing gap — line 775 (the CreateAnyLayer registration
// call) is missing from this scrape.
776 }
777 
// Serialize a Mean layer with its reduction-axes/keep-dims descriptor.
778 void SerializerStrategy::SerializeMeanLayer(const armnn::IConnectableLayer* layer,
779  const armnn::MeanDescriptor& descriptor,
780  const char* name)
781 {
782  IgnoreUnused(name);
783 
784  auto fbMeanBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Mean);
785  auto fbMeanDescriptor = serializer::CreateMeanDescriptor(m_flatBufferBuilder,
786  m_flatBufferBuilder.CreateVector(descriptor.m_Axis),
787  descriptor.m_KeepDims);
788 
789  auto fbMeanLayer = serializer::CreateMeanLayer(m_flatBufferBuilder,
790  fbMeanBaseLayer,
791  fbMeanDescriptor);
792 
// NOTE(review): doxygen-listing gap — line 793 (the CreateAnyLayer registration
// call) is missing from this scrape.
794 }
795 
// Serialize a Minimum layer (element-wise min; no descriptor or constants).
796 void SerializerStrategy::SerializeMinimumLayer(const armnn::IConnectableLayer* layer, const char* name)
797 {
798  IgnoreUnused(name);
799 
800  auto fbMinimumBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Minimum);
801  auto fbMinimumLayer = serializer::CreateMinimumLayer(m_flatBufferBuilder, fbMinimumBaseLayer);
802 
// NOTE(review): doxygen-listing gap — line 803 (the CreateAnyLayer registration
// call) is missing from this scrape.
804 }
805 
// Serialize a Merge layer (no descriptor or constants).
806 void SerializerStrategy::SerializeMergeLayer(const armnn::IConnectableLayer* layer, const char* name)
807 {
808  IgnoreUnused(name);
809 
810  auto fbMergeBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Merge);
811  auto fbMergeLayer = serializer::CreateMergeLayer(m_flatBufferBuilder, fbMergeBaseLayer);
812 
// NOTE(review): doxygen-listing gap — line 813 (the CreateAnyLayer registration
// call) is missing from this scrape.
814 }
815 
// Merger is the legacy name for Concat (MergerDescriptor aliases
// ConcatDescriptor), so serialization simply delegates to SerializeConcatLayer.
816 void SerializerStrategy::SerializeMergerLayer(const armnn::IConnectableLayer* layer,
817  const armnn::MergerDescriptor& mergerDescriptor,
818  const char* name)
819 {
820  SerializeConcatLayer(layer, mergerDescriptor, name);
821 }
822 
// Serialize a Concat layer: each view's origin coordinates are flattened into
// a UintVector, and the set of views plus axis/dimension counts form the
// OriginsDescriptor.
823 void SerializerStrategy::SerializeConcatLayer(const armnn::IConnectableLayer* layer,
824  const armnn::ConcatDescriptor& concatDescriptor,
825  const char* name)
826 {
827  IgnoreUnused(name);
828 
829  auto flatBufferConcatBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Concat);
830 
831  std::vector<flatbuffers::Offset<UintVector>> views;
832  for (unsigned int v = 0; v < concatDescriptor.GetNumViews(); ++v)
833  {
834  const uint32_t* origin = concatDescriptor.GetViewOrigin(v);
835  std::vector<uint32_t> origins;
836  for (unsigned int d = 0; d < concatDescriptor.GetNumDimensions(); ++d)
837  {
838  origins.push_back(origin[d]);
839  }
840  auto view = m_flatBufferBuilder.CreateVector(origins);
841  auto uintVector = CreateUintVector(m_flatBufferBuilder, view);
842  views.push_back(uintVector);
843  }
844 
845  auto flatBufferConcatDescriptor = CreateOriginsDescriptor(m_flatBufferBuilder,
846  concatDescriptor.GetConcatAxis(),
847  concatDescriptor.GetNumViews(),
848  concatDescriptor.GetNumDimensions(),
849  m_flatBufferBuilder.CreateVector(views));
850 
851  auto flatBufferLayer = CreateConcatLayer(m_flatBufferBuilder,
852  flatBufferConcatBaseLayer,
853  flatBufferConcatDescriptor);
854 
// NOTE(review): doxygen-listing gap — line 855 (the CreateAnyLayer registration
// call) is missing from this scrape.
856 }
857 
// Serialize a Multiplication layer (element-wise multiply; no descriptor or constants).
858 void SerializerStrategy::SerializeMultiplicationLayer(const armnn::IConnectableLayer* layer, const char* name)
859 {
860  IgnoreUnused(name);
861 
862  auto fbMultiplicationBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Multiplication);
863  auto fbMultiplicationLayer = serializer::CreateMultiplicationLayer(m_flatBufferBuilder,
864  fbMultiplicationBaseLayer);
865 
// NOTE(review): doxygen-listing gap — line 866 (the CreateAnyLayer registration
// call) is missing from this scrape.
867 }
868 
869 void SerializerStrategy::SerializePadLayer(const armnn::IConnectableLayer* layer,
870  const armnn::PadDescriptor& padDescriptor,
871  const char* name)
872 {
873  IgnoreUnused(name);
874 
875  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Pad);
876 
877  std::vector<unsigned int> padList;
878  for (auto& p: padDescriptor.m_PadList)
879  {
880  padList.push_back(p.first);
881  padList.push_back(p.second);
882  }
883 
884  auto flatBufferPadDesc = serializer::CreatePadDescriptor(m_flatBufferBuilder,
885  m_flatBufferBuilder.CreateVector(padList),
886  padDescriptor.m_PadValue);
887 
888  auto flatBufferPadLayer = serializer::CreatePadLayer(m_flatBufferBuilder,
889  flatBufferBaseLayer,
890  flatBufferPadDesc);
891 
892  CreateAnyLayer(flatBufferPadLayer.o, serializer::Layer::Layer_PadLayer);
893 }
894 
895 void SerializerStrategy::SerializePermuteLayer(const armnn::IConnectableLayer* layer,
896  const armnn::PermuteDescriptor& permuteDescriptor,
897  const char* name)
898 {
899  IgnoreUnused(name);
900 
901  // Create FlatBuffer BaseLayer
902  auto flatBufferPermuteBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Permute);
903 
904  std::vector<unsigned int> dimMappings;
905  for (unsigned int i=0; i<permuteDescriptor.m_DimMappings.GetSize(); ++i)
906  {
907  dimMappings.push_back(permuteDescriptor.m_DimMappings[i]);
908  }
909 
910  auto flatBufferPermuteDesc = serializer::CreatePermuteDescriptor(m_flatBufferBuilder,
911  m_flatBufferBuilder.CreateVector(dimMappings));
912 
913  // Create the FlatBuffer PermuteLayer
914  auto flatBufferPermuteLayer = serializer::CreatePermuteLayer(m_flatBufferBuilder,
915  flatBufferPermuteBaseLayer,
916  flatBufferPermuteDesc);
917 
918  // Add the AnyLayer to the FlatBufferLayers
919  CreateAnyLayer(flatBufferPermuteLayer.o, serializer::Layer::Layer_PermuteLayer);
920 }
921 
922 // Build FlatBuffer for Rank Layer
923 void SerializerStrategy::SerializeRankLayer(const armnn::IConnectableLayer* layer,
924  const char* name)
925 {
926  IgnoreUnused(name);
927  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Rank);
928  auto flatBufferRankLayer = serializer::CreateRankLayer(m_flatBufferBuilder, flatBufferBaseLayer);
929 
930  CreateAnyLayer(flatBufferRankLayer.o, serializer::Layer::Layer_RankLayer);
931 }
932 
// Serialize a Reduce layer: keep-dims flag, reduction axes and (presumably,
// see gap note) the reduce operation enum.
933 void SerializerStrategy::SerializeReduceLayer(const armnn::IConnectableLayer* layer,
934  const armnn::ReduceDescriptor& reduceDescriptor,
935  const char*)
936 {
937  auto fbReduceBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Reduce);
938  auto fbDescriptor = CreateReduceDescriptor(m_flatBufferBuilder,
939  reduceDescriptor.m_KeepDims,
940  m_flatBufferBuilder.CreateVector(reduceDescriptor.m_vAxis),
// NOTE(review): doxygen-listing gap — line 941 (presumably the converted
// reduceDescriptor.m_ReduceOperation argument closing the call) is missing
// from this scrape.
942  auto fbReduceLayer = serializer::CreateReduceLayer(m_flatBufferBuilder,
943  fbReduceBaseLayer,
944  fbDescriptor);
945 
// NOTE(review): doxygen-listing gap — line 946 (the CreateAnyLayer registration
// call) is missing from this scrape.
947 }
948 
949 // Build FlatBuffer for Reshape Layer
950 void SerializerStrategy::SerializeReshapeLayer(const armnn::IConnectableLayer* layer,
951  const armnn::ReshapeDescriptor& reshapeDescriptor,
952  const char* name)
953 {
954  IgnoreUnused(name);
955 
956  // Create FlatBuffer BaseLayer
957  auto flatBufferReshapeBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Reshape);
958 
959  std::vector<unsigned int> targetShape;
960  for (unsigned int i =0; i < reshapeDescriptor.m_TargetShape.GetNumDimensions(); i++)
961  {
962  targetShape.push_back(reshapeDescriptor.m_TargetShape[i]);
963  }
964 
965  auto flatBufferReshapeDesc = serializer::CreateReshapeDescriptor(m_flatBufferBuilder,
966  m_flatBufferBuilder.CreateVector(targetShape));
967 
968  // Create the FlatBuffer ReshapeLayer
969  auto flatBufferReshapeLayer = serializer::CreateReshapeLayer(m_flatBufferBuilder, flatBufferReshapeBaseLayer,
970  flatBufferReshapeDesc);
971 
972  // Add the AnyLayer to the FlatBufferLayers
973  CreateAnyLayer(flatBufferReshapeLayer.o, serializer::Layer::Layer_ReshapeLayer);
974 }
975 
976 void SerializerStrategy::SerializeResizeBilinearLayer(const armnn::IConnectableLayer* layer,
977  const armnn::ResizeBilinearDescriptor& resizeDescriptor,
978  const char* name)
979 {
980  IgnoreUnused(name);
981 
982  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_ResizeBilinear);
983 
984  auto flatBufferDescriptor =
985  CreateResizeBilinearDescriptor(m_flatBufferBuilder,
986  resizeDescriptor.m_TargetWidth,
987  resizeDescriptor.m_TargetHeight,
988  GetFlatBufferDataLayout(resizeDescriptor.m_DataLayout),
989  resizeDescriptor.m_AlignCorners,
990  resizeDescriptor.m_HalfPixelCenters);
991 
992  auto flatBufferLayer = serializer::CreateResizeBilinearLayer(m_flatBufferBuilder,
993  flatBufferBaseLayer,
994  flatBufferDescriptor);
995 
997 }
998 
999 void SerializerStrategy::SerializeResizeLayer(const armnn::IConnectableLayer* layer,
1000  const armnn::ResizeDescriptor& resizeDescriptor,
1001  const char* name)
1002 {
1003  IgnoreUnused(name);
1004 
1005  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Resize);
1006 
1007  auto flatBufferDescriptor =
1008  CreateResizeDescriptor(m_flatBufferBuilder,
1009  resizeDescriptor.m_TargetHeight,
1010  resizeDescriptor.m_TargetWidth,
1011  GetFlatBufferResizeMethod(resizeDescriptor.m_Method),
1012  GetFlatBufferDataLayout(resizeDescriptor.m_DataLayout),
1013  resizeDescriptor.m_AlignCorners,
1014  resizeDescriptor.m_HalfPixelCenters);
1015 
1016  auto flatBufferLayer = serializer::CreateResizeLayer(m_flatBufferBuilder,
1017  flatBufferBaseLayer,
1018  flatBufferDescriptor);
1019 
1021 }
1022 
1023 void SerializerStrategy::SerializeRsqrtLayer(const armnn::IConnectableLayer* layer, const char* name)
1024 {
1025  IgnoreUnused(name);
1026 
1027  auto fbRsqrtBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Rsqrt);
1028  auto fbRsqrtLayer = serializer::CreateRsqrtLayer(m_flatBufferBuilder, fbRsqrtBaseLayer);
1029 
1031 }
1032 
1033 void SerializerStrategy::SerializeSliceLayer(const armnn::IConnectableLayer* layer,
1034  const armnn::SliceDescriptor& sliceDescriptor,
1035  const char* name)
1036 {
1037  IgnoreUnused(name);
1038 
1039  auto fbSliceBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Slice);
1040  auto fbSliceDescriptor = CreateSliceDescriptor(m_flatBufferBuilder,
1041  m_flatBufferBuilder.CreateVector(sliceDescriptor.m_Begin),
1042  m_flatBufferBuilder.CreateVector(sliceDescriptor.m_Size));
1043 
1044  auto fbSliceLayer = serializer::CreateSliceLayer(m_flatBufferBuilder, fbSliceBaseLayer, fbSliceDescriptor);
1045 
1047 }
1048 
1049 // Build FlatBuffer for Softmax Layer
1050 void SerializerStrategy::SerializeSoftmaxLayer(const armnn::IConnectableLayer* layer,
1051  const armnn::SoftmaxDescriptor& softmaxDescriptor,
1052  const char* name)
1053 {
1054  IgnoreUnused(name);
1055 
1056  // Create FlatBuffer BaseLayer
1057  auto flatBufferSoftmaxBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Softmax);
1058 
1059  // Create the FlatBuffer SoftmaxDescriptor
1060  auto flatBufferSoftmaxDesc =
1061  serializer::CreateSoftmaxDescriptor(m_flatBufferBuilder, softmaxDescriptor.m_Beta);
1062 
1063  // Create the FlatBuffer SoftmaxLayer
1064  auto flatBufferSoftmaxLayer =
1065  serializer::CreateSoftmaxLayer(m_flatBufferBuilder,
1066  flatBufferSoftmaxBaseLayer,
1067  flatBufferSoftmaxDesc);
1068 
1069  CreateAnyLayer(flatBufferSoftmaxLayer.o, serializer::Layer::Layer_SoftmaxLayer);
1070 }
1071 
1072 void SerializerStrategy::SerializePooling2dLayer(const armnn::IConnectableLayer* layer,
1073  const armnn::Pooling2dDescriptor& pooling2dDescriptor,
1074  const char* name)
1075 {
1076  IgnoreUnused(name);
1077 
1078  auto fbPooling2dBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Pooling2d);
1079  auto fbPooling2dDescriptor = serializer::CreatePooling2dDescriptor(
1080  m_flatBufferBuilder,
1081  GetFlatBufferPoolingAlgorithm(pooling2dDescriptor.m_PoolType),
1082  pooling2dDescriptor.m_PadLeft,
1083  pooling2dDescriptor.m_PadRight,
1084  pooling2dDescriptor.m_PadTop,
1085  pooling2dDescriptor.m_PadBottom,
1086  pooling2dDescriptor.m_PoolWidth,
1087  pooling2dDescriptor.m_PoolHeight,
1088  pooling2dDescriptor.m_StrideX,
1089  pooling2dDescriptor.m_StrideY,
1091  GetFlatBufferPaddingMethod(pooling2dDescriptor.m_PaddingMethod),
1092  GetFlatBufferDataLayout(pooling2dDescriptor.m_DataLayout));
1093 
1094  auto fbPooling2dLayer = serializer::CreatePooling2dLayer(m_flatBufferBuilder,
1095  fbPooling2dBaseLayer,
1096  fbPooling2dDescriptor);
1097 
1099 }
1100 
1101 void SerializerStrategy::SerializePreluLayer(const armnn::IConnectableLayer* layer,
1102  const char* name)
1103 {
1104  IgnoreUnused(name);
1105 
1106  // Create FlatBuffer BaseLayer
1107  auto flatBufferPreluBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Prelu);
1108 
1109  // Create the FlatBuffer AdditionLayer
1110  auto flatBufferPreluLayer = serializer::CreatePreluLayer(m_flatBufferBuilder, flatBufferPreluBaseLayer);
1111 
1112  // Add the AnyLayer to the FlatBufferLayers
1113  CreateAnyLayer(flatBufferPreluLayer.o, serializer::Layer::Layer_PreluLayer);
1114 }
1115 
1116 void SerializerStrategy::SerializeQuantizeLayer(const armnn::IConnectableLayer *layer, const char *name)
1117 {
1118  IgnoreUnused(name);
1119 
1120  auto fbQuantizeBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Quantize);
1121  auto fbQuantizeLayer = serializer::CreateQuantizeLayer(m_flatBufferBuilder,
1122  fbQuantizeBaseLayer);
1124 }
1125 
1126 // Build FlatBuffer for FullyConnected Layer
1127 void SerializerStrategy::SerializeFullyConnectedLayer(const armnn::IConnectableLayer* layer,
1128  const armnn::FullyConnectedDescriptor& fullyConnectedDescriptor,
1129  const std::vector<armnn::ConstTensor>& constants,
1130  const char*)
1131 {
1132  // Create FlatBuffer BaseLayer
1133  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_FullyConnected);
1134 
1135  // Create FlatBuffer FullyConnectedDescriptor
1136  auto flatBufferDescriptor =
1137  serializer::CreateFullyConnectedDescriptor(m_flatBufferBuilder,
1138  fullyConnectedDescriptor.m_BiasEnabled,
1139  fullyConnectedDescriptor.m_TransposeWeightMatrix,
1140  fullyConnectedDescriptor.m_ConstantWeights);
1141 
1142  // Create FlatBuffer weights data
1143  flatbuffers::Offset<serializer::ConstTensor> flatBufferWeights;
1144  // Create FlatBuffer bias data
1145  flatbuffers::Offset<serializer::ConstTensor> flatBufferBiases;
1146  if (fullyConnectedDescriptor.m_ConstantWeights && !constants.empty())
1147  {
1148  armnn::ConstTensor weights = constants.at(0);
1149  flatBufferWeights = CreateConstTensorInfo(weights);
1150 
1151  if (fullyConnectedDescriptor.m_BiasEnabled)
1152  {
1153  armnn::ConstTensor biases = constants.at(1);
1154  flatBufferBiases = CreateConstTensorInfo(biases);
1155  }
1156  }
1157 
1158  // Create FlatBuffer FullyConnectedLayer
1159  auto flatBufferLayer = serializer::CreateFullyConnectedLayer(m_flatBufferBuilder,
1160  flatBufferBaseLayer,
1161  flatBufferDescriptor,
1162  flatBufferWeights,
1163  flatBufferBiases);
1164 
1165  // Add created FullyConnectedLayer to the FlatBufferLayers
1167 }
1168 
1169 // Build FlatBuffer for SpaceToBatchNd Layer
1170 void SerializerStrategy::SerializeSpaceToBatchNdLayer(const armnn::IConnectableLayer* layer,
1171  const armnn::SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor,
1172  const char* name)
1173 {
1174  IgnoreUnused(name);
1175 
1176  // Create FlatBuffer BaseLayer
1177  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_SpaceToBatchNd);
1178 
1179  std::vector<unsigned int> padList;
1180  padList.reserve(spaceToBatchNdDescriptor.m_PadList.size()*2);
1181  for (auto& pad : spaceToBatchNdDescriptor.m_PadList)
1182  {
1183  padList.push_back(pad.first);
1184  padList.push_back(pad.second);
1185  }
1186 
1187  auto flatBufferDescriptor =
1188  CreateSpaceToBatchNdDescriptor(m_flatBufferBuilder,
1189  m_flatBufferBuilder.CreateVector(spaceToBatchNdDescriptor.m_BlockShape),
1190  m_flatBufferBuilder.CreateVector(padList),
1191  GetFlatBufferDataLayout(spaceToBatchNdDescriptor.m_DataLayout));
1192 
1193  auto flatBufferLayer = serializer::CreateSpaceToBatchNdLayer(m_flatBufferBuilder,
1194  flatBufferBaseLayer,
1195  flatBufferDescriptor);
1196 
1198 }
1199 
1200 // Build FlatBuffer for SpaceToDepthLayer
1201 void SerializerStrategy::SerializeSpaceToDepthLayer(const armnn::IConnectableLayer* layer,
1202  const armnn::SpaceToDepthDescriptor& spaceToDepthDescriptor,
1203  const char* name)
1204 {
1205  IgnoreUnused(name);
1206 
1207  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_SpaceToDepth);
1208  auto flatBufferDescriptor =
1209  CreateSpaceToDepthDescriptor(m_flatBufferBuilder,
1210  spaceToDepthDescriptor.m_BlockSize,
1211  GetFlatBufferDataLayout(spaceToDepthDescriptor.m_DataLayout));
1212 
1213  auto flatBufferLayer = serializer::CreateSpaceToDepthLayer(m_flatBufferBuilder,
1214  flatBufferBaseLayer,
1215  flatBufferDescriptor);
1216 
1218 }
1219 
1220 // Build FlatBuffer for Splitter Layer
1221 void SerializerStrategy::SerializeSplitterLayer(const armnn::IConnectableLayer* layer,
1222  const armnn::ViewsDescriptor& viewsDescriptor,
1223  const char* name)
1224 {
1225  IgnoreUnused(name);
1226 
1227  // Create FlatBuffer ViewOrigins
1228  std::vector<flatbuffers::Offset<UintVector>> flatBufferViewOrigins;
1229  flatBufferViewOrigins.reserve(viewsDescriptor.GetNumViews());
1230 
1231  for(unsigned int vIdx = 0; vIdx < viewsDescriptor.GetNumViews(); ++vIdx)
1232  {
1233  std::vector<uint32_t> viewOrigin;
1234  viewOrigin.reserve(viewsDescriptor.GetNumDimensions());
1235 
1236  // Copy vector
1237  for(unsigned int dIdx = 0; dIdx < viewsDescriptor.GetNumDimensions(); ++dIdx)
1238  {
1239  viewOrigin.push_back(viewsDescriptor.GetViewOrigin(vIdx)[dIdx]);
1240  }
1241 
1242  flatBufferViewOrigins.push_back(CreateUintVector(m_flatBufferBuilder,
1243  m_flatBufferBuilder.CreateVector(viewOrigin)));
1244  }
1245 
1246  // Create FlatBuffer OriginsDescriptor
1247  auto flatBufferOriginDescriptor = CreateOriginsDescriptor(m_flatBufferBuilder,
1248  viewsDescriptor.GetOrigins().GetConcatAxis(),
1249  viewsDescriptor.GetOrigins().GetNumViews(),
1250  viewsDescriptor.GetOrigins().GetNumDimensions(),
1251  m_flatBufferBuilder.CreateVector(flatBufferViewOrigins));
1252 
1253  // Create FlatBuffer ViewOrigins
1254  std::vector<flatbuffers::Offset<UintVector>> flatBufferViewSizes;
1255  flatBufferViewSizes.reserve(viewsDescriptor.GetNumViews());
1256 
1257  for(unsigned int vIdx = 0; vIdx < viewsDescriptor.GetNumViews(); ++vIdx)
1258  {
1259  std::vector<uint32_t> viewSize;
1260  viewSize.reserve(viewsDescriptor.GetNumDimensions());
1261 
1262  // Copy vector
1263  for(unsigned int dIdx = 0; dIdx < viewsDescriptor.GetNumDimensions(); ++dIdx)
1264  {
1265  viewSize.push_back(viewsDescriptor.GetViewSizes(vIdx)[dIdx]);
1266  }
1267 
1268  flatBufferViewSizes.push_back(CreateUintVector(m_flatBufferBuilder,
1269  m_flatBufferBuilder.CreateVector(viewSize)));
1270  }
1271 
1272  // Create FlatBuffer ViewsDescriptor
1273  auto flatBufferViewsDescriptor = CreateViewsDescriptor(m_flatBufferBuilder,
1274  flatBufferOriginDescriptor,
1275  m_flatBufferBuilder.CreateVector(flatBufferViewSizes));
1276 
1277  // Create FlatBuffer BaseLayer
1278  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Splitter);
1279 
1280  auto flatBufferSplitterLayer = serializer::CreateSplitterLayer(m_flatBufferBuilder,
1281  flatBufferBaseLayer,
1282  flatBufferViewsDescriptor);
1283 
1284  CreateAnyLayer(flatBufferSplitterLayer.o, serializer::Layer::Layer_SplitterLayer);
1285 }
1286 
1287 void SerializerStrategy::SerializeNormalizationLayer(const armnn::IConnectableLayer* layer,
1288  const armnn::NormalizationDescriptor& descriptor,
1289  const char* name)
1290 {
1291  IgnoreUnused(name);
1292 
1293  auto fbNormalizationBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Normalization);
1294 
1295  auto fbNormalizationDescriptor = serializer::CreateNormalizationDescriptor(
1296  m_flatBufferBuilder,
1299  descriptor.m_NormSize,
1300  descriptor.m_Alpha,
1301  descriptor.m_Beta,
1302  descriptor.m_K,
1304 
1305  auto flatBufferLayer = serializer::CreateNormalizationLayer(m_flatBufferBuilder,
1306  fbNormalizationBaseLayer,
1307  fbNormalizationDescriptor);
1308 
1310 }
1311 
1312 void SerializerStrategy::SerializeStackLayer(const armnn::IConnectableLayer* layer,
1313  const armnn::StackDescriptor& stackDescriptor,
1314  const char* name)
1315 {
1316  IgnoreUnused(name);
1317 
1318  auto stackBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Stack);
1319 
1320  std::vector<unsigned int> inputShape;
1321  for (unsigned int i =0; i < stackDescriptor.m_InputShape.GetNumDimensions(); i++)
1322  {
1323  inputShape.push_back(stackDescriptor.m_InputShape[i]);
1324  }
1325 
1326  auto flatBufferStackDescriptor = CreateStackDescriptor(m_flatBufferBuilder,
1327  stackDescriptor.m_Axis,
1328  stackDescriptor.m_NumInputs,
1329  m_flatBufferBuilder.CreateVector(inputShape));
1330 
1331  auto stackLayer = serializer::CreateStackLayer(m_flatBufferBuilder, stackBaseLayer, flatBufferStackDescriptor);
1333 }
1334 
1335 void SerializerStrategy::SerializeStandInLayer(const armnn::IConnectableLayer *layer,
1336  const armnn::StandInDescriptor& standInDescriptor,
1337  const char *name)
1338 {
1339  IgnoreUnused(name);
1340 
1341  auto fbDescriptor = serializer::CreateStandInDescriptor(m_flatBufferBuilder,
1342  standInDescriptor.m_NumInputs,
1343  standInDescriptor.m_NumOutputs);
1344 
1345  auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_StandIn);
1346  auto fbLayer = serializer::CreateStandInLayer(m_flatBufferBuilder, fbBaseLayer, fbDescriptor);
1347 
1349 }
1350 
1351 void SerializerStrategy::SerializeStridedSliceLayer(const armnn::IConnectableLayer* layer,
1352  const armnn::StridedSliceDescriptor& stridedSliceDescriptor,
1353  const char* name)
1354 {
1355  IgnoreUnused(name);
1356 
1357  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_StridedSlice);
1358 
1359  auto flatBufferDescriptor =
1360  CreateStridedSliceDescriptor(m_flatBufferBuilder,
1361  m_flatBufferBuilder.CreateVector(stridedSliceDescriptor.m_Begin),
1362  m_flatBufferBuilder.CreateVector(stridedSliceDescriptor.m_End),
1363  m_flatBufferBuilder.CreateVector(stridedSliceDescriptor.m_Stride),
1364  stridedSliceDescriptor.m_BeginMask,
1365  stridedSliceDescriptor.m_EndMask,
1366  stridedSliceDescriptor.m_ShrinkAxisMask,
1367  stridedSliceDescriptor.m_EllipsisMask,
1368  stridedSliceDescriptor.m_NewAxisMask,
1369  GetFlatBufferDataLayout(stridedSliceDescriptor.m_DataLayout));
1370 
1371  auto flatBufferLayer = serializer::CreateStridedSliceLayer(m_flatBufferBuilder,
1372  flatBufferBaseLayer,
1373  flatBufferDescriptor);
1374 
1376 }
1377 
1378 void SerializerStrategy::SerializeSubtractionLayer(const armnn::IConnectableLayer* layer, const char* name)
1379 {
1380  IgnoreUnused(name);
1381 
1382  auto fbSubtractionBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Subtraction);
1383  auto fbSubtractionLayer = serializer::CreateSubtractionLayer(m_flatBufferBuilder, fbSubtractionBaseLayer);
1384 
1386 }
1387 
1388 void SerializerStrategy::SerializeSwitchLayer(const armnn::IConnectableLayer* layer, const char* name)
1389 {
1390  IgnoreUnused(name);
1391 
1392  auto fbSwitchBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Switch);
1393  auto fbSwitchLayer = serializer::CreateSwitchLayer(m_flatBufferBuilder, fbSwitchBaseLayer);
1394 
1396 }
1397 
1398 void SerializerStrategy::SerializeTransposeConvolution2dLayer(
1399  const armnn::IConnectableLayer* layer,
1400  const armnn::TransposeConvolution2dDescriptor& descriptor,
1401  const std::vector<armnn::ConstTensor>& constants,
1402  const char* name)
1403 {
1404  IgnoreUnused(name);
1405 
1406  const armnn::ConstTensor& weights = constants.at(0);
1407 
1409  auto fbDescriptor = CreateTransposeConvolution2dDescriptor(m_flatBufferBuilder,
1410  descriptor.m_PadLeft,
1411  descriptor.m_PadRight,
1412  descriptor.m_PadTop,
1413  descriptor.m_PadBottom,
1414  descriptor.m_StrideX,
1415  descriptor.m_StrideY,
1416  descriptor.m_BiasEnabled,
1418 
1419  // weights & biases
1420  auto fbWeightsConstTensorInfo = CreateConstTensorInfo(weights);
1421  flatbuffers::Offset<serializer::ConstTensor> fbBiasesConstTensorInfo;
1422  if (constants.size() > 1)
1423  {
1424  const armnn::ConstTensor& biases = constants.at(1);
1425  fbBiasesConstTensorInfo = CreateConstTensorInfo(biases);
1426  }
1427 
1428  auto fbLayer = CreateTransposeConvolution2dLayer(m_flatBufferBuilder,
1429  fbBaseLayer,
1430  fbDescriptor,
1431  fbWeightsConstTensorInfo,
1432  fbBiasesConstTensorInfo);
1433 
1435 }
1436 
1437 void SerializerStrategy::SerializeTransposeLayer(const armnn::IConnectableLayer* layer,
1438  const armnn::TransposeDescriptor& descriptor,
1439  const char* name)
1440 {
1441  IgnoreUnused(name);
1442 
1443  // Create FlatBuffer BaseLayer
1444  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Transpose);
1445 
1446  std::vector<unsigned int> dimMappings;
1447  for (unsigned int i=0; i<descriptor.m_DimMappings.GetSize(); ++i)
1448  {
1449  dimMappings.push_back(descriptor.m_DimMappings[i]);
1450  }
1451 
1452  auto flatBufferDesc = serializer::CreateTransposeDescriptor(m_flatBufferBuilder,
1453  m_flatBufferBuilder.CreateVector(dimMappings));
1454 
1455  // Create the FlatBuffer TransposeLayer
1456  auto flatBufferLayer = serializer::CreateTransposeLayer(m_flatBufferBuilder,
1457  flatBufferBaseLayer,
1458  flatBufferDesc);
1459 
1460  // Add the AnyLayer to the FlatBufferLayers
1462 }
1463 
1464 void SerializerStrategy::SerializeQLstmLayer(const armnn::IConnectableLayer* layer,
1465  const armnn::QLstmDescriptor& descriptor,
1466  const std::vector<armnn::ConstTensor>& constants,
1467  const char* name)
1468 {
1469  IgnoreUnused(name);
1470 
1471  auto fbQLstmBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_QLstm);
1472 
1473  auto fbQLstmDescriptor = serializer::CreateQLstmDescriptor(
1474  m_flatBufferBuilder,
1475  descriptor.m_CifgEnabled,
1476  descriptor.m_PeepholeEnabled,
1477  descriptor.m_ProjectionEnabled,
1478  descriptor.m_LayerNormEnabled,
1479  descriptor.m_CellClip,
1480  descriptor.m_ProjectionClip,
1481  descriptor.m_InputIntermediateScale,
1482  descriptor.m_ForgetIntermediateScale,
1483  descriptor.m_CellIntermediateScale,
1484  descriptor.m_OutputIntermediateScale,
1485  descriptor.m_HiddenStateZeroPoint,
1486  descriptor.m_HiddenStateScale
1487  );
1488 
1489  // Index for constants vector
1490  std::size_t i = 0;
1491 
1492  // Mandatory params
1493  auto inputToForgetWeights = CreateConstTensorInfo(constants[i++]); //InputToForgetWeights
1494  auto inputToCellWeights = CreateConstTensorInfo(constants[i++]); //InputToCellWeights
1495  auto inputToOutputWeights = CreateConstTensorInfo(constants[i++]); //InputToOutputWeights
1496  auto recurrentToForgetWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToForgetWeights
1497  auto recurrentToCellWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToCellWeights
1498  auto recurrentToOutputWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToOutputWeights
1499  auto forgetGateBias = CreateConstTensorInfo(constants[i++]); //ForgetGateBias
1500  auto cellBias = CreateConstTensorInfo(constants[i++]); //CellBias
1501  auto outputGateBias = CreateConstTensorInfo(constants[i++]); //OutputGateBias
1502 
1503  // CIFG
1504  flatbuffers::Offset<serializer::ConstTensor> inputToInputWeights;
1505  flatbuffers::Offset<serializer::ConstTensor> recurrentToInputWeights;
1506  flatbuffers::Offset<serializer::ConstTensor> inputGateBias;
1507 
1508  if (!descriptor.m_CifgEnabled)
1509  {
1510  inputToInputWeights = CreateConstTensorInfo(constants[i++]); //InputToInputWeights
1511  recurrentToInputWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToInputWeights
1512  inputGateBias = CreateConstTensorInfo(constants[i++]); //InputGateBias
1513  }
1514 
1515  // Peephole
1516  flatbuffers::Offset<serializer::ConstTensor> cellToInputWeights;
1517  flatbuffers::Offset<serializer::ConstTensor> cellToForgetWeights;
1518  flatbuffers::Offset<serializer::ConstTensor> cellToOutputWeights;
1519 
1520  if (descriptor.m_PeepholeEnabled)
1521  {
1522  if (!descriptor.m_CifgEnabled)
1523  {
1524  cellToInputWeights = CreateConstTensorInfo(constants[i++]); //CellToInputWeights
1525  }
1526  cellToForgetWeights = CreateConstTensorInfo(constants[i++]); //CellToForgetWeights
1527  cellToOutputWeights = CreateConstTensorInfo(constants[i++]); //CellToOutputWeights
1528  }
1529 
1530  // Projection
1531  flatbuffers::Offset<serializer::ConstTensor> projectionWeights;
1532  flatbuffers::Offset<serializer::ConstTensor> projectionBias;
1533 
1534  if (descriptor.m_ProjectionEnabled)
1535  {
1536  projectionWeights = CreateConstTensorInfo(constants[i++]); //ProjectionWeights
1537  projectionBias = CreateConstTensorInfo(constants[i++]); //ProjectionBias
1538  }
1539 
1540  // Layer norm
1541  flatbuffers::Offset<serializer::ConstTensor> inputLayerNormWeights;
1542  flatbuffers::Offset<serializer::ConstTensor> forgetLayerNormWeights;
1543  flatbuffers::Offset<serializer::ConstTensor> cellLayerNormWeights;
1544  flatbuffers::Offset<serializer::ConstTensor> outputLayerNormWeights;
1545 
1546  if (descriptor.m_LayerNormEnabled)
1547  {
1548  if (!descriptor.m_CifgEnabled)
1549  {
1550  inputLayerNormWeights = CreateConstTensorInfo(constants[i++]); //InputLayerNormWeights
1551  }
1552  forgetLayerNormWeights = CreateConstTensorInfo(constants[i++]); //ForgetLayerNormWeights
1553  cellLayerNormWeights = CreateConstTensorInfo(constants[i++]); //CellLayerNormWeights
1554  outputLayerNormWeights = CreateConstTensorInfo(constants[i++]); //OutputLayerNormWeights
1555  }
1556 
1557  auto fbQLstmParams = serializer::CreateQLstmInputParams(
1558  m_flatBufferBuilder,
1559  inputToForgetWeights,
1560  inputToCellWeights,
1561  inputToOutputWeights,
1562  recurrentToForgetWeights,
1563  recurrentToCellWeights,
1564  recurrentToOutputWeights,
1565  forgetGateBias,
1566  cellBias,
1567  outputGateBias,
1568  inputToInputWeights,
1569  recurrentToInputWeights,
1570  inputGateBias,
1571  projectionWeights,
1572  projectionBias,
1573  cellToInputWeights,
1574  cellToForgetWeights,
1575  cellToOutputWeights,
1576  inputLayerNormWeights,
1577  forgetLayerNormWeights,
1578  cellLayerNormWeights,
1579  outputLayerNormWeights);
1580 
1581  auto fbQLstmLayer = serializer::CreateQLstmLayer(
1582  m_flatBufferBuilder,
1583  fbQLstmBaseLayer,
1584  fbQLstmDescriptor,
1585  fbQLstmParams);
1586 
1588 }
1589 
1590 void SerializerStrategy::SerializeQuantizedLstmLayer(const armnn::IConnectableLayer* layer,
1591  const std::vector<armnn::ConstTensor>& constants,
1592  const char* name)
1593 {
1594  IgnoreUnused(name);
1595 
1596  auto fbQuantizedLstmBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_QuantizedLstm);
1597 
1598  // index for constants vector
1599  size_t i = 0;
1600 
1601  // Get input parameters
1602  auto inputToInputWeights = CreateConstTensorInfo(constants[i++]);
1603  auto inputToForgetWeights = CreateConstTensorInfo(constants[i++]);
1604  auto inputToCellWeights = CreateConstTensorInfo(constants[i++]);
1605  auto inputToOutputWeights = CreateConstTensorInfo(constants[i++]);
1606 
1607  auto recurrentToInputWeights = CreateConstTensorInfo(constants[i++]);
1608  auto recurrentToForgetWeights = CreateConstTensorInfo(constants[i++]);
1609  auto recurrentToCellWeights = CreateConstTensorInfo(constants[i++]);
1610  auto recurrentToOutputWeights = CreateConstTensorInfo(constants[i++]);
1611 
1612  auto inputGateBias = CreateConstTensorInfo(constants[i++]);
1613  auto forgetGateBias = CreateConstTensorInfo(constants[i++]);
1614  auto cellBias = CreateConstTensorInfo(constants[i++]);
1615  auto outputGateBias = CreateConstTensorInfo(constants[i++]);
1616 
1617  auto fbQuantizedLstmParams = serializer::CreateQuantizedLstmInputParams(
1618  m_flatBufferBuilder,
1619  inputToInputWeights,
1620  inputToForgetWeights,
1621  inputToCellWeights,
1622  inputToOutputWeights,
1623  recurrentToInputWeights,
1624  recurrentToForgetWeights,
1625  recurrentToCellWeights,
1626  recurrentToOutputWeights,
1627  inputGateBias,
1628  forgetGateBias,
1629  cellBias,
1630  outputGateBias);
1631 
1632  auto fbQuantizedLstmLayer = serializer::CreateQuantizedLstmLayer(
1633  m_flatBufferBuilder,
1634  fbQuantizedLstmBaseLayer,
1635  fbQuantizedLstmParams);
1636 
1638 }
1639 
1640 fb::Offset<serializer::LayerBase> SerializerStrategy::CreateLayerBase(const IConnectableLayer* layer,
1641  const serializer::LayerType layerType)
1642 {
1643 
1644  uint32_t fbIndex = GetSerializedId(layer->GetGuid());
1645 
1646  std::vector<fb::Offset<serializer::InputSlot>> inputSlots = CreateInputSlots(layer);
1647  std::vector<fb::Offset<serializer::OutputSlot>> outputSlots = CreateOutputSlots(layer);
1648 
1649  return serializer::CreateLayerBase(m_flatBufferBuilder,
1650  fbIndex,
1651  m_flatBufferBuilder.CreateString(layer->GetName()),
1652  layerType,
1653  m_flatBufferBuilder.CreateVector(inputSlots),
1654  m_flatBufferBuilder.CreateVector(outputSlots));
1655 }
1656 
1657 void SerializerStrategy::CreateAnyLayer(const flatbuffers::Offset<void>& layer, const serializer::Layer serializerLayer)
1658 {
1659 
1660  auto anyLayer = armnnSerializer::CreateAnyLayer(m_flatBufferBuilder, serializerLayer, layer);
1661  m_serializedLayers.push_back(anyLayer);
1662 }
1663 
1664 template <typename T>
1665 flatbuffers::Offset<flatbuffers::Vector<T>> SerializerStrategy::CreateDataVector(const void* memory, unsigned int size)
1666 {
1667  const T* buffer = reinterpret_cast<const T*>(memory);
1668  std::vector<T> vector(buffer, buffer + (size / sizeof(T)));
1669  auto fbVector = m_flatBufferBuilder.CreateVector(vector);
1670  return fbVector;
1671 }
1672 
1673 flatbuffers::Offset<TensorInfo> SerializerStrategy::CreateTensorInfo(const armnn::TensorInfo& tensorInfo)
1674 {
1675  // Get the dimensions
1676  std::vector<unsigned int> shape;
1677  std::vector<bool> specificity;
1678  // This assumes that the TensorShape constructors have ensured that the size of m_DimensionsSpecificity
1679  // matches the size of dimensions.
1680  for(unsigned int dim = 0; dim < tensorInfo.GetShape().GetNumDimensions(); ++dim)
1681  {
1682  specificity.push_back(tensorInfo.GetShape().GetDimensionSpecificity(dim));
1683 
1684  if (tensorInfo.GetShape().GetDimensionSpecificity(dim))
1685  {
1686  shape.push_back(tensorInfo.GetShape()[dim]);
1687  }
1688  else
1689  {
1690  shape.push_back(0);
1691  }
1692  }
1693 
1694  if (tensorInfo.HasPerAxisQuantization())
1695  {
1696  // Create FlatBuffer TensorInfo
1697  auto flatBufferTensorInfo =
1698  serializer::CreateTensorInfo(m_flatBufferBuilder,
1699  m_flatBufferBuilder.CreateVector(shape),
1700  GetFlatBufferDataType(tensorInfo.GetDataType()),
1701  tensorInfo.GetQuantizationScales()[0],
1702  tensorInfo.GetQuantizationOffset(),
1703  m_flatBufferBuilder.CreateVector(tensorInfo.GetQuantizationScales()),
1704  tensorInfo.GetQuantizationDim().value(),
1705  static_cast<unsigned int>
1706  (tensorInfo.GetShape().GetDimensionality()),
1707  m_flatBufferBuilder.CreateVector(specificity));
1708  return flatBufferTensorInfo;
1709  }
1710 
1711  // Create FlatBuffer TensorInfo
1712  auto flatBufferTensorInfo = serializer::CreateTensorInfo(m_flatBufferBuilder,
1713  m_flatBufferBuilder.CreateVector(shape),
1714  GetFlatBufferDataType(tensorInfo.GetDataType()),
1715  tensorInfo.GetQuantizationScale(),
1716  tensorInfo.GetQuantizationOffset(),
1717  0,
1718  0,
1719  static_cast<unsigned int>
1720  (tensorInfo.GetShape().GetDimensionality()),
1721  m_flatBufferBuilder.CreateVector(specificity));
1722  return flatBufferTensorInfo;
1723 }
1724 
1725 flatbuffers::Offset<serializer::ConstTensor>
1726  SerializerStrategy::CreateConstTensorInfo(const armnn::ConstTensor& constTensor)
1727 {
1728  armnn::TensorInfo tensorInfo = constTensor.GetInfo();
1729 
1730  flatbuffers::Offset<void> fbPayload;
1731 
1732  switch (tensorInfo.GetDataType())
1733  {
1735  {
1736  auto fbVector = CreateDataVector<int64_t>(constTensor.GetMemoryArea(), constTensor.GetNumBytes());
1737  flatbuffers::Offset<serializer::LongData> flatBuffersData = serializer::CreateLongData(
1738  m_flatBufferBuilder,
1739  fbVector);
1740  fbPayload = flatBuffersData.o;
1741  break;
1742  }
1745  {
1746  auto fbVector = CreateDataVector<int32_t>(constTensor.GetMemoryArea(), constTensor.GetNumBytes());
1747  flatbuffers::Offset<serializer::IntData> flatBuffersData = serializer::CreateIntData(
1748  m_flatBufferBuilder,
1749  fbVector);
1750  fbPayload = flatBuffersData.o;
1751  break;
1752  }
1756  {
1757  auto fbVector = CreateDataVector<int16_t>(constTensor.GetMemoryArea(), constTensor.GetNumBytes());
1758  flatbuffers::Offset<serializer::ShortData> flatBuffersData = serializer::CreateShortData(
1759  m_flatBufferBuilder,
1760  fbVector);
1761  fbPayload = flatBuffersData.o;
1762  break;
1763  }
1768  default:
1769  {
1770  auto fbVector = CreateDataVector<int8_t>(constTensor.GetMemoryArea(), constTensor.GetNumBytes());
1771  flatbuffers::Offset<serializer::ByteData> flatBuffersData = serializer::CreateByteData(
1772  m_flatBufferBuilder,
1773  fbVector);
1774  fbPayload = flatBuffersData.o;
1775  }
1776  }
1777  flatbuffers::Offset<serializer::ConstTensor> flatBufferConstTensor = serializer::CreateConstTensor(
1778  m_flatBufferBuilder,
1779  CreateTensorInfo(tensorInfo),
1781  fbPayload);
1782  return flatBufferConstTensor;
1783 }
1784 
1785 flatbuffers::Offset<armnnSerializer::FeatureCompatibilityVersions> SerializerStrategy::GetVersionTable()
1786 {
1787  flatbuffers::Offset<armnnSerializer::FeatureCompatibilityVersions> versionsTable =
1789  m_flatBufferBuilder,
1790  1 // Binding ids scheme version
1791  );
1792  return versionsTable;
1793 }
1794 
1795 std::vector<fb::Offset<serializer::InputSlot>>
1796  SerializerStrategy::CreateInputSlots(const armnn::IConnectableLayer* layer)
1797 {
1798  std::vector<fb::Offset<serializer::InputSlot>> inputSlots;
1799 
1800  // Get the InputSlots
1801  for (unsigned int slotIndex = 0; slotIndex<layer->GetNumInputSlots(); ++slotIndex)
1802  {
1803  const IInputSlot& inputSlot = layer->GetInputSlot(slotIndex);
1804 
1805  // Get the Connection for the InputSlot
1806  const IOutputSlot* connection = inputSlot.GetConnection();
1807 
1808  // Create FlatBuffer Connection
1809  serializer::Connection conn(GetSerializedId(inputSlot.GetConnection()->GetOwningLayerGuid()),
1810  connection->CalculateIndexOnOwner());
1811  // Create FlatBuffer InputSlot
1812  inputSlots.push_back(serializer::CreateInputSlot(m_flatBufferBuilder, slotIndex, &conn));
1813  }
1814  return inputSlots;
1815 }
1816 
1817 std::vector<fb::Offset<serializer::OutputSlot>>
1818  SerializerStrategy::CreateOutputSlots(const armnn::IConnectableLayer* layer)
1819 {
1820  std::vector<fb::Offset<serializer::OutputSlot>> outputSlots;
1821 
1822  // Get the OutputSlots
1823  for (unsigned int slotIndex = 0; slotIndex < layer->GetNumOutputSlots(); ++slotIndex)
1824  {
1825  const IOutputSlot& outputSlot = layer->GetOutputSlot(slotIndex);
1826  const armnn::TensorInfo& tensorInfo = outputSlot.GetTensorInfo();
1827 
1828  // Create FlatBuffer Outputslot
1829  outputSlots.push_back(serializer::CreateOutputSlot(m_flatBufferBuilder,
1830  slotIndex,
1831  CreateTensorInfo(tensorInfo)));
1832  }
1833  return outputSlots;
1834 }
1835 
1837  const BaseDescriptor& descriptor,
1838  const std::vector<armnn::ConstTensor>& constants,
1839  const char* name,
1840  const armnn::LayerBindingId id)
1841 {
1842  IgnoreUnused(constants);
1843 
1844  switch (layer->GetType())
1845  {
1847  {
1848  const armnn::ActivationDescriptor& layerDescriptor =
1849  static_cast<const armnn::ActivationDescriptor&>(descriptor);
1850  SerializeActivationLayer(layer, layerDescriptor, name);
1851  break;
1852  }
1854  {
1855  SerializeAdditionLayer(layer, name);
1856  break;
1857  }
1859  {
1860  const armnn::ArgMinMaxDescriptor& layerDescriptor =
1861  static_cast<const armnn::ArgMinMaxDescriptor&>(descriptor);
1862  SerializeArgMinMaxLayer(layer, layerDescriptor, name);
1863  break;
1864  }
1866  {
1867  const armnn::BatchNormalizationDescriptor& layerDescriptor =
1868  static_cast<const armnn::BatchNormalizationDescriptor&>(descriptor);
1869  SerializeBatchNormalizationLayer(layer,
1870  layerDescriptor,
1871  constants,
1872  name);
1873  break;
1874  }
1876  {
1877  const armnn::BatchToSpaceNdDescriptor& layerDescriptor =
1878  static_cast<const armnn::BatchToSpaceNdDescriptor&>(descriptor);
1879  SerializeBatchToSpaceNdLayer(layer,
1880  layerDescriptor,
1881  name);
1882  break;
1883  }
1884  case armnn::LayerType::Cast :
1885  {
1886  SerializeCastLayer(layer, name);
1887  break;
1888  }
1890  {
1891  const armnn::ComparisonDescriptor& layerDescriptor =
1892  static_cast<const armnn::ComparisonDescriptor&>(descriptor);
1893  SerializeComparisonLayer(layer,
1894  layerDescriptor,
1895  name);
1896  break;
1897  }
1899  {
1900  const armnn::ConcatDescriptor& layerDescriptor =
1901  static_cast<const armnn::ConcatDescriptor&>(descriptor);
1902  SerializeConcatLayer(layer,
1903  layerDescriptor,
1904  name);
1905  break;
1906  }
1908  {
1909  SerializeConstantLayer(layer,
1910  constants,
1911  name);
1912  break;
1913  }
1915  {
1916  const armnn::Convolution2dDescriptor& layerDescriptor =
1917  static_cast<const armnn::Convolution2dDescriptor&>(descriptor);
1918  SerializeConvolution2dLayer(layer,
1919  layerDescriptor,
1920  constants,
1921  name);
1922  break;
1923  }
1925  {
1926  const armnn::DepthToSpaceDescriptor& layerDescriptor =
1927  static_cast<const armnn::DepthToSpaceDescriptor&>(descriptor);
1928  SerializeDepthToSpaceLayer(layer,
1929  layerDescriptor,
1930  name);
1931  break;
1932  }
1934  {
1935  const armnn::DepthwiseConvolution2dDescriptor& layerDescriptor =
1936  static_cast<const armnn::DepthwiseConvolution2dDescriptor&>(descriptor);
1937  SerializeDepthwiseConvolution2dLayer(layer,
1938  layerDescriptor,
1939  constants,
1940  name);
1941  break;
1942  }
1944  {
1945  SerializeDequantizeLayer(layer,
1946  name);
1947  break;
1948  }
1950  {
1951  const armnn::DetectionPostProcessDescriptor& layerDescriptor =
1952  static_cast<const armnn::DetectionPostProcessDescriptor&>(descriptor);
1953  SerializeDetectionPostProcessLayer(layer, layerDescriptor, constants, name);
1954  break;
1955  }
1957  {
1958  SerializeDivisionLayer(layer, name);
1959  break;
1960  }
1962  {
1963  const armnn::ElementwiseUnaryDescriptor& layerDescriptor =
1964  static_cast<const armnn::ElementwiseUnaryDescriptor&>(descriptor);
1965  SerializeElementwiseUnaryLayer(layer, layerDescriptor, name);
1966  break;
1967  }
1968  case armnn::LayerType::Fill :
1969  {
1970  const armnn::FillDescriptor& layerDescriptor =
1971  static_cast<const armnn::FillDescriptor&>(descriptor);
1972  SerializeFillLayer(layer, layerDescriptor, name);
1973  break;
1974  }
1976  {
1977  SerializeFloorLayer(layer, name);
1978  break;
1979  }
1981  {
1982  const armnn::FullyConnectedDescriptor& layerDescriptor =
1983  static_cast<const armnn::FullyConnectedDescriptor&>(descriptor);
1984  SerializeFullyConnectedLayer(layer, layerDescriptor, constants, name);
1985  break;
1986  }
1988  {
1989  const armnn::GatherDescriptor& layerDescriptor =
1990  static_cast<const armnn::GatherDescriptor&>(descriptor);
1991  SerializeGatherLayer(layer, layerDescriptor, name);
1992  break;
1993  }
1995  {
1996  SerializeInputLayer(layer, id, name);
1997  break;
1998  }
2000  {
2001  const armnn::InstanceNormalizationDescriptor& layerDescriptor =
2002  static_cast<const armnn::InstanceNormalizationDescriptor&>(descriptor);
2003  SerializeInstanceNormalizationLayer(layer, layerDescriptor, name);
2004  break;
2005  }
2007  {
2008  const armnn::L2NormalizationDescriptor& layerDescriptor =
2009  static_cast<const armnn::L2NormalizationDescriptor&>(descriptor);
2010  SerializeL2NormalizationLayer(layer, layerDescriptor, name);
2011  break;
2012  }
2014  {
2015  const armnn::LogicalBinaryDescriptor& layerDescriptor =
2016  static_cast<const armnn::LogicalBinaryDescriptor&>(descriptor);
2017  SerializeLogicalBinaryLayer(layer, layerDescriptor, name);
2018  break;
2019  }
2021  {
2022  const armnn::LogSoftmaxDescriptor& layerDescriptor =
2023  static_cast<const armnn::LogSoftmaxDescriptor&>(descriptor);
2024  SerializeLogSoftmaxLayer(layer, layerDescriptor, name);
2025  break;
2026  }
2027  case armnn::LayerType::Lstm :
2028  {
2029  const armnn::LstmDescriptor& layerDescriptor =
2030  static_cast<const armnn::LstmDescriptor&>(descriptor);
2031  SerializeLstmLayer(layer, layerDescriptor, constants, name);
2032  break;
2033  }
2035  {
2036  const armnn::QLstmDescriptor& layerDescriptor =
2037  static_cast<const armnn::QLstmDescriptor&>(descriptor);
2038  SerializeQLstmLayer(layer, layerDescriptor, constants, name);
2039  break;
2040  }
2042  {
2043  SerializeMaximumLayer(layer, name);
2044  break;
2045  }
2046  case armnn::LayerType::Mean :
2047  {
2048  const armnn::MeanDescriptor& layerDescriptor =
2049  static_cast<const armnn::MeanDescriptor&>(descriptor);
2050  SerializeMeanLayer(layer, layerDescriptor, name);
2051  break;
2052  }
2054  {
2055  SerializeMergeLayer(layer, name);
2056  break;
2057  }
2059  {
2060  SerializeMinimumLayer(layer, name);
2061  break;
2062  }
2064  {
2065  SerializeMultiplicationLayer(layer, name);
2066  break;
2067  }
2069  {
2070  const armnn::NormalizationDescriptor& layerDescriptor =
2071  static_cast<const armnn::NormalizationDescriptor&>(descriptor);
2072  SerializeNormalizationLayer(layer, layerDescriptor, name);
2073  break;
2074  }
2076  {
2077  SerializeOutputLayer(layer, id, name);
2078  break;
2079  }
2080  case armnn::LayerType::Pad :
2081  {
2082  const armnn::PadDescriptor& layerDescriptor =
2083  static_cast<const armnn::PadDescriptor&>(descriptor);
2084  SerializePadLayer(layer, layerDescriptor, name);
2085  break;
2086  }
2088  {
2089  const armnn::PermuteDescriptor& layerDescriptor =
2090  static_cast<const armnn::PermuteDescriptor&>(descriptor);
2091  SerializePermuteLayer(layer, layerDescriptor, name);
2092  break;
2093  }
2095  {
2096  const armnn::Pooling2dDescriptor& layerDescriptor =
2097  static_cast<const armnn::Pooling2dDescriptor&>(descriptor);
2098  SerializePooling2dLayer(layer, layerDescriptor, name);
2099  break;
2100  }
2102  {
2103  SerializePreluLayer(layer, name);
2104  break;
2105  }
2107  {
2108  SerializeQuantizeLayer(layer, name);
2109  break;
2110  }
2112  SerializeQuantizedLstmLayer(layer, constants, name);
2113  break;
2115  {
2116  const armnn::ReshapeDescriptor &layerDescriptor =
2117  static_cast<const armnn::ReshapeDescriptor &>(descriptor);
2118  SerializeReshapeLayer(layer, layerDescriptor, name);
2119  break;
2120  }
2122  {
2123  SerializeRankLayer(layer, name);
2124  break;
2125  }
2127  {
2128  const armnn::ReduceDescriptor& layerDescriptor =
2129  static_cast<const armnn::ReduceDescriptor&>(descriptor);
2130  SerializeReduceLayer(layer, layerDescriptor, name);
2131  break;
2132  }
2134  {
2135  const armnn::ResizeDescriptor& layerDescriptor =
2136  static_cast<const armnn::ResizeDescriptor&>(descriptor);
2137  SerializeResizeLayer(layer, layerDescriptor, name);
2138  break;
2139  }
2141  {
2142  const armnn::SliceDescriptor& layerDescriptor =
2143  static_cast<const armnn::SliceDescriptor&>(descriptor);
2144  SerializeSliceLayer(layer, layerDescriptor, name);
2145  break;
2146  }
2148  {
2149  const armnn::SoftmaxDescriptor& layerDescriptor =
2150  static_cast<const armnn::SoftmaxDescriptor&>(descriptor);
2151  SerializeSoftmaxLayer(layer, layerDescriptor, name);
2152  break;
2153  }
2155  {
2156  const armnn::SpaceToBatchNdDescriptor& layerDescriptor =
2157  static_cast<const armnn::SpaceToBatchNdDescriptor&>(descriptor);
2158  SerializeSpaceToBatchNdLayer(layer, layerDescriptor, name);
2159  break;
2160  }
2162  {
2163  const armnn::SpaceToDepthDescriptor& layerDescriptor =
2164  static_cast<const armnn::SpaceToDepthDescriptor&>(descriptor);
2165  SerializeSpaceToDepthLayer(layer, layerDescriptor, name);
2166  break;
2167  }
2169  {
2170  const armnn::SplitterDescriptor& layerDescriptor =
2171  static_cast<const armnn::SplitterDescriptor&>(descriptor);
2172  SerializeSplitterLayer(layer, layerDescriptor, name);
2173  break;
2174  }
2176  {
2177  const armnn::StackDescriptor& layerDescriptor =
2178  static_cast<const armnn::StackDescriptor&>(descriptor);
2179  SerializeStackLayer(layer, layerDescriptor, name);
2180  break;
2181  }
2183  {
2184  const armnn::StandInDescriptor& layerDescriptor =
2185  static_cast<const armnn::StandInDescriptor&>(descriptor);
2186  SerializeStandInLayer(layer, layerDescriptor, name);
2187  break;
2188  }
2190  {
2191  const armnn::StridedSliceDescriptor& layerDescriptor =
2192  static_cast<const armnn::StridedSliceDescriptor&>(descriptor);
2193  SerializeStridedSliceLayer(layer, layerDescriptor, name);
2194  break;
2195  }
2197  {
2198  SerializeSubtractionLayer(layer, name);
2199  break;
2200  }
2202  {
2203  SerializeSwitchLayer(layer, name);
2204  break;
2205  }
2207  {
2208  const armnn::TransposeDescriptor& layerDescriptor =
2209  static_cast<const armnn::TransposeDescriptor&>(descriptor);
2210  SerializeTransposeLayer(layer, layerDescriptor, name);
2211  break;
2212  }
2214  {
2215  const armnn::TransposeConvolution2dDescriptor& layerDescriptor =
2216  static_cast<const armnn::TransposeConvolution2dDescriptor&>(descriptor);
2217  SerializeTransposeConvolution2dLayer(layer, layerDescriptor, constants, name);
2218  break;
2219  }
2220  default:
2221  {
2223  fmt::format("A layer of unknown type was given to the serializer. Layer name: {}; Layer Id: {}",
2224  layer->GetName(),
2225  id));
2226  }
2227  }
2228 }
2229 
2231 {
2232  // Iterate through to network
2233  inNetwork.ExecuteStrategy(m_SerializerStrategy);
2234  flatbuffers::FlatBufferBuilder& fbBuilder = m_SerializerStrategy.GetFlatBufferBuilder();
2235 
2236  // Create FlatBuffer SerializedGraph
2237  auto serializedGraph = serializer::CreateSerializedGraph(
2238  fbBuilder,
2239  fbBuilder.CreateVector(m_SerializerStrategy.GetSerializedLayers()),
2240  fbBuilder.CreateVector(m_SerializerStrategy.GetInputIds()),
2241  fbBuilder.CreateVector(m_SerializerStrategy.GetOutputIds()),
2242  m_SerializerStrategy.GetVersionTable());
2243 
2244  // Serialize the graph
2245  fbBuilder.Finish(serializedGraph);
2246 }
2247 
2248 
2250 {
2251  flatbuffers::FlatBufferBuilder& fbBuilder = m_SerializerStrategy.GetFlatBufferBuilder();
2252 
2253  auto bytesToWrite = armnn::numeric_cast<std::streamsize>(fbBuilder.GetSize());
2254  stream.write(reinterpret_cast<const char*>(fbBuilder.GetBufferPointer()), bytesToWrite);
2255  return !stream.bad();
2256 }
2257 
2258 } // namespace armnnSerializer
uint32_t m_PadBottom
Padding bottom value in the height dimension.
bool m_BiasEnabled
Enable/disable bias.
flatbuffers::Offset< LongData > CreateLongData(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< int64_t >> data=0)
float m_Eps
Used to avoid dividing by zero.
virtual unsigned int GetNumOutputSlots() const =0
Returns the number of connectable output slots.
bool m_HalfPixelCenters
Half Pixel Centers.
armnnSerializer::UnaryOperation GetFlatBufferUnaryOperation(armnn::UnaryOperation comparisonOperation)
bool m_ProjectionEnabled
Enable/disable the projection layer.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
flatbuffers::Offset< ReshapeDescriptor > CreateReshapeDescriptor(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> targetShape=0)
bool m_AlignCorners
Aligned corners.
flatbuffers::Offset< ReduceLayer > CreateReduceLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::ReduceDescriptor > descriptor=0)
UnaryOperation m_Operation
Specifies the elementwiseUnary operation to execute.
uint32_t m_Axis
0-based axis along which to stack the input tensors.
flatbuffers::Offset< OutputSlot > CreateOutputSlot(flatbuffers::FlatBufferBuilder &_fbb, uint32_t index=0, flatbuffers::Offset< armnnSerializer::TensorInfo > tensorInfo=0)
A ViewsDescriptor for the SplitterLayer.
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:62
flatbuffers::Offset< DepthwiseConvolution2dDescriptor > CreateDepthwiseConvolution2dDescriptor(flatbuffers::FlatBufferBuilder &_fbb, uint32_t padLeft=0, uint32_t padRight=0, uint32_t padTop=0, uint32_t padBottom=0, uint32_t strideX=0, uint32_t strideY=0, uint32_t dilationX=1, uint32_t dilationY=1, bool biasEnabled=false, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NCHW)
float m_ScaleW
Center size encoding scale weight.
uint32_t m_PadBottom
Padding bottom value in the height dimension.
bool m_BiasEnabled
Enable/disable bias.
void ExecuteStrategy(IStrategy &strategy) const
Definition: Network.cpp:519
virtual unsigned int GetNumInputSlots() const =0
Returns the number of connectable input slots.
float m_K
Kappa value used for the across channel normalization equation.
int m_Axis
Scalar, defaulted to the last index (-1), specifying the dimension the activation will be performed o...
A TransposeConvolution2dDescriptor for the TransposeConvolution2dLayer.
flatbuffers::Offset< AbsLayer > CreateAbsLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
flatbuffers::Offset< LstmLayer > CreateLstmLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::LstmDescriptor > descriptor=0, flatbuffers::Offset< armnnSerializer::LstmInputParams > inputParams=0)
const TensorShape & GetShape() const
Definition: Tensor.hpp:187
flatbuffers::Offset< L2NormalizationLayer > CreateL2NormalizationLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::L2NormalizationDescriptor > descriptor=0)
uint32_t m_PadBottom
Padding bottom value in the height dimension.
uint32_t m_PadLeft
Padding left value in the width dimension.
flatbuffers::Offset< TransposeConvolution2dDescriptor > CreateTransposeConvolution2dDescriptor(flatbuffers::FlatBufferBuilder &_fbb, uint32_t padLeft=0, uint32_t padRight=0, uint32_t padTop=0, uint32_t padBottom=0, uint32_t strideX=0, uint32_t strideY=0, bool biasEnabled=false, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NCHW)
float m_ClippingThresProj
Clipping threshold value for the projection.
int32_t m_ShrinkAxisMask
Shrink axis mask value. If set, the nth specification shrinks the dimensionality by 1...
A ReshapeDescriptor for the ReshapeLayer.
flatbuffers::Offset< ResizeDescriptor > CreateResizeDescriptor(flatbuffers::FlatBufferBuilder &_fbb, uint32_t targetHeight=0, uint32_t targetWidth=0, armnnSerializer::ResizeMethod method=armnnSerializer::ResizeMethod_NearestNeighbor, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NHWC, bool alignCorners=false, bool halfPixelCenters=false)
flatbuffers::Offset< FillLayer > CreateFillLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::FillDescriptor > descriptor=0)
std::vector< int > m_Begin
Begin values for the input that will be sliced.
flatbuffers::Offset< SoftmaxDescriptor > CreateSoftmaxDescriptor(flatbuffers::FlatBufferBuilder &_fbb, float beta=0.0f)
float m_PadValue
Optional value to use for padding, defaults to 0.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
uint32_t GetNumDimensions() const
Get the number of dimensions.
A ComparisonDescriptor for the ComparisonLayer.
Definition: Descriptors.hpp:78
void Serialize(const armnn::INetwork &inNetwork)
Serializes the network to ArmNN SerializedGraph.
float m_ScaleX
Center size encoding scale x.
TensorShape m_InputShape
Required shape of all input tensors.
uint32_t m_TargetWidth
Target width value.
bool m_TransposeWeightMatrix
Enable/disable transpose weight matrix.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Dimensionality GetDimensionality() const
Function that returns the tensor type.
Definition: Tensor.hpp:92
flatbuffers::Offset< GatherLayer > CreateGatherLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::GatherDescriptor > descriptor=0)
flatbuffers::Offset< RankLayer > CreateRankLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
bool HasPerAxisQuantization() const
Definition: Tensor.cpp:437
uint32_t m_PoolWidth
Pooling width value.
bool m_PeepholeEnabled
Enable/disable peephole.
armnnSerializer::OutputShapeRounding GetFlatBufferOutputShapeRounding(armnn::OutputShapeRounding outputShapeRounding)
flatbuffers::Offset< TransposeLayer > CreateTransposeLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::TransposeDescriptor > descriptor=0)
A Convolution2dDescriptor for the Convolution2dLayer.
float m_Alpha
Alpha value for the normalization equation.
uint32_t m_PadLeft
Padding left value in the width dimension.
flatbuffers::Offset< ComparisonLayer > CreateComparisonLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::ComparisonDescriptor > descriptor=0)
bool m_KeepDims
if true then output shape has no change.
float m_HiddenStateScale
Hidden State quantization scale.
bool m_BiasEnabled
Enable/disable bias.
flatbuffers::Offset< ConstTensor > CreateConstTensor(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::TensorInfo > info=0, armnnSerializer::ConstTensorData data_type=armnnSerializer::ConstTensorData_NONE, flatbuffers::Offset< void > data=0)
Optional< unsigned int > GetQuantizationDim() const
Definition: Tensor.cpp:485
flatbuffers::Offset< QuantizeLayer > CreateQuantizeLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
float m_OutputIntermediateScale
Output intermediate quantization scale.
ResizeMethod m_Method
The Interpolation method to use (Bilinear, NearestNeighbor).
float m_Gamma
Gamma, the scale scalar value applied for the normalized tensor. Defaults to 1.0. ...
float m_Beta
Exponentiation value.
flatbuffers::Offset< InputSlot > CreateInputSlot(flatbuffers::FlatBufferBuilder &_fbb, uint32_t index=0, const armnnSerializer::Connection *connection=0)
std::vector< unsigned int > m_Size
Size of the slice in each dimension.
static ISerializer * CreateRaw()
Definition: Serializer.cpp:30
flatbuffers::Offset< SpaceToDepthDescriptor > CreateSpaceToDepthDescriptor(flatbuffers::FlatBufferBuilder &_fbb, uint32_t blockSize=0, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NHWC)
float m_Eps
Value to add to the variance. Used to avoid dividing by zero.
PaddingMethod m_PaddingMethod
The padding method to be used. (Exclude, IgnoreValue).
ArgMinMaxFunction m_Function
Specify if the function is to find Min or Max.
Definition: Descriptors.hpp:70
uint32_t m_DetectionsPerClass
Detections per classes, used in Regular NMS.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
flatbuffers::Offset< QuantizedLstmLayer > CreateQuantizedLstmLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::QuantizedLstmInputParams > inputParams=0)
flatbuffers::Offset< TransposeDescriptor > CreateTransposeDescriptor(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> dimMappings=0)
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
serializer::ActivationFunction GetFlatBufferActivationFunction(armnn::ActivationFunction function)
Definition: Serializer.cpp:55
Main network class which provides the interface for building up a neural network. ...
Definition: INetwork.hpp:178
flatbuffers::Offset< DetectionPostProcessDescriptor > CreateDetectionPostProcessDescriptor(flatbuffers::FlatBufferBuilder &_fbb, uint32_t maxDetections=0, uint32_t maxClassesPerDetection=0, uint32_t detectionsPerClass=0, float nmsScoreThreshold=0.0f, float nmsIouThreshold=0.0f, uint32_t numClasses=0, bool useRegularNms=false, float scaleX=0.0f, float scaleY=0.0f, float scaleW=0.0f, float scaleH=0.0f)
armnnSerializer::NormalizationAlgorithmMethod GetFlatBufferNormalizationAlgorithmMethod(armnn::NormalizationAlgorithmMethod normalizationAlgorithmMethod)
uint32_t m_PadTop
Padding top value in the height dimension.
flatbuffers::Offset< AnyLayer > CreateAnyLayer(flatbuffers::FlatBufferBuilder &_fbb, armnnSerializer::Layer layer_type=armnnSerializer::Layer_NONE, flatbuffers::Offset< void > layer=0)
flatbuffers::Offset< DepthwiseConvolution2dLayer > CreateDepthwiseConvolution2dLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::DepthwiseConvolution2dDescriptor > descriptor=0, flatbuffers::Offset< armnnSerializer::ConstTensor > weights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > biases=0)
A LogicalBinaryDescriptor for the LogicalBinaryLayer.
flatbuffers::Offset< MergeLayer > CreateMergeLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
uint32_t m_PadRight
Padding right value in the width dimension.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
MemoryType GetMemoryArea() const
Definition: Tensor.hpp:292
std::vector< std::pair< unsigned int, unsigned int > > m_PadList
Specifies the padding for input dimension.
uint32_t GetNumViews() const
Get the number of views.
ReduceOperation m_ReduceOperation
Specifies the reduction operation to execute.
flatbuffers::Offset< QLstmInputParams > CreateQLstmInputParams(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::ConstTensor > inputToForgetWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > inputToCellWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > inputToOutputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > recurrentToForgetWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > recurrentToCellWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > recurrentToOutputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > forgetGateBias=0, flatbuffers::Offset< armnnSerializer::ConstTensor > cellBias=0, flatbuffers::Offset< armnnSerializer::ConstTensor > outputGateBias=0, flatbuffers::Offset< armnnSerializer::ConstTensor > inputToInputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > recurrentToInputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > inputGateBias=0, flatbuffers::Offset< armnnSerializer::ConstTensor > projectionWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > projectionBias=0, flatbuffers::Offset< armnnSerializer::ConstTensor > cellToInputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > cellToForgetWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > cellToOutputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > inputLayerNormWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > forgetLayerNormWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > cellLayerNormWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > outputLayerNormWeights=0)
Copyright (c) 2021 ARM Limited and Contributors.
void IgnoreUnused(Ts &&...)
uint32_t m_PadBottom
Padding bottom value in the height dimension.
int32_t m_BeginMask
Begin mask value.
flatbuffers::Offset< FullyConnectedDescriptor > CreateFullyConnectedDescriptor(flatbuffers::FlatBufferBuilder &_fbb, bool biasEnabled=false, bool transposeWeightsMatrix=false, bool constantWeights=true)
flatbuffers::Offset< TransposeConvolution2dLayer > CreateTransposeConvolution2dLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::TransposeConvolution2dDescriptor > descriptor=0, flatbuffers::Offset< armnnSerializer::ConstTensor > weights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > biases=0)
SizeType GetSize() const
Definition: Types.hpp:274
uint32_t m_DilationY
Dilation along y axis.
int32_t m_EndMask
End mask value.
A SpaceToDepthDescriptor for the SpaceToDepthLayer.
flatbuffers::Offset< TensorInfo > CreateTensorInfo(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> dimensions=0, armnnSerializer::DataType dataType=armnnSerializer::DataType_Float16, float quantizationScale=1.0f, int32_t quantizationOffset=0, flatbuffers::Offset< flatbuffers::Vector< float >> quantizationScales=0, uint32_t quantizationDim=0, uint32_t dimensionality=1, flatbuffers::Offset< flatbuffers::Vector< uint8_t >> dimensionSpecificity=0)
flatbuffers::Offset< PreluLayer > CreatePreluLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
std::vector< std::pair< unsigned int, unsigned int > > m_PadList
Specifies the padding values for the input dimension: heightPad{top, bottom} widthPad{left, right}.
std::vector< float > GetQuantizationScales() const
Definition: Tensor.cpp:442
flatbuffers::Offset< StandInDescriptor > CreateStandInDescriptor(flatbuffers::FlatBufferBuilder &_fbb, uint32_t numInputs=0, uint32_t numOutputs=0)
bool SaveSerializedToStream(std::ostream &stream)
Serializes the SerializedGraph to the stream.
Definition: Serializer.cpp:50
uint32_t m_DilationY
Dilation factor value for height dimension.
armnnSerializer::ConstTensorData GetFlatBufferConstTensorData(armnn::DataType dataType)
bool GetDimensionSpecificity(unsigned int i) const
Gets information about if the dimension size has been specified or not.
Definition: Tensor.cpp:211
LogicalBinaryOperation m_Operation
Specifies the logical operation to execute.
A BatchToSpaceNdDescriptor for the BatchToSpaceNdLayer.
void ExecuteStrategy(const armnn::IConnectableLayer *layer, const armnn::BaseDescriptor &descriptor, const std::vector< armnn::ConstTensor > &constants, const char *name, const armnn::LayerBindingId id) override
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
PermutationVector m_DimMappings
Indicates how to translate tensor elements from a given source into the target destination, when source and target potentially have different memory layouts e.g.
flatbuffers::Offset< MultiplicationLayer > CreateMultiplicationLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
flatbuffers::Offset< DepthToSpaceLayer > CreateDepthToSpaceLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::DepthToSpaceDescriptor > descriptor=0)
flatbuffers::Offset< InstanceNormalizationLayer > CreateInstanceNormalizationLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::InstanceNormalizationDescriptor > descriptor=0)
armnnSerializer::ReduceOperation GetFlatBufferReduceOperation(armnn::ReduceOperation reduceOperation)
int LayerBindingId
Type of identifiers for bindable layers (inputs, outputs).
Definition: Types.hpp:243
flatbuffers::Offset< SliceLayer > CreateSliceLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::SliceDescriptor > descriptor=0)
armnnSerializer::DataType GetFlatBufferDataType(armnn::DataType dataType)
uint32_t m_NumOutputs
Number of output tensors.
NormalizationAlgorithmMethod m_NormMethodType
Normalization method algorithm to use (LocalBrightness, LocalContrast).
flatbuffers::Offset< Convolution2dDescriptor > CreateConvolution2dDescriptor(flatbuffers::FlatBufferBuilder &_fbb, uint32_t padLeft=0, uint32_t padRight=0, uint32_t padTop=0, uint32_t padBottom=0, uint32_t strideX=0, uint32_t strideY=0, uint32_t dilationX=1, uint32_t dilationY=1, bool biasEnabled=false, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NCHW)
flatbuffers::Offset< InputLayer > CreateInputLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::BindableLayerBase > base=0)
A ResizeDescriptor for the ResizeLayer.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
uint32_t m_MaxClassesPerDetection
Maximum numbers of classes per detection, used in Fast NMS.
Base class for all descriptors.
Definition: Descriptors.hpp:22
std::vector< unsigned int > m_Axis
Values for the dimensions to reduce.
A StackDescriptor for the StackLayer.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
flatbuffers::Offset< ShortData > CreateShortData(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< int16_t >> data=0)
serializer::ArgMinMaxFunction GetFlatBufferArgMinMaxFunction(armnn::ArgMinMaxFunction function)
Definition: Serializer.cpp:86
TensorShape m_TargetShape
Target shape value.
bool SaveSerializedToStream(std::ostream &stream)
Serializes the SerializedGraph to the stream.
flatbuffers::Offset< ConcatLayer > CreateConcatLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::OriginsDescriptor > descriptor=0)
uint32_t m_PoolHeight
Pooling height value.
uint32_t m_PadTop
Padding top value in the height dimension.
uint32_t m_MaxDetections
Maximum numbers of detections.
A PadDescriptor for the PadLayer.
flatbuffers::Offset< SubtractionLayer > CreateSubtractionLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
flatbuffers::Offset< BindableLayerBase > CreateBindableLayerBase(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, int32_t layerBindingId=0)
const uint32_t * GetViewOrigin(uint32_t idx) const
Return the view origin at the int value idx.
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
flatbuffers::Offset< ArgMinMaxLayer > CreateArgMinMaxLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::ArgMinMaxDescriptor > descriptor=0)
armnnSerializer::NormalizationAlgorithmChannel GetFlatBufferNormalizationAlgorithmChannel(armnn::NormalizationAlgorithmChannel normalizationAlgorithmChannel)
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
flatbuffers::Offset< QLstmDescriptor > CreateQLstmDescriptor(flatbuffers::FlatBufferBuilder &_fbb, bool cifgEnabled=true, bool peepholeEnabled=false, bool projectionEnabled=false, bool layerNormEnabled=false, float cellClip=0.0f, float projectionClip=0.0f, float inputIntermediateScale=0.0f, float forgetIntermediateScale=0.0f, float cellIntermediateScale=0.0f, float outputIntermediateScale=0.0f, int32_t hiddenStateZeroPoint=0, float hiddenStateScale=0.0f)
bool m_LayerNormEnabled
Enable/disable layer normalization.
flatbuffers::Offset< GreaterLayer > CreateGreaterLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
float m_NmsIouThreshold
Intersection over union threshold.
flatbuffers::Offset< ReshapeLayer > CreateReshapeLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::ReshapeDescriptor > descriptor=0)
armnnSerializer::LogicalBinaryOperation GetFlatBufferLogicalBinaryOperation(armnn::LogicalBinaryOperation logicalBinaryOperation)
flatbuffers::Offset< ArgMinMaxDescriptor > CreateArgMinMaxDescriptor(flatbuffers::FlatBufferBuilder &_fbb, armnnSerializer::ArgMinMaxFunction argMinMaxFunction=armnnSerializer::ArgMinMaxFunction_Min, int32_t axis=0)
An LstmDescriptor for the LstmLayer.
uint32_t m_PadRight
Padding right value in the width dimension.
flatbuffers::Offset< AdditionLayer > CreateAdditionLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
uint32_t m_DilationX
Dilation factor value for width dimension.
uint32_t m_PadTop
Padding top value in the height dimension.
flatbuffers::Offset< L2NormalizationDescriptor > CreateL2NormalizationDescriptor(flatbuffers::FlatBufferBuilder &_fbb, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NCHW, float eps=1e-12f)
std::vector< unsigned int > m_Begin
Beginning indices of the slice in each dimension.
int32_t m_NewAxisMask
New axis mask value.
flatbuffers::Offset< MinimumLayer > CreateMinimumLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
bool m_KeepDims
Enable/disable keep dimensions. If true, then the reduced dimensions that are of length 1 are kept...
flatbuffers::Offset< ByteData > CreateByteData(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< int8_t >> data=0)
std::vector< unsigned int > m_BlockShape
Block shape values.
float m_Eps
Epsilon, small scalar value added to variance to avoid dividing by zero. Defaults to 1e-12f...
An output connection slot for a layer.
Definition: INetwork.hpp:38
flatbuffers::Offset< DepthToSpaceDescriptor > CreateDepthToSpaceDescriptor(flatbuffers::FlatBufferBuilder &_fbb, uint32_t blockSize=0, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NHWC)
A L2NormalizationDescriptor for the L2NormalizationLayer.
int32_t GetQuantizationOffset() const
Definition: Tensor.cpp:469
An ArgMinMaxDescriptor for ArgMinMaxLayer.
Definition: Descriptors.hpp:56
float GetQuantizationScale() const
Definition: Tensor.cpp:452
flatbuffers::Offset< LstmInputParams > CreateLstmInputParams(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::ConstTensor > inputToForgetWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > inputToCellWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > inputToOutputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > recurrentToForgetWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > recurrentToCellWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > recurrentToOutputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > forgetGateBias=0, flatbuffers::Offset< armnnSerializer::ConstTensor > cellBias=0, flatbuffers::Offset< armnnSerializer::ConstTensor > outputGateBias=0, flatbuffers::Offset< armnnSerializer::ConstTensor > inputToInputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > recurrentToInputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > cellToInputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > inputGateBias=0, flatbuffers::Offset< armnnSerializer::ConstTensor > projectionWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > projectionBias=0, flatbuffers::Offset< armnnSerializer::ConstTensor > cellToForgetWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > cellToOutputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > inputLayerNormWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > forgetLayerNormWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > cellLayerNormWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > outputLayerNormWeights=0)
DataType GetDataType() const
Definition: Tensor.hpp:194
An OriginsDescriptor for the ConcatLayer.
A ReduceDescriptor for the REDUCE operators.
float m_ProjectionClip
Clipping threshold value for the projection.
flatbuffers::Offset< CastLayer > CreateCastLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
flatbuffers::Offset< LayerBase > CreateLayerBase(flatbuffers::FlatBufferBuilder &_fbb, uint32_t index=0, flatbuffers::Offset< flatbuffers::String > layerName=0, armnnSerializer::LayerType layerType=armnnSerializer::LayerType_Addition, flatbuffers::Offset< flatbuffers::Vector< flatbuffers::Offset< armnnSerializer::InputSlot >>> inputSlots=0, flatbuffers::Offset< flatbuffers::Vector< flatbuffers::Offset< armnnSerializer::OutputSlot >>> outputSlots=0)
A FullyConnectedDescriptor for the FullyConnectedLayer.
int32_t m_EllipsisMask
Ellipsis mask value.
virtual LayerGuid GetGuid() const =0
Returns the unique id of the layer.
bool m_BiasEnabled
Enable/disable bias.
A tensor defined by a TensorInfo (shape and data type) and an immutable backing store.
Definition: Tensor.hpp:314
flatbuffers::Offset< QuantizedLstmInputParams > CreateQuantizedLstmInputParams(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::ConstTensor > inputToInputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > inputToForgetWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > inputToCellWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > inputToOutputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > recurrentToInputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > recurrentToForgetWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > recurrentToCellWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > recurrentToOutputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > inputGateBias=0, flatbuffers::Offset< armnnSerializer::ConstTensor > forgetGateBias=0, flatbuffers::Offset< armnnSerializer::ConstTensor > cellBias=0, flatbuffers::Offset< armnnSerializer::ConstTensor > outputGateBias=0)
flatbuffers::Offset< ReduceDescriptor > CreateReduceDescriptor(flatbuffers::FlatBufferBuilder &_fbb, bool keepDims=false, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> axis=0, armnnSerializer::ReduceOperation reduceOperation=armnnSerializer::ReduceOperation_Sum)
flatbuffers::Offset< StackDescriptor > CreateStackDescriptor(flatbuffers::FlatBufferBuilder &_fbb, uint32_t axis=0, uint32_t numInputs=0, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> inputShape=0)
flatbuffers::Offset< BatchToSpaceNdDescriptor > CreateBatchToSpaceNdDescriptor(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> blockShape=0, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> crops=0, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NHWC)
float m_InputIntermediateScale
Input intermediate quantization scale.
flatbuffers::Offset< PadDescriptor > CreatePadDescriptor(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> padList=0, float padValue=0.0f)
uint32_t m_TargetWidth
Target width value.
flatbuffers::Offset< SplitterLayer > CreateSplitterLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::ViewsDescriptor > descriptor=0)
A GatherDescriptor for the GatherLayer.
bool m_PeepholeEnabled
Enable/disable peephole.
uint32_t m_NumClasses
Number of classes.
bool m_HalfPixelCenters
Half Pixel Centers.
flatbuffers::Offset< OutputLayer > CreateOutputLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::BindableLayerBase > base=0)
void Serialize(const armnn::INetwork &inNetwork)
Serializes the network to ArmNN SerializedGraph.
Definition: Serializer.cpp:45
flatbuffers::Offset< SoftmaxLayer > CreateSoftmaxLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::SoftmaxDescriptor > descriptor=0)
flatbuffers::Offset< FillDescriptor > CreateFillDescriptor(flatbuffers::FlatBufferBuilder &_fbb, float value=0.0f)
uint32_t m_PadTop
Padding top value in the height dimension.
A StandInDescriptor for the StandIn layer.
A QLstmDescriptor for the QLstmLayer.
flatbuffers::Offset< StridedSliceLayer > CreateStridedSliceLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::StridedSliceDescriptor > descriptor=0)
virtual unsigned int CalculateIndexOnOwner() const =0
flatbuffers::Offset< LogSoftmaxDescriptor > CreateLogSoftmaxDescriptor(flatbuffers::FlatBufferBuilder &_fbb, float beta=1.0f, int32_t axis=-1)
bool m_UseRegularNms
Use Regular NMS.
flatbuffers::Offset< RsqrtLayer > CreateRsqrtLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
flatbuffers::Offset< MeanLayer > CreateMeanLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::MeanDescriptor > descriptor=0)
std::vector< unsigned int > m_BlockShape
Block shape value.
std::vector< int > m_Stride
Stride values for the input that will be sliced.
An ActivationDescriptor for the ActivationLayer.
Definition: Descriptors.hpp:25
flatbuffers::Offset< ActivationLayer > CreateActivationLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::ActivationDescriptor > descriptor=0)
flatbuffers::Offset< SpaceToDepthLayer > CreateSpaceToDepthLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::SpaceToDepthDescriptor > descriptor=0)
flatbuffers::Offset< SliceDescriptor > CreateSliceDescriptor(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> begin=0, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> size=0)
const TensorInfo & GetInfo() const
Definition: Tensor.hpp:282
min(a, max(b, input)) ReLu1 & ReLu6.
flatbuffers::Offset< BatchNormalizationLayer > CreateBatchNormalizationLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::BatchNormalizationDescriptor > descriptor=0, flatbuffers::Offset< armnnSerializer::ConstTensor > mean=0, flatbuffers::Offset< armnnSerializer::ConstTensor > variance=0, flatbuffers::Offset< armnnSerializer::ConstTensor > beta=0, flatbuffers::Offset< armnnSerializer::ConstTensor > gamma=0)
flatbuffers::Offset< BatchNormalizationDescriptor > CreateBatchNormalizationDescriptor(flatbuffers::FlatBufferBuilder &_fbb, float eps=0.0f, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NHWC)
uint32_t m_TargetHeight
Target height value.
uint32_t m_NumInputs
Number of input tensors.
flatbuffers::Offset< GatherDescriptor > CreateGatherDescriptor(flatbuffers::FlatBufferBuilder &_fbb, int32_t axis=0)
flatbuffers::Offset< ActivationDescriptor > CreateActivationDescriptor(flatbuffers::FlatBufferBuilder &_fbb, armnnSerializer::ActivationFunction activationFunction=armnnSerializer::ActivationFunction_Sigmoid, float a=0.0f, float b=0.0f)
uint32_t m_TargetHeight
Target height value.
uint32_t m_ActivationFunc
The activation function to use.
A SliceDescriptor for the SliceLayer.
flatbuffers::Offset< NormalizationLayer > CreateNormalizationLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::NormalizationDescriptor > descriptor=0)
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
flatbuffers::Offset< ViewsDescriptor > CreateViewsDescriptor(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::OriginsDescriptor > origins=0, flatbuffers::Offset< flatbuffers::Vector< flatbuffers::Offset< armnnSerializer::UintVector >>> viewSizes=0)
virtual LayerType GetType() const =0
Returns the armnn::LayerType of this layer.
flatbuffers::Offset< PermuteDescriptor > CreatePermuteDescriptor(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> dimMappings=0)
float m_ClippingThresCell
Clipping threshold value for the cell state.
unsigned int m_BlockSize
Scalar specifying the input block size. It must be >= 1.
flatbuffers::Offset< MeanDescriptor > CreateMeanDescriptor(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> axis=0, bool keepDims=false)
const uint32_t * GetViewOrigin(uint32_t idx) const
Get the view origin at the int value idx.
flatbuffers::Offset< StandInLayer > CreateStandInLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::StandInDescriptor > descriptor=0)
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
float m_ForgetIntermediateScale
Forget intermediate quantization scale.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
float m_Beta
Beta, the offset scalar value applied for the normalized tensor. Defaults to 1.0. ...
armnnSerializer::DataLayout GetFlatBufferDataLayout(armnn::DataLayout dataLayout)
std::vector< uint32_t > m_vAxis
The indices of the dimensions to reduce.
float m_ScaleH
Center size encoding scale height.
ComparisonOperation m_Operation
Specifies the comparison operation to execute.
Definition: Descriptors.hpp:94
std::vector< int > m_End
End values for the input that will be sliced.
flatbuffers::Offset< SwitchLayer > CreateSwitchLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
A SpaceToBatchNdDescriptor for the SpaceToBatchNdLayer.
flatbuffers::Offset< ResizeBilinearDescriptor > CreateResizeBilinearDescriptor(flatbuffers::FlatBufferBuilder &_fbb, uint32_t targetWidth=0, uint32_t targetHeight=0, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NHWC, bool alignCorners=false, bool halfPixelCenters=false)
NormalizationAlgorithmChannel m_NormChannelType
Normalization channel algorithm to use (Across, Within).
const uint32_t * GetViewSizes(uint32_t idx) const
Get the view sizes at the int value idx.
float m_CellClip
Clipping threshold value for the cell state.
flatbuffers::Offset< ElementwiseUnaryDescriptor > CreateElementwiseUnaryDescriptor(flatbuffers::FlatBufferBuilder &_fbb, armnnSerializer::UnaryOperation operation=armnnSerializer::UnaryOperation_Abs)
float m_A
Alpha upper bound value used by the activation functions. (BoundedReLu, Linear, TanH, Elu).
Definition: Descriptors.hpp:50
uint32_t m_DilationX
Dilation along x axis.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
bool m_CifgEnabled
Enable/disable cifg (coupled input & forget gate).
flatbuffers::Offset< PadLayer > CreatePadLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::PadDescriptor > descriptor=0)
flatbuffers::Offset< FloorLayer > CreateFloorLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
std::unique_ptr< ISerializer, void(*)(ISerializer *serializer)> ISerializerPtr
Definition: ISerializer.hpp:15
flatbuffers::Offset< NormalizationDescriptor > CreateNormalizationDescriptor(flatbuffers::FlatBufferBuilder &_fbb, armnnSerializer::NormalizationAlgorithmChannel normChannelType=armnnSerializer::NormalizationAlgorithmChannel_Across, armnnSerializer::NormalizationAlgorithmMethod normMethodType=armnnSerializer::NormalizationAlgorithmMethod_LocalBrightness, uint32_t normSize=0, float alpha=0.0f, float beta=0.0f, float k=0.0f, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NCHW)
uint32_t m_PadLeft
Padding left value in the width dimension.
armnnSerializer::ComparisonOperation GetFlatBufferComparisonOperation(armnn::ComparisonOperation comparisonOperation)
flatbuffers::Offset< Pooling2dDescriptor > CreatePooling2dDescriptor(flatbuffers::FlatBufferBuilder &_fbb, armnnSerializer::PoolingAlgorithm poolType=armnnSerializer::PoolingAlgorithm_Max, uint32_t padLeft=0, uint32_t padRight=0, uint32_t padTop=0, uint32_t padBottom=0, uint32_t poolWidth=0, uint32_t poolHeight=0, uint32_t strideX=0, uint32_t strideY=0, armnnSerializer::OutputShapeRounding outputShapeRounding=armnnSerializer::OutputShapeRounding_Floor, armnnSerializer::PaddingMethod paddingMethod=armnnSerializer::PaddingMethod_IgnoreValue, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NHWC)
flatbuffers::Offset< EqualLayer > CreateEqualLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
bool m_AlignCorners
Aligned corners.
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
flatbuffers::Offset< ConstantLayer > CreateConstantLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::ConstTensor > input=0)
int32_t m_Axis
The axis in params to gather indices from.
A ElementwiseUnaryDescriptor for the ElementwiseUnaryLayer.
Definition: Descriptors.hpp:98
PoolingAlgorithm m_PoolType
The pooling algorithm to use (Max, Average, L2).
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
flatbuffers::Offset< UintVector > CreateUintVector(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> data=0)
flatbuffers::Offset< StackLayer > CreateStackLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::StackDescriptor > descriptor=0)
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
std::vector< std::pair< unsigned int, unsigned int > > m_Crops
The values to crop from the input dimension.
flatbuffers::Offset< Convolution2dLayer > CreateConvolution2dLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::Convolution2dDescriptor > descriptor=0, flatbuffers::Offset< armnnSerializer::ConstTensor > weights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > biases=0)
unsigned int GetNumDimensions() const
Function that returns the tensor rank.
Definition: Tensor.cpp:174
flatbuffers::Offset< Pooling2dLayer > CreatePooling2dLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::Pooling2dDescriptor > descriptor=0)
bool m_ProjectionEnabled
Enable/disable the projection layer.
ArgMinMaxFunction
Definition: Types.hpp:83
flatbuffers::Offset< SpaceToBatchNdLayer > CreateSpaceToBatchNdLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::SpaceToBatchNdDescriptor > descriptor=0)
OutputShapeRounding m_OutputShapeRounding
The rounding method for the output shape. (Floor, Ceiling).
armnnSerializer::ResizeMethod GetFlatBufferResizeMethod(armnn::ResizeMethod method)
uint32_t m_NumInputs
Number of input tensors.
uint32_t GetNumDimensions() const
Get the number of dimensions.
flatbuffers::Offset< ComparisonDescriptor > CreateComparisonDescriptor(flatbuffers::FlatBufferBuilder &_fbb, armnnSerializer::ComparisonOperation operation=armnnSerializer::ComparisonOperation_Equal)
virtual const IInputSlot & GetInputSlot(unsigned int index) const =0
Get a const input slot handle by slot index.
A MeanDescriptor for the MeanLayer.
flatbuffers::Offset< MaximumLayer > CreateMaximumLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
virtual const IOutputSlot * GetConnection() const =0
static ISerializerPtr Create()
Definition: Serializer.cpp:35
armnnSerializer::PaddingMethod GetFlatBufferPaddingMethod(armnn::PaddingMethod paddingMethod)
bool m_LayerNormEnabled
Enable/disable layer normalization.
std::enable_if_t< std::is_unsigned< Source >::value &&std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
Definition: NumericCast.hpp:35
uint32_t m_PadRight
Padding right value in the width dimension.
flatbuffers::Offset< InstanceNormalizationDescriptor > CreateInstanceNormalizationDescriptor(flatbuffers::FlatBufferBuilder &_fbb, float gamma=0.0f, float beta=0.0f, float eps=0.0f, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NHWC)
A TransposeDescriptor for the TransposeLayer.
A StridedSliceDescriptor for the StridedSliceLayer.
virtual const TensorInfo & GetTensorInfo() const =0
virtual const IOutputSlot & GetOutputSlot(unsigned int index) const =0
Get the const output slot handle by slot index.
int m_Axis
Axis to reduce across the input tensor.
Definition: Descriptors.hpp:72
flatbuffers::Offset< IntData > CreateIntData(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< int32_t >> data=0)
virtual const char * GetName() const =0
Returns the name of the layer.
float m_ScaleY
Center size encoding scale y.
flatbuffers::Offset< ResizeLayer > CreateResizeLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::ResizeDescriptor > descriptor=0)
uint32_t GetNumViews() const
Get the number of views.
flatbuffers::Offset< FullyConnectedLayer > CreateFullyConnectedLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::FullyConnectedDescriptor > descriptor=0, flatbuffers::Offset< armnnSerializer::ConstTensor > weights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > biases=0)
float m_NmsScoreThreshold
NMS score threshold.
flatbuffers::Offset< DequantizeLayer > CreateDequantizeLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
flatbuffers::Offset< ResizeBilinearLayer > CreateResizeBilinearLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::ResizeBilinearDescriptor > descriptor=0)
virtual LayerGuid GetOwningLayerGuid() const =0
A Pooling2dDescriptor for the Pooling2dLayer.
flatbuffers::Offset< DetectionPostProcessLayer > CreateDetectionPostProcessLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::DetectionPostProcessDescriptor > descriptor=0, flatbuffers::Offset< armnnSerializer::ConstTensor > anchors=0)
A NormalizationDescriptor for the NormalizationLayer.
flatbuffers::Offset< FeatureCompatibilityVersions > CreateFeatureCompatibilityVersions(flatbuffers::FlatBufferBuilder &_fbb, uint32_t bindingIdsScheme=0)
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
flatbuffers::Offset< BatchToSpaceNdLayer > CreateBatchToSpaceNdLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::BatchToSpaceNdDescriptor > descriptor=0)
flatbuffers::Offset< armnnSerializer::FeatureCompatibilityVersions > GetVersionTable()
An InstanceNormalizationDescriptor for InstanceNormalizationLayer.
unsigned int GetConcatAxis() const
Get the concatenation axis value.
A ResizeBilinearDescriptor for the ResizeBilinearLayer.
flatbuffers::Offset< LogSoftmaxLayer > CreateLogSoftmaxLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::LogSoftmaxDescriptor > descriptor=0)
float m_CellIntermediateScale
Cell intermediate quantization scale.
flatbuffers::Offset< StridedSliceDescriptor > CreateStridedSliceDescriptor(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< int32_t >> begin=0, flatbuffers::Offset< flatbuffers::Vector< int32_t >> end=0, flatbuffers::Offset< flatbuffers::Vector< int32_t >> stride=0, int32_t beginMask=0, int32_t endMask=0, int32_t shrinkAxisMask=0, int32_t ellipsisMask=0, int32_t newAxisMask=0, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NHWC)
flatbuffers::Offset< OriginsDescriptor > CreateOriginsDescriptor(flatbuffers::FlatBufferBuilder &_fbb, uint32_t concatAxis=0, uint32_t numViews=0, uint32_t numDimensions=0, flatbuffers::Offset< flatbuffers::Vector< flatbuffers::Offset< armnnSerializer::UintVector >>> viewOrigins=0)
flatbuffers::Offset< QLstmLayer > CreateQLstmLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::QLstmDescriptor > descriptor=0, flatbuffers::Offset< armnnSerializer::QLstmInputParams > inputParams=0)
flatbuffers::Offset< LstmDescriptor > CreateLstmDescriptor(flatbuffers::FlatBufferBuilder &_fbb, uint32_t activationFunc=0, float clippingThresCell=0.0f, float clippingThresProj=0.0f, bool cifgEnabled=true, bool peepholeEnabled=false, bool projectionEnabled=false, bool layerNormEnabled=false)
float m_B
Beta lower bound value used by the activation functions. (BoundedReLu, Linear, TanH).
Definition: Descriptors.hpp:52
static void Destroy(ISerializer *serializer)
Definition: Serializer.cpp:40
A SoftmaxDescriptor for the SoftmaxLayer.
float m_Beta
Beta value for the normalization equation.
flatbuffers::Offset< ElementwiseUnaryLayer > CreateElementwiseUnaryLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::ElementwiseUnaryDescriptor > descriptor=0)
flatbuffers::Offset< PermuteLayer > CreatePermuteLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::PermuteDescriptor > descriptor=0)
const OriginsDescriptor & GetOrigins() const
Get the View Origins.
bool m_CifgEnabled
Enable/disable CIFG (coupled input & forget gate).
PermutationVector m_DimMappings
Indicates how to translate tensor elements from a given source into the target destination, when source and target potentially have different memory layouts e.g.
uint32_t m_NormSize
Depth radius value.
flatbuffers::Offset< SpaceToBatchNdDescriptor > CreateSpaceToBatchNdDescriptor(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> blockShape=0, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> padList=0, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NHWC)
ActivationFunction m_Function
The activation function to use (Sigmoid, TanH, Linear, ReLu, BoundedReLu, SoftReLu, LeakyReLu, Abs, Sqrt, Square, Elu).
Definition: Descriptors.hpp:48
An input connection slot for a layer.
Definition: INetwork.hpp:25
flatbuffers::Offset< SerializedGraph > CreateSerializedGraph(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< flatbuffers::Offset< armnnSerializer::AnyLayer >>> layers=0, flatbuffers::Offset< flatbuffers::Vector< int32_t >> inputIds=0, flatbuffers::Offset< flatbuffers::Vector< int32_t >> outputIds=0, flatbuffers::Offset< armnnSerializer::FeatureCompatibilityVersions > featureVersions=0)
armnnSerializer::PoolingAlgorithm GetFlatBufferPoolingAlgorithm(armnn::PoolingAlgorithm poolingAlgorithm)
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
A FillDescriptor for the FillLayer.
A BatchNormalizationDescriptor for the BatchNormalizationLayer.
uint32_t m_PadLeft
Padding left value in the width dimension.
unsigned int GetNumBytes() const
Definition: Tensor.hpp:289
ActivationFunction
Definition: Types.hpp:67
flatbuffers::Offset< LogicalBinaryDescriptor > CreateLogicalBinaryDescriptor(flatbuffers::FlatBufferBuilder &_fbb, armnnSerializer::LogicalBinaryOperation operation=armnnSerializer::LogicalBinaryOperation_LogicalAnd)
flatbuffers::Offset< DivisionLayer > CreateDivisionLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
A PermuteDescriptor for the PermuteLayer.
flatbuffers::Offset< LogicalBinaryLayer > CreateLogicalBinaryLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::LogicalBinaryDescriptor > descriptor=0)
uint32_t m_PadRight
Padding right value in the width dimension.
int32_t m_HiddenStateZeroPoint
Hidden State zero point.
bool m_ConstantWeights
Enable/disable constant weights and biases.
std::vector< float > anchors({ 0.5f, 0.5f, 1.0f, 1.0f, 0.5f, 0.5f, 1.0f, 1.0f, 0.5f, 0.5f, 1.0f, 1.0f, 0.5f, 10.5f, 1.0f, 1.0f, 0.5f, 10.5f, 1.0f, 1.0f, 0.5f, 100.5f, 1.0f, 1.0f })