ArmNN 21.02 — Serializer.cpp (source listing from the ArmNN documentation)
1 //
2 // Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 #include "Serializer.hpp"
6 #include "SerializerUtils.hpp"
7 
8 #include <armnn/Descriptors.hpp>
9 #include <armnn/LstmParams.hpp>
13 
14 #include <fmt/format.h>
15 #include <iostream>
16 
17 
18 using namespace armnn;
19 namespace fb = flatbuffers;
20 namespace serializer = armnnSerializer;
21 
22 namespace armnnSerializer
23 {
24 
25 ISerializer::ISerializer() : pSerializerImpl(new SerializerImpl())
26 {
27 }
28 
29 ISerializer::~ISerializer() = default;
30 
32 {
33  return new ISerializer();
34 }
35 
37 {
39 }
40 
42 {
43  delete serializer;
44 }
45 
47 {
48  pSerializerImpl->Serialize(inNetwork);
49 }
50 
51 bool ISerializer::SaveSerializedToStream(std::ostream& stream)
52 {
53  return pSerializerImpl->SaveSerializedToStream(stream);
54 }
55 
57 {
58  switch (function)
59  {
82  default:
84  }
85 }
86 
88 {
89  switch (function)
90  {
94  default:
96  }
97 }
98 
99 uint32_t SerializerStrategy::GetSerializedId(armnn::LayerGuid guid)
100 {
101  if (m_guidMap.empty())
102  {
103  m_guidMap.insert(std::make_pair(guid, m_layerId));
104  }
105  else if (m_guidMap.find(guid) == m_guidMap.end())
106  {
107  ++m_layerId;
108  m_guidMap.insert(std::make_pair(guid, m_layerId));
109 
110  return m_layerId;
111  }
112  return m_guidMap[guid];
113 }
114 
115 // Build FlatBuffer for Input Layer
116 void SerializerStrategy::SerializeInputLayer(const armnn::IConnectableLayer* layer, LayerBindingId id, const char* name)
117 {
118  IgnoreUnused(name);
119 
120  // Create FlatBuffer BaseLayer
121  auto flatBufferInputBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Input);
122 
123  // Create FlatBuffer BindableBaseLayer
124  auto flatBufferInputBindableBaseLayer = serializer::CreateBindableLayerBase(m_flatBufferBuilder,
125  flatBufferInputBaseLayer,
126  id);
127  // Push layer binding id to outputIds.
128  m_inputIds.push_back(id);
129 
130  // Create the FlatBuffer InputLayer
131  auto flatBufferInputLayer = serializer::CreateInputLayer(m_flatBufferBuilder, flatBufferInputBindableBaseLayer);
132 
133  // Add the AnyLayer to the FlatBufferLayers
134  CreateAnyLayer(flatBufferInputLayer.o, serializer::Layer::Layer_InputLayer);
135 }
136 
137 // Build FlatBuffer for Output Layer
138 void SerializerStrategy::SerializeOutputLayer(const armnn::IConnectableLayer* layer,
139  LayerBindingId id, const char* name)
140 {
141  IgnoreUnused(name);
142 
143  // Create FlatBuffer BaseLayer
144  auto flatBufferOutputBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Output);
145 
146  // Create FlatBuffer BindableBaseLayer
147  auto flatBufferOutputBindableBaseLayer = serializer::CreateBindableLayerBase(m_flatBufferBuilder,
148  flatBufferOutputBaseLayer,
149  id);
150  // Push layer binding id to outputIds.
151  m_outputIds.push_back(id);
152 
153  // Create the FlatBuffer OutputLayer
154  auto flatBufferOutputLayer = serializer::CreateOutputLayer(m_flatBufferBuilder, flatBufferOutputBindableBaseLayer);
155  // Add the AnyLayer to the FlatBufferLayers
156  CreateAnyLayer(flatBufferOutputLayer.o, serializer::Layer::Layer_OutputLayer);
157 }
158 
159 void SerializerStrategy::SerializeAbsLayer(const armnn::IConnectableLayer* layer, const char* name)
160 {
161  IgnoreUnused(name);
162  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Abs);
163  auto flatBufferAbsLayer = serializer::CreateAbsLayer(m_flatBufferBuilder, flatBufferBaseLayer);
164 
165  CreateAnyLayer(flatBufferAbsLayer.o, serializer::Layer::Layer_AbsLayer);
166 }
167 
168 // Build FlatBuffer for Activation Layer
169 void SerializerStrategy::SerializeActivationLayer(const armnn::IConnectableLayer* layer,
170  const armnn::ActivationDescriptor& descriptor,
171  const char* name)
172 {
173  IgnoreUnused(name);
174 
175  // Create FlatBuffer BaseLayer
176  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Activation);
177 
178  // Create the FlatBuffer ActivationDescriptor
179  auto flatBufferDescriptor = CreateActivationDescriptor(m_flatBufferBuilder,
181  descriptor.m_A,
182  descriptor.m_B);
183 
184  // Create the FlatBuffer ActivationLayer
185  auto flatBufferAdditionLayer = CreateActivationLayer(m_flatBufferBuilder,
186  flatBufferBaseLayer,
187  flatBufferDescriptor);
188 
189  // Add the AnyLayer to the FlatBufferLayers
190  CreateAnyLayer(flatBufferAdditionLayer.o, serializer::Layer::Layer_ActivationLayer);
191 }
192 
193 // Build FlatBuffer for Addition Layer
194 void SerializerStrategy::SerializeAdditionLayer(const armnn::IConnectableLayer* layer, const char* name)
195 {
196  IgnoreUnused(name);
197 
198  // Create FlatBuffer BaseLayer
199  auto flatBufferAdditionBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Addition);
200 
201  // Create the FlatBuffer AdditionLayer
202  auto flatBufferAdditionLayer = serializer::CreateAdditionLayer(m_flatBufferBuilder, flatBufferAdditionBaseLayer);
203 
204  // Add the AnyLayer to the FlatBufferLayers
205  CreateAnyLayer(flatBufferAdditionLayer.o, serializer::Layer::Layer_AdditionLayer);
206 }
207 
208 // Build FlatBuffer for ArgMinMax Layer
209 void SerializerStrategy::SerializeArgMinMaxLayer(const armnn::IConnectableLayer *layer,
210  const armnn::ArgMinMaxDescriptor& descriptor,
211  const char *name)
212 {
213  IgnoreUnused(name);
214 
215  // Create FlatBuffer BaseLayer
216  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_ArgMinMax);
217 
218  // Create FlatBuffer Descriptor
219  auto flatBufferDescriptor = CreateArgMinMaxDescriptor(m_flatBufferBuilder,
221  descriptor.m_Axis);
222 
223  // Create FlatBuffer ArgMinMaxLayer
224  auto flatBufferLayer = CreateArgMinMaxLayer(m_flatBufferBuilder,
225  flatBufferBaseLayer,
226  flatBufferDescriptor);
227 
229 }
230 
231 // Build FlatBuffer for BatchToSpaceNd Layer
232 void SerializerStrategy::SerializeBatchToSpaceNdLayer(const armnn::IConnectableLayer* layer,
233  const armnn::BatchToSpaceNdDescriptor& descriptor,
234  const char* name)
235 {
236  IgnoreUnused(name);
237 
238  // Create FlatBuffer BaseLayer
239  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_BatchToSpaceNd);
240 
241  std::vector<unsigned int> crops;
242  crops.reserve(descriptor.m_Crops.size() * 2);
243  for (auto& crop : descriptor.m_Crops)
244  {
245  crops.push_back(crop.first);
246  crops.push_back(crop.second);
247  }
248 
249  auto flatBufferDescriptor =
250  CreateBatchToSpaceNdDescriptor(m_flatBufferBuilder,
251  m_flatBufferBuilder.CreateVector(descriptor.m_BlockShape),
252  m_flatBufferBuilder.CreateVector(crops),
254 
255  auto flatBufferLayer = serializer::CreateBatchToSpaceNdLayer(m_flatBufferBuilder,
256  flatBufferBaseLayer,
257  flatBufferDescriptor);
258 
260 }
261 
262 void SerializerStrategy::SerializeBatchNormalizationLayer(
263  const armnn::IConnectableLayer* layer,
264  const armnn::BatchNormalizationDescriptor& batchNormDescriptor,
265  const std::vector<armnn::ConstTensor>& constants,
266  const char* name)
267 {
268  IgnoreUnused(name);
269 
270  const armnn::ConstTensor& mean = constants[0];
271  const armnn::ConstTensor& variance = constants[1];
272  const armnn::ConstTensor& beta = constants[2];
273  const armnn::ConstTensor& gamma = constants[3];
274 
275  auto fbBatchNormalizationBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_BatchNormalization);
276  auto fbBatchNormalizationDescriptor = serializer::CreateBatchNormalizationDescriptor(
277  m_flatBufferBuilder,
278  batchNormDescriptor.m_Eps,
279  GetFlatBufferDataLayout(batchNormDescriptor.m_DataLayout));
280 
281  auto fbMeanConstTensorInfo = CreateConstTensorInfo(mean);
282  auto fbVarianceConstTensorInfo = CreateConstTensorInfo(variance);
283  auto fbBetaConstTensorInfo = CreateConstTensorInfo(beta);
284  auto fbGammaConstTensorInfo = CreateConstTensorInfo(gamma);
285  auto fbBatchNormalizationLayer = serializer::CreateBatchNormalizationLayer(m_flatBufferBuilder,
286  fbBatchNormalizationBaseLayer,
287  fbBatchNormalizationDescriptor,
288  fbMeanConstTensorInfo,
289  fbVarianceConstTensorInfo,
290  fbBetaConstTensorInfo,
291  fbGammaConstTensorInfo);
292 
294 }
295 
296 void SerializerStrategy::SerializeComparisonLayer(const armnn::IConnectableLayer* layer,
297  const armnn::ComparisonDescriptor& descriptor,
298  const char* name)
299 {
300  IgnoreUnused(name);
301 
303  auto fbDescriptor = serializer::CreateComparisonDescriptor(
304  m_flatBufferBuilder,
306 
307  auto fbLayer = serializer::CreateComparisonLayer(m_flatBufferBuilder, fbBaseLayer, fbDescriptor);
309 }
310 
311 // Build FlatBuffer for Constant Layer
312 void SerializerStrategy::SerializeConstantLayer(const armnn::IConnectableLayer* layer,
313  const std::vector<armnn::ConstTensor>& constants,
314  const char* name)
315 {
316  IgnoreUnused(name);
317 
318  armnn::ConstTensor input = constants[0];
319 
320  // Create FlatBuffer BaseLayer
321  auto flatBufferConstantBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Constant);
322 
323  auto flatBufferConstTensorInfo = CreateConstTensorInfo(input);
324 
325  // Create the FlatBuffer ConstantLayer
326  auto flatBufferLayer = CreateConstantLayer(m_flatBufferBuilder,
327  flatBufferConstantBaseLayer,
328  flatBufferConstTensorInfo);
329 
330  // Add the AnyLayer to the FlatBufferLayers
332 }
333 
334 // Build FlatBuffer for Convolution2dLayer
335 void SerializerStrategy::SerializeConvolution2dLayer(const armnn::IConnectableLayer* layer,
336  const armnn::Convolution2dDescriptor& descriptor,
337  const std::vector<armnn::ConstTensor>& constants,
338  const char* name)
339 {
340  IgnoreUnused(name);
341 
342  const armnn::ConstTensor weights = constants[0];
343 
344  // Create FlatBuffer BaseLayer
345  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Convolution2d);
346 
347  auto flatBufferDescriptor = CreateConvolution2dDescriptor(m_flatBufferBuilder,
348  descriptor.m_PadLeft,
349  descriptor.m_PadRight,
350  descriptor.m_PadTop,
351  descriptor.m_PadBottom,
352  descriptor.m_StrideX,
353  descriptor.m_StrideY,
354  descriptor.m_DilationX,
355  descriptor.m_DilationY,
356  descriptor.m_BiasEnabled,
358  auto flatBufferWeightsConstTensorInfo = CreateConstTensorInfo(weights);
359  flatbuffers::Offset<serializer::ConstTensor> flatBufferBiasesConstTensorInfo;
360 
361  if (constants.size() > 1)
362  {
363  const armnn::ConstTensor biases = constants[1];
364  flatBufferBiasesConstTensorInfo = CreateConstTensorInfo(biases);
365  }
366 
367  // Create the FlatBuffer Convolution2dLayer
368  auto flatBufferLayer = CreateConvolution2dLayer(m_flatBufferBuilder,
369  flatBufferBaseLayer,
370  flatBufferDescriptor,
371  flatBufferWeightsConstTensorInfo,
372  flatBufferBiasesConstTensorInfo);
373 
374  // Add the AnyLayer to the FlatBufferLayers
376 }
377 
378 void SerializerStrategy::SerializeDepthToSpaceLayer(const armnn::IConnectableLayer* layer,
379  const armnn::DepthToSpaceDescriptor& descriptor,
380  const char* name)
381 {
382  IgnoreUnused(name);
383 
385  auto fbDescriptor = CreateDepthToSpaceDescriptor(m_flatBufferBuilder,
386  descriptor.m_BlockSize,
388 
389  auto fbLayer = serializer::CreateDepthToSpaceLayer(m_flatBufferBuilder, fbBaseLayer, fbDescriptor);
390 
392 }
393 
394 void SerializerStrategy::SerializeDepthwiseConvolution2dLayer(const armnn::IConnectableLayer* layer,
395  const armnn::DepthwiseConvolution2dDescriptor& descriptor,
396  const std::vector<armnn::ConstTensor>& constants,
397  const char* name)
398 {
399  IgnoreUnused(name);
400 
401  const armnn::ConstTensor& weights = constants[0];
402 
404  auto fbDescriptor = CreateDepthwiseConvolution2dDescriptor(m_flatBufferBuilder,
405  descriptor.m_PadLeft,
406  descriptor.m_PadRight,
407  descriptor.m_PadTop,
408  descriptor.m_PadBottom,
409  descriptor.m_StrideX,
410  descriptor.m_StrideY,
411  descriptor.m_DilationX,
412  descriptor.m_DilationY,
413  descriptor.m_BiasEnabled,
415 
416  flatbuffers::Offset<serializer::ConstTensor> fbWeightsConstTensorInfo = CreateConstTensorInfo(weights);
417  flatbuffers::Offset<serializer::ConstTensor> fbBiasesConstTensorInfo;
418 
419  if (constants.size() > 1)
420  {
421  const armnn::ConstTensor& biases = constants[1];
422  fbBiasesConstTensorInfo = CreateConstTensorInfo(biases);
423  }
424 
425  auto flatBufferLayer = CreateDepthwiseConvolution2dLayer(m_flatBufferBuilder,
426  fbBaseLayer,
427  fbDescriptor,
428  fbWeightsConstTensorInfo,
429  fbBiasesConstTensorInfo);
430 
432 }
433 
434 void SerializerStrategy::SerializeDequantizeLayer(const armnn::IConnectableLayer* layer,
435  const char* name)
436 {
437  IgnoreUnused(name);
438 
439  auto fbDequantizeBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Dequantize);
440  auto fbDequantizeLayer = serializer::CreateDequantizeLayer(m_flatBufferBuilder, fbDequantizeBaseLayer);
441 
443 }
444 
445 void SerializerStrategy::SerializeDetectionPostProcessLayer(const armnn::IConnectableLayer* layer,
446  const armnn::DetectionPostProcessDescriptor& descriptor,
447  const std::vector<armnn::ConstTensor>& constants,
448  const char* name)
449 {
450  IgnoreUnused(name);
451 
452  const armnn::ConstTensor& anchors = constants[0];
453 
455  auto fbDescriptor = CreateDetectionPostProcessDescriptor(m_flatBufferBuilder,
456  descriptor.m_MaxDetections,
457  descriptor.m_MaxClassesPerDetection,
458  descriptor.m_DetectionsPerClass,
459  descriptor.m_NmsScoreThreshold,
460  descriptor.m_NmsIouThreshold,
461  descriptor.m_NumClasses,
462  descriptor.m_UseRegularNms,
463  descriptor.m_ScaleX,
464  descriptor.m_ScaleY,
465  descriptor.m_ScaleW,
466  descriptor.m_ScaleH);
467 
468  flatbuffers::Offset<serializer::ConstTensor> fbAnchorsConstTensorInfo = CreateConstTensorInfo(anchors);
469 
470  auto flatBufferLayer = CreateDetectionPostProcessLayer(m_flatBufferBuilder,
471  fbBaseLayer,
472  fbDescriptor,
473  fbAnchorsConstTensorInfo);
474 
476 }
477 
478 void SerializerStrategy::SerializeDivisionLayer(const armnn::IConnectableLayer* layer, const char* name)
479 {
480  IgnoreUnused(name);
481 
482  auto fbDivisionBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Division);
483  auto fbDivisionLayer = serializer::CreateDivisionLayer(m_flatBufferBuilder, fbDivisionBaseLayer);
484 
486 }
487 
488 void SerializerStrategy::SerializeElementwiseUnaryLayer(const armnn::IConnectableLayer* layer,
489  const armnn::ElementwiseUnaryDescriptor& descriptor,
490  const char* name)
491 {
492  IgnoreUnused(name);
493 
496  m_flatBufferBuilder,
498 
499  auto fbLayer = serializer::CreateElementwiseUnaryLayer(m_flatBufferBuilder, fbBaseLayer, fbDescriptor);
501 }
502 
503 void SerializerStrategy::SerializeEqualLayer(const armnn::IConnectableLayer* layer, const char* name)
504 {
505  IgnoreUnused(name);
506 
507  auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Equal);
508  auto fbEqualLayer = serializer::CreateEqualLayer(m_flatBufferBuilder, fbBaseLayer);
509 
511 }
512 
513 void SerializerStrategy::SerializeFillLayer(const armnn::IConnectableLayer* layer,
514  const armnn::FillDescriptor& fillDescriptor,
515  const char* name)
516 {
517  IgnoreUnused(name);
518 
519  auto fbFillBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Fill);
520 
521  auto fbDescriptor = serializer::CreateFillDescriptor(m_flatBufferBuilder, fillDescriptor.m_Value);
522 
523  auto fbFillLayer = serializer::CreateFillLayer(m_flatBufferBuilder, fbFillBaseLayer, fbDescriptor);
524 
526 }
527 
528 void SerializerStrategy::SerializeFloorLayer(const armnn::IConnectableLayer *layer, const char *name)
529 {
530  IgnoreUnused(name);
531 
532  auto flatBufferFloorBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Floor);
533  auto flatBufferFloorLayer = serializer::CreateFloorLayer(m_flatBufferBuilder, flatBufferFloorBaseLayer);
534 
535  CreateAnyLayer(flatBufferFloorLayer.o, serializer::Layer::Layer_FloorLayer);
536 }
537 
538 void SerializerStrategy::SerializeGatherLayer(const armnn::IConnectableLayer* layer,
539  const armnn::GatherDescriptor& gatherDescriptor,
540  const char* name)
541 {
542  IgnoreUnused(name);
543 
544  auto fbGatherDescriptor = CreateGatherDescriptor(m_flatBufferBuilder,
545  gatherDescriptor.m_Axis);
546  auto fbGatherBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Gather);
547  auto flatBufferLayer = serializer::CreateGatherLayer(m_flatBufferBuilder, fbGatherBaseLayer, fbGatherDescriptor);
548 
550 }
551 
552 
553 void SerializerStrategy::SerializeGreaterLayer(const armnn::IConnectableLayer* layer, const char* name)
554 {
555  IgnoreUnused(name);
556 
557  auto fbGreaterBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Greater);
558  auto fbGreaterLayer = serializer::CreateGreaterLayer(m_flatBufferBuilder, fbGreaterBaseLayer);
559 
561 }
562 
563 void SerializerStrategy::SerializeInstanceNormalizationLayer(
564  const armnn::IConnectableLayer* layer,
565  const armnn::InstanceNormalizationDescriptor& instanceNormalizationDescriptor,
566  const char* name)
567 {
568  IgnoreUnused(name);
569 
571  m_flatBufferBuilder,
572  instanceNormalizationDescriptor.m_Gamma,
573  instanceNormalizationDescriptor.m_Beta,
574  instanceNormalizationDescriptor.m_Eps,
575  GetFlatBufferDataLayout(instanceNormalizationDescriptor.m_DataLayout));
576 
578  auto fbLayer = serializer::CreateInstanceNormalizationLayer(m_flatBufferBuilder, fbBaseLayer, fbDescriptor);
579 
581 }
582 
583 void SerializerStrategy::SerializeL2NormalizationLayer(const armnn::IConnectableLayer* layer,
584  const armnn::L2NormalizationDescriptor& l2NormalizationDescriptor,
585  const char* name)
586 {
587  IgnoreUnused(name);
588 
589  // Create FlatBuffer BaseLayer
591 
592  // Create the FlatBuffer L2Normalization Descriptor
594  m_flatBufferBuilder,
595  GetFlatBufferDataLayout(l2NormalizationDescriptor.m_DataLayout),
596  l2NormalizationDescriptor.m_Eps);
597 
598  // Create FlatBuffer layer
599  auto fbLayer = serializer::CreateL2NormalizationLayer(m_flatBufferBuilder, fbBaseLayer, fbDescriptor);
600 
602 }
603 
604 void SerializerStrategy::SerializeLogicalBinaryLayer(const armnn::IConnectableLayer* layer,
605  const armnn::LogicalBinaryDescriptor& descriptor,
606  const char* name)
607 {
608  IgnoreUnused(name);
609 
611  auto fbDescriptor = serializer::CreateLogicalBinaryDescriptor(
612  m_flatBufferBuilder,
614 
615  auto fbLayer = serializer::CreateLogicalBinaryLayer(m_flatBufferBuilder, fbBaseLayer, fbDescriptor);
617 }
618 
619 void SerializerStrategy::SerializeLogSoftmaxLayer(const armnn::IConnectableLayer* layer,
620  const armnn::LogSoftmaxDescriptor& logSoftmaxDescriptor,
621  const char* name)
622 {
623  IgnoreUnused(name);
624 
625  // Create FlatBuffer BaseLayer
626  auto flatBufferLogSoftmaxBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_LogSoftmax);
627 
628  // Create the FlatBuffer LogSoftmaxDescriptor
629  auto flatBufferLogSoftmaxDesc =
630  serializer::CreateLogSoftmaxDescriptor(m_flatBufferBuilder,
631  logSoftmaxDescriptor.m_Beta,
632  logSoftmaxDescriptor.m_Axis);
633 
634  // Create the FlatBuffer LogSoftmaxLayer
635  auto flatBufferLogSoftmaxLayer =
636  serializer::CreateLogSoftmaxLayer(m_flatBufferBuilder,
637  flatBufferLogSoftmaxBaseLayer,
638  flatBufferLogSoftmaxDesc);
639 
640  CreateAnyLayer(flatBufferLogSoftmaxLayer.o, serializer::Layer::Layer_LogSoftmaxLayer);
641 }
642 
643 void SerializerStrategy::SerializeLstmLayer(const armnn::IConnectableLayer* layer,
644  const armnn::LstmDescriptor& descriptor,
645  const std::vector<armnn::ConstTensor>& constants,
646  const char* name)
647 {
648  IgnoreUnused(name);
649 
650  auto fbLstmBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Lstm);
651 
652  auto fbLstmDescriptor = serializer::CreateLstmDescriptor(
653  m_flatBufferBuilder,
654  descriptor.m_ActivationFunc,
655  descriptor.m_ClippingThresCell,
656  descriptor.m_ClippingThresProj,
657  descriptor.m_CifgEnabled,
658  descriptor.m_PeepholeEnabled,
659  descriptor.m_ProjectionEnabled,
660  descriptor.m_LayerNormEnabled);
661 
662  // Index for constants vector
663  std::size_t i = 0;
664 
665  // Get mandatory/basic input parameters
666  auto inputToForgetWeights = CreateConstTensorInfo(constants[i++]); //InputToForgetWeights
667  auto inputToCellWeights = CreateConstTensorInfo(constants[i++]); //InputToCellWeights
668  auto inputToOutputWeights = CreateConstTensorInfo(constants[i++]); //InputToOutputWeights
669  auto recurrentToForgetWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToForgetWeights
670  auto recurrentToCellWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToCellWeights
671  auto recurrentToOutputWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToOutputWeights
672  auto forgetGateBias = CreateConstTensorInfo(constants[i++]); //ForgetGateBias
673  auto cellBias = CreateConstTensorInfo(constants[i++]); //CellBias
674  auto outputGateBias = CreateConstTensorInfo(constants[i++]); //OutputGateBias
675 
676 
677 
678  //Define optional parameters, these will be set depending on configuration in Lstm descriptor
679  flatbuffers::Offset<serializer::ConstTensor> inputToInputWeights;
680  flatbuffers::Offset<serializer::ConstTensor> recurrentToInputWeights;
681  flatbuffers::Offset<serializer::ConstTensor> cellToInputWeights;
682  flatbuffers::Offset<serializer::ConstTensor> inputGateBias;
683  flatbuffers::Offset<serializer::ConstTensor> projectionWeights;
684  flatbuffers::Offset<serializer::ConstTensor> projectionBias;
685  flatbuffers::Offset<serializer::ConstTensor> cellToForgetWeights;
686  flatbuffers::Offset<serializer::ConstTensor> cellToOutputWeights;
687  flatbuffers::Offset<serializer::ConstTensor> inputLayerNormWeights;
688  flatbuffers::Offset<serializer::ConstTensor> forgetLayerNormWeights;
689  flatbuffers::Offset<serializer::ConstTensor> cellLayerNormWeights;
690  flatbuffers::Offset<serializer::ConstTensor> outputLayerNormWeights;
691 
692  if (!descriptor.m_CifgEnabled)
693  {
694  inputToInputWeights = CreateConstTensorInfo(constants[i++]); //InputToInputWeights
695  recurrentToInputWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToInputWeights
696  inputGateBias = CreateConstTensorInfo(constants[i++]); //InputGateBias
697  }
698 
699  if (descriptor.m_PeepholeEnabled)
700  {
701  if (!descriptor.m_CifgEnabled)
702  {
703  cellToInputWeights = CreateConstTensorInfo(constants[i++]); //CellToInputWeights
704  }
705  cellToForgetWeights = CreateConstTensorInfo(constants[i++]); //CellToForgetWeights
706  cellToOutputWeights = CreateConstTensorInfo(constants[i++]); //CellToOutputWeights
707  }
708 
709  if (descriptor.m_ProjectionEnabled)
710  {
711  projectionWeights = CreateConstTensorInfo(constants[i++]); //ProjectionWeights
712  projectionBias = CreateConstTensorInfo(constants[i++]); //ProjectionBias
713  }
714 
715  if (descriptor.m_LayerNormEnabled)
716  {
717  if (!descriptor.m_CifgEnabled)
718  {
719  inputLayerNormWeights = CreateConstTensorInfo(constants[i++]); //InputLayerNormWeights
720  }
721  forgetLayerNormWeights = CreateConstTensorInfo(constants[i++]); //ForgetLayerNormWeights
722  cellLayerNormWeights = CreateConstTensorInfo(constants[i++]); //CellLayerNormWeights
723  outputLayerNormWeights = CreateConstTensorInfo(constants[i++]); //OutputLayerNormWeights
724  }
725 
726  auto fbLstmParams = serializer::CreateLstmInputParams(
727  m_flatBufferBuilder,
728  inputToForgetWeights,
729  inputToCellWeights,
730  inputToOutputWeights,
731  recurrentToForgetWeights,
732  recurrentToCellWeights,
733  recurrentToOutputWeights,
734  forgetGateBias,
735  cellBias,
736  outputGateBias,
737  inputToInputWeights,
738  recurrentToInputWeights,
739  cellToInputWeights,
740  inputGateBias,
741  projectionWeights,
742  projectionBias,
743  cellToForgetWeights,
744  cellToOutputWeights,
745  inputLayerNormWeights,
746  forgetLayerNormWeights,
747  cellLayerNormWeights,
748  outputLayerNormWeights);
749 
750  auto fbLstmLayer = serializer::CreateLstmLayer(
751  m_flatBufferBuilder,
752  fbLstmBaseLayer,
753  fbLstmDescriptor,
754  fbLstmParams);
755 
757 }
758 
759 void SerializerStrategy::SerializeMaximumLayer(const armnn::IConnectableLayer* layer, const char* name)
760 {
761  IgnoreUnused(name);
762 
763  auto fbMaximumBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Maximum);
764  auto fbMaximumLayer = serializer::CreateMaximumLayer(m_flatBufferBuilder, fbMaximumBaseLayer);
765 
767 }
768 
769 void SerializerStrategy::SerializeMeanLayer(const armnn::IConnectableLayer* layer,
770  const armnn::MeanDescriptor& descriptor,
771  const char* name)
772 {
773  IgnoreUnused(name);
774 
775  auto fbMeanBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Mean);
776  auto fbMeanDescriptor = serializer::CreateMeanDescriptor(m_flatBufferBuilder,
777  m_flatBufferBuilder.CreateVector(descriptor.m_Axis),
778  descriptor.m_KeepDims);
779 
780  auto fbMeanLayer = serializer::CreateMeanLayer(m_flatBufferBuilder,
781  fbMeanBaseLayer,
782  fbMeanDescriptor);
783 
785 }
786 
787 void SerializerStrategy::SerializeMinimumLayer(const armnn::IConnectableLayer* layer, const char* name)
788 {
789  IgnoreUnused(name);
790 
791  auto fbMinimumBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Minimum);
792  auto fbMinimumLayer = serializer::CreateMinimumLayer(m_flatBufferBuilder, fbMinimumBaseLayer);
793 
795 }
796 
797 void SerializerStrategy::SerializeMergeLayer(const armnn::IConnectableLayer* layer, const char* name)
798 {
799  IgnoreUnused(name);
800 
801  auto fbMergeBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Merge);
802  auto fbMergeLayer = serializer::CreateMergeLayer(m_flatBufferBuilder, fbMergeBaseLayer);
803 
805 }
806 
807 void SerializerStrategy::SerializeMergerLayer(const armnn::IConnectableLayer* layer,
808  const armnn::MergerDescriptor& mergerDescriptor,
809  const char* name)
810 {
811  SerializeConcatLayer(layer, mergerDescriptor, name);
812 }
813 
814 void SerializerStrategy::SerializeConcatLayer(const armnn::IConnectableLayer* layer,
815  const armnn::ConcatDescriptor& concatDescriptor,
816  const char* name)
817 {
818  IgnoreUnused(name);
819 
820  auto flatBufferConcatBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Concat);
821 
822  std::vector<flatbuffers::Offset<UintVector>> views;
823  for (unsigned int v = 0; v < concatDescriptor.GetNumViews(); ++v)
824  {
825  const uint32_t* origin = concatDescriptor.GetViewOrigin(v);
826  std::vector<uint32_t> origins;
827  for (unsigned int d = 0; d < concatDescriptor.GetNumDimensions(); ++d)
828  {
829  origins.push_back(origin[d]);
830  }
831  auto view = m_flatBufferBuilder.CreateVector(origins);
832  auto uintVector = CreateUintVector(m_flatBufferBuilder, view);
833  views.push_back(uintVector);
834  }
835 
836  auto flatBufferConcatDescriptor = CreateOriginsDescriptor(m_flatBufferBuilder,
837  concatDescriptor.GetConcatAxis(),
838  concatDescriptor.GetNumViews(),
839  concatDescriptor.GetNumDimensions(),
840  m_flatBufferBuilder.CreateVector(views));
841 
842  auto flatBufferLayer = CreateConcatLayer(m_flatBufferBuilder,
843  flatBufferConcatBaseLayer,
844  flatBufferConcatDescriptor);
845 
847 }
848 
849 void SerializerStrategy::SerializeMultiplicationLayer(const armnn::IConnectableLayer* layer, const char* name)
850 {
851  IgnoreUnused(name);
852 
853  auto fbMultiplicationBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Multiplication);
854  auto fbMultiplicationLayer = serializer::CreateMultiplicationLayer(m_flatBufferBuilder,
855  fbMultiplicationBaseLayer);
856 
858 }
859 
860 void SerializerStrategy::SerializePadLayer(const armnn::IConnectableLayer* layer,
861  const armnn::PadDescriptor& padDescriptor,
862  const char* name)
863 {
864  IgnoreUnused(name);
865 
866  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Pad);
867 
868  std::vector<unsigned int> padList;
869  for (auto& p: padDescriptor.m_PadList)
870  {
871  padList.push_back(p.first);
872  padList.push_back(p.second);
873  }
874 
875  auto flatBufferPadDesc = serializer::CreatePadDescriptor(m_flatBufferBuilder,
876  m_flatBufferBuilder.CreateVector(padList),
877  padDescriptor.m_PadValue);
878 
879  auto flatBufferPadLayer = serializer::CreatePadLayer(m_flatBufferBuilder,
880  flatBufferBaseLayer,
881  flatBufferPadDesc);
882 
883  CreateAnyLayer(flatBufferPadLayer.o, serializer::Layer::Layer_PadLayer);
884 }
885 
886 void SerializerStrategy::SerializePermuteLayer(const armnn::IConnectableLayer* layer,
887  const armnn::PermuteDescriptor& permuteDescriptor,
888  const char* name)
889 {
890  IgnoreUnused(name);
891 
892  // Create FlatBuffer BaseLayer
893  auto flatBufferPermuteBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Permute);
894 
895  std::vector<unsigned int> dimMappings;
896  for (unsigned int i=0; i<permuteDescriptor.m_DimMappings.GetSize(); ++i)
897  {
898  dimMappings.push_back(permuteDescriptor.m_DimMappings[i]);
899  }
900 
901  auto flatBufferPermuteDesc = serializer::CreatePermuteDescriptor(m_flatBufferBuilder,
902  m_flatBufferBuilder.CreateVector(dimMappings));
903 
904  // Create the FlatBuffer PermuteLayer
905  auto flatBufferPermuteLayer = serializer::CreatePermuteLayer(m_flatBufferBuilder,
906  flatBufferPermuteBaseLayer,
907  flatBufferPermuteDesc);
908 
909  // Add the AnyLayer to the FlatBufferLayers
910  CreateAnyLayer(flatBufferPermuteLayer.o, serializer::Layer::Layer_PermuteLayer);
911 }
912 
913 // Build FlatBuffer for Rank Layer
914 void SerializerStrategy::SerializeRankLayer(const armnn::IConnectableLayer* layer,
915  const char* name)
916 {
917  IgnoreUnused(name);
918  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Rank);
919  auto flatBufferRankLayer = serializer::CreateRankLayer(m_flatBufferBuilder, flatBufferBaseLayer);
920 
921  CreateAnyLayer(flatBufferRankLayer.o, serializer::Layer::Layer_RankLayer);
922 }
923 
924 void SerializerStrategy::SerializeReduceLayer(const armnn::IConnectableLayer* layer,
925  const armnn::ReduceDescriptor& reduceDescriptor,
926  const char*)
927 {
928  auto fbReduceBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Reduce);
929  auto fbDescriptor = CreateReduceDescriptor(m_flatBufferBuilder,
930  reduceDescriptor.m_KeepDims,
931  m_flatBufferBuilder.CreateVector(reduceDescriptor.m_vAxis),
933  auto fbReduceLayer = serializer::CreateReduceLayer(m_flatBufferBuilder,
934  fbReduceBaseLayer,
935  fbDescriptor);
936 
938 }
939 
940 // Build FlatBuffer for Reshape Layer
941 void SerializerStrategy::SerializeReshapeLayer(const armnn::IConnectableLayer* layer,
942  const armnn::ReshapeDescriptor& reshapeDescriptor,
943  const char* name)
944 {
945  IgnoreUnused(name);
946 
947  // Create FlatBuffer BaseLayer
948  auto flatBufferReshapeBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Reshape);
949 
950  std::vector<unsigned int> targetShape;
951  for (unsigned int i =0; i < reshapeDescriptor.m_TargetShape.GetNumDimensions(); i++)
952  {
953  targetShape.push_back(reshapeDescriptor.m_TargetShape[i]);
954  }
955 
956  auto flatBufferReshapeDesc = serializer::CreateReshapeDescriptor(m_flatBufferBuilder,
957  m_flatBufferBuilder.CreateVector(targetShape));
958 
959  // Create the FlatBuffer ReshapeLayer
960  auto flatBufferReshapeLayer = serializer::CreateReshapeLayer(m_flatBufferBuilder, flatBufferReshapeBaseLayer,
961  flatBufferReshapeDesc);
962 
963  // Add the AnyLayer to the FlatBufferLayers
964  CreateAnyLayer(flatBufferReshapeLayer.o, serializer::Layer::Layer_ReshapeLayer);
965 }
966 
967 void SerializerStrategy::SerializeResizeBilinearLayer(const armnn::IConnectableLayer* layer,
968  const armnn::ResizeBilinearDescriptor& resizeDescriptor,
969  const char* name)
970 {
971  IgnoreUnused(name);
972 
973  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_ResizeBilinear);
974 
975  auto flatBufferDescriptor =
976  CreateResizeBilinearDescriptor(m_flatBufferBuilder,
977  resizeDescriptor.m_TargetWidth,
978  resizeDescriptor.m_TargetHeight,
979  GetFlatBufferDataLayout(resizeDescriptor.m_DataLayout),
980  resizeDescriptor.m_AlignCorners,
981  resizeDescriptor.m_HalfPixelCenters);
982 
983  auto flatBufferLayer = serializer::CreateResizeBilinearLayer(m_flatBufferBuilder,
984  flatBufferBaseLayer,
985  flatBufferDescriptor);
986 
988 }
989 
990 void SerializerStrategy::SerializeResizeLayer(const armnn::IConnectableLayer* layer,
991  const armnn::ResizeDescriptor& resizeDescriptor,
992  const char* name)
993 {
994  IgnoreUnused(name);
995 
996  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Resize);
997 
998  auto flatBufferDescriptor =
999  CreateResizeDescriptor(m_flatBufferBuilder,
1000  resizeDescriptor.m_TargetHeight,
1001  resizeDescriptor.m_TargetWidth,
1002  GetFlatBufferResizeMethod(resizeDescriptor.m_Method),
1003  GetFlatBufferDataLayout(resizeDescriptor.m_DataLayout),
1004  resizeDescriptor.m_AlignCorners,
1005  resizeDescriptor.m_HalfPixelCenters);
1006 
1007  auto flatBufferLayer = serializer::CreateResizeLayer(m_flatBufferBuilder,
1008  flatBufferBaseLayer,
1009  flatBufferDescriptor);
1010 
1012 }
1013 
1014 void SerializerStrategy::SerializeRsqrtLayer(const armnn::IConnectableLayer* layer, const char* name)
1015 {
1016  IgnoreUnused(name);
1017 
1018  auto fbRsqrtBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Rsqrt);
1019  auto fbRsqrtLayer = serializer::CreateRsqrtLayer(m_flatBufferBuilder, fbRsqrtBaseLayer);
1020 
1022 }
1023 
1024 void SerializerStrategy::SerializeSliceLayer(const armnn::IConnectableLayer* layer,
1025  const armnn::SliceDescriptor& sliceDescriptor,
1026  const char* name)
1027 {
1028  IgnoreUnused(name);
1029 
1030  auto fbSliceBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Slice);
1031  auto fbSliceDescriptor = CreateSliceDescriptor(m_flatBufferBuilder,
1032  m_flatBufferBuilder.CreateVector(sliceDescriptor.m_Begin),
1033  m_flatBufferBuilder.CreateVector(sliceDescriptor.m_Size));
1034 
1035  auto fbSliceLayer = serializer::CreateSliceLayer(m_flatBufferBuilder, fbSliceBaseLayer, fbSliceDescriptor);
1036 
1038 }
1039 
1040 // Build FlatBuffer for Softmax Layer
1041 void SerializerStrategy::SerializeSoftmaxLayer(const armnn::IConnectableLayer* layer,
1042  const armnn::SoftmaxDescriptor& softmaxDescriptor,
1043  const char* name)
1044 {
1045  IgnoreUnused(name);
1046 
1047  // Create FlatBuffer BaseLayer
1048  auto flatBufferSoftmaxBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Softmax);
1049 
1050  // Create the FlatBuffer SoftmaxDescriptor
1051  auto flatBufferSoftmaxDesc =
1052  serializer::CreateSoftmaxDescriptor(m_flatBufferBuilder, softmaxDescriptor.m_Beta);
1053 
1054  // Create the FlatBuffer SoftmaxLayer
1055  auto flatBufferSoftmaxLayer =
1056  serializer::CreateSoftmaxLayer(m_flatBufferBuilder,
1057  flatBufferSoftmaxBaseLayer,
1058  flatBufferSoftmaxDesc);
1059 
1060  CreateAnyLayer(flatBufferSoftmaxLayer.o, serializer::Layer::Layer_SoftmaxLayer);
1061 }
1062 
1063 void SerializerStrategy::SerializePooling2dLayer(const armnn::IConnectableLayer* layer,
1064  const armnn::Pooling2dDescriptor& pooling2dDescriptor,
1065  const char* name)
1066 {
1067  IgnoreUnused(name);
1068 
1069  auto fbPooling2dBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Pooling2d);
1070  auto fbPooling2dDescriptor = serializer::CreatePooling2dDescriptor(
1071  m_flatBufferBuilder,
1072  GetFlatBufferPoolingAlgorithm(pooling2dDescriptor.m_PoolType),
1073  pooling2dDescriptor.m_PadLeft,
1074  pooling2dDescriptor.m_PadRight,
1075  pooling2dDescriptor.m_PadTop,
1076  pooling2dDescriptor.m_PadBottom,
1077  pooling2dDescriptor.m_PoolWidth,
1078  pooling2dDescriptor.m_PoolHeight,
1079  pooling2dDescriptor.m_StrideX,
1080  pooling2dDescriptor.m_StrideY,
1082  GetFlatBufferPaddingMethod(pooling2dDescriptor.m_PaddingMethod),
1083  GetFlatBufferDataLayout(pooling2dDescriptor.m_DataLayout));
1084 
1085  auto fbPooling2dLayer = serializer::CreatePooling2dLayer(m_flatBufferBuilder,
1086  fbPooling2dBaseLayer,
1087  fbPooling2dDescriptor);
1088 
1090 }
1091 
1092 void SerializerStrategy::SerializePreluLayer(const armnn::IConnectableLayer* layer,
1093  const char* name)
1094 {
1095  IgnoreUnused(name);
1096 
1097  // Create FlatBuffer BaseLayer
1098  auto flatBufferPreluBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Prelu);
1099 
1100  // Create the FlatBuffer AdditionLayer
1101  auto flatBufferPreluLayer = serializer::CreatePreluLayer(m_flatBufferBuilder, flatBufferPreluBaseLayer);
1102 
1103  // Add the AnyLayer to the FlatBufferLayers
1104  CreateAnyLayer(flatBufferPreluLayer.o, serializer::Layer::Layer_PreluLayer);
1105 }
1106 
1107 void SerializerStrategy::SerializeQuantizeLayer(const armnn::IConnectableLayer *layer, const char *name)
1108 {
1109  IgnoreUnused(name);
1110 
1111  auto fbQuantizeBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Quantize);
1112  auto fbQuantizeLayer = serializer::CreateQuantizeLayer(m_flatBufferBuilder,
1113  fbQuantizeBaseLayer);
1115 }
1116 
1117 // Build FlatBuffer for FullyConnected Layer
1118 void SerializerStrategy::SerializeFullyConnectedLayer(const armnn::IConnectableLayer* layer,
1119  const armnn::FullyConnectedDescriptor& fullyConnectedDescriptor,
1120  const std::vector<armnn::ConstTensor>& constants,
1121  const char* name)
1122 {
1123  IgnoreUnused(name);
1124 
1125  const armnn::ConstTensor& weights = constants.at(0);
1126 
1127  // Create FlatBuffer BaseLayer
1128  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_FullyConnected);
1129 
1130  // Create FlatBuffer FullyConnectedDescriptor
1131  auto flatBufferDescriptor =
1132  serializer::CreateFullyConnectedDescriptor(m_flatBufferBuilder,
1133  fullyConnectedDescriptor.m_BiasEnabled,
1134  fullyConnectedDescriptor.m_TransposeWeightMatrix);
1135 
1136  // Create FlatBuffer weights data
1137  auto flatBufferWeights = CreateConstTensorInfo(weights);
1138 
1139  // Create FlatBuffer bias data
1140  flatbuffers::Offset<serializer::ConstTensor> flatBufferBiases;
1141  if (fullyConnectedDescriptor.m_BiasEnabled)
1142  {
1143  armnn::ConstTensor biases = constants.at(1);
1144  flatBufferBiases = CreateConstTensorInfo(biases);
1145  }
1146 
1147  // Create FlatBuffer FullyConnectedLayer
1148  auto flatBufferLayer = serializer::CreateFullyConnectedLayer(m_flatBufferBuilder,
1149  flatBufferBaseLayer,
1150  flatBufferDescriptor,
1151  flatBufferWeights,
1152  flatBufferBiases);
1153 
1154  // Add created FullyConnectedLayer to the FlatBufferLayers
1156 }
1157 
1158 // Build FlatBuffer for SpaceToBatchNd Layer
1159 void SerializerStrategy::SerializeSpaceToBatchNdLayer(const armnn::IConnectableLayer* layer,
1160  const armnn::SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor,
1161  const char* name)
1162 {
1163  IgnoreUnused(name);
1164 
1165  // Create FlatBuffer BaseLayer
1166  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_SpaceToBatchNd);
1167 
1168  std::vector<unsigned int> padList;
1169  padList.reserve(spaceToBatchNdDescriptor.m_PadList.size()*2);
1170  for (auto& pad : spaceToBatchNdDescriptor.m_PadList)
1171  {
1172  padList.push_back(pad.first);
1173  padList.push_back(pad.second);
1174  }
1175 
1176  auto flatBufferDescriptor =
1177  CreateSpaceToBatchNdDescriptor(m_flatBufferBuilder,
1178  m_flatBufferBuilder.CreateVector(spaceToBatchNdDescriptor.m_BlockShape),
1179  m_flatBufferBuilder.CreateVector(padList),
1180  GetFlatBufferDataLayout(spaceToBatchNdDescriptor.m_DataLayout));
1181 
1182  auto flatBufferLayer = serializer::CreateSpaceToBatchNdLayer(m_flatBufferBuilder,
1183  flatBufferBaseLayer,
1184  flatBufferDescriptor);
1185 
1187 }
1188 
1189 // Build FlatBuffer for SpaceToDepthLayer
1190 void SerializerStrategy::SerializeSpaceToDepthLayer(const armnn::IConnectableLayer* layer,
1191  const armnn::SpaceToDepthDescriptor& spaceToDepthDescriptor,
1192  const char* name)
1193 {
1194  IgnoreUnused(name);
1195 
1196  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_SpaceToDepth);
1197  auto flatBufferDescriptor =
1198  CreateSpaceToDepthDescriptor(m_flatBufferBuilder,
1199  spaceToDepthDescriptor.m_BlockSize,
1200  GetFlatBufferDataLayout(spaceToDepthDescriptor.m_DataLayout));
1201 
1202  auto flatBufferLayer = serializer::CreateSpaceToDepthLayer(m_flatBufferBuilder,
1203  flatBufferBaseLayer,
1204  flatBufferDescriptor);
1205 
1207 }
1208 
1209 // Build FlatBuffer for Splitter Layer
1210 void SerializerStrategy::SerializeSplitterLayer(const armnn::IConnectableLayer* layer,
1211  const armnn::ViewsDescriptor& viewsDescriptor,
1212  const char* name)
1213 {
1214  IgnoreUnused(name);
1215 
1216  // Create FlatBuffer ViewOrigins
1217  std::vector<flatbuffers::Offset<UintVector>> flatBufferViewOrigins;
1218  flatBufferViewOrigins.reserve(viewsDescriptor.GetNumViews());
1219 
1220  for(unsigned int vIdx = 0; vIdx < viewsDescriptor.GetNumViews(); ++vIdx)
1221  {
1222  std::vector<uint32_t> viewOrigin;
1223  viewOrigin.reserve(viewsDescriptor.GetNumDimensions());
1224 
1225  // Copy vector
1226  for(unsigned int dIdx = 0; dIdx < viewsDescriptor.GetNumDimensions(); ++dIdx)
1227  {
1228  viewOrigin.push_back(viewsDescriptor.GetViewOrigin(vIdx)[dIdx]);
1229  }
1230 
1231  flatBufferViewOrigins.push_back(CreateUintVector(m_flatBufferBuilder,
1232  m_flatBufferBuilder.CreateVector(viewOrigin)));
1233  }
1234 
1235  // Create FlatBuffer OriginsDescriptor
1236  auto flatBufferOriginDescriptor = CreateOriginsDescriptor(m_flatBufferBuilder,
1237  viewsDescriptor.GetOrigins().GetConcatAxis(),
1238  viewsDescriptor.GetOrigins().GetNumViews(),
1239  viewsDescriptor.GetOrigins().GetNumDimensions(),
1240  m_flatBufferBuilder.CreateVector(flatBufferViewOrigins));
1241 
1242  // Create FlatBuffer ViewOrigins
1243  std::vector<flatbuffers::Offset<UintVector>> flatBufferViewSizes;
1244  flatBufferViewSizes.reserve(viewsDescriptor.GetNumViews());
1245 
1246  for(unsigned int vIdx = 0; vIdx < viewsDescriptor.GetNumViews(); ++vIdx)
1247  {
1248  std::vector<uint32_t> viewSize;
1249  viewSize.reserve(viewsDescriptor.GetNumDimensions());
1250 
1251  // Copy vector
1252  for(unsigned int dIdx = 0; dIdx < viewsDescriptor.GetNumDimensions(); ++dIdx)
1253  {
1254  viewSize.push_back(viewsDescriptor.GetViewSizes(vIdx)[dIdx]);
1255  }
1256 
1257  flatBufferViewSizes.push_back(CreateUintVector(m_flatBufferBuilder,
1258  m_flatBufferBuilder.CreateVector(viewSize)));
1259  }
1260 
1261  // Create FlatBuffer ViewsDescriptor
1262  auto flatBufferViewsDescriptor = CreateViewsDescriptor(m_flatBufferBuilder,
1263  flatBufferOriginDescriptor,
1264  m_flatBufferBuilder.CreateVector(flatBufferViewSizes));
1265 
1266  // Create FlatBuffer BaseLayer
1267  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Splitter);
1268 
1269  auto flatBufferSplitterLayer = serializer::CreateSplitterLayer(m_flatBufferBuilder,
1270  flatBufferBaseLayer,
1271  flatBufferViewsDescriptor);
1272 
1273  CreateAnyLayer(flatBufferSplitterLayer.o, serializer::Layer::Layer_SplitterLayer);
1274 }
1275 
1276 void SerializerStrategy::SerializeNormalizationLayer(const armnn::IConnectableLayer* layer,
1277  const armnn::NormalizationDescriptor& descriptor,
1278  const char* name)
1279 {
1280  IgnoreUnused(name);
1281 
1282  auto fbNormalizationBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Normalization);
1283 
1284  auto fbNormalizationDescriptor = serializer::CreateNormalizationDescriptor(
1285  m_flatBufferBuilder,
1288  descriptor.m_NormSize,
1289  descriptor.m_Alpha,
1290  descriptor.m_Beta,
1291  descriptor.m_K,
1293 
1294  auto flatBufferLayer = serializer::CreateNormalizationLayer(m_flatBufferBuilder,
1295  fbNormalizationBaseLayer,
1296  fbNormalizationDescriptor);
1297 
1299 }
1300 
1301 void SerializerStrategy::SerializeStackLayer(const armnn::IConnectableLayer* layer,
1302  const armnn::StackDescriptor& stackDescriptor,
1303  const char* name)
1304 {
1305  IgnoreUnused(name);
1306 
1307  auto stackBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Stack);
1308 
1309  std::vector<unsigned int> inputShape;
1310  for (unsigned int i =0; i < stackDescriptor.m_InputShape.GetNumDimensions(); i++)
1311  {
1312  inputShape.push_back(stackDescriptor.m_InputShape[i]);
1313  }
1314 
1315  auto flatBufferStackDescriptor = CreateStackDescriptor(m_flatBufferBuilder,
1316  stackDescriptor.m_Axis,
1317  stackDescriptor.m_NumInputs,
1318  m_flatBufferBuilder.CreateVector(inputShape));
1319 
1320  auto stackLayer = serializer::CreateStackLayer(m_flatBufferBuilder, stackBaseLayer, flatBufferStackDescriptor);
1322 }
1323 
1324 void SerializerStrategy::SerializeStandInLayer(const armnn::IConnectableLayer *layer,
1325  const armnn::StandInDescriptor& standInDescriptor,
1326  const char *name)
1327 {
1328  IgnoreUnused(name);
1329 
1330  auto fbDescriptor = serializer::CreateStandInDescriptor(m_flatBufferBuilder,
1331  standInDescriptor.m_NumInputs,
1332  standInDescriptor.m_NumOutputs);
1333 
1334  auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_StandIn);
1335  auto fbLayer = serializer::CreateStandInLayer(m_flatBufferBuilder, fbBaseLayer, fbDescriptor);
1336 
1338 }
1339 
1340 void SerializerStrategy::SerializeStridedSliceLayer(const armnn::IConnectableLayer* layer,
1341  const armnn::StridedSliceDescriptor& stridedSliceDescriptor,
1342  const char* name)
1343 {
1344  IgnoreUnused(name);
1345 
1346  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_StridedSlice);
1347 
1348  auto flatBufferDescriptor =
1349  CreateStridedSliceDescriptor(m_flatBufferBuilder,
1350  m_flatBufferBuilder.CreateVector(stridedSliceDescriptor.m_Begin),
1351  m_flatBufferBuilder.CreateVector(stridedSliceDescriptor.m_End),
1352  m_flatBufferBuilder.CreateVector(stridedSliceDescriptor.m_Stride),
1353  stridedSliceDescriptor.m_BeginMask,
1354  stridedSliceDescriptor.m_EndMask,
1355  stridedSliceDescriptor.m_ShrinkAxisMask,
1356  stridedSliceDescriptor.m_EllipsisMask,
1357  stridedSliceDescriptor.m_NewAxisMask,
1358  GetFlatBufferDataLayout(stridedSliceDescriptor.m_DataLayout));
1359 
1360  auto flatBufferLayer = serializer::CreateStridedSliceLayer(m_flatBufferBuilder,
1361  flatBufferBaseLayer,
1362  flatBufferDescriptor);
1363 
1365 }
1366 
1367 void SerializerStrategy::SerializeSubtractionLayer(const armnn::IConnectableLayer* layer, const char* name)
1368 {
1369  IgnoreUnused(name);
1370 
1371  auto fbSubtractionBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Subtraction);
1372  auto fbSubtractionLayer = serializer::CreateSubtractionLayer(m_flatBufferBuilder, fbSubtractionBaseLayer);
1373 
1375 }
1376 
1377 void SerializerStrategy::SerializeSwitchLayer(const armnn::IConnectableLayer* layer, const char* name)
1378 {
1379  IgnoreUnused(name);
1380 
1381  auto fbSwitchBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Switch);
1382  auto fbSwitchLayer = serializer::CreateSwitchLayer(m_flatBufferBuilder, fbSwitchBaseLayer);
1383 
1385 }
1386 
1387 void SerializerStrategy::SerializeTransposeConvolution2dLayer(
1388  const armnn::IConnectableLayer* layer,
1389  const armnn::TransposeConvolution2dDescriptor& descriptor,
1390  const std::vector<armnn::ConstTensor>& constants,
1391  const char* name)
1392 {
1393  IgnoreUnused(name);
1394 
1395  const armnn::ConstTensor& weights = constants.at(0);
1396 
1398  auto fbDescriptor = CreateTransposeConvolution2dDescriptor(m_flatBufferBuilder,
1399  descriptor.m_PadLeft,
1400  descriptor.m_PadRight,
1401  descriptor.m_PadTop,
1402  descriptor.m_PadBottom,
1403  descriptor.m_StrideX,
1404  descriptor.m_StrideY,
1405  descriptor.m_BiasEnabled,
1407 
1408  // weights & biases
1409  auto fbWeightsConstTensorInfo = CreateConstTensorInfo(weights);
1410  flatbuffers::Offset<serializer::ConstTensor> fbBiasesConstTensorInfo;
1411  if (constants.size() > 1)
1412  {
1413  const armnn::ConstTensor& biases = constants.at(1);
1414  fbBiasesConstTensorInfo = CreateConstTensorInfo(biases);
1415  }
1416 
1417  auto fbLayer = CreateTransposeConvolution2dLayer(m_flatBufferBuilder,
1418  fbBaseLayer,
1419  fbDescriptor,
1420  fbWeightsConstTensorInfo,
1421  fbBiasesConstTensorInfo);
1422 
1424 }
1425 
1426 void SerializerStrategy::SerializeTransposeLayer(const armnn::IConnectableLayer* layer,
1427  const armnn::TransposeDescriptor& descriptor,
1428  const char* name)
1429 {
1430  IgnoreUnused(name);
1431 
1432  // Create FlatBuffer BaseLayer
1433  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Transpose);
1434 
1435  std::vector<unsigned int> dimMappings;
1436  for (unsigned int i=0; i<descriptor.m_DimMappings.GetSize(); ++i)
1437  {
1438  dimMappings.push_back(descriptor.m_DimMappings[i]);
1439  }
1440 
1441  auto flatBufferDesc = serializer::CreateTransposeDescriptor(m_flatBufferBuilder,
1442  m_flatBufferBuilder.CreateVector(dimMappings));
1443 
1444  // Create the FlatBuffer TransposeLayer
1445  auto flatBufferLayer = serializer::CreateTransposeLayer(m_flatBufferBuilder,
1446  flatBufferBaseLayer,
1447  flatBufferDesc);
1448 
1449  // Add the AnyLayer to the FlatBufferLayers
1451 }
1452 
1453 void SerializerStrategy::SerializeQLstmLayer(const armnn::IConnectableLayer* layer,
1454  const armnn::QLstmDescriptor& descriptor,
1455  const std::vector<armnn::ConstTensor>& constants,
1456  const char* name)
1457 {
1458  IgnoreUnused(name);
1459 
1460  auto fbQLstmBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_QLstm);
1461 
1462  auto fbQLstmDescriptor = serializer::CreateQLstmDescriptor(
1463  m_flatBufferBuilder,
1464  descriptor.m_CifgEnabled,
1465  descriptor.m_PeepholeEnabled,
1466  descriptor.m_ProjectionEnabled,
1467  descriptor.m_LayerNormEnabled,
1468  descriptor.m_CellClip,
1469  descriptor.m_ProjectionClip,
1470  descriptor.m_InputIntermediateScale,
1471  descriptor.m_ForgetIntermediateScale,
1472  descriptor.m_CellIntermediateScale,
1473  descriptor.m_OutputIntermediateScale,
1474  descriptor.m_HiddenStateZeroPoint,
1475  descriptor.m_HiddenStateScale
1476  );
1477 
1478  // Index for constants vector
1479  std::size_t i = 0;
1480 
1481  // Mandatory params
1482  auto inputToForgetWeights = CreateConstTensorInfo(constants[i++]); //InputToForgetWeights
1483  auto inputToCellWeights = CreateConstTensorInfo(constants[i++]); //InputToCellWeights
1484  auto inputToOutputWeights = CreateConstTensorInfo(constants[i++]); //InputToOutputWeights
1485  auto recurrentToForgetWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToForgetWeights
1486  auto recurrentToCellWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToCellWeights
1487  auto recurrentToOutputWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToOutputWeights
1488  auto forgetGateBias = CreateConstTensorInfo(constants[i++]); //ForgetGateBias
1489  auto cellBias = CreateConstTensorInfo(constants[i++]); //CellBias
1490  auto outputGateBias = CreateConstTensorInfo(constants[i++]); //OutputGateBias
1491 
1492  // CIFG
1493  flatbuffers::Offset<serializer::ConstTensor> inputToInputWeights;
1494  flatbuffers::Offset<serializer::ConstTensor> recurrentToInputWeights;
1495  flatbuffers::Offset<serializer::ConstTensor> inputGateBias;
1496 
1497  if (!descriptor.m_CifgEnabled)
1498  {
1499  inputToInputWeights = CreateConstTensorInfo(constants[i++]); //InputToInputWeights
1500  recurrentToInputWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToInputWeights
1501  inputGateBias = CreateConstTensorInfo(constants[i++]); //InputGateBias
1502  }
1503 
1504  // Peephole
1505  flatbuffers::Offset<serializer::ConstTensor> cellToInputWeights;
1506  flatbuffers::Offset<serializer::ConstTensor> cellToForgetWeights;
1507  flatbuffers::Offset<serializer::ConstTensor> cellToOutputWeights;
1508 
1509  if (descriptor.m_PeepholeEnabled)
1510  {
1511  if (!descriptor.m_CifgEnabled)
1512  {
1513  cellToInputWeights = CreateConstTensorInfo(constants[i++]); //CellToInputWeights
1514  }
1515  cellToForgetWeights = CreateConstTensorInfo(constants[i++]); //CellToForgetWeights
1516  cellToOutputWeights = CreateConstTensorInfo(constants[i++]); //CellToOutputWeights
1517  }
1518 
1519  // Projection
1520  flatbuffers::Offset<serializer::ConstTensor> projectionWeights;
1521  flatbuffers::Offset<serializer::ConstTensor> projectionBias;
1522 
1523  if (descriptor.m_ProjectionEnabled)
1524  {
1525  projectionWeights = CreateConstTensorInfo(constants[i++]); //ProjectionWeights
1526  projectionBias = CreateConstTensorInfo(constants[i++]); //ProjectionBias
1527  }
1528 
1529  // Layer norm
1530  flatbuffers::Offset<serializer::ConstTensor> inputLayerNormWeights;
1531  flatbuffers::Offset<serializer::ConstTensor> forgetLayerNormWeights;
1532  flatbuffers::Offset<serializer::ConstTensor> cellLayerNormWeights;
1533  flatbuffers::Offset<serializer::ConstTensor> outputLayerNormWeights;
1534 
1535  if (descriptor.m_LayerNormEnabled)
1536  {
1537  if (!descriptor.m_CifgEnabled)
1538  {
1539  inputLayerNormWeights = CreateConstTensorInfo(constants[i++]); //InputLayerNormWeights
1540  }
1541  forgetLayerNormWeights = CreateConstTensorInfo(constants[i++]); //ForgetLayerNormWeights
1542  cellLayerNormWeights = CreateConstTensorInfo(constants[i++]); //CellLayerNormWeights
1543  outputLayerNormWeights = CreateConstTensorInfo(constants[i++]); //OutputLayerNormWeights
1544  }
1545 
1546  auto fbQLstmParams = serializer::CreateQLstmInputParams(
1547  m_flatBufferBuilder,
1548  inputToForgetWeights,
1549  inputToCellWeights,
1550  inputToOutputWeights,
1551  recurrentToForgetWeights,
1552  recurrentToCellWeights,
1553  recurrentToOutputWeights,
1554  forgetGateBias,
1555  cellBias,
1556  outputGateBias,
1557  inputToInputWeights,
1558  recurrentToInputWeights,
1559  inputGateBias,
1560  projectionWeights,
1561  projectionBias,
1562  cellToInputWeights,
1563  cellToForgetWeights,
1564  cellToOutputWeights,
1565  inputLayerNormWeights,
1566  forgetLayerNormWeights,
1567  cellLayerNormWeights,
1568  outputLayerNormWeights);
1569 
1570  auto fbQLstmLayer = serializer::CreateQLstmLayer(
1571  m_flatBufferBuilder,
1572  fbQLstmBaseLayer,
1573  fbQLstmDescriptor,
1574  fbQLstmParams);
1575 
1577 }
1578 
1579 void SerializerStrategy::SerializeQuantizedLstmLayer(const armnn::IConnectableLayer* layer,
1580  const std::vector<armnn::ConstTensor>& constants,
1581  const char* name)
1582 {
1583  IgnoreUnused(name);
1584 
1585  auto fbQuantizedLstmBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_QuantizedLstm);
1586 
1587  // index for constants vector
1588  size_t i = 0;
1589 
1590  // Get input parameters
1591  auto inputToInputWeights = CreateConstTensorInfo(constants[i++]);
1592  auto inputToForgetWeights = CreateConstTensorInfo(constants[i++]);
1593  auto inputToCellWeights = CreateConstTensorInfo(constants[i++]);
1594  auto inputToOutputWeights = CreateConstTensorInfo(constants[i++]);
1595 
1596  auto recurrentToInputWeights = CreateConstTensorInfo(constants[i++]);
1597  auto recurrentToForgetWeights = CreateConstTensorInfo(constants[i++]);
1598  auto recurrentToCellWeights = CreateConstTensorInfo(constants[i++]);
1599  auto recurrentToOutputWeights = CreateConstTensorInfo(constants[i++]);
1600 
1601  auto inputGateBias = CreateConstTensorInfo(constants[i++]);
1602  auto forgetGateBias = CreateConstTensorInfo(constants[i++]);
1603  auto cellBias = CreateConstTensorInfo(constants[i++]);
1604  auto outputGateBias = CreateConstTensorInfo(constants[i++]);
1605 
1606  auto fbQuantizedLstmParams = serializer::CreateQuantizedLstmInputParams(
1607  m_flatBufferBuilder,
1608  inputToInputWeights,
1609  inputToForgetWeights,
1610  inputToCellWeights,
1611  inputToOutputWeights,
1612  recurrentToInputWeights,
1613  recurrentToForgetWeights,
1614  recurrentToCellWeights,
1615  recurrentToOutputWeights,
1616  inputGateBias,
1617  forgetGateBias,
1618  cellBias,
1619  outputGateBias);
1620 
1621  auto fbQuantizedLstmLayer = serializer::CreateQuantizedLstmLayer(
1622  m_flatBufferBuilder,
1623  fbQuantizedLstmBaseLayer,
1624  fbQuantizedLstmParams);
1625 
1627 }
1628 
1629 fb::Offset<serializer::LayerBase> SerializerStrategy::CreateLayerBase(const IConnectableLayer* layer,
1630  const serializer::LayerType layerType)
1631 {
1632 
1633  uint32_t fbIndex = GetSerializedId(layer->GetGuid());
1634 
1635  std::vector<fb::Offset<serializer::InputSlot>> inputSlots = CreateInputSlots(layer);
1636  std::vector<fb::Offset<serializer::OutputSlot>> outputSlots = CreateOutputSlots(layer);
1637 
1638  return serializer::CreateLayerBase(m_flatBufferBuilder,
1639  fbIndex,
1640  m_flatBufferBuilder.CreateString(layer->GetName()),
1641  layerType,
1642  m_flatBufferBuilder.CreateVector(inputSlots),
1643  m_flatBufferBuilder.CreateVector(outputSlots));
1644 }
1645 
1646 void SerializerStrategy::CreateAnyLayer(const flatbuffers::Offset<void>& layer, const serializer::Layer serializerLayer)
1647 {
1648 
1649  auto anyLayer = armnnSerializer::CreateAnyLayer(m_flatBufferBuilder, serializerLayer, layer);
1650  m_serializedLayers.push_back(anyLayer);
1651 }
1652 
1653 template <typename T>
1654 flatbuffers::Offset<flatbuffers::Vector<T>> SerializerStrategy::CreateDataVector(const void* memory, unsigned int size)
1655 {
1656  const T* buffer = reinterpret_cast<const T*>(memory);
1657  std::vector<T> vector(buffer, buffer + (size / sizeof(T)));
1658  auto fbVector = m_flatBufferBuilder.CreateVector(vector);
1659  return fbVector;
1660 }
1661 
1662 flatbuffers::Offset<TensorInfo> SerializerStrategy::CreateTensorInfo(const armnn::TensorInfo& tensorInfo)
1663 {
1664  // Get the dimensions
1665  std::vector<unsigned int> shape;
1666  for(unsigned int dim = 0; dim < tensorInfo.GetShape().GetNumDimensions(); ++dim)
1667  {
1668  shape.push_back(tensorInfo.GetShape()[dim]);
1669  }
1670 
1671  std::vector<bool> specificity;
1672  // This assumes that the TensorShape constructors have ensured that the size of m_DimensionsSpecificity
1673  // matches the size of dimensions.
1674  for(unsigned int dim = 0; dim < tensorInfo.GetShape().GetNumDimensions(); ++dim)
1675  {
1676  specificity.push_back(tensorInfo.GetShape().GetDimensionSpecificity(dim));
1677  }
1678 
1679  if (tensorInfo.HasPerAxisQuantization())
1680  {
1681  // Create FlatBuffer TensorInfo
1682  auto flatBufferTensorInfo =
1683  serializer::CreateTensorInfo(m_flatBufferBuilder,
1684  m_flatBufferBuilder.CreateVector(shape),
1685  GetFlatBufferDataType(tensorInfo.GetDataType()),
1686  tensorInfo.GetQuantizationScales()[0],
1687  tensorInfo.GetQuantizationOffset(),
1688  m_flatBufferBuilder.CreateVector(tensorInfo.GetQuantizationScales()),
1689  tensorInfo.GetQuantizationDim().value(),
1690  static_cast<unsigned int>
1691  (tensorInfo.GetShape().GetDimensionality()),
1692  m_flatBufferBuilder.CreateVector(specificity));
1693  return flatBufferTensorInfo;
1694  }
1695 
1696  // Create FlatBuffer TensorInfo
1697  auto flatBufferTensorInfo = serializer::CreateTensorInfo(m_flatBufferBuilder,
1698  m_flatBufferBuilder.CreateVector(shape),
1699  GetFlatBufferDataType(tensorInfo.GetDataType()),
1700  tensorInfo.GetQuantizationScale(),
1701  tensorInfo.GetQuantizationOffset(),
1702  0,
1703  0,
1704  static_cast<unsigned int>
1705  (tensorInfo.GetShape().GetDimensionality()),
1706  m_flatBufferBuilder.CreateVector(specificity));
1707  return flatBufferTensorInfo;
1708 }
1709 
1710 flatbuffers::Offset<serializer::ConstTensor>
1711  SerializerStrategy::CreateConstTensorInfo(const armnn::ConstTensor& constTensor)
1712 {
1713  armnn::TensorInfo tensorInfo = constTensor.GetInfo();
1714 
1715  flatbuffers::Offset<void> fbPayload;
1716 
1717  switch (tensorInfo.GetDataType())
1718  {
1721  {
1722  auto fbVector = CreateDataVector<int32_t>(constTensor.GetMemoryArea(), constTensor.GetNumBytes());
1723  flatbuffers::Offset<serializer::IntData> flatBuffersData = serializer::CreateIntData(
1724  m_flatBufferBuilder,
1725  fbVector);
1726  fbPayload = flatBuffersData.o;
1727  break;
1728  }
1732  {
1733  auto fbVector = CreateDataVector<int16_t>(constTensor.GetMemoryArea(), constTensor.GetNumBytes());
1734  flatbuffers::Offset<serializer::ShortData> flatBuffersData = serializer::CreateShortData(
1735  m_flatBufferBuilder,
1736  fbVector);
1737  fbPayload = flatBuffersData.o;
1738  break;
1739  }
1744  default:
1745  {
1746  auto fbVector = CreateDataVector<int8_t>(constTensor.GetMemoryArea(), constTensor.GetNumBytes());
1747  flatbuffers::Offset<serializer::ByteData> flatBuffersData = serializer::CreateByteData(
1748  m_flatBufferBuilder,
1749  fbVector);
1750  fbPayload = flatBuffersData.o;
1751  }
1752  }
1753  flatbuffers::Offset<serializer::ConstTensor> flatBufferConstTensor = serializer::CreateConstTensor(
1754  m_flatBufferBuilder,
1755  CreateTensorInfo(tensorInfo),
1757  fbPayload);
1758  return flatBufferConstTensor;
1759 }
1760 
1761 flatbuffers::Offset<armnnSerializer::FeatureCompatibilityVersions> SerializerStrategy::GetVersionTable()
1762 {
1763  flatbuffers::Offset<armnnSerializer::FeatureCompatibilityVersions> versionsTable =
1765  m_flatBufferBuilder,
1766  1 // Binding ids scheme version
1767  );
1768  return versionsTable;
1769 }
1770 
1771 std::vector<fb::Offset<serializer::InputSlot>>
1772  SerializerStrategy::CreateInputSlots(const armnn::IConnectableLayer* layer)
1773 {
1774  std::vector<fb::Offset<serializer::InputSlot>> inputSlots;
1775 
1776  // Get the InputSlots
1777  for (unsigned int slotIndex = 0; slotIndex<layer->GetNumInputSlots(); ++slotIndex)
1778  {
1779  const IInputSlot& inputSlot = layer->GetInputSlot(slotIndex);
1780 
1781  // Get the Connection for the InputSlot
1782  const IOutputSlot* connection = inputSlot.GetConnection();
1783 
1784  // Create FlatBuffer Connection
1785  serializer::Connection conn(GetSerializedId(inputSlot.GetConnection()->GetOwningLayerGuid()),
1786  connection->CalculateIndexOnOwner());
1787  // Create FlatBuffer InputSlot
1788  inputSlots.push_back(serializer::CreateInputSlot(m_flatBufferBuilder, slotIndex, &conn));
1789  }
1790  return inputSlots;
1791 }
1792 
1793 std::vector<fb::Offset<serializer::OutputSlot>>
1794  SerializerStrategy::CreateOutputSlots(const armnn::IConnectableLayer* layer)
1795 {
1796  std::vector<fb::Offset<serializer::OutputSlot>> outputSlots;
1797 
1798  // Get the OutputSlots
1799  for (unsigned int slotIndex = 0; slotIndex < layer->GetNumOutputSlots(); ++slotIndex)
1800  {
1801  const IOutputSlot& outputSlot = layer->GetOutputSlot(slotIndex);
1802  const armnn::TensorInfo& tensorInfo = outputSlot.GetTensorInfo();
1803 
1804  // Create FlatBuffer Outputslot
1805  outputSlots.push_back(serializer::CreateOutputSlot(m_flatBufferBuilder,
1806  slotIndex,
1807  CreateTensorInfo(tensorInfo)));
1808  }
1809  return outputSlots;
1810 }
1811 
1813  const BaseDescriptor& descriptor,
1814  const std::vector<armnn::ConstTensor>& constants,
1815  const char* name,
1816  const armnn::LayerBindingId id)
1817 {
1818  IgnoreUnused(constants);
1819 
1820  switch (layer->GetType())
1821  {
1823  {
1824  const armnn::ActivationDescriptor& layerDescriptor =
1825  static_cast<const armnn::ActivationDescriptor&>(descriptor);
1826  SerializeActivationLayer(layer, layerDescriptor, name);
1827  break;
1828  }
1830  {
1831  SerializeAdditionLayer(layer, name);
1832  break;
1833  }
1835  {
1836  const armnn::ArgMinMaxDescriptor& layerDescriptor =
1837  static_cast<const armnn::ArgMinMaxDescriptor&>(descriptor);
1838  SerializeArgMinMaxLayer(layer, layerDescriptor, name);
1839  break;
1840  }
1842  {
1843  const armnn::BatchNormalizationDescriptor& layerDescriptor =
1844  static_cast<const armnn::BatchNormalizationDescriptor&>(descriptor);
1845  SerializeBatchNormalizationLayer(layer,
1846  layerDescriptor,
1847  constants,
1848  name);
1849  break;
1850  }
1852  {
1853  const armnn::BatchToSpaceNdDescriptor& layerDescriptor =
1854  static_cast<const armnn::BatchToSpaceNdDescriptor&>(descriptor);
1855  SerializeBatchToSpaceNdLayer(layer,
1856  layerDescriptor,
1857  name);
1858  break;
1859  }
1861  {
1862  const armnn::ComparisonDescriptor& layerDescriptor =
1863  static_cast<const armnn::ComparisonDescriptor&>(descriptor);
1864  SerializeComparisonLayer(layer,
1865  layerDescriptor,
1866  name);
1867  break;
1868  }
1870  {
1871  const armnn::ConcatDescriptor& layerDescriptor =
1872  static_cast<const armnn::ConcatDescriptor&>(descriptor);
1873  SerializeConcatLayer(layer,
1874  layerDescriptor,
1875  name);
1876  break;
1877  }
1879  {
1880  SerializeConstantLayer(layer,
1881  constants,
1882  name);
1883  break;
1884  }
1886  {
1887  const armnn::Convolution2dDescriptor& layerDescriptor =
1888  static_cast<const armnn::Convolution2dDescriptor&>(descriptor);
1889  SerializeConvolution2dLayer(layer,
1890  layerDescriptor,
1891  constants,
1892  name);
1893  break;
1894  }
1896  {
1897  const armnn::DepthToSpaceDescriptor& layerDescriptor =
1898  static_cast<const armnn::DepthToSpaceDescriptor&>(descriptor);
1899  SerializeDepthToSpaceLayer(layer,
1900  layerDescriptor,
1901  name);
1902  break;
1903  }
1905  {
1906  const armnn::DepthwiseConvolution2dDescriptor& layerDescriptor =
1907  static_cast<const armnn::DepthwiseConvolution2dDescriptor&>(descriptor);
1908  SerializeDepthwiseConvolution2dLayer(layer,
1909  layerDescriptor,
1910  constants,
1911  name);
1912  break;
1913  }
1915  {
1916  SerializeDequantizeLayer(layer,
1917  name);
1918  break;
1919  }
1921  {
1922  const armnn::DetectionPostProcessDescriptor& layerDescriptor =
1923  static_cast<const armnn::DetectionPostProcessDescriptor&>(descriptor);
1924  SerializeDetectionPostProcessLayer(layer, layerDescriptor, constants, name);
1925  break;
1926  }
1928  {
1929  SerializeDivisionLayer(layer, name);
1930  break;
1931  }
1933  {
1934  const armnn::ElementwiseUnaryDescriptor& layerDescriptor =
1935  static_cast<const armnn::ElementwiseUnaryDescriptor&>(descriptor);
1936  SerializeElementwiseUnaryLayer(layer, layerDescriptor, name);
1937  break;
1938  }
1939  case armnn::LayerType::Fill :
1940  {
1941  const armnn::FillDescriptor& layerDescriptor =
1942  static_cast<const armnn::FillDescriptor&>(descriptor);
1943  SerializeFillLayer(layer, layerDescriptor, name);
1944  break;
1945  }
1947  {
1948  SerializeFloorLayer(layer, name);
1949  break;
1950  }
1952  {
1953  const armnn::FullyConnectedDescriptor& layerDescriptor =
1954  static_cast<const armnn::FullyConnectedDescriptor&>(descriptor);
1955  SerializeFullyConnectedLayer(layer, layerDescriptor, constants, name);
1956  break;
1957  }
1959  {
1960  const armnn::GatherDescriptor& layerDescriptor =
1961  static_cast<const armnn::GatherDescriptor&>(descriptor);
1962  SerializeGatherLayer(layer, layerDescriptor, name);
1963  break;
1964  }
1966  {
1967  SerializeInputLayer(layer, id, name);
1968  break;
1969  }
1971  {
1972  const armnn::InstanceNormalizationDescriptor& layerDescriptor =
1973  static_cast<const armnn::InstanceNormalizationDescriptor&>(descriptor);
1974  SerializeInstanceNormalizationLayer(layer, layerDescriptor, name);
1975  break;
1976  }
1978  {
1979  const armnn::L2NormalizationDescriptor& layerDescriptor =
1980  static_cast<const armnn::L2NormalizationDescriptor&>(descriptor);
1981  SerializeL2NormalizationLayer(layer, layerDescriptor, name);
1982  break;
1983  }
1985  {
1986  const armnn::LogicalBinaryDescriptor& layerDescriptor =
1987  static_cast<const armnn::LogicalBinaryDescriptor&>(descriptor);
1988  SerializeLogicalBinaryLayer(layer, layerDescriptor, name);
1989  break;
1990  }
1992  {
1993  const armnn::LogSoftmaxDescriptor& layerDescriptor =
1994  static_cast<const armnn::LogSoftmaxDescriptor&>(descriptor);
1995  SerializeLogSoftmaxLayer(layer, layerDescriptor, name);
1996  break;
1997  }
1998  case armnn::LayerType::Lstm :
1999  {
2000  const armnn::LstmDescriptor& layerDescriptor =
2001  static_cast<const armnn::LstmDescriptor&>(descriptor);
2002  SerializeLstmLayer(layer, layerDescriptor, constants, name);
2003  break;
2004  }
2006  {
2007  const armnn::QLstmDescriptor& layerDescriptor =
2008  static_cast<const armnn::QLstmDescriptor&>(descriptor);
2009  SerializeQLstmLayer(layer, layerDescriptor, constants, name);
2010  break;
2011  }
2013  {
2014  SerializeMaximumLayer(layer, name);
2015  break;
2016  }
2017  case armnn::LayerType::Mean :
2018  {
2019  const armnn::MeanDescriptor& layerDescriptor =
2020  static_cast<const armnn::MeanDescriptor&>(descriptor);
2021  SerializeMeanLayer(layer, layerDescriptor, name);
2022  break;
2023  }
2025  {
2026  SerializeMergeLayer(layer, name);
2027  break;
2028  }
2030  {
2031  SerializeMinimumLayer(layer, name);
2032  break;
2033  }
2035  {
2036  SerializeMultiplicationLayer(layer, name);
2037  break;
2038  }
2040  {
2041  const armnn::NormalizationDescriptor& layerDescriptor =
2042  static_cast<const armnn::NormalizationDescriptor&>(descriptor);
2043  SerializeNormalizationLayer(layer, layerDescriptor, name);
2044  break;
2045  }
2047  {
2048  SerializeOutputLayer(layer, id, name);
2049  break;
2050  }
2051  case armnn::LayerType::Pad :
2052  {
2053  const armnn::PadDescriptor& layerDescriptor =
2054  static_cast<const armnn::PadDescriptor&>(descriptor);
2055  SerializePadLayer(layer, layerDescriptor, name);
2056  break;
2057  }
2059  {
2060  const armnn::PermuteDescriptor& layerDescriptor =
2061  static_cast<const armnn::PermuteDescriptor&>(descriptor);
2062  SerializePermuteLayer(layer, layerDescriptor, name);
2063  break;
2064  }
2066  {
2067  const armnn::Pooling2dDescriptor& layerDescriptor =
2068  static_cast<const armnn::Pooling2dDescriptor&>(descriptor);
2069  SerializePooling2dLayer(layer, layerDescriptor, name);
2070  break;
2071  }
2073  {
2074  SerializePreluLayer(layer, name);
2075  break;
2076  }
2078  {
2079  SerializeQuantizeLayer(layer, name);
2080  break;
2081  }
2083  SerializeQuantizedLstmLayer(layer, constants, name);
2084  break;
2086  {
2087  const armnn::ReshapeDescriptor &layerDescriptor =
2088  static_cast<const armnn::ReshapeDescriptor &>(descriptor);
2089  SerializeReshapeLayer(layer, layerDescriptor, name);
2090  break;
2091  }
2093  {
2094  SerializeRankLayer(layer, name);
2095  break;
2096  }
2098  {
2099  const armnn::ReduceDescriptor& layerDescriptor =
2100  static_cast<const armnn::ReduceDescriptor&>(descriptor);
2101  SerializeReduceLayer(layer, layerDescriptor, name);
2102  break;
2103  }
2105  {
2106  const armnn::ResizeDescriptor& layerDescriptor =
2107  static_cast<const armnn::ResizeDescriptor&>(descriptor);
2108  SerializeResizeLayer(layer, layerDescriptor, name);
2109  break;
2110  }
2112  {
2113  const armnn::SliceDescriptor& layerDescriptor =
2114  static_cast<const armnn::SliceDescriptor&>(descriptor);
2115  SerializeSliceLayer(layer, layerDescriptor, name);
2116  break;
2117  }
2119  {
2120  const armnn::SoftmaxDescriptor& layerDescriptor =
2121  static_cast<const armnn::SoftmaxDescriptor&>(descriptor);
2122  SerializeSoftmaxLayer(layer, layerDescriptor, name);
2123  break;
2124  }
2126  {
2127  const armnn::SpaceToBatchNdDescriptor& layerDescriptor =
2128  static_cast<const armnn::SpaceToBatchNdDescriptor&>(descriptor);
2129  SerializeSpaceToBatchNdLayer(layer, layerDescriptor, name);
2130  break;
2131  }
2133  {
2134  const armnn::SpaceToDepthDescriptor& layerDescriptor =
2135  static_cast<const armnn::SpaceToDepthDescriptor&>(descriptor);
2136  SerializeSpaceToDepthLayer(layer, layerDescriptor, name);
2137  break;
2138  }
2140  {
2141  const armnn::SplitterDescriptor& layerDescriptor =
2142  static_cast<const armnn::SplitterDescriptor&>(descriptor);
2143  SerializeSplitterLayer(layer, layerDescriptor, name);
2144  break;
2145  }
2147  {
2148  const armnn::StackDescriptor& layerDescriptor =
2149  static_cast<const armnn::StackDescriptor&>(descriptor);
2150  SerializeStackLayer(layer, layerDescriptor, name);
2151  break;
2152  }
2154  {
2155  const armnn::StandInDescriptor& layerDescriptor =
2156  static_cast<const armnn::StandInDescriptor&>(descriptor);
2157  SerializeStandInLayer(layer, layerDescriptor, name);
2158  break;
2159  }
2161  {
2162  const armnn::StridedSliceDescriptor& layerDescriptor =
2163  static_cast<const armnn::StridedSliceDescriptor&>(descriptor);
2164  SerializeStridedSliceLayer(layer, layerDescriptor, name);
2165  break;
2166  }
2168  {
2169  SerializeSubtractionLayer(layer, name);
2170  break;
2171  }
2173  {
2174  SerializeSwitchLayer(layer, name);
2175  break;
2176  }
2178  {
2179  const armnn::TransposeDescriptor& layerDescriptor =
2180  static_cast<const armnn::TransposeDescriptor&>(descriptor);
2181  SerializeTransposeLayer(layer, layerDescriptor, name);
2182  break;
2183  }
2185  {
2186  const armnn::TransposeConvolution2dDescriptor& layerDescriptor =
2187  static_cast<const armnn::TransposeConvolution2dDescriptor&>(descriptor);
2188  SerializeTransposeConvolution2dLayer(layer, layerDescriptor, constants, name);
2189  break;
2190  }
2191  default:
2192  {
2194  fmt::format("A layer of unknown type was given to the serializer. Layer name: {}; Layer Id: {}",
2195  layer->GetName(),
2196  id));
2197  }
2198  }
2199 }
2200 
2202 {
2203  // Iterate through to network
2204  inNetwork.ExecuteStrategy(m_SerializerStrategy);
2205  flatbuffers::FlatBufferBuilder& fbBuilder = m_SerializerStrategy.GetFlatBufferBuilder();
2206 
2207  // Create FlatBuffer SerializedGraph
2208  auto serializedGraph = serializer::CreateSerializedGraph(
2209  fbBuilder,
2210  fbBuilder.CreateVector(m_SerializerStrategy.GetSerializedLayers()),
2211  fbBuilder.CreateVector(m_SerializerStrategy.GetInputIds()),
2212  fbBuilder.CreateVector(m_SerializerStrategy.GetOutputIds()),
2213  m_SerializerStrategy.GetVersionTable());
2214 
2215  // Serialize the graph
2216  fbBuilder.Finish(serializedGraph);
2217 }
2218 
2219 
2221 {
2222  flatbuffers::FlatBufferBuilder& fbBuilder = m_SerializerStrategy.GetFlatBufferBuilder();
2223 
2224  auto bytesToWrite = armnn::numeric_cast<std::streamsize>(fbBuilder.GetSize());
2225  stream.write(reinterpret_cast<const char*>(fbBuilder.GetBufferPointer()), bytesToWrite);
2226  return !stream.bad();
2227 }
2228 
2229 } // namespace armnnSerializer
uint32_t m_PadBottom
Padding bottom value in the height dimension.
bool m_BiasEnabled
Enable/disable bias.
float m_Eps
Used to avoid dividing by zero.
virtual unsigned int GetNumOutputSlots() const =0
Returns the number of connectable output slots.
flatbuffers::Offset< FullyConnectedDescriptor > CreateFullyConnectedDescriptor(flatbuffers::FlatBufferBuilder &_fbb, bool biasEnabled=false, bool transposeWeightsMatrix=false)
bool m_HalfPixelCenters
Half Pixel Centers.
armnnSerializer::UnaryOperation GetFlatBufferUnaryOperation(armnn::UnaryOperation comparisonOperation)
bool m_ProjectionEnabled
Enable/disable the projection layer.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
flatbuffers::Offset< ReshapeDescriptor > CreateReshapeDescriptor(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> targetShape=0)
bool m_AlignCorners
Aligned corners.
flatbuffers::Offset< ReduceLayer > CreateReduceLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::ReduceDescriptor > descriptor=0)
UnaryOperation m_Operation
Specifies the elementwiseUnary operation to execute.
uint32_t m_Axis
0-based axis along which to stack the input tensors.
flatbuffers::Offset< OutputSlot > CreateOutputSlot(flatbuffers::FlatBufferBuilder &_fbb, uint32_t index=0, flatbuffers::Offset< armnnSerializer::TensorInfo > tensorInfo=0)
A ViewsDescriptor for the SplitterLayer.
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:62
flatbuffers::Offset< DepthwiseConvolution2dDescriptor > CreateDepthwiseConvolution2dDescriptor(flatbuffers::FlatBufferBuilder &_fbb, uint32_t padLeft=0, uint32_t padRight=0, uint32_t padTop=0, uint32_t padBottom=0, uint32_t strideX=0, uint32_t strideY=0, uint32_t dilationX=1, uint32_t dilationY=1, bool biasEnabled=false, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NCHW)
float m_ScaleW
Center size encoding scale weight.
uint32_t m_PadBottom
Padding bottom value in the height dimension.
bool m_BiasEnabled
Enable/disable bias.
void ExecuteStrategy(IStrategy &strategy) const
Definition: Network.cpp:500
virtual unsigned int GetNumInputSlots() const =0
Returns the number of connectable input slots.
float m_K
Kappa value used for the across channel normalization equation.
int m_Axis
Scalar, defaulted to the last index (-1), specifying the dimension the activation will be performed o...
A TransposeConvolution2dDescriptor for the TransposeConvolution2dLayer.
flatbuffers::Offset< AbsLayer > CreateAbsLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
flatbuffers::Offset< LstmLayer > CreateLstmLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::LstmDescriptor > descriptor=0, flatbuffers::Offset< armnnSerializer::LstmInputParams > inputParams=0)
const TensorShape & GetShape() const
Definition: Tensor.hpp:187
flatbuffers::Offset< L2NormalizationLayer > CreateL2NormalizationLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::L2NormalizationDescriptor > descriptor=0)
uint32_t m_PadBottom
Padding bottom value in the height dimension.
uint32_t m_PadLeft
Padding left value in the width dimension.
flatbuffers::Offset< TransposeConvolution2dDescriptor > CreateTransposeConvolution2dDescriptor(flatbuffers::FlatBufferBuilder &_fbb, uint32_t padLeft=0, uint32_t padRight=0, uint32_t padTop=0, uint32_t padBottom=0, uint32_t strideX=0, uint32_t strideY=0, bool biasEnabled=false, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NCHW)
float m_ClippingThresProj
Clipping threshold value for the projection.
int32_t m_ShrinkAxisMask
Shrink axis mask value. If set, the nth specification shrinks the dimensionality by 1...
A ReshapeDescriptor for the ReshapeLayer.
flatbuffers::Offset< ResizeDescriptor > CreateResizeDescriptor(flatbuffers::FlatBufferBuilder &_fbb, uint32_t targetHeight=0, uint32_t targetWidth=0, armnnSerializer::ResizeMethod method=armnnSerializer::ResizeMethod_NearestNeighbor, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NHWC, bool alignCorners=false, bool halfPixelCenters=false)
flatbuffers::Offset< FillLayer > CreateFillLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::FillDescriptor > descriptor=0)
std::vector< int > m_Begin
Begin values for the input that will be sliced.
flatbuffers::Offset< SoftmaxDescriptor > CreateSoftmaxDescriptor(flatbuffers::FlatBufferBuilder &_fbb, float beta=0.0f)
float m_PadValue
Optional value to use for padding, defaults to 0.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
uint32_t GetNumDimensions() const
Get the number of dimensions.
A ComparisonDescriptor for the ComparisonLayer.
Definition: Descriptors.hpp:78
void Serialize(const armnn::INetwork &inNetwork)
Serializes the network to ArmNN SerializedGraph.
float m_ScaleX
Center size encoding scale x.
TensorShape m_InputShape
Required shape of all input tensors.
uint32_t m_TargetWidth
Target width value.
bool m_TransposeWeightMatrix
Enable/disable transpose weight matrix.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Dimensionality GetDimensionality() const
Function that returns the tensor type.
Definition: Tensor.hpp:92
flatbuffers::Offset< GatherLayer > CreateGatherLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::GatherDescriptor > descriptor=0)
flatbuffers::Offset< RankLayer > CreateRankLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
bool HasPerAxisQuantization() const
Definition: Tensor.cpp:437
uint32_t m_PoolWidth
Pooling width value.
bool m_PeepholeEnabled
Enable/disable peephole.
armnnSerializer::OutputShapeRounding GetFlatBufferOutputShapeRounding(armnn::OutputShapeRounding outputShapeRounding)
flatbuffers::Offset< TransposeLayer > CreateTransposeLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::TransposeDescriptor > descriptor=0)
A Convolution2dDescriptor for the Convolution2dLayer.
float m_Alpha
Alpha value for the normalization equation.
uint32_t m_PadLeft
Padding left value in the width dimension.
flatbuffers::Offset< ComparisonLayer > CreateComparisonLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::ComparisonDescriptor > descriptor=0)
bool m_KeepDims
if true then output shape has no change.
float m_HiddenStateScale
Hidden State quantization scale.
bool m_BiasEnabled
Enable/disable bias.
flatbuffers::Offset< ConstTensor > CreateConstTensor(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::TensorInfo > info=0, armnnSerializer::ConstTensorData data_type=armnnSerializer::ConstTensorData_NONE, flatbuffers::Offset< void > data=0)
Optional< unsigned int > GetQuantizationDim() const
Definition: Tensor.cpp:485
flatbuffers::Offset< QuantizeLayer > CreateQuantizeLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
float m_OutputIntermediateScale
Output intermediate quantization scale.
ResizeMethod m_Method
The Interpolation method to use (Bilinear, NearestNeighbor).
float m_Gamma
Gamma, the scale scalar value applied for the normalized tensor. Defaults to 1.0. ...
float m_Beta
Exponentiation value.
flatbuffers::Offset< InputSlot > CreateInputSlot(flatbuffers::FlatBufferBuilder &_fbb, uint32_t index=0, const armnnSerializer::Connection *connection=0)
std::vector< unsigned int > m_Size
Size of the slice in each dimension.
static ISerializer * CreateRaw()
Definition: Serializer.cpp:31
flatbuffers::Offset< SpaceToDepthDescriptor > CreateSpaceToDepthDescriptor(flatbuffers::FlatBufferBuilder &_fbb, uint32_t blockSize=0, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NHWC)
float m_Eps
Value to add to the variance. Used to avoid dividing by zero.
PaddingMethod m_PaddingMethod
The padding method to be used. (Exclude, IgnoreValue).
ArgMinMaxFunction m_Function
Specify if the function is to find Min or Max.
Definition: Descriptors.hpp:70
uint32_t m_DetectionsPerClass
Detections per classes, used in Regular NMS.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
flatbuffers::Offset< QuantizedLstmLayer > CreateQuantizedLstmLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::QuantizedLstmInputParams > inputParams=0)
flatbuffers::Offset< TransposeDescriptor > CreateTransposeDescriptor(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> dimMappings=0)
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
serializer::ActivationFunction GetFlatBufferActivationFunction(armnn::ActivationFunction function)
Definition: Serializer.cpp:56
Main network class which provides the interface for building up a neural network. ...
Definition: INetwork.hpp:178
flatbuffers::Offset< DetectionPostProcessDescriptor > CreateDetectionPostProcessDescriptor(flatbuffers::FlatBufferBuilder &_fbb, uint32_t maxDetections=0, uint32_t maxClassesPerDetection=0, uint32_t detectionsPerClass=0, float nmsScoreThreshold=0.0f, float nmsIouThreshold=0.0f, uint32_t numClasses=0, bool useRegularNms=false, float scaleX=0.0f, float scaleY=0.0f, float scaleW=0.0f, float scaleH=0.0f)
armnnSerializer::NormalizationAlgorithmMethod GetFlatBufferNormalizationAlgorithmMethod(armnn::NormalizationAlgorithmMethod normalizationAlgorithmMethod)
uint32_t m_PadTop
Padding top value in the height dimension.
flatbuffers::Offset< AnyLayer > CreateAnyLayer(flatbuffers::FlatBufferBuilder &_fbb, armnnSerializer::Layer layer_type=armnnSerializer::Layer_NONE, flatbuffers::Offset< void > layer=0)
flatbuffers::Offset< DepthwiseConvolution2dLayer > CreateDepthwiseConvolution2dLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::DepthwiseConvolution2dDescriptor > descriptor=0, flatbuffers::Offset< armnnSerializer::ConstTensor > weights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > biases=0)
A LogicalBinaryDescriptor for the LogicalBinaryLayer.
flatbuffers::Offset< MergeLayer > CreateMergeLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
uint32_t m_PadRight
Padding right value in the width dimension.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
MemoryType GetMemoryArea() const
Definition: Tensor.hpp:292
std::vector< std::pair< unsigned int, unsigned int > > m_PadList
Specifies the padding for input dimension.
uint32_t GetNumViews() const
Get the number of views.
ReduceOperation m_ReduceOperation
Specifies the reduction operation to execute.
flatbuffers::Offset< QLstmInputParams > CreateQLstmInputParams(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::ConstTensor > inputToForgetWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > inputToCellWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > inputToOutputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > recurrentToForgetWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > recurrentToCellWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > recurrentToOutputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > forgetGateBias=0, flatbuffers::Offset< armnnSerializer::ConstTensor > cellBias=0, flatbuffers::Offset< armnnSerializer::ConstTensor > outputGateBias=0, flatbuffers::Offset< armnnSerializer::ConstTensor > inputToInputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > recurrentToInputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > inputGateBias=0, flatbuffers::Offset< armnnSerializer::ConstTensor > projectionWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > projectionBias=0, flatbuffers::Offset< armnnSerializer::ConstTensor > cellToInputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > cellToForgetWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > cellToOutputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > inputLayerNormWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > forgetLayerNormWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > cellLayerNormWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > outputLayerNormWeights=0)
Copyright (c) 2021 ARM Limited and Contributors.
void IgnoreUnused(Ts &&...)
uint32_t m_PadBottom
Padding bottom value in the height dimension.
int32_t m_BeginMask
Begin mask value.
flatbuffers::Offset< TransposeConvolution2dLayer > CreateTransposeConvolution2dLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::TransposeConvolution2dDescriptor > descriptor=0, flatbuffers::Offset< armnnSerializer::ConstTensor > weights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > biases=0)
SizeType GetSize() const
Definition: Types.hpp:241
uint32_t m_DilationY
Dilation along y axis.
int32_t m_EndMask
End mask value.
A SpaceToDepthDescriptor for the SpaceToDepthLayer.
flatbuffers::Offset< TensorInfo > CreateTensorInfo(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> dimensions=0, armnnSerializer::DataType dataType=armnnSerializer::DataType_Float16, float quantizationScale=1.0f, int32_t quantizationOffset=0, flatbuffers::Offset< flatbuffers::Vector< float >> quantizationScales=0, uint32_t quantizationDim=0, uint32_t dimensionality=1, flatbuffers::Offset< flatbuffers::Vector< uint8_t >> dimensionSpecificity=0)
flatbuffers::Offset< PreluLayer > CreatePreluLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
std::vector< std::pair< unsigned int, unsigned int > > m_PadList
Specifies the padding values for the input dimension: heightPad{top, bottom} widthPad{left, right}.
std::vector< float > GetQuantizationScales() const
Definition: Tensor.cpp:442
flatbuffers::Offset< StandInDescriptor > CreateStandInDescriptor(flatbuffers::FlatBufferBuilder &_fbb, uint32_t numInputs=0, uint32_t numOutputs=0)
bool SaveSerializedToStream(std::ostream &stream)
Serializes the SerializedGraph to the stream.
Definition: Serializer.cpp:51
uint32_t m_DilationY
Dilation factor value for height dimension.
armnnSerializer::ConstTensorData GetFlatBufferConstTensorData(armnn::DataType dataType)
bool GetDimensionSpecificity(unsigned int i) const
Gets information about if the dimension size has been specified or not.
Definition: Tensor.cpp:211
LogicalBinaryOperation m_Operation
Specifies the logical operation to execute.
A BatchToSpaceNdDescriptor for the BatchToSpaceNdLayer.
void ExecuteStrategy(const armnn::IConnectableLayer *layer, const armnn::BaseDescriptor &descriptor, const std::vector< armnn::ConstTensor > &constants, const char *name, const armnn::LayerBindingId id) override
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
PermutationVector m_DimMappings
Indicates how to translate tensor elements from a given source into the target destination, when source and target potentially have different memory layouts e.g.
flatbuffers::Offset< MultiplicationLayer > CreateMultiplicationLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
flatbuffers::Offset< DepthToSpaceLayer > CreateDepthToSpaceLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::DepthToSpaceDescriptor > descriptor=0)
flatbuffers::Offset< InstanceNormalizationLayer > CreateInstanceNormalizationLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::InstanceNormalizationDescriptor > descriptor=0)
armnnSerializer::ReduceOperation GetFlatBufferReduceOperation(armnn::ReduceOperation reduceOperation)
int LayerBindingId
Type of identifiers for bindable layers (inputs, outputs).
Definition: Types.hpp:210
flatbuffers::Offset< SliceLayer > CreateSliceLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::SliceDescriptor > descriptor=0)
armnnSerializer::DataType GetFlatBufferDataType(armnn::DataType dataType)
uint32_t m_NumOutputs
Number of output tensors.
NormalizationAlgorithmMethod m_NormMethodType
Normalization method algorithm to use (LocalBrightness, LocalContrast).
flatbuffers::Offset< Convolution2dDescriptor > CreateConvolution2dDescriptor(flatbuffers::FlatBufferBuilder &_fbb, uint32_t padLeft=0, uint32_t padRight=0, uint32_t padTop=0, uint32_t padBottom=0, uint32_t strideX=0, uint32_t strideY=0, uint32_t dilationX=1, uint32_t dilationY=1, bool biasEnabled=false, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NCHW)
flatbuffers::Offset< InputLayer > CreateInputLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::BindableLayerBase > base=0)
A ResizeDescriptor for the ResizeLayer.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
uint32_t m_MaxClassesPerDetection
Maximum numbers of classes per detection, used in Fast NMS.
Base class for all descriptors.
Definition: Descriptors.hpp:22
std::vector< unsigned int > m_Axis
Values for the dimensions to reduce.
A StackDescriptor for the StackLayer.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
flatbuffers::Offset< ShortData > CreateShortData(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< int16_t >> data=0)
serializer::ArgMinMaxFunction GetFlatBufferArgMinMaxFunction(armnn::ArgMinMaxFunction function)
Definition: Serializer.cpp:87
TensorShape m_TargetShape
Target shape value.
bool SaveSerializedToStream(std::ostream &stream)
Serializes the SerializedGraph to the stream.
flatbuffers::Offset< ConcatLayer > CreateConcatLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::OriginsDescriptor > descriptor=0)
uint32_t m_PoolHeight
Pooling height value.
uint32_t m_PadTop
Padding top value in the height dimension.
uint32_t m_MaxDetections
Maximum numbers of detections.
A PadDescriptor for the PadLayer.
flatbuffers::Offset< SubtractionLayer > CreateSubtractionLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
flatbuffers::Offset< BindableLayerBase > CreateBindableLayerBase(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, int32_t layerBindingId=0)
const uint32_t * GetViewOrigin(uint32_t idx) const
Return the view origin at the int value idx.
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
flatbuffers::Offset< ArgMinMaxLayer > CreateArgMinMaxLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::ArgMinMaxDescriptor > descriptor=0)
armnnSerializer::NormalizationAlgorithmChannel GetFlatBufferNormalizationAlgorithmChannel(armnn::NormalizationAlgorithmChannel normalizationAlgorithmChannel)
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
flatbuffers::Offset< QLstmDescriptor > CreateQLstmDescriptor(flatbuffers::FlatBufferBuilder &_fbb, bool cifgEnabled=true, bool peepholeEnabled=false, bool projectionEnabled=false, bool layerNormEnabled=false, float cellClip=0.0f, float projectionClip=0.0f, float inputIntermediateScale=0.0f, float forgetIntermediateScale=0.0f, float cellIntermediateScale=0.0f, float outputIntermediateScale=0.0f, int32_t hiddenStateZeroPoint=0, float hiddenStateScale=0.0f)
bool m_LayerNormEnabled
Enable/disable layer normalization.
flatbuffers::Offset< GreaterLayer > CreateGreaterLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
float m_NmsIouThreshold
Intersection over union threshold.
flatbuffers::Offset< ReshapeLayer > CreateReshapeLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::ReshapeDescriptor > descriptor=0)
armnnSerializer::LogicalBinaryOperation GetFlatBufferLogicalBinaryOperation(armnn::LogicalBinaryOperation logicalBinaryOperation)
flatbuffers::Offset< ArgMinMaxDescriptor > CreateArgMinMaxDescriptor(flatbuffers::FlatBufferBuilder &_fbb, armnnSerializer::ArgMinMaxFunction argMinMaxFunction=armnnSerializer::ArgMinMaxFunction_Min, int32_t axis=0)
An LstmDescriptor for the LstmLayer.
uint32_t m_PadRight
Padding right value in the width dimension.
flatbuffers::Offset< AdditionLayer > CreateAdditionLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
uint32_t m_DilationX
Dilation factor value for width dimension.
uint32_t m_PadTop
Padding top value in the height dimension.
flatbuffers::Offset< L2NormalizationDescriptor > CreateL2NormalizationDescriptor(flatbuffers::FlatBufferBuilder &_fbb, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NCHW, float eps=1e-12f)
std::vector< unsigned int > m_Begin
Beginning indices of the slice in each dimension.
int32_t m_NewAxisMask
New axis mask value.
flatbuffers::Offset< MinimumLayer > CreateMinimumLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
bool m_KeepDims
Enable/disable keep dimensions. If true, then the reduced dimensions that are of length 1 are kept...
flatbuffers::Offset< ByteData > CreateByteData(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< int8_t >> data=0)
std::vector< unsigned int > m_BlockShape
Block shape values.
float m_Eps
Epsilon, small scalar value added to variance to avoid dividing by zero. Defaults to 1e-12f...
An output connection slot for a layer.
Definition: INetwork.hpp:38
flatbuffers::Offset< DepthToSpaceDescriptor > CreateDepthToSpaceDescriptor(flatbuffers::FlatBufferBuilder &_fbb, uint32_t blockSize=0, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NHWC)
A L2NormalizationDescriptor for the L2NormalizationLayer.
int32_t GetQuantizationOffset() const
Definition: Tensor.cpp:469
An ArgMinMaxDescriptor for ArgMinMaxLayer.
Definition: Descriptors.hpp:56
float GetQuantizationScale() const
Definition: Tensor.cpp:452
flatbuffers::Offset< LstmInputParams > CreateLstmInputParams(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::ConstTensor > inputToForgetWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > inputToCellWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > inputToOutputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > recurrentToForgetWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > recurrentToCellWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > recurrentToOutputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > forgetGateBias=0, flatbuffers::Offset< armnnSerializer::ConstTensor > cellBias=0, flatbuffers::Offset< armnnSerializer::ConstTensor > outputGateBias=0, flatbuffers::Offset< armnnSerializer::ConstTensor > inputToInputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > recurrentToInputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > cellToInputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > inputGateBias=0, flatbuffers::Offset< armnnSerializer::ConstTensor > projectionWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > projectionBias=0, flatbuffers::Offset< armnnSerializer::ConstTensor > cellToForgetWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > cellToOutputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > inputLayerNormWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > forgetLayerNormWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > cellLayerNormWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > outputLayerNormWeights=0)
DataType GetDataType() const
Definition: Tensor.hpp:194
An OriginsDescriptor for the ConcatLayer.
A ReduceDescriptor for the REDUCE operators.
float m_ProjectionClip
Clipping threshold value for the projection.
flatbuffers::Offset< LayerBase > CreateLayerBase(flatbuffers::FlatBufferBuilder &_fbb, uint32_t index=0, flatbuffers::Offset< flatbuffers::String > layerName=0, armnnSerializer::LayerType layerType=armnnSerializer::LayerType_Addition, flatbuffers::Offset< flatbuffers::Vector< flatbuffers::Offset< armnnSerializer::InputSlot >>> inputSlots=0, flatbuffers::Offset< flatbuffers::Vector< flatbuffers::Offset< armnnSerializer::OutputSlot >>> outputSlots=0)
A FullyConnectedDescriptor for the FullyConnectedLayer.
int32_t m_EllipsisMask
Ellipsis mask value.
virtual LayerGuid GetGuid() const =0
Returns the unique id of the layer.
bool m_BiasEnabled
Enable/disable bias.
A tensor defined by a TensorInfo (shape and data type) and an immutable backing store.
Definition: Tensor.hpp:314
flatbuffers::Offset< QuantizedLstmInputParams > CreateQuantizedLstmInputParams(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::ConstTensor > inputToInputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > inputToForgetWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > inputToCellWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > inputToOutputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > recurrentToInputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > recurrentToForgetWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > recurrentToCellWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > recurrentToOutputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > inputGateBias=0, flatbuffers::Offset< armnnSerializer::ConstTensor > forgetGateBias=0, flatbuffers::Offset< armnnSerializer::ConstTensor > cellBias=0, flatbuffers::Offset< armnnSerializer::ConstTensor > outputGateBias=0)
flatbuffers::Offset< ReduceDescriptor > CreateReduceDescriptor(flatbuffers::FlatBufferBuilder &_fbb, bool keepDims=false, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> axis=0, armnnSerializer::ReduceOperation reduceOperation=armnnSerializer::ReduceOperation_Sum)
flatbuffers::Offset< StackDescriptor > CreateStackDescriptor(flatbuffers::FlatBufferBuilder &_fbb, uint32_t axis=0, uint32_t numInputs=0, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> inputShape=0)
flatbuffers::Offset< BatchToSpaceNdDescriptor > CreateBatchToSpaceNdDescriptor(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> blockShape=0, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> crops=0, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NHWC)
float m_InputIntermediateScale
Input intermediate quantization scale.
flatbuffers::Offset< PadDescriptor > CreatePadDescriptor(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> padList=0, float padValue=0.0f)
uint32_t m_TargetWidth
Target width value.
flatbuffers::Offset< SplitterLayer > CreateSplitterLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::ViewsDescriptor > descriptor=0)
A GatherDescriptor for the GatherLayer.
bool m_PeepholeEnabled
Enable/disable peephole.
uint32_t m_NumClasses
Number of classes.
bool m_HalfPixelCenters
Half Pixel Centers.
flatbuffers::Offset< OutputLayer > CreateOutputLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::BindableLayerBase > base=0)
void Serialize(const armnn::INetwork &inNetwork)
Serializes the network to ArmNN SerializedGraph.
Definition: Serializer.cpp:46
flatbuffers::Offset< SoftmaxLayer > CreateSoftmaxLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::SoftmaxDescriptor > descriptor=0)
flatbuffers::Offset< FillDescriptor > CreateFillDescriptor(flatbuffers::FlatBufferBuilder &_fbb, float value=0.0f)
uint32_t m_PadTop
Padding top value in the height dimension.
A StandInDescriptor for the StandIn layer.
A QLstmDescriptor for the QLstmLayer.
flatbuffers::Offset< StridedSliceLayer > CreateStridedSliceLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::StridedSliceDescriptor > descriptor=0)
virtual unsigned int CalculateIndexOnOwner() const =0
flatbuffers::Offset< LogSoftmaxDescriptor > CreateLogSoftmaxDescriptor(flatbuffers::FlatBufferBuilder &_fbb, float beta=1.0f, int32_t axis=-1)
bool m_UseRegularNms
Use Regular NMS.
flatbuffers::Offset< RsqrtLayer > CreateRsqrtLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
flatbuffers::Offset< MeanLayer > CreateMeanLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::MeanDescriptor > descriptor=0)
std::vector< unsigned int > m_BlockShape
Block shape values.
std::vector< int > m_Stride
Stride values for the input that will be sliced.
An ActivationDescriptor for the ActivationLayer.
Definition: Descriptors.hpp:25
flatbuffers::Offset< ActivationLayer > CreateActivationLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::ActivationDescriptor > descriptor=0)
flatbuffers::Offset< SpaceToDepthLayer > CreateSpaceToDepthLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::SpaceToDepthDescriptor > descriptor=0)
flatbuffers::Offset< SliceDescriptor > CreateSliceDescriptor(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> begin=0, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> size=0)
const TensorInfo & GetInfo() const
Definition: Tensor.hpp:282
min(a, max(b, input)) ReLu1 & ReLu6.
flatbuffers::Offset< BatchNormalizationLayer > CreateBatchNormalizationLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::BatchNormalizationDescriptor > descriptor=0, flatbuffers::Offset< armnnSerializer::ConstTensor > mean=0, flatbuffers::Offset< armnnSerializer::ConstTensor > variance=0, flatbuffers::Offset< armnnSerializer::ConstTensor > beta=0, flatbuffers::Offset< armnnSerializer::ConstTensor > gamma=0)
flatbuffers::Offset< BatchNormalizationDescriptor > CreateBatchNormalizationDescriptor(flatbuffers::FlatBufferBuilder &_fbb, float eps=0.0f, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NHWC)
uint32_t m_TargetHeight
Target height value.
uint32_t m_NumInputs
Number of input tensors.
flatbuffers::Offset< GatherDescriptor > CreateGatherDescriptor(flatbuffers::FlatBufferBuilder &_fbb, int32_t axis=0)
flatbuffers::Offset< ActivationDescriptor > CreateActivationDescriptor(flatbuffers::FlatBufferBuilder &_fbb, armnnSerializer::ActivationFunction activationFunction=armnnSerializer::ActivationFunction_Sigmoid, float a=0.0f, float b=0.0f)
uint32_t m_TargetHeight
Target height value.
uint32_t m_ActivationFunc
The activation function to use.
A SliceDescriptor for the SliceLayer.
flatbuffers::Offset< NormalizationLayer > CreateNormalizationLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::NormalizationDescriptor > descriptor=0)
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
flatbuffers::Offset< ViewsDescriptor > CreateViewsDescriptor(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::OriginsDescriptor > origins=0, flatbuffers::Offset< flatbuffers::Vector< flatbuffers::Offset< armnnSerializer::UintVector >>> viewSizes=0)
virtual LayerType GetType() const =0
Returns the armnn::LayerType of this layer.
flatbuffers::Offset< PermuteDescriptor > CreatePermuteDescriptor(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> dimMappings=0)
float m_ClippingThresCell
Clipping threshold value for the cell state.
unsigned int m_BlockSize
Scalar specifying the input block size. It must be >= 1.
flatbuffers::Offset< MeanDescriptor > CreateMeanDescriptor(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> axis=0, bool keepDims=false)
const uint32_t * GetViewOrigin(uint32_t idx) const
Get the view origin at index idx.
flatbuffers::Offset< StandInLayer > CreateStandInLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::StandInDescriptor > descriptor=0)
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
float m_ForgetIntermediateScale
Forget intermediate quantization scale.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
float m_Beta
Beta, the offset scalar value applied for the normalized tensor. Defaults to 1.0. ...
armnnSerializer::DataLayout GetFlatBufferDataLayout(armnn::DataLayout dataLayout)
std::vector< uint32_t > m_vAxis
The indices of the dimensions to reduce.
float m_ScaleH
Center size encoding scale height.
ComparisonOperation m_Operation
Specifies the comparison operation to execute.
Definition: Descriptors.hpp:94
std::vector< int > m_End
End values for the input that will be sliced.
flatbuffers::Offset< SwitchLayer > CreateSwitchLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
A SpaceToBatchNdDescriptor for the SpaceToBatchNdLayer.
flatbuffers::Offset< ResizeBilinearDescriptor > CreateResizeBilinearDescriptor(flatbuffers::FlatBufferBuilder &_fbb, uint32_t targetWidth=0, uint32_t targetHeight=0, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NHWC, bool alignCorners=false, bool halfPixelCenters=false)
NormalizationAlgorithmChannel m_NormChannelType
Normalization channel algorithm to use (Across, Within).
const uint32_t * GetViewSizes(uint32_t idx) const
Get the view sizes at index idx.
float m_CellClip
Clipping threshold value for the cell state.
flatbuffers::Offset< ElementwiseUnaryDescriptor > CreateElementwiseUnaryDescriptor(flatbuffers::FlatBufferBuilder &_fbb, armnnSerializer::UnaryOperation operation=armnnSerializer::UnaryOperation_Abs)
float m_A
Alpha upper bound value used by the activation functions. (BoundedReLu, Linear, TanH, Elu).
Definition: Descriptors.hpp:50
uint32_t m_DilationX
Dilation along x axis.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
bool m_CifgEnabled
Enable/disable cifg (coupled input & forget gate).
flatbuffers::Offset< PadLayer > CreatePadLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::PadDescriptor > descriptor=0)
flatbuffers::Offset< FloorLayer > CreateFloorLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
std::unique_ptr< ISerializer, void(*)(ISerializer *serializer)> ISerializerPtr
Definition: ISerializer.hpp:15
flatbuffers::Offset< NormalizationDescriptor > CreateNormalizationDescriptor(flatbuffers::FlatBufferBuilder &_fbb, armnnSerializer::NormalizationAlgorithmChannel normChannelType=armnnSerializer::NormalizationAlgorithmChannel_Across, armnnSerializer::NormalizationAlgorithmMethod normMethodType=armnnSerializer::NormalizationAlgorithmMethod_LocalBrightness, uint32_t normSize=0, float alpha=0.0f, float beta=0.0f, float k=0.0f, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NCHW)
uint32_t m_PadLeft
Padding left value in the width dimension.
armnnSerializer::ComparisonOperation GetFlatBufferComparisonOperation(armnn::ComparisonOperation comparisonOperation)
flatbuffers::Offset< Pooling2dDescriptor > CreatePooling2dDescriptor(flatbuffers::FlatBufferBuilder &_fbb, armnnSerializer::PoolingAlgorithm poolType=armnnSerializer::PoolingAlgorithm_Max, uint32_t padLeft=0, uint32_t padRight=0, uint32_t padTop=0, uint32_t padBottom=0, uint32_t poolWidth=0, uint32_t poolHeight=0, uint32_t strideX=0, uint32_t strideY=0, armnnSerializer::OutputShapeRounding outputShapeRounding=armnnSerializer::OutputShapeRounding_Floor, armnnSerializer::PaddingMethod paddingMethod=armnnSerializer::PaddingMethod_IgnoreValue, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NHWC)
flatbuffers::Offset< EqualLayer > CreateEqualLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
bool m_AlignCorners
Aligned corners.
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
flatbuffers::Offset< ConstantLayer > CreateConstantLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::ConstTensor > input=0)
int32_t m_Axis
The axis in params to gather indices from.
A ElementwiseUnaryDescriptor for the ElementwiseUnaryLayer.
Definition: Descriptors.hpp:98
PoolingAlgorithm m_PoolType
The pooling algorithm to use (Max, Average, L2).
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
flatbuffers::Offset< UintVector > CreateUintVector(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> data=0)
flatbuffers::Offset< StackLayer > CreateStackLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::StackDescriptor > descriptor=0)
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
std::vector< std::pair< unsigned int, unsigned int > > m_Crops
The values to crop from the input dimension.
flatbuffers::Offset< Convolution2dLayer > CreateConvolution2dLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::Convolution2dDescriptor > descriptor=0, flatbuffers::Offset< armnnSerializer::ConstTensor > weights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > biases=0)
unsigned int GetNumDimensions() const
Function that returns the tensor rank.
Definition: Tensor.cpp:174
flatbuffers::Offset< Pooling2dLayer > CreatePooling2dLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::Pooling2dDescriptor > descriptor=0)
bool m_ProjectionEnabled
Enable/disable the projection layer.
ArgMinMaxFunction
Definition: Types.hpp:72
flatbuffers::Offset< SpaceToBatchNdLayer > CreateSpaceToBatchNdLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::SpaceToBatchNdDescriptor > descriptor=0)
OutputShapeRounding m_OutputShapeRounding
The rounding method for the output shape. (Floor, Ceiling).
armnnSerializer::ResizeMethod GetFlatBufferResizeMethod(armnn::ResizeMethod method)
uint32_t m_NumInputs
Number of input tensors.
uint32_t GetNumDimensions() const
Get the number of dimensions.
flatbuffers::Offset< ComparisonDescriptor > CreateComparisonDescriptor(flatbuffers::FlatBufferBuilder &_fbb, armnnSerializer::ComparisonOperation operation=armnnSerializer::ComparisonOperation_Equal)
virtual const IInputSlot & GetInputSlot(unsigned int index) const =0
Get a const input slot handle by slot index.
A MeanDescriptor for the MeanLayer.
flatbuffers::Offset< MaximumLayer > CreateMaximumLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
virtual const IOutputSlot * GetConnection() const =0
static ISerializerPtr Create()
Definition: Serializer.cpp:36
armnnSerializer::PaddingMethod GetFlatBufferPaddingMethod(armnn::PaddingMethod paddingMethod)
bool m_LayerNormEnabled
Enable/disable layer normalization.
std::enable_if_t< std::is_unsigned< Source >::value &&std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
Definition: NumericCast.hpp:35
uint32_t m_PadRight
Padding right value in the width dimension.
flatbuffers::Offset< InstanceNormalizationDescriptor > CreateInstanceNormalizationDescriptor(flatbuffers::FlatBufferBuilder &_fbb, float gamma=0.0f, float beta=0.0f, float eps=0.0f, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NHWC)
A TransposeDescriptor for the TransposeLayer.
A StridedSliceDescriptor for the StridedSliceLayer.
virtual const TensorInfo & GetTensorInfo() const =0
virtual const IOutputSlot & GetOutputSlot(unsigned int index) const =0
Get the const output slot handle by slot index.
int m_Axis
Axis to reduce across the input tensor.
Definition: Descriptors.hpp:72
flatbuffers::Offset< IntData > CreateIntData(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< int32_t >> data=0)
virtual const char * GetName() const =0
Returns the name of the layer.
float m_ScaleY
Center size encoding scale y.
flatbuffers::Offset< ResizeLayer > CreateResizeLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::ResizeDescriptor > descriptor=0)
uint32_t GetNumViews() const
Get the number of views.
flatbuffers::Offset< FullyConnectedLayer > CreateFullyConnectedLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::FullyConnectedDescriptor > descriptor=0, flatbuffers::Offset< armnnSerializer::ConstTensor > weights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > biases=0)
float m_NmsScoreThreshold
NMS score threshold.
flatbuffers::Offset< DequantizeLayer > CreateDequantizeLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
flatbuffers::Offset< ResizeBilinearLayer > CreateResizeBilinearLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::ResizeBilinearDescriptor > descriptor=0)
virtual LayerGuid GetOwningLayerGuid() const =0
A Pooling2dDescriptor for the Pooling2dLayer.
flatbuffers::Offset< DetectionPostProcessLayer > CreateDetectionPostProcessLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::DetectionPostProcessDescriptor > descriptor=0, flatbuffers::Offset< armnnSerializer::ConstTensor > anchors=0)
A NormalizationDescriptor for the NormalizationLayer.
flatbuffers::Offset< FeatureCompatibilityVersions > CreateFeatureCompatibilityVersions(flatbuffers::FlatBufferBuilder &_fbb, uint32_t bindingIdsScheme=0)
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
flatbuffers::Offset< BatchToSpaceNdLayer > CreateBatchToSpaceNdLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::BatchToSpaceNdDescriptor > descriptor=0)
flatbuffers::Offset< armnnSerializer::FeatureCompatibilityVersions > GetVersionTable()
An InstanceNormalizationDescriptor for InstanceNormalizationLayer.
unsigned int GetConcatAxis() const
Get the concatenation axis value.
A ResizeBilinearDescriptor for the ResizeBilinearLayer.
flatbuffers::Offset< LogSoftmaxLayer > CreateLogSoftmaxLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::LogSoftmaxDescriptor > descriptor=0)
float m_CellIntermediateScale
Cell intermediate quantization scale.
flatbuffers::Offset< StridedSliceDescriptor > CreateStridedSliceDescriptor(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< int32_t >> begin=0, flatbuffers::Offset< flatbuffers::Vector< int32_t >> end=0, flatbuffers::Offset< flatbuffers::Vector< int32_t >> stride=0, int32_t beginMask=0, int32_t endMask=0, int32_t shrinkAxisMask=0, int32_t ellipsisMask=0, int32_t newAxisMask=0, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NHWC)
flatbuffers::Offset< OriginsDescriptor > CreateOriginsDescriptor(flatbuffers::FlatBufferBuilder &_fbb, uint32_t concatAxis=0, uint32_t numViews=0, uint32_t numDimensions=0, flatbuffers::Offset< flatbuffers::Vector< flatbuffers::Offset< armnnSerializer::UintVector >>> viewOrigins=0)
flatbuffers::Offset< QLstmLayer > CreateQLstmLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::QLstmDescriptor > descriptor=0, flatbuffers::Offset< armnnSerializer::QLstmInputParams > inputParams=0)
flatbuffers::Offset< LstmDescriptor > CreateLstmDescriptor(flatbuffers::FlatBufferBuilder &_fbb, uint32_t activationFunc=0, float clippingThresCell=0.0f, float clippingThresProj=0.0f, bool cifgEnabled=true, bool peepholeEnabled=false, bool projectionEnabled=false, bool layerNormEnabled=false)
float m_B
Beta lower bound value used by the activation functions. (BoundedReLu, Linear, TanH).
Definition: Descriptors.hpp:52
static void Destroy(ISerializer *serializer)
Definition: Serializer.cpp:41
A SoftmaxDescriptor for the SoftmaxLayer.
float m_Beta
Beta value for the normalization equation.
flatbuffers::Offset< ElementwiseUnaryLayer > CreateElementwiseUnaryLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::ElementwiseUnaryDescriptor > descriptor=0)
flatbuffers::Offset< PermuteLayer > CreatePermuteLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::PermuteDescriptor > descriptor=0)
const OriginsDescriptor & GetOrigins() const
Get the View Origins.
bool m_CifgEnabled
Enable/disable CIFG (coupled input & forget gate).
PermutationVector m_DimMappings
Indicates how to translate tensor elements from a given source into the target destination, when source and target potentially have different memory layouts e.g.
uint32_t m_NormSize
Depth radius value.
flatbuffers::Offset< SpaceToBatchNdDescriptor > CreateSpaceToBatchNdDescriptor(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> blockShape=0, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> padList=0, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NHWC)
ActivationFunction m_Function
The activation function to use (Sigmoid, TanH, Linear, ReLu, BoundedReLu, SoftReLu, LeakyReLu, Abs, Sqrt, Square, Elu).
Definition: Descriptors.hpp:48
An input connection slot for a layer.
Definition: INetwork.hpp:25
flatbuffers::Offset< SerializedGraph > CreateSerializedGraph(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< flatbuffers::Offset< armnnSerializer::AnyLayer >>> layers=0, flatbuffers::Offset< flatbuffers::Vector< int32_t >> inputIds=0, flatbuffers::Offset< flatbuffers::Vector< int32_t >> outputIds=0, flatbuffers::Offset< armnnSerializer::FeatureCompatibilityVersions > featureVersions=0)
armnnSerializer::PoolingAlgorithm GetFlatBufferPoolingAlgorithm(armnn::PoolingAlgorithm poolingAlgorithm)
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
A FillDescriptor for the FillLayer.
A BatchNormalizationDescriptor for the BatchNormalizationLayer.
uint32_t m_PadLeft
Padding left value in the width dimension.
unsigned int GetNumBytes() const
Definition: Tensor.hpp:289
ActivationFunction
Definition: Types.hpp:56
flatbuffers::Offset< LogicalBinaryDescriptor > CreateLogicalBinaryDescriptor(flatbuffers::FlatBufferBuilder &_fbb, armnnSerializer::LogicalBinaryOperation operation=armnnSerializer::LogicalBinaryOperation_LogicalAnd)
flatbuffers::Offset< DivisionLayer > CreateDivisionLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
A PermuteDescriptor for the PermuteLayer.
flatbuffers::Offset< LogicalBinaryLayer > CreateLogicalBinaryLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::LogicalBinaryDescriptor > descriptor=0)
uint32_t m_PadRight
Padding right value in the width dimension.
int32_t m_HiddenStateZeroPoint
Hidden State zero point.
std::vector< float > anchors({ 0.5f, 0.5f, 1.0f, 1.0f, 0.5f, 0.5f, 1.0f, 1.0f, 0.5f, 0.5f, 1.0f, 1.0f, 0.5f, 10.5f, 1.0f, 1.0f, 0.5f, 10.5f, 1.0f, 1.0f, 0.5f, 100.5f, 1.0f, 1.0f })