ArmNN 24.02
Serializer.cpp
1 //
2 // Copyright © 2017,2019-2023 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 #include "Serializer.hpp"
6 #include "SerializerUtils.hpp"
7 
8 #include <armnn/Descriptors.hpp>
9 #include <armnn/LstmParams.hpp>
10 #include <armnn/QuantizedLstmParams.hpp>
11 #include <armnn/utility/IgnoreUnused.hpp>
12 #include <armnn/utility/NumericCast.hpp>
13 
14 #include <fmt/format.h>
15 #include <iostream>
16 
17 using namespace armnn;
18 namespace fb = flatbuffers;
19 namespace serializer = armnnSerializer;
20 
21 namespace armnnSerializer
22 {
23 
24 ISerializer::ISerializer() : pSerializerImpl(new SerializerImpl())
25 {
26 }
27 
28 ISerializer::~ISerializer() = default;
29 
30 ISerializer* ISerializer::CreateRaw()
31 {
32  return new ISerializer();
33 }
34 
35 ISerializerPtr ISerializer::Create()
36 {
37  return ISerializerPtr(CreateRaw(), &ISerializer::Destroy);
38 }
39 
40 void ISerializer::Destroy(ISerializer* serializer)
41 {
42  delete serializer;
43 }
44 
45 void ISerializer::Serialize(const armnn::INetwork& inNetwork)
46 {
47  pSerializerImpl->Serialize(inNetwork);
48 }
49 
50 bool ISerializer::SaveSerializedToStream(std::ostream& stream)
51 {
52  return pSerializerImpl->SaveSerializedToStream(stream);
53 }
54 
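// Example usage (not part of this file): a minimal sketch of driving the ISerializer API
// defined above from client code. The header path, the already-built INetwork and the output
// file name are assumptions for illustration only.
//
//     #include <armnnSerializer/ISerializer.hpp>
//     #include <fstream>
//
//     void SaveNetwork(const armnn::INetwork& network)
//     {
//         auto serializer = armnnSerializer::ISerializer::Create(); // ISerializerPtr with ISerializer::Destroy as deleter
//         serializer->Serialize(network);                           // walks the network and builds the FlatBuffer
//         std::ofstream file("network.armnn", std::ios::binary);    // assumed output path
//         serializer->SaveSerializedToStream(file);                 // writes the serialized buffer to the stream
//     }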
55 serializer::ActivationFunction GetFlatBufferActivationFunction(armnn::ActivationFunction function)
56 {
57  switch (function)
58  {
59  case armnn::ActivationFunction::Sigmoid:
60  return serializer::ActivationFunction::ActivationFunction_Sigmoid;
61  case armnn::ActivationFunction::TanH:
62  return serializer::ActivationFunction::ActivationFunction_TanH;
63  case armnn::ActivationFunction::Linear:
64  return serializer::ActivationFunction::ActivationFunction_Linear;
65  case armnn::ActivationFunction::ReLu:
66  return serializer::ActivationFunction::ActivationFunction_ReLu;
67  case armnn::ActivationFunction::BoundedReLu:
68  return serializer::ActivationFunction::ActivationFunction_BoundedReLu;
69  case armnn::ActivationFunction::LeakyReLu:
70  return serializer::ActivationFunction::ActivationFunction_LeakyReLu;
71  case armnn::ActivationFunction::Abs:
72  return serializer::ActivationFunction::ActivationFunction_Abs;
73  case armnn::ActivationFunction::Sqrt:
74  return serializer::ActivationFunction::ActivationFunction_Sqrt;
75  case armnn::ActivationFunction::Square:
76  return serializer::ActivationFunction::ActivationFunction_Square;
77  case armnn::ActivationFunction::Elu:
78  return serializer::ActivationFunction::ActivationFunction_Elu;
79  case armnn::ActivationFunction::HardSwish:
80  return serializer::ActivationFunction::ActivationFunction_HardSwish;
81  case armnn::ActivationFunction::Gelu:
82  return serializer::ActivationFunction::ActivationFunction_Gelu;
83  default:
84  return serializer::ActivationFunction::ActivationFunction_Sigmoid;
85  }
86 }
87 
88 serializer::ArgMinMaxFunction GetFlatBufferArgMinMaxFunction(armnn::ArgMinMaxFunction function)
89 {
90  switch (function)
91  {
92  case armnn::ArgMinMaxFunction::Max:
93  return serializer::ArgMinMaxFunction::ArgMinMaxFunction_Max;
94  case armnn::ArgMinMaxFunction::Min:
95  default:
96  return serializer::ArgMinMaxFunction::ArgMinMaxFunction_Min;
97  }
98 }
99 
100 uint32_t SerializerStrategy::GetSerializedId(LayerGuid guid)
101 {
102  if (m_guidMap.empty())
103  {
104  m_guidMap.insert(std::make_pair(guid, m_layerId));
105  }
106  else if (m_guidMap.find(guid) == m_guidMap.end())
107  {
108  ++m_layerId;
109  m_guidMap.insert(std::make_pair(guid, m_layerId));
110 
111  return m_layerId;
112  }
113  return m_guidMap[guid];
114 }
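// Behaviour sketch for GetSerializedId, derived from the implementation above and assuming
// m_layerId starts at 0: the first GUID seen maps to id 0, each previously unseen GUID gets
// the next consecutive id, and a repeated GUID returns the id it was first given. The GUID
// values below are illustrative only.
//
//     GetSerializedId(101); // -> 0 (first entry)
//     GetSerializedId(205); // -> 1 (new guid, id incremented)
//     GetSerializedId(101); // -> 0 (known guid, same id returned)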
115 
116 // Build FlatBuffer for Input Layer
117 void SerializerStrategy::SerializeInputLayer(const armnn::IConnectableLayer* layer, LayerBindingId id, const char* name)
118 {
119  IgnoreUnused(name);
120 
121  // Create FlatBuffer BaseLayer
122  auto flatBufferInputBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Input);
123 
124  // Create FlatBuffer BindableBaseLayer
125  auto flatBufferInputBindableBaseLayer = serializer::CreateBindableLayerBase(m_flatBufferBuilder,
126  flatBufferInputBaseLayer,
127  id);
128  // Push layer binding id to outputIds.
129  m_inputIds.push_back(id);
130 
131  // Create the FlatBuffer InputLayer
132  auto flatBufferInputLayer = serializer::CreateInputLayer(m_flatBufferBuilder, flatBufferInputBindableBaseLayer);
133 
134  // Add the AnyLayer to the FlatBufferLayers
135  CreateAnyLayer(flatBufferInputLayer.o, serializer::Layer::Layer_InputLayer);
136 }
137 
138 // Build FlatBuffer for Output Layer
139 void SerializerStrategy::SerializeOutputLayer(const armnn::IConnectableLayer* layer,
140  LayerBindingId id, const char* name)
141 {
142  IgnoreUnused(name);
143 
144  // Create FlatBuffer BaseLayer
145  auto flatBufferOutputBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Output);
146 
147  // Create FlatBuffer BindableBaseLayer
148  auto flatBufferOutputBindableBaseLayer = serializer::CreateBindableLayerBase(m_flatBufferBuilder,
149  flatBufferOutputBaseLayer,
150  id);
151  // Push layer binding id to outputIds.
152  m_outputIds.push_back(id);
153 
154  // Create the FlatBuffer OutputLayer
155  auto flatBufferOutputLayer = serializer::CreateOutputLayer(m_flatBufferBuilder, flatBufferOutputBindableBaseLayer);
156  // Add the AnyLayer to the FlatBufferLayers
157  CreateAnyLayer(flatBufferOutputLayer.o, serializer::Layer::Layer_OutputLayer);
158 }
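// Note: the Serialize*Layer methods below all follow the same three-step pattern seen in the
// input and output layers above: CreateLayerBase() records the layer's guid, type, name and
// slot information; an optional descriptor table (plus any constant tensors) captures the
// layer's parameters; and CreateAnyLayer() wraps the result in the AnyLayer union and adds it
// to the list of FlatBuffer layers.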
159 
160 // Build FlatBuffer for Activation Layer
161 void SerializerStrategy::SerializeActivationLayer(const armnn::IConnectableLayer* layer,
162  const armnn::ActivationDescriptor& descriptor,
163  const char* name)
164 {
165  IgnoreUnused(name);
166 
167  // Create FlatBuffer BaseLayer
168  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Activation);
169 
170  // Create the FlatBuffer ActivationDescriptor
171  auto flatBufferDescriptor = CreateActivationDescriptor(m_flatBufferBuilder,
172  GetFlatBufferActivationFunction(descriptor.m_Function),
173  descriptor.m_A,
174  descriptor.m_B);
175 
176  // Create the FlatBuffer ActivationLayer
177  auto flatBufferActivationLayer = CreateActivationLayer(m_flatBufferBuilder,
178  flatBufferBaseLayer,
179  flatBufferDescriptor);
180 
181  // Add the AnyLayer to the FlatBufferLayers
182  CreateAnyLayer(flatBufferActivationLayer.o, serializer::Layer::Layer_ActivationLayer);
183 }
184 
185 // Build FlatBuffer for Addition Layer
186 void SerializerStrategy::SerializeAdditionLayer(const armnn::IConnectableLayer* layer, const char* name)
187 {
188  IgnoreUnused(name);
189 
190  // Create FlatBuffer BaseLayer
191  auto flatBufferAdditionBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Addition);
192 
193  // Create the FlatBuffer AdditionLayer
194  auto flatBufferAdditionLayer = serializer::CreateAdditionLayer(m_flatBufferBuilder, flatBufferAdditionBaseLayer);
195 
196  // Add the AnyLayer to the FlatBufferLayers
197  CreateAnyLayer(flatBufferAdditionLayer.o, serializer::Layer::Layer_AdditionLayer);
198 }
199 
200 // Build FlatBuffer for ArgMinMax Layer
201 void SerializerStrategy::SerializeArgMinMaxLayer(const armnn::IConnectableLayer *layer,
202  const armnn::ArgMinMaxDescriptor& descriptor,
203  const char *name)
204 {
205  IgnoreUnused(name);
206 
207  // Create FlatBuffer BaseLayer
208  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_ArgMinMax);
209 
210  // Create FlatBuffer Descriptor
211  auto flatBufferDescriptor = CreateArgMinMaxDescriptor(m_flatBufferBuilder,
212  GetFlatBufferArgMinMaxFunction(descriptor.m_Function),
213  descriptor.m_Axis);
214 
215  // Create FlatBuffer ArgMinMaxLayer
216  auto flatBufferLayer = CreateArgMinMaxLayer(m_flatBufferBuilder,
217  flatBufferBaseLayer,
218  flatBufferDescriptor);
219 
220  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_ArgMinMaxLayer);
221 }
222 
223 void SerializerStrategy::SerializeBatchMatMulLayer(const armnn::IConnectableLayer* layer,
224  const armnn::BatchMatMulDescriptor& descriptor,
225  const char* name)
226 {
227  IgnoreUnused(name);
228 
229  // Create FlatBuffer BaseLayer
230  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_BatchMatMul);
231 
232  // Create the FlatBuffer BatchMatMulDescriptor
233  auto flatBufferDescriptor = CreateBatchMatMulDescriptor(m_flatBufferBuilder,
234  descriptor.m_TransposeX,
235  descriptor.m_TransposeY,
236  descriptor.m_AdjointX,
237  descriptor.m_AdjointY,
238  GetFlatBufferDataLayout(descriptor.m_DataLayoutX),
239  GetFlatBufferDataLayout(descriptor.m_DataLayoutY));
240 
241  // Create the FlatBuffer BatchMatMulLayer
242  auto flatBufferBatchMatMulLayer = CreateBatchMatMulLayer(m_flatBufferBuilder,
243  flatBufferBaseLayer,
244  flatBufferDescriptor);
245 
246  // Add the AnyLayer to the FlatBufferLayers
247  CreateAnyLayer(flatBufferBatchMatMulLayer.o, serializer::Layer::Layer_BatchMatMulLayer);
248 }
249 
250 // Build FlatBuffer for BatchToSpaceNd Layer
251 void SerializerStrategy::SerializeBatchToSpaceNdLayer(const armnn::IConnectableLayer* layer,
252  const armnn::BatchToSpaceNdDescriptor& descriptor,
253  const char* name)
254 {
255  IgnoreUnused(name);
256 
257  // Create FlatBuffer BaseLayer
258  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_BatchToSpaceNd);
259 
260  std::vector<unsigned int> crops;
261  crops.reserve(descriptor.m_Crops.size() * 2);
262  for (auto& crop : descriptor.m_Crops)
263  {
264  crops.push_back(crop.first);
265  crops.push_back(crop.second);
266  }
267 
268  auto flatBufferDescriptor =
269  CreateBatchToSpaceNdDescriptor(m_flatBufferBuilder,
270  m_flatBufferBuilder.CreateVector(descriptor.m_BlockShape),
271  m_flatBufferBuilder.CreateVector(crops),
272  GetFlatBufferDataLayout(descriptor.m_DataLayout));
273 
274  auto flatBufferLayer = serializer::CreateBatchToSpaceNdLayer(m_flatBufferBuilder,
275  flatBufferBaseLayer,
276  flatBufferDescriptor);
277 
278  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_BatchToSpaceNdLayer);
279 }
280 
281 void SerializerStrategy::SerializeBatchNormalizationLayer(
282  const armnn::IConnectableLayer* layer,
283  const armnn::BatchNormalizationDescriptor& batchNormDescriptor,
284  const std::vector<armnn::ConstTensor>& constants,
285  const char* name)
286 {
287  IgnoreUnused(name);
288 
289  const armnn::ConstTensor& mean = constants[0];
290  const armnn::ConstTensor& variance = constants[1];
291  const armnn::ConstTensor& beta = constants[2];
292  const armnn::ConstTensor& gamma = constants[3];
293 
294  auto fbBatchNormalizationBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_BatchNormalization);
295  auto fbBatchNormalizationDescriptor = serializer::CreateBatchNormalizationDescriptor(
296  m_flatBufferBuilder,
297  batchNormDescriptor.m_Eps,
298  GetFlatBufferDataLayout(batchNormDescriptor.m_DataLayout));
299 
300  auto fbMeanConstTensorInfo = CreateConstTensorInfo(mean);
301  auto fbVarianceConstTensorInfo = CreateConstTensorInfo(variance);
302  auto fbBetaConstTensorInfo = CreateConstTensorInfo(beta);
303  auto fbGammaConstTensorInfo = CreateConstTensorInfo(gamma);
304  auto fbBatchNormalizationLayer = serializer::CreateBatchNormalizationLayer(m_flatBufferBuilder,
305  fbBatchNormalizationBaseLayer,
306  fbBatchNormalizationDescriptor,
307  fbMeanConstTensorInfo,
308  fbVarianceConstTensorInfo,
309  fbBetaConstTensorInfo,
310  fbGammaConstTensorInfo);
311 
312  CreateAnyLayer(fbBatchNormalizationLayer.o, serializer::Layer::Layer_BatchNormalizationLayer);
313 }
314 
315 void SerializerStrategy::SerializeCastLayer(const armnn::IConnectableLayer* layer,
316  const char* name)
317 {
318  IgnoreUnused(name);
319 
320  auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Cast);
321  auto fbCastLayer = serializer::CreateCastLayer(m_flatBufferBuilder, fbBaseLayer);
322  CreateAnyLayer(fbCastLayer.o, serializer::Layer::Layer_CastLayer);
323 }
324 
325 void SerializerStrategy::SerializeChannelShuffleLayer(const armnn::IConnectableLayer* layer,
326  const armnn::ChannelShuffleDescriptor& descriptor,
327  const char* name)
328 {
329  IgnoreUnused(name);
330  auto fbDescriptor = CreateChannelShuffleDescriptor(m_flatBufferBuilder,
331  descriptor.m_Axis,
332  descriptor.m_NumGroups);
333  auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_ChannelShuffle);
334  auto fbChannelShuffleLayer = serializer::CreateChannelShuffleLayer(m_flatBufferBuilder, fbBaseLayer, fbDescriptor);
335  CreateAnyLayer(fbChannelShuffleLayer.o, serializer::Layer::Layer_ChannelShuffleLayer);
336 }
337 
338 void SerializerStrategy::SerializeComparisonLayer(const armnn::IConnectableLayer* layer,
339  const armnn::ComparisonDescriptor& descriptor,
340  const char* name)
341 {
342  IgnoreUnused(name);
343 
344  auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Comparison);
345  auto fbDescriptor = serializer::CreateComparisonDescriptor(
346  m_flatBufferBuilder,
347  GetFlatBufferComparisonOperation(descriptor.m_Operation));
348 
349  auto fbLayer = serializer::CreateComparisonLayer(m_flatBufferBuilder, fbBaseLayer, fbDescriptor);
350  CreateAnyLayer(fbLayer.o, serializer::Layer::Layer_ComparisonLayer);
351 }
352 
353 // Build FlatBuffer for Constant Layer
354 void SerializerStrategy::SerializeConstantLayer(const armnn::IConnectableLayer* layer,
355  const std::vector<armnn::ConstTensor>& constants,
356  const char* name)
357 {
358  IgnoreUnused(name);
359 
360  armnn::ConstTensor input = constants[0];
361 
362  // Create FlatBuffer BaseLayer
363  auto flatBufferConstantBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Constant);
364 
365  auto flatBufferConstTensorInfo = CreateConstTensorInfo(input);
366 
367  // Create the FlatBuffer ConstantLayer
368  auto flatBufferLayer = CreateConstantLayer(m_flatBufferBuilder,
369  flatBufferConstantBaseLayer,
370  flatBufferConstTensorInfo);
371 
372  // Add the AnyLayer to the FlatBufferLayers
373  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_ConstantLayer);
374 }
375 
376 // Build FlatBuffer for Convolution2dLayer
377 void SerializerStrategy::SerializeConvolution2dLayer(const armnn::IConnectableLayer* layer,
378  const armnn::Convolution2dDescriptor& descriptor,
379  const char* name)
380 {
381  IgnoreUnused(name);
382 
383  // Create FlatBuffer BaseLayer
384  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Convolution2d);
385 
386  auto flatBufferDescriptor = CreateConvolution2dDescriptor(m_flatBufferBuilder,
387  descriptor.m_PadLeft,
388  descriptor.m_PadRight,
389  descriptor.m_PadTop,
390  descriptor.m_PadBottom,
391  descriptor.m_StrideX,
392  descriptor.m_StrideY,
393  descriptor.m_DilationX,
394  descriptor.m_DilationY,
395  descriptor.m_BiasEnabled,
396  GetFlatBufferDataLayout(descriptor.m_DataLayout));
397 
398  // Create the FlatBuffer Convolution2dLayer
399  auto flatBufferLayer = CreateConvolution2dLayer(m_flatBufferBuilder,
400  flatBufferBaseLayer,
401  flatBufferDescriptor);
402 
403  // Add the AnyLayer to the FlatBufferLayers
404  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_Convolution2dLayer);
405 }
406 
407 // Build FlatBuffer for Convolution3dLayer
408 void SerializerStrategy::SerializeConvolution3dLayer(const armnn::IConnectableLayer* layer,
409  const armnn::Convolution3dDescriptor& descriptor,
410  const char* name)
411 {
412  IgnoreUnused(name);
413 
414  // Create FlatBuffer BaseLayer
415  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Convolution3d);
416 
417  auto flatBufferDescriptor = CreateConvolution3dDescriptor(m_flatBufferBuilder,
418  descriptor.m_PadLeft,
419  descriptor.m_PadRight,
420  descriptor.m_PadTop,
421  descriptor.m_PadBottom,
422  descriptor.m_PadFront,
423  descriptor.m_PadBack,
424  descriptor.m_StrideX,
425  descriptor.m_StrideY,
426  descriptor.m_StrideZ,
427  descriptor.m_DilationX,
428  descriptor.m_DilationY,
429  descriptor.m_DilationZ,
430  descriptor.m_BiasEnabled,
431  GetFlatBufferDataLayout(descriptor.m_DataLayout));
432 
433  // Create the FlatBuffer Convolution3dLayer
434  auto flatBufferLayer = CreateConvolution3dLayer(m_flatBufferBuilder,
435  flatBufferBaseLayer,
436  flatBufferDescriptor);
437 
438  // Add the AnyLayer to the FlatBufferLayers
439  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_Convolution3dLayer);
440 }
441 
442 void SerializerStrategy::SerializeDepthToSpaceLayer(const armnn::IConnectableLayer* layer,
443  const armnn::DepthToSpaceDescriptor& descriptor,
444  const char* name)
445 {
446  IgnoreUnused(name);
447 
448  auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_DepthToSpace);
449  auto fbDescriptor = CreateDepthToSpaceDescriptor(m_flatBufferBuilder,
450  descriptor.m_BlockSize,
451  GetFlatBufferDataLayout(descriptor.m_DataLayout));
452 
453  auto fbLayer = serializer::CreateDepthToSpaceLayer(m_flatBufferBuilder, fbBaseLayer, fbDescriptor);
454 
455  CreateAnyLayer(fbLayer.o, serializer::Layer::Layer_DepthToSpaceLayer);
456 }
457 
458 void SerializerStrategy::SerializeDepthwiseConvolution2dLayer(const armnn::IConnectableLayer* layer,
459  const armnn::DepthwiseConvolution2dDescriptor& descriptor,
460  const char* name)
461 {
462  IgnoreUnused(name);
463 
464  auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_DepthwiseConvolution2d);
465  auto fbDescriptor = CreateDepthwiseConvolution2dDescriptor(m_flatBufferBuilder,
466  descriptor.m_PadLeft,
467  descriptor.m_PadRight,
468  descriptor.m_PadTop,
469  descriptor.m_PadBottom,
470  descriptor.m_StrideX,
471  descriptor.m_StrideY,
472  descriptor.m_DilationX,
473  descriptor.m_DilationY,
474  descriptor.m_BiasEnabled,
475  GetFlatBufferDataLayout(descriptor.m_DataLayout));
476 
477  auto flatBufferLayer = CreateDepthwiseConvolution2dLayer(m_flatBufferBuilder,
478  fbBaseLayer,
479  fbDescriptor);
480 
481  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_DepthwiseConvolution2dLayer);
482 }
483 
484 void SerializerStrategy::SerializeDequantizeLayer(const armnn::IConnectableLayer* layer,
485  const char* name)
486 {
487  IgnoreUnused(name);
488 
489  auto fbDequantizeBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Dequantize);
490  auto fbDequantizeLayer = serializer::CreateDequantizeLayer(m_flatBufferBuilder, fbDequantizeBaseLayer);
491 
492  CreateAnyLayer(fbDequantizeLayer.o, serializer::Layer::Layer_DequantizeLayer);
493 }
494 
495 void SerializerStrategy::SerializeDetectionPostProcessLayer(const armnn::IConnectableLayer* layer,
496  const armnn::DetectionPostProcessDescriptor& descriptor,
497  const std::vector<armnn::ConstTensor>& constants,
498  const char* name)
499 {
500  IgnoreUnused(name);
501 
502  const armnn::ConstTensor& anchors = constants[0];
503 
504  auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_DetectionPostProcess);
505  auto fbDescriptor = CreateDetectionPostProcessDescriptor(m_flatBufferBuilder,
506  descriptor.m_MaxDetections,
507  descriptor.m_MaxClassesPerDetection,
508  descriptor.m_DetectionsPerClass,
509  descriptor.m_NmsScoreThreshold,
510  descriptor.m_NmsIouThreshold,
511  descriptor.m_NumClasses,
512  descriptor.m_UseRegularNms,
513  descriptor.m_ScaleX,
514  descriptor.m_ScaleY,
515  descriptor.m_ScaleW,
516  descriptor.m_ScaleH);
517 
518  flatbuffers::Offset<serializer::ConstTensor> fbAnchorsConstTensorInfo = CreateConstTensorInfo(anchors);
519 
520  auto flatBufferLayer = CreateDetectionPostProcessLayer(m_flatBufferBuilder,
521  fbBaseLayer,
522  fbDescriptor,
523  fbAnchorsConstTensorInfo);
524 
525  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_DetectionPostProcessLayer);
526 }
527 
528 void SerializerStrategy::SerializeDivisionLayer(const armnn::IConnectableLayer* layer, const char* name)
529 {
530  IgnoreUnused(name);
531 
532  auto fbDivisionBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Division);
533  auto fbDivisionLayer = serializer::CreateDivisionLayer(m_flatBufferBuilder, fbDivisionBaseLayer);
534 
535  CreateAnyLayer(fbDivisionLayer.o, serializer::Layer::Layer_DivisionLayer);
536 }
537 
538 void SerializerStrategy::SerializeElementwiseBinaryLayer(const armnn::IConnectableLayer* layer,
539  const armnn::ElementwiseBinaryDescriptor& descriptor,
540  const char* name)
541 {
542  IgnoreUnused(name);
543 
544  auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_ElementwiseBinary);
545  auto fbDescriptor = serializer::CreateElementwiseBinaryDescriptor(
546  m_flatBufferBuilder,
547  GetFlatBufferBinaryOperation(descriptor.m_Operation));
548 
549  auto fbLayer = serializer::CreateElementwiseBinaryLayer(m_flatBufferBuilder, fbBaseLayer, fbDescriptor);
550  CreateAnyLayer(fbLayer.o, serializer::Layer::Layer_ElementwiseBinaryLayer);
551 }
552 
553 void SerializerStrategy::SerializeElementwiseUnaryLayer(const armnn::IConnectableLayer* layer,
554  const armnn::ElementwiseUnaryDescriptor& descriptor,
555  const char* name)
556 {
557  IgnoreUnused(name);
558 
559  auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_ElementwiseUnary);
560  auto fbDescriptor = serializer::CreateElementwiseUnaryDescriptor(
561  m_flatBufferBuilder,
562  GetFlatBufferUnaryOperation(descriptor.m_Operation));
563 
564  auto fbLayer = serializer::CreateElementwiseUnaryLayer(m_flatBufferBuilder, fbBaseLayer, fbDescriptor);
565  CreateAnyLayer(fbLayer.o, serializer::Layer::Layer_ElementwiseUnaryLayer);
566 }
567 
568 void SerializerStrategy::SerializeFillLayer(const armnn::IConnectableLayer* layer,
569  const armnn::FillDescriptor& fillDescriptor,
570  const char* name)
571 {
572  IgnoreUnused(name);
573 
574  auto fbFillBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Fill);
575 
576  auto fbDescriptor = serializer::CreateFillDescriptor(m_flatBufferBuilder, fillDescriptor.m_Value);
577 
578  auto fbFillLayer = serializer::CreateFillLayer(m_flatBufferBuilder, fbFillBaseLayer, fbDescriptor);
579 
580  CreateAnyLayer(fbFillLayer.o, serializer::Layer::Layer_FillLayer);
581 }
582 
583 void SerializerStrategy::SerializeFloorLayer(const armnn::IConnectableLayer *layer, const char *name)
584 {
585  IgnoreUnused(name);
586 
587  auto flatBufferFloorBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Floor);
588  auto flatBufferFloorLayer = serializer::CreateFloorLayer(m_flatBufferBuilder, flatBufferFloorBaseLayer);
589 
590  CreateAnyLayer(flatBufferFloorLayer.o, serializer::Layer::Layer_FloorLayer);
591 }
592 
593 void SerializerStrategy::SerializeGatherLayer(const armnn::IConnectableLayer* layer,
594  const armnn::GatherDescriptor& gatherDescriptor,
595  const char* name)
596 {
597  IgnoreUnused(name);
598 
599  auto fbGatherDescriptor = CreateGatherDescriptor(m_flatBufferBuilder,
600  gatherDescriptor.m_Axis);
601  auto fbGatherBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Gather);
602  auto flatBufferLayer = serializer::CreateGatherLayer(m_flatBufferBuilder, fbGatherBaseLayer, fbGatherDescriptor);
603 
604  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_GatherLayer);
605 }
606 
607 void SerializerStrategy::SerializeGatherNdLayer(const armnn::IConnectableLayer* layer,
608  const char* name)
609 {
610  IgnoreUnused(name);
611 
612  auto fbGatherNdBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_GatherNd);
613  auto flatBufferLayer = serializer::CreateGatherNdLayer(m_flatBufferBuilder, fbGatherNdBaseLayer);
614 
615  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_GatherNdLayer);
616 }
617 
618 void SerializerStrategy::SerializeInstanceNormalizationLayer(
619  const armnn::IConnectableLayer* layer,
620  const armnn::InstanceNormalizationDescriptor& instanceNormalizationDescriptor,
621  const char* name)
622 {
623  IgnoreUnused(name);
624 
625  auto fbDescriptor = serializer::CreateInstanceNormalizationDescriptor(
626  m_flatBufferBuilder,
627  instanceNormalizationDescriptor.m_Gamma,
628  instanceNormalizationDescriptor.m_Beta,
629  instanceNormalizationDescriptor.m_Eps,
630  GetFlatBufferDataLayout(instanceNormalizationDescriptor.m_DataLayout));
631 
632  auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_InstanceNormalization);
633  auto fbLayer = serializer::CreateInstanceNormalizationLayer(m_flatBufferBuilder, fbBaseLayer, fbDescriptor);
634 
635  CreateAnyLayer(fbLayer.o, serializer::Layer::Layer_InstanceNormalizationLayer);
636 }
637 
638 void SerializerStrategy::SerializeL2NormalizationLayer(const armnn::IConnectableLayer* layer,
639  const armnn::L2NormalizationDescriptor& l2NormalizationDescriptor,
640  const char* name)
641 {
642  IgnoreUnused(name);
643 
644  // Create FlatBuffer BaseLayer
645  auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_L2Normalization);
646 
647  // Create the FlatBuffer L2Normalization Descriptor
648  auto fbDescriptor = serializer::CreateL2NormalizationDescriptor(
649  m_flatBufferBuilder,
650  GetFlatBufferDataLayout(l2NormalizationDescriptor.m_DataLayout),
651  l2NormalizationDescriptor.m_Eps);
652 
653  // Create FlatBuffer layer
654  auto fbLayer = serializer::CreateL2NormalizationLayer(m_flatBufferBuilder, fbBaseLayer, fbDescriptor);
655 
656  CreateAnyLayer(fbLayer.o, serializer::Layer::Layer_L2NormalizationLayer);
657 }
658 
659 void SerializerStrategy::SerializeLogicalBinaryLayer(const armnn::IConnectableLayer* layer,
660  const armnn::LogicalBinaryDescriptor& descriptor,
661  const char* name)
662 {
663  IgnoreUnused(name);
664 
665  auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_LogicalBinary);
666  auto fbDescriptor = serializer::CreateLogicalBinaryDescriptor(
667  m_flatBufferBuilder,
668  GetFlatBufferLogicalBinaryOperation(descriptor.m_Operation));
669 
670  auto fbLayer = serializer::CreateLogicalBinaryLayer(m_flatBufferBuilder, fbBaseLayer, fbDescriptor);
671  CreateAnyLayer(fbLayer.o, serializer::Layer::Layer_LogicalBinaryLayer);
672 }
673 
674 void SerializerStrategy::SerializeLogSoftmaxLayer(const armnn::IConnectableLayer* layer,
675  const armnn::LogSoftmaxDescriptor& logSoftmaxDescriptor,
676  const char* name)
677 {
678  IgnoreUnused(name);
679 
680  // Create FlatBuffer BaseLayer
681  auto flatBufferLogSoftmaxBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_LogSoftmax);
682 
683  // Create the FlatBuffer LogSoftmaxDescriptor
684  auto flatBufferLogSoftmaxDesc =
685  serializer::CreateLogSoftmaxDescriptor(m_flatBufferBuilder,
686  logSoftmaxDescriptor.m_Beta,
687  logSoftmaxDescriptor.m_Axis);
688 
689  // Create the FlatBuffer LogSoftmaxLayer
690  auto flatBufferLogSoftmaxLayer =
691  serializer::CreateLogSoftmaxLayer(m_flatBufferBuilder,
692  flatBufferLogSoftmaxBaseLayer,
693  flatBufferLogSoftmaxDesc);
694 
695  CreateAnyLayer(flatBufferLogSoftmaxLayer.o, serializer::Layer::Layer_LogSoftmaxLayer);
696 }
697 
698 void SerializerStrategy::SerializeLstmLayer(const armnn::IConnectableLayer* layer,
699  const armnn::LstmDescriptor& descriptor,
700  const std::vector<armnn::ConstTensor>& constants,
701  const char* name)
702 {
703  IgnoreUnused(name);
704 
705  auto fbLstmBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Lstm);
706 
707  auto fbLstmDescriptor = serializer::CreateLstmDescriptor(
708  m_flatBufferBuilder,
709  descriptor.m_ActivationFunc,
710  descriptor.m_ClippingThresCell,
711  descriptor.m_ClippingThresProj,
712  descriptor.m_CifgEnabled,
713  descriptor.m_PeepholeEnabled,
714  descriptor.m_ProjectionEnabled,
715  descriptor.m_LayerNormEnabled);
716 
717  // Index for constants vector
718  std::size_t i = 0;
719 
720  // Get mandatory/basic input parameters
721  auto inputToForgetWeights = CreateConstTensorInfo(constants[i++]); //InputToForgetWeights
722  auto inputToCellWeights = CreateConstTensorInfo(constants[i++]); //InputToCellWeights
723  auto inputToOutputWeights = CreateConstTensorInfo(constants[i++]); //InputToOutputWeights
724  auto recurrentToForgetWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToForgetWeights
725  auto recurrentToCellWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToCellWeights
726  auto recurrentToOutputWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToOutputWeights
727  auto forgetGateBias = CreateConstTensorInfo(constants[i++]); //ForgetGateBias
728  auto cellBias = CreateConstTensorInfo(constants[i++]); //CellBias
729  auto outputGateBias = CreateConstTensorInfo(constants[i++]); //OutputGateBias
730 
731 
732 
733  //Define optional parameters, these will be set depending on configuration in Lstm descriptor
734  flatbuffers::Offset<serializer::ConstTensor> inputToInputWeights;
735  flatbuffers::Offset<serializer::ConstTensor> recurrentToInputWeights;
736  flatbuffers::Offset<serializer::ConstTensor> cellToInputWeights;
737  flatbuffers::Offset<serializer::ConstTensor> inputGateBias;
738  flatbuffers::Offset<serializer::ConstTensor> projectionWeights;
739  flatbuffers::Offset<serializer::ConstTensor> projectionBias;
740  flatbuffers::Offset<serializer::ConstTensor> cellToForgetWeights;
741  flatbuffers::Offset<serializer::ConstTensor> cellToOutputWeights;
742  flatbuffers::Offset<serializer::ConstTensor> inputLayerNormWeights;
743  flatbuffers::Offset<serializer::ConstTensor> forgetLayerNormWeights;
744  flatbuffers::Offset<serializer::ConstTensor> cellLayerNormWeights;
745  flatbuffers::Offset<serializer::ConstTensor> outputLayerNormWeights;
746 
747  if (!descriptor.m_CifgEnabled)
748  {
749  inputToInputWeights = CreateConstTensorInfo(constants[i++]); //InputToInputWeights
750  recurrentToInputWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToInputWeights
751  inputGateBias = CreateConstTensorInfo(constants[i++]); //InputGateBias
752  }
753 
754  if (descriptor.m_PeepholeEnabled)
755  {
756  if (!descriptor.m_CifgEnabled)
757  {
758  cellToInputWeights = CreateConstTensorInfo(constants[i++]); //CellToInputWeights
759  }
760  cellToForgetWeights = CreateConstTensorInfo(constants[i++]); //CellToForgetWeights
761  cellToOutputWeights = CreateConstTensorInfo(constants[i++]); //CellToOutputWeights
762  }
763 
764  if (descriptor.m_ProjectionEnabled)
765  {
766  projectionWeights = CreateConstTensorInfo(constants[i++]); //ProjectionWeights
767  projectionBias = CreateConstTensorInfo(constants[i++]); //ProjectionBias
768  }
769 
770  if (descriptor.m_LayerNormEnabled)
771  {
772  if (!descriptor.m_CifgEnabled)
773  {
774  inputLayerNormWeights = CreateConstTensorInfo(constants[i++]); //InputLayerNormWeights
775  }
776  forgetLayerNormWeights = CreateConstTensorInfo(constants[i++]); //ForgetLayerNormWeights
777  cellLayerNormWeights = CreateConstTensorInfo(constants[i++]); //CellLayerNormWeights
778  outputLayerNormWeights = CreateConstTensorInfo(constants[i++]); //OutputLayerNormWeights
779  }
780 
781  auto fbLstmParams = serializer::CreateLstmInputParams(
782  m_flatBufferBuilder,
783  inputToForgetWeights,
784  inputToCellWeights,
785  inputToOutputWeights,
786  recurrentToForgetWeights,
787  recurrentToCellWeights,
788  recurrentToOutputWeights,
789  forgetGateBias,
790  cellBias,
791  outputGateBias,
792  inputToInputWeights,
793  recurrentToInputWeights,
794  cellToInputWeights,
795  inputGateBias,
796  projectionWeights,
797  projectionBias,
798  cellToForgetWeights,
799  cellToOutputWeights,
800  inputLayerNormWeights,
801  forgetLayerNormWeights,
802  cellLayerNormWeights,
803  outputLayerNormWeights);
804 
805  auto fbLstmLayer = serializer::CreateLstmLayer(
806  m_flatBufferBuilder,
807  fbLstmBaseLayer,
808  fbLstmDescriptor,
809  fbLstmParams);
810 
811  CreateAnyLayer(fbLstmLayer.o, serializer::Layer::Layer_LstmLayer);
812 }
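// Ordering of the 'constants' vector consumed by SerializeLstmLayer, summarised from the
// reads above:
//
//     constants[0..8]        : InputToForget/Cell/Output weights,
//                              RecurrentToForget/Cell/Output weights,
//                              ForgetGateBias, CellBias, OutputGateBias (always present)
//     if !m_CifgEnabled      : InputToInputWeights, RecurrentToInputWeights, InputGateBias
//     if m_PeepholeEnabled   : CellToInputWeights (only when !m_CifgEnabled),
//                              CellToForgetWeights, CellToOutputWeights
//     if m_ProjectionEnabled : ProjectionWeights, ProjectionBias
//     if m_LayerNormEnabled  : InputLayerNormWeights (only when !m_CifgEnabled),
//                              ForgetLayerNormWeights, CellLayerNormWeights, OutputLayerNormWeights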
813 
814 void SerializerStrategy::SerializeMaximumLayer(const armnn::IConnectableLayer* layer, const char* name)
815 {
816  IgnoreUnused(name);
817 
818  auto fbMaximumBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Maximum);
819  auto fbMaximumLayer = serializer::CreateMaximumLayer(m_flatBufferBuilder, fbMaximumBaseLayer);
820 
821  CreateAnyLayer(fbMaximumLayer.o, serializer::Layer::Layer_MaximumLayer);
822 }
823 
824 void SerializerStrategy::SerializeMeanLayer(const armnn::IConnectableLayer* layer,
825  const armnn::MeanDescriptor& descriptor,
826  const char* name)
827 {
828  IgnoreUnused(name);
829 
830  auto fbMeanBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Mean);
831  auto fbMeanDescriptor = serializer::CreateMeanDescriptor(m_flatBufferBuilder,
832  m_flatBufferBuilder.CreateVector(descriptor.m_Axis),
833  descriptor.m_KeepDims);
834 
835  auto fbMeanLayer = serializer::CreateMeanLayer(m_flatBufferBuilder,
836  fbMeanBaseLayer,
837  fbMeanDescriptor);
838 
839  CreateAnyLayer(fbMeanLayer.o, serializer::Layer::Layer_MeanLayer);
840 }
841 
842 void SerializerStrategy::SerializeMinimumLayer(const armnn::IConnectableLayer* layer, const char* name)
843 {
844  IgnoreUnused(name);
845 
846  auto fbMinimumBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Minimum);
847  auto fbMinimumLayer = serializer::CreateMinimumLayer(m_flatBufferBuilder, fbMinimumBaseLayer);
848 
849  CreateAnyLayer(fbMinimumLayer.o, serializer::Layer::Layer_MinimumLayer);
850 }
851 
852 void SerializerStrategy::SerializeMergeLayer(const armnn::IConnectableLayer* layer, const char* name)
853 {
854  IgnoreUnused(name);
855 
856  auto fbMergeBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Merge);
857  auto fbMergeLayer = serializer::CreateMergeLayer(m_flatBufferBuilder, fbMergeBaseLayer);
858 
859  CreateAnyLayer(fbMergeLayer.o, serializer::Layer::Layer_MergeLayer);
860 }
861 
862 void SerializerStrategy::SerializeConcatLayer(const armnn::IConnectableLayer* layer,
863  const armnn::ConcatDescriptor& concatDescriptor,
864  const char* name)
865 {
866  IgnoreUnused(name);
867 
868  auto flatBufferConcatBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Concat);
869 
870  std::vector<flatbuffers::Offset<UintVector>> views;
871  for (unsigned int v = 0; v < concatDescriptor.GetNumViews(); ++v)
872  {
873  const uint32_t* origin = concatDescriptor.GetViewOrigin(v);
874  std::vector<uint32_t> origins;
875  for (unsigned int d = 0; d < concatDescriptor.GetNumDimensions(); ++d)
876  {
877  origins.push_back(origin[d]);
878  }
879  auto view = m_flatBufferBuilder.CreateVector(origins);
880  auto uintVector = CreateUintVector(m_flatBufferBuilder, view);
881  views.push_back(uintVector);
882  }
883 
884  auto flatBufferConcatDescriptor = CreateOriginsDescriptor(m_flatBufferBuilder,
885  concatDescriptor.GetConcatAxis(),
886  concatDescriptor.GetNumViews(),
887  concatDescriptor.GetNumDimensions(),
888  m_flatBufferBuilder.CreateVector(views));
889 
890  auto flatBufferLayer = CreateConcatLayer(m_flatBufferBuilder,
891  flatBufferConcatBaseLayer,
892  flatBufferConcatDescriptor);
893 
894  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_ConcatLayer);
895 }
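// Example of the view-origin encoding above (values are illustrative, not taken from this
// file): concatenating two 3-dimensional views along axis 1, where the first view spans 4
// elements on that axis, GetViewOrigin(0) == {0, 0, 0} and GetViewOrigin(1) == {0, 4, 0};
// each origin is serialized as a UintVector and both are stored in the OriginsDescriptor.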
896 
897 void SerializerStrategy::SerializeMultiplicationLayer(const armnn::IConnectableLayer* layer, const char* name)
898 {
899  IgnoreUnused(name);
900 
901  auto fbMultiplicationBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Multiplication);
902  auto fbMultiplicationLayer = serializer::CreateMultiplicationLayer(m_flatBufferBuilder,
903  fbMultiplicationBaseLayer);
904 
905  CreateAnyLayer(fbMultiplicationLayer.o, serializer::Layer::Layer_MultiplicationLayer);
906 }
907 
908 void SerializerStrategy::SerializePadLayer(const armnn::IConnectableLayer* layer,
909  const armnn::PadDescriptor& padDescriptor,
910  const char* name)
911 {
912  IgnoreUnused(name);
913 
914  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Pad);
915 
916  std::vector<unsigned int> padList;
917  for (auto& p: padDescriptor.m_PadList)
918  {
919  padList.push_back(p.first);
920  padList.push_back(p.second);
921  }
922 
923  auto flatBufferPadDesc = serializer::CreatePadDescriptor(m_flatBufferBuilder,
924  m_flatBufferBuilder.CreateVector(padList),
925  padDescriptor.m_PadValue,
926  GetFlatBufferPaddingMode(padDescriptor.m_PaddingMode));
927 
928  auto flatBufferPadLayer = serializer::CreatePadLayer(m_flatBufferBuilder,
929  flatBufferBaseLayer,
930  flatBufferPadDesc);
931 
932  CreateAnyLayer(flatBufferPadLayer.o, serializer::Layer::Layer_PadLayer);
933 }
934 
935 void SerializerStrategy::SerializePermuteLayer(const armnn::IConnectableLayer* layer,
936  const armnn::PermuteDescriptor& permuteDescriptor,
937  const char* name)
938 {
939  IgnoreUnused(name);
940 
941  // Create FlatBuffer BaseLayer
942  auto flatBufferPermuteBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Permute);
943 
944  std::vector<unsigned int> dimMappings;
945  for (unsigned int i=0; i<permuteDescriptor.m_DimMappings.GetSize(); ++i)
946  {
947  dimMappings.push_back(permuteDescriptor.m_DimMappings[i]);
948  }
949 
950  auto flatBufferPermuteDesc = serializer::CreatePermuteDescriptor(m_flatBufferBuilder,
951  m_flatBufferBuilder.CreateVector(dimMappings));
952 
953  // Create the FlatBuffer PermuteLayer
954  auto flatBufferPermuteLayer = serializer::CreatePermuteLayer(m_flatBufferBuilder,
955  flatBufferPermuteBaseLayer,
956  flatBufferPermuteDesc);
957 
958  // Add the AnyLayer to the FlatBufferLayers
959  CreateAnyLayer(flatBufferPermuteLayer.o, serializer::Layer::Layer_PermuteLayer);
960 }
961 
962 // Build FlatBuffer for Rank Layer
963 void SerializerStrategy::SerializeRankLayer(const armnn::IConnectableLayer* layer,
964  const char* name)
965 {
966  IgnoreUnused(name);
967  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Rank);
968  auto flatBufferRankLayer = serializer::CreateRankLayer(m_flatBufferBuilder, flatBufferBaseLayer);
969 
970  CreateAnyLayer(flatBufferRankLayer.o, serializer::Layer::Layer_RankLayer);
971 }
972 
973 void SerializerStrategy::SerializeReduceLayer(const armnn::IConnectableLayer* layer,
974  const armnn::ReduceDescriptor& reduceDescriptor,
975  const char*)
976 {
977  auto fbReduceBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Reduce);
978  auto fbDescriptor = CreateReduceDescriptor(m_flatBufferBuilder,
979  reduceDescriptor.m_KeepDims,
980  m_flatBufferBuilder.CreateVector(reduceDescriptor.m_vAxis),
981  GetFlatBufferReduceOperation(reduceDescriptor.m_ReduceOperation));
982  auto fbReduceLayer = serializer::CreateReduceLayer(m_flatBufferBuilder,
983  fbReduceBaseLayer,
984  fbDescriptor);
985 
986  CreateAnyLayer(fbReduceLayer.o, serializer::Layer::Layer_ReduceLayer);
987 }
988 
989 // Build FlatBuffer for Reshape Layer
990 void SerializerStrategy::SerializeReshapeLayer(const armnn::IConnectableLayer* layer,
991  const armnn::ReshapeDescriptor& reshapeDescriptor,
992  const char* name)
993 {
994  IgnoreUnused(name);
995 
996  // Create FlatBuffer BaseLayer
997  auto flatBufferReshapeBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Reshape);
998 
999  std::vector<unsigned int> targetShape;
1000  for (unsigned int i =0; i < reshapeDescriptor.m_TargetShape.GetNumDimensions(); i++)
1001  {
1002  targetShape.push_back(reshapeDescriptor.m_TargetShape[i]);
1003  }
1004 
1005  auto flatBufferReshapeDesc = serializer::CreateReshapeDescriptor(m_flatBufferBuilder,
1006  m_flatBufferBuilder.CreateVector(targetShape));
1007 
1008  // Create the FlatBuffer ReshapeLayer
1009  auto flatBufferReshapeLayer = serializer::CreateReshapeLayer(m_flatBufferBuilder, flatBufferReshapeBaseLayer,
1010  flatBufferReshapeDesc);
1011 
1012  // Add the AnyLayer to the FlatBufferLayers
1013  CreateAnyLayer(flatBufferReshapeLayer.o, serializer::Layer::Layer_ReshapeLayer);
1014 }
1015 
1016 void SerializerStrategy::SerializeResizeLayer(const armnn::IConnectableLayer* layer,
1017  const armnn::ResizeDescriptor& resizeDescriptor,
1018  const char* name)
1019 {
1020  IgnoreUnused(name);
1021 
1022  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Resize);
1023 
1024  auto flatBufferDescriptor =
1025  CreateResizeDescriptor(m_flatBufferBuilder,
1026  resizeDescriptor.m_TargetHeight,
1027  resizeDescriptor.m_TargetWidth,
1028  GetFlatBufferResizeMethod(resizeDescriptor.m_Method),
1029  GetFlatBufferDataLayout(resizeDescriptor.m_DataLayout),
1030  resizeDescriptor.m_AlignCorners,
1031  resizeDescriptor.m_HalfPixelCenters);
1032 
1033  auto flatBufferLayer = serializer::CreateResizeLayer(m_flatBufferBuilder,
1034  flatBufferBaseLayer,
1035  flatBufferDescriptor);
1036 
1037  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_ResizeLayer);
1038 }
1039 
1040 void SerializerStrategy::SerializeReverseV2Layer(const armnn::IConnectableLayer* layer,
1041  const char* name)
1042 {
1043  IgnoreUnused(name);
1044 
1045  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_ReverseV2);
1046 
1047  auto flatBufferLayer = serializer::CreateReverseV2Layer(m_flatBufferBuilder,
1048  flatBufferBaseLayer);
1049 
1050  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_ReverseV2Layer);
1051 }
1052 
1053 void SerializerStrategy::SerializeSliceLayer(const armnn::IConnectableLayer* layer,
1054  const armnn::SliceDescriptor& sliceDescriptor,
1055  const char* name)
1056 {
1057  IgnoreUnused(name);
1058 
1059  auto fbSliceBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Slice);
1060  auto fbSliceDescriptor = CreateSliceDescriptor(m_flatBufferBuilder,
1061  m_flatBufferBuilder.CreateVector(sliceDescriptor.m_Begin),
1062  m_flatBufferBuilder.CreateVector(sliceDescriptor.m_Size));
1063 
1064  auto fbSliceLayer = serializer::CreateSliceLayer(m_flatBufferBuilder, fbSliceBaseLayer, fbSliceDescriptor);
1065 
1066  CreateAnyLayer(fbSliceLayer.o, serializer::Layer::Layer_SliceLayer);
1067 }
1068 
1069 // Build FlatBuffer for Softmax Layer
1070 void SerializerStrategy::SerializeSoftmaxLayer(const armnn::IConnectableLayer* layer,
1071  const armnn::SoftmaxDescriptor& softmaxDescriptor,
1072  const char* name)
1073 {
1074  IgnoreUnused(name);
1075 
1076  // Create FlatBuffer BaseLayer
1077  auto flatBufferSoftmaxBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Softmax);
1078 
1079  // Create the FlatBuffer SoftmaxDescriptor
1080  auto flatBufferSoftmaxDesc =
1081  serializer::CreateSoftmaxDescriptor(m_flatBufferBuilder,
1082  softmaxDescriptor.m_Beta,
1083  softmaxDescriptor.m_Axis);
1084 
1085  // Create the FlatBuffer SoftmaxLayer
1086  auto flatBufferSoftmaxLayer =
1087  serializer::CreateSoftmaxLayer(m_flatBufferBuilder,
1088  flatBufferSoftmaxBaseLayer,
1089  flatBufferSoftmaxDesc);
1090 
1091  CreateAnyLayer(flatBufferSoftmaxLayer.o, serializer::Layer::Layer_SoftmaxLayer);
1092 }
1093 
1094 void SerializerStrategy::SerializePooling2dLayer(const armnn::IConnectableLayer* layer,
1095  const armnn::Pooling2dDescriptor& pooling2dDescriptor,
1096  const char* name)
1097 {
1098  IgnoreUnused(name);
1099 
1100  auto fbPooling2dBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Pooling2d);
1101  auto fbPooling2dDescriptor = serializer::CreatePooling2dDescriptor(
1102  m_flatBufferBuilder,
1103  GetFlatBufferPoolingAlgorithm(pooling2dDescriptor.m_PoolType),
1104  pooling2dDescriptor.m_PadLeft,
1105  pooling2dDescriptor.m_PadRight,
1106  pooling2dDescriptor.m_PadTop,
1107  pooling2dDescriptor.m_PadBottom,
1108  pooling2dDescriptor.m_PoolWidth,
1109  pooling2dDescriptor.m_PoolHeight,
1110  pooling2dDescriptor.m_StrideX,
1111  pooling2dDescriptor.m_StrideY,
1112  GetFlatBufferOutputShapeRounding(pooling2dDescriptor.m_OutputShapeRounding),
1113  GetFlatBufferPaddingMethod(pooling2dDescriptor.m_PaddingMethod),
1114  GetFlatBufferDataLayout(pooling2dDescriptor.m_DataLayout));
1115 
1116  auto fbPooling2dLayer = serializer::CreatePooling2dLayer(m_flatBufferBuilder,
1117  fbPooling2dBaseLayer,
1118  fbPooling2dDescriptor);
1119 
1120  CreateAnyLayer(fbPooling2dLayer.o, serializer::Layer::Layer_Pooling2dLayer);
1121 }
1122 
1123 void SerializerStrategy::SerializePooling3dLayer(const armnn::IConnectableLayer* layer,
1124  const armnn::Pooling3dDescriptor& pooling3dDescriptor,
1125  const char* name)
1126 {
1127  IgnoreUnused(name);
1128 
1129  auto fbPooling3dBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Pooling3d);
1130  auto fbPooling3dDescriptor = serializer::CreatePooling3dDescriptor(
1131  m_flatBufferBuilder,
1132  GetFlatBufferPoolingAlgorithm(pooling3dDescriptor.m_PoolType),
1133  pooling3dDescriptor.m_PadLeft,
1134  pooling3dDescriptor.m_PadRight,
1135  pooling3dDescriptor.m_PadTop,
1136  pooling3dDescriptor.m_PadBottom,
1137  pooling3dDescriptor.m_PadFront,
1138  pooling3dDescriptor.m_PadBack,
1139  pooling3dDescriptor.m_PoolWidth,
1140  pooling3dDescriptor.m_PoolHeight,
1141  pooling3dDescriptor.m_PoolDepth,
1142  pooling3dDescriptor.m_StrideX,
1143  pooling3dDescriptor.m_StrideY,
1144  pooling3dDescriptor.m_StrideZ,
1145  GetFlatBufferOutputShapeRounding(pooling3dDescriptor.m_OutputShapeRounding),
1146  GetFlatBufferPaddingMethod(pooling3dDescriptor.m_PaddingMethod),
1147  GetFlatBufferDataLayout(pooling3dDescriptor.m_DataLayout));
1148 
1149  auto fbPooling3dLayer = serializer::CreatePooling3dLayer(m_flatBufferBuilder,
1150  fbPooling3dBaseLayer,
1151  fbPooling3dDescriptor);
1152 
1153  CreateAnyLayer(fbPooling3dLayer.o, serializer::Layer::Layer_Pooling3dLayer);
1154 }
1155 
1156 void SerializerStrategy::SerializePreluLayer(const armnn::IConnectableLayer* layer,
1157  const char* name)
1158 {
1159  IgnoreUnused(name);
1160 
1161  // Create FlatBuffer BaseLayer
1162  auto flatBufferPreluBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Prelu);
1163 
1164  // Create the FlatBuffer PreluLayer
1165  auto flatBufferPreluLayer = serializer::CreatePreluLayer(m_flatBufferBuilder, flatBufferPreluBaseLayer);
1166 
1167  // Add the AnyLayer to the FlatBufferLayers
1168  CreateAnyLayer(flatBufferPreluLayer.o, serializer::Layer::Layer_PreluLayer);
1169 }
1170 
1171 void SerializerStrategy::SerializeQuantizeLayer(const armnn::IConnectableLayer *layer, const char *name)
1172 {
1173  IgnoreUnused(name);
1174 
1175  auto fbQuantizeBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Quantize);
1176  auto fbQuantizeLayer = serializer::CreateQuantizeLayer(m_flatBufferBuilder,
1177  fbQuantizeBaseLayer);
1178  CreateAnyLayer(fbQuantizeLayer.o, serializer::Layer::Layer_QuantizeLayer);
1179 }
1180 
1181 // Build FlatBuffer for FullyConnected Layer
1182 void SerializerStrategy::SerializeFullyConnectedLayer(const armnn::IConnectableLayer* layer,
1183  const armnn::FullyConnectedDescriptor& fullyConnectedDescriptor,
1184  const char*)
1185 {
1186  // Create FlatBuffer BaseLayer
1187  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_FullyConnected);
1188 
1189  // Create FlatBuffer FullyConnectedDescriptor
1190  auto flatBufferDescriptor =
1191  serializer::CreateFullyConnectedDescriptor(m_flatBufferBuilder,
1192  fullyConnectedDescriptor.m_BiasEnabled,
1193  fullyConnectedDescriptor.m_TransposeWeightMatrix,
1194  fullyConnectedDescriptor.m_ConstantWeights);
1195 
1196  // Create FlatBuffer FullyConnectedLayer
1197  auto flatBufferLayer = serializer::CreateFullyConnectedLayer(m_flatBufferBuilder,
1198  flatBufferBaseLayer,
1199  flatBufferDescriptor);
1200 
1201  // Add created FullyConnectedLayer to the FlatBufferLayers
1202  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_FullyConnectedLayer);
1203 }
1204 
1205 // Build FlatBuffer for SpaceToBatchNd Layer
1206 void SerializerStrategy::SerializeSpaceToBatchNdLayer(const armnn::IConnectableLayer* layer,
1207  const armnn::SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor,
1208  const char* name)
1209 {
1210  IgnoreUnused(name);
1211 
1212  // Create FlatBuffer BaseLayer
1213  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_SpaceToBatchNd);
1214 
1215  std::vector<unsigned int> padList;
1216  padList.reserve(spaceToBatchNdDescriptor.m_PadList.size()*2);
1217  for (auto& pad : spaceToBatchNdDescriptor.m_PadList)
1218  {
1219  padList.push_back(pad.first);
1220  padList.push_back(pad.second);
1221  }
1222 
1223  auto flatBufferDescriptor =
1224  CreateSpaceToBatchNdDescriptor(m_flatBufferBuilder,
1225  m_flatBufferBuilder.CreateVector(spaceToBatchNdDescriptor.m_BlockShape),
1226  m_flatBufferBuilder.CreateVector(padList),
1227  GetFlatBufferDataLayout(spaceToBatchNdDescriptor.m_DataLayout));
1228 
1229  auto flatBufferLayer = serializer::CreateSpaceToBatchNdLayer(m_flatBufferBuilder,
1230  flatBufferBaseLayer,
1231  flatBufferDescriptor);
1232 
1233  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_SpaceToBatchNdLayer);
1234 }
1235 
1236 // Build FlatBuffer for SpaceToDepthLayer
1237 void SerializerStrategy::SerializeSpaceToDepthLayer(const armnn::IConnectableLayer* layer,
1238  const armnn::SpaceToDepthDescriptor& spaceToDepthDescriptor,
1239  const char* name)
1240 {
1241  IgnoreUnused(name);
1242 
1243  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_SpaceToDepth);
1244  auto flatBufferDescriptor =
1245  CreateSpaceToDepthDescriptor(m_flatBufferBuilder,
1246  spaceToDepthDescriptor.m_BlockSize,
1247  GetFlatBufferDataLayout(spaceToDepthDescriptor.m_DataLayout));
1248 
1249  auto flatBufferLayer = serializer::CreateSpaceToDepthLayer(m_flatBufferBuilder,
1250  flatBufferBaseLayer,
1251  flatBufferDescriptor);
1252 
1253  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_SpaceToDepthLayer);
1254 }
1255 
1256 // Build FlatBuffer for Splitter Layer
1257 void SerializerStrategy::SerializeSplitterLayer(const armnn::IConnectableLayer* layer,
1258  const armnn::ViewsDescriptor& viewsDescriptor,
1259  const char* name)
1260 {
1261  IgnoreUnused(name);
1262 
1263  // Create FlatBuffer ViewOrigins
1264  std::vector<flatbuffers::Offset<UintVector>> flatBufferViewOrigins;
1265  flatBufferViewOrigins.reserve(viewsDescriptor.GetNumViews());
1266 
1267  for(unsigned int vIdx = 0; vIdx < viewsDescriptor.GetNumViews(); ++vIdx)
1268  {
1269  std::vector<uint32_t> viewOrigin;
1270  viewOrigin.reserve(viewsDescriptor.GetNumDimensions());
1271 
1272  // Copy vector
1273  for(unsigned int dIdx = 0; dIdx < viewsDescriptor.GetNumDimensions(); ++dIdx)
1274  {
1275  viewOrigin.push_back(viewsDescriptor.GetViewOrigin(vIdx)[dIdx]);
1276  }
1277 
1278  flatBufferViewOrigins.push_back(CreateUintVector(m_flatBufferBuilder,
1279  m_flatBufferBuilder.CreateVector(viewOrigin)));
1280  }
1281 
1282  // Create FlatBuffer OriginsDescriptor
1283  auto flatBufferOriginDescriptor = CreateOriginsDescriptor(m_flatBufferBuilder,
1284  viewsDescriptor.GetOrigins().GetConcatAxis(),
1285  viewsDescriptor.GetOrigins().GetNumViews(),
1286  viewsDescriptor.GetOrigins().GetNumDimensions(),
1287  m_flatBufferBuilder.CreateVector(flatBufferViewOrigins));
1288 
1289  // Create FlatBuffer ViewSizes
1290  std::vector<flatbuffers::Offset<UintVector>> flatBufferViewSizes;
1291  flatBufferViewSizes.reserve(viewsDescriptor.GetNumViews());
1292 
1293  for(unsigned int vIdx = 0; vIdx < viewsDescriptor.GetNumViews(); ++vIdx)
1294  {
1295  std::vector<uint32_t> viewSize;
1296  viewSize.reserve(viewsDescriptor.GetNumDimensions());
1297 
1298  // Copy vector
1299  for(unsigned int dIdx = 0; dIdx < viewsDescriptor.GetNumDimensions(); ++dIdx)
1300  {
1301  viewSize.push_back(viewsDescriptor.GetViewSizes(vIdx)[dIdx]);
1302  }
1303 
1304  flatBufferViewSizes.push_back(CreateUintVector(m_flatBufferBuilder,
1305  m_flatBufferBuilder.CreateVector(viewSize)));
1306  }
1307 
1308  // Create FlatBuffer ViewsDescriptor
1309  auto flatBufferViewsDescriptor = CreateViewsDescriptor(m_flatBufferBuilder,
1310  flatBufferOriginDescriptor,
1311  m_flatBufferBuilder.CreateVector(flatBufferViewSizes),
1312  viewsDescriptor.HasAxis(),
1313  viewsDescriptor.GetAxis());
1314 
1315  // Create FlatBuffer BaseLayer
1316  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Splitter);
1317 
1318  auto flatBufferSplitterLayer = serializer::CreateSplitterLayer(m_flatBufferBuilder,
1319  flatBufferBaseLayer,
1320  flatBufferViewsDescriptor);
1321 
1322  CreateAnyLayer(flatBufferSplitterLayer.o, serializer::Layer::Layer_SplitterLayer);
1323 }
1324 
1325 void SerializerStrategy::SerializeNormalizationLayer(const armnn::IConnectableLayer* layer,
1326  const armnn::NormalizationDescriptor& descriptor,
1327  const char* name)
1328 {
1329  IgnoreUnused(name);
1330 
1331  auto fbNormalizationBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Normalization);
1332 
1333  auto fbNormalizationDescriptor = serializer::CreateNormalizationDescriptor(
1334  m_flatBufferBuilder,
1335  GetFlatBufferNormalizationAlgorithmChannel(descriptor.m_NormChannelType),
1336  GetFlatBufferNormalizationAlgorithmMethod(descriptor.m_NormMethodType),
1337  descriptor.m_NormSize,
1338  descriptor.m_Alpha,
1339  descriptor.m_Beta,
1340  descriptor.m_K,
1341  GetFlatBufferDataLayout(descriptor.m_DataLayout));
1342 
1343  auto flatBufferLayer = serializer::CreateNormalizationLayer(m_flatBufferBuilder,
1344  fbNormalizationBaseLayer,
1345  fbNormalizationDescriptor);
1346 
1347  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_NormalizationLayer);
1348 }
1349 
1350 void SerializerStrategy::SerializeShapeLayer(const armnn::IConnectableLayer* layer,
1351  const char* name)
1352 {
1353  IgnoreUnused(name);
1354 
1355  auto shapeBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Shape);
1356  auto shapeLayer = serializer::CreateShapeLayer(m_flatBufferBuilder, shapeBaseLayer);
1357 
1358  CreateAnyLayer(shapeLayer.o, serializer::Layer::Layer_ShapeLayer);
1359 }
1360 
1361 void SerializerStrategy::SerializeStackLayer(const armnn::IConnectableLayer* layer,
1362  const armnn::StackDescriptor& stackDescriptor,
1363  const char* name)
1364 {
1365  IgnoreUnused(name);
1366 
1367  auto stackBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Stack);
1368 
1369  std::vector<unsigned int> inputShape;
1370  for (unsigned int i =0; i < stackDescriptor.m_InputShape.GetNumDimensions(); i++)
1371  {
1372  inputShape.push_back(stackDescriptor.m_InputShape[i]);
1373  }
1374 
1375  auto flatBufferStackDescriptor = CreateStackDescriptor(m_flatBufferBuilder,
1376  stackDescriptor.m_Axis,
1377  stackDescriptor.m_NumInputs,
1378  m_flatBufferBuilder.CreateVector(inputShape));
1379 
1380  auto stackLayer = serializer::CreateStackLayer(m_flatBufferBuilder, stackBaseLayer, flatBufferStackDescriptor);
1381  CreateAnyLayer(stackLayer.o, serializer::Layer::Layer_StackLayer);
1382 }
1383 
1384 void SerializerStrategy::SerializeStandInLayer(const armnn::IConnectableLayer *layer,
1385  const armnn::StandInDescriptor& standInDescriptor,
1386  const char *name)
1387 {
1388  IgnoreUnused(name);
1389 
1390  auto fbDescriptor = serializer::CreateStandInDescriptor(m_flatBufferBuilder,
1391  standInDescriptor.m_NumInputs,
1392  standInDescriptor.m_NumOutputs);
1393 
1394  auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_StandIn);
1395  auto fbLayer = serializer::CreateStandInLayer(m_flatBufferBuilder, fbBaseLayer, fbDescriptor);
1396 
1397  CreateAnyLayer(fbLayer.o, serializer::Layer::Layer_StandInLayer);
1398 }
1399 
1400 void SerializerStrategy::SerializeStridedSliceLayer(const armnn::IConnectableLayer* layer,
1401  const armnn::StridedSliceDescriptor& stridedSliceDescriptor,
1402  const char* name)
1403 {
1404  IgnoreUnused(name);
1405 
1406  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_StridedSlice);
1407 
1408  auto flatBufferDescriptor =
1409  CreateStridedSliceDescriptor(m_flatBufferBuilder,
1410  m_flatBufferBuilder.CreateVector(stridedSliceDescriptor.m_Begin),
1411  m_flatBufferBuilder.CreateVector(stridedSliceDescriptor.m_End),
1412  m_flatBufferBuilder.CreateVector(stridedSliceDescriptor.m_Stride),
1413  stridedSliceDescriptor.m_BeginMask,
1414  stridedSliceDescriptor.m_EndMask,
1415  stridedSliceDescriptor.m_ShrinkAxisMask,
1416  stridedSliceDescriptor.m_EllipsisMask,
1417  stridedSliceDescriptor.m_NewAxisMask,
1418  GetFlatBufferDataLayout(stridedSliceDescriptor.m_DataLayout));
1419 
1420  auto flatBufferLayer = serializer::CreateStridedSliceLayer(m_flatBufferBuilder,
1421  flatBufferBaseLayer,
1422  flatBufferDescriptor);
1423 
1424  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_StridedSliceLayer);
1425 }
1426 
1427 void SerializerStrategy::SerializeSubtractionLayer(const armnn::IConnectableLayer* layer, const char* name)
1428 {
1429  IgnoreUnused(name);
1430 
1431  auto fbSubtractionBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Subtraction);
1432  auto fbSubtractionLayer = serializer::CreateSubtractionLayer(m_flatBufferBuilder, fbSubtractionBaseLayer);
1433 
1434  CreateAnyLayer(fbSubtractionLayer.o, serializer::Layer::Layer_SubtractionLayer);
1435 }
1436 
1437 void SerializerStrategy::SerializeSwitchLayer(const armnn::IConnectableLayer* layer, const char* name)
1438 {
1439  IgnoreUnused(name);
1440 
1441  auto fbSwitchBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Switch);
1442  auto fbSwitchLayer = serializer::CreateSwitchLayer(m_flatBufferBuilder, fbSwitchBaseLayer);
1443 
1444  CreateAnyLayer(fbSwitchLayer.o, serializer::Layer::Layer_SwitchLayer);
1445 }
1446 
1447 void SerializerStrategy::SerializeTileLayer(const armnn::IConnectableLayer* layer,
1448  const armnn::TileDescriptor& descriptor,
1449  const char* name)
1450 {
1451  IgnoreUnused(name);
1452 
1453  // Create FlatBuffer BaseLayer
1454  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Tile);
1455 
1456  auto flatBufferDesc = serializer::CreateTileDescriptor(m_flatBufferBuilder,
1457  m_flatBufferBuilder.CreateVector(descriptor.m_Multiples));
1458 
1459  // Create the FlatBuffer TileLayer
1460  auto flatBufferLayer = serializer::CreateTileLayer(m_flatBufferBuilder,
1461  flatBufferBaseLayer,
1462  flatBufferDesc);
1463 
1464  // Add the AnyLayer to the FlatBufferLayers
1465  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_TileLayer);
1466 }
1467 
1468 void SerializerStrategy::SerializeTransposeConvolution2dLayer(
1469  const armnn::IConnectableLayer* layer,
1470  const armnn::TransposeConvolution2dDescriptor& descriptor,
1471  const std::vector<armnn::ConstTensor>& constants,
1472  const char* name)
1473 {
1474  IgnoreUnused(name);
1475 
1476  const armnn::ConstTensor& weights = constants.at(0);
1477 
1478  auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Convolution2d);
1479  auto fbDescriptor = CreateTransposeConvolution2dDescriptor(m_flatBufferBuilder,
1480  descriptor.m_PadLeft,
1481  descriptor.m_PadRight,
1482  descriptor.m_PadTop,
1483  descriptor.m_PadBottom,
1484  descriptor.m_StrideX,
1485  descriptor.m_StrideY,
1486  descriptor.m_BiasEnabled,
1487  GetFlatBufferDataLayout(descriptor.m_DataLayout));
1488 
1489  // weights & biases
1490  auto fbWeightsConstTensorInfo = CreateConstTensorInfo(weights);
1491  flatbuffers::Offset<serializer::ConstTensor> fbBiasesConstTensorInfo;
1492  if (constants.size() > 1)
1493  {
1494  const armnn::ConstTensor& biases = constants.at(1);
1495  fbBiasesConstTensorInfo = CreateConstTensorInfo(biases);
1496  }
1497 
1498  auto fbLayer = CreateTransposeConvolution2dLayer(m_flatBufferBuilder,
1499  fbBaseLayer,
1500  fbDescriptor,
1501  fbWeightsConstTensorInfo,
1502  fbBiasesConstTensorInfo);
1503 
1504  CreateAnyLayer(fbLayer.o, serializer::Layer::Layer_TransposeConvolution2dLayer);
1505 }
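// Note (editorial, assumption about FlatBuffers behaviour): when only the
// weights are supplied (constants.size() == 1) the bias offset above stays
// default-constructed, i.e. zero. Passing a zero flatbuffers::Offset to the
// generated Create call leaves the optional bias field unset in the buffer,
// which is how a reader can tell that no bias tensor is present.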
1506 
1507 void SerializerStrategy::SerializeTransposeLayer(const armnn::IConnectableLayer* layer,
1508  const armnn::TransposeDescriptor& descriptor,
1509  const char* name)
1510 {
1511  IgnoreUnused(name);
1512 
1513  // Create FlatBuffer BaseLayer
1514  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Transpose);
1515 
1516  std::vector<unsigned int> dimMappings;
1517  for (unsigned int i=0; i<descriptor.m_DimMappings.GetSize(); ++i)
1518  {
1519  dimMappings.push_back(descriptor.m_DimMappings[i]);
1520  }
1521 
1522  auto flatBufferDesc = serializer::CreateTransposeDescriptor(m_flatBufferBuilder,
1523  m_flatBufferBuilder.CreateVector(dimMappings));
1524 
1525  // Create the FlatBuffer TransposeLayer
1526  auto flatBufferLayer = serializer::CreateTransposeLayer(m_flatBufferBuilder,
1527  flatBufferBaseLayer,
1528  flatBufferDesc);
1529 
1530  // Add the AnyLayer to the FlatBufferLayers
1531  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_TransposeLayer);
1532 }
1533 
1534 void SerializerStrategy::SerializeQLstmLayer(const armnn::IConnectableLayer* layer,
1535  const armnn::QLstmDescriptor& descriptor,
1536  const std::vector<armnn::ConstTensor>& constants,
1537  const char* name)
1538 {
1539  IgnoreUnused(name);
1540 
1541  auto fbQLstmBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_QLstm);
1542 
1543  auto fbQLstmDescriptor = serializer::CreateQLstmDescriptor(
1544  m_flatBufferBuilder,
1545  descriptor.m_CifgEnabled,
1546  descriptor.m_PeepholeEnabled,
1547  descriptor.m_ProjectionEnabled,
1548  descriptor.m_LayerNormEnabled,
1549  descriptor.m_CellClip,
1550  descriptor.m_ProjectionClip,
1551  descriptor.m_InputIntermediateScale,
1552  descriptor.m_ForgetIntermediateScale,
1553  descriptor.m_CellIntermediateScale,
1554  descriptor.m_OutputIntermediateScale,
1555  descriptor.m_HiddenStateZeroPoint,
1556  descriptor.m_HiddenStateScale
1557  );
1558 
1559  // Index for constants vector
1560  std::size_t i = 0;
1561 
1562  // Mandatory params
1563  auto inputToForgetWeights = CreateConstTensorInfo(constants[i++]); //InputToForgetWeights
1564  auto inputToCellWeights = CreateConstTensorInfo(constants[i++]); //InputToCellWeights
1565  auto inputToOutputWeights = CreateConstTensorInfo(constants[i++]); //InputToOutputWeights
1566  auto recurrentToForgetWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToForgetWeights
1567  auto recurrentToCellWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToCellWeights
1568  auto recurrentToOutputWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToOutputWeights
1569  auto forgetGateBias = CreateConstTensorInfo(constants[i++]); //ForgetGateBias
1570  auto cellBias = CreateConstTensorInfo(constants[i++]); //CellBias
1571  auto outputGateBias = CreateConstTensorInfo(constants[i++]); //OutputGateBias
1572 
1573  // CIFG
1574  flatbuffers::Offset<serializer::ConstTensor> inputToInputWeights;
1575  flatbuffers::Offset<serializer::ConstTensor> recurrentToInputWeights;
1576  flatbuffers::Offset<serializer::ConstTensor> inputGateBias;
1577 
1578  if (!descriptor.m_CifgEnabled)
1579  {
1580  inputToInputWeights = CreateConstTensorInfo(constants[i++]); //InputToInputWeights
1581  recurrentToInputWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToInputWeights
1582  inputGateBias = CreateConstTensorInfo(constants[i++]); //InputGateBias
1583  }
1584 
1585  // Peephole
1586  flatbuffers::Offset<serializer::ConstTensor> cellToInputWeights;
1587  flatbuffers::Offset<serializer::ConstTensor> cellToForgetWeights;
1588  flatbuffers::Offset<serializer::ConstTensor> cellToOutputWeights;
1589 
1590  if (descriptor.m_PeepholeEnabled)
1591  {
1592  if (!descriptor.m_CifgEnabled)
1593  {
1594  cellToInputWeights = CreateConstTensorInfo(constants[i++]); //CellToInputWeights
1595  }
1596  cellToForgetWeights = CreateConstTensorInfo(constants[i++]); //CellToForgetWeights
1597  cellToOutputWeights = CreateConstTensorInfo(constants[i++]); //CellToOutputWeights
1598  }
1599 
1600  // Projection
1601  flatbuffers::Offset<serializer::ConstTensor> projectionWeights;
1602  flatbuffers::Offset<serializer::ConstTensor> projectionBias;
1603 
1604  if (descriptor.m_ProjectionEnabled)
1605  {
1606  projectionWeights = CreateConstTensorInfo(constants[i++]); //ProjectionWeights
1607  projectionBias = CreateConstTensorInfo(constants[i++]); //ProjectionBias
1608  }
1609 
1610  // Layer norm
1611  flatbuffers::Offset<serializer::ConstTensor> inputLayerNormWeights;
1612  flatbuffers::Offset<serializer::ConstTensor> forgetLayerNormWeights;
1613  flatbuffers::Offset<serializer::ConstTensor> cellLayerNormWeights;
1614  flatbuffers::Offset<serializer::ConstTensor> outputLayerNormWeights;
1615 
1616  if (descriptor.m_LayerNormEnabled)
1617  {
1618  if (!descriptor.m_CifgEnabled)
1619  {
1620  inputLayerNormWeights = CreateConstTensorInfo(constants[i++]); //InputLayerNormWeights
1621  }
1622  forgetLayerNormWeights = CreateConstTensorInfo(constants[i++]); //ForgetLayerNormWeights
1623  cellLayerNormWeights = CreateConstTensorInfo(constants[i++]); //CellLayerNormWeights
1624  outputLayerNormWeights = CreateConstTensorInfo(constants[i++]); //OutputLayerNormWeights
1625  }
1626 
1627  auto fbQLstmParams = serializer::CreateQLstmInputParams(
1628  m_flatBufferBuilder,
1629  inputToForgetWeights,
1630  inputToCellWeights,
1631  inputToOutputWeights,
1632  recurrentToForgetWeights,
1633  recurrentToCellWeights,
1634  recurrentToOutputWeights,
1635  forgetGateBias,
1636  cellBias,
1637  outputGateBias,
1638  inputToInputWeights,
1639  recurrentToInputWeights,
1640  inputGateBias,
1641  projectionWeights,
1642  projectionBias,
1643  cellToInputWeights,
1644  cellToForgetWeights,
1645  cellToOutputWeights,
1646  inputLayerNormWeights,
1647  forgetLayerNormWeights,
1648  cellLayerNormWeights,
1649  outputLayerNormWeights);
1650 
1651  auto fbQLstmLayer = serializer::CreateQLstmLayer(
1652  m_flatBufferBuilder,
1653  fbQLstmBaseLayer,
1654  fbQLstmDescriptor,
1655  fbQLstmParams);
1656 
1657  CreateAnyLayer(fbQLstmLayer.o, serializer::Layer::Layer_QLstmLayer);
1658 }
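// Note (editorial): the constants vector is consumed strictly in the order read
// above - the nine mandatory gate/recurrent weights and biases first, then the
// CIFG tensors (only when m_CifgEnabled is false), then the peephole,
// projection and layer-normalisation groups, each present only when the
// corresponding descriptor flag enables it.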
1659 
1660 void SerializerStrategy::SerializeQuantizedLstmLayer(const armnn::IConnectableLayer* layer,
1661  const std::vector<armnn::ConstTensor>& constants,
1662  const char* name)
1663 {
1664  IgnoreUnused(name);
1665 
1666  auto fbQuantizedLstmBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_QuantizedLstm);
1667 
1668  // index for constants vector
1669  size_t i = 0;
1670 
1671  // Get input parameters
1672  auto inputToInputWeights = CreateConstTensorInfo(constants[i++]);
1673  auto inputToForgetWeights = CreateConstTensorInfo(constants[i++]);
1674  auto inputToCellWeights = CreateConstTensorInfo(constants[i++]);
1675  auto inputToOutputWeights = CreateConstTensorInfo(constants[i++]);
1676 
1677  auto recurrentToInputWeights = CreateConstTensorInfo(constants[i++]);
1678  auto recurrentToForgetWeights = CreateConstTensorInfo(constants[i++]);
1679  auto recurrentToCellWeights = CreateConstTensorInfo(constants[i++]);
1680  auto recurrentToOutputWeights = CreateConstTensorInfo(constants[i++]);
1681 
1682  auto inputGateBias = CreateConstTensorInfo(constants[i++]);
1683  auto forgetGateBias = CreateConstTensorInfo(constants[i++]);
1684  auto cellBias = CreateConstTensorInfo(constants[i++]);
1685  auto outputGateBias = CreateConstTensorInfo(constants[i++]);
1686 
1687  auto fbQuantizedLstmParams = serializer::CreateQuantizedLstmInputParams(
1688  m_flatBufferBuilder,
1689  inputToInputWeights,
1690  inputToForgetWeights,
1691  inputToCellWeights,
1692  inputToOutputWeights,
1693  recurrentToInputWeights,
1694  recurrentToForgetWeights,
1695  recurrentToCellWeights,
1696  recurrentToOutputWeights,
1697  inputGateBias,
1698  forgetGateBias,
1699  cellBias,
1700  outputGateBias);
1701 
1702  auto fbQuantizedLstmLayer = serializer::CreateQuantizedLstmLayer(
1703  m_flatBufferBuilder,
1704  fbQuantizedLstmBaseLayer,
1705  fbQuantizedLstmParams);
1706 
1707  CreateAnyLayer(fbQuantizedLstmLayer.o, serializer::Layer::Layer_QuantizedLstmLayer);
1708 }
1709 
1710 void SerializerStrategy::SerializeUnidirectionalSequenceLstmLayer(
1711  const armnn::IConnectableLayer* layer,
1712  const armnn::UnidirectionalSequenceLstmDescriptor& descriptor,
1713  const std::vector<armnn::ConstTensor>& constants,
1714  const char* name)
1715 {
1716  IgnoreUnused(name);
1717 
1718  auto fbUnidirectionalSequenceLstmBaseLayer =
1719  CreateLayerBase(layer, serializer::LayerType::LayerType_UnidirectionalSequenceLstm);
1720 
1721  auto fbUnidirectionalSequenceLstmDescriptor = serializer::CreateUnidirectionalSequenceLstmDescriptor(
1722  m_flatBufferBuilder,
1723  descriptor.m_ActivationFunc,
1724  descriptor.m_ClippingThresCell,
1725  descriptor.m_ClippingThresProj,
1726  descriptor.m_CifgEnabled,
1727  descriptor.m_PeepholeEnabled,
1728  descriptor.m_ProjectionEnabled,
1729  descriptor.m_LayerNormEnabled,
1730  descriptor.m_TimeMajor);
1731 
1732  // Index for constants vector
1733  std::size_t i = 0;
1734 
1735  // Get mandatory/basic input parameters
1736  auto inputToForgetWeights = CreateConstTensorInfo(constants[i++]); //InputToForgetWeights
1737  auto inputToCellWeights = CreateConstTensorInfo(constants[i++]); //InputToCellWeights
1738  auto inputToOutputWeights = CreateConstTensorInfo(constants[i++]); //InputToOutputWeights
1739  auto recurrentToForgetWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToForgetWeights
1740  auto recurrentToCellWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToCellWeights
1741  auto recurrentToOutputWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToOutputWeights
1742  auto forgetGateBias = CreateConstTensorInfo(constants[i++]); //ForgetGateBias
1743  auto cellBias = CreateConstTensorInfo(constants[i++]); //CellBias
1744  auto outputGateBias = CreateConstTensorInfo(constants[i++]); //OutputGateBias
1745 
1746  // Define optional parameters; these will be set depending on the configuration in the LSTM descriptor
1747  flatbuffers::Offset<serializer::ConstTensor> inputToInputWeights;
1748  flatbuffers::Offset<serializer::ConstTensor> recurrentToInputWeights;
1749  flatbuffers::Offset<serializer::ConstTensor> cellToInputWeights;
1750  flatbuffers::Offset<serializer::ConstTensor> inputGateBias;
1751  flatbuffers::Offset<serializer::ConstTensor> projectionWeights;
1752  flatbuffers::Offset<serializer::ConstTensor> projectionBias;
1753  flatbuffers::Offset<serializer::ConstTensor> cellToForgetWeights;
1754  flatbuffers::Offset<serializer::ConstTensor> cellToOutputWeights;
1755  flatbuffers::Offset<serializer::ConstTensor> inputLayerNormWeights;
1756  flatbuffers::Offset<serializer::ConstTensor> forgetLayerNormWeights;
1757  flatbuffers::Offset<serializer::ConstTensor> cellLayerNormWeights;
1758  flatbuffers::Offset<serializer::ConstTensor> outputLayerNormWeights;
1759 
1760  if (!descriptor.m_CifgEnabled)
1761  {
1762  inputToInputWeights = CreateConstTensorInfo(constants[i++]); //InputToInputWeights
1763  recurrentToInputWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToInputWeights
1764  inputGateBias = CreateConstTensorInfo(constants[i++]); //InputGateBias
1765  }
1766 
1767  if (descriptor.m_PeepholeEnabled)
1768  {
1769  if (!descriptor.m_CifgEnabled)
1770  {
1771  cellToInputWeights = CreateConstTensorInfo(constants[i++]); //CellToInputWeights
1772  }
1773  cellToForgetWeights = CreateConstTensorInfo(constants[i++]); //CellToForgetWeights
1774  cellToOutputWeights = CreateConstTensorInfo(constants[i++]); //CellToOutputWeights
1775  }
1776 
1777  if (descriptor.m_ProjectionEnabled)
1778  {
1779  projectionWeights = CreateConstTensorInfo(constants[i++]); //ProjectionWeights
1780  projectionBias = CreateConstTensorInfo(constants[i++]); //ProjectionBias
1781  }
1782 
1783  if (descriptor.m_LayerNormEnabled)
1784  {
1785  if (!descriptor.m_CifgEnabled)
1786  {
1787  inputLayerNormWeights = CreateConstTensorInfo(constants[i++]); //InputLayerNormWeights
1788  }
1789  forgetLayerNormWeights = CreateConstTensorInfo(constants[i++]); //ForgetLayerNormWeights
1790  cellLayerNormWeights = CreateConstTensorInfo(constants[i++]); //CellLayerNormWeights
1791  outputLayerNormWeights = CreateConstTensorInfo(constants[i++]); //OutputLayerNormWeights
1792  }
1793 
1794  auto fbUnidirectionalSequenceLstmParams = serializer::CreateLstmInputParams(
1795  m_flatBufferBuilder,
1796  inputToForgetWeights,
1797  inputToCellWeights,
1798  inputToOutputWeights,
1799  recurrentToForgetWeights,
1800  recurrentToCellWeights,
1801  recurrentToOutputWeights,
1802  forgetGateBias,
1803  cellBias,
1804  outputGateBias,
1805  inputToInputWeights,
1806  recurrentToInputWeights,
1807  cellToInputWeights,
1808  inputGateBias,
1809  projectionWeights,
1810  projectionBias,
1811  cellToForgetWeights,
1812  cellToOutputWeights,
1813  inputLayerNormWeights,
1814  forgetLayerNormWeights,
1815  cellLayerNormWeights,
1816  outputLayerNormWeights);
1817 
1818  auto fbUnidirectionalSequenceLstmLayer = serializer::CreateUnidirectionalSequenceLstmLayer(
1819  m_flatBufferBuilder,
1820  fbUnidirectionalSequenceLstmBaseLayer,
1821  fbUnidirectionalSequenceLstmDescriptor,
1822  fbUnidirectionalSequenceLstmParams);
1823 
1824  CreateAnyLayer(fbUnidirectionalSequenceLstmLayer.o, serializer::Layer::Layer_UnidirectionalSequenceLstmLayer);
1825 }
1826 
1827 fb::Offset<serializer::LayerBase> SerializerStrategy::CreateLayerBase(const IConnectableLayer* layer,
1828  const serializer::LayerType layerType)
1829 {
1830 
1831  uint32_t fbIndex = GetSerializedId(layer->GetGuid());
1832 
1833  std::vector<fb::Offset<serializer::InputSlot>> inputSlots = CreateInputSlots(layer);
1834  std::vector<fb::Offset<serializer::OutputSlot>> outputSlots = CreateOutputSlots(layer);
1835 
1836  return serializer::CreateLayerBase(m_flatBufferBuilder,
1837  fbIndex,
1838  m_flatBufferBuilder.CreateString(layer->GetName()),
1839  layerType,
1840  m_flatBufferBuilder.CreateVector(inputSlots),
1841  m_flatBufferBuilder.CreateVector(outputSlots));
1842 }
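// Note (editorial): the serialized LayerBase stores the dense, zero-based index
// returned by GetSerializedId(layer->GetGuid()) rather than the raw GUID, so
// connections in the saved graph can refer to layers by position; repeated
// lookups for the same GUID return the same index.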
1843 
1844 void SerializerStrategy::CreateAnyLayer(const flatbuffers::Offset<void>& layer, const serializer::Layer serializerLayer)
1845 {
1846 
1847  auto anyLayer = armnnSerializer::CreateAnyLayer(m_flatBufferBuilder, serializerLayer, layer);
1848  m_serializedLayers.push_back(anyLayer);
1849 }
1850 
1851 template <typename T>
1852 flatbuffers::Offset<flatbuffers::Vector<T>> SerializerStrategy::CreateDataVector(const void* memory, unsigned int size)
1853 {
1854  const T* buffer = reinterpret_cast<const T*>(memory);
1855  std::vector<T> vector(buffer, buffer + (size / sizeof(T)));
1856  auto fbVector = m_flatBufferBuilder.CreateVector(vector);
1857  return fbVector;
1858 }
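// Illustrative example (hypothetical values, not part of the original source):
// CreateDataVector<T> reinterprets the tensor's raw memory as size / sizeof(T)
// elements of T, so the byte count is expected to be a multiple of sizeof(T):
//
//     int32_t raw[3] = {1, 2, 3};
//     // CreateDataVector<int32_t>(raw, 12) copies 12 / 4 = 3 int32_t values
//     // into the FlatBuffer vector.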
1859 
1860 flatbuffers::Offset<TensorInfo> SerializerStrategy::CreateTensorInfo(const armnn::TensorInfo& tensorInfo)
1861 {
1862  // Get the dimensions
1863  std::vector<unsigned int> shape;
1864  std::vector<bool> specificity;
1865  // This assumes that the TensorShape constructors have ensured that the size of m_DimensionsSpecificity
1866  // matches the size of dimensions.
1867  for(unsigned int dim = 0; dim < tensorInfo.GetShape().GetNumDimensions(); ++dim)
1868  {
1869  specificity.push_back(tensorInfo.GetShape().GetDimensionSpecificity(dim));
1870 
1871  if (tensorInfo.GetShape().GetDimensionSpecificity(dim))
1872  {
1873  shape.push_back(tensorInfo.GetShape()[dim]);
1874  }
1875  else
1876  {
1877  shape.push_back(0);
1878  }
1879  }
1880 
1881  if (tensorInfo.HasPerAxisQuantization())
1882  {
1883  // Create FlatBuffer TensorInfo
1884  auto flatBufferTensorInfo =
1885  serializer::CreateTensorInfo(m_flatBufferBuilder,
1886  m_flatBufferBuilder.CreateVector(shape),
1887  GetFlatBufferDataType(tensorInfo.GetDataType()),
1888  tensorInfo.GetQuantizationScales()[0],
1889  tensorInfo.GetQuantizationOffset(),
1890  m_flatBufferBuilder.CreateVector(tensorInfo.GetQuantizationScales()),
1891  tensorInfo.GetQuantizationDim().value(),
1892  static_cast<unsigned int>
1893  (tensorInfo.GetShape().GetDimensionality()),
1894  m_flatBufferBuilder.CreateVector(specificity));
1895  return flatBufferTensorInfo;
1896  }
1897 
1898  // Create FlatBuffer TensorInfo
1899  auto flatBufferTensorInfo = serializer::CreateTensorInfo(m_flatBufferBuilder,
1900  m_flatBufferBuilder.CreateVector(shape),
1901  GetFlatBufferDataType(tensorInfo.GetDataType()),
1902  tensorInfo.GetQuantizationScale(),
1903  tensorInfo.GetQuantizationOffset(),
1904  0,
1905  0,
1906  static_cast<unsigned int>
1907  (tensorInfo.GetShape().GetDimensionality()),
1908  m_flatBufferBuilder.CreateVector(specificity));
1909  return flatBufferTensorInfo;
1910 }
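// Note (editorial): dimensions whose size is unknown are written as 0 together
// with a 'false' entry in the specificity vector, so a reader can tell an
// unspecified dimension apart from a real size. Per-axis quantized tensors take
// the first branch so the full scale vector and the quantization dimension are
// preserved; otherwise only the scalar scale/offset pair is written.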
1911 
1912 flatbuffers::Offset<serializer::ConstTensor>
1913  SerializerStrategy::CreateConstTensorInfo(const armnn::ConstTensor& constTensor)
1914 {
1915  armnn::TensorInfo tensorInfo = constTensor.GetInfo();
1916 
1917  flatbuffers::Offset<void> fbPayload;
1918 
1919  switch (tensorInfo.GetDataType())
1920  {
1921  case armnn::DataType::Signed64:
1922  {
1923  auto fbVector = CreateDataVector<int64_t>(constTensor.GetMemoryArea(), constTensor.GetNumBytes());
1924  flatbuffers::Offset<serializer::LongData> flatBuffersData = serializer::CreateLongData(
1925  m_flatBufferBuilder,
1926  fbVector);
1927  fbPayload = flatBuffersData.o;
1928  break;
1929  }
1930  case armnn::DataType::Float32:
1931  case armnn::DataType::Signed32:
1932  {
1933  auto fbVector = CreateDataVector<int32_t>(constTensor.GetMemoryArea(), constTensor.GetNumBytes());
1934  flatbuffers::Offset<serializer::IntData> flatBuffersData = serializer::CreateIntData(
1935  m_flatBufferBuilder,
1936  fbVector);
1937  fbPayload = flatBuffersData.o;
1938  break;
1939  }
1940  case armnn::DataType::Float16:
1941  case armnn::DataType::BFloat16:
1942  case armnn::DataType::QSymmS16:
1943  {
1944  auto fbVector = CreateDataVector<int16_t>(constTensor.GetMemoryArea(), constTensor.GetNumBytes());
1945  flatbuffers::Offset<serializer::ShortData> flatBuffersData = serializer::CreateShortData(
1946  m_flatBufferBuilder,
1947  fbVector);
1948  fbPayload = flatBuffersData.o;
1949  break;
1950  }
1951  case armnn::DataType::QSymmS8:
1952  case armnn::DataType::QAsymmS8:
1953  case armnn::DataType::QAsymmU8:
1954  case armnn::DataType::Boolean:
1955  default:
1956  {
1957  auto fbVector = CreateDataVector<int8_t>(constTensor.GetMemoryArea(), constTensor.GetNumBytes());
1958  flatbuffers::Offset<serializer::ByteData> flatBuffersData = serializer::CreateByteData(
1959  m_flatBufferBuilder,
1960  fbVector);
1961  fbPayload = flatBuffersData.o;
1962  }
1963  }
1964  flatbuffers::Offset<serializer::ConstTensor> flatBufferConstTensor = serializer::CreateConstTensor(
1965  m_flatBufferBuilder,
1966  CreateTensorInfo(tensorInfo),
1967  GetFlatBufferConstTensorData(tensorInfo.GetDataType()),
1968  fbPayload);
1969  return flatBufferConstTensor;
1970 }
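// Note (editorial): the payload container is picked purely by element width:
// 8-byte types go to LongData, 4-byte types (Float32, Signed32) to IntData,
// 2-byte types to ShortData, and everything else, including the 8-bit quantized
// types, falls through to ByteData. The TensorInfo and the
// GetFlatBufferConstTensorData tag tell the reader how to reinterpret the raw
// bytes.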
1971 
1972 flatbuffers::Offset<armnnSerializer::FeatureCompatibilityVersions> SerializerStrategy::GetVersionTable()
1973 {
1974  flatbuffers::Offset<armnnSerializer::FeatureCompatibilityVersions> versionsTable =
1975  serializer::CreateFeatureCompatibilityVersions(
1976  m_flatBufferBuilder,
1977  1, // Binding ids scheme version
1978  1, // Weights layout scheme version
1979  1 // Constant tensors as inputs version
1980  );
1981  return versionsTable;
1982 }
1983 
1984 std::vector<fb::Offset<serializer::InputSlot>>
1985  SerializerStrategy::CreateInputSlots(const armnn::IConnectableLayer* layer)
1986 {
1987  std::vector<fb::Offset<serializer::InputSlot>> inputSlots;
1988 
1989  // Get the InputSlots
1990  for (unsigned int slotIndex = 0; slotIndex<layer->GetNumInputSlots(); ++slotIndex)
1991  {
1992  const IInputSlot& inputSlot = layer->GetInputSlot(slotIndex);
1993 
1994  // Get the Connection for the InputSlot
1995  const IOutputSlot* connection = inputSlot.GetConnection();
1996  bool isOverridden = inputSlot.IsTensorInfoOverridden();
1997 
1998  flatbuffers::Offset<TensorInfo> overriddenTensorInfo = CreateTensorInfo(inputSlot.GetTensorInfo());
1999 
2000  // Create FlatBuffer Connection
2001  serializer::Connection conn(GetSerializedId(inputSlot.GetConnection()->GetOwningLayerGuid()),
2002  connection->CalculateIndexOnOwner());
2003  // Create FlatBuffer InputSlot
2004  inputSlots.push_back(serializer::CreateInputSlot(m_flatBufferBuilder, slotIndex, &conn, isOverridden,
2005  overriddenTensorInfo));
2006  }
2007  return inputSlots;
2008 }
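// Note (editorial): each serialized InputSlot stores the producing layer's
// dense serialized id plus the index of the output slot on that producer, which
// is all the deserializer needs to rebuild the connection. The overridden
// TensorInfo is written unconditionally, but it is only meaningful when
// isOverridden is true.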
2009 
2010 std::vector<fb::Offset<serializer::OutputSlot>>
2011  SerializerStrategy::CreateOutputSlots(const armnn::IConnectableLayer* layer)
2012 {
2013  std::vector<fb::Offset<serializer::OutputSlot>> outputSlots;
2014 
2015  // Get the OutputSlots
2016  for (unsigned int slotIndex = 0; slotIndex < layer->GetNumOutputSlots(); ++slotIndex)
2017  {
2018  const IOutputSlot& outputSlot = layer->GetOutputSlot(slotIndex);
2019  const armnn::TensorInfo& tensorInfo = outputSlot.GetTensorInfo();
2020 
2021  // Create FlatBuffer Outputslot
2022  outputSlots.push_back(serializer::CreateOutputSlot(m_flatBufferBuilder,
2023  slotIndex,
2024  CreateTensorInfo(tensorInfo)));
2025  }
2026  return outputSlots;
2027 }
2028 
2029 void SerializerStrategy::ExecuteStrategy(const armnn::IConnectableLayer* layer,
2030  const BaseDescriptor& descriptor,
2031  const std::vector<armnn::ConstTensor>& constants,
2032  const char* name,
2033  const armnn::LayerBindingId id)
2034 {
2035  IgnoreUnused(constants);
2036 
2037  switch (layer->GetType())
2038  {
2039  case armnn::LayerType::Activation :
2040  {
2041  const armnn::ActivationDescriptor& layerDescriptor =
2042  static_cast<const armnn::ActivationDescriptor&>(descriptor);
2043  SerializeActivationLayer(layer, layerDescriptor, name);
2044  break;
2045  }
2046  case armnn::LayerType::Addition :
2047  {
2048  SerializeAdditionLayer(layer, name);
2049  break;
2050  }
2051  case armnn::LayerType::ArgMinMax :
2052  {
2053  const armnn::ArgMinMaxDescriptor& layerDescriptor =
2054  static_cast<const armnn::ArgMinMaxDescriptor&>(descriptor);
2055  SerializeArgMinMaxLayer(layer, layerDescriptor, name);
2056  break;
2057  }
2058  case armnn::LayerType::BatchMatMul :
2059  {
2060  const armnn::BatchMatMulDescriptor& layerDescriptor =
2061  static_cast<const armnn::BatchMatMulDescriptor&>(descriptor);
2062  SerializeBatchMatMulLayer(layer,
2063  layerDescriptor,
2064  name);
2065  break;
2066  }
2067  case armnn::LayerType::BatchNormalization :
2068  {
2069  const armnn::BatchNormalizationDescriptor& layerDescriptor =
2070  static_cast<const armnn::BatchNormalizationDescriptor&>(descriptor);
2071  SerializeBatchNormalizationLayer(layer,
2072  layerDescriptor,
2073  constants,
2074  name);
2075  break;
2076  }
2077  case armnn::LayerType::BatchToSpaceNd :
2078  {
2079  const armnn::BatchToSpaceNdDescriptor& layerDescriptor =
2080  static_cast<const armnn::BatchToSpaceNdDescriptor&>(descriptor);
2081  SerializeBatchToSpaceNdLayer(layer,
2082  layerDescriptor,
2083  name);
2084  break;
2085  }
2086  case armnn::LayerType::Cast :
2087  {
2088  SerializeCastLayer(layer, name);
2089  break;
2090  }
2091  case armnn::LayerType::ChannelShuffle :
2092  {
2093  const armnn::ChannelShuffleDescriptor& layerDescriptor =
2094  static_cast<const armnn::ChannelShuffleDescriptor&>(descriptor);
2095  SerializeChannelShuffleLayer(layer,
2096  layerDescriptor,
2097  name);
2098  break;
2099  }
2100  case armnn::LayerType::Comparison :
2101  {
2102  const armnn::ComparisonDescriptor& layerDescriptor =
2103  static_cast<const armnn::ComparisonDescriptor&>(descriptor);
2104  SerializeComparisonLayer(layer,
2105  layerDescriptor,
2106  name);
2107  break;
2108  }
2109  case armnn::LayerType::Concat :
2110  {
2111  const armnn::ConcatDescriptor& layerDescriptor =
2112  static_cast<const armnn::ConcatDescriptor&>(descriptor);
2113  SerializeConcatLayer(layer,
2114  layerDescriptor,
2115  name);
2116  break;
2117  }
2118  case armnn::LayerType::Constant :
2119  {
2120  SerializeConstantLayer(layer,
2121  constants,
2122  name);
2123  break;
2124  }
2125  case armnn::LayerType::Convolution2d :
2126  {
2127  const armnn::Convolution2dDescriptor& layerDescriptor =
2128  static_cast<const armnn::Convolution2dDescriptor&>(descriptor);
2129  SerializeConvolution2dLayer(layer,
2130  layerDescriptor,
2131  name);
2132  break;
2133  }
2134  case armnn::LayerType::Convolution3d :
2135  {
2136  const armnn::Convolution3dDescriptor& layerDescriptor =
2137  static_cast<const armnn::Convolution3dDescriptor&>(descriptor);
2138  SerializeConvolution3dLayer(layer,
2139  layerDescriptor,
2140  name);
2141  break;
2142  }
2143  case armnn::LayerType::DepthToSpace :
2144  {
2145  const armnn::DepthToSpaceDescriptor& layerDescriptor =
2146  static_cast<const armnn::DepthToSpaceDescriptor&>(descriptor);
2147  SerializeDepthToSpaceLayer(layer,
2148  layerDescriptor,
2149  name);
2150  break;
2151  }
2152  case armnn::LayerType::DepthwiseConvolution2d :
2153  {
2154  const armnn::DepthwiseConvolution2dDescriptor& layerDescriptor =
2155  static_cast<const armnn::DepthwiseConvolution2dDescriptor&>(descriptor);
2156  SerializeDepthwiseConvolution2dLayer(layer,
2157  layerDescriptor,
2158  name);
2159  break;
2160  }
2161  case armnn::LayerType::Dequantize :
2162  {
2163  SerializeDequantizeLayer(layer,
2164  name);
2165  break;
2166  }
2167  case armnn::LayerType::DetectionPostProcess :
2168  {
2169  const armnn::DetectionPostProcessDescriptor& layerDescriptor =
2170  static_cast<const armnn::DetectionPostProcessDescriptor&>(descriptor);
2171  SerializeDetectionPostProcessLayer(layer, layerDescriptor, constants, name);
2172  break;
2173  }
2174  case armnn::LayerType::Division :
2175  {
2176  SerializeDivisionLayer(layer, name);
2177  break;
2178  }
2179  case armnn::LayerType::ElementwiseBinary :
2180  {
2181  const armnn::ElementwiseBinaryDescriptor& layerDescriptor =
2182  static_cast<const armnn::ElementwiseBinaryDescriptor&>(descriptor);
2183  SerializeElementwiseBinaryLayer(layer, layerDescriptor, name);
2184  break;
2185  }
2186  case armnn::LayerType::ElementwiseUnary :
2187  {
2188  const armnn::ElementwiseUnaryDescriptor& layerDescriptor =
2189  static_cast<const armnn::ElementwiseUnaryDescriptor&>(descriptor);
2190  SerializeElementwiseUnaryLayer(layer, layerDescriptor, name);
2191  break;
2192  }
2193  case armnn::LayerType::Fill :
2194  {
2195  const armnn::FillDescriptor& layerDescriptor =
2196  static_cast<const armnn::FillDescriptor&>(descriptor);
2197  SerializeFillLayer(layer, layerDescriptor, name);
2198  break;
2199  }
2200  case armnn::LayerType::Floor :
2201  {
2202  SerializeFloorLayer(layer, name);
2203  break;
2204  }
2205  case armnn::LayerType::FullyConnected :
2206  {
2207  const armnn::FullyConnectedDescriptor& layerDescriptor =
2208  static_cast<const armnn::FullyConnectedDescriptor&>(descriptor);
2209  SerializeFullyConnectedLayer(layer, layerDescriptor, name);
2210  break;
2211  }
2212  case armnn::LayerType::Gather :
2213  {
2214  const armnn::GatherDescriptor& layerDescriptor =
2215  static_cast<const armnn::GatherDescriptor&>(descriptor);
2216  SerializeGatherLayer(layer, layerDescriptor, name);
2217  break;
2218  }
2219  case armnn::LayerType::GatherNd :
2220  {
2221  SerializeGatherNdLayer(layer, name);
2222  break;
2223  }
2224  case armnn::LayerType::Input :
2225  {
2226  SerializeInputLayer(layer, id, name);
2227  break;
2228  }
2229  case armnn::LayerType::InstanceNormalization :
2230  {
2231  const armnn::InstanceNormalizationDescriptor& layerDescriptor =
2232  static_cast<const armnn::InstanceNormalizationDescriptor&>(descriptor);
2233  SerializeInstanceNormalizationLayer(layer, layerDescriptor, name);
2234  break;
2235  }
2236  case armnn::LayerType::L2Normalization :
2237  {
2238  const armnn::L2NormalizationDescriptor& layerDescriptor =
2239  static_cast<const armnn::L2NormalizationDescriptor&>(descriptor);
2240  SerializeL2NormalizationLayer(layer, layerDescriptor, name);
2241  break;
2242  }
2243  case armnn::LayerType::LogicalBinary :
2244  {
2245  const armnn::LogicalBinaryDescriptor& layerDescriptor =
2246  static_cast<const armnn::LogicalBinaryDescriptor&>(descriptor);
2247  SerializeLogicalBinaryLayer(layer, layerDescriptor, name);
2248  break;
2249  }
2250  case armnn::LayerType::LogSoftmax :
2251  {
2252  const armnn::LogSoftmaxDescriptor& layerDescriptor =
2253  static_cast<const armnn::LogSoftmaxDescriptor&>(descriptor);
2254  SerializeLogSoftmaxLayer(layer, layerDescriptor, name);
2255  break;
2256  }
2257  case armnn::LayerType::Lstm :
2258  {
2259  const armnn::LstmDescriptor& layerDescriptor =
2260  static_cast<const armnn::LstmDescriptor&>(descriptor);
2261  SerializeLstmLayer(layer, layerDescriptor, constants, name);
2262  break;
2263  }
2264  case armnn::LayerType::QLstm :
2265  {
2266  const armnn::QLstmDescriptor& layerDescriptor =
2267  static_cast<const armnn::QLstmDescriptor&>(descriptor);
2268  SerializeQLstmLayer(layer, layerDescriptor, constants, name);
2269  break;
2270  }
2271  case armnn::LayerType::Maximum :
2272  {
2273  SerializeMaximumLayer(layer, name);
2274  break;
2275  }
2276  case armnn::LayerType::Mean :
2277  {
2278  const armnn::MeanDescriptor& layerDescriptor =
2279  static_cast<const armnn::MeanDescriptor&>(descriptor);
2280  SerializeMeanLayer(layer, layerDescriptor, name);
2281  break;
2282  }
2283  case armnn::LayerType::Merge :
2284  {
2285  SerializeMergeLayer(layer, name);
2286  break;
2287  }
2288  case armnn::LayerType::Minimum :
2289  {
2290  SerializeMinimumLayer(layer, name);
2291  break;
2292  }
2293  case armnn::LayerType::Multiplication :
2294  {
2295  SerializeMultiplicationLayer(layer, name);
2296  break;
2297  }
2298  case armnn::LayerType::Normalization :
2299  {
2300  const armnn::NormalizationDescriptor& layerDescriptor =
2301  static_cast<const armnn::NormalizationDescriptor&>(descriptor);
2302  SerializeNormalizationLayer(layer, layerDescriptor, name);
2303  break;
2304  }
2305  case armnn::LayerType::Output :
2306  {
2307  SerializeOutputLayer(layer, id, name);
2308  break;
2309  }
2310  case armnn::LayerType::Pad :
2311  {
2312  const armnn::PadDescriptor& layerDescriptor =
2313  static_cast<const armnn::PadDescriptor&>(descriptor);
2314  SerializePadLayer(layer, layerDescriptor, name);
2315  break;
2316  }
2317  case armnn::LayerType::Permute :
2318  {
2319  const armnn::PermuteDescriptor& layerDescriptor =
2320  static_cast<const armnn::PermuteDescriptor&>(descriptor);
2321  SerializePermuteLayer(layer, layerDescriptor, name);
2322  break;
2323  }
2324  case armnn::LayerType::Pooling2d :
2325  {
2326  const armnn::Pooling2dDescriptor& layerDescriptor =
2327  static_cast<const armnn::Pooling2dDescriptor&>(descriptor);
2328  SerializePooling2dLayer(layer, layerDescriptor, name);
2329  break;
2330  }
2331  case armnn::LayerType::Pooling3d :
2332  {
2333  const armnn::Pooling3dDescriptor& layerDescriptor =
2334  static_cast<const armnn::Pooling3dDescriptor&>(descriptor);
2335  SerializePooling3dLayer(layer, layerDescriptor, name);
2336  break;
2337  }
2338  case armnn::LayerType::Prelu :
2339  {
2340  SerializePreluLayer(layer, name);
2341  break;
2342  }
2343  case armnn::LayerType::Quantize :
2344  {
2345  SerializeQuantizeLayer(layer, name);
2346  break;
2347  }
2348  case armnn::LayerType::QuantizedLstm :
2349  SerializeQuantizedLstmLayer(layer, constants, name);
2350  break;
2351  case armnn::LayerType::Reshape :
2352  {
2353  const armnn::ReshapeDescriptor &layerDescriptor =
2354  static_cast<const armnn::ReshapeDescriptor &>(descriptor);
2355  SerializeReshapeLayer(layer, layerDescriptor, name);
2356  break;
2357  }
2358  case armnn::LayerType::Rank :
2359  {
2360  SerializeRankLayer(layer, name);
2361  break;
2362  }
2363  case armnn::LayerType::Reduce :
2364  {
2365  const armnn::ReduceDescriptor& layerDescriptor =
2366  static_cast<const armnn::ReduceDescriptor&>(descriptor);
2367  SerializeReduceLayer(layer, layerDescriptor, name);
2368  break;
2369  }
2370  case armnn::LayerType::Resize :
2371  {
2372  const armnn::ResizeDescriptor& layerDescriptor =
2373  static_cast<const armnn::ResizeDescriptor&>(descriptor);
2374  SerializeResizeLayer(layer, layerDescriptor, name);
2375  break;
2376  }
2377  case armnn::LayerType::ReverseV2 :
2378  {
2379  SerializeReverseV2Layer(layer, name);
2380  break;
2381  }
2382  case armnn::LayerType::Shape :
2383  {
2384  SerializeShapeLayer(layer, name);
2385  break;
2386  }
2387  case armnn::LayerType::Slice :
2388  {
2389  const armnn::SliceDescriptor& layerDescriptor =
2390  static_cast<const armnn::SliceDescriptor&>(descriptor);
2391  SerializeSliceLayer(layer, layerDescriptor, name);
2392  break;
2393  }
2394  case armnn::LayerType::Softmax :
2395  {
2396  const armnn::SoftmaxDescriptor& layerDescriptor =
2397  static_cast<const armnn::SoftmaxDescriptor&>(descriptor);
2398  SerializeSoftmaxLayer(layer, layerDescriptor, name);
2399  break;
2400  }
2401  case armnn::LayerType::SpaceToBatchNd :
2402  {
2403  const armnn::SpaceToBatchNdDescriptor& layerDescriptor =
2404  static_cast<const armnn::SpaceToBatchNdDescriptor&>(descriptor);
2405  SerializeSpaceToBatchNdLayer(layer, layerDescriptor, name);
2406  break;
2407  }
2408  case armnn::LayerType::SpaceToDepth :
2409  {
2410  const armnn::SpaceToDepthDescriptor& layerDescriptor =
2411  static_cast<const armnn::SpaceToDepthDescriptor&>(descriptor);
2412  SerializeSpaceToDepthLayer(layer, layerDescriptor, name);
2413  break;
2414  }
2415  case armnn::LayerType::Splitter :
2416  {
2417  const armnn::SplitterDescriptor& layerDescriptor =
2418  static_cast<const armnn::SplitterDescriptor&>(descriptor);
2419  SerializeSplitterLayer(layer, layerDescriptor, name);
2420  break;
2421  }
2422  case armnn::LayerType::Stack :
2423  {
2424  const armnn::StackDescriptor& layerDescriptor =
2425  static_cast<const armnn::StackDescriptor&>(descriptor);
2426  SerializeStackLayer(layer, layerDescriptor, name);
2427  break;
2428  }
2429  case armnn::LayerType::StandIn :
2430  {
2431  const armnn::StandInDescriptor& layerDescriptor =
2432  static_cast<const armnn::StandInDescriptor&>(descriptor);
2433  SerializeStandInLayer(layer, layerDescriptor, name);
2434  break;
2435  }
2436  case armnn::LayerType::StridedSlice :
2437  {
2438  const armnn::StridedSliceDescriptor& layerDescriptor =
2439  static_cast<const armnn::StridedSliceDescriptor&>(descriptor);
2440  SerializeStridedSliceLayer(layer, layerDescriptor, name);
2441  break;
2442  }
2443  case armnn::LayerType::Subtraction :
2444  {
2445  SerializeSubtractionLayer(layer, name);
2446  break;
2447  }
2448  case armnn::LayerType::Switch :
2449  {
2450  SerializeSwitchLayer(layer, name);
2451  break;
2452  }
2453  case armnn::LayerType::Tile :
2454  {
2455  const armnn::TileDescriptor& layerDescriptor =
2456  static_cast<const armnn::TileDescriptor&>(descriptor);
2457  SerializeTileLayer(layer, layerDescriptor, name);
2458  break;
2459  }
2460  case armnn::LayerType::Transpose :
2461  {
2462  const armnn::TransposeDescriptor& layerDescriptor =
2463  static_cast<const armnn::TransposeDescriptor&>(descriptor);
2464  SerializeTransposeLayer(layer, layerDescriptor, name);
2465  break;
2466  }
2467  case armnn::LayerType::TransposeConvolution2d :
2468  {
2469  const armnn::TransposeConvolution2dDescriptor& layerDescriptor =
2470  static_cast<const armnn::TransposeConvolution2dDescriptor&>(descriptor);
2471  SerializeTransposeConvolution2dLayer(layer, layerDescriptor, constants, name);
2472  break;
2473  }
2474  case armnn::LayerType::UnidirectionalSequenceLstm :
2475  {
2476  const armnn::UnidirectionalSequenceLstmDescriptor& layerDescriptor =
2477  static_cast<const armnn::UnidirectionalSequenceLstmDescriptor&>(descriptor);
2478  SerializeUnidirectionalSequenceLstmLayer(layer, layerDescriptor, constants, name);
2479  break;
2480  }
2481  default:
2482  {
2483  throw armnn::InvalidArgumentException(
2484  fmt::format("A layer of unknown type was given to the serializer. Layer name: {}; Layer Id: {}",
2485  layer->GetName(),
2486  id));
2487  }
2488  }
2489 }
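// Note (editorial): ExecuteStrategy is invoked once per layer when the network
// is walked in Serialize() below; each case narrows the BaseDescriptor to the
// concrete descriptor type and forwards to the matching Serialize*Layer helper.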
2490 
2491 void ISerializer::SerializerImpl::Serialize(const INetwork& inNetwork)
2492 {
2493  // Iterate through the network
2494  inNetwork.ExecuteStrategy(m_SerializerStrategy);
2495  flatbuffers::FlatBufferBuilder& fbBuilder = m_SerializerStrategy.GetFlatBufferBuilder();
2496 
2497  // Create FlatBuffer SerializedGraph
2498  auto serializedGraph = serializer::CreateSerializedGraph(
2499  fbBuilder,
2500  fbBuilder.CreateVector(m_SerializerStrategy.GetSerializedLayers()),
2501  fbBuilder.CreateVector(m_SerializerStrategy.GetInputIds()),
2502  fbBuilder.CreateVector(m_SerializerStrategy.GetOutputIds()),
2503  m_SerializerStrategy.GetVersionTable());
2504 
2505  // Serialize the graph
2506  fbBuilder.Finish(serializedGraph);
2507 }
2508 
2509 
2510 bool ISerializer::SerializerImpl::SaveSerializedToStream(std::ostream& stream)
2511 {
2512  flatbuffers::FlatBufferBuilder& fbBuilder = m_SerializerStrategy.GetFlatBufferBuilder();
2513 
2514  auto bytesToWrite = armnn::numeric_cast<std::streamsize>(fbBuilder.GetSize());
2515  stream.write(reinterpret_cast<const char*>(fbBuilder.GetBufferPointer()), bytesToWrite);
2516  return !stream.bad();
2517 }
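// Minimal usage sketch (illustrative only; the network contents and the output
// file name are hypothetical, and <fstream> would need to be included by the
// caller):
//
//     armnn::INetworkPtr network = armnn::INetwork::Create();
//     // ... add layers, connect them and set tensor infos ...
//
//     armnnSerializer::ISerializerPtr serializer = armnnSerializer::ISerializer::Create();
//     serializer->Serialize(*network);
//
//     std::ofstream out("model.armnn", std::ios::binary);
//     if (!serializer->SaveSerializedToStream(out))
//     {
//         // handle the write failure
//     }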
2518 
2519 } // namespace armnnSerializer
armnn::BatchNormalizationDescriptor
A BatchNormalizationDescriptor for the BatchNormalizationLayer.
Definition: Descriptors.hpp:828
armnn::Convolution2dDescriptor::m_PadTop
uint32_t m_PadTop
Padding top value in the height dimension.
Definition: Descriptors.hpp:570
armnn::ArgMinMaxFunction::Max
@ Max
armnn::LayerType::SpaceToDepth
@ SpaceToDepth
armnn::InstanceNormalizationDescriptor::m_Beta
float m_Beta
Beta, the offset scalar value applied for the normalized tensor. Defaults to 1.0.
Definition: Descriptors.hpp:867
armnn::SliceDescriptor::m_Begin
std::vector< unsigned int > m_Begin
Beginning indices of the slice in each dimension.
Definition: Descriptors.hpp:1244
armnn::INetwork::ExecuteStrategy
void ExecuteStrategy(IStrategy &strategy) const
Definition: Network.cpp:666
armnn::ChannelShuffleDescriptor::m_Axis
uint32_t m_Axis
Axis to apply channel shuffle operation on.
Definition: Descriptors.hpp:1580
armnn::OriginsDescriptor::GetConcatAxis
unsigned int GetConcatAxis() const
Get the concatenation axis value.
Definition: Descriptors.cpp:162
armnn::DataType::Boolean
@ Boolean
armnn::FullyConnectedDescriptor::m_ConstantWeights
bool m_ConstantWeights
Enable/disable constant weights and biases.
Definition: Descriptors.hpp:530
armnn::Pooling2dDescriptor::m_PaddingMethod
PaddingMethod m_PaddingMethod
The padding method to be used. (Exclude, IgnoreValue).
Definition: Descriptors.hpp:425
armnn::ViewsDescriptor
A ViewsDescriptor for the SplitterLayer.
Definition: Descriptors.hpp:244
armnn::DetectionPostProcessDescriptor::m_NmsScoreThreshold
float m_NmsScoreThreshold
NMS score threshold.
Definition: Descriptors.hpp:751
armnn::LayerType::Permute
@ Permute
armnn::QLstmDescriptor::m_ForgetIntermediateScale
float m_ForgetIntermediateScale
Forget intermediate quantization scale.
Definition: Descriptors.hpp:1428
armnn::ActivationDescriptor
An ActivationDescriptor for the ActivationLayer.
Definition: Descriptors.hpp:36
armnn::TransposeConvolution2dDescriptor::m_PadLeft
uint32_t m_PadLeft
Padding left value in the width dimension.
Definition: Descriptors.hpp:1469
armnn::BatchMatMulDescriptor::m_TransposeX
bool m_TransposeX
Transpose the slices of each input tensor Transpose and Adjoint can not both be set to true for the s...
Definition: Descriptors.hpp:1612
armnn::OriginsDescriptor::GetNumViews
uint32_t GetNumViews() const
Get the number of views.
Definition: Descriptors.cpp:187
armnn::FullyConnectedDescriptor
A FullyConnectedDescriptor for the FullyConnectedLayer.
Definition: Descriptors.hpp:507
armnn::DetectionPostProcessDescriptor::m_ScaleX
float m_ScaleX
Center size encoding scale x.
Definition: Descriptors.hpp:759
armnn::LayerType::Splitter
@ Splitter
armnn::BaseTensor::GetMemoryArea
MemoryType GetMemoryArea() const
Definition: Tensor.hpp:307
armnn::LayerType::BatchNormalization
@ BatchNormalization
armnn::QLstmDescriptor
A QLstmDescriptor for the QLstmLayer.
Definition: Descriptors.hpp:1380
Serializer.hpp
armnn::TransposeConvolution2dDescriptor::m_StrideX
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
Definition: Descriptors.hpp:1477
armnn::IConnectableLayer::GetNumInputSlots
virtual unsigned int GetNumInputSlots() const =0
Returns the number of connectable input slots.
armnn::ActivationFunction::LeakyReLu
@ LeakyReLu
armnnSerializer::GetFlatBufferNormalizationAlgorithmChannel
armnnSerializer::NormalizationAlgorithmChannel GetFlatBufferNormalizationAlgorithmChannel(armnn::NormalizationAlgorithmChannel normalizationAlgorithmChannel)
Definition: SerializerUtils.cpp:213
armnn::QLstmDescriptor::m_ProjectionEnabled
bool m_ProjectionEnabled
Enable/disable the projection layer.
Definition: Descriptors.hpp:1422
armnn::Pooling3dDescriptor::m_OutputShapeRounding
OutputShapeRounding m_OutputShapeRounding
The rounding method for the output shape. (Floor, Ceiling).
Definition: Descriptors.hpp:499
armnn::ResizeDescriptor::m_HalfPixelCenters
bool m_HalfPixelCenters
Half Pixel Centers.
Definition: Descriptors.hpp:1018
armnn::LstmDescriptor::m_TimeMajor
bool m_TimeMajor
Enable/disable time major.
Definition: Descriptors.hpp:1154
armnn::BatchNormalizationDescriptor::m_DataLayout
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Definition: Descriptors.hpp:843
armnn::Pooling3dDescriptor::m_PadTop
uint32_t m_PadTop
Padding top value in the height dimension.
Definition: Descriptors.hpp:479
Descriptors.hpp
armnn::SpaceToBatchNdDescriptor::m_DataLayout
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Definition: Descriptors.hpp:1071
armnn::FullyConnectedDescriptor::m_TransposeWeightMatrix
bool m_TransposeWeightMatrix
Enable/disable transpose weight matrix.
Definition: Descriptors.hpp:528
armnn::ResizeDescriptor::m_TargetHeight
uint32_t m_TargetHeight
Target height value.
Definition: Descriptors.hpp:1009
armnn::DepthwiseConvolution2dDescriptor::m_BiasEnabled
bool m_BiasEnabled
Enable/disable bias.
Definition: Descriptors.hpp:708
armnn::TensorInfo::GetQuantizationScales
std::vector< float > GetQuantizationScales() const
Definition: Tensor.cpp:451
armnn::Pooling2dDescriptor::m_PoolHeight
uint32_t m_PoolHeight
Pooling height value.
Definition: Descriptors.hpp:417
armnn::StridedSliceDescriptor::m_Begin
std::vector< int > m_Begin
Begin values for the input that will be sliced.
Definition: Descriptors.hpp:1342
armnn::DetectionPostProcessDescriptor::m_ScaleY
float m_ScaleY
Center size encoding scale y.
Definition: Descriptors.hpp:761
armnn::Pooling3dDescriptor
A Pooling3dDescriptor for the Pooling3dLayer.
Definition: Descriptors.hpp:431
armnn::BatchMatMulDescriptor::m_AdjointX
bool m_AdjointX
Adjoint the slices of each input tensor Transpose and Adjoint can not both be set to true for the sam...
Definition: Descriptors.hpp:1617
armnn::DetectionPostProcessDescriptor::m_MaxDetections
uint32_t m_MaxDetections
Maximum numbers of detections.
Definition: Descriptors.hpp:745
armnn::Convolution3dDescriptor::m_PadFront
uint32_t m_PadFront
Padding front value in the depth dimension.
Definition: Descriptors.hpp:637
armnn::ResizeDescriptor
A ResizeDescriptor for the ResizeLayer.
Definition: Descriptors.hpp:985
armnn::IInputSlot::IsTensorInfoOverridden
virtual bool IsTensorInfoOverridden() const =0
Returns true if this InputSlot has an overridden TensorInfo that was set through a call to SetTensorI...
armnn::ArgMinMaxDescriptor
An ArgMinMaxDescriptor for ArgMinMaxLayer.
Definition: Descriptors.hpp:67
armnn::IConnectableLayer::GetName
virtual const char * GetName() const =0
Returns the name of the layer.
armnn::ActivationDescriptor::m_A
float m_A
Alpha upper bound value used by the activation functions. (BoundedReLu, Linear, TanH,...
Definition: Descriptors.hpp:61
armnn::InstanceNormalizationDescriptor
An InstanceNormalizationDescriptor for InstanceNormalizationLayer.
Definition: Descriptors.hpp:847
armnn::StridedSliceDescriptor::m_EllipsisMask
int32_t m_EllipsisMask
Ellipsis mask value.
Definition: Descriptors.hpp:1357
armnn::ViewsDescriptor::HasAxis
bool HasAxis() const
Returns true if an axis has been set.
Definition: Descriptors.cpp:387
armnn::SoftmaxDescriptor::m_Beta
float m_Beta
Exponentiation value.
Definition: Descriptors.hpp:190
armnn::GatherDescriptor
A GatherDescriptor for the GatherLayer.
Definition: Descriptors.hpp:965
armnn::TensorShape::GetDimensionSpecificity
bool GetDimensionSpecificity(unsigned int i) const
Gets information about if the dimension size has been specified or not.
Definition: Tensor.cpp:211
armnn::TensorInfo::GetQuantizationScale
float GetQuantizationScale() const
Definition: Tensor.cpp:461
armnn::LayerType::InstanceNormalization
@ InstanceNormalization
armnn::DepthwiseConvolution2dDescriptor::m_DataLayout
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Definition: Descriptors.hpp:710
armnn::ActivationFunction::Sqrt
@ Sqrt
armnn::BatchMatMulDescriptor::m_DataLayoutX
DataLayout m_DataLayoutX
Data layout of each input tensor, such as NHWC/NDHWC (leave as default for arbitrary layout)
Definition: Descriptors.hpp:1621
armnn::L2NormalizationDescriptor::m_DataLayout
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Definition: Descriptors.hpp:824
armnn::TensorInfo
Definition: Tensor.hpp:152
armnn::LayerType::Floor
@ Floor
armnn::L2NormalizationDescriptor
A L2NormalizationDescriptor for the L2NormalizationLayer.
Definition: Descriptors.hpp:809
armnnSerializer::GetFlatBufferComparisonOperation
armnnSerializer::ComparisonOperation GetFlatBufferComparisonOperation(armnn::ComparisonOperation comparisonOperation)
Definition: SerializerUtils.cpp:11
armnn::BaseTensor::GetNumBytes
unsigned int GetNumBytes() const
Definition: Tensor.hpp:304
armnn::NormalizationDescriptor::m_Beta
float m_Beta
Beta value for the normalization equation.
Definition: Descriptors.hpp:801
armnn::OriginsDescriptor::GetNumDimensions
uint32_t GetNumDimensions() const
Get the number of dimensions.
Definition: Descriptors.cpp:192
armnn::LayerType::Transpose
@ Transpose
armnn::NormalizationDescriptor
A NormalizationDescriptor for the NormalizationLayer.
Definition: Descriptors.hpp:769
armnn::LayerType::Comparison
@ Comparison
armnn::Pooling2dDescriptor::m_StrideY
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
Definition: Descriptors.hpp:421
armnn::BatchToSpaceNdDescriptor::m_BlockShape
std::vector< unsigned int > m_BlockShape
Block shape values.
Definition: Descriptors.hpp:898
armnn::IConnectableLayer::GetGuid
virtual LayerGuid GetGuid() const =0
Returns the unique id of the layer.
armnn::LayerType::StridedSlice
@ StridedSlice
armnn::ChannelShuffleDescriptor
A ChannelShuffleDescriptor for the ChannelShuffle operator.
Definition: Descriptors.hpp:1562
armnn::DataType::Float32
@ Float32
armnn::ResizeDescriptor::m_DataLayout
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Definition: Descriptors.hpp:1014
armnn::ActivationFunction::TanH
@ TanH
armnn::DepthwiseConvolution2dDescriptor::m_PadLeft
uint32_t m_PadLeft
Padding left value in the width dimension.
Definition: Descriptors.hpp:692
armnnSerializer::GetFlatBufferDataType
armnnSerializer::DataType GetFlatBufferDataType(armnn::DataType dataType)
Definition: SerializerUtils.cpp:67
armnn::Convolution2dDescriptor::m_StrideY
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
Definition: Descriptors.hpp:576
armnn::Pooling2dDescriptor::m_PadTop
uint32_t m_PadTop
Padding top value in the height dimension.
Definition: Descriptors.hpp:411
armnn::Convolution3dDescriptor::m_PadTop
uint32_t m_PadTop
Padding top value in the height dimension.
Definition: Descriptors.hpp:633
armnnSerializer
Definition: ISerializer.hpp:11
armnn::ArgMinMaxDescriptor::m_Function
ArgMinMaxFunction m_Function
Specify if the function is to find Min or Max.
Definition: Descriptors.hpp:81
armnn::LayerType::Tile
@ Tile
armnn::SpaceToBatchNdDescriptor::m_BlockShape
std::vector< unsigned int > m_BlockShape
Block shape value.
Definition: Descriptors.hpp:1066
armnn::StridedSliceDescriptor::m_BeginMask
int32_t m_BeginMask
Begin mask value.
Definition: Descriptors.hpp:1350
armnn::BatchMatMulDescriptor::m_AdjointY
bool m_AdjointY
Definition: Descriptors.hpp:1618
armnn::Convolution3dDescriptor::m_DilationX
uint32_t m_DilationX
Dilation along x axis.
Definition: Descriptors.hpp:647
armnn::Convolution3dDescriptor::m_PadBottom
uint32_t m_PadBottom
Padding bottom value in the height dimension.
Definition: Descriptors.hpp:635
armnn::DataType::QAsymmU8
@ QAsymmU8
armnn::QLstmDescriptor::m_InputIntermediateScale
float m_InputIntermediateScale
Input intermediate quantization scale.
Definition: Descriptors.hpp:1426
armnn::ArgMinMaxFunction
ArgMinMaxFunction
Definition: Types.hpp:103
armnn::DetectionPostProcessDescriptor::m_ScaleW
float m_ScaleW
Center size encoding scale weight.
Definition: Descriptors.hpp:763
armnn::ActivationFunction::BoundedReLu
@ BoundedReLu
min(a, max(b, input)) ReLu1 & ReLu6.
armnn::LayerType::Stack
@ Stack
armnn::DataType::QSymmS8
@ QSymmS8
armnn::StackDescriptor
A StackDescriptor for the StackLayer.
Definition: Descriptors.hpp:1251
armnn::IOutputSlot::GetTensorInfo
virtual const TensorInfo & GetTensorInfo() const =0
IgnoreUnused.hpp
armnn::Pooling3dDescriptor::m_StrideZ
uint32_t m_StrideZ
Stride value when proceeding through input for the depth dimension.
Definition: Descriptors.hpp:497
armnn::LayerType::Normalization
@ Normalization
armnn::NormalizationDescriptor::m_NormSize
uint32_t m_NormSize
Depth radius value.
Definition: Descriptors.hpp:797
armnn::LayerType::QuantizedLstm
@ QuantizedLstm
armnn::Pooling2dDescriptor::m_PoolWidth
uint32_t m_PoolWidth
Pooling width value.
Definition: Descriptors.hpp:415
armnn::StandInDescriptor::m_NumInputs
uint32_t m_NumInputs
Number of input tensors.
Definition: Descriptors.hpp:1297
armnn::LayerType::Reduce
@ Reduce
armnn::Pooling3dDescriptor::m_DataLayout
DataLayout m_DataLayout
The data layout to be used (NCDHW, NDHWC).
Definition: Descriptors.hpp:503
armnn::Convolution2dDescriptor::m_PadLeft
uint32_t m_PadLeft
Padding left value in the width dimension.
Definition: Descriptors.hpp:566
armnn::BatchToSpaceNdDescriptor::m_Crops
std::vector< std::pair< unsigned int, unsigned int > > m_Crops
The values to crop from the input dimension.
Definition: Descriptors.hpp:900
armnn::TensorInfo::HasPerAxisQuantization
bool HasPerAxisQuantization() const
Definition: Tensor.cpp:446
armnn::DepthwiseConvolution2dDescriptor::m_StrideY
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
Definition: Descriptors.hpp:702
armnn::Convolution2dDescriptor::m_DilationY
uint32_t m_DilationY
Dilation along y axis.
Definition: Descriptors.hpp:580
armnn::LayerType::ElementwiseUnary
@ ElementwiseUnary
armnn::Pooling3dDescriptor::m_PadBottom
uint32_t m_PadBottom
Padding bottom value in the height dimension.
Definition: Descriptors.hpp:481
armnn::DetectionPostProcessDescriptor::m_MaxClassesPerDetection
uint32_t m_MaxClassesPerDetection
Maximum numbers of classes per detection, used in Fast NMS.
Definition: Descriptors.hpp:747
armnnSerializer::GetFlatBufferResizeMethod
armnnSerializer::ResizeMethod GetFlatBufferResizeMethod(armnn::ResizeMethod method)
Definition: SerializerUtils.cpp:241
armnn::IConnectableLayer::GetNumOutputSlots
virtual unsigned int GetNumOutputSlots() const =0
Returns the number of connectable output slots.
armnnSerializer::ISerializer
Definition: ISerializer.hpp:17
armnn::QLstmDescriptor::m_CellIntermediateScale
float m_CellIntermediateScale
Cell intermediate quantization scale.
Definition: Descriptors.hpp:1430
armnn::ActivationFunction::HardSwish
@ HardSwish
armnn::DataType::QSymmS16
@ QSymmS16
armnn::ActivationFunction::Gelu
@ Gelu
armnnSerializer::GetFlatBufferActivationFunction
serializer::ActivationFunction GetFlatBufferActivationFunction(armnn::ActivationFunction function)
Definition: Serializer.cpp:55
armnn::NormalizationDescriptor::m_NormMethodType
NormalizationAlgorithmMethod m_NormMethodType
Normalization method algorithm to use (LocalBrightness, LocalContrast).
Definition: Descriptors.hpp:795
armnn::LayerType::GatherNd
@ GatherNd
armnn::LayerType::ElementwiseBinary
@ ElementwiseBinary
armnn::TransposeConvolution2dDescriptor::m_PadBottom
uint32_t m_PadBottom
Padding bottom value in the height dimension.
Definition: Descriptors.hpp:1475
NumericCast.hpp
armnn::DataType::BFloat16
@ BFloat16
armnn::Pooling3dDescriptor::m_StrideY
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
Definition: Descriptors.hpp:495
armnn::ReduceDescriptor::m_ReduceOperation
ReduceOperation m_ReduceOperation
Specifies the reduction operation to execute.
Definition: Descriptors.hpp:1558
armnnSerializer::GetFlatBufferConstTensorData
armnnSerializer::ConstTensorData GetFlatBufferConstTensorData(armnn::DataType dataType)
Definition: SerializerUtils.cpp:45
armnn::MeanDescriptor::m_KeepDims
bool m_KeepDims
Enable/disable keep dimensions. If true, then the reduced dimensions that are of length 1 are kept.
Definition: Descriptors.hpp:1192
armnn::FillDescriptor::m_Value
float m_Value
Definition: Descriptors.hpp:940
armnn::ElementwiseBinaryDescriptor
A ElementwiseBinaryDescriptor for the ElementwiseBinaryLayer.
Definition: Descriptors.hpp:109
armnn::ViewsDescriptor::GetViewSizes
const uint32_t * GetViewSizes(uint32_t idx) const
Get the view sizes at the int value idx.
Definition: Descriptors.cpp:346
armnnSerializer::GetFlatBufferBinaryOperation
armnnSerializer::BinaryOperation GetFlatBufferBinaryOperation(armnn::BinaryOperation binaryOperation)
Definition: SerializerUtils.cpp:110
armnn::Pooling3dDescriptor::m_PoolType
PoolingAlgorithm m_PoolType
The pooling algorithm to use (Max. Average, L2).
Definition: Descriptors.hpp:473
armnn::ResizeDescriptor::m_Method
ResizeMethod m_Method
The Interpolation method to use (Bilinear, NearestNeighbor).
Definition: Descriptors.hpp:1012
armnn::SpaceToBatchNdDescriptor::m_PadList
std::vector< std::pair< unsigned int, unsigned int > > m_PadList
Specifies the padding values for the input dimension: heightPad{top, bottom} widthPad{left,...
Definition: Descriptors.hpp:1069
armnn::LayerType::Slice
@ Slice
armnn::LstmDescriptor::m_PeepholeEnabled
bool m_PeepholeEnabled
Enable/disable peephole.
Definition: Descriptors.hpp:1148
armnn::Convolution3dDescriptor::m_PadRight
uint32_t m_PadRight
Padding right value in the width dimension.
Definition: Descriptors.hpp:631
armnn::NormalizationDescriptor::m_NormChannelType
NormalizationAlgorithmChannel m_NormChannelType
Normalization channel algorithm to use (Across, Within).
Definition: Descriptors.hpp:793
LstmParams.hpp
armnn::IOutputSlot
An output connection slot for a layer.
Definition: INetwork.hpp:53
armnnSerializer::GetFlatBufferDataLayout
armnnSerializer::DataLayout GetFlatBufferDataLayout(armnn::DataLayout dataLayout)
Definition: SerializerUtils.cpp:94
armnn::StackDescriptor::m_NumInputs
uint32_t m_NumInputs
Number of input tensors.
Definition: Descriptors.hpp:1275
armnn::DataType::Float16
@ Float16
armnn::LstmDescriptor::m_ClippingThresProj
float m_ClippingThresProj
Clipping threshold value for the projection.
Definition: Descriptors.hpp:1144
armnn::LayerType::ChannelShuffle
@ ChannelShuffle
armnn::Pooling3dDescriptor::m_PoolWidth
uint32_t m_PoolWidth
Pooling width value.
Definition: Descriptors.hpp:487
armnn::StridedSliceDescriptor::m_DataLayout
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Definition: Descriptors.hpp:1363
armnn::Pooling2dDescriptor::m_DataLayout
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Definition: Descriptors.hpp:427
armnn::TensorShape::GetNumDimensions
unsigned int GetNumDimensions() const
Function that returns the tensor rank.
Definition: Tensor.cpp:174
armnn::DepthwiseConvolution2dDescriptor::m_DilationY
uint32_t m_DilationY
Dilation factor value for height dimension.
Definition: Descriptors.hpp:706
armnn::Pooling2dDescriptor::m_PadBottom
uint32_t m_PadBottom
Padding bottom value in the height dimension.
Definition: Descriptors.hpp:413
armnn::LayerType::Subtraction
@ Subtraction
armnn::LayerType::Prelu
@ Prelu
armnn::ViewsDescriptor::GetAxis
int32_t GetAxis() const
Get the axis value.
Definition: Descriptors.cpp:381
armnn::Pooling3dDescriptor::m_PaddingMethod
PaddingMethod m_PaddingMethod
The padding method to be used. (Exclude, IgnoreValue).
Definition: Descriptors.hpp:501
armnn::Pooling2dDescriptor::m_PadRight
uint32_t m_PadRight
Padding right value in the width dimension.
Definition: Descriptors.hpp:409
armnn::FullyConnectedDescriptor::m_BiasEnabled
bool m_BiasEnabled
Enable/disable bias.
Definition: Descriptors.hpp:526
armnn::LayerType::LogicalBinary
@ LogicalBinary
armnn::LogicalBinaryDescriptor::m_Operation
LogicalBinaryOperation m_Operation
Specifies the logical operation to execute.
Definition: Descriptors.hpp:1534
armnn::LayerType::Concat
@ Concat
armnn::PadDescriptor
A PadDescriptor for the PadLayer.
Definition: Descriptors.hpp:1196
armnn::TransposeDescriptor
A TransposeDescriptor for the TransposeLayer.
Definition: Descriptors.hpp:1490
armnn::Convolution3dDescriptor::m_DilationZ
uint32_t m_DilationZ
Dilation along z axis.
Definition: Descriptors.hpp:651
armnn::BatchMatMulDescriptor::m_TransposeY
bool m_TransposeY
Definition: Descriptors.hpp:1613
armnn::ViewsDescriptor::GetOrigins
const OriginsDescriptor & GetOrigins() const
Get the View Origins.
Definition: Descriptors.cpp:351
armnn::DetectionPostProcessDescriptor::m_NumClasses
uint32_t m_NumClasses
Number of classes.
Definition: Descriptors.hpp:755
armnn::TensorInfo::GetQuantizationDim
Optional< unsigned int > GetQuantizationDim() const
Definition: Tensor.cpp:494
armnn::LayerType::TransposeConvolution2d
@ TransposeConvolution2d
armnn::LayerType::Merge
@ Merge
armnn::StackDescriptor::m_Axis
uint32_t m_Axis
0-based axis along which to stack the input tensors.
Definition: Descriptors.hpp:1273
armnn::SliceDescriptor
A SliceDescriptor for the SliceLayer.
Definition: Descriptors.hpp:1228
armnn::LayerType::StandIn
@ StandIn
armnn::LayerType::Softmax
@ Softmax
armnnSerializer::GetFlatBufferUnaryOperation
armnnSerializer::UnaryOperation GetFlatBufferUnaryOperation(armnn::UnaryOperation unaryOperation)
Definition: SerializerUtils.cpp:135
armnn::InstanceNormalizationDescriptor::m_Gamma
float m_Gamma
Gamma, the scale scalar value applied for the normalized tensor. Defaults to 1.0.
Definition: Descriptors.hpp:865
armnn::DetectionPostProcessDescriptor::m_NmsIouThreshold
float m_NmsIouThreshold
Intersection over union threshold.
Definition: Descriptors.hpp:753
armnn::ActivationFunction::Elu
@ Elu
armnn::Convolution2dDescriptor::m_BiasEnabled
bool m_BiasEnabled
Enable/disable bias.
Definition: Descriptors.hpp:582
armnn::BatchMatMulDescriptor::m_DataLayoutY
DataLayout m_DataLayoutY
Definition: Descriptors.hpp:1622
armnn::ReshapeDescriptor
A ReshapeDescriptor for the ReshapeLayer.
Definition: Descriptors.hpp:1023
armnnSerializer::GetFlatBufferNormalizationAlgorithmMethod
armnnSerializer::NormalizationAlgorithmMethod GetFlatBufferNormalizationAlgorithmMethod(armnn::NormalizationAlgorithmMethod normalizationAlgorithmMethod)
Definition: SerializerUtils.cpp:227
armnn::InvalidArgumentException
Definition: Exceptions.hpp:80
armnn::LayerType::Quantize
@ Quantize
armnn::PadDescriptor::m_PadValue
float m_PadValue
Optional value to use for padding, defaults to 0.
Definition: Descriptors.hpp:1221
armnn::LayerBindingId
int LayerBindingId
Type of identifiers for bindable layers (inputs, outputs).
Definition: Types.hpp:309
armnn::ActivationFunction::Linear
@ Linear
armnn::DepthwiseConvolution2dDescriptor::m_PadRight
uint32_t m_PadRight
Padding right value in the width dimension.
Definition: Descriptors.hpp:694
armnn::Convolution3dDescriptor::m_PadLeft
uint32_t m_PadLeft
Padding left value in the width dimension.
Definition: Descriptors.hpp:629
armnn::ActivationDescriptor::m_Function
ActivationFunction m_Function
The activation function to use (Sigmoid, TanH, Linear, ReLu, BoundedReLu, SoftReLu,...
Definition: Descriptors.hpp:59
armnn::NormalizationDescriptor::m_DataLayout
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Definition: Descriptors.hpp:805
armnn::LayerType::Multiplication
@ Multiplication
armnn::PermuteDescriptor
A PermuteDescriptor for the PermuteLayer.
Definition: Descriptors.hpp:149
armnn::BatchMatMulDescriptor
A BatchMatMulDescriptor for the BatchMatMul operator.
Definition: Descriptors.hpp:1584
armnn::IOutputSlot::GetOwningLayerGuid
virtual LayerGuid GetOwningLayerGuid() const =0
armnnSerializer::GetFlatBufferArgMinMaxFunction
serializer::ArgMinMaxFunction GetFlatBufferArgMinMaxFunction(armnn::ArgMinMaxFunction function)
Definition: Serializer.cpp:88
armnn::Convolution3dDescriptor::m_StrideY
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
Definition: Descriptors.hpp:643
armnn::IConnectableLayer::GetType
virtual LayerType GetType() const =0
Returns the armnn::LayerType of this layer.
armnn::LayerType::Addition
@ Addition
armnn::Convolution2dDescriptor::m_DataLayout
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Definition: Descriptors.hpp:584
armnn::QLstmDescriptor::m_ProjectionClip
float m_ProjectionClip
Clipping threshold value for the projection.
Definition: Descriptors.hpp:1416
armnnSerializer::GetFlatBufferPaddingMethod
armnnSerializer::PaddingMethod GetFlatBufferPaddingMethod(armnn::PaddingMethod paddingMethod)
Definition: SerializerUtils.cpp:188
armnn::GatherDescriptor::m_Axis
int32_t m_Axis
The axis in params along which values are gathered using the indices.
Definition: Descriptors.hpp:981
armnn::Convolution3dDescriptor::m_StrideX
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
Definition: Descriptors.hpp:641
armnn::SpaceToBatchNdDescriptor
A SpaceToBatchNdDescriptor for the SpaceToBatchNdLayer.
Definition: Descriptors.hpp:1043
armnn::Convolution2dDescriptor::m_PadBottom
uint32_t m_PadBottom
Padding bottom value in the height dimension.
Definition: Descriptors.hpp:572
armnn::Convolution3dDescriptor
A Convolution3dDescriptor for the Convolution3dLayer.
Definition: Descriptors.hpp:588
armnn::LayerType::DepthToSpace
@ DepthToSpace
armnn::ReshapeDescriptor::m_TargetShape
TensorShape m_TargetShape
Target shape value.
Definition: Descriptors.hpp:1039
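For illustration only (assuming an existing armnn::INetworkPtr named network, which is not part of this file), m_TargetShape is typically set before the layer is added:
    // Reshape a [1, 8] tensor into [2, 4]; the descriptor only carries the target shape.
    armnn::ReshapeDescriptor reshapeDesc;
    reshapeDesc.m_TargetShape = armnn::TensorShape({ 2, 4 });
    armnn::IConnectableLayer* reshape = network->AddReshapeLayer(reshapeDesc, "reshape");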
armnn::BaseDescriptor
Base class for all descriptors.
Definition: Descriptors.hpp:22
armnn::TransposeConvolution2dDescriptor::m_StrideY
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
Definition: Descriptors.hpp:1479
armnn::StridedSliceDescriptor::m_EndMask
int32_t m_EndMask
End mask value.
Definition: Descriptors.hpp:1353
armnn::BaseTensor::GetInfo
const TensorInfo & GetInfo() const
Definition: Tensor.hpp:297
armnn::Pooling2dDescriptor::m_PadLeft
uint32_t m_PadLeft
Padding left value in the width dimension.
Definition: Descriptors.hpp:407
armnn::PermuteDescriptor::m_DimMappings
PermutationVector m_DimMappings
Indicates how to translate tensor elements from a given source into the target destination,...
Definition: Descriptors.hpp:173
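A sketch of how m_DimMappings is usually populated (network is an assumed armnn::INetworkPtr; each entry gives the destination index of the corresponding source dimension):
    // NCHW -> NHWC: N(0)->0, C(1)->3, H(2)->1, W(3)->2.
    armnn::PermuteDescriptor permuteDesc(armnn::PermutationVector({ 0, 3, 1, 2 }));
    armnn::IConnectableLayer* permute = network->AddPermuteLayer(permuteDesc, "permute");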
armnn::ActivationFunction
ActivationFunction
Definition: Types.hpp:86
armnn::StandInDescriptor::m_NumOutputs
uint32_t m_NumOutputs
Number of output tensors.
Definition: Descriptors.hpp:1299
armnn::Pooling3dDescriptor::m_PadFront
uint32_t m_PadFront
Padding front value in the depth dimension.
Definition: Descriptors.hpp:483
SerializerUtils.hpp
armnn::StackDescriptor::m_InputShape
TensorShape m_InputShape
Required shape of all input tensors.
Definition: Descriptors.hpp:1277
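Illustrative StackDescriptor sketch, assuming an existing armnn::INetworkPtr named network; the shapes and names are examples only:
    // Stack three [3, 4] inputs along a new leading axis, producing [3, 3, 4].
    armnn::StackDescriptor stackDesc;
    stackDesc.m_Axis       = 0;
    stackDesc.m_NumInputs  = 3;
    stackDesc.m_InputShape = armnn::TensorShape({ 3, 4 });
    armnn::IConnectableLayer* stack = network->AddStackLayer(stackDesc, "stack");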
armnn::LayerType::DetectionPostProcess
@ DetectionPostProcess
armnn::Convolution2dDescriptor::m_StrideX
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
Definition: Descriptors.hpp:574
armnn::QLstmDescriptor::m_OutputIntermediateScale
float m_OutputIntermediateScale
Output intermediate quantization scale.
Definition: Descriptors.hpp:1432
armnn::LayerType::Pooling2d
@ Pooling2d
armnn::Convolution2dDescriptor::m_PadRight
uint32_t m_PadRight
Padding right value in the width dimension.
Definition: Descriptors.hpp:568
armnn::TensorInfo::GetDataType
DataType GetDataType() const
Definition: Tensor.hpp:200
armnnSerializer::GetFlatBufferReduceOperation
armnnSerializer::ReduceOperation GetFlatBufferReduceOperation(armnn::ReduceOperation reduceOperation)
Definition: SerializerUtils.cpp:254
armnn::DetectionPostProcessDescriptor::m_DetectionsPerClass
uint32_t m_DetectionsPerClass
Detections per class, used in Regular NMS.
Definition: Descriptors.hpp:749
LayerGuid
arm::pipe::ProfilingGuid LayerGuid
Define LayerGuid type.
Definition: Types.hpp:26
armnn::LayerType::Division
@ Division
armnn::DetectionPostProcessDescriptor::m_ScaleH
float m_ScaleH
Center size encoding scale height.
Definition: Descriptors.hpp:765
armnn::DataType::Signed32
@ Signed32
armnn::LayerType::Shape
@ Shape
armnn::ReduceDescriptor::m_KeepDims
bool m_KeepDims
If true, the reduced dimensions are kept in the output shape with size 1, so the rank does not change.
Definition: Descriptors.hpp:1554
armnn::BatchToSpaceNdDescriptor
A BatchToSpaceNdDescriptor for the BatchToSpaceNdLayer.
Definition: Descriptors.hpp:875
armnn::Convolution2dDescriptor
A Convolution2dDescriptor for the Convolution2dLayer.
Definition: Descriptors.hpp:534
armnn::ActivationFunction::Abs
@ Abs
armnn::DepthwiseConvolution2dDescriptor::m_PadBottom
uint32_t m_PadBottom
Padding bottom value in the height dimension.
Definition: Descriptors.hpp:698
armnn::ComparisonDescriptor
A ComparisonDescriptor for the ComparisonLayer.
Definition: Descriptors.hpp:89
armnn::FillDescriptor
A FillDescriptor for the FillLayer.
Definition: Descriptors.hpp:925
armnn::DataType::QAsymmS8
@ QAsymmS8
armnn::ComparisonDescriptor::m_Operation
ComparisonOperation m_Operation
Specifies the comparison operation to execute.
Definition: Descriptors.hpp:105
armnn::Pooling3dDescriptor::m_PadRight
uint32_t m_PadRight
Padding right value in the width dimension.
Definition: Descriptors.hpp:477
armnn::ElementwiseUnaryDescriptor::m_Operation
UnaryOperation m_Operation
Specifies the elementwiseUnary operation to execute.
Definition: Descriptors.hpp:145
armnn::ArgMinMaxFunction::Min
@ Min
armnn::LayerType::FullyConnected
@ FullyConnected
armnn::LayerType::Gather
@ Gather
armnn::StridedSliceDescriptor::m_ShrinkAxisMask
int32_t m_ShrinkAxisMask
Shrink axis mask value. If set, the nth specification shrinks the dimensionality by 1.
Definition: Descriptors.hpp:1355
armnn::StandInDescriptor
A StandInDescriptor for the StandIn layer.
Definition: Descriptors.hpp:1281
armnn::Pooling2dDescriptor::m_StrideX
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
Definition: Descriptors.hpp:419
armnn::LayerType::Pooling3d
@ Pooling3d
armnn::SpaceToDepthDescriptor::m_BlockSize
unsigned int m_BlockSize
Scalar specifying the input block size. It must be >= 1.
Definition: Descriptors.hpp:1092
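Sketch only (network is an assumed armnn::INetworkPtr built elsewhere):
    // Move 2x2 spatial blocks of an NHWC tensor into the channel dimension.
    armnn::SpaceToDepthDescriptor spaceToDepthDesc;
    spaceToDepthDesc.m_BlockSize  = 2;
    spaceToDepthDesc.m_DataLayout = armnn::DataLayout::NHWC;
    armnn::IConnectableLayer* s2d = network->AddSpaceToDepthLayer(spaceToDepthDesc, "space_to_depth");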
armnn::ResizeDescriptor::m_TargetWidth
uint32_t m_TargetWidth
Target width value.
Definition: Descriptors.hpp:1007
armnn::LayerType::LogSoftmax
@ LogSoftmax
armnn::LayerType::BatchMatMul
@ BatchMatMul
armnn::Pooling3dDescriptor::m_PoolHeight
uint32_t m_PoolHeight
Pooling height value.
Definition: Descriptors.hpp:489
armnn::StridedSliceDescriptor::m_Stride
std::vector< int > m_Stride
Stride values for the input that will be sliced.
Definition: Descriptors.hpp:1346
armnn::LayerType::DepthwiseConvolution2d
@ DepthwiseConvolution2d
armnn::LayerType::Cast
@ Cast
armnn::InstanceNormalizationDescriptor::m_DataLayout
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Definition: Descriptors.hpp:871
armnn::LayerType::BatchToSpaceNd
@ BatchToSpaceNd
armnn::PermutationVector::GetSize
SizeType GetSize() const
Definition: Types.hpp:357
armnn::LstmDescriptor
An LstmDescriptor for the LstmLayer.
Definition: Descriptors.hpp:1102
armnn::StridedSliceDescriptor
A StridedSliceDescriptor for the StridedSliceLayer.
Definition: Descriptors.hpp:1303
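A sketch of a StridedSliceDescriptor with the mask fields left at their defaults (network is an assumed armnn::INetworkPtr, not taken from this file):
    // Keep dimension 0 and take every second element of dimension 1 from a [1, 8] input.
    armnn::StridedSliceDescriptor stridedSliceDesc;
    stridedSliceDesc.m_Begin  = { 0, 0 };
    stridedSliceDesc.m_End    = { 1, 8 };
    stridedSliceDesc.m_Stride = { 1, 2 };
    armnn::IConnectableLayer* stridedSlice = network->AddStridedSliceLayer(stridedSliceDesc, "strided_slice");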
armnn::LayerType::Switch
@ Switch
armnn::TransposeDescriptor::m_DimMappings
PermutationVector m_DimMappings
Indicates how to translate tensor elements from a given source into the target destination,...
Definition: Descriptors.hpp:1514
armnnSerializer::GetFlatBufferPoolingAlgorithm
armnnSerializer::PoolingAlgorithm GetFlatBufferPoolingAlgorithm(armnn::PoolingAlgorithm poolingAlgorithm)
Definition: SerializerUtils.cpp:162
armnn::Pooling3dDescriptor::m_PadBack
uint32_t m_PadBack
Padding back value in the depth dimension.
Definition: Descriptors.hpp:485
armnn::ResizeDescriptor::m_AlignCorners
bool m_AlignCorners
Aligned corners.
Definition: Descriptors.hpp:1016
armnn::TileDescriptor::m_Multiples
std::vector< uint32_t > m_Multiples
The vector to multiply the input shape by.
Definition: Descriptors.hpp:1656
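Illustrative sketch; it assumes an armnn::INetworkPtr named network and that INetwork::AddTileLayer is available in this release:
    // Repeat a [2, 3] tensor twice along its last dimension, producing [2, 6].
    armnn::TileDescriptor tileDesc;
    tileDesc.m_Multiples = { 1, 2 };
    armnn::IConnectableLayer* tile = network->AddTileLayer(tileDesc, "tile");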
armnn::MeanDescriptor::m_Axis
std::vector< unsigned int > m_Axis
Values for the dimensions to reduce.
Definition: Descriptors.hpp:1190
armnn::LstmDescriptor::m_CifgEnabled
bool m_CifgEnabled
Enable/disable CIFG (coupled input & forget gate).
Definition: Descriptors.hpp:1146
armnn::LogicalBinaryDescriptor
A LogicalBinaryDescriptor for the LogicalBinaryLayer.
Definition: Descriptors.hpp:1518
armnn::NormalizationDescriptor::m_Alpha
float m_Alpha
Alpha value for the normalization equation.
Definition: Descriptors.hpp:799
armnn::Pooling3dDescriptor::m_StrideX
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
Definition: Descriptors.hpp:493
armnn::IInputSlot::GetTensorInfo
virtual const TensorInfo & GetTensorInfo() const =0
Gets the TensorInfo for this InputSlot.
armnn::LayerType::Reshape
@ Reshape
armnn::Pooling3dDescriptor::m_PadLeft
uint32_t m_PadLeft
Padding left value in the width dimension.
Definition: Descriptors.hpp:475
armnn::TensorInfo::GetShape
const TensorShape & GetShape() const
Definition: Tensor.hpp:193
armnn::QLstmDescriptor::m_HiddenStateZeroPoint
int32_t m_HiddenStateZeroPoint
Hidden State zero point.
Definition: Descriptors.hpp:1434
armnn::Convolution2dDescriptor::m_DilationX
uint32_t m_DilationX
Dilation along x axis.
Definition: Descriptors.hpp:578
armnn::SoftmaxDescriptor::m_Axis
int m_Axis
Scalar, defaulted to the last index (-1), specifying the dimension the activation will be performed o...
Definition: Descriptors.hpp:192
armnn::Convolution3dDescriptor::m_PadBack
uint32_t m_PadBack
Padding back value in the depth dimension.
Definition: Descriptors.hpp:639
armnn::LayerType::SpaceToBatchNd
@ SpaceToBatchNd
armnn::LayerType::Fill
@ Fill
armnn::ReduceDescriptor::m_vAxis
std::vector< uint32_t > m_vAxis
The indices of the dimensions to reduce.
Definition: Descriptors.hpp:1556
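Sketch only (network is an assumed armnn::INetworkPtr; the axis choice is an example):
    // Sum over axis 2 and drop it from the output shape (m_KeepDims == false).
    armnn::ReduceDescriptor reduceDesc;
    reduceDesc.m_vAxis           = { 2 };
    reduceDesc.m_KeepDims        = false;
    reduceDesc.m_ReduceOperation = armnn::ReduceOperation::Sum;
    armnn::IConnectableLayer* reduce = network->AddReduceLayer(reduceDesc, "reduce_sum");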
armnn::PadDescriptor::m_PadList
std::vector< std::pair< unsigned int, unsigned int > > m_PadList
Specifies the padding for input dimension.
Definition: Descriptors.hpp:1218
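Sketch of how m_PadList pairs up with m_PadValue, assuming an existing armnn::INetworkPtr named network:
    // Pad one element before and after the last dimension of a rank-2 tensor with 0.0f.
    armnn::PadDescriptor padDesc;
    padDesc.m_PadList  = { { 0, 0 }, { 1, 1 } };
    padDesc.m_PadValue = 0.0f;
    armnn::IConnectableLayer* pad = network->AddPadLayer(padDesc, "pad");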
armnn::LayerType::L2Normalization
@ L2Normalization
armnn::LstmDescriptor::m_LayerNormEnabled
bool m_LayerNormEnabled
Enable/disable layer normalization.
Definition: Descriptors.hpp:1152
armnn::IgnoreUnused
void IgnoreUnused(Ts &&...)
Definition: IgnoreUnused.hpp:14
armnn::TransposeConvolution2dDescriptor::m_PadTop
uint32_t m_PadTop
Padding top value in the height dimension.
Definition: Descriptors.hpp:1473
armnn::ViewsDescriptor::GetNumDimensions
uint32_t GetNumDimensions() const
Get the number of dimensions.
Definition: Descriptors.cpp:306
armnn::TransposeConvolution2dDescriptor::m_PadRight
uint32_t m_PadRight
Padding right value in the width dimension.
Definition: Descriptors.hpp:1471
armnn::LayerType::Minimum
@ Minimum
armnnSerializer::GetFlatBufferOutputShapeRounding
armnnSerializer::OutputShapeRounding GetFlatBufferOutputShapeRounding(armnn::OutputShapeRounding outputShapeRounding)
Definition: SerializerUtils.cpp:176
armnn::QLstmDescriptor::m_CifgEnabled
bool m_CifgEnabled
Enable/disable CIFG (coupled input & forget gate).
Definition: Descriptors.hpp:1418
armnn::ElementwiseBinaryDescriptor::m_Operation
BinaryOperation m_Operation
Specifies the elementwiseBinary operation to execute.
Definition: Descriptors.hpp:125
armnn::LayerType::UnidirectionalSequenceLstm
@ UnidirectionalSequenceLstm
armnn::IOutputSlot::CalculateIndexOnOwner
virtual unsigned int CalculateIndexOnOwner() const =0
armnn::LayerType::ReverseV2
@ ReverseV2
armnnSerializer::ISerializerPtr
std::unique_ptr< ISerializer, void(*)(ISerializer *serializer)> ISerializerPtr
Definition: ISerializer.hpp:15
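A minimal usage sketch for ISerializerPtr (not part of this file): it assumes an already built armnn::INetworkPtr called network and a writable std::ofstream; the file name is illustrative only.
    std::ofstream file("network.armnn", std::ios::binary);
    armnnSerializer::ISerializerPtr serializer = armnnSerializer::ISerializer::Create();
    serializer->Serialize(*network);            // capture the INetwork into the FlatBuffer representation
    serializer->SaveSerializedToStream(file);   // write the serialized bytes to the stream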
armnn::Convolution3dDescriptor::m_DilationY
uint32_t m_DilationY
Dilation along y axis.
Definition: Descriptors.hpp:649
armnn::OriginsDescriptor
An OriginsDescriptor for the ConcatLayer.
Definition: Descriptors.hpp:201
armnn::ActivationFunction::ReLu
@ ReLu
armnn::IConnectableLayer::GetOutputSlot
virtual const IOutputSlot & GetOutputSlot(unsigned int index) const =0
Get the const output slot handle by slot index.
armnn
Copyright (c) 2021 ARM Limited and Contributors.
Definition: 01_00_quick_start.dox:6
armnn::ElementwiseUnaryDescriptor
A ElementwiseUnaryDescriptor for the ElementwiseUnaryLayer.
Definition: Descriptors.hpp:129
armnn::TransposeConvolution2dDescriptor
A TransposeConvolution2dDescriptor for the TransposeConvolution2dLayer.
Definition: Descriptors.hpp:1440
armnn::IConnectableLayer::GetInputSlot
virtual const IInputSlot & GetInputSlot(unsigned int index) const =0
Get a const input slot handle by slot index.
armnn::Convolution3dDescriptor::m_BiasEnabled
bool m_BiasEnabled
Enable/disable bias.
Definition: Descriptors.hpp:653
armnn::LayerType::ArgMinMax
@ ArgMinMax
armnn::ArgMinMaxDescriptor::m_Axis
int m_Axis
Axis to reduce across the input tensor.
Definition: Descriptors.hpp:83
armnn::ActivationDescriptor::m_B
float m_B
Beta lower bound value used by the activation functions (BoundedReLu, Linear, TanH).
Definition: Descriptors.hpp:63
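Illustrative sketch of m_A/m_B with BoundedReLu (network is an assumed armnn::INetworkPtr):
    // BoundedReLu clamps to [m_B, m_A]; with 0 and 6 this is ReLu6.
    armnn::ActivationDescriptor activationDesc;
    activationDesc.m_Function = armnn::ActivationFunction::BoundedReLu;
    activationDesc.m_A        = 6.0f;   // upper bound
    activationDesc.m_B        = 0.0f;   // lower bound
    armnn::IConnectableLayer* activation = network->AddActivationLayer(activationDesc, "relu6");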
armnn::LayerType::Pad
@ Pad
armnn::Convolution3dDescriptor::m_StrideZ
uint32_t m_StrideZ
Stride value when proceeding through input for the depth dimension.
Definition: Descriptors.hpp:645
QuantizedLstmParams.hpp
armnn::DetectionPostProcessDescriptor::m_UseRegularNms
bool m_UseRegularNms
Use Regular NMS.
Definition: Descriptors.hpp:757
armnn::ViewsDescriptor::GetNumViews
uint32_t GetNumViews() const
Get the number of views.
Definition: Descriptors.cpp:301
armnn::QLstmDescriptor::m_HiddenStateScale
float m_HiddenStateScale
Hidden State quantization scale.
Definition: Descriptors.hpp:1436
armnn::LayerType::Rank
@ Rank
armnn::NormalizationDescriptor::m_K
float m_K
Kappa value used for the across channel normalization equation.
Definition: Descriptors.hpp:803
armnn::LayerType::Mean
@ Mean
armnn::LstmDescriptor::m_ProjectionEnabled
bool m_ProjectionEnabled
Enable/disable the projection layer.
Definition: Descriptors.hpp:1150
armnn::StridedSliceDescriptor::m_End
std::vector< int > m_End
End values for the input that will be sliced.
Definition: Descriptors.hpp:1344
armnn::PadDescriptor::m_PaddingMode
PaddingMode m_PaddingMode
Specifies the padding mode (Constant, Reflect or Symmetric).
Definition: Descriptors.hpp:1224
armnn::ConstTensor
A tensor defined by a TensorInfo (shape and data type) and an immutable backing store.
Definition: Tensor.hpp:329
armnn::ActivationFunction::Square
@ Square
armnn::IConnectableLayer
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:80
armnn::IInputSlot
An input connection slot for a layer.
Definition: INetwork.hpp:25
armnn::Pooling2dDescriptor::m_OutputShapeRounding
OutputShapeRounding m_OutputShapeRounding
The rounding method for the output shape. (Floor, Ceiling).
Definition: Descriptors.hpp:423
armnn::LayerType::Input
@ Input
armnn::TransposeConvolution2dDescriptor::m_BiasEnabled
bool m_BiasEnabled
Enable/disable bias.
Definition: Descriptors.hpp:1481
armnn::QLstmDescriptor::m_CellClip
float m_CellClip
Clipping threshold value for the cell state.
Definition: Descriptors.hpp:1414
armnn::LayerType::Resize
@ Resize
armnn::DetectionPostProcessDescriptor
Definition: Descriptors.hpp:713
armnn::QLstmDescriptor::m_LayerNormEnabled
bool m_LayerNormEnabled
Enable/disable layer normalization.
Definition: Descriptors.hpp:1424
armnn::ChannelShuffleDescriptor::m_NumGroups
uint32_t m_NumGroups
Number of groups for the channel shuffle operation.
Definition: Descriptors.hpp:1578
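Sketch only, assuming an armnn::INetworkPtr named network and an NHWC input:
    // Shuffle the channels of an NHWC tensor in 4 groups along axis 3.
    armnn::ChannelShuffleDescriptor channelShuffleDesc;
    channelShuffleDesc.m_NumGroups = 4;
    channelShuffleDesc.m_Axis      = 3;
    armnn::IConnectableLayer* shuffle = network->AddChannelShuffleLayer(channelShuffleDesc, "channel_shuffle");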
armnn::Convolution3dDescriptor::m_DataLayout
DataLayout m_DataLayout
The data layout to be used (NDHWC, NCDHW).
Definition: Descriptors.hpp:655
armnn::L2NormalizationDescriptor::m_Eps
float m_Eps
Used to avoid dividing by zero.
Definition: Descriptors.hpp:822
armnn::OriginsDescriptor::GetViewOrigin
const uint32_t * GetViewOrigin(uint32_t idx) const
Return the view origin at index idx.
Definition: Descriptors.cpp:197
armnn::DataType::Signed64
@ Signed64
armnn::TransposeConvolution2dDescriptor::m_DataLayout
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Definition: Descriptors.hpp:1483
armnn::LayerType::Convolution2d
@ Convolution2d
armnn::SliceDescriptor::m_Size
std::vector< unsigned int > m_Size
Size of the slice in each dimension.
Definition: Descriptors.hpp:1247
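Sketch of m_Begin/m_Size usage (network is an assumed armnn::INetworkPtr; offsets are examples):
    // Take a 1x3 window starting at offset (0, 2) from a rank-2 input.
    armnn::SliceDescriptor sliceDesc;
    sliceDesc.m_Begin = { 0, 2 };
    sliceDesc.m_Size  = { 1, 3 };
    armnn::IConnectableLayer* slice = network->AddSliceLayer(sliceDesc, "slice");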
armnn::Pooling2dDescriptor
A Pooling2dDescriptor for the Pooling2dLayer.
Definition: Descriptors.hpp:371
armnn::LstmDescriptor::m_ActivationFunc
uint32_t m_ActivationFunc
The activation function to use.
Definition: Descriptors.hpp:1140
armnn::BatchToSpaceNdDescriptor::m_DataLayout
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Definition: Descriptors.hpp:902
armnn::LayerType::Maximum
@ Maximum
armnn::TensorInfo::GetQuantizationOffset
int32_t GetQuantizationOffset() const
Definition: Tensor.cpp:478
armnn::LayerType::Activation
@ Activation
armnn::DepthwiseConvolution2dDescriptor
A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
Definition: Descriptors.hpp:659
armnn::LayerType::Lstm
@ Lstm
armnn::LayerType::Dequantize
@ Dequantize
armnn::LayerType::Convolution3d
@ Convolution3d
armnn::ReduceDescriptor
A ReduceDescriptor for the REDUCE operators.
Definition: Descriptors.hpp:1538
armnn::DepthwiseConvolution2dDescriptor::m_DilationX
uint32_t m_DilationX
Dilation factor value for width dimension.
Definition: Descriptors.hpp:704
armnnSerializer::GetFlatBufferPaddingMode
armnnSerializer::PaddingMode GetFlatBufferPaddingMode(armnn::PaddingMode paddingMode)
Definition: SerializerUtils.cpp:200
armnn::BatchNormalizationDescriptor::m_Eps
float m_Eps
Value to add to the variance. Used to avoid dividing by zero.
Definition: Descriptors.hpp:841
armnn::LstmDescriptor::m_ClippingThresCell
float m_ClippingThresCell
Clipping threshold value for the cell state.
Definition: Descriptors.hpp:1142
armnn::Pooling3dDescriptor::m_PoolDepth
uint32_t m_PoolDepth
Pooling depth value.
Definition: Descriptors.hpp:491
armnn::TensorShape::GetDimensionality
Dimensionality GetDimensionality() const
Function that returns the tensor dimensionality.
Definition: Tensor.hpp:92
armnn::LayerType
LayerType
When adding a new layer, adapt also the LastLayer enum value in the enum class LayerType below.
Definition: Types.hpp:491
armnn::StridedSliceDescriptor::m_NewAxisMask
int32_t m_NewAxisMask
New axis mask value.
Definition: Descriptors.hpp:1360
armnn::MeanDescriptor
A MeanDescriptor for the MeanLayer.
Definition: Descriptors.hpp:1172
armnn::QLstmDescriptor::m_PeepholeEnabled
bool m_PeepholeEnabled
Enable/disable peephole.
Definition: Descriptors.hpp:1420
armnn::LayerType::QLstm
@ QLstm
armnn::OptionalReferenceSwitch::value
const T & value() const
Definition: Optional.hpp:146
armnn::TileDescriptor
Definition: Descriptors.hpp:1640
armnn::SoftmaxDescriptor
A SoftmaxDescriptor for the SoftmaxLayer.
Definition: Descriptors.hpp:177
armnn::Pooling2dDescriptor::m_PoolType
PoolingAlgorithm m_PoolType
The pooling algorithm to use (Max, Average, L2).
Definition: Descriptors.hpp:405
armnn::InstanceNormalizationDescriptor::m_Eps
float m_Eps
Epsilon, small scalar value added to variance to avoid dividing by zero. Defaults to 1e-12f.
Definition: Descriptors.hpp:869
armnnSerializer::GetFlatBufferLogicalBinaryOperation
armnnSerializer::LogicalBinaryOperation GetFlatBufferLogicalBinaryOperation(armnn::LogicalBinaryOperation logicalBinaryOperation)
Definition: SerializerUtils.cpp:31
armnn::SpaceToDepthDescriptor
A SpaceToDepthDescriptor for the SpaceToDepthLayer.
Definition: Descriptors.hpp:1075
armnn::LayerType::Output
@ Output
armnn::LayerType::Constant
@ Constant
armnn::ViewsDescriptor::GetViewOrigin
const uint32_t * GetViewOrigin(uint32_t idx) const
Get the view origin at index idx.
Definition: Descriptors.cpp:311
armnn::ActivationFunction::Sigmoid
@ Sigmoid
armnn::SpaceToDepthDescriptor::m_DataLayout
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Definition: Descriptors.hpp:1095
armnn::INetwork
Main network class which provides the interface for building up a neural network.
Definition: INetwork.hpp:347
armnn::DepthwiseConvolution2dDescriptor::m_StrideX
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
Definition: Descriptors.hpp:700
armnn::DepthwiseConvolution2dDescriptor::m_PadTop
uint32_t m_PadTop
Padding top value in the height dimension.
Definition: Descriptors.hpp:696
armnn::IInputSlot::GetConnection
virtual const IOutputSlot * GetConnection() const =0