ArmNN 24.05
Serializer.cpp
1 //
2 // Copyright © 2017,2019-2024 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 #include "Serializer.hpp"
6 #include "SerializerUtils.hpp"
7 
8 #include <armnn/Descriptors.hpp>
9 #include <armnn/LstmParams.hpp>
10 #include <armnn/QuantizedLstmParams.hpp>
11 #include <armnn/utility/IgnoreUnused.hpp>
12 #include <armnn/utility/NumericCast.hpp>
13 
14 #include <fmt/format.h>
15 #include <iostream>
16 
17 using namespace armnn;
18 namespace fb = flatbuffers;
19 namespace serializer = armnnSerializer;
20 
21 namespace armnnSerializer
22 {
23 
24 ISerializer::ISerializer() : pSerializerImpl(new SerializerImpl())
25 {
26 }
27 
28 ISerializer::~ISerializer() = default;
29 
30 ISerializer* ISerializer::CreateRaw()
31 {
32  return new ISerializer();
33 }
34 
35 ISerializerPtr ISerializer::Create()
36 {
37  return ISerializerPtr(CreateRaw(), &ISerializer::Destroy);
38 }
39 
40 void ISerializer::Destroy(ISerializer* serializer)
41 {
42  delete serializer;
43 }
44 
45 void ISerializer::Serialize(const armnn::INetwork& inNetwork)
46 {
47  pSerializerImpl->Serialize(inNetwork);
48 }
49 
50 bool ISerializer::SaveSerializedToStream(std::ostream& stream)
51 {
52  return pSerializerImpl->SaveSerializedToStream(stream);
53 }
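// A minimal usage sketch of the ISerializer API defined above (illustrative only; the
// INetwork pointer 'network' and the std::ofstream are assumptions, not part of this file):
//
//   armnnSerializer::ISerializerPtr serializer = armnnSerializer::ISerializer::Create();
//   serializer->Serialize(*network);                      // network: armnn::INetworkPtr
//   std::ofstream file("model.armnn", std::ios::binary);  // requires <fstream>
//   serializer->SaveSerializedToStream(file);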
54 
55 serializer::ActivationFunction GetFlatBufferActivationFunction(armnn::ActivationFunction function)
56 {
57  switch (function)
58  {
59  case armnn::ActivationFunction::Sigmoid:
60  return serializer::ActivationFunction::ActivationFunction_Sigmoid;
61  case armnn::ActivationFunction::TanH:
62  return serializer::ActivationFunction::ActivationFunction_TanH;
63  case armnn::ActivationFunction::Linear:
64  return serializer::ActivationFunction::ActivationFunction_Linear;
65  case armnn::ActivationFunction::ReLu:
66  return serializer::ActivationFunction::ActivationFunction_ReLu;
67  case armnn::ActivationFunction::BoundedReLu:
68  return serializer::ActivationFunction::ActivationFunction_BoundedReLu;
69  case armnn::ActivationFunction::LeakyReLu:
70  return serializer::ActivationFunction::ActivationFunction_LeakyReLu;
71  case armnn::ActivationFunction::Abs:
72  return serializer::ActivationFunction::ActivationFunction_Abs;
73  case armnn::ActivationFunction::Sqrt:
74  return serializer::ActivationFunction::ActivationFunction_Sqrt;
75  case armnn::ActivationFunction::Square:
76  return serializer::ActivationFunction::ActivationFunction_Square;
77  case armnn::ActivationFunction::Elu:
78  return serializer::ActivationFunction::ActivationFunction_Elu;
79  case armnn::ActivationFunction::HardSwish:
80  return serializer::ActivationFunction::ActivationFunction_HardSwish;
81  case armnn::ActivationFunction::Gelu:
82  return serializer::ActivationFunction::ActivationFunction_Gelu;
83  default:
84  return serializer::ActivationFunction::ActivationFunction_Sigmoid;
85  }
86 }
87 
88 serializer::ArgMinMaxFunction GetFlatBufferArgMinMaxFunction(armnn::ArgMinMaxFunction function)
89 {
90  switch (function)
91  {
92  case armnn::ArgMinMaxFunction::Max:
93  return serializer::ArgMinMaxFunction::ArgMinMaxFunction_Max;
94  case armnn::ArgMinMaxFunction::Min:
95  default:
96  return serializer::ArgMinMaxFunction::ArgMinMaxFunction_Min;
97  }
98 }
99 
100 serializer::ScatterNdFunction GetFlatBufferScatterNdFunction(armnn::ScatterNdFunction function)
101 {
102  switch (function)
103  {
104  case armnn::ScatterNdFunction::Update:
105  return serializer::ScatterNdFunction::ScatterNdFunction_Update;
106  case armnn::ScatterNdFunction::Add:
107  return serializer::ScatterNdFunction::ScatterNdFunction_Add;
108  case armnn::ScatterNdFunction::Sub:
109  return serializer::ScatterNdFunction::ScatterNdFunction_Sub;
110  case armnn::ScatterNdFunction::Max:
111  return serializer::ScatterNdFunction::ScatterNdFunction_Max;
112  case armnn::ScatterNdFunction::Min:
113  return serializer::ScatterNdFunction::ScatterNdFunction_Min;
114  default:
115  return serializer::ScatterNdFunction::ScatterNdFunction_Update;
116  }
117 }
118 
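// Maps a layer GUID to a compact, sequential id used in the serialized graph: the first
// time a GUID is seen it is assigned the next id, otherwise the previously assigned id is
// returned.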
119 uint32_t SerializerStrategy::GetSerializedId(LayerGuid guid)
120 {
121  if (m_guidMap.empty())
122  {
123  m_guidMap.insert(std::make_pair(guid, m_layerId));
124  }
125  else if (m_guidMap.find(guid) == m_guidMap.end())
126  {
127  ++m_layerId;
128  m_guidMap.insert(std::make_pair(guid, m_layerId));
129 
130  return m_layerId;
131  }
132  return m_guidMap[guid];
133 }
134 
135 // Build FlatBuffer for Input Layer
136 void SerializerStrategy::SerializeInputLayer(const armnn::IConnectableLayer* layer, LayerBindingId id, const char* name)
137 {
138  IgnoreUnused(name);
139 
140  // Create FlatBuffer BaseLayer
141  auto flatBufferInputBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Input);
142 
143  // Create FlatBuffer BindableBaseLayer
144  auto flatBufferInputBindableBaseLayer = serializer::CreateBindableLayerBase(m_flatBufferBuilder,
145  flatBufferInputBaseLayer,
146  id);
147  // Push layer binding id to inputIds.
148  m_inputIds.push_back(id);
149 
150  // Create the FlatBuffer InputLayer
151  auto flatBufferInputLayer = serializer::CreateInputLayer(m_flatBufferBuilder, flatBufferInputBindableBaseLayer);
152 
153  // Add the AnyLayer to the FlatBufferLayers
154  CreateAnyLayer(flatBufferInputLayer.o, serializer::Layer::Layer_InputLayer);
155 }
156 
157 // Build FlatBuffer for Output Layer
158 void SerializerStrategy::SerializeOutputLayer(const armnn::IConnectableLayer* layer,
159  LayerBindingId id, const char* name)
160 {
161  IgnoreUnused(name);
162 
163  // Create FlatBuffer BaseLayer
164  auto flatBufferOutputBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Output);
165 
166  // Create FlatBuffer BindableBaseLayer
167  auto flatBufferOutputBindableBaseLayer = serializer::CreateBindableLayerBase(m_flatBufferBuilder,
168  flatBufferOutputBaseLayer,
169  id);
170  // Push layer binding id to outputIds.
171  m_outputIds.push_back(id);
172 
173  // Create the FlatBuffer OutputLayer
174  auto flatBufferOutputLayer = serializer::CreateOutputLayer(m_flatBufferBuilder, flatBufferOutputBindableBaseLayer);
175  // Add the AnyLayer to the FlatBufferLayers
176  CreateAnyLayer(flatBufferOutputLayer.o, serializer::Layer::Layer_OutputLayer);
177 }
178 
179 // Build FlatBuffer for Activation Layer
180 void SerializerStrategy::SerializeActivationLayer(const armnn::IConnectableLayer* layer,
181  const armnn::ActivationDescriptor& descriptor,
182  const char* name)
183 {
184  IgnoreUnused(name);
185 
186  // Create FlatBuffer BaseLayer
187  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Activation);
188 
189  // Create the FlatBuffer ActivationDescriptor
190  auto flatBufferDescriptor = CreateActivationDescriptor(m_flatBufferBuilder,
191  GetFlatBufferActivationFunction(descriptor.m_Function),
192  descriptor.m_A,
193  descriptor.m_B);
194 
195  // Create the FlatBuffer ActivationLayer
196  auto flatBufferAdditionLayer = CreateActivationLayer(m_flatBufferBuilder,
197  flatBufferBaseLayer,
198  flatBufferDescriptor);
199 
200  // Add the AnyLayer to the FlatBufferLayers
201  CreateAnyLayer(flatBufferAdditionLayer.o, serializer::Layer::Layer_ActivationLayer);
202 }
203 
204 // Build FlatBuffer for Addition Layer
205 void SerializerStrategy::SerializeAdditionLayer(const armnn::IConnectableLayer* layer, const char* name)
206 {
207  IgnoreUnused(name);
208 
209  // Create FlatBuffer BaseLayer
210  auto flatBufferAdditionBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Addition);
211 
212  // Create the FlatBuffer AdditionLayer
213  auto flatBufferAdditionLayer = serializer::CreateAdditionLayer(m_flatBufferBuilder, flatBufferAdditionBaseLayer);
214 
215  // Add the AnyLayer to the FlatBufferLayers
216  CreateAnyLayer(flatBufferAdditionLayer.o, serializer::Layer::Layer_AdditionLayer);
217 }
218 
219 // Build FlatBuffer for ArgMinMax Layer
220 void SerializerStrategy::SerializeArgMinMaxLayer(const armnn::IConnectableLayer *layer,
221  const armnn::ArgMinMaxDescriptor& descriptor,
222  const char *name)
223 {
224  IgnoreUnused(name);
225 
226  // Create FlatBuffer BaseLayer
227  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_ArgMinMax);
228 
229  // Create FlatBuffer Descriptor
230  auto flatBufferDescriptor = CreateArgMinMaxDescriptor(m_flatBufferBuilder,
231  GetFlatBufferArgMinMaxFunction(descriptor.m_Function),
232  descriptor.m_Axis);
233 
234  // Create FlatBuffer ArgMinMaxLayer
235  auto flatBufferLayer = CreateArgMinMaxLayer(m_flatBufferBuilder,
236  flatBufferBaseLayer,
237  flatBufferDescriptor);
238 
239  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_ArgMinMaxLayer);
240 }
241 
242 void SerializerStrategy::SerializeBatchMatMulLayer(const armnn::IConnectableLayer* layer,
243  const armnn::BatchMatMulDescriptor& descriptor,
244  const char* name)
245 {
246  IgnoreUnused(name);
247 
248  // Create FlatBuffer BaseLayer
249  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_BatchMatMul);
250 
251  // Create the FlatBuffer BatchMatMulDescriptor
252  auto flatBufferDescriptor = CreateBatchMatMulDescriptor(m_flatBufferBuilder,
253  descriptor.m_TransposeX,
254  descriptor.m_TransposeY,
255  descriptor.m_AdjointX,
256  descriptor.m_AdjointY,
257  GetFlatBufferDataLayout(descriptor.m_DataLayoutX),
258  GetFlatBufferDataLayout(descriptor.m_DataLayoutY));
259 
260  // Create the FlatBuffer BatchMatMulLayer
261  auto flatBufferBatchMatMulLayer = CreateBatchMatMulLayer(m_flatBufferBuilder,
262  flatBufferBaseLayer,
263  flatBufferDescriptor);
264 
265  // Add the AnyLayer to the FlatBufferLayers
266  CreateAnyLayer(flatBufferBatchMatMulLayer.o, serializer::Layer::Layer_BatchMatMulLayer);
267 }
268 
269 // Build FlatBuffer for BatchToSpaceNd Layer
270 void SerializerStrategy::SerializeBatchToSpaceNdLayer(const armnn::IConnectableLayer* layer,
271  const armnn::BatchToSpaceNdDescriptor& descriptor,
272  const char* name)
273 {
274  IgnoreUnused(name);
275 
276  // Create FlatBuffer BaseLayer
277  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_BatchToSpaceNd);
278 
279  std::vector<unsigned int> crops;
280  crops.reserve(descriptor.m_Crops.size() * 2);
281  for (auto& crop : descriptor.m_Crops)
282  {
283  crops.push_back(crop.first);
284  crops.push_back(crop.second);
285  }
286 
287  auto flatBufferDescriptor =
288  CreateBatchToSpaceNdDescriptor(m_flatBufferBuilder,
289  m_flatBufferBuilder.CreateVector(descriptor.m_BlockShape),
290  m_flatBufferBuilder.CreateVector(crops),
291  GetFlatBufferDataLayout(descriptor.m_DataLayout));
292 
293  auto flatBufferLayer = serializer::CreateBatchToSpaceNdLayer(m_flatBufferBuilder,
294  flatBufferBaseLayer,
295  flatBufferDescriptor);
296 
297  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_BatchToSpaceNdLayer);
298 }
299 
300 void SerializerStrategy::SerializeBatchNormalizationLayer(
301  const armnn::IConnectableLayer* layer,
302  const armnn::BatchNormalizationDescriptor& batchNormDescriptor,
303  const std::vector<armnn::ConstTensor>& constants,
304  const char* name)
305 {
306  IgnoreUnused(name);
307 
308  const armnn::ConstTensor& mean = constants[0];
309  const armnn::ConstTensor& variance = constants[1];
310  const armnn::ConstTensor& beta = constants[2];
311  const armnn::ConstTensor& gamma = constants[3];
312 
313  auto fbBatchNormalizationBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_BatchNormalization);
314  auto fbBatchNormalizationDescriptor = serializer::CreateBatchNormalizationDescriptor(
315  m_flatBufferBuilder,
316  batchNormDescriptor.m_Eps,
317  GetFlatBufferDataLayout(batchNormDescriptor.m_DataLayout));
318 
319  auto fbMeanConstTensorInfo = CreateConstTensorInfo(mean);
320  auto fbVarianceConstTensorInfo = CreateConstTensorInfo(variance);
321  auto fbBetaConstTensorInfo = CreateConstTensorInfo(beta);
322  auto fbGammaConstTensorInfo = CreateConstTensorInfo(gamma);
323  auto fbBatchNormalizationLayer = serializer::CreateBatchNormalizationLayer(m_flatBufferBuilder,
324  fbBatchNormalizationBaseLayer,
325  fbBatchNormalizationDescriptor,
326  fbMeanConstTensorInfo,
327  fbVarianceConstTensorInfo,
328  fbBetaConstTensorInfo,
329  fbGammaConstTensorInfo);
330 
331  CreateAnyLayer(fbBatchNormalizationLayer.o, serializer::Layer::Layer_BatchNormalizationLayer);
332 }
333 
334 void SerializerStrategy::SerializeCastLayer(const armnn::IConnectableLayer* layer,
335  const char* name)
336 {
337  IgnoreUnused(name);
338 
339  auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Cast);
340  auto fbCastLayer = serializer::CreateCastLayer(m_flatBufferBuilder, fbBaseLayer);
341  CreateAnyLayer(fbCastLayer.o, serializer::Layer::Layer_CastLayer);
342 }
343 
344 void SerializerStrategy::SerializeChannelShuffleLayer(const armnn::IConnectableLayer* layer,
345  const armnn::ChannelShuffleDescriptor& descriptor,
346  const char* name)
347 {
348  IgnoreUnused(name);
349  auto fbDescriptor = CreateChannelShuffleDescriptor(m_flatBufferBuilder,
350  descriptor.m_Axis,
351  descriptor.m_NumGroups);
352  auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_ChannelShuffle);
353  auto fbChannelShuffleLayer = serializer::CreateChannelShuffleLayer(m_flatBufferBuilder, fbBaseLayer, fbDescriptor);
354  CreateAnyLayer(fbChannelShuffleLayer.o, serializer::Layer::Layer_ChannelShuffleLayer);
355 }
356 
357 void SerializerStrategy::SerializeComparisonLayer(const armnn::IConnectableLayer* layer,
358  const armnn::ComparisonDescriptor& descriptor,
359  const char* name)
360 {
361  IgnoreUnused(name);
362 
363  auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Comparison);
364  auto fbDescriptor = serializer::CreateComparisonDescriptor(
365  m_flatBufferBuilder,
366  GetFlatBufferComparisonOperation(descriptor.m_Operation));
367 
368  auto fbLayer = serializer::CreateComparisonLayer(m_flatBufferBuilder, fbBaseLayer, fbDescriptor);
369  CreateAnyLayer(fbLayer.o, serializer::Layer::Layer_ComparisonLayer);
370 }
371 
372 // Build FlatBuffer for Constant Layer
373 void SerializerStrategy::SerializeConstantLayer(const armnn::IConnectableLayer* layer,
374  const std::vector<armnn::ConstTensor>& constants,
375  const char* name)
376 {
377  IgnoreUnused(name);
378 
379  armnn::ConstTensor input = constants[0];
380 
381  // Create FlatBuffer BaseLayer
382  auto flatBufferConstantBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Constant);
383 
384  auto flatBufferConstTensorInfo = CreateConstTensorInfo(input);
385 
386  // Create the FlatBuffer ConstantLayer
387  auto flatBufferLayer = CreateConstantLayer(m_flatBufferBuilder,
388  flatBufferConstantBaseLayer,
389  flatBufferConstTensorInfo);
390 
391  // Add the AnyLayer to the FlatBufferLayers
392  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_ConstantLayer);
393 }
394 
395 // Build FlatBuffer for Convolution2dLayer
396 void SerializerStrategy::SerializeConvolution2dLayer(const armnn::IConnectableLayer* layer,
397  const armnn::Convolution2dDescriptor& descriptor,
398  const char* name)
399 {
400  IgnoreUnused(name);
401 
402  // Create FlatBuffer BaseLayer
403  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Convolution2d);
404 
405  auto flatBufferDescriptor = CreateConvolution2dDescriptor(m_flatBufferBuilder,
406  descriptor.m_PadLeft,
407  descriptor.m_PadRight,
408  descriptor.m_PadTop,
409  descriptor.m_PadBottom,
410  descriptor.m_StrideX,
411  descriptor.m_StrideY,
412  descriptor.m_DilationX,
413  descriptor.m_DilationY,
414  descriptor.m_BiasEnabled,
415  GetFlatBufferDataLayout(descriptor.m_DataLayout));
416 
417  // Create the FlatBuffer Convolution2dLayer
418  auto flatBufferLayer = CreateConvolution2dLayer(m_flatBufferBuilder,
419  flatBufferBaseLayer,
420  flatBufferDescriptor);
421 
422  // Add the AnyLayer to the FlatBufferLayers
423  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_Convolution2dLayer);
424 }
425 
426 // Build FlatBuffer for Convolution3dLayer
427 void SerializerStrategy::SerializeConvolution3dLayer(const armnn::IConnectableLayer* layer,
428  const armnn::Convolution3dDescriptor& descriptor,
429  const char* name)
430 {
431  IgnoreUnused(name);
432 
433  // Create FlatBuffer BaseLayer
434  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Convolution3d);
435 
436  auto flatBufferDescriptor = CreateConvolution3dDescriptor(m_flatBufferBuilder,
437  descriptor.m_PadLeft,
438  descriptor.m_PadRight,
439  descriptor.m_PadTop,
440  descriptor.m_PadBottom,
441  descriptor.m_PadFront,
442  descriptor.m_PadBack,
443  descriptor.m_StrideX,
444  descriptor.m_StrideY,
445  descriptor.m_StrideZ,
446  descriptor.m_DilationX,
447  descriptor.m_DilationY,
448  descriptor.m_DilationZ,
449  descriptor.m_BiasEnabled,
450  GetFlatBufferDataLayout(descriptor.m_DataLayout));
451 
452  // Create the FlatBuffer Convolution3dLayer
453  auto flatBufferLayer = CreateConvolution3dLayer(m_flatBufferBuilder,
454  flatBufferBaseLayer,
455  flatBufferDescriptor);
456 
457  // Add the AnyLayer to the FlatBufferLayers
458  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_Convolution3dLayer);
459 }
460 
461 void SerializerStrategy::SerializeDepthToSpaceLayer(const armnn::IConnectableLayer* layer,
462  const armnn::DepthToSpaceDescriptor& descriptor,
463  const char* name)
464 {
465  IgnoreUnused(name);
466 
467  auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_DepthToSpace);
468  auto fbDescriptor = CreateDepthToSpaceDescriptor(m_flatBufferBuilder,
469  descriptor.m_BlockSize,
470  GetFlatBufferDataLayout(descriptor.m_DataLayout));
471 
472  auto fbLayer = serializer::CreateDepthToSpaceLayer(m_flatBufferBuilder, fbBaseLayer, fbDescriptor);
473 
474  CreateAnyLayer(fbLayer.o, serializer::Layer::Layer_DepthToSpaceLayer);
475 }
476 
477 void SerializerStrategy::SerializeDepthwiseConvolution2dLayer(const armnn::IConnectableLayer* layer,
478  const armnn::DepthwiseConvolution2dDescriptor& descriptor,
479  const char* name)
480 {
481  IgnoreUnused(name);
482 
483  auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_DepthwiseConvolution2d);
484  auto fbDescriptor = CreateDepthwiseConvolution2dDescriptor(m_flatBufferBuilder,
485  descriptor.m_PadLeft,
486  descriptor.m_PadRight,
487  descriptor.m_PadTop,
488  descriptor.m_PadBottom,
489  descriptor.m_StrideX,
490  descriptor.m_StrideY,
491  descriptor.m_DilationX,
492  descriptor.m_DilationY,
493  descriptor.m_BiasEnabled,
494  GetFlatBufferDataLayout(descriptor.m_DataLayout));
495 
496  auto flatBufferLayer = CreateDepthwiseConvolution2dLayer(m_flatBufferBuilder,
497  fbBaseLayer,
498  fbDescriptor);
499 
500  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_DepthwiseConvolution2dLayer);
501 }
502 
503 void SerializerStrategy::SerializeDequantizeLayer(const armnn::IConnectableLayer* layer,
504  const char* name)
505 {
506  IgnoreUnused(name);
507 
508  auto fbDequantizeBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Dequantize);
509  auto fbDequantizeLayer = serializer::CreateDequantizeLayer(m_flatBufferBuilder, fbDequantizeBaseLayer);
510 
511  CreateAnyLayer(fbDequantizeLayer.o, serializer::Layer::Layer_DequantizeLayer);
512 }
513 
514 void SerializerStrategy::SerializeDetectionPostProcessLayer(const armnn::IConnectableLayer* layer,
515  const armnn::DetectionPostProcessDescriptor& descriptor,
516  const std::vector<armnn::ConstTensor>& constants,
517  const char* name)
518 {
519  IgnoreUnused(name);
520 
521  const armnn::ConstTensor& anchors = constants[0];
522 
523  auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_DetectionPostProcess);
524  auto fbDescriptor = CreateDetectionPostProcessDescriptor(m_flatBufferBuilder,
525  descriptor.m_MaxDetections,
526  descriptor.m_MaxClassesPerDetection,
527  descriptor.m_DetectionsPerClass,
528  descriptor.m_NmsScoreThreshold,
529  descriptor.m_NmsIouThreshold,
530  descriptor.m_NumClasses,
531  descriptor.m_UseRegularNms,
532  descriptor.m_ScaleX,
533  descriptor.m_ScaleY,
534  descriptor.m_ScaleW,
535  descriptor.m_ScaleH);
536 
537  flatbuffers::Offset<serializer::ConstTensor> fbAnchorsConstTensorInfo = CreateConstTensorInfo(anchors);
538 
539  auto flatBufferLayer = CreateDetectionPostProcessLayer(m_flatBufferBuilder,
540  fbBaseLayer,
541  fbDescriptor,
542  fbAnchorsConstTensorInfo);
543 
544  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_DetectionPostProcessLayer);
545 }
546 
547 void SerializerStrategy::SerializeDivisionLayer(const armnn::IConnectableLayer* layer, const char* name)
548 {
549  IgnoreUnused(name);
550 
551  auto fbDivisionBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Division);
552  auto fbDivisionLayer = serializer::CreateDivisionLayer(m_flatBufferBuilder, fbDivisionBaseLayer);
553 
554  CreateAnyLayer(fbDivisionLayer.o, serializer::Layer::Layer_DivisionLayer);
555 }
556 
557 void SerializerStrategy::SerializeElementwiseBinaryLayer(const armnn::IConnectableLayer* layer,
558  const armnn::ElementwiseBinaryDescriptor& descriptor,
559  const char* name)
560 {
561  IgnoreUnused(name);
562 
563  auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_ElementwiseBinary);
564  auto fbDescriptor = serializer::CreateElementwiseBinaryDescriptor(
565  m_flatBufferBuilder,
566  GetFlatBufferBinaryOperation(descriptor.m_Operation));
567 
568  auto fbLayer = serializer::CreateElementwiseBinaryLayer(m_flatBufferBuilder, fbBaseLayer, fbDescriptor);
569  CreateAnyLayer(fbLayer.o, serializer::Layer::Layer_ElementwiseBinaryLayer);
570 }
571 
572 void SerializerStrategy::SerializeElementwiseUnaryLayer(const armnn::IConnectableLayer* layer,
573  const armnn::ElementwiseUnaryDescriptor& descriptor,
574  const char* name)
575 {
576  IgnoreUnused(name);
577 
578  auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_ElementwiseUnary);
579  auto fbDescriptor = serializer::CreateElementwiseUnaryDescriptor(
580  m_flatBufferBuilder,
581  GetFlatBufferUnaryOperation(descriptor.m_Operation));
582 
583  auto fbLayer = serializer::CreateElementwiseUnaryLayer(m_flatBufferBuilder, fbBaseLayer, fbDescriptor);
584  CreateAnyLayer(fbLayer.o, serializer::Layer::Layer_ElementwiseUnaryLayer);
585 }
586 
587 void SerializerStrategy::SerializeFillLayer(const armnn::IConnectableLayer* layer,
588  const armnn::FillDescriptor& fillDescriptor,
589  const char* name)
590 {
591  IgnoreUnused(name);
592 
593  auto fbFillBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Fill);
594 
595  auto fbDescriptor = serializer::CreateFillDescriptor(m_flatBufferBuilder, fillDescriptor.m_Value);
596 
597  auto fbFillLayer = serializer::CreateFillLayer(m_flatBufferBuilder, fbFillBaseLayer, fbDescriptor);
598 
599  CreateAnyLayer(fbFillLayer.o, serializer::Layer::Layer_FillLayer);
600 }
601 
602 void SerializerStrategy::SerializeFloorLayer(const armnn::IConnectableLayer *layer, const char *name)
603 {
604  IgnoreUnused(name);
605 
606  auto flatBufferFloorBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Floor);
607  auto flatBufferFloorLayer = serializer::CreateFloorLayer(m_flatBufferBuilder, flatBufferFloorBaseLayer);
608 
609  CreateAnyLayer(flatBufferFloorLayer.o, serializer::Layer::Layer_FloorLayer);
610 }
611 
612 void SerializerStrategy::SerializeGatherLayer(const armnn::IConnectableLayer* layer,
613  const armnn::GatherDescriptor& gatherDescriptor,
614  const char* name)
615 {
616  IgnoreUnused(name);
617 
618  auto fbGatherDescriptor = CreateGatherDescriptor(m_flatBufferBuilder,
619  gatherDescriptor.m_Axis);
620  auto fbGatherBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Gather);
621  auto flatBufferLayer = serializer::CreateGatherLayer(m_flatBufferBuilder, fbGatherBaseLayer, fbGatherDescriptor);
622 
623  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_GatherLayer);
624 }
625 
626 void SerializerStrategy::SerializeGatherNdLayer(const armnn::IConnectableLayer* layer,
627  const char* name)
628 {
629  IgnoreUnused(name);
630 
631  auto fbGatherNdBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_GatherNd);
632  auto flatBufferLayer = serializer::CreateGatherNdLayer(m_flatBufferBuilder, fbGatherNdBaseLayer);
633 
634  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_GatherNdLayer);
635 }
636 
637 void SerializerStrategy::SerializeInstanceNormalizationLayer(
638  const armnn::IConnectableLayer* layer,
639  const armnn::InstanceNormalizationDescriptor& instanceNormalizationDescriptor,
640  const char* name)
641 {
642  IgnoreUnused(name);
643 
644  auto fbDescriptor = serializer::CreateInstanceNormalizationDescriptor(
645  m_flatBufferBuilder,
646  instanceNormalizationDescriptor.m_Gamma,
647  instanceNormalizationDescriptor.m_Beta,
648  instanceNormalizationDescriptor.m_Eps,
649  GetFlatBufferDataLayout(instanceNormalizationDescriptor.m_DataLayout));
650 
651  auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_InstanceNormalization);
652  auto fbLayer = serializer::CreateInstanceNormalizationLayer(m_flatBufferBuilder, fbBaseLayer, fbDescriptor);
653 
654  CreateAnyLayer(fbLayer.o, serializer::Layer::Layer_InstanceNormalizationLayer);
655 }
656 
657 void SerializerStrategy::SerializeL2NormalizationLayer(const armnn::IConnectableLayer* layer,
658  const armnn::L2NormalizationDescriptor& l2NormalizationDescriptor,
659  const char* name)
660 {
661  IgnoreUnused(name);
662 
663  // Create FlatBuffer BaseLayer
664  auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_L2Normalization);
665 
666  // Create the FlatBuffer L2Normalization Descriptor
667  auto fbDescriptor = serializer::CreateL2NormalizationDescriptor(
668  m_flatBufferBuilder,
669  GetFlatBufferDataLayout(l2NormalizationDescriptor.m_DataLayout),
670  l2NormalizationDescriptor.m_Eps);
671 
672  // Create FlatBuffer layer
673  auto fbLayer = serializer::CreateL2NormalizationLayer(m_flatBufferBuilder, fbBaseLayer, fbDescriptor);
674 
675  CreateAnyLayer(fbLayer.o, serializer::Layer::Layer_L2NormalizationLayer);
676 }
677 
678 void SerializerStrategy::SerializeLogicalBinaryLayer(const armnn::IConnectableLayer* layer,
679  const armnn::LogicalBinaryDescriptor& descriptor,
680  const char* name)
681 {
682  IgnoreUnused(name);
683 
684  auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_LogicalBinary);
685  auto fbDescriptor = serializer::CreateLogicalBinaryDescriptor(
686  m_flatBufferBuilder,
687  GetFlatBufferLogicalBinaryOperation(descriptor.m_Operation));
688 
689  auto fbLayer = serializer::CreateLogicalBinaryLayer(m_flatBufferBuilder, fbBaseLayer, fbDescriptor);
690  CreateAnyLayer(fbLayer.o, serializer::Layer::Layer_LogicalBinaryLayer);
691 }
692 
693 void SerializerStrategy::SerializeLogSoftmaxLayer(const armnn::IConnectableLayer* layer,
694  const armnn::LogSoftmaxDescriptor& logSoftmaxDescriptor,
695  const char* name)
696 {
697  IgnoreUnused(name);
698 
699  // Create FlatBuffer BaseLayer
700  auto flatBufferLogSoftmaxBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_LogSoftmax);
701 
702  // Create the FlatBuffer LogSoftmaxDescriptor
703  auto flatBufferLogSoftmaxDesc =
704  serializer::CreateLogSoftmaxDescriptor(m_flatBufferBuilder,
705  logSoftmaxDescriptor.m_Beta,
706  logSoftmaxDescriptor.m_Axis);
707 
708  // Create the FlatBuffer LogSoftmaxLayer
709  auto flatBufferLogSoftmaxLayer =
710  serializer::CreateLogSoftmaxLayer(m_flatBufferBuilder,
711  flatBufferLogSoftmaxBaseLayer,
712  flatBufferLogSoftmaxDesc);
713 
714  CreateAnyLayer(flatBufferLogSoftmaxLayer.o, serializer::Layer::Layer_LogSoftmaxLayer);
715 }
716 
717 void SerializerStrategy::SerializeLstmLayer(const armnn::IConnectableLayer* layer,
718  const armnn::LstmDescriptor& descriptor,
719  const std::vector<armnn::ConstTensor>& constants,
720  const char* name)
721 {
722  IgnoreUnused(name);
723 
724  auto fbLstmBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Lstm);
725 
726  auto fbLstmDescriptor = serializer::CreateLstmDescriptor(
727  m_flatBufferBuilder,
728  descriptor.m_ActivationFunc,
729  descriptor.m_ClippingThresCell,
730  descriptor.m_ClippingThresProj,
731  descriptor.m_CifgEnabled,
732  descriptor.m_PeepholeEnabled,
733  descriptor.m_ProjectionEnabled,
734  descriptor.m_LayerNormEnabled);
735 
736  // Index for constants vector
737  std::size_t i = 0;
738 
739  // Get mandatory/basic input parameters
740  auto inputToForgetWeights = CreateConstTensorInfo(constants[i++]); //InputToForgetWeights
741  auto inputToCellWeights = CreateConstTensorInfo(constants[i++]); //InputToCellWeights
742  auto inputToOutputWeights = CreateConstTensorInfo(constants[i++]); //InputToOutputWeights
743  auto recurrentToForgetWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToForgetWeights
744  auto recurrentToCellWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToCellWeights
745  auto recurrentToOutputWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToOutputWeights
746  auto forgetGateBias = CreateConstTensorInfo(constants[i++]); //ForgetGateBias
747  auto cellBias = CreateConstTensorInfo(constants[i++]); //CellBias
748  auto outputGateBias = CreateConstTensorInfo(constants[i++]); //OutputGateBias
749 
750 
751 
752  //Define optional parameters, these will be set depending on configuration in Lstm descriptor
753  flatbuffers::Offset<serializer::ConstTensor> inputToInputWeights;
754  flatbuffers::Offset<serializer::ConstTensor> recurrentToInputWeights;
755  flatbuffers::Offset<serializer::ConstTensor> cellToInputWeights;
756  flatbuffers::Offset<serializer::ConstTensor> inputGateBias;
757  flatbuffers::Offset<serializer::ConstTensor> projectionWeights;
758  flatbuffers::Offset<serializer::ConstTensor> projectionBias;
759  flatbuffers::Offset<serializer::ConstTensor> cellToForgetWeights;
760  flatbuffers::Offset<serializer::ConstTensor> cellToOutputWeights;
761  flatbuffers::Offset<serializer::ConstTensor> inputLayerNormWeights;
762  flatbuffers::Offset<serializer::ConstTensor> forgetLayerNormWeights;
763  flatbuffers::Offset<serializer::ConstTensor> cellLayerNormWeights;
764  flatbuffers::Offset<serializer::ConstTensor> outputLayerNormWeights;
765 
766  if (!descriptor.m_CifgEnabled)
767  {
768  inputToInputWeights = CreateConstTensorInfo(constants[i++]); //InputToInputWeights
769  recurrentToInputWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToInputWeights
770  inputGateBias = CreateConstTensorInfo(constants[i++]); //InputGateBias
771  }
772 
773  if (descriptor.m_PeepholeEnabled)
774  {
775  if (!descriptor.m_CifgEnabled)
776  {
777  cellToInputWeights = CreateConstTensorInfo(constants[i++]); //CellToInputWeights
778  }
779  cellToForgetWeights = CreateConstTensorInfo(constants[i++]); //CellToForgetWeights
780  cellToOutputWeights = CreateConstTensorInfo(constants[i++]); //CellToOutputWeights
781  }
782 
783  if (descriptor.m_ProjectionEnabled)
784  {
785  projectionWeights = CreateConstTensorInfo(constants[i++]); //ProjectionWeights
786  projectionBias = CreateConstTensorInfo(constants[i++]); //ProjectionBias
787  }
788 
789  if (descriptor.m_LayerNormEnabled)
790  {
791  if (!descriptor.m_CifgEnabled)
792  {
793  inputLayerNormWeights = CreateConstTensorInfo(constants[i++]); //InputLayerNormWeights
794  }
795  forgetLayerNormWeights = CreateConstTensorInfo(constants[i++]); //ForgetLayerNormWeights
796  cellLayerNormWeights = CreateConstTensorInfo(constants[i++]); //CellLayerNormWeights
797  outputLayerNormWeights = CreateConstTensorInfo(constants[i++]); //OutputLayerNormWeights
798  }
799 
800  auto fbLstmParams = serializer::CreateLstmInputParams(
801  m_flatBufferBuilder,
802  inputToForgetWeights,
803  inputToCellWeights,
804  inputToOutputWeights,
805  recurrentToForgetWeights,
806  recurrentToCellWeights,
807  recurrentToOutputWeights,
808  forgetGateBias,
809  cellBias,
810  outputGateBias,
811  inputToInputWeights,
812  recurrentToInputWeights,
813  cellToInputWeights,
814  inputGateBias,
815  projectionWeights,
816  projectionBias,
817  cellToForgetWeights,
818  cellToOutputWeights,
819  inputLayerNormWeights,
820  forgetLayerNormWeights,
821  cellLayerNormWeights,
822  outputLayerNormWeights);
823 
824  auto fbLstmLayer = serializer::CreateLstmLayer(
825  m_flatBufferBuilder,
826  fbLstmBaseLayer,
827  fbLstmDescriptor,
828  fbLstmParams);
829 
830  CreateAnyLayer(fbLstmLayer.o, serializer::Layer::Layer_LstmLayer);
831 }
832 
833 void SerializerStrategy::SerializeMaximumLayer(const armnn::IConnectableLayer* layer, const char* name)
834 {
835  IgnoreUnused(name);
836 
837  auto fbMaximumBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Maximum);
838  auto fbMaximumLayer = serializer::CreateMaximumLayer(m_flatBufferBuilder, fbMaximumBaseLayer);
839 
840  CreateAnyLayer(fbMaximumLayer.o, serializer::Layer::Layer_MaximumLayer);
841 }
842 
843 void SerializerStrategy::SerializeMeanLayer(const armnn::IConnectableLayer* layer,
844  const armnn::MeanDescriptor& descriptor,
845  const char* name)
846 {
847  IgnoreUnused(name);
848 
849  auto fbMeanBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Mean);
850  auto fbMeanDescriptor = serializer::CreateMeanDescriptor(m_flatBufferBuilder,
851  m_flatBufferBuilder.CreateVector(descriptor.m_Axis),
852  descriptor.m_KeepDims);
853 
854  auto fbMeanLayer = serializer::CreateMeanLayer(m_flatBufferBuilder,
855  fbMeanBaseLayer,
856  fbMeanDescriptor);
857 
858  CreateAnyLayer(fbMeanLayer.o, serializer::Layer::Layer_MeanLayer);
859 }
860 
861 void SerializerStrategy::SerializeMinimumLayer(const armnn::IConnectableLayer* layer, const char* name)
862 {
863  IgnoreUnused(name);
864 
865  auto fbMinimumBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Minimum);
866  auto fbMinimumLayer = serializer::CreateMinimumLayer(m_flatBufferBuilder, fbMinimumBaseLayer);
867 
868  CreateAnyLayer(fbMinimumLayer.o, serializer::Layer::Layer_MinimumLayer);
869 }
870 
871 void SerializerStrategy::SerializeMergeLayer(const armnn::IConnectableLayer* layer, const char* name)
872 {
873  IgnoreUnused(name);
874 
875  auto fbMergeBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Merge);
876  auto fbMergeLayer = serializer::CreateMergeLayer(m_flatBufferBuilder, fbMergeBaseLayer);
877 
878  CreateAnyLayer(fbMergeLayer.o, serializer::Layer::Layer_MergeLayer);
879 }
880 
881 void SerializerStrategy::SerializeConcatLayer(const armnn::IConnectableLayer* layer,
882  const armnn::ConcatDescriptor& concatDescriptor,
883  const char* name)
884 {
885  IgnoreUnused(name);
886 
887  auto flatBufferConcatBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Concat);
888 
889  std::vector<flatbuffers::Offset<UintVector>> views;
890  for (unsigned int v = 0; v < concatDescriptor.GetNumViews(); ++v)
891  {
892  const uint32_t* origin = concatDescriptor.GetViewOrigin(v);
893  std::vector<uint32_t> origins;
894  for (unsigned int d = 0; d < concatDescriptor.GetNumDimensions(); ++d)
895  {
896  origins.push_back(origin[d]);
897  }
898  auto view = m_flatBufferBuilder.CreateVector(origins);
899  auto uintVector = CreateUintVector(m_flatBufferBuilder, view);
900  views.push_back(uintVector);
901  }
902 
903  auto flatBufferConcatDescriptor = CreateOriginsDescriptor(m_flatBufferBuilder,
904  concatDescriptor.GetConcatAxis(),
905  concatDescriptor.GetNumViews(),
906  concatDescriptor.GetNumDimensions(),
907  m_flatBufferBuilder.CreateVector(views));
908 
909  auto flatBufferLayer = CreateConcatLayer(m_flatBufferBuilder,
910  flatBufferConcatBaseLayer,
911  flatBufferConcatDescriptor);
912 
913  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_ConcatLayer);
914 }
915 
916 void SerializerStrategy::SerializeMultiplicationLayer(const armnn::IConnectableLayer* layer, const char* name)
917 {
918  IgnoreUnused(name);
919 
920  auto fbMultiplicationBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Multiplication);
921  auto fbMultiplicationLayer = serializer::CreateMultiplicationLayer(m_flatBufferBuilder,
922  fbMultiplicationBaseLayer);
923 
924  CreateAnyLayer(fbMultiplicationLayer.o, serializer::Layer::Layer_MultiplicationLayer);
925 }
926 
927 void SerializerStrategy::SerializePadLayer(const armnn::IConnectableLayer* layer,
928  const armnn::PadDescriptor& padDescriptor,
929  const char* name)
930 {
931  IgnoreUnused(name);
932 
933  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Pad);
934 
935  std::vector<unsigned int> padList;
936  for (auto& p: padDescriptor.m_PadList)
937  {
938  padList.push_back(p.first);
939  padList.push_back(p.second);
940  }
941 
942  auto flatBufferPadDesc = serializer::CreatePadDescriptor(m_flatBufferBuilder,
943  m_flatBufferBuilder.CreateVector(padList),
944  padDescriptor.m_PadValue,
945  GetFlatBufferPaddingMode(padDescriptor.m_PaddingMode));
946 
947  auto flatBufferPadLayer = serializer::CreatePadLayer(m_flatBufferBuilder,
948  flatBufferBaseLayer,
949  flatBufferPadDesc);
950 
951  CreateAnyLayer(flatBufferPadLayer.o, serializer::Layer::Layer_PadLayer);
952 }
953 
954 void SerializerStrategy::SerializePermuteLayer(const armnn::IConnectableLayer* layer,
955  const armnn::PermuteDescriptor& permuteDescriptor,
956  const char* name)
957 {
958  IgnoreUnused(name);
959 
960  // Create FlatBuffer BaseLayer
961  auto flatBufferPermuteBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Permute);
962 
963  std::vector<unsigned int> dimMappings;
964  for (unsigned int i=0; i<permuteDescriptor.m_DimMappings.GetSize(); ++i)
965  {
966  dimMappings.push_back(permuteDescriptor.m_DimMappings[i]);
967  }
968 
969  auto flatBufferPermuteDesc = serializer::CreatePermuteDescriptor(m_flatBufferBuilder,
970  m_flatBufferBuilder.CreateVector(dimMappings));
971 
972  // Create the FlatBuffer PermuteLayer
973  auto flatBufferPermuteLayer = serializer::CreatePermuteLayer(m_flatBufferBuilder,
974  flatBufferPermuteBaseLayer,
975  flatBufferPermuteDesc);
976 
977  // Add the AnyLayer to the FlatBufferLayers
978  CreateAnyLayer(flatBufferPermuteLayer.o, serializer::Layer::Layer_PermuteLayer);
979 }
980 
981 // Build FlatBuffer for Rank Layer
982 void SerializerStrategy::SerializeRankLayer(const armnn::IConnectableLayer* layer,
983  const char* name)
984 {
985  IgnoreUnused(name);
986  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Rank);
987  auto flatBufferRankLayer = serializer::CreateRankLayer(m_flatBufferBuilder, flatBufferBaseLayer);
988 
989  CreateAnyLayer(flatBufferRankLayer.o, serializer::Layer::Layer_RankLayer);
990 }
991 
992 void SerializerStrategy::SerializeReduceLayer(const armnn::IConnectableLayer* layer,
993  const armnn::ReduceDescriptor& reduceDescriptor,
994  const char*)
995 {
996  auto fbReduceBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Reduce);
997  auto fbDescriptor = CreateReduceDescriptor(m_flatBufferBuilder,
998  reduceDescriptor.m_KeepDims,
999  m_flatBufferBuilder.CreateVector(reduceDescriptor.m_vAxis),
1000  GetFlatBufferReduceOperation(reduceDescriptor.m_ReduceOperation));
1001  auto fbReduceLayer = serializer::CreateReduceLayer(m_flatBufferBuilder,
1002  fbReduceBaseLayer,
1003  fbDescriptor);
1004 
1005  CreateAnyLayer(fbReduceLayer.o, serializer::Layer::Layer_ReduceLayer);
1006 }
1007 
1008 // Build FlatBuffer for Reshape Layer
1009 void SerializerStrategy::SerializeReshapeLayer(const armnn::IConnectableLayer* layer,
1010  const armnn::ReshapeDescriptor& reshapeDescriptor,
1011  const char* name)
1012 {
1013  IgnoreUnused(name);
1014 
1015  // Create FlatBuffer BaseLayer
1016  auto flatBufferReshapeBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Reshape);
1017 
1018  std::vector<unsigned int> targetShape;
1019  for (unsigned int i =0; i < reshapeDescriptor.m_TargetShape.GetNumDimensions(); i++)
1020  {
1021  targetShape.push_back(reshapeDescriptor.m_TargetShape[i]);
1022  }
1023 
1024  auto flatBufferReshapeDesc = serializer::CreateReshapeDescriptor(m_flatBufferBuilder,
1025  m_flatBufferBuilder.CreateVector(targetShape));
1026 
1027  // Create the FlatBuffer ReshapeLayer
1028  auto flatBufferReshapeLayer = serializer::CreateReshapeLayer(m_flatBufferBuilder, flatBufferReshapeBaseLayer,
1029  flatBufferReshapeDesc);
1030 
1031  // Add the AnyLayer to the FlatBufferLayers
1032  CreateAnyLayer(flatBufferReshapeLayer.o, serializer::Layer::Layer_ReshapeLayer);
1033 }
1034 
1035 void SerializerStrategy::SerializeResizeLayer(const armnn::IConnectableLayer* layer,
1036  const armnn::ResizeDescriptor& resizeDescriptor,
1037  const char* name)
1038 {
1039  IgnoreUnused(name);
1040 
1041  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Resize);
1042 
1043  auto flatBufferDescriptor =
1044  CreateResizeDescriptor(m_flatBufferBuilder,
1045  resizeDescriptor.m_TargetHeight,
1046  resizeDescriptor.m_TargetWidth,
1047  GetFlatBufferResizeMethod(resizeDescriptor.m_Method),
1048  GetFlatBufferDataLayout(resizeDescriptor.m_DataLayout),
1049  resizeDescriptor.m_AlignCorners,
1050  resizeDescriptor.m_HalfPixelCenters);
1051 
1052  auto flatBufferLayer = serializer::CreateResizeLayer(m_flatBufferBuilder,
1053  flatBufferBaseLayer,
1054  flatBufferDescriptor);
1055 
1056  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_ResizeLayer);
1057 }
1058 
1059 void SerializerStrategy::SerializeReverseV2Layer(const armnn::IConnectableLayer* layer,
1060  const char* name)
1061 {
1062  IgnoreUnused(name);
1063 
1064  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_ReverseV2);
1065 
1066  auto flatBufferLayer = serializer::CreateReverseV2Layer(m_flatBufferBuilder,
1067  flatBufferBaseLayer);
1068 
1069  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_ReverseV2Layer);
1070 }
1071 
1072 void SerializerStrategy::SerializeSliceLayer(const armnn::IConnectableLayer* layer,
1073  const armnn::SliceDescriptor& sliceDescriptor,
1074  const char* name)
1075 {
1076  IgnoreUnused(name);
1077 
1078  auto fbSliceBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Slice);
1079  auto fbSliceDescriptor = CreateSliceDescriptor(m_flatBufferBuilder,
1080  m_flatBufferBuilder.CreateVector(sliceDescriptor.m_Begin),
1081  m_flatBufferBuilder.CreateVector(sliceDescriptor.m_Size));
1082 
1083  auto fbSliceLayer = serializer::CreateSliceLayer(m_flatBufferBuilder, fbSliceBaseLayer, fbSliceDescriptor);
1084 
1085  CreateAnyLayer(fbSliceLayer.o, serializer::Layer::Layer_SliceLayer);
1086 }
1087 
1088 // Build FlatBuffer for Softmax Layer
1089 void SerializerStrategy::SerializeSoftmaxLayer(const armnn::IConnectableLayer* layer,
1090  const armnn::SoftmaxDescriptor& softmaxDescriptor,
1091  const char* name)
1092 {
1093  IgnoreUnused(name);
1094 
1095  // Create FlatBuffer BaseLayer
1096  auto flatBufferSoftmaxBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Softmax);
1097 
1098  // Create the FlatBuffer SoftmaxDescriptor
1099  auto flatBufferSoftmaxDesc =
1100  serializer::CreateSoftmaxDescriptor(m_flatBufferBuilder,
1101  softmaxDescriptor.m_Beta,
1102  softmaxDescriptor.m_Axis);
1103 
1104  // Create the FlatBuffer SoftmaxLayer
1105  auto flatBufferSoftmaxLayer =
1106  serializer::CreateSoftmaxLayer(m_flatBufferBuilder,
1107  flatBufferSoftmaxBaseLayer,
1108  flatBufferSoftmaxDesc);
1109 
1110  CreateAnyLayer(flatBufferSoftmaxLayer.o, serializer::Layer::Layer_SoftmaxLayer);
1111 }
1112 
1113 void SerializerStrategy::SerializePooling2dLayer(const armnn::IConnectableLayer* layer,
1114  const armnn::Pooling2dDescriptor& pooling2dDescriptor,
1115  const char* name)
1116 {
1117  IgnoreUnused(name);
1118 
1119  auto fbPooling2dBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Pooling2d);
1120  auto fbPooling2dDescriptor = serializer::CreatePooling2dDescriptor(
1121  m_flatBufferBuilder,
1122  GetFlatBufferPoolingAlgorithm(pooling2dDescriptor.m_PoolType),
1123  pooling2dDescriptor.m_PadLeft,
1124  pooling2dDescriptor.m_PadRight,
1125  pooling2dDescriptor.m_PadTop,
1126  pooling2dDescriptor.m_PadBottom,
1127  pooling2dDescriptor.m_PoolWidth,
1128  pooling2dDescriptor.m_PoolHeight,
1129  pooling2dDescriptor.m_StrideX,
1130  pooling2dDescriptor.m_StrideY,
1131  GetFlatBufferOutputShapeRounding(pooling2dDescriptor.m_OutputShapeRounding),
1132  GetFlatBufferPaddingMethod(pooling2dDescriptor.m_PaddingMethod),
1133  GetFlatBufferDataLayout(pooling2dDescriptor.m_DataLayout));
1134 
1135  auto fbPooling2dLayer = serializer::CreatePooling2dLayer(m_flatBufferBuilder,
1136  fbPooling2dBaseLayer,
1137  fbPooling2dDescriptor);
1138 
1139  CreateAnyLayer(fbPooling2dLayer.o, serializer::Layer::Layer_Pooling2dLayer);
1140 }
1141 
1142 void SerializerStrategy::SerializePooling3dLayer(const armnn::IConnectableLayer* layer,
1143  const armnn::Pooling3dDescriptor& pooling3dDescriptor,
1144  const char* name)
1145 {
1146  IgnoreUnused(name);
1147 
1148  auto fbPooling3dBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Pooling3d);
1149  auto fbPooling3dDescriptor = serializer::CreatePooling3dDescriptor(
1150  m_flatBufferBuilder,
1151  GetFlatBufferPoolingAlgorithm(pooling3dDescriptor.m_PoolType),
1152  pooling3dDescriptor.m_PadLeft,
1153  pooling3dDescriptor.m_PadRight,
1154  pooling3dDescriptor.m_PadTop,
1155  pooling3dDescriptor.m_PadBottom,
1156  pooling3dDescriptor.m_PadFront,
1157  pooling3dDescriptor.m_PadBack,
1158  pooling3dDescriptor.m_PoolWidth,
1159  pooling3dDescriptor.m_PoolHeight,
1160  pooling3dDescriptor.m_PoolDepth,
1161  pooling3dDescriptor.m_StrideX,
1162  pooling3dDescriptor.m_StrideY,
1163  pooling3dDescriptor.m_StrideZ,
1164  GetFlatBufferOutputShapeRounding(pooling3dDescriptor.m_OutputShapeRounding),
1165  GetFlatBufferPaddingMethod(pooling3dDescriptor.m_PaddingMethod),
1166  GetFlatBufferDataLayout(pooling3dDescriptor.m_DataLayout));
1167 
1168  auto fbPooling3dLayer = serializer::CreatePooling3dLayer(m_flatBufferBuilder,
1169  fbPooling3dBaseLayer,
1170  fbPooling3dDescriptor);
1171 
1172  CreateAnyLayer(fbPooling3dLayer.o, serializer::Layer::Layer_Pooling3dLayer);
1173 }
1174 
1175 void SerializerStrategy::SerializePreluLayer(const armnn::IConnectableLayer* layer,
1176  const char* name)
1177 {
1178  IgnoreUnused(name);
1179 
1180  // Create FlatBuffer BaseLayer
1181  auto flatBufferPreluBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Prelu);
1182 
1183  // Create the FlatBuffer PreluLayer
1184  auto flatBufferPreluLayer = serializer::CreatePreluLayer(m_flatBufferBuilder, flatBufferPreluBaseLayer);
1185 
1186  // Add the AnyLayer to the FlatBufferLayers
1187  CreateAnyLayer(flatBufferPreluLayer.o, serializer::Layer::Layer_PreluLayer);
1188 }
1189 
1190 void SerializerStrategy::SerializeQuantizeLayer(const armnn::IConnectableLayer *layer, const char *name)
1191 {
1192  IgnoreUnused(name);
1193 
1194  auto fbQuantizeBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Quantize);
1195  auto fbQuantizeLayer = serializer::CreateQuantizeLayer(m_flatBufferBuilder,
1196  fbQuantizeBaseLayer);
1197  CreateAnyLayer(fbQuantizeLayer.o, serializer::Layer::Layer_QuantizeLayer);
1198 }
1199 
1200 // Build FlatBuffer for FullyConnected Layer
1201 void SerializerStrategy::SerializeFullyConnectedLayer(const armnn::IConnectableLayer* layer,
1202  const armnn::FullyConnectedDescriptor& fullyConnectedDescriptor,
1203  const char*)
1204 {
1205  // Create FlatBuffer BaseLayer
1206  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_FullyConnected);
1207 
1208  // Create FlatBuffer FullyConnectedDescriptor
1209  auto flatBufferDescriptor =
1210  serializer::CreateFullyConnectedDescriptor(m_flatBufferBuilder,
1211  fullyConnectedDescriptor.m_BiasEnabled,
1212  fullyConnectedDescriptor.m_TransposeWeightMatrix,
1213  fullyConnectedDescriptor.m_ConstantWeights);
1214 
1215  // Create FlatBuffer FullyConnectedLayer
1216  auto flatBufferLayer = serializer::CreateFullyConnectedLayer(m_flatBufferBuilder,
1217  flatBufferBaseLayer,
1218  flatBufferDescriptor);
1219 
1220  // Add created FullyConnectedLayer to the FlatBufferLayers
1221  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_FullyConnectedLayer);
1222 }
1223 
1224 // Build FlatBuffer for SpaceToBatchNd Layer
1225 void SerializerStrategy::SerializeSpaceToBatchNdLayer(const armnn::IConnectableLayer* layer,
1226  const armnn::SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor,
1227  const char* name)
1228 {
1229  IgnoreUnused(name);
1230 
1231  // Create FlatBuffer BaseLayer
1232  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_SpaceToBatchNd);
1233 
1234  std::vector<unsigned int> padList;
1235  padList.reserve(spaceToBatchNdDescriptor.m_PadList.size()*2);
1236  for (auto& pad : spaceToBatchNdDescriptor.m_PadList)
1237  {
1238  padList.push_back(pad.first);
1239  padList.push_back(pad.second);
1240  }
1241 
1242  auto flatBufferDescriptor =
1243  CreateSpaceToBatchNdDescriptor(m_flatBufferBuilder,
1244  m_flatBufferBuilder.CreateVector(spaceToBatchNdDescriptor.m_BlockShape),
1245  m_flatBufferBuilder.CreateVector(padList),
1246  GetFlatBufferDataLayout(spaceToBatchNdDescriptor.m_DataLayout));
1247 
1248  auto flatBufferLayer = serializer::CreateSpaceToBatchNdLayer(m_flatBufferBuilder,
1249  flatBufferBaseLayer,
1250  flatBufferDescriptor);
1251 
1252  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_SpaceToBatchNdLayer);
1253 }
1254 
1255 // Build FlatBuffer for SpaceToDepthLayer
1256 void SerializerStrategy::SerializeSpaceToDepthLayer(const armnn::IConnectableLayer* layer,
1257  const armnn::SpaceToDepthDescriptor& spaceToDepthDescriptor,
1258  const char* name)
1259 {
1260  IgnoreUnused(name);
1261 
1262  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_SpaceToDepth);
1263  auto flatBufferDescriptor =
1264  CreateSpaceToDepthDescriptor(m_flatBufferBuilder,
1265  spaceToDepthDescriptor.m_BlockSize,
1266  GetFlatBufferDataLayout(spaceToDepthDescriptor.m_DataLayout));
1267 
1268  auto flatBufferLayer = serializer::CreateSpaceToDepthLayer(m_flatBufferBuilder,
1269  flatBufferBaseLayer,
1270  flatBufferDescriptor);
1271 
1272  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_SpaceToDepthLayer);
1273 }
1274 
1275 // Build FlatBuffer for Splitter Layer
1276 void SerializerStrategy::SerializeSplitterLayer(const armnn::IConnectableLayer* layer,
1277  const armnn::ViewsDescriptor& viewsDescriptor,
1278  const char* name)
1279 {
1280  IgnoreUnused(name);
1281 
1282  // Create FlatBuffer ViewOrigins
1283  std::vector<flatbuffers::Offset<UintVector>> flatBufferViewOrigins;
1284  flatBufferViewOrigins.reserve(viewsDescriptor.GetNumViews());
1285 
1286  for(unsigned int vIdx = 0; vIdx < viewsDescriptor.GetNumViews(); ++vIdx)
1287  {
1288  std::vector<uint32_t> viewOrigin;
1289  viewOrigin.reserve(viewsDescriptor.GetNumDimensions());
1290 
1291  // Copy vector
1292  for(unsigned int dIdx = 0; dIdx < viewsDescriptor.GetNumDimensions(); ++dIdx)
1293  {
1294  viewOrigin.push_back(viewsDescriptor.GetViewOrigin(vIdx)[dIdx]);
1295  }
1296 
1297  flatBufferViewOrigins.push_back(CreateUintVector(m_flatBufferBuilder,
1298  m_flatBufferBuilder.CreateVector(viewOrigin)));
1299  }
1300 
1301  // Create FlatBuffer OriginsDescriptor
1302  auto flatBufferOriginDescriptor = CreateOriginsDescriptor(m_flatBufferBuilder,
1303  viewsDescriptor.GetOrigins().GetConcatAxis(),
1304  viewsDescriptor.GetOrigins().GetNumViews(),
1305  viewsDescriptor.GetOrigins().GetNumDimensions(),
1306  m_flatBufferBuilder.CreateVector(flatBufferViewOrigins));
1307 
1308  // Create FlatBuffer ViewSizes
1309  std::vector<flatbuffers::Offset<UintVector>> flatBufferViewSizes;
1310  flatBufferViewSizes.reserve(viewsDescriptor.GetNumViews());
1311 
1312  for(unsigned int vIdx = 0; vIdx < viewsDescriptor.GetNumViews(); ++vIdx)
1313  {
1314  std::vector<uint32_t> viewSize;
1315  viewSize.reserve(viewsDescriptor.GetNumDimensions());
1316 
1317  // Copy vector
1318  for(unsigned int dIdx = 0; dIdx < viewsDescriptor.GetNumDimensions(); ++dIdx)
1319  {
1320  viewSize.push_back(viewsDescriptor.GetViewSizes(vIdx)[dIdx]);
1321  }
1322 
1323  flatBufferViewSizes.push_back(CreateUintVector(m_flatBufferBuilder,
1324  m_flatBufferBuilder.CreateVector(viewSize)));
1325  }
1326 
1327  // Create FlatBuffer ViewsDescriptor
1328  auto flatBufferViewsDescriptor = CreateViewsDescriptor(m_flatBufferBuilder,
1329  flatBufferOriginDescriptor,
1330  m_flatBufferBuilder.CreateVector(flatBufferViewSizes),
1331  viewsDescriptor.HasAxis(),
1332  viewsDescriptor.GetAxis());
1333 
1334  // Create FlatBuffer BaseLayer
1335  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Splitter);
1336 
1337  auto flatBufferSplitterLayer = serializer::CreateSplitterLayer(m_flatBufferBuilder,
1338  flatBufferBaseLayer,
1339  flatBufferViewsDescriptor);
1340 
1341  CreateAnyLayer(flatBufferSplitterLayer.o, serializer::Layer::Layer_SplitterLayer);
1342 }
1343 
1344 void SerializerStrategy::SerializeNormalizationLayer(const armnn::IConnectableLayer* layer,
1345  const armnn::NormalizationDescriptor& descriptor,
1346  const char* name)
1347 {
1348  IgnoreUnused(name);
1349 
1350  auto fbNormalizationBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Normalization);
1351 
1352  auto fbNormalizationDescriptor = serializer::CreateNormalizationDescriptor(
1353  m_flatBufferBuilder,
1354  GetFlatBufferNormalizationAlgorithmChannel(descriptor.m_NormChannelType),
1355  GetFlatBufferNormalizationAlgorithmMethod(descriptor.m_NormMethodType),
1356  descriptor.m_NormSize,
1357  descriptor.m_Alpha,
1358  descriptor.m_Beta,
1359  descriptor.m_K,
1360  GetFlatBufferDataLayout(descriptor.m_DataLayout));
1361 
1362  auto flatBufferLayer = serializer::CreateNormalizationLayer(m_flatBufferBuilder,
1363  fbNormalizationBaseLayer,
1364  fbNormalizationDescriptor);
1365 
1366  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_NormalizationLayer);
1367 }
1368 
1369 void SerializerStrategy::SerializeScatterNdLayer(const armnn::IConnectableLayer* layer,
1370  const armnn::ScatterNdDescriptor& descriptor,
1371  const char* name)
1372 {
1373  IgnoreUnused(name);
1374 
1375  // Create FlatBuffer BaseLayer
1376  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_ScatterNd);
1377 
1378  auto flatBufferDesc = serializer::CreateScatterNdDescriptor(
1379  m_flatBufferBuilder,
1380  GetFlatBufferScatterNdFunction(descriptor.m_Function),
1381  descriptor.m_InputEnabled,
1382  descriptor.m_Axis,
1383  descriptor.m_AxisEnabled);
1384 
1385  // Create the FlatBuffer ScatterNdLayer
1386  auto flatBufferLayer = serializer::CreateScatterNdLayer(
1387  m_flatBufferBuilder,
1388  flatBufferBaseLayer,
1389  flatBufferDesc);
1390 
1391  // Add the AnyLayer to the FlatBufferLayers
1392  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_ScatterNdLayer);
1393 }
1394 
1395 void SerializerStrategy::SerializeShapeLayer(const armnn::IConnectableLayer* layer,
1396  const char* name)
1397 {
1398  IgnoreUnused(name);
1399 
1400  auto shapeBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Shape);
1401  auto shapeLayer = serializer::CreateShapeLayer(m_flatBufferBuilder, shapeBaseLayer);
1402 
1403  CreateAnyLayer(shapeLayer.o, serializer::Layer::Layer_ShapeLayer);
1404 }
1405 
1406 void SerializerStrategy::SerializeStackLayer(const armnn::IConnectableLayer* layer,
1407  const armnn::StackDescriptor& stackDescriptor,
1408  const char* name)
1409 {
1410  IgnoreUnused(name);
1411 
1412  auto stackBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Stack);
1413 
1414  std::vector<unsigned int> inputShape;
1415  for (unsigned int i =0; i < stackDescriptor.m_InputShape.GetNumDimensions(); i++)
1416  {
1417  inputShape.push_back(stackDescriptor.m_InputShape[i]);
1418  }
1419 
1420  auto flatBufferStackDescriptor = CreateStackDescriptor(m_flatBufferBuilder,
1421  stackDescriptor.m_Axis,
1422  stackDescriptor.m_NumInputs,
1423  m_flatBufferBuilder.CreateVector(inputShape));
1424 
1425  auto stackLayer = serializer::CreateStackLayer(m_flatBufferBuilder, stackBaseLayer, flatBufferStackDescriptor);
1426  CreateAnyLayer(stackLayer.o, serializer::Layer::Layer_StackLayer);
1427 }
1428 
1429 void SerializerStrategy::SerializeStandInLayer(const armnn::IConnectableLayer *layer,
1430  const armnn::StandInDescriptor& standInDescriptor,
1431  const char *name)
1432 {
1433  IgnoreUnused(name);
1434 
1435  auto fbDescriptor = serializer::CreateStandInDescriptor(m_flatBufferBuilder,
1436  standInDescriptor.m_NumInputs,
1437  standInDescriptor.m_NumOutputs);
1438 
1439  auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_StandIn);
1440  auto fbLayer = serializer::CreateStandInLayer(m_flatBufferBuilder, fbBaseLayer, fbDescriptor);
1441 
1442  CreateAnyLayer(fbLayer.o, serializer::Layer::Layer_StandInLayer);
1443 }
1444 
1445 void SerializerStrategy::SerializeStridedSliceLayer(const armnn::IConnectableLayer* layer,
1446  const armnn::StridedSliceDescriptor& stridedSliceDescriptor,
1447  const char* name)
1448 {
1449  IgnoreUnused(name);
1450 
1451  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_StridedSlice);
1452 
1453  auto flatBufferDescriptor =
1454  CreateStridedSliceDescriptor(m_flatBufferBuilder,
1455  m_flatBufferBuilder.CreateVector(stridedSliceDescriptor.m_Begin),
1456  m_flatBufferBuilder.CreateVector(stridedSliceDescriptor.m_End),
1457  m_flatBufferBuilder.CreateVector(stridedSliceDescriptor.m_Stride),
1458  stridedSliceDescriptor.m_BeginMask,
1459  stridedSliceDescriptor.m_EndMask,
1460  stridedSliceDescriptor.m_ShrinkAxisMask,
1461  stridedSliceDescriptor.m_EllipsisMask,
1462  stridedSliceDescriptor.m_NewAxisMask,
1463  GetFlatBufferDataLayout(stridedSliceDescriptor.m_DataLayout));
1464 
1465  auto flatBufferLayer = serializer::CreateStridedSliceLayer(m_flatBufferBuilder,
1466  flatBufferBaseLayer,
1467  flatBufferDescriptor);
1468 
1469  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_StridedSliceLayer);
1470 }
1471 
1472 void SerializerStrategy::SerializeSubtractionLayer(const armnn::IConnectableLayer* layer, const char* name)
1473 {
1474  IgnoreUnused(name);
1475 
1476  auto fbSubtractionBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Subtraction);
1477  auto fbSubtractionLayer = serializer::CreateSubtractionLayer(m_flatBufferBuilder, fbSubtractionBaseLayer);
1478 
1479  CreateAnyLayer(fbSubtractionLayer.o, serializer::Layer::Layer_SubtractionLayer);
1480 }
1481 
1482 void SerializerStrategy::SerializeSwitchLayer(const armnn::IConnectableLayer* layer, const char* name)
1483 {
1484  IgnoreUnused(name);
1485 
1486  auto fbSwitchBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Switch);
1487  auto fbSwitchLayer = serializer::CreateSwitchLayer(m_flatBufferBuilder, fbSwitchBaseLayer);
1488 
1489  CreateAnyLayer(fbSwitchLayer.o, serializer::Layer::Layer_SwitchLayer);
1490 }
1491 
1492 void SerializerStrategy::SerializeTileLayer(const armnn::IConnectableLayer* layer,
1493  const armnn::TileDescriptor& descriptor,
1494  const char* name)
1495 {
1496  IgnoreUnused(name);
1497 
1498  // Create FlatBuffer BaseLayer
1499  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Tile);
1500 
1501  auto flatBufferDesc = serializer::CreateTileDescriptor(m_flatBufferBuilder,
1502  m_flatBufferBuilder.CreateVector(descriptor.m_Multiples));
1503 
1504  // Create the FlatBuffer TileLayer
1505  auto flatBufferLayer = serializer::CreateTileLayer(m_flatBufferBuilder,
1506  flatBufferBaseLayer,
1507  flatBufferDesc);
1508 
1509  // Add the AnyLayer to the FlatBufferLayers
1510  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_TileLayer);
1511 }
1512 
1513 void SerializerStrategy::SerializeTransposeConvolution2dLayer(
1514  const armnn::IConnectableLayer* layer,
1515  const armnn::TransposeConvolution2dDescriptor& descriptor,
1516  const std::vector<armnn::ConstTensor>& constants,
1517  const char* name)
1518 {
1519  IgnoreUnused(name);
1520 
1521  const armnn::ConstTensor& weights = constants.at(0);
1522 
 1523  auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_TransposeConvolution2d);
1524  auto fbDescriptor = CreateTransposeConvolution2dDescriptor(m_flatBufferBuilder,
1525  descriptor.m_PadLeft,
1526  descriptor.m_PadRight,
1527  descriptor.m_PadTop,
1528  descriptor.m_PadBottom,
1529  descriptor.m_StrideX,
1530  descriptor.m_StrideY,
1531  descriptor.m_BiasEnabled,
 1532  GetFlatBufferDataLayout(descriptor.m_DataLayout));
 1533 
1534  // weights & biases
1535  auto fbWeightsConstTensorInfo = CreateConstTensorInfo(weights);
1536  flatbuffers::Offset<serializer::ConstTensor> fbBiasesConstTensorInfo;
1537  if (constants.size() > 1)
1538  {
1539  const armnn::ConstTensor& biases = constants.at(1);
1540  fbBiasesConstTensorInfo = CreateConstTensorInfo(biases);
1541  }
1542 
1543  auto fbLayer = CreateTransposeConvolution2dLayer(m_flatBufferBuilder,
1544  fbBaseLayer,
1545  fbDescriptor,
1546  fbWeightsConstTensorInfo,
1547  fbBiasesConstTensorInfo);
1548 
1549  CreateAnyLayer(fbLayer.o, serializer::Layer::Layer_TransposeConvolution2dLayer);
1550 }
1551 
1552 void SerializerStrategy::SerializeTransposeLayer(const armnn::IConnectableLayer* layer,
1553  const armnn::TransposeDescriptor& descriptor,
1554  const char* name)
1555 {
1556  IgnoreUnused(name);
1557 
1558  // Create FlatBuffer BaseLayer
1559  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Transpose);
1560 
1561  std::vector<unsigned int> dimMappings;
1562  for (unsigned int i=0; i<descriptor.m_DimMappings.GetSize(); ++i)
1563  {
1564  dimMappings.push_back(descriptor.m_DimMappings[i]);
1565  }
1566 
1567  auto flatBufferDesc = serializer::CreateTransposeDescriptor(m_flatBufferBuilder,
1568  m_flatBufferBuilder.CreateVector(dimMappings));
1569 
1570  // Create the FlatBuffer TransposeLayer
1571  auto flatBufferLayer = serializer::CreateTransposeLayer(m_flatBufferBuilder,
1572  flatBufferBaseLayer,
1573  flatBufferDesc);
1574 
1575  // Add the AnyLayer to the FlatBufferLayers
1576  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_TransposeLayer);
1577 }
1578 
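// The QLstm constants vector is consumed positionally: the nine mandatory weight/bias tensors come
// first, followed by the optional CIFG, peephole, projection and layer-normalisation tensors, whose
// presence is governed by the corresponding flags in the QLstmDescriptor.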
1579 void SerializerStrategy::SerializeQLstmLayer(const armnn::IConnectableLayer* layer,
1580  const armnn::QLstmDescriptor& descriptor,
1581  const std::vector<armnn::ConstTensor>& constants,
1582  const char* name)
1583 {
1584  IgnoreUnused(name);
1585 
1586  auto fbQLstmBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_QLstm);
1587 
1588  auto fbQLstmDescriptor = serializer::CreateQLstmDescriptor(
1589  m_flatBufferBuilder,
1590  descriptor.m_CifgEnabled,
1591  descriptor.m_PeepholeEnabled,
1592  descriptor.m_ProjectionEnabled,
1593  descriptor.m_LayerNormEnabled,
1594  descriptor.m_CellClip,
1595  descriptor.m_ProjectionClip,
1596  descriptor.m_InputIntermediateScale,
1597  descriptor.m_ForgetIntermediateScale,
1598  descriptor.m_CellIntermediateScale,
1599  descriptor.m_OutputIntermediateScale,
1600  descriptor.m_HiddenStateZeroPoint,
1601  descriptor.m_HiddenStateScale
1602  );
1603 
1604  // Index for constants vector
1605  std::size_t i = 0;
1606 
1607  // Mandatory params
1608  auto inputToForgetWeights = CreateConstTensorInfo(constants[i++]); //InputToForgetWeights
1609  auto inputToCellWeights = CreateConstTensorInfo(constants[i++]); //InputToCellWeights
1610  auto inputToOutputWeights = CreateConstTensorInfo(constants[i++]); //InputToOutputWeights
1611  auto recurrentToForgetWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToForgetWeights
1612  auto recurrentToCellWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToCellWeights
1613  auto recurrentToOutputWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToOutputWeights
1614  auto forgetGateBias = CreateConstTensorInfo(constants[i++]); //ForgetGateBias
1615  auto cellBias = CreateConstTensorInfo(constants[i++]); //CellBias
1616  auto outputGateBias = CreateConstTensorInfo(constants[i++]); //OutputGateBias
1617 
1618  // CIFG
1619  flatbuffers::Offset<serializer::ConstTensor> inputToInputWeights;
1620  flatbuffers::Offset<serializer::ConstTensor> recurrentToInputWeights;
1621  flatbuffers::Offset<serializer::ConstTensor> inputGateBias;
1622 
1623  if (!descriptor.m_CifgEnabled)
1624  {
1625  inputToInputWeights = CreateConstTensorInfo(constants[i++]); //InputToInputWeights
1626  recurrentToInputWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToInputWeights
1627  inputGateBias = CreateConstTensorInfo(constants[i++]); //InputGateBias
1628  }
1629 
1630  // Peephole
1631  flatbuffers::Offset<serializer::ConstTensor> cellToInputWeights;
1632  flatbuffers::Offset<serializer::ConstTensor> cellToForgetWeights;
1633  flatbuffers::Offset<serializer::ConstTensor> cellToOutputWeights;
1634 
1635  if (descriptor.m_PeepholeEnabled)
1636  {
1637  if (!descriptor.m_CifgEnabled)
1638  {
1639  cellToInputWeights = CreateConstTensorInfo(constants[i++]); //CellToInputWeights
1640  }
1641  cellToForgetWeights = CreateConstTensorInfo(constants[i++]); //CellToForgetWeights
1642  cellToOutputWeights = CreateConstTensorInfo(constants[i++]); //CellToOutputWeights
1643  }
1644 
1645  // Projection
1646  flatbuffers::Offset<serializer::ConstTensor> projectionWeights;
1647  flatbuffers::Offset<serializer::ConstTensor> projectionBias;
1648 
1649  if (descriptor.m_ProjectionEnabled)
1650  {
1651  projectionWeights = CreateConstTensorInfo(constants[i++]); //ProjectionWeights
1652  projectionBias = CreateConstTensorInfo(constants[i++]); //ProjectionBias
1653  }
1654 
1655  // Layer norm
1656  flatbuffers::Offset<serializer::ConstTensor> inputLayerNormWeights;
1657  flatbuffers::Offset<serializer::ConstTensor> forgetLayerNormWeights;
1658  flatbuffers::Offset<serializer::ConstTensor> cellLayerNormWeights;
1659  flatbuffers::Offset<serializer::ConstTensor> outputLayerNormWeights;
1660 
1661  if (descriptor.m_LayerNormEnabled)
1662  {
1663  if (!descriptor.m_CifgEnabled)
1664  {
1665  inputLayerNormWeights = CreateConstTensorInfo(constants[i++]); //InputLayerNormWeights
1666  }
1667  forgetLayerNormWeights = CreateConstTensorInfo(constants[i++]); //ForgetLayerNormWeights
1668  cellLayerNormWeights = CreateConstTensorInfo(constants[i++]); //CellLayerNormWeights
1669  outputLayerNormWeights = CreateConstTensorInfo(constants[i++]); //OutputLayerNormWeights
1670  }
1671 
1672  auto fbQLstmParams = serializer::CreateQLstmInputParams(
1673  m_flatBufferBuilder,
1674  inputToForgetWeights,
1675  inputToCellWeights,
1676  inputToOutputWeights,
1677  recurrentToForgetWeights,
1678  recurrentToCellWeights,
1679  recurrentToOutputWeights,
1680  forgetGateBias,
1681  cellBias,
1682  outputGateBias,
1683  inputToInputWeights,
1684  recurrentToInputWeights,
1685  inputGateBias,
1686  projectionWeights,
1687  projectionBias,
1688  cellToInputWeights,
1689  cellToForgetWeights,
1690  cellToOutputWeights,
1691  inputLayerNormWeights,
1692  forgetLayerNormWeights,
1693  cellLayerNormWeights,
1694  outputLayerNormWeights);
1695 
1696  auto fbQLstmLayer = serializer::CreateQLstmLayer(
1697  m_flatBufferBuilder,
1698  fbQLstmBaseLayer,
1699  fbQLstmDescriptor,
1700  fbQLstmParams);
1701 
1702  CreateAnyLayer(fbQLstmLayer.o, serializer::Layer::Layer_QLstmLayer);
1703 }
1704 
1705 void SerializerStrategy::SerializeQuantizedLstmLayer(const armnn::IConnectableLayer* layer,
1706  const std::vector<armnn::ConstTensor>& constants,
1707  const char* name)
1708 {
1709  IgnoreUnused(name);
1710 
1711  auto fbQuantizedLstmBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_QuantizedLstm);
1712 
1713  // index for constants vector
1714  size_t i = 0;
1715 
1716  // Get input parameters
1717  auto inputToInputWeights = CreateConstTensorInfo(constants[i++]);
1718  auto inputToForgetWeights = CreateConstTensorInfo(constants[i++]);
1719  auto inputToCellWeights = CreateConstTensorInfo(constants[i++]);
1720  auto inputToOutputWeights = CreateConstTensorInfo(constants[i++]);
1721 
1722  auto recurrentToInputWeights = CreateConstTensorInfo(constants[i++]);
1723  auto recurrentToForgetWeights = CreateConstTensorInfo(constants[i++]);
1724  auto recurrentToCellWeights = CreateConstTensorInfo(constants[i++]);
1725  auto recurrentToOutputWeights = CreateConstTensorInfo(constants[i++]);
1726 
1727  auto inputGateBias = CreateConstTensorInfo(constants[i++]);
1728  auto forgetGateBias = CreateConstTensorInfo(constants[i++]);
1729  auto cellBias = CreateConstTensorInfo(constants[i++]);
1730  auto outputGateBias = CreateConstTensorInfo(constants[i++]);
1731 
1732  auto fbQuantizedLstmParams = serializer::CreateQuantizedLstmInputParams(
1733  m_flatBufferBuilder,
1734  inputToInputWeights,
1735  inputToForgetWeights,
1736  inputToCellWeights,
1737  inputToOutputWeights,
1738  recurrentToInputWeights,
1739  recurrentToForgetWeights,
1740  recurrentToCellWeights,
1741  recurrentToOutputWeights,
1742  inputGateBias,
1743  forgetGateBias,
1744  cellBias,
1745  outputGateBias);
1746 
1747  auto fbQuantizedLstmLayer = serializer::CreateQuantizedLstmLayer(
1748  m_flatBufferBuilder,
1749  fbQuantizedLstmBaseLayer,
1750  fbQuantizedLstmParams);
1751 
1752  CreateAnyLayer(fbQuantizedLstmLayer.o, serializer::Layer::Layer_QuantizedLstmLayer);
1753 }
1754 
1755 void SerializerStrategy::SerializeUnidirectionalSequenceLstmLayer(
1756  const armnn::IConnectableLayer* layer,
 1757  const armnn::UnidirectionalSequenceLstmDescriptor& descriptor,
 1758  const std::vector<armnn::ConstTensor>& constants,
1759  const char* name)
1760 {
1761  IgnoreUnused(name);
1762 
1763  auto fbUnidirectionalSequenceLstmBaseLayer =
1764  CreateLayerBase(layer, serializer::LayerType::LayerType_UnidirectionalSequenceLstm);
1765 
1766  auto fbUnidirectionalSequenceLstmDescriptor = serializer::CreateUnidirectionalSequenceLstmDescriptor(
1767  m_flatBufferBuilder,
1768  descriptor.m_ActivationFunc,
1769  descriptor.m_ClippingThresCell,
1770  descriptor.m_ClippingThresProj,
1771  descriptor.m_CifgEnabled,
1772  descriptor.m_PeepholeEnabled,
1773  descriptor.m_ProjectionEnabled,
1774  descriptor.m_LayerNormEnabled,
1775  descriptor.m_TimeMajor);
1776 
1777  // Index for constants vector
1778  std::size_t i = 0;
1779 
1780  // Get mandatory/basic input parameters
1781  auto inputToForgetWeights = CreateConstTensorInfo(constants[i++]); //InputToForgetWeights
1782  auto inputToCellWeights = CreateConstTensorInfo(constants[i++]); //InputToCellWeights
1783  auto inputToOutputWeights = CreateConstTensorInfo(constants[i++]); //InputToOutputWeights
1784  auto recurrentToForgetWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToForgetWeights
1785  auto recurrentToCellWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToCellWeights
1786  auto recurrentToOutputWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToOutputWeights
1787  auto forgetGateBias = CreateConstTensorInfo(constants[i++]); //ForgetGateBias
1788  auto cellBias = CreateConstTensorInfo(constants[i++]); //CellBias
1789  auto outputGateBias = CreateConstTensorInfo(constants[i++]); //OutputGateBias
1790 
 1791  // Define optional parameters; these will be set depending on the configuration in the LSTM descriptor
1792  flatbuffers::Offset<serializer::ConstTensor> inputToInputWeights;
1793  flatbuffers::Offset<serializer::ConstTensor> recurrentToInputWeights;
1794  flatbuffers::Offset<serializer::ConstTensor> cellToInputWeights;
1795  flatbuffers::Offset<serializer::ConstTensor> inputGateBias;
1796  flatbuffers::Offset<serializer::ConstTensor> projectionWeights;
1797  flatbuffers::Offset<serializer::ConstTensor> projectionBias;
1798  flatbuffers::Offset<serializer::ConstTensor> cellToForgetWeights;
1799  flatbuffers::Offset<serializer::ConstTensor> cellToOutputWeights;
1800  flatbuffers::Offset<serializer::ConstTensor> inputLayerNormWeights;
1801  flatbuffers::Offset<serializer::ConstTensor> forgetLayerNormWeights;
1802  flatbuffers::Offset<serializer::ConstTensor> cellLayerNormWeights;
1803  flatbuffers::Offset<serializer::ConstTensor> outputLayerNormWeights;
1804 
1805  if (!descriptor.m_CifgEnabled)
1806  {
1807  inputToInputWeights = CreateConstTensorInfo(constants[i++]); //InputToInputWeights
1808  recurrentToInputWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToInputWeights
1809  inputGateBias = CreateConstTensorInfo(constants[i++]); //InputGateBias
1810  }
1811 
1812  if (descriptor.m_PeepholeEnabled)
1813  {
1814  if (!descriptor.m_CifgEnabled)
1815  {
1816  cellToInputWeights = CreateConstTensorInfo(constants[i++]); //CellToInputWeights
1817  }
1818  cellToForgetWeights = CreateConstTensorInfo(constants[i++]); //CellToForgetWeights
1819  cellToOutputWeights = CreateConstTensorInfo(constants[i++]); //CellToOutputWeights
1820  }
1821 
1822  if (descriptor.m_ProjectionEnabled)
1823  {
1824  projectionWeights = CreateConstTensorInfo(constants[i++]); //ProjectionWeights
1825  projectionBias = CreateConstTensorInfo(constants[i++]); //ProjectionBias
1826  }
1827 
1828  if (descriptor.m_LayerNormEnabled)
1829  {
1830  if (!descriptor.m_CifgEnabled)
1831  {
1832  inputLayerNormWeights = CreateConstTensorInfo(constants[i++]); //InputLayerNormWeights
1833  }
1834  forgetLayerNormWeights = CreateConstTensorInfo(constants[i++]); //ForgetLayerNormWeights
1835  cellLayerNormWeights = CreateConstTensorInfo(constants[i++]); //CellLayerNormWeights
1836  outputLayerNormWeights = CreateConstTensorInfo(constants[i++]); //OutputLayerNormWeights
1837  }
1838 
1839  auto fbUnidirectionalSequenceLstmParams = serializer::CreateLstmInputParams(
1840  m_flatBufferBuilder,
1841  inputToForgetWeights,
1842  inputToCellWeights,
1843  inputToOutputWeights,
1844  recurrentToForgetWeights,
1845  recurrentToCellWeights,
1846  recurrentToOutputWeights,
1847  forgetGateBias,
1848  cellBias,
1849  outputGateBias,
1850  inputToInputWeights,
1851  recurrentToInputWeights,
1852  cellToInputWeights,
1853  inputGateBias,
1854  projectionWeights,
1855  projectionBias,
1856  cellToForgetWeights,
1857  cellToOutputWeights,
1858  inputLayerNormWeights,
1859  forgetLayerNormWeights,
1860  cellLayerNormWeights,
1861  outputLayerNormWeights);
1862 
1863  auto fbUnidirectionalSequenceLstmLayer = serializer::CreateUnidirectionalSequenceLstmLayer(
1864  m_flatBufferBuilder,
1865  fbUnidirectionalSequenceLstmBaseLayer,
1866  fbUnidirectionalSequenceLstmDescriptor,
1867  fbUnidirectionalSequenceLstmParams);
1868 
1869  CreateAnyLayer(fbUnidirectionalSequenceLstmLayer.o, serializer::Layer::Layer_UnidirectionalSequenceLstmLayer);
1870 }
1871 
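// Every layer serialization funnels through CreateLayerBase/CreateAnyLayer below: the base record
// captures the layer's serialized id, name, type and slots, and CreateAnyLayer wraps the
// type-specific table into the AnyLayer union collected in m_serializedLayers.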
1872 fb::Offset<serializer::LayerBase> SerializerStrategy::CreateLayerBase(const IConnectableLayer* layer,
1873  const serializer::LayerType layerType)
1874 {
1875 
1876  uint32_t fbIndex = GetSerializedId(layer->GetGuid());
1877 
1878  std::vector<fb::Offset<serializer::InputSlot>> inputSlots = CreateInputSlots(layer);
1879  std::vector<fb::Offset<serializer::OutputSlot>> outputSlots = CreateOutputSlots(layer);
1880 
1881  return serializer::CreateLayerBase(m_flatBufferBuilder,
1882  fbIndex,
1883  m_flatBufferBuilder.CreateString(layer->GetName()),
1884  layerType,
1885  m_flatBufferBuilder.CreateVector(inputSlots),
1886  m_flatBufferBuilder.CreateVector(outputSlots));
1887 }
1888 
1889 void SerializerStrategy::CreateAnyLayer(const flatbuffers::Offset<void>& layer, const serializer::Layer serializerLayer)
1890 {
1891 
1892  auto anyLayer = armnnSerializer::CreateAnyLayer(m_flatBufferBuilder, serializerLayer, layer);
1893  m_serializedLayers.push_back(anyLayer);
1894 }
1895 
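// Copies raw tensor memory into a FlatBuffer vector of T; 'size' is in bytes, so size / sizeof(T)
// elements are copied.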
1896 template <typename T>
1897 flatbuffers::Offset<flatbuffers::Vector<T>> SerializerStrategy::CreateDataVector(const void* memory, unsigned int size)
1898 {
1899  const T* buffer = reinterpret_cast<const T*>(memory);
1900  std::vector<T> vector(buffer, buffer + (size / sizeof(T)));
1901  auto fbVector = m_flatBufferBuilder.CreateVector(vector);
1902  return fbVector;
1903 }
1904 
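// Serializes an armnn::TensorInfo. Per-axis quantized tensors keep their full scale vector and
// quantization dimension; otherwise a single scale/offset pair is written. Unspecified dimensions
// are serialized as 0 together with a per-dimension specificity flag.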
1905 flatbuffers::Offset<TensorInfo> SerializerStrategy::CreateTensorInfo(const armnn::TensorInfo& tensorInfo)
1906 {
1907  // Get the dimensions
1908  std::vector<unsigned int> shape;
1909  std::vector<bool> specificity;
1910  // This assumes that the TensorShape constructors have ensured that the size of m_DimensionsSpecificity
1911  // matches the size of dimensions.
1912  for(unsigned int dim = 0; dim < tensorInfo.GetShape().GetNumDimensions(); ++dim)
1913  {
1914  specificity.push_back(tensorInfo.GetShape().GetDimensionSpecificity(dim));
1915 
1916  if (tensorInfo.GetShape().GetDimensionSpecificity(dim))
1917  {
1918  shape.push_back(tensorInfo.GetShape()[dim]);
1919  }
1920  else
1921  {
1922  shape.push_back(0);
1923  }
1924  }
1925 
1926  if (tensorInfo.HasPerAxisQuantization())
1927  {
1928  // Create FlatBuffer TensorInfo
1929  auto flatBufferTensorInfo =
1930  serializer::CreateTensorInfo(m_flatBufferBuilder,
1931  m_flatBufferBuilder.CreateVector(shape),
1932  GetFlatBufferDataType(tensorInfo.GetDataType()),
1933  tensorInfo.GetQuantizationScales()[0],
1934  tensorInfo.GetQuantizationOffset(),
1935  m_flatBufferBuilder.CreateVector(tensorInfo.GetQuantizationScales()),
1936  tensorInfo.GetQuantizationDim().value(),
1937  static_cast<unsigned int>
1938  (tensorInfo.GetShape().GetDimensionality()),
1939  m_flatBufferBuilder.CreateVector(specificity));
1940  return flatBufferTensorInfo;
1941  }
1942 
1943  // Create FlatBuffer TensorInfo
1944  auto flatBufferTensorInfo = serializer::CreateTensorInfo(m_flatBufferBuilder,
1945  m_flatBufferBuilder.CreateVector(shape),
1946  GetFlatBufferDataType(tensorInfo.GetDataType()),
1947  tensorInfo.GetQuantizationScale(),
1948  tensorInfo.GetQuantizationOffset(),
1949  0,
1950  0,
1951  static_cast<unsigned int>
1952  (tensorInfo.GetShape().GetDimensionality()),
1953  m_flatBufferBuilder.CreateVector(specificity));
1954  return flatBufferTensorInfo;
1955 }
1956 
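// Serializes a constant tensor. The payload table is chosen by element width: 64-bit data goes to
// LongData, 32-bit to IntData, 16-bit to ShortData, and everything else (8-bit types and Boolean)
// to ByteData.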
1957 flatbuffers::Offset<serializer::ConstTensor>
1958  SerializerStrategy::CreateConstTensorInfo(const armnn::ConstTensor& constTensor)
1959 {
1960  armnn::TensorInfo tensorInfo = constTensor.GetInfo();
1961 
1962  flatbuffers::Offset<void> fbPayload;
1963 
1964  switch (tensorInfo.GetDataType())
1965  {
 1966  case armnn::DataType::Signed64:
 1967  {
1968  auto fbVector = CreateDataVector<int64_t>(constTensor.GetMemoryArea(), constTensor.GetNumBytes());
1969  flatbuffers::Offset<serializer::LongData> flatBuffersData = serializer::CreateLongData(
1970  m_flatBufferBuilder,
1971  fbVector);
1972  fbPayload = flatBuffersData.o;
1973  break;
1974  }
 1975  case armnn::DataType::Float32:
 1976  case armnn::DataType::Signed32:
 1977  {
1978  auto fbVector = CreateDataVector<int32_t>(constTensor.GetMemoryArea(), constTensor.GetNumBytes());
1979  flatbuffers::Offset<serializer::IntData> flatBuffersData = serializer::CreateIntData(
1980  m_flatBufferBuilder,
1981  fbVector);
1982  fbPayload = flatBuffersData.o;
1983  break;
1984  }
 1985  case armnn::DataType::Float16:
 1986  case armnn::DataType::BFloat16:
 1987  case armnn::DataType::QSymmS16:
 1988  {
1989  auto fbVector = CreateDataVector<int16_t>(constTensor.GetMemoryArea(), constTensor.GetNumBytes());
1990  flatbuffers::Offset<serializer::ShortData> flatBuffersData = serializer::CreateShortData(
1991  m_flatBufferBuilder,
1992  fbVector);
1993  fbPayload = flatBuffersData.o;
1994  break;
1995  }
 1996  case armnn::DataType::QSymmS8:
 1997  case armnn::DataType::QAsymmS8:
 1998  case armnn::DataType::QAsymmU8:
 1999  case armnn::DataType::Boolean:
 2000  default:
2001  {
2002  auto fbVector = CreateDataVector<int8_t>(constTensor.GetMemoryArea(), constTensor.GetNumBytes());
2003  flatbuffers::Offset<serializer::ByteData> flatBuffersData = serializer::CreateByteData(
2004  m_flatBufferBuilder,
2005  fbVector);
2006  fbPayload = flatBuffersData.o;
2007  }
2008  }
2009  flatbuffers::Offset<serializer::ConstTensor> flatBufferConstTensor = serializer::CreateConstTensor(
2010  m_flatBufferBuilder,
2011  CreateTensorInfo(tensorInfo),
 2012  GetFlatBufferConstTensorData(tensorInfo.GetDataType()),
 2013  fbPayload);
2014  return flatBufferConstTensor;
2015 }
2016 
2017 flatbuffers::Offset<armnnSerializer::FeatureCompatibilityVersions> SerializerStrategy::GetVersionTable()
2018 {
2019  flatbuffers::Offset<armnnSerializer::FeatureCompatibilityVersions> versionsTable =
2020  serializer::CreateFeatureCompatibilityVersions(
2021  m_flatBufferBuilder,
2022  1, // Binding ids scheme version
2023  1, // Weights layout scheme version
2024  1 // Constant tensors as inputs version
2025  );
2026  return versionsTable;
2027 }
2028 
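// Records, for each input slot, the connection as (serialized id of the owning layer, output slot
// index on that layer), plus any overridden TensorInfo that was set on the slot.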
2029 std::vector<fb::Offset<serializer::InputSlot>>
2030  SerializerStrategy::CreateInputSlots(const armnn::IConnectableLayer* layer)
2031 {
2032  std::vector<fb::Offset<serializer::InputSlot>> inputSlots;
2033 
2034  // Get the InputSlots
2035  for (unsigned int slotIndex = 0; slotIndex<layer->GetNumInputSlots(); ++slotIndex)
2036  {
2037  const IInputSlot& inputSlot = layer->GetInputSlot(slotIndex);
2038 
2039  // Get the Connection for the InputSlot
2040  const IOutputSlot* connection = inputSlot.GetConnection();
2041  bool isOverridden = inputSlot.IsTensorInfoOverridden();
2042 
2043  flatbuffers::Offset<TensorInfo> overriddenTensorInfo = CreateTensorInfo(inputSlot.GetTensorInfo());
2044 
2045  // Create FlatBuffer Connection
2046  serializer::Connection conn(GetSerializedId(inputSlot.GetConnection()->GetOwningLayerGuid()),
2047  connection->CalculateIndexOnOwner());
2048  // Create FlatBuffer InputSlot
2049  inputSlots.push_back(serializer::CreateInputSlot(m_flatBufferBuilder, slotIndex, &conn, isOverridden,
2050  overriddenTensorInfo));
2051  }
2052  return inputSlots;
2053 }
2054 
2055 std::vector<fb::Offset<serializer::OutputSlot>>
2056  SerializerStrategy::CreateOutputSlots(const armnn::IConnectableLayer* layer)
2057 {
2058  std::vector<fb::Offset<serializer::OutputSlot>> outputSlots;
2059 
2060  // Get the OutputSlots
2061  for (unsigned int slotIndex = 0; slotIndex < layer->GetNumOutputSlots(); ++slotIndex)
2062  {
2063  const IOutputSlot& outputSlot = layer->GetOutputSlot(slotIndex);
2064  const armnn::TensorInfo& tensorInfo = outputSlot.GetTensorInfo();
2065 
 2066  // Create FlatBuffer OutputSlot
2067  outputSlots.push_back(serializer::CreateOutputSlot(m_flatBufferBuilder,
2068  slotIndex,
2069  CreateTensorInfo(tensorInfo)));
2070  }
2071  return outputSlots;
2072 }
2073 
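// Visitor entry point invoked once per layer by INetwork::ExecuteStrategy. It downcasts the
// BaseDescriptor to the concrete descriptor for the layer type and dispatches to the matching
// Serialize* helper; unknown layer types raise an InvalidArgumentException.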
2074 void SerializerStrategy::ExecuteStrategy(const armnn::IConnectableLayer* layer,
2075  const BaseDescriptor& descriptor,
2076  const std::vector<armnn::ConstTensor>& constants,
2077  const char* name,
2078  const armnn::LayerBindingId id)
2079 {
2080  IgnoreUnused(constants);
2081 
2082  switch (layer->GetType())
2083  {
 2084  case armnn::LayerType::Activation :
 2085  {
2086  const armnn::ActivationDescriptor& layerDescriptor =
2087  static_cast<const armnn::ActivationDescriptor&>(descriptor);
2088  SerializeActivationLayer(layer, layerDescriptor, name);
2089  break;
2090  }
 2091  case armnn::LayerType::Addition :
 2092  {
2093  SerializeAdditionLayer(layer, name);
2094  break;
2095  }
 2096  case armnn::LayerType::ArgMinMax :
 2097  {
2098  const armnn::ArgMinMaxDescriptor& layerDescriptor =
2099  static_cast<const armnn::ArgMinMaxDescriptor&>(descriptor);
2100  SerializeArgMinMaxLayer(layer, layerDescriptor, name);
2101  break;
2102  }
 2103  case armnn::LayerType::BatchMatMul :
 2104  {
2105  const armnn::BatchMatMulDescriptor& layerDescriptor =
2106  static_cast<const armnn::BatchMatMulDescriptor&>(descriptor);
2107  SerializeBatchMatMulLayer(layer,
2108  layerDescriptor,
2109  name);
2110  break;
2111  }
 2112  case armnn::LayerType::BatchNormalization :
 2113  {
2114  const armnn::BatchNormalizationDescriptor& layerDescriptor =
2115  static_cast<const armnn::BatchNormalizationDescriptor&>(descriptor);
2116  SerializeBatchNormalizationLayer(layer,
2117  layerDescriptor,
2118  constants,
2119  name);
2120  break;
2121  }
 2122  case armnn::LayerType::BatchToSpaceNd :
 2123  {
2124  const armnn::BatchToSpaceNdDescriptor& layerDescriptor =
2125  static_cast<const armnn::BatchToSpaceNdDescriptor&>(descriptor);
2126  SerializeBatchToSpaceNdLayer(layer,
2127  layerDescriptor,
2128  name);
2129  break;
2130  }
2131  case armnn::LayerType::Cast :
2132  {
2133  SerializeCastLayer(layer, name);
2134  break;
2135  }
 2136  case armnn::LayerType::ChannelShuffle :
 2137  {
2138  const armnn::ChannelShuffleDescriptor& layerDescriptor =
2139  static_cast<const armnn::ChannelShuffleDescriptor&>(descriptor);
2140  SerializeChannelShuffleLayer(layer,
2141  layerDescriptor,
2142  name);
2143  break;
2144  }
 2145  case armnn::LayerType::Comparison :
 2146  {
2147  const armnn::ComparisonDescriptor& layerDescriptor =
2148  static_cast<const armnn::ComparisonDescriptor&>(descriptor);
2149  SerializeComparisonLayer(layer,
2150  layerDescriptor,
2151  name);
2152  break;
2153  }
 2154  case armnn::LayerType::Concat :
 2155  {
2156  const armnn::ConcatDescriptor& layerDescriptor =
2157  static_cast<const armnn::ConcatDescriptor&>(descriptor);
2158  SerializeConcatLayer(layer,
2159  layerDescriptor,
2160  name);
2161  break;
2162  }
 2163  case armnn::LayerType::Constant :
 2164  {
2165  SerializeConstantLayer(layer,
2166  constants,
2167  name);
2168  break;
2169  }
 2170  case armnn::LayerType::Convolution2d :
 2171  {
2172  const armnn::Convolution2dDescriptor& layerDescriptor =
2173  static_cast<const armnn::Convolution2dDescriptor&>(descriptor);
2174  SerializeConvolution2dLayer(layer,
2175  layerDescriptor,
2176  name);
2177  break;
2178  }
 2179  case armnn::LayerType::Convolution3d :
 2180  {
2181  const armnn::Convolution3dDescriptor& layerDescriptor =
2182  static_cast<const armnn::Convolution3dDescriptor&>(descriptor);
2183  SerializeConvolution3dLayer(layer,
2184  layerDescriptor,
2185  name);
2186  break;
2187  }
 2188  case armnn::LayerType::DepthToSpace :
 2189  {
2190  const armnn::DepthToSpaceDescriptor& layerDescriptor =
2191  static_cast<const armnn::DepthToSpaceDescriptor&>(descriptor);
2192  SerializeDepthToSpaceLayer(layer,
2193  layerDescriptor,
2194  name);
2195  break;
2196  }
 2197  case armnn::LayerType::DepthwiseConvolution2d :
 2198  {
2199  const armnn::DepthwiseConvolution2dDescriptor& layerDescriptor =
2200  static_cast<const armnn::DepthwiseConvolution2dDescriptor&>(descriptor);
2201  SerializeDepthwiseConvolution2dLayer(layer,
2202  layerDescriptor,
2203  name);
2204  break;
2205  }
 2206  case armnn::LayerType::Dequantize :
 2207  {
2208  SerializeDequantizeLayer(layer,
2209  name);
2210  break;
2211  }
 2212  case armnn::LayerType::DetectionPostProcess :
 2213  {
2214  const armnn::DetectionPostProcessDescriptor& layerDescriptor =
2215  static_cast<const armnn::DetectionPostProcessDescriptor&>(descriptor);
2216  SerializeDetectionPostProcessLayer(layer, layerDescriptor, constants, name);
2217  break;
2218  }
 2219  case armnn::LayerType::Division :
 2220  {
2221  SerializeDivisionLayer(layer, name);
2222  break;
2223  }
 2224  case armnn::LayerType::ElementwiseBinary :
 2225  {
2226  const armnn::ElementwiseBinaryDescriptor& layerDescriptor =
2227  static_cast<const armnn::ElementwiseBinaryDescriptor&>(descriptor);
2228  SerializeElementwiseBinaryLayer(layer, layerDescriptor, name);
2229  break;
2230  }
 2231  case armnn::LayerType::ElementwiseUnary :
 2232  {
2233  const armnn::ElementwiseUnaryDescriptor& layerDescriptor =
2234  static_cast<const armnn::ElementwiseUnaryDescriptor&>(descriptor);
2235  SerializeElementwiseUnaryLayer(layer, layerDescriptor, name);
2236  break;
2237  }
2238  case armnn::LayerType::Fill :
2239  {
2240  const armnn::FillDescriptor& layerDescriptor =
2241  static_cast<const armnn::FillDescriptor&>(descriptor);
2242  SerializeFillLayer(layer, layerDescriptor, name);
2243  break;
2244  }
 2245  case armnn::LayerType::Floor :
 2246  {
2247  SerializeFloorLayer(layer, name);
2248  break;
2249  }
 2250  case armnn::LayerType::FullyConnected :
 2251  {
2252  const armnn::FullyConnectedDescriptor& layerDescriptor =
2253  static_cast<const armnn::FullyConnectedDescriptor&>(descriptor);
2254  SerializeFullyConnectedLayer(layer, layerDescriptor, name);
2255  break;
2256  }
 2257  case armnn::LayerType::Gather :
 2258  {
2259  const armnn::GatherDescriptor& layerDescriptor =
2260  static_cast<const armnn::GatherDescriptor&>(descriptor);
2261  SerializeGatherLayer(layer, layerDescriptor, name);
2262  break;
2263  }
 2264  case armnn::LayerType::GatherNd :
 2265  {
2266  SerializeGatherNdLayer(layer, name);
2267  break;
2268  }
 2269  case armnn::LayerType::Input :
 2270  {
2271  SerializeInputLayer(layer, id, name);
2272  break;
2273  }
 2274  case armnn::LayerType::InstanceNormalization :
 2275  {
2276  const armnn::InstanceNormalizationDescriptor& layerDescriptor =
2277  static_cast<const armnn::InstanceNormalizationDescriptor&>(descriptor);
2278  SerializeInstanceNormalizationLayer(layer, layerDescriptor, name);
2279  break;
2280  }
 2281  case armnn::LayerType::L2Normalization :
 2282  {
2283  const armnn::L2NormalizationDescriptor& layerDescriptor =
2284  static_cast<const armnn::L2NormalizationDescriptor&>(descriptor);
2285  SerializeL2NormalizationLayer(layer, layerDescriptor, name);
2286  break;
2287  }
 2288  case armnn::LayerType::LogicalBinary :
 2289  {
2290  const armnn::LogicalBinaryDescriptor& layerDescriptor =
2291  static_cast<const armnn::LogicalBinaryDescriptor&>(descriptor);
2292  SerializeLogicalBinaryLayer(layer, layerDescriptor, name);
2293  break;
2294  }
 2295  case armnn::LayerType::LogSoftmax :
 2296  {
2297  const armnn::LogSoftmaxDescriptor& layerDescriptor =
2298  static_cast<const armnn::LogSoftmaxDescriptor&>(descriptor);
2299  SerializeLogSoftmaxLayer(layer, layerDescriptor, name);
2300  break;
2301  }
2302  case armnn::LayerType::Lstm :
2303  {
2304  const armnn::LstmDescriptor& layerDescriptor =
2305  static_cast<const armnn::LstmDescriptor&>(descriptor);
2306  SerializeLstmLayer(layer, layerDescriptor, constants, name);
2307  break;
2308  }
 2309  case armnn::LayerType::QLstm :
 2310  {
2311  const armnn::QLstmDescriptor& layerDescriptor =
2312  static_cast<const armnn::QLstmDescriptor&>(descriptor);
2313  SerializeQLstmLayer(layer, layerDescriptor, constants, name);
2314  break;
2315  }
 2316  case armnn::LayerType::Maximum :
 2317  {
2318  SerializeMaximumLayer(layer, name);
2319  break;
2320  }
2321  case armnn::LayerType::Mean :
2322  {
2323  const armnn::MeanDescriptor& layerDescriptor =
2324  static_cast<const armnn::MeanDescriptor&>(descriptor);
2325  SerializeMeanLayer(layer, layerDescriptor, name);
2326  break;
2327  }
 2328  case armnn::LayerType::Merge :
 2329  {
2330  SerializeMergeLayer(layer, name);
2331  break;
2332  }
 2333  case armnn::LayerType::Minimum :
 2334  {
2335  SerializeMinimumLayer(layer, name);
2336  break;
2337  }
 2338  case armnn::LayerType::Multiplication :
 2339  {
2340  SerializeMultiplicationLayer(layer, name);
2341  break;
2342  }
 2343  case armnn::LayerType::Normalization :
 2344  {
2345  const armnn::NormalizationDescriptor& layerDescriptor =
2346  static_cast<const armnn::NormalizationDescriptor&>(descriptor);
2347  SerializeNormalizationLayer(layer, layerDescriptor, name);
2348  break;
2349  }
 2350  case armnn::LayerType::Output :
 2351  {
2352  SerializeOutputLayer(layer, id, name);
2353  break;
2354  }
2355  case armnn::LayerType::Pad :
2356  {
2357  const armnn::PadDescriptor& layerDescriptor =
2358  static_cast<const armnn::PadDescriptor&>(descriptor);
2359  SerializePadLayer(layer, layerDescriptor, name);
2360  break;
2361  }
 2362  case armnn::LayerType::Permute :
 2363  {
2364  const armnn::PermuteDescriptor& layerDescriptor =
2365  static_cast<const armnn::PermuteDescriptor&>(descriptor);
2366  SerializePermuteLayer(layer, layerDescriptor, name);
2367  break;
2368  }
 2369  case armnn::LayerType::Pooling2d :
 2370  {
2371  const armnn::Pooling2dDescriptor& layerDescriptor =
2372  static_cast<const armnn::Pooling2dDescriptor&>(descriptor);
2373  SerializePooling2dLayer(layer, layerDescriptor, name);
2374  break;
2375  }
 2376  case armnn::LayerType::Pooling3d :
 2377  {
2378  const armnn::Pooling3dDescriptor& layerDescriptor =
2379  static_cast<const armnn::Pooling3dDescriptor&>(descriptor);
2380  SerializePooling3dLayer(layer, layerDescriptor, name);
2381  break;
2382  }
 2383  case armnn::LayerType::Prelu :
 2384  {
2385  SerializePreluLayer(layer, name);
2386  break;
2387  }
 2388  case armnn::LayerType::Quantize :
 2389  {
2390  SerializeQuantizeLayer(layer, name);
2391  break;
2392  }
 2393  case armnn::LayerType::QuantizedLstm :
 2394  SerializeQuantizedLstmLayer(layer, constants, name);
2395  break;
 2396  case armnn::LayerType::Reshape :
 2397  {
2398  const armnn::ReshapeDescriptor &layerDescriptor =
2399  static_cast<const armnn::ReshapeDescriptor &>(descriptor);
2400  SerializeReshapeLayer(layer, layerDescriptor, name);
2401  break;
2402  }
 2403  case armnn::LayerType::Rank :
 2404  {
2405  SerializeRankLayer(layer, name);
2406  break;
2407  }
 2408  case armnn::LayerType::Reduce :
 2409  {
2410  const armnn::ReduceDescriptor& layerDescriptor =
2411  static_cast<const armnn::ReduceDescriptor&>(descriptor);
2412  SerializeReduceLayer(layer, layerDescriptor, name);
2413  break;
2414  }
 2415  case armnn::LayerType::Resize :
 2416  {
2417  const armnn::ResizeDescriptor& layerDescriptor =
2418  static_cast<const armnn::ResizeDescriptor&>(descriptor);
2419  SerializeResizeLayer(layer, layerDescriptor, name);
2420  break;
2421  }
 2422  case armnn::LayerType::ReverseV2 :
 2423  {
2424  SerializeReverseV2Layer(layer, name);
2425  break;
2426  }
 2427  case armnn::LayerType::ScatterNd :
 2428  {
2429  const armnn::ScatterNdDescriptor& layerDescriptor =
2430  static_cast<const armnn::ScatterNdDescriptor&>(descriptor);
2431  SerializeScatterNdLayer(layer, layerDescriptor, name);
2432  break;
2433  }
 2434  case armnn::LayerType::Shape :
 2435  {
2436  SerializeShapeLayer(layer, name);
2437  break;
2438  }
 2439  case armnn::LayerType::Slice :
 2440  {
2441  const armnn::SliceDescriptor& layerDescriptor =
2442  static_cast<const armnn::SliceDescriptor&>(descriptor);
2443  SerializeSliceLayer(layer, layerDescriptor, name);
2444  break;
2445  }
 2446  case armnn::LayerType::Softmax :
 2447  {
2448  const armnn::SoftmaxDescriptor& layerDescriptor =
2449  static_cast<const armnn::SoftmaxDescriptor&>(descriptor);
2450  SerializeSoftmaxLayer(layer, layerDescriptor, name);
2451  break;
2452  }
 2453  case armnn::LayerType::SpaceToBatchNd :
 2454  {
2455  const armnn::SpaceToBatchNdDescriptor& layerDescriptor =
2456  static_cast<const armnn::SpaceToBatchNdDescriptor&>(descriptor);
2457  SerializeSpaceToBatchNdLayer(layer, layerDescriptor, name);
2458  break;
2459  }
 2460  case armnn::LayerType::SpaceToDepth :
 2461  {
2462  const armnn::SpaceToDepthDescriptor& layerDescriptor =
2463  static_cast<const armnn::SpaceToDepthDescriptor&>(descriptor);
2464  SerializeSpaceToDepthLayer(layer, layerDescriptor, name);
2465  break;
2466  }
 2467  case armnn::LayerType::Splitter :
 2468  {
2469  const armnn::SplitterDescriptor& layerDescriptor =
2470  static_cast<const armnn::SplitterDescriptor&>(descriptor);
2471  SerializeSplitterLayer(layer, layerDescriptor, name);
2472  break;
2473  }
 2474  case armnn::LayerType::Stack :
 2475  {
2476  const armnn::StackDescriptor& layerDescriptor =
2477  static_cast<const armnn::StackDescriptor&>(descriptor);
2478  SerializeStackLayer(layer, layerDescriptor, name);
2479  break;
2480  }
 2481  case armnn::LayerType::StandIn :
 2482  {
2483  const armnn::StandInDescriptor& layerDescriptor =
2484  static_cast<const armnn::StandInDescriptor&>(descriptor);
2485  SerializeStandInLayer(layer, layerDescriptor, name);
2486  break;
2487  }
 2488  case armnn::LayerType::StridedSlice :
 2489  {
2490  const armnn::StridedSliceDescriptor& layerDescriptor =
2491  static_cast<const armnn::StridedSliceDescriptor&>(descriptor);
2492  SerializeStridedSliceLayer(layer, layerDescriptor, name);
2493  break;
2494  }
 2495  case armnn::LayerType::Subtraction :
 2496  {
2497  SerializeSubtractionLayer(layer, name);
2498  break;
2499  }
 2500  case armnn::LayerType::Switch :
 2501  {
2502  SerializeSwitchLayer(layer, name);
2503  break;
2504  }
 2505  case armnn::LayerType::Tile :
 2506  {
2507  const armnn::TileDescriptor& layerDescriptor =
2508  static_cast<const armnn::TileDescriptor&>(descriptor);
2509  SerializeTileLayer(layer, layerDescriptor, name);
2510  break;
2511  }
 2512  case armnn::LayerType::Transpose :
 2513  {
2514  const armnn::TransposeDescriptor& layerDescriptor =
2515  static_cast<const armnn::TransposeDescriptor&>(descriptor);
2516  SerializeTransposeLayer(layer, layerDescriptor, name);
2517  break;
2518  }
 2519  case armnn::LayerType::TransposeConvolution2d :
 2520  {
2521  const armnn::TransposeConvolution2dDescriptor& layerDescriptor =
2522  static_cast<const armnn::TransposeConvolution2dDescriptor&>(descriptor);
2523  SerializeTransposeConvolution2dLayer(layer, layerDescriptor, constants, name);
2524  break;
2525  }
 2526  case armnn::LayerType::UnidirectionalSequenceLstm :
 2527  {
2528  const armnn::UnidirectionalSequenceLstmDescriptor& layerDescriptor =
2529  static_cast<const armnn::UnidirectionalSequenceLstmDescriptor&>(descriptor);
2530  SerializeUnidirectionalSequenceLstmLayer(layer, layerDescriptor, constants, name);
2531  break;
2532  }
2533  default:
2534  {
 2535  throw InvalidArgumentException(
 2536  fmt::format("A layer of unknown type was given to the serializer. Layer name: {}; Layer Id: {}",
2537  layer->GetName(),
2538  id));
2539  }
2540  }
2541 }
2542 
2543 void ISerializer::SerializerImpl::Serialize(const INetwork& inNetwork)
2544 {
 2545  // Iterate through the network
2546  inNetwork.ExecuteStrategy(m_SerializerStrategy);
2547  flatbuffers::FlatBufferBuilder& fbBuilder = m_SerializerStrategy.GetFlatBufferBuilder();
2548 
2549  // Create FlatBuffer SerializedGraph
2550  auto serializedGraph = serializer::CreateSerializedGraph(
2551  fbBuilder,
2552  fbBuilder.CreateVector(m_SerializerStrategy.GetSerializedLayers()),
2553  fbBuilder.CreateVector(m_SerializerStrategy.GetInputIds()),
2554  fbBuilder.CreateVector(m_SerializerStrategy.GetOutputIds()),
2555  m_SerializerStrategy.GetVersionTable());
2556 
2557  // Serialize the graph
2558  fbBuilder.Finish(serializedGraph);
2559 }
2560 
2561 
2562 bool ISerializer::SerializerImpl::SaveSerializedToStream(std::ostream& stream)
2563 {
2564  flatbuffers::FlatBufferBuilder& fbBuilder = m_SerializerStrategy.GetFlatBufferBuilder();
2565 
2566  auto bytesToWrite = armnn::numeric_cast<std::streamsize>(fbBuilder.GetSize());
2567  stream.write(reinterpret_cast<const char*>(fbBuilder.GetBufferPointer()), bytesToWrite);
2568  return !stream.bad();
2569 }
2570 
2571 } // namespace armnnSerializer
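
A minimal usage sketch of how the serializer implemented in this file is typically driven from client code. The helper name, include paths and the pre-built network are illustrative assumptions; only the Serialize and SaveSerializedToStream calls correspond to the entry points defined above.

#include <armnn/INetwork.hpp>
#include <armnnSerializer/ISerializer.hpp>

#include <fstream>
#include <string>

// Hypothetical helper: serializes an already-built INetwork and writes the FlatBuffer to 'path'.
bool SaveNetworkToFile(const armnn::INetwork& network, const std::string& path)
{
    armnnSerializer::ISerializerPtr serializer = armnnSerializer::ISerializer::Create();
    serializer->Serialize(network);                   // visits every layer via ExecuteStrategy
    std::ofstream file(path, std::ios::binary);
    return serializer->SaveSerializedToStream(file);  // writes the finished buffer to the stream
}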
armnn::BatchNormalizationDescriptor
A BatchNormalizationDescriptor for the BatchNormalizationLayer.
Definition: Descriptors.hpp:828
armnn::Convolution2dDescriptor::m_PadTop
uint32_t m_PadTop
Padding top value in the height dimension.
Definition: Descriptors.hpp:570
armnn::ArgMinMaxFunction::Max
@ Max
armnn::LayerType::SpaceToDepth
@ SpaceToDepth
armnn::InstanceNormalizationDescriptor::m_Beta
float m_Beta
Beta, the offset scalar value applied for the normalized tensor. Defaults to 1.0.
Definition: Descriptors.hpp:867
armnn::SliceDescriptor::m_Begin
std::vector< unsigned int > m_Begin
Beginning indices of the slice in each dimension.
Definition: Descriptors.hpp:1244
armnn::INetwork::ExecuteStrategy
void ExecuteStrategy(IStrategy &strategy) const
Definition: Network.cpp:672
armnn::ChannelShuffleDescriptor::m_Axis
uint32_t m_Axis
Axis to apply channel shuffle operation on.
Definition: Descriptors.hpp:1580
armnn::OriginsDescriptor::GetConcatAxis
unsigned int GetConcatAxis() const
Get the concatenation axis value.
Definition: Descriptors.cpp:162
armnn::DataType::Boolean
@ Boolean
armnn::FullyConnectedDescriptor::m_ConstantWeights
bool m_ConstantWeights
Enable/disable constant weights and biases.
Definition: Descriptors.hpp:530
armnn::Pooling2dDescriptor::m_PaddingMethod
PaddingMethod m_PaddingMethod
The padding method to be used. (Exclude, IgnoreValue).
Definition: Descriptors.hpp:425
armnn::ViewsDescriptor
A ViewsDescriptor for the SplitterLayer.
Definition: Descriptors.hpp:244
armnn::DetectionPostProcessDescriptor::m_NmsScoreThreshold
float m_NmsScoreThreshold
NMS score threshold.
Definition: Descriptors.hpp:751
armnn::LayerType::Permute
@ Permute
armnn::QLstmDescriptor::m_ForgetIntermediateScale
float m_ForgetIntermediateScale
Forget intermediate quantization scale.
Definition: Descriptors.hpp:1428
armnn::ActivationDescriptor
An ActivationDescriptor for the ActivationLayer.
Definition: Descriptors.hpp:36
armnn::TransposeConvolution2dDescriptor::m_PadLeft
uint32_t m_PadLeft
Padding left value in the width dimension.
Definition: Descriptors.hpp:1469
armnn::BatchMatMulDescriptor::m_TransposeX
bool m_TransposeX
Transpose the slices of each input tensor Transpose and Adjoint can not both be set to true for the s...
Definition: Descriptors.hpp:1612
armnn::OriginsDescriptor::GetNumViews
uint32_t GetNumViews() const
Get the number of views.
Definition: Descriptors.cpp:187
armnn::FullyConnectedDescriptor
A FullyConnectedDescriptor for the FullyConnectedLayer.
Definition: Descriptors.hpp:507
armnn::DetectionPostProcessDescriptor::m_ScaleX
float m_ScaleX
Center size encoding scale x.
Definition: Descriptors.hpp:759
armnn::LayerType::Splitter
@ Splitter
armnn::BaseTensor::GetMemoryArea
MemoryType GetMemoryArea() const
Definition: Tensor.hpp:307
armnn::LayerType::BatchNormalization
@ BatchNormalization
armnn::QLstmDescriptor
A QLstmDescriptor for the QLstmLayer.
Definition: Descriptors.hpp:1380
armnn::ScatterNdFunction::Min
@ Min
Serializer.hpp
armnn::TransposeConvolution2dDescriptor::m_StrideX
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
Definition: Descriptors.hpp:1477
armnn::IConnectableLayer::GetNumInputSlots
virtual unsigned int GetNumInputSlots() const =0
Returns the number of connectable input slots.
armnn::ActivationFunction::LeakyReLu
@ LeakyReLu
armnnSerializer::GetFlatBufferNormalizationAlgorithmChannel
armnnSerializer::NormalizationAlgorithmChannel GetFlatBufferNormalizationAlgorithmChannel(armnn::NormalizationAlgorithmChannel normalizationAlgorithmChannel)
Definition: SerializerUtils.cpp:213
armnn::QLstmDescriptor::m_ProjectionEnabled
bool m_ProjectionEnabled
Enable/disable the projection layer.
Definition: Descriptors.hpp:1422
armnn::Pooling3dDescriptor::m_OutputShapeRounding
OutputShapeRounding m_OutputShapeRounding
The rounding method for the output shape. (Floor, Ceiling).
Definition: Descriptors.hpp:499
armnn::ResizeDescriptor::m_HalfPixelCenters
bool m_HalfPixelCenters
Half Pixel Centers.
Definition: Descriptors.hpp:1018
armnn::LstmDescriptor::m_TimeMajor
bool m_TimeMajor
Enable/disable time major.
Definition: Descriptors.hpp:1154
armnn::BatchNormalizationDescriptor::m_DataLayout
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Definition: Descriptors.hpp:843
armnn::Pooling3dDescriptor::m_PadTop
uint32_t m_PadTop
Padding top value in the height dimension.
Definition: Descriptors.hpp:479
Descriptors.hpp
armnn::SpaceToBatchNdDescriptor::m_DataLayout
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Definition: Descriptors.hpp:1071
armnn::FullyConnectedDescriptor::m_TransposeWeightMatrix
bool m_TransposeWeightMatrix
Enable/disable transpose weight matrix.
Definition: Descriptors.hpp:528
armnn::ResizeDescriptor::m_TargetHeight
uint32_t m_TargetHeight
Target height value.
Definition: Descriptors.hpp:1009
armnn::DepthwiseConvolution2dDescriptor::m_BiasEnabled
bool m_BiasEnabled
Enable/disable bias.
Definition: Descriptors.hpp:708
armnn::TensorInfo::GetQuantizationScales
std::vector< float > GetQuantizationScales() const
Definition: Tensor.cpp:451
armnn::Pooling2dDescriptor::m_PoolHeight
uint32_t m_PoolHeight
Pooling height value.
Definition: Descriptors.hpp:417
armnn::StridedSliceDescriptor::m_Begin
std::vector< int > m_Begin
Begin values for the input that will be sliced.
Definition: Descriptors.hpp:1342
armnn::ScatterNdFunction::Sub
@ Sub
armnn::DetectionPostProcessDescriptor::m_ScaleY
float m_ScaleY
Center size encoding scale y.
Definition: Descriptors.hpp:761
armnn::Pooling3dDescriptor
A Pooling3dDescriptor for the Pooling3dLayer.
Definition: Descriptors.hpp:431
armnn::BatchMatMulDescriptor::m_AdjointX
bool m_AdjointX
Adjoint the slices of each input tensor Transpose and Adjoint can not both be set to true for the sam...
Definition: Descriptors.hpp:1617
armnn::DetectionPostProcessDescriptor::m_MaxDetections
uint32_t m_MaxDetections
Maximum numbers of detections.
Definition: Descriptors.hpp:745
armnn::Convolution3dDescriptor::m_PadFront
uint32_t m_PadFront
Padding front value in the depth dimension.
Definition: Descriptors.hpp:637
armnn::ResizeDescriptor
A ResizeDescriptor for the ResizeLayer.
Definition: Descriptors.hpp:985
armnn::IInputSlot::IsTensorInfoOverridden
virtual bool IsTensorInfoOverridden() const =0
Returns true if this InputSlot has an overridden TensorInfo that was set through a call to SetTensorI...
armnn::ArgMinMaxDescriptor
An ArgMinMaxDescriptor for ArgMinMaxLayer.
Definition: Descriptors.hpp:67
armnn::IConnectableLayer::GetName
virtual const char * GetName() const =0
Returns the name of the layer.
armnn::ActivationDescriptor::m_A
float m_A
Alpha upper bound value used by the activation functions. (BoundedReLu, Linear, TanH,...
Definition: Descriptors.hpp:61
armnn::InstanceNormalizationDescriptor
An InstanceNormalizationDescriptor for InstanceNormalizationLayer.
Definition: Descriptors.hpp:847
armnn::StridedSliceDescriptor::m_EllipsisMask
int32_t m_EllipsisMask
Ellipsis mask value.
Definition: Descriptors.hpp:1357
armnn::ViewsDescriptor::HasAxis
bool HasAxis() const
Returns true if an axis has been set.
Definition: Descriptors.cpp:388
armnn::SoftmaxDescriptor::m_Beta
float m_Beta
Exponentiation value.
Definition: Descriptors.hpp:190
armnn::GatherDescriptor
A GatherDescriptor for the GatherLayer.
Definition: Descriptors.hpp:965
armnn::TensorShape::GetDimensionSpecificity
bool GetDimensionSpecificity(unsigned int i) const
Gets information about if the dimension size has been specified or not.
Definition: Tensor.cpp:211
armnn::TensorInfo::GetQuantizationScale
float GetQuantizationScale() const
Definition: Tensor.cpp:461
armnn::LayerType::InstanceNormalization
@ InstanceNormalization
armnn::DepthwiseConvolution2dDescriptor::m_DataLayout
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Definition: Descriptors.hpp:710
armnn::ActivationFunction::Sqrt
@ Sqrt
armnn::ScatterNdDescriptor::m_InputEnabled
bool m_InputEnabled
Flag to show if input tensor is accepted.
Definition: Descriptors.hpp:1722
armnn::BatchMatMulDescriptor::m_DataLayoutX
DataLayout m_DataLayoutX
Data layout of each input tensor, such as NHWC/NDHWC (leave as default for arbitrary layout)
Definition: Descriptors.hpp:1621
armnn::L2NormalizationDescriptor::m_DataLayout
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Definition: Descriptors.hpp:824
armnn::TensorInfo
Definition: Tensor.hpp:152
armnn::LayerType::Floor
@ Floor
armnn::L2NormalizationDescriptor
A L2NormalizationDescriptor for the L2NormalizationLayer.
Definition: Descriptors.hpp:809
armnnSerializer::GetFlatBufferComparisonOperation
armnnSerializer::ComparisonOperation GetFlatBufferComparisonOperation(armnn::ComparisonOperation comparisonOperation)
Definition: SerializerUtils.cpp:11
armnn::BaseTensor::GetNumBytes
unsigned int GetNumBytes() const
Definition: Tensor.hpp:304
armnn::NormalizationDescriptor::m_Beta
float m_Beta
Beta value for the normalization equation.
Definition: Descriptors.hpp:801
armnn::OriginsDescriptor::GetNumDimensions
uint32_t GetNumDimensions() const
Get the number of dimensions.
Definition: Descriptors.cpp:192
armnn::LayerType::Transpose
@ Transpose
armnn::NormalizationDescriptor
A NormalizationDescriptor for the NormalizationLayer.
Definition: Descriptors.hpp:769
armnn::LayerType::Comparison
@ Comparison
armnn::Pooling2dDescriptor::m_StrideY
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
Definition: Descriptors.hpp:421
armnn::BatchToSpaceNdDescriptor::m_BlockShape
std::vector< unsigned int > m_BlockShape
Block shape values.
Definition: Descriptors.hpp:898
armnn::IConnectableLayer::GetGuid
virtual LayerGuid GetGuid() const =0
Returns the unique id of the layer.
armnn::LayerType::StridedSlice
@ StridedSlice
armnn::ChannelShuffleDescriptor
A ChannelShuffleDescriptor for the ChannelShuffle operator.
Definition: Descriptors.hpp:1562
armnn::DataType::Float32
@ Float32
armnn::ResizeDescriptor::m_DataLayout
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Definition: Descriptors.hpp:1014
armnn::ActivationFunction::TanH
@ TanH
armnn::DepthwiseConvolution2dDescriptor::m_PadLeft
uint32_t m_PadLeft
Padding left value in the width dimension.
Definition: Descriptors.hpp:692
armnnSerializer::GetFlatBufferDataType
armnnSerializer::DataType GetFlatBufferDataType(armnn::DataType dataType)
Definition: SerializerUtils.cpp:67
armnn::Convolution2dDescriptor::m_StrideY
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
Definition: Descriptors.hpp:576
armnn::Pooling2dDescriptor::m_PadTop
uint32_t m_PadTop
Padding top value in the height dimension.
Definition: Descriptors.hpp:411
armnn::Convolution3dDescriptor::m_PadTop
uint32_t m_PadTop
Padding top value in the height dimension.
Definition: Descriptors.hpp:633
armnnSerializer
Definition: ISerializer.hpp:11
armnn::ArgMinMaxDescriptor::m_Function
ArgMinMaxFunction m_Function
Specify if the function is to find Min or Max.
Definition: Descriptors.hpp:81
armnn::LayerType::Tile
@ Tile
armnn::SpaceToBatchNdDescriptor::m_BlockShape
std::vector< unsigned int > m_BlockShape
Block shape value.
Definition: Descriptors.hpp:1066
armnn::StridedSliceDescriptor::m_BeginMask
int32_t m_BeginMask
Begin mask value.
Definition: Descriptors.hpp:1350
armnn::BatchMatMulDescriptor::m_AdjointY
bool m_AdjointY
Definition: Descriptors.hpp:1618
armnn::Convolution3dDescriptor::m_DilationX
uint32_t m_DilationX
Dilation along x axis.
Definition: Descriptors.hpp:647
armnn::Convolution3dDescriptor::m_PadBottom
uint32_t m_PadBottom
Padding bottom value in the height dimension.
Definition: Descriptors.hpp:635
armnn::DataType::QAsymmU8
@ QAsymmU8
armnn::QLstmDescriptor::m_InputIntermediateScale
float m_InputIntermediateScale
Input intermediate quantization scale.
Definition: Descriptors.hpp:1426
armnn::ArgMinMaxFunction
ArgMinMaxFunction
Definition: Types.hpp:103
armnn::DetectionPostProcessDescriptor::m_ScaleW
float m_ScaleW
Center size encoding scale weight.
Definition: Descriptors.hpp:763
armnn::ActivationFunction::BoundedReLu
@ BoundedReLu
min(a, max(b, input)) ReLu1 & ReLu6.
armnn::LayerType::Stack
@ Stack
armnn::DataType::QSymmS8
@ QSymmS8
armnn::StackDescriptor
A StackDescriptor for the StackLayer.
Definition: Descriptors.hpp:1251
armnn::IOutputSlot::GetTensorInfo
virtual const TensorInfo & GetTensorInfo() const =0
IgnoreUnused.hpp
armnn::Pooling3dDescriptor::m_StrideZ
uint32_t m_StrideZ
Stride value when proceeding through input for the depth dimension.
Definition: Descriptors.hpp:497
armnn::LayerType::Normalization
@ Normalization
armnn::NormalizationDescriptor::m_NormSize
uint32_t m_NormSize
Depth radius value.
Definition: Descriptors.hpp:797
armnn::LayerType::QuantizedLstm
@ QuantizedLstm
armnn::Pooling2dDescriptor::m_PoolWidth
uint32_t m_PoolWidth
Pooling width value.
Definition: Descriptors.hpp:415
armnn::StandInDescriptor::m_NumInputs
uint32_t m_NumInputs
Number of input tensors.
Definition: Descriptors.hpp:1297
armnn::LayerType::Reduce
@ Reduce
armnn::Pooling3dDescriptor::m_DataLayout
DataLayout m_DataLayout
The data layout to be used (NCDHW, NDHWC).
Definition: Descriptors.hpp:503
armnn::Convolution2dDescriptor::m_PadLeft
uint32_t m_PadLeft
Padding left value in the width dimension.
Definition: Descriptors.hpp:566
armnn::BatchToSpaceNdDescriptor::m_Crops
std::vector< std::pair< unsigned int, unsigned int > > m_Crops
The values to crop from the input dimension.
Definition: Descriptors.hpp:900
armnn::TensorInfo::HasPerAxisQuantization
bool HasPerAxisQuantization() const
Definition: Tensor.cpp:446
armnn::DepthwiseConvolution2dDescriptor::m_StrideY
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
Definition: Descriptors.hpp:702
armnn::Convolution2dDescriptor::m_DilationY
uint32_t m_DilationY
Dilation along y axis.
Definition: Descriptors.hpp:580
armnn::LayerType::ElementwiseUnary
@ ElementwiseUnary
armnn::Pooling3dDescriptor::m_PadBottom
uint32_t m_PadBottom
Padding bottom value in the height dimension.
Definition: Descriptors.hpp:481
armnn::DetectionPostProcessDescriptor::m_MaxClassesPerDetection
uint32_t m_MaxClassesPerDetection
Maximum numbers of classes per detection, used in Fast NMS.
Definition: Descriptors.hpp:747
armnnSerializer::GetFlatBufferResizeMethod
armnnSerializer::ResizeMethod GetFlatBufferResizeMethod(armnn::ResizeMethod method)
Definition: SerializerUtils.cpp:241
armnn::IConnectableLayer::GetNumOutputSlots
virtual unsigned int GetNumOutputSlots() const =0
Returns the number of connectable output slots.
armnnSerializer::ISerializer
Definition: ISerializer.hpp:17
armnn::QLstmDescriptor::m_CellIntermediateScale
float m_CellIntermediateScale
Cell intermediate quantization scale.
Definition: Descriptors.hpp:1430
armnn::ActivationFunction::HardSwish
@ HardSwish
armnn::DataType::QSymmS16
@ QSymmS16
armnn::ActivationFunction::Gelu
@ Gelu
armnnSerializer::GetFlatBufferActivationFunction
serializer::ActivationFunction GetFlatBufferActivationFunction(armnn::ActivationFunction function)
Definition: Serializer.cpp:55
armnn::NormalizationDescriptor::m_NormMethodType
NormalizationAlgorithmMethod m_NormMethodType
Normalization method algorithm to use (LocalBrightness, LocalContrast).
Definition: Descriptors.hpp:795
armnn::LayerType::GatherNd
@ GatherNd
armnn::LayerType::ElementwiseBinary
@ ElementwiseBinary
armnn::TransposeConvolution2dDescriptor::m_PadBottom
uint32_t m_PadBottom
Padding bottom value in the height dimension.
Definition: Descriptors.hpp:1475
armnn::ScatterNdDescriptor::m_AxisEnabled
bool m_AxisEnabled
Flag for ScatterElement, will be set to false by default, we do not support m_AxisEnable = true for n...
Definition: Descriptors.hpp:1728
NumericCast.hpp
armnn::DataType::BFloat16
@ BFloat16
armnn::Pooling3dDescriptor::m_StrideY
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
Definition: Descriptors.hpp:495
armnn::ReduceDescriptor::m_ReduceOperation
ReduceOperation m_ReduceOperation
Specifies the reduction operation to execute.
Definition: Descriptors.hpp:1558
armnnSerializer::GetFlatBufferConstTensorData
armnnSerializer::ConstTensorData GetFlatBufferConstTensorData(armnn::DataType dataType)
Definition: SerializerUtils.cpp:45
armnn::MeanDescriptor::m_KeepDims
bool m_KeepDims
Enable/disable keep dimensions. If true, then the reduced dimensions that are of length 1 are kept.
Definition: Descriptors.hpp:1192
armnn::FillDescriptor::m_Value
float m_Value
Definition: Descriptors.hpp:940
armnn::ElementwiseBinaryDescriptor
A ElementwiseBinaryDescriptor for the ElementwiseBinaryLayer.
Definition: Descriptors.hpp:109
armnn::ViewsDescriptor::GetViewSizes
const uint32_t * GetViewSizes(uint32_t idx) const
Get the view sizes at the int value idx.
Definition: Descriptors.cpp:347
armnnSerializer::GetFlatBufferBinaryOperation
armnnSerializer::BinaryOperation GetFlatBufferBinaryOperation(armnn::BinaryOperation binaryOperation)
Definition: SerializerUtils.cpp:110
armnn::Pooling3dDescriptor::m_PoolType
PoolingAlgorithm m_PoolType
The pooling algorithm to use (Max. Average, L2).
Definition: Descriptors.hpp:473
armnn::ResizeDescriptor::m_Method
ResizeMethod m_Method
The Interpolation method to use (Bilinear, NearestNeighbor).
Definition: Descriptors.hpp:1012
armnn::SpaceToBatchNdDescriptor::m_PadList
std::vector< std::pair< unsigned int, unsigned int > > m_PadList
Specifies the padding values for the input dimension: heightPad{top, bottom} widthPad{left,...
Definition: Descriptors.hpp:1069
armnn::LayerType::Slice
@ Slice
armnn::LstmDescriptor::m_PeepholeEnabled
bool m_PeepholeEnabled
Enable/disable peephole.
Definition: Descriptors.hpp:1148
armnn::Convolution3dDescriptor::m_PadRight
uint32_t m_PadRight
Padding right value in the width dimension.
Definition: Descriptors.hpp:631
armnn::ScatterNdDescriptor::m_Axis
int32_t m_Axis
Extra attribute for ScatterElement, will be set to 0 by default, we do not support axis !...
Definition: Descriptors.hpp:1725
armnn::NormalizationDescriptor::m_NormChannelType
NormalizationAlgorithmChannel m_NormChannelType
Normalization channel algorithm to use (Across, Within).
Definition: Descriptors.hpp:793
LstmParams.hpp
armnn::IOutputSlot
An output connection slot for a layer.
Definition: INetwork.hpp:53
armnnSerializer::GetFlatBufferDataLayout
armnnSerializer::DataLayout GetFlatBufferDataLayout(armnn::DataLayout dataLayout)
Definition: SerializerUtils.cpp:94
armnn::StackDescriptor::m_NumInputs
uint32_t m_NumInputs
Number of input tensors.
Definition: Descriptors.hpp:1275
armnn::DataType::Float16
@ Float16
armnn::LstmDescriptor::m_ClippingThresProj
float m_ClippingThresProj
Clipping threshold value for the projection.
Definition: Descriptors.hpp:1144
armnn::LayerType::ChannelShuffle
@ ChannelShuffle
armnn::Pooling3dDescriptor::m_PoolWidth
uint32_t m_PoolWidth
Pooling width value.
Definition: Descriptors.hpp:487
armnn::StridedSliceDescriptor::m_DataLayout
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Definition: Descriptors.hpp:1363
armnn::Pooling2dDescriptor::m_DataLayout
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Definition: Descriptors.hpp:427
armnn::TensorShape::GetNumDimensions
unsigned int GetNumDimensions() const
Function that returns the tensor rank.
Definition: Tensor.cpp:174
armnn::DepthwiseConvolution2dDescriptor::m_DilationY
uint32_t m_DilationY
Dilation factor value for height dimension.
Definition: Descriptors.hpp:706
armnn::Pooling2dDescriptor::m_PadBottom
uint32_t m_PadBottom
Padding bottom value in the height dimension.
Definition: Descriptors.hpp:413
armnn::LayerType::Subtraction
@ Subtraction
armnn::LayerType::Prelu
@ Prelu
armnn::ViewsDescriptor::GetAxis
int32_t GetAxis() const
Get the axis value.
Definition: Descriptors.cpp:382
armnn::Pooling3dDescriptor::m_PaddingMethod
PaddingMethod m_PaddingMethod
The padding method to be used. (Exclude, IgnoreValue).
Definition: Descriptors.hpp:501
armnn::LayerType::ScatterNd
@ ScatterNd
armnn::Pooling2dDescriptor::m_PadRight
uint32_t m_PadRight
Padding right value in the width dimension.
Definition: Descriptors.hpp:409
armnn::FullyConnectedDescriptor::m_BiasEnabled
bool m_BiasEnabled
Enable/disable bias.
Definition: Descriptors.hpp:526
armnn::LayerType::LogicalBinary
@ LogicalBinary
armnn::LogicalBinaryDescriptor::m_Operation
LogicalBinaryOperation m_Operation
Specifies the logical operation to execute.
Definition: Descriptors.hpp:1534
armnn::LayerType::Concat
@ Concat
armnn::PadDescriptor
A PadDescriptor for the PadLayer.
Definition: Descriptors.hpp:1196
armnn::TransposeDescriptor
A TransposeDescriptor for the TransposeLayer.
Definition: Descriptors.hpp:1490
armnn::Convolution3dDescriptor::m_DilationZ
uint32_t m_DilationZ
Dilation along z axis.
Definition: Descriptors.hpp:651
armnn::BatchMatMulDescriptor::m_TransposeY
bool m_TransposeY
Definition: Descriptors.hpp:1613
armnn::ViewsDescriptor::GetOrigins
const OriginsDescriptor & GetOrigins() const
Get the View Origins.
Definition: Descriptors.cpp:352
armnn::DetectionPostProcessDescriptor::m_NumClasses
uint32_t m_NumClasses
Number of classes.
Definition: Descriptors.hpp:755
armnn::TensorInfo::GetQuantizationDim
Optional< unsigned int > GetQuantizationDim() const
Definition: Tensor.cpp:498
armnn::LayerType::TransposeConvolution2d
@ TransposeConvolution2d
armnn::LayerType::Merge
@ Merge
armnn::StackDescriptor::m_Axis
uint32_t m_Axis
0-based axis along which to stack the input tensors.
Definition: Descriptors.hpp:1273
armnn::SliceDescriptor
A SliceDescriptor for the SliceLayer.
Definition: Descriptors.hpp:1228
armnn::LayerType::StandIn
@ StandIn
armnn::LayerType::Softmax
@ Softmax
armnnSerializer::GetFlatBufferUnaryOperation
armnnSerializer::UnaryOperation GetFlatBufferUnaryOperation(armnn::UnaryOperation unaryOperation)
Definition: SerializerUtils.cpp:135
armnn::InstanceNormalizationDescriptor::m_Gamma
float m_Gamma
Gamma, the scalar scale value applied to the normalized tensor. Defaults to 1.0.
Definition: Descriptors.hpp:865
armnn::DetectionPostProcessDescriptor::m_NmsIouThreshold
float m_NmsIouThreshold
Intersection over union threshold.
Definition: Descriptors.hpp:753
armnn::ActivationFunction::Elu
@ Elu
armnn::Convolution2dDescriptor::m_BiasEnabled
bool m_BiasEnabled
Enable/disable bias.
Definition: Descriptors.hpp:582
armnn::BatchMatMulDescriptor::m_DataLayoutY
DataLayout m_DataLayoutY
Definition: Descriptors.hpp:1622
armnn::ReshapeDescriptor
A ReshapeDescriptor for the ReshapeLayer.
Definition: Descriptors.hpp:1023
armnnSerializer::GetFlatBufferNormalizationAlgorithmMethod
armnnSerializer::NormalizationAlgorithmMethod GetFlatBufferNormalizationAlgorithmMethod(armnn::NormalizationAlgorithmMethod normalizationAlgorithmMethod)
Definition: SerializerUtils.cpp:227
armnn::InvalidArgumentException
Definition: Exceptions.hpp:80
armnn::LayerType::Quantize
@ Quantize
armnn::PadDescriptor::m_PadValue
float m_PadValue
Optional value to use for padding, defaults to 0.
Definition: Descriptors.hpp:1221
armnn::LayerBindingId
int LayerBindingId
Type of identifiers for bindable layers (inputs, outputs).
Definition: Types.hpp:309
armnn::ActivationFunction::Linear
@ Linear
armnn::ScatterNdFunction
ScatterNdFunction
Definition: Types.hpp:500
armnn::DepthwiseConvolution2dDescriptor::m_PadRight
uint32_t m_PadRight
Padding right value in the width dimension.
Definition: Descriptors.hpp:694
armnn::Convolution3dDescriptor::m_PadLeft
uint32_t m_PadLeft
Padding left value in the width dimension.
Definition: Descriptors.hpp:629
armnn::ActivationDescriptor::m_Function
ActivationFunction m_Function
The activation function to use (Sigmoid, TanH, Linear, ReLu, BoundedReLu, SoftReLu,...
Definition: Descriptors.hpp:59
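As a quick illustration of how m_Function is typically used together with the activation bounds, here is a minimal sketch (not taken from this file) configuring a BoundedReLu clipped to [0, 6]; m_A is the standard upper-bound field of ActivationDescriptor and is assumed here, since only m_B appears in this index.
// Minimal sketch: BoundedReLu activation clipped to the range [0, 6].
armnn::ActivationDescriptor reluDesc;
reluDesc.m_Function = armnn::ActivationFunction::BoundedReLu;
reluDesc.m_A        = 6.0f; // upper bound (standard field, not indexed above)
reluDesc.m_B        = 0.0f; // lower bound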
armnn::NormalizationDescriptor::m_DataLayout
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Definition: Descriptors.hpp:805
armnn::LayerType::Multiplication
@ Multiplication
armnn::PermuteDescriptor
A PermuteDescriptor for the PermuteLayer.
Definition: Descriptors.hpp:149
armnn::BatchMatMulDescriptor
A BatchMatMulDescriptor for the BatchMatMul operator.
Definition: Descriptors.hpp:1584
armnn::IOutputSlot::GetOwningLayerGuid
virtual LayerGuid GetOwningLayerGuid() const =0
armnnSerializer::GetFlatBufferArgMinMaxFunction
serializer::ArgMinMaxFunction GetFlatBufferArgMinMaxFunction(armnn::ArgMinMaxFunction function)
Definition: Serializer.cpp:88
armnn::Convolution3dDescriptor::m_StrideY
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
Definition: Descriptors.hpp:643
armnn::IConnectableLayer::GetType
virtual LayerType GetType() const =0
Returns the armnn::LayerType of this layer.
armnn::LayerType::Addition
@ Addition
armnn::Convolution2dDescriptor::m_DataLayout
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Definition: Descriptors.hpp:584
armnn::QLstmDescriptor::m_ProjectionClip
float m_ProjectionClip
Clipping threshold value for the projection.
Definition: Descriptors.hpp:1416
armnnSerializer::GetFlatBufferPaddingMethod
armnnSerializer::PaddingMethod GetFlatBufferPaddingMethod(armnn::PaddingMethod paddingMethod)
Definition: SerializerUtils.cpp:188
armnn::GatherDescriptor::m_Axis
int32_t m_Axis
The axis in params to gather indices from.
Definition: Descriptors.hpp:981
armnn::Convolution3dDescriptor::m_StrideX
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
Definition: Descriptors.hpp:641
armnn::SpaceToBatchNdDescriptor
A SpaceToBatchNdDescriptor for the SpaceToBatchNdLayer.
Definition: Descriptors.hpp:1043
armnn::Convolution2dDescriptor::m_PadBottom
uint32_t m_PadBottom
Padding bottom value in the height dimension.
Definition: Descriptors.hpp:572
armnn::Convolution3dDescriptor
A Convolution3dDescriptor for the Convolution3dLayer.
Definition: Descriptors.hpp:588
armnn::LayerType::DepthToSpace
@ DepthToSpace
armnn::ReshapeDescriptor::m_TargetShape
TensorShape m_TargetShape
Target shape value.
Definition: Descriptors.hpp:1039
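For context, a minimal sketch (not from this file) of setting the target shape, e.g. flattening a {1, 2, 3, 4} tensor into {1, 24}:
// Minimal sketch: reshape to a rank-2 target shape.
armnn::ReshapeDescriptor reshapeDesc;
reshapeDesc.m_TargetShape = armnn::TensorShape({ 1, 24 });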
armnn::ScatterNdFunction::Add
@ Add
armnn::BaseDescriptor
Base class for all descriptors.
Definition: Descriptors.hpp:22
armnn::TransposeConvolution2dDescriptor::m_StrideY
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
Definition: Descriptors.hpp:1479
armnn::StridedSliceDescriptor::m_EndMask
int32_t m_EndMask
End mask value.
Definition: Descriptors.hpp:1353
armnn::BaseTensor::GetInfo
const TensorInfo & GetInfo() const
Definition: Tensor.hpp:297
armnn::Pooling2dDescriptor::m_PadLeft
uint32_t m_PadLeft
Padding left value in the width dimension.
Definition: Descriptors.hpp:407
armnn::PermuteDescriptor::m_DimMappings
PermutationVector m_DimMappings
Indicates how to translate tensor elements from a given source into the target destination,...
Definition: Descriptors.hpp:173
armnn::ActivationFunction
ActivationFunction
Definition: Types.hpp:86
armnn::StandInDescriptor::m_NumOutputs
uint32_t m_NumOutputs
Number of output tensors.
Definition: Descriptors.hpp:1299
armnn::Pooling3dDescriptor::m_PadFront
uint32_t m_PadFront
Padding front value in the depth dimension.
Definition: Descriptors.hpp:483
SerializerUtils.hpp
armnn::StackDescriptor::m_InputShape
TensorShape m_InputShape
Required shape of all input tensors.
Definition: Descriptors.hpp:1277
armnn::LayerType::DetectionPostProcess
@ DetectionPostProcess
armnn::Convolution2dDescriptor::m_StrideX
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
Definition: Descriptors.hpp:574
armnn::QLstmDescriptor::m_OutputIntermediateScale
float m_OutputIntermediateScale
Output intermediate quantization scale.
Definition: Descriptors.hpp:1432
armnn::LayerType::Pooling2d
@ Pooling2d
armnn::Convolution2dDescriptor::m_PadRight
uint32_t m_PadRight
Padding right value in the width dimension.
Definition: Descriptors.hpp:568
armnn::TensorInfo::GetDataType
DataType GetDataType() const
Definition: Tensor.hpp:200
armnnSerializer::GetFlatBufferReduceOperation
armnnSerializer::ReduceOperation GetFlatBufferReduceOperation(armnn::ReduceOperation reduceOperation)
Definition: SerializerUtils.cpp:254
armnn::DetectionPostProcessDescriptor::m_DetectionsPerClass
uint32_t m_DetectionsPerClass
Detections per class, used in Regular NMS.
Definition: Descriptors.hpp:749
LayerGuid
arm::pipe::ProfilingGuid LayerGuid
Define LayerGuid type.
Definition: Types.hpp:26
armnn::LayerType::Division
@ Division
armnn::DetectionPostProcessDescriptor::m_ScaleH
float m_ScaleH
Center size encoding scale height.
Definition: Descriptors.hpp:765
armnn::DataType::Signed32
@ Signed32
armnn::LayerType::Shape
@ Shape
armnn::ReduceDescriptor::m_KeepDims
bool m_KeepDims
If true, the reduced dimensions are kept in the output (with size 1), so the output rank matches the input rank.
Definition: Descriptors.hpp:1554
armnn::BatchToSpaceNdDescriptor
A BatchToSpaceNdDescriptor for the BatchToSpaceNdLayer.
Definition: Descriptors.hpp:875
armnn::Convolution2dDescriptor
A Convolution2dDescriptor for the Convolution2dLayer.
Definition: Descriptors.hpp:534
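A minimal sketch (not from this file) of how the Convolution2dDescriptor fields indexed in this section are typically filled in for a 3x3, stride-1 convolution with symmetric padding and bias enabled; the symmetric m_PadTop/m_PadLeft, m_StrideY and m_DilationY counterparts are standard fields of the descriptor even where this index only lists one side.
// Minimal sketch: 3x3 convolution, stride 1, padding 1, bias enabled.
armnn::Convolution2dDescriptor convDesc;
convDesc.m_PadLeft     = 1;
convDesc.m_PadRight    = 1;
convDesc.m_PadTop      = 1;
convDesc.m_PadBottom   = 1;
convDesc.m_StrideX     = 1;
convDesc.m_StrideY     = 1;
convDesc.m_DilationX   = 1;
convDesc.m_DilationY   = 1;
convDesc.m_BiasEnabled = true;
convDesc.m_DataLayout  = armnn::DataLayout::NHWC;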
armnn::ActivationFunction::Abs
@ Abs
armnn::DepthwiseConvolution2dDescriptor::m_PadBottom
uint32_t m_PadBottom
Padding bottom value in the height dimension.
Definition: Descriptors.hpp:698
armnn::ComparisonDescriptor
A ComparisonDescriptor for the ComparisonLayer.
Definition: Descriptors.hpp:89
armnn::FillDescriptor
A FillDescriptor for the FillLayer.
Definition: Descriptors.hpp:925
armnn::DataType::QAsymmS8
@ QAsymmS8
armnn::ComparisonDescriptor::m_Operation
ComparisonOperation m_Operation
Specifies the comparison operation to execute.
Definition: Descriptors.hpp:105
armnn::Pooling3dDescriptor::m_PadRight
uint32_t m_PadRight
Padding right value in the width dimension.
Definition: Descriptors.hpp:477
armnn::ElementwiseUnaryDescriptor::m_Operation
UnaryOperation m_Operation
Specifies the elementwiseUnary operation to execute.
Definition: Descriptors.hpp:145
armnn::ArgMinMaxFunction::Min
@ Min
armnn::LayerType::FullyConnected
@ FullyConnected
armnn::LayerType::Gather
@ Gather
armnn::StridedSliceDescriptor::m_ShrinkAxisMask
int32_t m_ShrinkAxisMask
Shrink axis mask value. If set, the nth specification shrinks the dimensionality by 1.
Definition: Descriptors.hpp:1355
armnn::StandInDescriptor
A StandInDescriptor for the StandIn layer.
Definition: Descriptors.hpp:1281
armnn::Pooling2dDescriptor::m_StrideX
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
Definition: Descriptors.hpp:419
armnn::LayerType::Pooling3d
@ Pooling3d
armnn::SpaceToDepthDescriptor::m_BlockSize
unsigned int m_BlockSize
Scalar specifying the input block size. It must be >= 1.
Definition: Descriptors.hpp:1092
armnn::ResizeDescriptor::m_TargetWidth
uint32_t m_TargetWidth
Target width value.
Definition: Descriptors.hpp:1007
armnn::LayerType::LogSoftmax
@ LogSoftmax
armnn::LayerType::BatchMatMul
@ BatchMatMul
armnn::Pooling3dDescriptor::m_PoolHeight
uint32_t m_PoolHeight
Pooling height value.
Definition: Descriptors.hpp:489
armnn::StridedSliceDescriptor::m_Stride
std::vector< int > m_Stride
Stride values for the input that will be sliced.
Definition: Descriptors.hpp:1346
armnn::LayerType::DepthwiseConvolution2d
@ DepthwiseConvolution2d
armnn::LayerType::Cast
@ Cast
armnn::InstanceNormalizationDescriptor::m_DataLayout
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Definition: Descriptors.hpp:871
armnn::LayerType::BatchToSpaceNd
@ BatchToSpaceNd
armnn::PermutationVector::GetSize
SizeType GetSize() const
Definition: Types.hpp:357
armnn::LstmDescriptor
An LstmDescriptor for the LstmLayer.
Definition: Descriptors.hpp:1102
armnn::StridedSliceDescriptor
A StridedSliceDescriptor for the StridedSliceLayer.
Definition: Descriptors.hpp:1303
armnn::LayerType::Switch
@ Switch
armnn::TransposeDescriptor::m_DimMappings
PermutationVector m_DimMappings
Indicates how to translate tensor elements from a given source into the target destination,...
Definition: Descriptors.hpp:1514
armnnSerializer::GetFlatBufferPoolingAlgorithm
armnnSerializer::PoolingAlgorithm GetFlatBufferPoolingAlgorithm(armnn::PoolingAlgorithm poolingAlgorithm)
Definition: SerializerUtils.cpp:162
armnn::Pooling3dDescriptor::m_PadBack
uint32_t m_PadBack
Padding back value in the depth dimension.
Definition: Descriptors.hpp:485
armnn::ResizeDescriptor::m_AlignCorners
bool m_AlignCorners
Aligned corners.
Definition: Descriptors.hpp:1016
armnn::TileDescriptor::m_Multiples
std::vector< uint32_t > m_Multiples
The vector to multiply the input shape by.
Definition: Descriptors.hpp:1656
armnn::MeanDescriptor::m_Axis
std::vector< unsigned int > m_Axis
Values for the dimensions to reduce.
Definition: Descriptors.hpp:1190
armnn::LstmDescriptor::m_CifgEnabled
bool m_CifgEnabled
Enable/disable cifg (coupled input & forget gate).
Definition: Descriptors.hpp:1146
armnn::LogicalBinaryDescriptor
A LogicalBinaryDescriptor for the LogicalBinaryLayer.
Definition: Descriptors.hpp:1518
armnn::NormalizationDescriptor::m_Alpha
float m_Alpha
Alpha value for the normalization equation.
Definition: Descriptors.hpp:799
armnn::Pooling3dDescriptor::m_StrideX
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
Definition: Descriptors.hpp:493
armnn::IInputSlot::GetTensorInfo
virtual const TensorInfo & GetTensorInfo() const =0
Gets the TensorInfo for this InputSlot.
armnn::LayerType::Reshape
@ Reshape
armnn::Pooling3dDescriptor::m_PadLeft
uint32_t m_PadLeft
Padding left value in the width dimension.
Definition: Descriptors.hpp:475
armnn::TensorInfo::GetShape
const TensorShape & GetShape() const
Definition: Tensor.hpp:193
armnn::QLstmDescriptor::m_HiddenStateZeroPoint
int32_t m_HiddenStateZeroPoint
Hidden State zero point.
Definition: Descriptors.hpp:1434
armnn::Convolution2dDescriptor::m_DilationX
uint32_t m_DilationX
Dilation along x axis.
Definition: Descriptors.hpp:578
armnn::SoftmaxDescriptor::m_Axis
int m_Axis
Scalar, defaulted to the last index (-1), specifying the dimension the activation will be performed o...
Definition: Descriptors.hpp:192
armnn::Convolution3dDescriptor::m_PadBack
uint32_t m_PadBack
Padding back value in the depth dimension.
Definition: Descriptors.hpp:639
armnn::LayerType::SpaceToBatchNd
@ SpaceToBatchNd
armnn::LayerType::Fill
@ Fill
armnn::ReduceDescriptor::m_vAxis
std::vector< uint32_t > m_vAxis
The indices of the dimensions to reduce.
Definition: Descriptors.hpp:1556
armnn::PadDescriptor::m_PadList
std::vector< std::pair< unsigned int, unsigned int > > m_PadList
Specifies the padding for each input dimension.
Definition: Descriptors.hpp:1218
armnn::LayerType::L2Normalization
@ L2Normalization
armnn::LstmDescriptor::m_LayerNormEnabled
bool m_LayerNormEnabled
Enable/disable layer normalization.
Definition: Descriptors.hpp:1152
armnn::IgnoreUnused
void IgnoreUnused(Ts &&...)
Definition: IgnoreUnused.hpp:14
armnn::TransposeConvolution2dDescriptor::m_PadTop
uint32_t m_PadTop
Padding top value in the height dimension.
Definition: Descriptors.hpp:1473
armnn::ViewsDescriptor::GetNumDimensions
uint32_t GetNumDimensions() const
Get the number of dimensions.
Definition: Descriptors.cpp:307
armnn::TransposeConvolution2dDescriptor::m_PadRight
uint32_t m_PadRight
Padding right value in the width dimension.
Definition: Descriptors.hpp:1471
armnn::LayerType::Minimum
@ Minimum
armnnSerializer::GetFlatBufferOutputShapeRounding
armnnSerializer::OutputShapeRounding GetFlatBufferOutputShapeRounding(armnn::OutputShapeRounding outputShapeRounding)
Definition: SerializerUtils.cpp:176
armnn::QLstmDescriptor::m_CifgEnabled
bool m_CifgEnabled
Enable/disable CIFG (coupled input & forget gate).
Definition: Descriptors.hpp:1418
armnn::ElementwiseBinaryDescriptor::m_Operation
BinaryOperation m_Operation
Specifies the elementwiseBinary operation to execute.
Definition: Descriptors.hpp:125
armnn::ScatterNdFunction::Update
@ Update
armnn::LayerType::UnidirectionalSequenceLstm
@ UnidirectionalSequenceLstm
armnn::IOutputSlot::CalculateIndexOnOwner
virtual unsigned int CalculateIndexOnOwner() const =0
armnn::LayerType::ReverseV2
@ ReverseV2
armnnSerializer::ISerializerPtr
std::unique_ptr< ISerializer, void(*)(ISerializer *serializer)> ISerializerPtr
Definition: ISerializer.hpp:15
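A minimal usage sketch, assuming the public ISerializer API and the armnnSerializer/ISerializer.hpp header: create a serializer through this smart pointer type, serialize a finished INetwork and write the result to a binary stream.
#include <armnnSerializer/ISerializer.hpp>
#include <fstream>
#include <string>

void SaveNetwork(const armnn::INetwork& network, const std::string& path)
{
    armnnSerializer::ISerializerPtr serializer = armnnSerializer::ISerializer::Create();
    serializer->Serialize(network);           // build the FlatBuffer representation
    std::ofstream file(path, std::ios::binary);
    serializer->SaveSerializedToStream(file); // write the serialized network to disk
}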
armnn::Convolution3dDescriptor::m_DilationY
uint32_t m_DilationY
Dilation along y axis.
Definition: Descriptors.hpp:649
armnn::OriginsDescriptor
An OriginsDescriptor for the ConcatLayer.
Definition: Descriptors.hpp:201
armnn::ScatterNdFunction::Max
@ Max
armnn::ActivationFunction::ReLu
@ ReLu
armnn::IConnectableLayer::GetOutputSlot
virtual const IOutputSlot & GetOutputSlot(unsigned int index) const =0
Get the const output slot handle by slot index.
armnn
Copyright (c) 2021 ARM Limited and Contributors.
Definition: 01_00_quick_start.dox:6
armnn::ElementwiseUnaryDescriptor
An ElementwiseUnaryDescriptor for the ElementwiseUnaryLayer.
Definition: Descriptors.hpp:129
armnn::TransposeConvolution2dDescriptor
A TransposeConvolution2dDescriptor for the TransposeConvolution2dLayer.
Definition: Descriptors.hpp:1440
armnn::IConnectableLayer::GetInputSlot
virtual const IInputSlot & GetInputSlot(unsigned int index) const =0
Get a const input slot handle by slot index.
armnn::Convolution3dDescriptor::m_BiasEnabled
bool m_BiasEnabled
Enable/disable bias.
Definition: Descriptors.hpp:653
armnn::LayerType::ArgMinMax
@ ArgMinMax
armnn::ArgMinMaxDescriptor::m_Axis
int m_Axis
Axis to reduce across the input tensor.
Definition: Descriptors.hpp:83
armnn::ActivationDescriptor::m_B
float m_B
Beta lower bound value used by the activation functions (BoundedReLu, Linear, TanH).
Definition: Descriptors.hpp:63
armnn::LayerType::Pad
@ Pad
armnn::Convolution3dDescriptor::m_StrideZ
uint32_t m_StrideZ
Stride value when proceeding through input for the depth dimension.
Definition: Descriptors.hpp:645
QuantizedLstmParams.hpp
armnn::DetectionPostProcessDescriptor::m_UseRegularNms
bool m_UseRegularNms
Use Regular NMS.
Definition: Descriptors.hpp:757
armnnSerializer::GetFlatBufferScatterNdFunction
serializer::ScatterNdFunction GetFlatBufferScatterNdFunction(armnn::ScatterNdFunction function)
Definition: Serializer.cpp:100
armnn::ScatterNdDescriptor::m_Function
ScatterNdFunction m_Function
Specifies whether the function is update, add, sub, max or min.
Definition: Descriptors.hpp:1719
armnn::ViewsDescriptor::GetNumViews
uint32_t GetNumViews() const
Get the number of views.
Definition: Descriptors.cpp:302
armnn::QLstmDescriptor::m_HiddenStateScale
float m_HiddenStateScale
Hidden State quantization scale.
Definition: Descriptors.hpp:1436
armnn::LayerType::Rank
@ Rank
armnn::NormalizationDescriptor::m_K
float m_K
Kappa value used for the across channel normalization equation.
Definition: Descriptors.hpp:803
armnn::LayerType::Mean
@ Mean
armnn::LstmDescriptor::m_ProjectionEnabled
bool m_ProjectionEnabled
Enable/disable the projection layer.
Definition: Descriptors.hpp:1150
armnn::StridedSliceDescriptor::m_End
std::vector< int > m_End
End values for the input that will be sliced.
Definition: Descriptors.hpp:1344
armnn::PadDescriptor::m_PaddingMode
PaddingMode m_PaddingMode
Specifies the padding mode (Constant, Reflect or Symmetric).
Definition: Descriptors.hpp:1224
armnn::ConstTensor
A tensor defined by a TensorInfo (shape and data type) and an immutable backing store.
Definition: Tensor.hpp:329
armnn::ActivationFunction::Square
@ Square
armnn::IConnectableLayer
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:80
armnn::IInputSlot
An input connection slot for a layer.
Definition: INetwork.hpp:25
armnn::Pooling2dDescriptor::m_OutputShapeRounding
OutputShapeRounding m_OutputShapeRounding
The rounding method for the output shape (Floor, Ceiling).
Definition: Descriptors.hpp:423
armnn::LayerType::Input
@ Input
armnn::TransposeConvolution2dDescriptor::m_BiasEnabled
bool m_BiasEnabled
Enable/disable bias.
Definition: Descriptors.hpp:1481
armnn::QLstmDescriptor::m_CellClip
float m_CellClip
Clipping threshold value for the cell state.
Definition: Descriptors.hpp:1414
armnn::LayerType::Resize
@ Resize
armnn::DetectionPostProcessDescriptor
Definition: Descriptors.hpp:713
armnn::QLstmDescriptor::m_LayerNormEnabled
bool m_LayerNormEnabled
Enable/disable layer normalization.
Definition: Descriptors.hpp:1424
armnn::ChannelShuffleDescriptor::m_NumGroups
uint32_t m_NumGroups
Number of groups for the channel shuffle operation.
Definition: Descriptors.hpp:1578
armnn::Convolution3dDescriptor::m_DataLayout
DataLayout m_DataLayout
The data layout to be used (NDHWC, NCDHW).
Definition: Descriptors.hpp:655
armnn::L2NormalizationDescriptor::m_Eps
float m_Eps
Used to avoid dividing by zero.
Definition: Descriptors.hpp:822
armnn::OriginsDescriptor::GetViewOrigin
const uint32_t * GetViewOrigin(uint32_t idx) const
Return the view origin at index idx.
Definition: Descriptors.cpp:197
armnn::DataType::Signed64
@ Signed64
armnn::TransposeConvolution2dDescriptor::m_DataLayout
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Definition: Descriptors.hpp:1483
armnn::LayerType::Convolution2d
@ Convolution2d
armnn::SliceDescriptor::m_Size
std::vector< unsigned int > m_Size
Size of the slice in each dimension.
Definition: Descriptors.hpp:1247
armnn::Pooling2dDescriptor
A Pooling2dDescriptor for the Pooling2dLayer.
Definition: Descriptors.hpp:371
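A minimal sketch (not from this file) of configuring a Pooling2dDescriptor for a 2x2 average pool using the fields indexed here; m_PoolWidth, m_PoolHeight, m_PadTop and m_StrideY are the standard Pooling2dDescriptor counterparts of the fields listed under Pooling3dDescriptor above.
// Minimal sketch: 2x2 average pooling, stride 2, no padding, NHWC layout.
armnn::Pooling2dDescriptor poolDesc;
poolDesc.m_PoolType            = armnn::PoolingAlgorithm::Average;
poolDesc.m_PoolWidth           = 2;
poolDesc.m_PoolHeight          = 2;
poolDesc.m_StrideX             = 2;
poolDesc.m_StrideY             = 2;
poolDesc.m_PadLeft             = 0;
poolDesc.m_PadRight            = 0;
poolDesc.m_PadTop              = 0;
poolDesc.m_PadBottom           = 0;
poolDesc.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
poolDesc.m_DataLayout          = armnn::DataLayout::NHWC;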
armnn::LstmDescriptor::m_ActivationFunc
uint32_t m_ActivationFunc
The activation function to use.
Definition: Descriptors.hpp:1140
armnn::BatchToSpaceNdDescriptor::m_DataLayout
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Definition: Descriptors.hpp:902
armnn::LayerType::Maximum
@ Maximum
armnn::TensorInfo::GetQuantizationOffset
int32_t GetQuantizationOffset() const
Definition: Tensor.cpp:482
armnn::LayerType::Activation
@ Activation
armnn::DepthwiseConvolution2dDescriptor
A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
Definition: Descriptors.hpp:659
armnn::LayerType::Lstm
@ Lstm
armnn::LayerType::Dequantize
@ Dequantize
armnn::ScatterNdDescriptor
A ScatterNdDescriptor for the ScatterNdLayer.
Definition: Descriptors.hpp:1679
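A minimal sketch (not from this file) of configuring a ScatterNdDescriptor with the two attributes indexed above:
// Minimal sketch: element-wise update scatter on the default axis.
armnn::ScatterNdDescriptor scatterDesc;
scatterDesc.m_Function = armnn::ScatterNdFunction::Update; // update, add, sub, max or min
scatterDesc.m_Axis     = 0;                                // axis defaults to 0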
armnn::LayerType::Convolution3d
@ Convolution3d
armnn::ReduceDescriptor
A ReduceDescriptor for the REDUCE operators.
Definition: Descriptors.hpp:1538
armnn::DepthwiseConvolution2dDescriptor::m_DilationX
uint32_t m_DilationX
Dilation factor value for width dimension.
Definition: Descriptors.hpp:704
armnnSerializer::GetFlatBufferPaddingMode
armnnSerializer::PaddingMode GetFlatBufferPaddingMode(armnn::PaddingMode paddingMode)
Definition: SerializerUtils.cpp:200
armnn::BatchNormalizationDescriptor::m_Eps
float m_Eps
Value to add to the variance. Used to avoid dividing by zero.
Definition: Descriptors.hpp:841
armnn::LstmDescriptor::m_ClippingThresCell
float m_ClippingThresCell
Clipping threshold value for the cell state.
Definition: Descriptors.hpp:1142
armnn::Pooling3dDescriptor::m_PoolDepth
uint32_t m_PoolDepth
Pooling depth value.
Definition: Descriptors.hpp:491
armnn::TensorShape::GetDimensionality
Dimensionality GetDimensionality() const
Function that returns the tensor dimensionality.
Definition: Tensor.hpp:92
armnn::LayerType
LayerType
When adding a new layer, also adapt the LastLayer enum value in the LayerType enum class below.
Definition: Types.hpp:491
armnn::StridedSliceDescriptor::m_NewAxisMask
int32_t m_NewAxisMask
New axis mask value.
Definition: Descriptors.hpp:1360
armnn::MeanDescriptor
A MeanDescriptor for the MeanLayer.
Definition: Descriptors.hpp:1172
armnn::QLstmDescriptor::m_PeepholeEnabled
bool m_PeepholeEnabled
Enable/disable peephole.
Definition: Descriptors.hpp:1420
armnn::LayerType::QLstm
@ QLstm
armnn::OptionalReferenceSwitch::value
const T & value() const
Definition: Optional.hpp:146
armnn::TileDescriptor
Definition: Descriptors.hpp:1640
armnn::SoftmaxDescriptor
A SoftmaxDescriptor for the SoftmaxLayer.
Definition: Descriptors.hpp:177
armnn::Pooling2dDescriptor::m_PoolType
PoolingAlgorithm m_PoolType
The pooling algorithm to use (Max, Average, L2).
Definition: Descriptors.hpp:405
armnn::InstanceNormalizationDescriptor::m_Eps
float m_Eps
Epsilon, small scalar value added to variance to avoid dividing by zero. Defaults to 1e-12f.
Definition: Descriptors.hpp:869
armnnSerializer::GetFlatBufferLogicalBinaryOperation
armnnSerializer::LogicalBinaryOperation GetFlatBufferLogicalBinaryOperation(armnn::LogicalBinaryOperation logicalBinaryOperation)
Definition: SerializerUtils.cpp:31
armnn::SpaceToDepthDescriptor
A SpaceToDepthDescriptor for the SpaceToDepthLayer.
Definition: Descriptors.hpp:1075
armnn::LayerType::Output
@ Output
armnn::LayerType::Constant
@ Constant
armnn::ViewsDescriptor::GetViewOrigin
const uint32_t * GetViewOrigin(uint32_t idx) const
Get the view origin at index idx.
Definition: Descriptors.cpp:312
armnn::ActivationFunction::Sigmoid
@ Sigmoid
armnn::SpaceToDepthDescriptor::m_DataLayout
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Definition: Descriptors.hpp:1095
armnn::INetwork
Main network class which provides the interface for building up a neural network.
Definition: INetwork.hpp:347
armnn::DepthwiseConvolution2dDescriptor::m_StrideX
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
Definition: Descriptors.hpp:700
armnn::DepthwiseConvolution2dDescriptor::m_PadTop
uint32_t m_PadTop
Padding top value in the height dimension.
Definition: Descriptors.hpp:696
armnn::IInputSlot::GetConnection
virtual const IOutputSlot * GetConnection() const =0