ArmNN 23.05 - Serializer.cpp
1 //
2 // Copyright © 2017,2019-2023 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 #include "Serializer.hpp"
6 #include "SerializerUtils.hpp"
7 
8 #include <armnn/Descriptors.hpp>
9 #include <armnn/LstmParams.hpp>
10 #include <armnn/QuantizedLstmParams.hpp>
11 #include <armnn/utility/IgnoreUnused.hpp>
12 #include <armnn/utility/NumericCast.hpp>
13 
14 #include <fmt/format.h>
15 #include <iostream>
16 
17 using namespace armnn;
18 namespace fb = flatbuffers;
19 namespace serializer = armnnSerializer;
20 
21 namespace armnnSerializer
22 {
23 
24 ISerializer::ISerializer() : pSerializerImpl(new SerializerImpl())
25 {
26 }
27 
28 ISerializer::~ISerializer() = default;
29 
30 ISerializer* ISerializer::CreateRaw()
31 {
32  return new ISerializer();
33 }
34 
35 ISerializerPtr ISerializer::Create()
36 {
37  return ISerializerPtr(CreateRaw(), &ISerializer::Destroy);
38 }
39 
40 void ISerializer::Destroy(ISerializer* serializer)
41 {
42  delete serializer;
43 }
44 
45 void ISerializer::Serialize(const armnn::INetwork& inNetwork)
46 {
47  pSerializerImpl->Serialize(inNetwork);
48 }
49 
50 bool ISerializer::SaveSerializedToStream(std::ostream& stream)
51 {
52  return pSerializerImpl->SaveSerializedToStream(stream);
53 }
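// A minimal usage sketch of the ISerializer API above (illustrative only; the
// INetwork variable and the file name "network.armnn" are assumptions, not part
// of this file):
//
//     armnnSerializer::ISerializerPtr serializer = armnnSerializer::ISerializer::Create();
//     serializer->Serialize(*network);                        // network: armnn::INetworkPtr
//     std::ofstream file("network.armnn", std::ios::binary);  // requires <fstream>
//     serializer->SaveSerializedToStream(file);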
54 
55 serializer::ActivationFunction GetFlatBufferActivationFunction(armnn::ActivationFunction function)
56 {
57  switch (function)
58  {
59  case armnn::ActivationFunction::Sigmoid:
60  return serializer::ActivationFunction::ActivationFunction_Sigmoid;
61  case armnn::ActivationFunction::TanH:
62  return serializer::ActivationFunction::ActivationFunction_TanH;
63  case armnn::ActivationFunction::Linear:
64  return serializer::ActivationFunction::ActivationFunction_Linear;
65  case armnn::ActivationFunction::ReLu:
66  return serializer::ActivationFunction::ActivationFunction_ReLu;
67  case armnn::ActivationFunction::BoundedReLu:
68  return serializer::ActivationFunction::ActivationFunction_BoundedReLu;
69  case armnn::ActivationFunction::LeakyReLu:
70  return serializer::ActivationFunction::ActivationFunction_LeakyReLu;
71  case armnn::ActivationFunction::Abs:
72  return serializer::ActivationFunction::ActivationFunction_Abs;
73  case armnn::ActivationFunction::Sqrt:
74  return serializer::ActivationFunction::ActivationFunction_Sqrt;
75  case armnn::ActivationFunction::Square:
76  return serializer::ActivationFunction::ActivationFunction_Square;
77  case armnn::ActivationFunction::Elu:
78  return serializer::ActivationFunction::ActivationFunction_Elu;
79  case armnn::ActivationFunction::HardSwish:
80  return serializer::ActivationFunction::ActivationFunction_HardSwish;
81  default:
82  return serializer::ActivationFunction::ActivationFunction_Sigmoid;
83  }
84 }
85 
86 serializer::ArgMinMaxFunction GetFlatBufferArgMinMaxFunction(armnn::ArgMinMaxFunction function)
87 {
88  switch (function)
89  {
90  case armnn::ArgMinMaxFunction::Max:
91  return serializer::ArgMinMaxFunction::ArgMinMaxFunction_Max;
92  case armnn::ArgMinMaxFunction::Min:
93  default:
94  return serializer::ArgMinMaxFunction::ArgMinMaxFunction_Min;
95  }
96 }
97 
98 uint32_t SerializerStrategy::GetSerializedId(LayerGuid guid)
99 {
100  if (m_guidMap.empty())
101  {
102  m_guidMap.insert(std::make_pair(guid, m_layerId));
103  }
104  else if (m_guidMap.find(guid) == m_guidMap.end())
105  {
106  ++m_layerId;
107  m_guidMap.insert(std::make_pair(guid, m_layerId));
108 
109  return m_layerId;
110  }
111  return m_guidMap[guid];
112 }
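// Behaviour sketch of GetSerializedId, derived from the implementation above
// (the guid values are assumed, for illustration only): the first guid seen maps
// to id 0, each previously unseen guid gets the next id, and a repeated guid
// returns the id it was first assigned.
//
//     GetSerializedId(42);   // -> 0  (first layer seen)
//     GetSerializedId(77);   // -> 1  (new guid, id incremented)
//     GetSerializedId(42);   // -> 0  (already mapped)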
113 
114 // Build FlatBuffer for Input Layer
115 void SerializerStrategy::SerializeInputLayer(const armnn::IConnectableLayer* layer, LayerBindingId id, const char* name)
116 {
117  IgnoreUnused(name);
118 
119  // Create FlatBuffer BaseLayer
120  auto flatBufferInputBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Input);
121 
122  // Create FlatBuffer BindableBaseLayer
123  auto flatBufferInputBindableBaseLayer = serializer::CreateBindableLayerBase(m_flatBufferBuilder,
124  flatBufferInputBaseLayer,
125  id);
126  // Push layer binding id to inputIds.
127  m_inputIds.push_back(id);
128 
129  // Create the FlatBuffer InputLayer
130  auto flatBufferInputLayer = serializer::CreateInputLayer(m_flatBufferBuilder, flatBufferInputBindableBaseLayer);
131 
132  // Add the AnyLayer to the FlatBufferLayers
133  CreateAnyLayer(flatBufferInputLayer.o, serializer::Layer::Layer_InputLayer);
134 }
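// The same pattern repeats in the Serialize*Layer functions below: build a
// LayerBase (and, where relevant, a descriptor), build the typed FlatBuffer
// layer, then register it via CreateAnyLayer. In outline (XXX is a placeholder
// for the concrete layer name, not an identifier from this file):
//
//     auto base       = CreateLayerBase(layer, serializer::LayerType::LayerType_XXX);
//     auto descriptor = CreateXXXDescriptor(m_flatBufferBuilder, /* descriptor fields */);
//     auto fbLayer    = CreateXXXLayer(m_flatBufferBuilder, base, descriptor);
//     CreateAnyLayer(fbLayer.o, serializer::Layer::Layer_XXXLayer);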
135 
136 // Build FlatBuffer for Output Layer
137 void SerializerStrategy::SerializeOutputLayer(const armnn::IConnectableLayer* layer,
138  LayerBindingId id, const char* name)
139 {
140  IgnoreUnused(name);
141 
142  // Create FlatBuffer BaseLayer
143  auto flatBufferOutputBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Output);
144 
145  // Create FlatBuffer BindableBaseLayer
146  auto flatBufferOutputBindableBaseLayer = serializer::CreateBindableLayerBase(m_flatBufferBuilder,
147  flatBufferOutputBaseLayer,
148  id);
149  // Push layer binding id to outputIds.
150  m_outputIds.push_back(id);
151 
152  // Create the FlatBuffer OutputLayer
153  auto flatBufferOutputLayer = serializer::CreateOutputLayer(m_flatBufferBuilder, flatBufferOutputBindableBaseLayer);
154  // Add the AnyLayer to the FlatBufferLayers
155  CreateAnyLayer(flatBufferOutputLayer.o, serializer::Layer::Layer_OutputLayer);
156 }
157 
158 // Build FlatBuffer for Activation Layer
159 void SerializerStrategy::SerializeActivationLayer(const armnn::IConnectableLayer* layer,
160  const armnn::ActivationDescriptor& descriptor,
161  const char* name)
162 {
163  IgnoreUnused(name);
164 
165  // Create FlatBuffer BaseLayer
166  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Activation);
167 
168  // Create the FlatBuffer ActivationDescriptor
169  auto flatBufferDescriptor = CreateActivationDescriptor(m_flatBufferBuilder,
170  GetFlatBufferActivationFunction(descriptor.m_Function),
171  descriptor.m_A,
172  descriptor.m_B);
173 
174  // Create the FlatBuffer ActivationLayer
175  auto flatBufferActivationLayer = CreateActivationLayer(m_flatBufferBuilder,
176  flatBufferBaseLayer,
177  flatBufferDescriptor);
178 
179  // Add the AnyLayer to the FlatBufferLayers
180  CreateAnyLayer(flatBufferActivationLayer.o, serializer::Layer::Layer_ActivationLayer);
181 }
182 
183 // Build FlatBuffer for Addition Layer
184 void SerializerStrategy::SerializeAdditionLayer(const armnn::IConnectableLayer* layer, const char* name)
185 {
186  IgnoreUnused(name);
187 
188  // Create FlatBuffer BaseLayer
189  auto flatBufferAdditionBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Addition);
190 
191  // Create the FlatBuffer AdditionLayer
192  auto flatBufferAdditionLayer = serializer::CreateAdditionLayer(m_flatBufferBuilder, flatBufferAdditionBaseLayer);
193 
194  // Add the AnyLayer to the FlatBufferLayers
195  CreateAnyLayer(flatBufferAdditionLayer.o, serializer::Layer::Layer_AdditionLayer);
196 }
197 
198 // Build FlatBuffer for ArgMinMax Layer
199 void SerializerStrategy::SerializeArgMinMaxLayer(const armnn::IConnectableLayer *layer,
200  const armnn::ArgMinMaxDescriptor& descriptor,
201  const char *name)
202 {
203  IgnoreUnused(name);
204 
205  // Create FlatBuffer BaseLayer
206  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_ArgMinMax);
207 
208  // Create FlatBuffer Descriptor
209  auto flatBufferDescriptor = CreateArgMinMaxDescriptor(m_flatBufferBuilder,
210  GetFlatBufferArgMinMaxFunction(descriptor.m_Function),
211  descriptor.m_Axis);
212 
213  // Create FlatBuffer ArgMinMaxLayer
214  auto flatBufferLayer = CreateArgMinMaxLayer(m_flatBufferBuilder,
215  flatBufferBaseLayer,
216  flatBufferDescriptor);
217 
218  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_ArgMinMaxLayer);
219 }
220 
221 void SerializerStrategy::SerializeBatchMatMulLayer(const armnn::IConnectableLayer* layer,
222  const armnn::BatchMatMulDescriptor& descriptor,
223  const char* name)
224 {
225  IgnoreUnused(name);
226 
227  // Create FlatBuffer BaseLayer
228  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_BatchMatMul);
229 
230  // Create the FlatBuffer BatchMatMulDescriptor
231  auto flatBufferDescriptor = CreateBatchMatMulDescriptor(m_flatBufferBuilder,
232  descriptor.m_TransposeX,
233  descriptor.m_TransposeY,
234  descriptor.m_AdjointX,
235  descriptor.m_AdjointY,
236  GetFlatBufferDataLayout(descriptor.m_DataLayoutX),
237  GetFlatBufferDataLayout(descriptor.m_DataLayoutY));
238 
239  // Create the FlatBuffer BatchMatMulLayer
240  auto flatBufferBatchMatMulLayer = CreateBatchMatMulLayer(m_flatBufferBuilder,
241  flatBufferBaseLayer,
242  flatBufferDescriptor);
243 
244  // Add the AnyLayer to the FlatBufferLayers
245  CreateAnyLayer(flatBufferBatchMatMulLayer.o, serializer::Layer::Layer_BatchMatMulLayer);
246 }
247 
248 // Build FlatBuffer for BatchToSpaceNd Layer
249 void SerializerStrategy::SerializeBatchToSpaceNdLayer(const armnn::IConnectableLayer* layer,
250  const armnn::BatchToSpaceNdDescriptor& descriptor,
251  const char* name)
252 {
253  IgnoreUnused(name);
254 
255  // Create FlatBuffer BaseLayer
256  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_BatchToSpaceNd);
257 
258  std::vector<unsigned int> crops;
259  crops.reserve(descriptor.m_Crops.size() * 2);
260  for (auto& crop : descriptor.m_Crops)
261  {
262  crops.push_back(crop.first);
263  crops.push_back(crop.second);
264  }
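 // The pair list is flattened into one vector, e.g. (assumed values)
 // m_Crops = {{1, 2}, {3, 4}} becomes crops = {1, 2, 3, 4}.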
265 
266  auto flatBufferDescriptor =
267  CreateBatchToSpaceNdDescriptor(m_flatBufferBuilder,
268  m_flatBufferBuilder.CreateVector(descriptor.m_BlockShape),
269  m_flatBufferBuilder.CreateVector(crops),
270  GetFlatBufferDataLayout(descriptor.m_DataLayout));
271 
272  auto flatBufferLayer = serializer::CreateBatchToSpaceNdLayer(m_flatBufferBuilder,
273  flatBufferBaseLayer,
274  flatBufferDescriptor);
275 
276  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_BatchToSpaceNdLayer);
277 }
278 
279 void SerializerStrategy::SerializeBatchNormalizationLayer(
280  const armnn::IConnectableLayer* layer,
281  const armnn::BatchNormalizationDescriptor& batchNormDescriptor,
282  const std::vector<armnn::ConstTensor>& constants,
283  const char* name)
284 {
285  IgnoreUnused(name);
286 
287  const armnn::ConstTensor& mean = constants[0];
288  const armnn::ConstTensor& variance = constants[1];
289  const armnn::ConstTensor& beta = constants[2];
290  const armnn::ConstTensor& gamma = constants[3];
291 
292  auto fbBatchNormalizationBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_BatchNormalization);
293  auto fbBatchNormalizationDescriptor = serializer::CreateBatchNormalizationDescriptor(
294  m_flatBufferBuilder,
295  batchNormDescriptor.m_Eps,
296  GetFlatBufferDataLayout(batchNormDescriptor.m_DataLayout));
297 
298  auto fbMeanConstTensorInfo = CreateConstTensorInfo(mean);
299  auto fbVarianceConstTensorInfo = CreateConstTensorInfo(variance);
300  auto fbBetaConstTensorInfo = CreateConstTensorInfo(beta);
301  auto fbGammaConstTensorInfo = CreateConstTensorInfo(gamma);
302  auto fbBatchNormalizationLayer = serializer::CreateBatchNormalizationLayer(m_flatBufferBuilder,
303  fbBatchNormalizationBaseLayer,
304  fbBatchNormalizationDescriptor,
305  fbMeanConstTensorInfo,
306  fbVarianceConstTensorInfo,
307  fbBetaConstTensorInfo,
308  fbGammaConstTensorInfo);
309 
310  CreateAnyLayer(fbBatchNormalizationLayer.o, serializer::Layer::Layer_BatchNormalizationLayer);
311 }
312 
313 void SerializerStrategy::SerializeCastLayer(const armnn::IConnectableLayer* layer,
314  const char* name)
315 {
316  IgnoreUnused(name);
317 
318  auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Cast);
319  auto fbCastLayer = serializer::CreateCastLayer(m_flatBufferBuilder, fbBaseLayer);
320  CreateAnyLayer(fbCastLayer.o, serializer::Layer::Layer_CastLayer);
321 }
322 
323 void SerializerStrategy::SerializeChannelShuffleLayer(const armnn::IConnectableLayer* layer,
324  const armnn::ChannelShuffleDescriptor& descriptor,
325  const char* name)
326 {
327  IgnoreUnused(name);
328  auto fbDescriptor = CreateChannelShuffleDescriptor(m_flatBufferBuilder,
329  descriptor.m_Axis,
330  descriptor.m_NumGroups);
331  auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_ChannelShuffle);
332  auto fbChannelShuffleLayer = serializer::CreateChannelShuffleLayer(m_flatBufferBuilder, fbBaseLayer, fbDescriptor);
333  CreateAnyLayer(fbChannelShuffleLayer.o, serializer::Layer::Layer_ChannelShuffleLayer);
334 }
335 
336 void SerializerStrategy::SerializeComparisonLayer(const armnn::IConnectableLayer* layer,
337  const armnn::ComparisonDescriptor& descriptor,
338  const char* name)
339 {
340  IgnoreUnused(name);
341 
342  auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Comparison);
343  auto fbDescriptor = serializer::CreateComparisonDescriptor(
344  m_flatBufferBuilder,
345  GetFlatBufferComparisonOperation(descriptor.m_Operation));
346 
347  auto fbLayer = serializer::CreateComparisonLayer(m_flatBufferBuilder, fbBaseLayer, fbDescriptor);
348  CreateAnyLayer(fbLayer.o, serializer::Layer::Layer_ComparisonLayer);
349 }
350 
351 // Build FlatBuffer for Constant Layer
352 void SerializerStrategy::SerializeConstantLayer(const armnn::IConnectableLayer* layer,
353  const std::vector<armnn::ConstTensor>& constants,
354  const char* name)
355 {
356  IgnoreUnused(name);
357 
358  armnn::ConstTensor input = constants[0];
359 
360  // Create FlatBuffer BaseLayer
361  auto flatBufferConstantBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Constant);
362 
363  auto flatBufferConstTensorInfo = CreateConstTensorInfo(input);
364 
365  // Create the FlatBuffer ConstantLayer
366  auto flatBufferLayer = CreateConstantLayer(m_flatBufferBuilder,
367  flatBufferConstantBaseLayer,
368  flatBufferConstTensorInfo);
369 
370  // Add the AnyLayer to the FlatBufferLayers
371  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_ConstantLayer);
372 }
373 
374 // Build FlatBuffer for Convolution2dLayer
375 void SerializerStrategy::SerializeConvolution2dLayer(const armnn::IConnectableLayer* layer,
376  const armnn::Convolution2dDescriptor& descriptor,
377  const char* name)
378 {
379  IgnoreUnused(name);
380 
381  // Create FlatBuffer BaseLayer
382  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Convolution2d);
383 
384  auto flatBufferDescriptor = CreateConvolution2dDescriptor(m_flatBufferBuilder,
385  descriptor.m_PadLeft,
386  descriptor.m_PadRight,
387  descriptor.m_PadTop,
388  descriptor.m_PadBottom,
389  descriptor.m_StrideX,
390  descriptor.m_StrideY,
391  descriptor.m_DilationX,
392  descriptor.m_DilationY,
393  descriptor.m_BiasEnabled,
394  GetFlatBufferDataLayout(descriptor.m_DataLayout));
395 
396  // Create the FlatBuffer Convolution2dLayer
397  auto flatBufferLayer = CreateConvolution2dLayer(m_flatBufferBuilder,
398  flatBufferBaseLayer,
399  flatBufferDescriptor);
400 
401  // Add the AnyLayer to the FlatBufferLayers
402  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_Convolution2dLayer);
403 }
404 
405 // Build FlatBuffer for Convolution3dLayer
406 void SerializerStrategy::SerializeConvolution3dLayer(const armnn::IConnectableLayer* layer,
407  const armnn::Convolution3dDescriptor& descriptor,
408  const char* name)
409 {
410  IgnoreUnused(name);
411 
412  // Create FlatBuffer BaseLayer
413  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Convolution3d);
414 
415  auto flatBufferDescriptor = CreateConvolution3dDescriptor(m_flatBufferBuilder,
416  descriptor.m_PadLeft,
417  descriptor.m_PadRight,
418  descriptor.m_PadTop,
419  descriptor.m_PadBottom,
420  descriptor.m_PadFront,
421  descriptor.m_PadBack,
422  descriptor.m_StrideX,
423  descriptor.m_StrideY,
424  descriptor.m_StrideZ,
425  descriptor.m_DilationX,
426  descriptor.m_DilationY,
427  descriptor.m_DilationZ,
428  descriptor.m_BiasEnabled,
429  GetFlatBufferDataLayout(descriptor.m_DataLayout));
430 
431  // Create the FlatBuffer Convolution3dLayer
432  auto flatBufferLayer = CreateConvolution3dLayer(m_flatBufferBuilder,
433  flatBufferBaseLayer,
434  flatBufferDescriptor);
435 
436  // Add the AnyLayer to the FlatBufferLayers
437  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_Convolution3dLayer);
438 }
439 
440 void SerializerStrategy::SerializeDepthToSpaceLayer(const armnn::IConnectableLayer* layer,
441  const armnn::DepthToSpaceDescriptor& descriptor,
442  const char* name)
443 {
444  IgnoreUnused(name);
445 
446  auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_DepthToSpace);
447  auto fbDescriptor = CreateDepthToSpaceDescriptor(m_flatBufferBuilder,
448  descriptor.m_BlockSize,
449  GetFlatBufferDataLayout(descriptor.m_DataLayout));
450 
451  auto fbLayer = serializer::CreateDepthToSpaceLayer(m_flatBufferBuilder, fbBaseLayer, fbDescriptor);
452 
453  CreateAnyLayer(fbLayer.o, serializer::Layer::Layer_DepthToSpaceLayer);
454 }
455 
456 void SerializerStrategy::SerializeDepthwiseConvolution2dLayer(const armnn::IConnectableLayer* layer,
457  const armnn::DepthwiseConvolution2dDescriptor& descriptor,
458  const char* name)
459 {
460  IgnoreUnused(name);
461 
462  auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_DepthwiseConvolution2d);
463  auto fbDescriptor = CreateDepthwiseConvolution2dDescriptor(m_flatBufferBuilder,
464  descriptor.m_PadLeft,
465  descriptor.m_PadRight,
466  descriptor.m_PadTop,
467  descriptor.m_PadBottom,
468  descriptor.m_StrideX,
469  descriptor.m_StrideY,
470  descriptor.m_DilationX,
471  descriptor.m_DilationY,
472  descriptor.m_BiasEnabled,
473  GetFlatBufferDataLayout(descriptor.m_DataLayout));
474 
475  auto flatBufferLayer = CreateDepthwiseConvolution2dLayer(m_flatBufferBuilder,
476  fbBaseLayer,
477  fbDescriptor);
478 
479  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_DepthwiseConvolution2dLayer);
480 }
481 
482 void SerializerStrategy::SerializeDequantizeLayer(const armnn::IConnectableLayer* layer,
483  const char* name)
484 {
485  IgnoreUnused(name);
486 
487  auto fbDequantizeBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Dequantize);
488  auto fbDequantizeLayer = serializer::CreateDequantizeLayer(m_flatBufferBuilder, fbDequantizeBaseLayer);
489 
490  CreateAnyLayer(fbDequantizeLayer.o, serializer::Layer::Layer_DequantizeLayer);
491 }
492 
493 void SerializerStrategy::SerializeDetectionPostProcessLayer(const armnn::IConnectableLayer* layer,
494  const armnn::DetectionPostProcessDescriptor& descriptor,
495  const std::vector<armnn::ConstTensor>& constants,
496  const char* name)
497 {
498  IgnoreUnused(name);
499 
500  const armnn::ConstTensor& anchors = constants[0];
501 
502  auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_DetectionPostProcess);
503  auto fbDescriptor = CreateDetectionPostProcessDescriptor(m_flatBufferBuilder,
504  descriptor.m_MaxDetections,
505  descriptor.m_MaxClassesPerDetection,
506  descriptor.m_DetectionsPerClass,
507  descriptor.m_NmsScoreThreshold,
508  descriptor.m_NmsIouThreshold,
509  descriptor.m_NumClasses,
510  descriptor.m_UseRegularNms,
511  descriptor.m_ScaleX,
512  descriptor.m_ScaleY,
513  descriptor.m_ScaleW,
514  descriptor.m_ScaleH);
515 
516  flatbuffers::Offset<serializer::ConstTensor> fbAnchorsConstTensorInfo = CreateConstTensorInfo(anchors);
517 
518  auto flatBufferLayer = CreateDetectionPostProcessLayer(m_flatBufferBuilder,
519  fbBaseLayer,
520  fbDescriptor,
521  fbAnchorsConstTensorInfo);
522 
523  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_DetectionPostProcessLayer);
524 }
525 
526 void SerializerStrategy::SerializeDivisionLayer(const armnn::IConnectableLayer* layer, const char* name)
527 {
528  IgnoreUnused(name);
529 
530  auto fbDivisionBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Division);
531  auto fbDivisionLayer = serializer::CreateDivisionLayer(m_flatBufferBuilder, fbDivisionBaseLayer);
532 
533  CreateAnyLayer(fbDivisionLayer.o, serializer::Layer::Layer_DivisionLayer);
534 }
535 
536 void SerializerStrategy::SerializeElementwiseBinaryLayer(const armnn::IConnectableLayer* layer,
537  const armnn::ElementwiseBinaryDescriptor& descriptor,
538  const char* name)
539 {
540  IgnoreUnused(name);
541 
542  auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_ElementwiseBinary);
543  auto fbDescriptor = serializer::CreateElementwiseBinaryDescriptor(
544  m_flatBufferBuilder,
545  GetFlatBufferBinaryOperation(descriptor.m_Operation));
546 
547  auto fbLayer = serializer::CreateElementwiseBinaryLayer(m_flatBufferBuilder, fbBaseLayer, fbDescriptor);
548  CreateAnyLayer(fbLayer.o, serializer::Layer::Layer_ElementwiseBinaryLayer);
549 }
550 
551 void SerializerStrategy::SerializeElementwiseUnaryLayer(const armnn::IConnectableLayer* layer,
552  const armnn::ElementwiseUnaryDescriptor& descriptor,
553  const char* name)
554 {
555  IgnoreUnused(name);
556 
557  auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_ElementwiseUnary);
558  auto fbDescriptor = serializer::CreateElementwiseUnaryDescriptor(
559  m_flatBufferBuilder,
560  GetFlatBufferUnaryOperation(descriptor.m_Operation));
561 
562  auto fbLayer = serializer::CreateElementwiseUnaryLayer(m_flatBufferBuilder, fbBaseLayer, fbDescriptor);
563  CreateAnyLayer(fbLayer.o, serializer::Layer::Layer_ElementwiseUnaryLayer);
564 }
565 
566 void SerializerStrategy::SerializeFillLayer(const armnn::IConnectableLayer* layer,
567  const armnn::FillDescriptor& fillDescriptor,
568  const char* name)
569 {
570  IgnoreUnused(name);
571 
572  auto fbFillBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Fill);
573 
574  auto fbDescriptor = serializer::CreateFillDescriptor(m_flatBufferBuilder, fillDescriptor.m_Value);
575 
576  auto fbFillLayer = serializer::CreateFillLayer(m_flatBufferBuilder, fbFillBaseLayer, fbDescriptor);
577 
578  CreateAnyLayer(fbFillLayer.o, serializer::Layer::Layer_FillLayer);
579 }
580 
581 void SerializerStrategy::SerializeFloorLayer(const armnn::IConnectableLayer *layer, const char *name)
582 {
583  IgnoreUnused(name);
584 
585  auto flatBufferFloorBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Floor);
586  auto flatBufferFloorLayer = serializer::CreateFloorLayer(m_flatBufferBuilder, flatBufferFloorBaseLayer);
587 
588  CreateAnyLayer(flatBufferFloorLayer.o, serializer::Layer::Layer_FloorLayer);
589 }
590 
591 void SerializerStrategy::SerializeGatherLayer(const armnn::IConnectableLayer* layer,
592  const armnn::GatherDescriptor& gatherDescriptor,
593  const char* name)
594 {
595  IgnoreUnused(name);
596 
597  auto fbGatherDescriptor = CreateGatherDescriptor(m_flatBufferBuilder,
598  gatherDescriptor.m_Axis);
599  auto fbGatherBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Gather);
600  auto flatBufferLayer = serializer::CreateGatherLayer(m_flatBufferBuilder, fbGatherBaseLayer, fbGatherDescriptor);
601 
602  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_GatherLayer);
603 }
604 
605 void SerializerStrategy::SerializeGatherNdLayer(const armnn::IConnectableLayer* layer,
606  const char* name)
607 {
608  IgnoreUnused(name);
609 
610  auto fbGatherNdBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_GatherNd);
611  auto flatBufferLayer = serializer::CreateGatherNdLayer(m_flatBufferBuilder, fbGatherNdBaseLayer);
612 
613  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_GatherNdLayer);
614 }
615 
616 void SerializerStrategy::SerializeInstanceNormalizationLayer(
617  const armnn::IConnectableLayer* layer,
618  const armnn::InstanceNormalizationDescriptor& instanceNormalizationDescriptor,
619  const char* name)
620 {
621  IgnoreUnused(name);
622 
623  auto fbDescriptor = serializer::CreateInstanceNormalizationDescriptor(
624  m_flatBufferBuilder,
625  instanceNormalizationDescriptor.m_Gamma,
626  instanceNormalizationDescriptor.m_Beta,
627  instanceNormalizationDescriptor.m_Eps,
628  GetFlatBufferDataLayout(instanceNormalizationDescriptor.m_DataLayout));
629 
630  auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_InstanceNormalization);
631  auto fbLayer = serializer::CreateInstanceNormalizationLayer(m_flatBufferBuilder, fbBaseLayer, fbDescriptor);
632 
633  CreateAnyLayer(fbLayer.o, serializer::Layer::Layer_InstanceNormalizationLayer);
634 }
635 
636 void SerializerStrategy::SerializeL2NormalizationLayer(const armnn::IConnectableLayer* layer,
637  const armnn::L2NormalizationDescriptor& l2NormalizationDescriptor,
638  const char* name)
639 {
640  IgnoreUnused(name);
641 
642  // Create FlatBuffer BaseLayer
643  auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_L2Normalization);
644 
645  // Create the FlatBuffer L2Normalization Descriptor
646  auto fbDescriptor = serializer::CreateL2NormalizationDescriptor(
647  m_flatBufferBuilder,
648  GetFlatBufferDataLayout(l2NormalizationDescriptor.m_DataLayout),
649  l2NormalizationDescriptor.m_Eps);
650 
651  // Create FlatBuffer layer
652  auto fbLayer = serializer::CreateL2NormalizationLayer(m_flatBufferBuilder, fbBaseLayer, fbDescriptor);
653 
654  CreateAnyLayer(fbLayer.o, serializer::Layer::Layer_L2NormalizationLayer);
655 }
656 
657 void SerializerStrategy::SerializeLogicalBinaryLayer(const armnn::IConnectableLayer* layer,
658  const armnn::LogicalBinaryDescriptor& descriptor,
659  const char* name)
660 {
661  IgnoreUnused(name);
662 
663  auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_LogicalBinary);
664  auto fbDescriptor = serializer::CreateLogicalBinaryDescriptor(
665  m_flatBufferBuilder,
666  GetFlatBufferLogicalBinaryOperation(descriptor.m_Operation));
667 
668  auto fbLayer = serializer::CreateLogicalBinaryLayer(m_flatBufferBuilder, fbBaseLayer, fbDescriptor);
669  CreateAnyLayer(fbLayer.o, serializer::Layer::Layer_LogicalBinaryLayer);
670 }
671 
672 void SerializerStrategy::SerializeLogSoftmaxLayer(const armnn::IConnectableLayer* layer,
673  const armnn::LogSoftmaxDescriptor& logSoftmaxDescriptor,
674  const char* name)
675 {
676  IgnoreUnused(name);
677 
678  // Create FlatBuffer BaseLayer
679  auto flatBufferLogSoftmaxBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_LogSoftmax);
680 
681  // Create the FlatBuffer LogSoftmaxDescriptor
682  auto flatBufferLogSoftmaxDesc =
683  serializer::CreateLogSoftmaxDescriptor(m_flatBufferBuilder,
684  logSoftmaxDescriptor.m_Beta,
685  logSoftmaxDescriptor.m_Axis);
686 
687  // Create the FlatBuffer LogSoftmaxLayer
688  auto flatBufferLogSoftmaxLayer =
689  serializer::CreateLogSoftmaxLayer(m_flatBufferBuilder,
690  flatBufferLogSoftmaxBaseLayer,
691  flatBufferLogSoftmaxDesc);
692 
693  CreateAnyLayer(flatBufferLogSoftmaxLayer.o, serializer::Layer::Layer_LogSoftmaxLayer);
694 }
695 
696 void SerializerStrategy::SerializeLstmLayer(const armnn::IConnectableLayer* layer,
697  const armnn::LstmDescriptor& descriptor,
698  const std::vector<armnn::ConstTensor>& constants,
699  const char* name)
700 {
701  IgnoreUnused(name);
702 
703  auto fbLstmBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Lstm);
704 
705  auto fbLstmDescriptor = serializer::CreateLstmDescriptor(
706  m_flatBufferBuilder,
707  descriptor.m_ActivationFunc,
708  descriptor.m_ClippingThresCell,
709  descriptor.m_ClippingThresProj,
710  descriptor.m_CifgEnabled,
711  descriptor.m_PeepholeEnabled,
712  descriptor.m_ProjectionEnabled,
713  descriptor.m_LayerNormEnabled);
714 
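// The constants vector is consumed in a fixed order, as implemented below: the
// nine mandatory tensors first (input-to-gate weights, recurrent-to-gate weights
// and gate biases), then the optional CIFG, peephole, projection and layer-norm
// tensors, each group present only when the corresponding descriptor flag enables it.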
715  // Index for constants vector
716  std::size_t i = 0;
717 
718  // Get mandatory/basic input parameters
719  auto inputToForgetWeights = CreateConstTensorInfo(constants[i++]); //InputToForgetWeights
720  auto inputToCellWeights = CreateConstTensorInfo(constants[i++]); //InputToCellWeights
721  auto inputToOutputWeights = CreateConstTensorInfo(constants[i++]); //InputToOutputWeights
722  auto recurrentToForgetWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToForgetWeights
723  auto recurrentToCellWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToCellWeights
724  auto recurrentToOutputWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToOutputWeights
725  auto forgetGateBias = CreateConstTensorInfo(constants[i++]); //ForgetGateBias
726  auto cellBias = CreateConstTensorInfo(constants[i++]); //CellBias
727  auto outputGateBias = CreateConstTensorInfo(constants[i++]); //OutputGateBias
728 
729 
730 
731  //Define optional parameters, these will be set depending on configuration in Lstm descriptor
732  flatbuffers::Offset<serializer::ConstTensor> inputToInputWeights;
733  flatbuffers::Offset<serializer::ConstTensor> recurrentToInputWeights;
734  flatbuffers::Offset<serializer::ConstTensor> cellToInputWeights;
735  flatbuffers::Offset<serializer::ConstTensor> inputGateBias;
736  flatbuffers::Offset<serializer::ConstTensor> projectionWeights;
737  flatbuffers::Offset<serializer::ConstTensor> projectionBias;
738  flatbuffers::Offset<serializer::ConstTensor> cellToForgetWeights;
739  flatbuffers::Offset<serializer::ConstTensor> cellToOutputWeights;
740  flatbuffers::Offset<serializer::ConstTensor> inputLayerNormWeights;
741  flatbuffers::Offset<serializer::ConstTensor> forgetLayerNormWeights;
742  flatbuffers::Offset<serializer::ConstTensor> cellLayerNormWeights;
743  flatbuffers::Offset<serializer::ConstTensor> outputLayerNormWeights;
744 
745  if (!descriptor.m_CifgEnabled)
746  {
747  inputToInputWeights = CreateConstTensorInfo(constants[i++]); //InputToInputWeights
748  recurrentToInputWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToInputWeights
749  inputGateBias = CreateConstTensorInfo(constants[i++]); //InputGateBias
750  }
751 
752  if (descriptor.m_PeepholeEnabled)
753  {
754  if (!descriptor.m_CifgEnabled)
755  {
756  cellToInputWeights = CreateConstTensorInfo(constants[i++]); //CellToInputWeights
757  }
758  cellToForgetWeights = CreateConstTensorInfo(constants[i++]); //CellToForgetWeights
759  cellToOutputWeights = CreateConstTensorInfo(constants[i++]); //CellToOutputWeights
760  }
761 
762  if (descriptor.m_ProjectionEnabled)
763  {
764  projectionWeights = CreateConstTensorInfo(constants[i++]); //ProjectionWeights
765  projectionBias = CreateConstTensorInfo(constants[i++]); //ProjectionBias
766  }
767 
768  if (descriptor.m_LayerNormEnabled)
769  {
770  if (!descriptor.m_CifgEnabled)
771  {
772  inputLayerNormWeights = CreateConstTensorInfo(constants[i++]); //InputLayerNormWeights
773  }
774  forgetLayerNormWeights = CreateConstTensorInfo(constants[i++]); //ForgetLayerNormWeights
775  cellLayerNormWeights = CreateConstTensorInfo(constants[i++]); //CellLayerNormWeights
776  outputLayerNormWeights = CreateConstTensorInfo(constants[i++]); //OutputLayerNormWeights
777  }
778 
779  auto fbLstmParams = serializer::CreateLstmInputParams(
780  m_flatBufferBuilder,
781  inputToForgetWeights,
782  inputToCellWeights,
783  inputToOutputWeights,
784  recurrentToForgetWeights,
785  recurrentToCellWeights,
786  recurrentToOutputWeights,
787  forgetGateBias,
788  cellBias,
789  outputGateBias,
790  inputToInputWeights,
791  recurrentToInputWeights,
792  cellToInputWeights,
793  inputGateBias,
794  projectionWeights,
795  projectionBias,
796  cellToForgetWeights,
797  cellToOutputWeights,
798  inputLayerNormWeights,
799  forgetLayerNormWeights,
800  cellLayerNormWeights,
801  outputLayerNormWeights);
802 
803  auto fbLstmLayer = serializer::CreateLstmLayer(
804  m_flatBufferBuilder,
805  fbLstmBaseLayer,
806  fbLstmDescriptor,
807  fbLstmParams);
808 
809  CreateAnyLayer(fbLstmLayer.o, serializer::Layer::Layer_LstmLayer);
810 }
811 
812 void SerializerStrategy::SerializeMaximumLayer(const armnn::IConnectableLayer* layer, const char* name)
813 {
814  IgnoreUnused(name);
815 
816  auto fbMaximumBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Maximum);
817  auto fbMaximumLayer = serializer::CreateMaximumLayer(m_flatBufferBuilder, fbMaximumBaseLayer);
818 
819  CreateAnyLayer(fbMaximumLayer.o, serializer::Layer::Layer_MaximumLayer);
820 }
821 
822 void SerializerStrategy::SerializeMeanLayer(const armnn::IConnectableLayer* layer,
823  const armnn::MeanDescriptor& descriptor,
824  const char* name)
825 {
826  IgnoreUnused(name);
827 
828  auto fbMeanBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Mean);
829  auto fbMeanDescriptor = serializer::CreateMeanDescriptor(m_flatBufferBuilder,
830  m_flatBufferBuilder.CreateVector(descriptor.m_Axis),
831  descriptor.m_KeepDims);
832 
833  auto fbMeanLayer = serializer::CreateMeanLayer(m_flatBufferBuilder,
834  fbMeanBaseLayer,
835  fbMeanDescriptor);
836 
837  CreateAnyLayer(fbMeanLayer.o, serializer::Layer::Layer_MeanLayer);
838 }
839 
840 void SerializerStrategy::SerializeMinimumLayer(const armnn::IConnectableLayer* layer, const char* name)
841 {
842  IgnoreUnused(name);
843 
844  auto fbMinimumBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Minimum);
845  auto fbMinimumLayer = serializer::CreateMinimumLayer(m_flatBufferBuilder, fbMinimumBaseLayer);
846 
847  CreateAnyLayer(fbMinimumLayer.o, serializer::Layer::Layer_MinimumLayer);
848 }
849 
850 void SerializerStrategy::SerializeMergeLayer(const armnn::IConnectableLayer* layer, const char* name)
851 {
852  IgnoreUnused(name);
853 
854  auto fbMergeBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Merge);
855  auto fbMergeLayer = serializer::CreateMergeLayer(m_flatBufferBuilder, fbMergeBaseLayer);
856 
857  CreateAnyLayer(fbMergeLayer.o, serializer::Layer::Layer_MergeLayer);
858 }
859 
860 void SerializerStrategy::SerializeConcatLayer(const armnn::IConnectableLayer* layer,
861  const armnn::ConcatDescriptor& concatDescriptor,
862  const char* name)
863 {
864  IgnoreUnused(name);
865 
866  auto flatBufferConcatBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Concat);
867 
868  std::vector<flatbuffers::Offset<UintVector>> views;
869  for (unsigned int v = 0; v < concatDescriptor.GetNumViews(); ++v)
870  {
871  const uint32_t* origin = concatDescriptor.GetViewOrigin(v);
872  std::vector<uint32_t> origins;
873  for (unsigned int d = 0; d < concatDescriptor.GetNumDimensions(); ++d)
874  {
875  origins.push_back(origin[d]);
876  }
877  auto view = m_flatBufferBuilder.CreateVector(origins);
878  auto uintVector = CreateUintVector(m_flatBufferBuilder, view);
879  views.push_back(uintVector);
880  }
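 // Each view origin becomes its own UintVector, e.g. for two 4-D views
 // (assumed values) views = [UintVector{0,0,0,0}, UintVector{0,1,0,0}].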
881 
882  auto flatBufferConcatDescriptor = CreateOriginsDescriptor(m_flatBufferBuilder,
883  concatDescriptor.GetConcatAxis(),
884  concatDescriptor.GetNumViews(),
885  concatDescriptor.GetNumDimensions(),
886  m_flatBufferBuilder.CreateVector(views));
887 
888  auto flatBufferLayer = CreateConcatLayer(m_flatBufferBuilder,
889  flatBufferConcatBaseLayer,
890  flatBufferConcatDescriptor);
891 
892  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_ConcatLayer);
893 }
894 
895 void SerializerStrategy::SerializeMultiplicationLayer(const armnn::IConnectableLayer* layer, const char* name)
896 {
897  IgnoreUnused(name);
898 
899  auto fbMultiplicationBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Multiplication);
900  auto fbMultiplicationLayer = serializer::CreateMultiplicationLayer(m_flatBufferBuilder,
901  fbMultiplicationBaseLayer);
902 
903  CreateAnyLayer(fbMultiplicationLayer.o, serializer::Layer::Layer_MultiplicationLayer);
904 }
905 
906 void SerializerStrategy::SerializePadLayer(const armnn::IConnectableLayer* layer,
907  const armnn::PadDescriptor& padDescriptor,
908  const char* name)
909 {
910  IgnoreUnused(name);
911 
912  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Pad);
913 
914  std::vector<unsigned int> padList;
915  for (auto& p: padDescriptor.m_PadList)
916  {
917  padList.push_back(p.first);
918  padList.push_back(p.second);
919  }
920 
921  auto flatBufferPadDesc = serializer::CreatePadDescriptor(m_flatBufferBuilder,
922  m_flatBufferBuilder.CreateVector(padList),
923  padDescriptor.m_PadValue,
924  GetFlatBufferPaddingMode(padDescriptor.m_PaddingMode));
925 
926  auto flatBufferPadLayer = serializer::CreatePadLayer(m_flatBufferBuilder,
927  flatBufferBaseLayer,
928  flatBufferPadDesc);
929 
930  CreateAnyLayer(flatBufferPadLayer.o, serializer::Layer::Layer_PadLayer);
931 }
932 
933 void SerializerStrategy::SerializePermuteLayer(const armnn::IConnectableLayer* layer,
934  const armnn::PermuteDescriptor& permuteDescriptor,
935  const char* name)
936 {
937  IgnoreUnused(name);
938 
939  // Create FlatBuffer BaseLayer
940  auto flatBufferPermuteBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Permute);
941 
942  std::vector<unsigned int> dimMappings;
943  for (unsigned int i=0; i<permuteDescriptor.m_DimMappings.GetSize(); ++i)
944  {
945  dimMappings.push_back(permuteDescriptor.m_DimMappings[i]);
946  }
947 
948  auto flatBufferPermuteDesc = serializer::CreatePermuteDescriptor(m_flatBufferBuilder,
949  m_flatBufferBuilder.CreateVector(dimMappings));
950 
951  // Create the FlatBuffer PermuteLayer
952  auto flatBufferPermuteLayer = serializer::CreatePermuteLayer(m_flatBufferBuilder,
953  flatBufferPermuteBaseLayer,
954  flatBufferPermuteDesc);
955 
956  // Add the AnyLayer to the FlatBufferLayers
957  CreateAnyLayer(flatBufferPermuteLayer.o, serializer::Layer::Layer_PermuteLayer);
958 }
959 
960 // Build FlatBuffer for Rank Layer
961 void SerializerStrategy::SerializeRankLayer(const armnn::IConnectableLayer* layer,
962  const char* name)
963 {
964  IgnoreUnused(name);
965  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Rank);
966  auto flatBufferRankLayer = serializer::CreateRankLayer(m_flatBufferBuilder, flatBufferBaseLayer);
967 
968  CreateAnyLayer(flatBufferRankLayer.o, serializer::Layer::Layer_RankLayer);
969 }
970 
971 void SerializerStrategy::SerializeReduceLayer(const armnn::IConnectableLayer* layer,
972  const armnn::ReduceDescriptor& reduceDescriptor,
973  const char*)
974 {
975  auto fbReduceBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Reduce);
976  auto fbDescriptor = CreateReduceDescriptor(m_flatBufferBuilder,
977  reduceDescriptor.m_KeepDims,
978  m_flatBufferBuilder.CreateVector(reduceDescriptor.m_vAxis),
979  GetFlatBufferReduceOperation(reduceDescriptor.m_ReduceOperation));
980  auto fbReduceLayer = serializer::CreateReduceLayer(m_flatBufferBuilder,
981  fbReduceBaseLayer,
982  fbDescriptor);
983 
984  CreateAnyLayer(fbReduceLayer.o, serializer::Layer::Layer_ReduceLayer);
985 }
986 
987 // Build FlatBuffer for Reshape Layer
988 void SerializerStrategy::SerializeReshapeLayer(const armnn::IConnectableLayer* layer,
989  const armnn::ReshapeDescriptor& reshapeDescriptor,
990  const char* name)
991 {
992  IgnoreUnused(name);
993 
994  // Create FlatBuffer BaseLayer
995  auto flatBufferReshapeBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Reshape);
996 
997  std::vector<unsigned int> targetShape;
998  for (unsigned int i =0; i < reshapeDescriptor.m_TargetShape.GetNumDimensions(); i++)
999  {
1000  targetShape.push_back(reshapeDescriptor.m_TargetShape[i]);
1001  }
1002 
1003  auto flatBufferReshapeDesc = serializer::CreateReshapeDescriptor(m_flatBufferBuilder,
1004  m_flatBufferBuilder.CreateVector(targetShape));
1005 
1006  // Create the FlatBuffer ReshapeLayer
1007  auto flatBufferReshapeLayer = serializer::CreateReshapeLayer(m_flatBufferBuilder, flatBufferReshapeBaseLayer,
1008  flatBufferReshapeDesc);
1009 
1010  // Add the AnyLayer to the FlatBufferLayers
1011  CreateAnyLayer(flatBufferReshapeLayer.o, serializer::Layer::Layer_ReshapeLayer);
1012 }
1013 
1014 void SerializerStrategy::SerializeResizeLayer(const armnn::IConnectableLayer* layer,
1015  const armnn::ResizeDescriptor& resizeDescriptor,
1016  const char* name)
1017 {
1018  IgnoreUnused(name);
1019 
1020  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Resize);
1021 
1022  auto flatBufferDescriptor =
1023  CreateResizeDescriptor(m_flatBufferBuilder,
1024  resizeDescriptor.m_TargetHeight,
1025  resizeDescriptor.m_TargetWidth,
1026  GetFlatBufferResizeMethod(resizeDescriptor.m_Method),
1027  GetFlatBufferDataLayout(resizeDescriptor.m_DataLayout),
1028  resizeDescriptor.m_AlignCorners,
1029  resizeDescriptor.m_HalfPixelCenters);
1030 
1031  auto flatBufferLayer = serializer::CreateResizeLayer(m_flatBufferBuilder,
1032  flatBufferBaseLayer,
1033  flatBufferDescriptor);
1034 
1035  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_ResizeLayer);
1036 }
1037 
1038 void SerializerStrategy::SerializeSliceLayer(const armnn::IConnectableLayer* layer,
1039  const armnn::SliceDescriptor& sliceDescriptor,
1040  const char* name)
1041 {
1042  IgnoreUnused(name);
1043 
1044  auto fbSliceBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Slice);
1045  auto fbSliceDescriptor = CreateSliceDescriptor(m_flatBufferBuilder,
1046  m_flatBufferBuilder.CreateVector(sliceDescriptor.m_Begin),
1047  m_flatBufferBuilder.CreateVector(sliceDescriptor.m_Size));
1048 
1049  auto fbSliceLayer = serializer::CreateSliceLayer(m_flatBufferBuilder, fbSliceBaseLayer, fbSliceDescriptor);
1050 
1051  CreateAnyLayer(fbSliceLayer.o, serializer::Layer::Layer_SliceLayer);
1052 }
1053 
1054 // Build FlatBuffer for Softmax Layer
1055 void SerializerStrategy::SerializeSoftmaxLayer(const armnn::IConnectableLayer* layer,
1056  const armnn::SoftmaxDescriptor& softmaxDescriptor,
1057  const char* name)
1058 {
1059  IgnoreUnused(name);
1060 
1061  // Create FlatBuffer BaseLayer
1062  auto flatBufferSoftmaxBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Softmax);
1063 
1064  // Create the FlatBuffer SoftmaxDescriptor
1065  auto flatBufferSoftmaxDesc =
1066  serializer::CreateSoftmaxDescriptor(m_flatBufferBuilder,
1067  softmaxDescriptor.m_Beta,
1068  softmaxDescriptor.m_Axis);
1069 
1070  // Create the FlatBuffer SoftmaxLayer
1071  auto flatBufferSoftmaxLayer =
1072  serializer::CreateSoftmaxLayer(m_flatBufferBuilder,
1073  flatBufferSoftmaxBaseLayer,
1074  flatBufferSoftmaxDesc);
1075 
1076  CreateAnyLayer(flatBufferSoftmaxLayer.o, serializer::Layer::Layer_SoftmaxLayer);
1077 }
1078 
1079 void SerializerStrategy::SerializePooling2dLayer(const armnn::IConnectableLayer* layer,
1080  const armnn::Pooling2dDescriptor& pooling2dDescriptor,
1081  const char* name)
1082 {
1083  IgnoreUnused(name);
1084 
1085  auto fbPooling2dBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Pooling2d);
1086  auto fbPooling2dDescriptor = serializer::CreatePooling2dDescriptor(
1087  m_flatBufferBuilder,
1088  GetFlatBufferPoolingAlgorithm(pooling2dDescriptor.m_PoolType),
1089  pooling2dDescriptor.m_PadLeft,
1090  pooling2dDescriptor.m_PadRight,
1091  pooling2dDescriptor.m_PadTop,
1092  pooling2dDescriptor.m_PadBottom,
1093  pooling2dDescriptor.m_PoolWidth,
1094  pooling2dDescriptor.m_PoolHeight,
1095  pooling2dDescriptor.m_StrideX,
1096  pooling2dDescriptor.m_StrideY,
1097  GetFlatBufferOutputShapeRounding(pooling2dDescriptor.m_OutputShapeRounding),
1098  GetFlatBufferPaddingMethod(pooling2dDescriptor.m_PaddingMethod),
1099  GetFlatBufferDataLayout(pooling2dDescriptor.m_DataLayout));
1100 
1101  auto fbPooling2dLayer = serializer::CreatePooling2dLayer(m_flatBufferBuilder,
1102  fbPooling2dBaseLayer,
1103  fbPooling2dDescriptor);
1104 
1105  CreateAnyLayer(fbPooling2dLayer.o, serializer::Layer::Layer_Pooling2dLayer);
1106 }
1107 
1108 void SerializerStrategy::SerializePooling3dLayer(const armnn::IConnectableLayer* layer,
1109  const armnn::Pooling3dDescriptor& pooling3dDescriptor,
1110  const char* name)
1111 {
1112  IgnoreUnused(name);
1113 
1114  auto fbPooling3dBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Pooling3d);
1115  auto fbPooling3dDescriptor = serializer::CreatePooling3dDescriptor(
1116  m_flatBufferBuilder,
1117  GetFlatBufferPoolingAlgorithm(pooling3dDescriptor.m_PoolType),
1118  pooling3dDescriptor.m_PadLeft,
1119  pooling3dDescriptor.m_PadRight,
1120  pooling3dDescriptor.m_PadTop,
1121  pooling3dDescriptor.m_PadBottom,
1122  pooling3dDescriptor.m_PadFront,
1123  pooling3dDescriptor.m_PadBack,
1124  pooling3dDescriptor.m_PoolWidth,
1125  pooling3dDescriptor.m_PoolHeight,
1126  pooling3dDescriptor.m_PoolDepth,
1127  pooling3dDescriptor.m_StrideX,
1128  pooling3dDescriptor.m_StrideY,
1129  pooling3dDescriptor.m_StrideZ,
1130  GetFlatBufferOutputShapeRounding(pooling3dDescriptor.m_OutputShapeRounding),
1131  GetFlatBufferPaddingMethod(pooling3dDescriptor.m_PaddingMethod),
1132  GetFlatBufferDataLayout(pooling3dDescriptor.m_DataLayout));
1133 
1134  auto fbPooling3dLayer = serializer::CreatePooling3dLayer(m_flatBufferBuilder,
1135  fbPooling3dBaseLayer,
1136  fbPooling3dDescriptor);
1137 
1138  CreateAnyLayer(fbPooling3dLayer.o, serializer::Layer::Layer_Pooling3dLayer);
1139 }
1140 
1141 void SerializerStrategy::SerializePreluLayer(const armnn::IConnectableLayer* layer,
1142  const char* name)
1143 {
1144  IgnoreUnused(name);
1145 
1146  // Create FlatBuffer BaseLayer
1147  auto flatBufferPreluBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Prelu);
1148 
1149  // Create the FlatBuffer PreluLayer
1150  auto flatBufferPreluLayer = serializer::CreatePreluLayer(m_flatBufferBuilder, flatBufferPreluBaseLayer);
1151 
1152  // Add the AnyLayer to the FlatBufferLayers
1153  CreateAnyLayer(flatBufferPreluLayer.o, serializer::Layer::Layer_PreluLayer);
1154 }
1155 
1156 void SerializerStrategy::SerializeQuantizeLayer(const armnn::IConnectableLayer *layer, const char *name)
1157 {
1158  IgnoreUnused(name);
1159 
1160  auto fbQuantizeBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Quantize);
1161  auto fbQuantizeLayer = serializer::CreateQuantizeLayer(m_flatBufferBuilder,
1162  fbQuantizeBaseLayer);
1163  CreateAnyLayer(fbQuantizeLayer.o, serializer::Layer::Layer_QuantizeLayer);
1164 }
1165 
1166 // Build FlatBuffer for FullyConnected Layer
1167 void SerializerStrategy::SerializeFullyConnectedLayer(const armnn::IConnectableLayer* layer,
1168  const armnn::FullyConnectedDescriptor& fullyConnectedDescriptor,
1169  const char*)
1170 {
1171  // Create FlatBuffer BaseLayer
1172  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_FullyConnected);
1173 
1174  // Create FlatBuffer FullyConnectedDescriptor
1175  auto flatBufferDescriptor =
1176  serializer::CreateFullyConnectedDescriptor(m_flatBufferBuilder,
1177  fullyConnectedDescriptor.m_BiasEnabled,
1178  fullyConnectedDescriptor.m_TransposeWeightMatrix,
1179  fullyConnectedDescriptor.m_ConstantWeights);
1180 
1181  // Create FlatBuffer FullyConnectedLayer
1182  auto flatBufferLayer = serializer::CreateFullyConnectedLayer(m_flatBufferBuilder,
1183  flatBufferBaseLayer,
1184  flatBufferDescriptor);
1185 
1186  // Add created FullyConnectedLayer to the FlatBufferLayers
1187  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_FullyConnectedLayer);
1188 }
1189 
1190 // Build FlatBuffer for SpaceToBatchNd Layer
1191 void SerializerStrategy::SerializeSpaceToBatchNdLayer(const armnn::IConnectableLayer* layer,
1192  const armnn::SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor,
1193  const char* name)
1194 {
1195  IgnoreUnused(name);
1196 
1197  // Create FlatBuffer BaseLayer
1198  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_SpaceToBatchNd);
1199 
1200  std::vector<unsigned int> padList;
1201  padList.reserve(spaceToBatchNdDescriptor.m_PadList.size()*2);
1202  for (auto& pad : spaceToBatchNdDescriptor.m_PadList)
1203  {
1204  padList.push_back(pad.first);
1205  padList.push_back(pad.second);
1206  }
1207 
1208  auto flatBufferDescriptor =
1209  CreateSpaceToBatchNdDescriptor(m_flatBufferBuilder,
1210  m_flatBufferBuilder.CreateVector(spaceToBatchNdDescriptor.m_BlockShape),
1211  m_flatBufferBuilder.CreateVector(padList),
1212  GetFlatBufferDataLayout(spaceToBatchNdDescriptor.m_DataLayout));
1213 
1214  auto flatBufferLayer = serializer::CreateSpaceToBatchNdLayer(m_flatBufferBuilder,
1215  flatBufferBaseLayer,
1216  flatBufferDescriptor);
1217 
1218  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_SpaceToBatchNdLayer);
1219 }
1220 
1221 // Build FlatBuffer for SpaceToDepthLayer
1222 void SerializerStrategy::SerializeSpaceToDepthLayer(const armnn::IConnectableLayer* layer,
1223  const armnn::SpaceToDepthDescriptor& spaceToDepthDescriptor,
1224  const char* name)
1225 {
1226  IgnoreUnused(name);
1227 
1228  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_SpaceToDepth);
1229  auto flatBufferDescriptor =
1230  CreateSpaceToDepthDescriptor(m_flatBufferBuilder,
1231  spaceToDepthDescriptor.m_BlockSize,
1232  GetFlatBufferDataLayout(spaceToDepthDescriptor.m_DataLayout));
1233 
1234  auto flatBufferLayer = serializer::CreateSpaceToDepthLayer(m_flatBufferBuilder,
1235  flatBufferBaseLayer,
1236  flatBufferDescriptor);
1237 
1238  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_SpaceToDepthLayer);
1239 }
1240 
1241 // Build FlatBuffer for Splitter Layer
1242 void SerializerStrategy::SerializeSplitterLayer(const armnn::IConnectableLayer* layer,
1243  const armnn::ViewsDescriptor& viewsDescriptor,
1244  const char* name)
1245 {
1246  IgnoreUnused(name);
1247 
1248  // Create FlatBuffer ViewOrigins
1249  std::vector<flatbuffers::Offset<UintVector>> flatBufferViewOrigins;
1250  flatBufferViewOrigins.reserve(viewsDescriptor.GetNumViews());
1251 
1252  for(unsigned int vIdx = 0; vIdx < viewsDescriptor.GetNumViews(); ++vIdx)
1253  {
1254  std::vector<uint32_t> viewOrigin;
1255  viewOrigin.reserve(viewsDescriptor.GetNumDimensions());
1256 
1257  // Copy vector
1258  for(unsigned int dIdx = 0; dIdx < viewsDescriptor.GetNumDimensions(); ++dIdx)
1259  {
1260  viewOrigin.push_back(viewsDescriptor.GetViewOrigin(vIdx)[dIdx]);
1261  }
1262 
1263  flatBufferViewOrigins.push_back(CreateUintVector(m_flatBufferBuilder,
1264  m_flatBufferBuilder.CreateVector(viewOrigin)));
1265  }
1266 
1267  // Create FlatBuffer OriginsDescriptor
1268  auto flatBufferOriginDescriptor = CreateOriginsDescriptor(m_flatBufferBuilder,
1269  viewsDescriptor.GetOrigins().GetConcatAxis(),
1270  viewsDescriptor.GetOrigins().GetNumViews(),
1271  viewsDescriptor.GetOrigins().GetNumDimensions(),
1272  m_flatBufferBuilder.CreateVector(flatBufferViewOrigins));
1273 
1274  // Create FlatBuffer ViewSizes
1275  std::vector<flatbuffers::Offset<UintVector>> flatBufferViewSizes;
1276  flatBufferViewSizes.reserve(viewsDescriptor.GetNumViews());
1277 
1278  for(unsigned int vIdx = 0; vIdx < viewsDescriptor.GetNumViews(); ++vIdx)
1279  {
1280  std::vector<uint32_t> viewSize;
1281  viewSize.reserve(viewsDescriptor.GetNumDimensions());
1282 
1283  // Copy vector
1284  for(unsigned int dIdx = 0; dIdx < viewsDescriptor.GetNumDimensions(); ++dIdx)
1285  {
1286  viewSize.push_back(viewsDescriptor.GetViewSizes(vIdx)[dIdx]);
1287  }
1288 
1289  flatBufferViewSizes.push_back(CreateUintVector(m_flatBufferBuilder,
1290  m_flatBufferBuilder.CreateVector(viewSize)));
1291  }
1292 
1293  // Create FlatBuffer ViewsDescriptor
1294  auto flatBufferViewsDescriptor = CreateViewsDescriptor(m_flatBufferBuilder,
1295  flatBufferOriginDescriptor,
1296  m_flatBufferBuilder.CreateVector(flatBufferViewSizes));
1297 
1298  // Create FlatBuffer BaseLayer
1299  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Splitter);
1300 
1301  auto flatBufferSplitterLayer = serializer::CreateSplitterLayer(m_flatBufferBuilder,
1302  flatBufferBaseLayer,
1303  flatBufferViewsDescriptor);
1304 
1305  CreateAnyLayer(flatBufferSplitterLayer.o, serializer::Layer::Layer_SplitterLayer);
1306 }
1307 
1308 void SerializerStrategy::SerializeNormalizationLayer(const armnn::IConnectableLayer* layer,
1309  const armnn::NormalizationDescriptor& descriptor,
1310  const char* name)
1311 {
1312  IgnoreUnused(name);
1313 
1314  auto fbNormalizationBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Normalization);
1315 
1316  auto fbNormalizationDescriptor = serializer::CreateNormalizationDescriptor(
1317  m_flatBufferBuilder,
1318  GetFlatBufferNormalizationAlgorithmChannel(descriptor.m_NormChannelType),
1319  GetFlatBufferNormalizationAlgorithmMethod(descriptor.m_NormMethodType),
1320  descriptor.m_NormSize,
1321  descriptor.m_Alpha,
1322  descriptor.m_Beta,
1323  descriptor.m_K,
1324  GetFlatBufferDataLayout(descriptor.m_DataLayout));
1325 
1326  auto flatBufferLayer = serializer::CreateNormalizationLayer(m_flatBufferBuilder,
1327  fbNormalizationBaseLayer,
1328  fbNormalizationDescriptor);
1329 
1330  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_NormalizationLayer);
1331 }
1332 
1333 void SerializerStrategy::SerializeShapeLayer(const armnn::IConnectableLayer* layer,
1334  const char* name)
1335 {
1336  IgnoreUnused(name);
1337 
1338  auto shapeBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Shape);
1339  auto shapeLayer = serializer::CreateShapeLayer(m_flatBufferBuilder, shapeBaseLayer);
1340 
1341  CreateAnyLayer(shapeLayer.o, serializer::Layer::Layer_ShapeLayer);
1342 }
1343 
1344 void SerializerStrategy::SerializeStackLayer(const armnn::IConnectableLayer* layer,
1345  const armnn::StackDescriptor& stackDescriptor,
1346  const char* name)
1347 {
1348  IgnoreUnused(name);
1349 
1350  auto stackBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Stack);
1351 
1352  std::vector<unsigned int> inputShape;
1353  for (unsigned int i =0; i < stackDescriptor.m_InputShape.GetNumDimensions(); i++)
1354  {
1355  inputShape.push_back(stackDescriptor.m_InputShape[i]);
1356  }
1357 
1358  auto flatBufferStackDescriptor = CreateStackDescriptor(m_flatBufferBuilder,
1359  stackDescriptor.m_Axis,
1360  stackDescriptor.m_NumInputs,
1361  m_flatBufferBuilder.CreateVector(inputShape));
1362 
1363  auto stackLayer = serializer::CreateStackLayer(m_flatBufferBuilder, stackBaseLayer, flatBufferStackDescriptor);
1364  CreateAnyLayer(stackLayer.o, serializer::Layer::Layer_StackLayer);
1365 }
1366 
1367 void SerializerStrategy::SerializeStandInLayer(const armnn::IConnectableLayer *layer,
1368  const armnn::StandInDescriptor& standInDescriptor,
1369  const char *name)
1370 {
1371  IgnoreUnused(name);
1372 
1373  auto fbDescriptor = serializer::CreateStandInDescriptor(m_flatBufferBuilder,
1374  standInDescriptor.m_NumInputs,
1375  standInDescriptor.m_NumOutputs);
1376 
1377  auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_StandIn);
1378  auto fbLayer = serializer::CreateStandInLayer(m_flatBufferBuilder, fbBaseLayer, fbDescriptor);
1379 
1380  CreateAnyLayer(fbLayer.o, serializer::Layer::Layer_StandInLayer);
1381 }
1382 
1383 void SerializerStrategy::SerializeStridedSliceLayer(const armnn::IConnectableLayer* layer,
1384  const armnn::StridedSliceDescriptor& stridedSliceDescriptor,
1385  const char* name)
1386 {
1387  IgnoreUnused(name);
1388 
1389  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_StridedSlice);
1390 
1391  auto flatBufferDescriptor =
1392  CreateStridedSliceDescriptor(m_flatBufferBuilder,
1393  m_flatBufferBuilder.CreateVector(stridedSliceDescriptor.m_Begin),
1394  m_flatBufferBuilder.CreateVector(stridedSliceDescriptor.m_End),
1395  m_flatBufferBuilder.CreateVector(stridedSliceDescriptor.m_Stride),
1396  stridedSliceDescriptor.m_BeginMask,
1397  stridedSliceDescriptor.m_EndMask,
1398  stridedSliceDescriptor.m_ShrinkAxisMask,
1399  stridedSliceDescriptor.m_EllipsisMask,
1400  stridedSliceDescriptor.m_NewAxisMask,
1401  GetFlatBufferDataLayout(stridedSliceDescriptor.m_DataLayout));
1402 
1403  auto flatBufferLayer = serializer::CreateStridedSliceLayer(m_flatBufferBuilder,
1404  flatBufferBaseLayer,
1405  flatBufferDescriptor);
1406 
1407  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_StridedSliceLayer);
1408 }
1409 
1410 void SerializerStrategy::SerializeSubtractionLayer(const armnn::IConnectableLayer* layer, const char* name)
1411 {
1412  IgnoreUnused(name);
1413 
1414  auto fbSubtractionBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Subtraction);
1415  auto fbSubtractionLayer = serializer::CreateSubtractionLayer(m_flatBufferBuilder, fbSubtractionBaseLayer);
1416 
1417  CreateAnyLayer(fbSubtractionLayer.o, serializer::Layer::Layer_SubtractionLayer);
1418 }
1419 
1420 void SerializerStrategy::SerializeSwitchLayer(const armnn::IConnectableLayer* layer, const char* name)
1421 {
1422  IgnoreUnused(name);
1423 
1424  auto fbSwitchBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Switch);
1425  auto fbSwitchLayer = serializer::CreateSwitchLayer(m_flatBufferBuilder, fbSwitchBaseLayer);
1426 
1427  CreateAnyLayer(fbSwitchLayer.o, serializer::Layer::Layer_SwitchLayer);
1428 }
1429 
1430 void SerializerStrategy::SerializeTransposeConvolution2dLayer(
1431  const armnn::IConnectableLayer* layer,
1432  const armnn::TransposeConvolution2dDescriptor& descriptor,
1433  const std::vector<armnn::ConstTensor>& constants,
1434  const char* name)
1435 {
1436  IgnoreUnused(name);
1437 
1438  const armnn::ConstTensor& weights = constants.at(0);
1439 
1440  auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Convolution2d);
1441  auto fbDescriptor = CreateTransposeConvolution2dDescriptor(m_flatBufferBuilder,
1442  descriptor.m_PadLeft,
1443  descriptor.m_PadRight,
1444  descriptor.m_PadTop,
1445  descriptor.m_PadBottom,
1446  descriptor.m_StrideX,
1447  descriptor.m_StrideY,
1448  descriptor.m_BiasEnabled,
1449  GetFlatBufferDataLayout(descriptor.m_DataLayout));
1450 
1451  // weights & biases
1452  auto fbWeightsConstTensorInfo = CreateConstTensorInfo(weights);
1453  flatbuffers::Offset<serializer::ConstTensor> fbBiasesConstTensorInfo;
1454  if (constants.size() > 1)
1455  {
1456  const armnn::ConstTensor& biases = constants.at(1);
1457  fbBiasesConstTensorInfo = CreateConstTensorInfo(biases);
1458  }
1459 
1460  auto fbLayer = CreateTransposeConvolution2dLayer(m_flatBufferBuilder,
1461  fbBaseLayer,
1462  fbDescriptor,
1463  fbWeightsConstTensorInfo,
1464  fbBiasesConstTensorInfo);
1465 
1466  CreateAnyLayer(fbLayer.o, serializer::Layer::Layer_TransposeConvolution2dLayer);
1467 }
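// Note on the optional bias above: when the layer carries no bias
// (constants.size() == 1), fbBiasesConstTensorInfo is left as a default (zero)
// offset, so no bias tensor is written into the FlatBuffer for this layer.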
1468 
1469 void SerializerStrategy::SerializeTransposeLayer(const armnn::IConnectableLayer* layer,
1470  const armnn::TransposeDescriptor& descriptor,
1471  const char* name)
1472 {
1473  IgnoreUnused(name);
1474 
1475  // Create FlatBuffer BaseLayer
1476  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Transpose);
1477 
1478  std::vector<unsigned int> dimMappings;
1479  for (unsigned int i=0; i<descriptor.m_DimMappings.GetSize(); ++i)
1480  {
1481  dimMappings.push_back(descriptor.m_DimMappings[i]);
1482  }
1483 
1484  auto flatBufferDesc = serializer::CreateTransposeDescriptor(m_flatBufferBuilder,
1485  m_flatBufferBuilder.CreateVector(dimMappings));
1486 
1487  // Create the FlatBuffer TransposeLayer
1488  auto flatBufferLayer = serializer::CreateTransposeLayer(m_flatBufferBuilder,
1489  flatBufferBaseLayer,
1490  flatBufferDesc);
1491 
1492  // Add the AnyLayer to the FlatBufferLayers
1493  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_TransposeLayer);
1494 }
1495 
1496 void SerializerStrategy::SerializeQLstmLayer(const armnn::IConnectableLayer* layer,
1497  const armnn::QLstmDescriptor& descriptor,
1498  const std::vector<armnn::ConstTensor>& constants,
1499  const char* name)
1500 {
1501  IgnoreUnused(name);
1502 
1503  auto fbQLstmBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_QLstm);
1504 
1505  auto fbQLstmDescriptor = serializer::CreateQLstmDescriptor(
1506  m_flatBufferBuilder,
1507  descriptor.m_CifgEnabled,
1508  descriptor.m_PeepholeEnabled,
1509  descriptor.m_ProjectionEnabled,
1510  descriptor.m_LayerNormEnabled,
1511  descriptor.m_CellClip,
1512  descriptor.m_ProjectionClip,
1513  descriptor.m_InputIntermediateScale,
1514  descriptor.m_ForgetIntermediateScale,
1515  descriptor.m_CellIntermediateScale,
1516  descriptor.m_OutputIntermediateScale,
1517  descriptor.m_HiddenStateZeroPoint,
1518  descriptor.m_HiddenStateScale
1519  );
1520 
1521  // Index for constants vector
1522  std::size_t i = 0;
1523 
1524  // Mandatory params
1525  auto inputToForgetWeights = CreateConstTensorInfo(constants[i++]); //InputToForgetWeights
1526  auto inputToCellWeights = CreateConstTensorInfo(constants[i++]); //InputToCellWeights
1527  auto inputToOutputWeights = CreateConstTensorInfo(constants[i++]); //InputToOutputWeights
1528  auto recurrentToForgetWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToForgetWeights
1529  auto recurrentToCellWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToCellWeights
1530  auto recurrentToOutputWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToOutputWeights
1531  auto forgetGateBias = CreateConstTensorInfo(constants[i++]); //ForgetGateBias
1532  auto cellBias = CreateConstTensorInfo(constants[i++]); //CellBias
1533  auto outputGateBias = CreateConstTensorInfo(constants[i++]); //OutputGateBias
1534 
1535  // CIFG
1536  flatbuffers::Offset<serializer::ConstTensor> inputToInputWeights;
1537  flatbuffers::Offset<serializer::ConstTensor> recurrentToInputWeights;
1538  flatbuffers::Offset<serializer::ConstTensor> inputGateBias;
1539 
1540  if (!descriptor.m_CifgEnabled)
1541  {
1542  inputToInputWeights = CreateConstTensorInfo(constants[i++]); //InputToInputWeights
1543  recurrentToInputWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToInputWeights
1544  inputGateBias = CreateConstTensorInfo(constants[i++]); //InputGateBias
1545  }
1546 
1547  // Peephole
1548  flatbuffers::Offset<serializer::ConstTensor> cellToInputWeights;
1549  flatbuffers::Offset<serializer::ConstTensor> cellToForgetWeights;
1550  flatbuffers::Offset<serializer::ConstTensor> cellToOutputWeights;
1551 
1552  if (descriptor.m_PeepholeEnabled)
1553  {
1554  if (!descriptor.m_CifgEnabled)
1555  {
1556  cellToInputWeights = CreateConstTensorInfo(constants[i++]); //CellToInputWeights
1557  }
1558  cellToForgetWeights = CreateConstTensorInfo(constants[i++]); //CellToForgetWeights
1559  cellToOutputWeights = CreateConstTensorInfo(constants[i++]); //CellToOutputWeights
1560  }
1561 
1562  // Projection
1563  flatbuffers::Offset<serializer::ConstTensor> projectionWeights;
1564  flatbuffers::Offset<serializer::ConstTensor> projectionBias;
1565 
1566  if (descriptor.m_ProjectionEnabled)
1567  {
1568  projectionWeights = CreateConstTensorInfo(constants[i++]); //ProjectionWeights
1569  projectionBias = CreateConstTensorInfo(constants[i++]); //ProjectionBias
1570  }
1571 
1572  // Layer norm
1573  flatbuffers::Offset<serializer::ConstTensor> inputLayerNormWeights;
1574  flatbuffers::Offset<serializer::ConstTensor> forgetLayerNormWeights;
1575  flatbuffers::Offset<serializer::ConstTensor> cellLayerNormWeights;
1576  flatbuffers::Offset<serializer::ConstTensor> outputLayerNormWeights;
1577 
1578  if (descriptor.m_LayerNormEnabled)
1579  {
1580  if (!descriptor.m_CifgEnabled)
1581  {
1582  inputLayerNormWeights = CreateConstTensorInfo(constants[i++]); //InputLayerNormWeights
1583  }
1584  forgetLayerNormWeights = CreateConstTensorInfo(constants[i++]); //ForgetLayerNormWeights
1585  cellLayerNormWeights = CreateConstTensorInfo(constants[i++]); //CellLayerNormWeights
1586  outputLayerNormWeights = CreateConstTensorInfo(constants[i++]); //OutputLayerNormWeights
1587  }
1588 
1589  auto fbQLstmParams = serializer::CreateQLstmInputParams(
1590  m_flatBufferBuilder,
1591  inputToForgetWeights,
1592  inputToCellWeights,
1593  inputToOutputWeights,
1594  recurrentToForgetWeights,
1595  recurrentToCellWeights,
1596  recurrentToOutputWeights,
1597  forgetGateBias,
1598  cellBias,
1599  outputGateBias,
1600  inputToInputWeights,
1601  recurrentToInputWeights,
1602  inputGateBias,
1603  projectionWeights,
1604  projectionBias,
1605  cellToInputWeights,
1606  cellToForgetWeights,
1607  cellToOutputWeights,
1608  inputLayerNormWeights,
1609  forgetLayerNormWeights,
1610  cellLayerNormWeights,
1611  outputLayerNormWeights);
1612 
1613  auto fbQLstmLayer = serializer::CreateQLstmLayer(
1614  m_flatBufferBuilder,
1615  fbQLstmBaseLayer,
1616  fbQLstmDescriptor,
1617  fbQLstmParams);
1618 
1619  CreateAnyLayer(fbQLstmLayer.o, serializer::Layer::Layer_QLstmLayer);
1620 }
1621 
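As a cross-check of the constants layout consumed above, here is a minimal sketch (not part of this file; the helper name is hypothetical) of how the expected size of the constants vector follows directly from the QLstmDescriptor flags:

#include <armnn/Descriptors.hpp>
#include <cstddef>

// Hypothetical helper mirroring the index arithmetic used by SerializeQLstmLayer above.
std::size_t ExpectedQLstmConstantCount(const armnn::QLstmDescriptor& d)
{
    std::size_t count = 9;                                            // mandatory weights and biases
    if (!d.m_CifgEnabled)      { count += 3; }                        // input gate weights and bias
    if (d.m_PeepholeEnabled)   { count += d.m_CifgEnabled ? 2 : 3; }  // cell-to-gate (peephole) weights
    if (d.m_ProjectionEnabled) { count += 2; }                        // projection weights and bias
    if (d.m_LayerNormEnabled)  { count += d.m_CifgEnabled ? 3 : 4; }  // layer normalization weights
    return count;
}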
1622 void SerializerStrategy::SerializeQuantizedLstmLayer(const armnn::IConnectableLayer* layer,
1623  const std::vector<armnn::ConstTensor>& constants,
1624  const char* name)
1625 {
1626  IgnoreUnused(name);
1627 
1628  auto fbQuantizedLstmBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_QuantizedLstm);
1629 
1630  // index for constants vector
1631  size_t i = 0;
1632 
1633  // Get input parameters
1634  auto inputToInputWeights = CreateConstTensorInfo(constants[i++]);
1635  auto inputToForgetWeights = CreateConstTensorInfo(constants[i++]);
1636  auto inputToCellWeights = CreateConstTensorInfo(constants[i++]);
1637  auto inputToOutputWeights = CreateConstTensorInfo(constants[i++]);
1638 
1639  auto recurrentToInputWeights = CreateConstTensorInfo(constants[i++]);
1640  auto recurrentToForgetWeights = CreateConstTensorInfo(constants[i++]);
1641  auto recurrentToCellWeights = CreateConstTensorInfo(constants[i++]);
1642  auto recurrentToOutputWeights = CreateConstTensorInfo(constants[i++]);
1643 
1644  auto inputGateBias = CreateConstTensorInfo(constants[i++]);
1645  auto forgetGateBias = CreateConstTensorInfo(constants[i++]);
1646  auto cellBias = CreateConstTensorInfo(constants[i++]);
1647  auto outputGateBias = CreateConstTensorInfo(constants[i++]);
1648 
1649  auto fbQuantizedLstmParams = serializer::CreateQuantizedLstmInputParams(
1650  m_flatBufferBuilder,
1651  inputToInputWeights,
1652  inputToForgetWeights,
1653  inputToCellWeights,
1654  inputToOutputWeights,
1655  recurrentToInputWeights,
1656  recurrentToForgetWeights,
1657  recurrentToCellWeights,
1658  recurrentToOutputWeights,
1659  inputGateBias,
1660  forgetGateBias,
1661  cellBias,
1662  outputGateBias);
1663 
1664  auto fbQuantizedLstmLayer = serializer::CreateQuantizedLstmLayer(
1665  m_flatBufferBuilder,
1666  fbQuantizedLstmBaseLayer,
1667  fbQuantizedLstmParams);
1668 
1669  CreateAnyLayer(fbQuantizedLstmLayer.o, serializer::Layer::Layer_QuantizedLstmLayer);
1670 }
1671 
1672 void SerializerStrategy::SerializeUnidirectionalSequenceLstmLayer(
1673  const armnn::IConnectableLayer* layer,
1674  const armnn::UnidirectionalSequenceLstmDescriptor& descriptor,
1675  const std::vector<armnn::ConstTensor>& constants,
1676  const char* name)
1677 {
1678  IgnoreUnused(name);
1679 
1680  auto fbUnidirectionalSequenceLstmBaseLayer =
1681  CreateLayerBase(layer, serializer::LayerType::LayerType_UnidirectionalSequenceLstm);
1682 
1683  auto fbUnidirectionalSequenceLstmDescriptor = serializer::CreateUnidirectionalSequenceLstmDescriptor(
1684  m_flatBufferBuilder,
1685  descriptor.m_ActivationFunc,
1686  descriptor.m_ClippingThresCell,
1687  descriptor.m_ClippingThresProj,
1688  descriptor.m_CifgEnabled,
1689  descriptor.m_PeepholeEnabled,
1690  descriptor.m_ProjectionEnabled,
1691  descriptor.m_LayerNormEnabled,
1692  descriptor.m_TimeMajor);
1693 
1694  // Index for constants vector
1695  std::size_t i = 0;
1696 
1697  // Get mandatory/basic input parameters
1698  auto inputToForgetWeights = CreateConstTensorInfo(constants[i++]); //InputToForgetWeights
1699  auto inputToCellWeights = CreateConstTensorInfo(constants[i++]); //InputToCellWeights
1700  auto inputToOutputWeights = CreateConstTensorInfo(constants[i++]); //InputToOutputWeights
1701  auto recurrentToForgetWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToForgetWeights
1702  auto recurrentToCellWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToCellWeights
1703  auto recurrentToOutputWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToOutputWeights
1704  auto forgetGateBias = CreateConstTensorInfo(constants[i++]); //ForgetGateBias
1705  auto cellBias = CreateConstTensorInfo(constants[i++]); //CellBias
1706  auto outputGateBias = CreateConstTensorInfo(constants[i++]); //OutputGateBias
1707 
1708  // Define optional parameters; these will be set depending on the configuration in the Lstm descriptor
1709  flatbuffers::Offset<serializer::ConstTensor> inputToInputWeights;
1710  flatbuffers::Offset<serializer::ConstTensor> recurrentToInputWeights;
1711  flatbuffers::Offset<serializer::ConstTensor> cellToInputWeights;
1712  flatbuffers::Offset<serializer::ConstTensor> inputGateBias;
1713  flatbuffers::Offset<serializer::ConstTensor> projectionWeights;
1714  flatbuffers::Offset<serializer::ConstTensor> projectionBias;
1715  flatbuffers::Offset<serializer::ConstTensor> cellToForgetWeights;
1716  flatbuffers::Offset<serializer::ConstTensor> cellToOutputWeights;
1717  flatbuffers::Offset<serializer::ConstTensor> inputLayerNormWeights;
1718  flatbuffers::Offset<serializer::ConstTensor> forgetLayerNormWeights;
1719  flatbuffers::Offset<serializer::ConstTensor> cellLayerNormWeights;
1720  flatbuffers::Offset<serializer::ConstTensor> outputLayerNormWeights;
1721 
1722  if (!descriptor.m_CifgEnabled)
1723  {
1724  inputToInputWeights = CreateConstTensorInfo(constants[i++]); //InputToInputWeights
1725  recurrentToInputWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToInputWeights
1726  inputGateBias = CreateConstTensorInfo(constants[i++]); //InputGateBias
1727  }
1728 
1729  if (descriptor.m_PeepholeEnabled)
1730  {
1731  if (!descriptor.m_CifgEnabled)
1732  {
1733  cellToInputWeights = CreateConstTensorInfo(constants[i++]); //CellToInputWeights
1734  }
1735  cellToForgetWeights = CreateConstTensorInfo(constants[i++]); //CellToForgetWeights
1736  cellToOutputWeights = CreateConstTensorInfo(constants[i++]); //CellToOutputWeights
1737  }
1738 
1739  if (descriptor.m_ProjectionEnabled)
1740  {
1741  projectionWeights = CreateConstTensorInfo(constants[i++]); //ProjectionWeights
1742  projectionBias = CreateConstTensorInfo(constants[i++]); //ProjectionBias
1743  }
1744 
1745  if (descriptor.m_LayerNormEnabled)
1746  {
1747  if (!descriptor.m_CifgEnabled)
1748  {
1749  inputLayerNormWeights = CreateConstTensorInfo(constants[i++]); //InputLayerNormWeights
1750  }
1751  forgetLayerNormWeights = CreateConstTensorInfo(constants[i++]); //ForgetLayerNormWeights
1752  cellLayerNormWeights = CreateConstTensorInfo(constants[i++]); //CellLayerNormWeights
1753  outputLayerNormWeights = CreateConstTensorInfo(constants[i++]); //OutputLayerNormWeights
1754  }
1755 
1756  auto fbUnidirectionalSequenceLstmParams = serializer::CreateLstmInputParams(
1757  m_flatBufferBuilder,
1758  inputToForgetWeights,
1759  inputToCellWeights,
1760  inputToOutputWeights,
1761  recurrentToForgetWeights,
1762  recurrentToCellWeights,
1763  recurrentToOutputWeights,
1764  forgetGateBias,
1765  cellBias,
1766  outputGateBias,
1767  inputToInputWeights,
1768  recurrentToInputWeights,
1769  cellToInputWeights,
1770  inputGateBias,
1771  projectionWeights,
1772  projectionBias,
1773  cellToForgetWeights,
1774  cellToOutputWeights,
1775  inputLayerNormWeights,
1776  forgetLayerNormWeights,
1777  cellLayerNormWeights,
1778  outputLayerNormWeights);
1779 
1780  auto fbUnidirectionalSequenceLstmLayer = serializer::CreateUnidirectionalSequenceLstmLayer(
1781  m_flatBufferBuilder,
1782  fbUnidirectionalSequenceLstmBaseLayer,
1783  fbUnidirectionalSequenceLstmDescriptor,
1784  fbUnidirectionalSequenceLstmParams);
1785 
1786  CreateAnyLayer(fbUnidirectionalSequenceLstmLayer.o, serializer::Layer::Layer_UnidirectionalSequenceLstmLayer);
1787 }
1788 
1789 fb::Offset<serializer::LayerBase> SerializerStrategy::CreateLayerBase(const IConnectableLayer* layer,
1790  const serializer::LayerType layerType)
1791 {
1792 
1793  uint32_t fbIndex = GetSerializedId(layer->GetGuid());
1794 
1795  std::vector<fb::Offset<serializer::InputSlot>> inputSlots = CreateInputSlots(layer);
1796  std::vector<fb::Offset<serializer::OutputSlot>> outputSlots = CreateOutputSlots(layer);
1797 
1798  return serializer::CreateLayerBase(m_flatBufferBuilder,
1799  fbIndex,
1800  m_flatBufferBuilder.CreateString(layer->GetName()),
1801  layerType,
1802  m_flatBufferBuilder.CreateVector(inputSlots),
1803  m_flatBufferBuilder.CreateVector(outputSlots));
1804 }
1805 
1806 void SerializerStrategy::CreateAnyLayer(const flatbuffers::Offset<void>& layer, const serializer::Layer serializerLayer)
1807 {
1808 
1809  auto anyLayer = armnnSerializer::CreateAnyLayer(m_flatBufferBuilder, serializerLayer, layer);
1810  m_serializedLayers.push_back(anyLayer);
1811 }
1812 
1813 template <typename T>
1814 flatbuffers::Offset<flatbuffers::Vector<T>> SerializerStrategy::CreateDataVector(const void* memory, unsigned int size)
1815 {
1816  const T* buffer = reinterpret_cast<const T*>(memory);
1817  std::vector<T> vector(buffer, buffer + (size / sizeof(T)));
1818  auto fbVector = m_flatBufferBuilder.CreateVector(vector);
1819  return fbVector;
1820 }
1821 
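For illustration, a small sketch (not part of this file): CreateDataVector simply reinterprets the tensor's backing memory at the requested element width, so a 4-byte-per-element payload is captured as raw bit patterns.

#include <cstdint>
#include <vector>

std::vector<std::int32_t> PackFloatBits()
{
    float data[2] = { 1.5f, -2.0f };                                 // e.g. two Float32 weight values
    const auto* bits = reinterpret_cast<const std::int32_t*>(data);  // same reinterpretation as CreateDataVector<int32_t>
    return std::vector<std::int32_t>(bits, bits + 2);                // float bit patterns stored verbatim
}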
1822 flatbuffers::Offset<TensorInfo> SerializerStrategy::CreateTensorInfo(const armnn::TensorInfo& tensorInfo)
1823 {
1824  // Get the dimensions
1825  std::vector<unsigned int> shape;
1826  std::vector<bool> specificity;
1827  // This assumes that the TensorShape constructors have ensured that the size of m_DimensionsSpecificity
1828  // matches the size of dimensions.
1829  for(unsigned int dim = 0; dim < tensorInfo.GetShape().GetNumDimensions(); ++dim)
1830  {
1831  specificity.push_back(tensorInfo.GetShape().GetDimensionSpecificity(dim));
1832 
1833  if (tensorInfo.GetShape().GetDimensionSpecificity(dim))
1834  {
1835  shape.push_back(tensorInfo.GetShape()[dim]);
1836  }
1837  else
1838  {
1839  shape.push_back(0);
1840  }
1841  }
1842 
1843  if (tensorInfo.HasPerAxisQuantization())
1844  {
1845  // Create FlatBuffer TensorInfo
1846  auto flatBufferTensorInfo =
1847  serializer::CreateTensorInfo(m_flatBufferBuilder,
1848  m_flatBufferBuilder.CreateVector(shape),
1849  GetFlatBufferDataType(tensorInfo.GetDataType()),
1850  tensorInfo.GetQuantizationScales()[0],
1851  tensorInfo.GetQuantizationOffset(),
1852  m_flatBufferBuilder.CreateVector(tensorInfo.GetQuantizationScales()),
1853  tensorInfo.GetQuantizationDim().value(),
1854  static_cast<unsigned int>
1855  (tensorInfo.GetShape().GetDimensionality()),
1856  m_flatBufferBuilder.CreateVector(specificity));
1857  return flatBufferTensorInfo;
1858  }
1859 
1860  // Create FlatBuffer TensorInfo
1861  auto flatBufferTensorInfo = serializer::CreateTensorInfo(m_flatBufferBuilder,
1862  m_flatBufferBuilder.CreateVector(shape),
1863  GetFlatBufferDataType(tensorInfo.GetDataType()),
1864  tensorInfo.GetQuantizationScale(),
1865  tensorInfo.GetQuantizationOffset(),
1866  0,
1867  0,
1868  static_cast<unsigned int>
1869  (tensorInfo.GetShape().GetDimensionality()),
1870  m_flatBufferBuilder.CreateVector(specificity));
1871  return flatBufferTensorInfo;
1872 }
1873 
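For example, a sketch (not part of this file, assuming the standard armnn::TensorInfo setters): a tensor quantised per output channel takes the HasPerAxisQuantization() branch above, so its full scale vector and quantization dimension are serialized instead of a single scale.

#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>

armnn::TensorInfo MakePerAxisWeightsInfo()
{
    armnn::TensorInfo info(armnn::TensorShape({ 8, 3, 3, 3 }), armnn::DataType::QSymmS8);
    info.SetQuantizationScales({ 0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.6f, 0.7f, 0.8f }); // one scale per output channel
    info.SetQuantizationDim(0u);                                                    // HasPerAxisQuantization() now returns true
    return info;
}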
1874 flatbuffers::Offset<serializer::ConstTensor>
1875  SerializerStrategy::CreateConstTensorInfo(const armnn::ConstTensor& constTensor)
1876 {
1877  armnn::TensorInfo tensorInfo = constTensor.GetInfo();
1878 
1879  flatbuffers::Offset<void> fbPayload;
1880 
1881  switch (tensorInfo.GetDataType())
1882  {
1883  case armnn::DataType::Signed64:
1884  {
1885  auto fbVector = CreateDataVector<int64_t>(constTensor.GetMemoryArea(), constTensor.GetNumBytes());
1886  flatbuffers::Offset<serializer::LongData> flatBuffersData = serializer::CreateLongData(
1887  m_flatBufferBuilder,
1888  fbVector);
1889  fbPayload = flatBuffersData.o;
1890  break;
1891  }
1892  case armnn::DataType::Float32:
1893  case armnn::DataType::Signed32:
1894  {
1895  auto fbVector = CreateDataVector<int32_t>(constTensor.GetMemoryArea(), constTensor.GetNumBytes());
1896  flatbuffers::Offset<serializer::IntData> flatBuffersData = serializer::CreateIntData(
1897  m_flatBufferBuilder,
1898  fbVector);
1899  fbPayload = flatBuffersData.o;
1900  break;
1901  }
1902  case armnn::DataType::Float16:
1903  case armnn::DataType::BFloat16:
1904  case armnn::DataType::QSymmS16:
1905  {
1906  auto fbVector = CreateDataVector<int16_t>(constTensor.GetMemoryArea(), constTensor.GetNumBytes());
1907  flatbuffers::Offset<serializer::ShortData> flatBuffersData = serializer::CreateShortData(
1908  m_flatBufferBuilder,
1909  fbVector);
1910  fbPayload = flatBuffersData.o;
1911  break;
1912  }
1913  case armnn::DataType::QSymmS8:
1914  case armnn::DataType::QAsymmS8:
1915  case armnn::DataType::QAsymmU8:
1916  case armnn::DataType::Boolean:
1917  default:
1918  {
1919  auto fbVector = CreateDataVector<int8_t>(constTensor.GetMemoryArea(), constTensor.GetNumBytes());
1920  flatbuffers::Offset<serializer::ByteData> flatBuffersData = serializer::CreateByteData(
1921  m_flatBufferBuilder,
1922  fbVector);
1923  fbPayload = flatBuffersData.o;
1924  }
1925  }
1926  flatbuffers::Offset<serializer::ConstTensor> flatBufferConstTensor = serializer::CreateConstTensor(
1927  m_flatBufferBuilder,
1928  CreateTensorInfo(tensorInfo),
1929  GetFlatBufferConstTensorData(tensorInfo.GetDataType()),
1930  fbPayload);
1931  return flatBufferConstTensor;
1932 }
1933 
1934 flatbuffers::Offset<armnnSerializer::FeatureCompatibilityVersions> SerializerStrategy::GetVersionTable()
1935 {
1936  flatbuffers::Offset<armnnSerializer::FeatureCompatibilityVersions> versionsTable =
1937  serializer::CreateFeatureCompatibilityVersions(
1938  m_flatBufferBuilder,
1939  1, // Binding ids scheme version
1940  1, // Weights layout scheme version
1941  1 // Constant tensors as inputs version
1942  );
1943  return versionsTable;
1944 }
1945 
1946 std::vector<fb::Offset<serializer::InputSlot>>
1947  SerializerStrategy::CreateInputSlots(const armnn::IConnectableLayer* layer)
1948 {
1949  std::vector<fb::Offset<serializer::InputSlot>> inputSlots;
1950 
1951  // Get the InputSlots
1952  for (unsigned int slotIndex = 0; slotIndex<layer->GetNumInputSlots(); ++slotIndex)
1953  {
1954  const IInputSlot& inputSlot = layer->GetInputSlot(slotIndex);
1955 
1956  // Get the Connection for the InputSlot
1957  const IOutputSlot* connection = inputSlot.GetConnection();
1958 
1959  // Create FlatBuffer Connection
1960  serializer::Connection conn(GetSerializedId(inputSlot.GetConnection()->GetOwningLayerGuid()),
1961  connection->CalculateIndexOnOwner());
1962  // Create FlatBuffer InputSlot
1963  inputSlots.push_back(serializer::CreateInputSlot(m_flatBufferBuilder, slotIndex, &conn));
1964  }
1965  return inputSlots;
1966 }
1967 
1968 std::vector<fb::Offset<serializer::OutputSlot>>
1969  SerializerStrategy::CreateOutputSlots(const armnn::IConnectableLayer* layer)
1970 {
1971  std::vector<fb::Offset<serializer::OutputSlot>> outputSlots;
1972 
1973  // Get the OutputSlots
1974  for (unsigned int slotIndex = 0; slotIndex < layer->GetNumOutputSlots(); ++slotIndex)
1975  {
1976  const IOutputSlot& outputSlot = layer->GetOutputSlot(slotIndex);
1977  const armnn::TensorInfo& tensorInfo = outputSlot.GetTensorInfo();
1978 
1979  // Create FlatBuffer Outputslot
1980  outputSlots.push_back(serializer::CreateOutputSlot(m_flatBufferBuilder,
1981  slotIndex,
1982  CreateTensorInfo(tensorInfo)));
1983  }
1984  return outputSlots;
1985 }
1986 
1987 void SerializerStrategy::ExecuteStrategy(const armnn::IConnectableLayer* layer,
1988  const BaseDescriptor& descriptor,
1989  const std::vector<armnn::ConstTensor>& constants,
1990  const char* name,
1991  const armnn::LayerBindingId id)
1992 {
1993  IgnoreUnused(constants);
1994 
1995  switch (layer->GetType())
1996  {
1997  case armnn::LayerType::Activation :
1998  {
1999  const armnn::ActivationDescriptor& layerDescriptor =
2000  static_cast<const armnn::ActivationDescriptor&>(descriptor);
2001  SerializeActivationLayer(layer, layerDescriptor, name);
2002  break;
2003  }
2004  case armnn::LayerType::Addition :
2005  {
2006  SerializeAdditionLayer(layer, name);
2007  break;
2008  }
2009  case armnn::LayerType::ArgMinMax :
2010  {
2011  const armnn::ArgMinMaxDescriptor& layerDescriptor =
2012  static_cast<const armnn::ArgMinMaxDescriptor&>(descriptor);
2013  SerializeArgMinMaxLayer(layer, layerDescriptor, name);
2014  break;
2015  }
2016  case armnn::LayerType::BatchMatMul :
2017  {
2018  const armnn::BatchMatMulDescriptor& layerDescriptor =
2019  static_cast<const armnn::BatchMatMulDescriptor&>(descriptor);
2020  SerializeBatchMatMulLayer(layer,
2021  layerDescriptor,
2022  name);
2023  break;
2024  }
2025  case armnn::LayerType::BatchNormalization :
2026  {
2027  const armnn::BatchNormalizationDescriptor& layerDescriptor =
2028  static_cast<const armnn::BatchNormalizationDescriptor&>(descriptor);
2029  SerializeBatchNormalizationLayer(layer,
2030  layerDescriptor,
2031  constants,
2032  name);
2033  break;
2034  }
2035  case armnn::LayerType::BatchToSpaceNd :
2036  {
2037  const armnn::BatchToSpaceNdDescriptor& layerDescriptor =
2038  static_cast<const armnn::BatchToSpaceNdDescriptor&>(descriptor);
2039  SerializeBatchToSpaceNdLayer(layer,
2040  layerDescriptor,
2041  name);
2042  break;
2043  }
2044  case armnn::LayerType::Cast :
2045  {
2046  SerializeCastLayer(layer, name);
2047  break;
2048  }
2049  case armnn::LayerType::ChannelShuffle :
2050  {
2051  const armnn::ChannelShuffleDescriptor& layerDescriptor =
2052  static_cast<const armnn::ChannelShuffleDescriptor&>(descriptor);
2053  SerializeChannelShuffleLayer(layer,
2054  layerDescriptor,
2055  name);
2056  break;
2057  }
2058  case armnn::LayerType::Comparison :
2059  {
2060  const armnn::ComparisonDescriptor& layerDescriptor =
2061  static_cast<const armnn::ComparisonDescriptor&>(descriptor);
2062  SerializeComparisonLayer(layer,
2063  layerDescriptor,
2064  name);
2065  break;
2066  }
2067  case armnn::LayerType::Concat :
2068  {
2069  const armnn::ConcatDescriptor& layerDescriptor =
2070  static_cast<const armnn::ConcatDescriptor&>(descriptor);
2071  SerializeConcatLayer(layer,
2072  layerDescriptor,
2073  name);
2074  break;
2075  }
2076  case armnn::LayerType::Constant :
2077  {
2078  SerializeConstantLayer(layer,
2079  constants,
2080  name);
2081  break;
2082  }
2083  case armnn::LayerType::Convolution2d :
2084  {
2085  const armnn::Convolution2dDescriptor& layerDescriptor =
2086  static_cast<const armnn::Convolution2dDescriptor&>(descriptor);
2087  SerializeConvolution2dLayer(layer,
2088  layerDescriptor,
2089  name);
2090  break;
2091  }
2092  case armnn::LayerType::Convolution3d :
2093  {
2094  const armnn::Convolution3dDescriptor& layerDescriptor =
2095  static_cast<const armnn::Convolution3dDescriptor&>(descriptor);
2096  SerializeConvolution3dLayer(layer,
2097  layerDescriptor,
2098  name);
2099  break;
2100  }
2101  case armnn::LayerType::DepthToSpace :
2102  {
2103  const armnn::DepthToSpaceDescriptor& layerDescriptor =
2104  static_cast<const armnn::DepthToSpaceDescriptor&>(descriptor);
2105  SerializeDepthToSpaceLayer(layer,
2106  layerDescriptor,
2107  name);
2108  break;
2109  }
2110  case armnn::LayerType::DepthwiseConvolution2d :
2111  {
2112  const armnn::DepthwiseConvolution2dDescriptor& layerDescriptor =
2113  static_cast<const armnn::DepthwiseConvolution2dDescriptor&>(descriptor);
2114  SerializeDepthwiseConvolution2dLayer(layer,
2115  layerDescriptor,
2116  name);
2117  break;
2118  }
2119  case armnn::LayerType::Dequantize :
2120  {
2121  SerializeDequantizeLayer(layer,
2122  name);
2123  break;
2124  }
2125  case armnn::LayerType::DetectionPostProcess :
2126  {
2127  const armnn::DetectionPostProcessDescriptor& layerDescriptor =
2128  static_cast<const armnn::DetectionPostProcessDescriptor&>(descriptor);
2129  SerializeDetectionPostProcessLayer(layer, layerDescriptor, constants, name);
2130  break;
2131  }
2132  case armnn::LayerType::Division :
2133  {
2134  SerializeDivisionLayer(layer, name);
2135  break;
2136  }
2137  case armnn::LayerType::ElementwiseBinary :
2138  {
2139  const armnn::ElementwiseBinaryDescriptor& layerDescriptor =
2140  static_cast<const armnn::ElementwiseBinaryDescriptor&>(descriptor);
2141  SerializeElementwiseBinaryLayer(layer, layerDescriptor, name);
2142  break;
2143  }
2144  case armnn::LayerType::ElementwiseUnary :
2145  {
2146  const armnn::ElementwiseUnaryDescriptor& layerDescriptor =
2147  static_cast<const armnn::ElementwiseUnaryDescriptor&>(descriptor);
2148  SerializeElementwiseUnaryLayer(layer, layerDescriptor, name);
2149  break;
2150  }
2151  case armnn::LayerType::Fill :
2152  {
2153  const armnn::FillDescriptor& layerDescriptor =
2154  static_cast<const armnn::FillDescriptor&>(descriptor);
2155  SerializeFillLayer(layer, layerDescriptor, name);
2156  break;
2157  }
2158  case armnn::LayerType::Floor :
2159  {
2160  SerializeFloorLayer(layer, name);
2161  break;
2162  }
2163  case armnn::LayerType::FullyConnected :
2164  {
2165  const armnn::FullyConnectedDescriptor& layerDescriptor =
2166  static_cast<const armnn::FullyConnectedDescriptor&>(descriptor);
2167  SerializeFullyConnectedLayer(layer, layerDescriptor, name);
2168  break;
2169  }
2170  case armnn::LayerType::Gather :
2171  {
2172  const armnn::GatherDescriptor& layerDescriptor =
2173  static_cast<const armnn::GatherDescriptor&>(descriptor);
2174  SerializeGatherLayer(layer, layerDescriptor, name);
2175  break;
2176  }
2177  case armnn::LayerType::GatherNd :
2178  {
2179  SerializeGatherNdLayer(layer, name);
2180  break;
2181  }
2182  case armnn::LayerType::Input :
2183  {
2184  SerializeInputLayer(layer, id, name);
2185  break;
2186  }
2187  case armnn::LayerType::InstanceNormalization :
2188  {
2189  const armnn::InstanceNormalizationDescriptor& layerDescriptor =
2190  static_cast<const armnn::InstanceNormalizationDescriptor&>(descriptor);
2191  SerializeInstanceNormalizationLayer(layer, layerDescriptor, name);
2192  break;
2193  }
2194  case armnn::LayerType::L2Normalization :
2195  {
2196  const armnn::L2NormalizationDescriptor& layerDescriptor =
2197  static_cast<const armnn::L2NormalizationDescriptor&>(descriptor);
2198  SerializeL2NormalizationLayer(layer, layerDescriptor, name);
2199  break;
2200  }
2201  case armnn::LayerType::LogicalBinary :
2202  {
2203  const armnn::LogicalBinaryDescriptor& layerDescriptor =
2204  static_cast<const armnn::LogicalBinaryDescriptor&>(descriptor);
2205  SerializeLogicalBinaryLayer(layer, layerDescriptor, name);
2206  break;
2207  }
2208  case armnn::LayerType::LogSoftmax :
2209  {
2210  const armnn::LogSoftmaxDescriptor& layerDescriptor =
2211  static_cast<const armnn::LogSoftmaxDescriptor&>(descriptor);
2212  SerializeLogSoftmaxLayer(layer, layerDescriptor, name);
2213  break;
2214  }
2215  case armnn::LayerType::Lstm :
2216  {
2217  const armnn::LstmDescriptor& layerDescriptor =
2218  static_cast<const armnn::LstmDescriptor&>(descriptor);
2219  SerializeLstmLayer(layer, layerDescriptor, constants, name);
2220  break;
2221  }
2222  case armnn::LayerType::QLstm :
2223  {
2224  const armnn::QLstmDescriptor& layerDescriptor =
2225  static_cast<const armnn::QLstmDescriptor&>(descriptor);
2226  SerializeQLstmLayer(layer, layerDescriptor, constants, name);
2227  break;
2228  }
2229  case armnn::LayerType::Maximum :
2230  {
2231  SerializeMaximumLayer(layer, name);
2232  break;
2233  }
2234  case armnn::LayerType::Mean :
2235  {
2236  const armnn::MeanDescriptor& layerDescriptor =
2237  static_cast<const armnn::MeanDescriptor&>(descriptor);
2238  SerializeMeanLayer(layer, layerDescriptor, name);
2239  break;
2240  }
2241  case armnn::LayerType::Merge :
2242  {
2243  SerializeMergeLayer(layer, name);
2244  break;
2245  }
2246  case armnn::LayerType::Minimum :
2247  {
2248  SerializeMinimumLayer(layer, name);
2249  break;
2250  }
2251  case armnn::LayerType::Multiplication :
2252  {
2253  SerializeMultiplicationLayer(layer, name);
2254  break;
2255  }
2256  case armnn::LayerType::Normalization :
2257  {
2258  const armnn::NormalizationDescriptor& layerDescriptor =
2259  static_cast<const armnn::NormalizationDescriptor&>(descriptor);
2260  SerializeNormalizationLayer(layer, layerDescriptor, name);
2261  break;
2262  }
2263  case armnn::LayerType::Output :
2264  {
2265  SerializeOutputLayer(layer, id, name);
2266  break;
2267  }
2268  case armnn::LayerType::Pad :
2269  {
2270  const armnn::PadDescriptor& layerDescriptor =
2271  static_cast<const armnn::PadDescriptor&>(descriptor);
2272  SerializePadLayer(layer, layerDescriptor, name);
2273  break;
2274  }
2275  case armnn::LayerType::Permute :
2276  {
2277  const armnn::PermuteDescriptor& layerDescriptor =
2278  static_cast<const armnn::PermuteDescriptor&>(descriptor);
2279  SerializePermuteLayer(layer, layerDescriptor, name);
2280  break;
2281  }
2282  case armnn::LayerType::Pooling2d :
2283  {
2284  const armnn::Pooling2dDescriptor& layerDescriptor =
2285  static_cast<const armnn::Pooling2dDescriptor&>(descriptor);
2286  SerializePooling2dLayer(layer, layerDescriptor, name);
2287  break;
2288  }
2289  case armnn::LayerType::Pooling3d :
2290  {
2291  const armnn::Pooling3dDescriptor& layerDescriptor =
2292  static_cast<const armnn::Pooling3dDescriptor&>(descriptor);
2293  SerializePooling3dLayer(layer, layerDescriptor, name);
2294  break;
2295  }
2296  case armnn::LayerType::Prelu :
2297  {
2298  SerializePreluLayer(layer, name);
2299  break;
2300  }
2301  case armnn::LayerType::Quantize :
2302  {
2303  SerializeQuantizeLayer(layer, name);
2304  break;
2305  }
2306  case armnn::LayerType::QuantizedLstm :
2307  SerializeQuantizedLstmLayer(layer, constants, name);
2308  break;
2309  case armnn::LayerType::Reshape :
2310  {
2311  const armnn::ReshapeDescriptor &layerDescriptor =
2312  static_cast<const armnn::ReshapeDescriptor &>(descriptor);
2313  SerializeReshapeLayer(layer, layerDescriptor, name);
2314  break;
2315  }
2316  case armnn::LayerType::Rank :
2317  {
2318  SerializeRankLayer(layer, name);
2319  break;
2320  }
2321  case armnn::LayerType::Reduce :
2322  {
2323  const armnn::ReduceDescriptor& layerDescriptor =
2324  static_cast<const armnn::ReduceDescriptor&>(descriptor);
2325  SerializeReduceLayer(layer, layerDescriptor, name);
2326  break;
2327  }
2328  case armnn::LayerType::Resize :
2329  {
2330  const armnn::ResizeDescriptor& layerDescriptor =
2331  static_cast<const armnn::ResizeDescriptor&>(descriptor);
2332  SerializeResizeLayer(layer, layerDescriptor, name);
2333  break;
2334  }
2335  case armnn::LayerType::Shape :
2336  {
2337  SerializeShapeLayer(layer, name);
2338  break;
2339  }
2340  case armnn::LayerType::Slice :
2341  {
2342  const armnn::SliceDescriptor& layerDescriptor =
2343  static_cast<const armnn::SliceDescriptor&>(descriptor);
2344  SerializeSliceLayer(layer, layerDescriptor, name);
2345  break;
2346  }
2347  case armnn::LayerType::Softmax :
2348  {
2349  const armnn::SoftmaxDescriptor& layerDescriptor =
2350  static_cast<const armnn::SoftmaxDescriptor&>(descriptor);
2351  SerializeSoftmaxLayer(layer, layerDescriptor, name);
2352  break;
2353  }
2354  case armnn::LayerType::SpaceToBatchNd :
2355  {
2356  const armnn::SpaceToBatchNdDescriptor& layerDescriptor =
2357  static_cast<const armnn::SpaceToBatchNdDescriptor&>(descriptor);
2358  SerializeSpaceToBatchNdLayer(layer, layerDescriptor, name);
2359  break;
2360  }
2361  case armnn::LayerType::SpaceToDepth :
2362  {
2363  const armnn::SpaceToDepthDescriptor& layerDescriptor =
2364  static_cast<const armnn::SpaceToDepthDescriptor&>(descriptor);
2365  SerializeSpaceToDepthLayer(layer, layerDescriptor, name);
2366  break;
2367  }
2368  case armnn::LayerType::Splitter :
2369  {
2370  const armnn::SplitterDescriptor& layerDescriptor =
2371  static_cast<const armnn::SplitterDescriptor&>(descriptor);
2372  SerializeSplitterLayer(layer, layerDescriptor, name);
2373  break;
2374  }
2375  case armnn::LayerType::Stack :
2376  {
2377  const armnn::StackDescriptor& layerDescriptor =
2378  static_cast<const armnn::StackDescriptor&>(descriptor);
2379  SerializeStackLayer(layer, layerDescriptor, name);
2380  break;
2381  }
2382  case armnn::LayerType::StandIn :
2383  {
2384  const armnn::StandInDescriptor& layerDescriptor =
2385  static_cast<const armnn::StandInDescriptor&>(descriptor);
2386  SerializeStandInLayer(layer, layerDescriptor, name);
2387  break;
2388  }
2389  case armnn::LayerType::StridedSlice :
2390  {
2391  const armnn::StridedSliceDescriptor& layerDescriptor =
2392  static_cast<const armnn::StridedSliceDescriptor&>(descriptor);
2393  SerializeStridedSliceLayer(layer, layerDescriptor, name);
2394  break;
2395  }
2396  case armnn::LayerType::Subtraction :
2397  {
2398  SerializeSubtractionLayer(layer, name);
2399  break;
2400  }
2401  case armnn::LayerType::Switch :
2402  {
2403  SerializeSwitchLayer(layer, name);
2404  break;
2405  }
2406  case armnn::LayerType::Transpose :
2407  {
2408  const armnn::TransposeDescriptor& layerDescriptor =
2409  static_cast<const armnn::TransposeDescriptor&>(descriptor);
2410  SerializeTransposeLayer(layer, layerDescriptor, name);
2411  break;
2412  }
2413  case armnn::LayerType::TransposeConvolution2d :
2414  {
2415  const armnn::TransposeConvolution2dDescriptor& layerDescriptor =
2416  static_cast<const armnn::TransposeConvolution2dDescriptor&>(descriptor);
2417  SerializeTransposeConvolution2dLayer(layer, layerDescriptor, constants, name);
2418  break;
2419  }
2420  case armnn::LayerType::UnidirectionalSequenceLstm :
2421  {
2422  const armnn::UnidirectionalSequenceLstmDescriptor& layerDescriptor =
2423  static_cast<const armnn::UnidirectionalSequenceLstmDescriptor&>(descriptor);
2424  SerializeUnidirectionalSequenceLstmLayer(layer, layerDescriptor, constants, name);
2425  break;
2426  }
2427  default:
2428  {
2429  throw InvalidArgumentException(
2430  fmt::format("A layer of unknown type was given to the serializer. Layer name: {}; Layer Id: {}",
2431  layer->GetName(),
2432  id));
2433  }
2434  }
2435 }
2436 
2437 void ISerializer::SerializerImpl::Serialize(const INetwork& inNetwork)
2438 {
2439  // Iterate through the network
2440  inNetwork.ExecuteStrategy(m_SerializerStrategy);
2441  flatbuffers::FlatBufferBuilder& fbBuilder = m_SerializerStrategy.GetFlatBufferBuilder();
2442 
2443  // Create FlatBuffer SerializedGraph
2444  auto serializedGraph = serializer::CreateSerializedGraph(
2445  fbBuilder,
2446  fbBuilder.CreateVector(m_SerializerStrategy.GetSerializedLayers()),
2447  fbBuilder.CreateVector(m_SerializerStrategy.GetInputIds()),
2448  fbBuilder.CreateVector(m_SerializerStrategy.GetOutputIds()),
2449  m_SerializerStrategy.GetVersionTable());
2450 
2451  // Serialize the graph
2452  fbBuilder.Finish(serializedGraph);
2453 }
2454 
2455 
2456 bool ISerializer::SerializerImpl::SaveSerializedToStream(std::ostream& stream)
2457 {
2458  flatbuffers::FlatBufferBuilder& fbBuilder = m_SerializerStrategy.GetFlatBufferBuilder();
2459 
2460  auto bytesToWrite = armnn::numeric_cast<std::streamsize>(fbBuilder.GetSize());
2461  stream.write(reinterpret_cast<const char*>(fbBuilder.GetBufferPointer()), bytesToWrite);
2462  return !stream.bad();
2463 }
2464 
2465 } // namespace armnnSerializer
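For context, a minimal usage sketch (not part of this file; it assumes the public armnn::INetwork and armnnSerializer::ISerializer headers) showing how the Serialize and SaveSerializedToStream implementations above are typically driven:

#include <armnn/INetwork.hpp>
#include <armnnSerializer/ISerializer.hpp>

#include <fstream>
#include <stdexcept>
#include <string>

void SaveNetwork(const armnn::INetwork& network, const std::string& path)
{
    auto serializer = armnnSerializer::ISerializer::Create();
    serializer->Serialize(network);                   // walks the network and builds the FlatBuffer SerializedGraph
    std::ofstream file(path, std::ios::binary);
    if (!serializer->SaveSerializedToStream(file))    // writes the finished FlatBuffer to the stream
    {
        throw std::runtime_error("Failed to write serialized network to " + path);
    }
}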
armnn::QLstmDescriptor::m_CellClip
float m_CellClip
Clipping threshold value for the cell state.
Definition: Descriptors.hpp:1381
armnn::QLstmDescriptor::m_LayerNormEnabled
bool m_LayerNormEnabled
Enable/disable layer normalization.
Definition: Descriptors.hpp:1391
armnn::ActivationFunction::Abs
@ Abs
armnn::ActivationFunction::Elu
@ Elu
armnn::LayerType::Floor
@ Floor
armnn::Pooling3dDescriptor::m_PadTop
uint32_t m_PadTop
Padding top value in the height dimension.
Definition: Descriptors.hpp:467
armnn::InstanceNormalizationDescriptor::m_Eps
float m_Eps
Epsilon, small scalar value added to variance to avoid dividing by zero. Defaults to 1e-12f.
Definition: Descriptors.hpp:857
armnn::Pooling2dDescriptor::m_StrideY
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
Definition: Descriptors.hpp:409
armnn::L2NormalizationDescriptor::m_Eps
float m_Eps
Used to avoid dividing by zero.
Definition: Descriptors.hpp:810
armnn::Convolution2dDescriptor::m_DataLayout
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Definition: Descriptors.hpp:572
armnnSerializer::GetFlatBufferOutputShapeRounding
armnnSerializer::OutputShapeRounding GetFlatBufferOutputShapeRounding(armnn::OutputShapeRounding outputShapeRounding)
Definition: SerializerUtils.cpp:172
armnn::LstmDescriptor::m_TimeMajor
bool m_TimeMajor
Enable/disable time major.
Definition: Descriptors.hpp:1121
armnn::StackDescriptor::m_InputShape
TensorShape m_InputShape
Required shape of all input tensors.
Definition: Descriptors.hpp:1244
armnn::LayerType::Softmax
@ Softmax
armnn::LayerType::Pooling3d
@ Pooling3d
armnn::LayerType::FullyConnected
@ FullyConnected
armnn::TensorInfo::GetQuantizationOffset
int32_t GetQuantizationOffset() const
Definition: Tensor.cpp:478
armnn::BatchMatMulDescriptor::m_TransposeX
bool m_TransposeX
Transpose the slices of each input tensor Transpose and Adjoint can not both be set to true for the s...
Definition: Descriptors.hpp:1579
armnn::TransposeConvolution2dDescriptor::m_PadBottom
uint32_t m_PadBottom
Padding bottom value in the height dimension.
Definition: Descriptors.hpp:1442
armnn::TensorInfo::GetQuantizationScale
float GetQuantizationScale() const
Definition: Tensor.cpp:461
armnn::Convolution2dDescriptor::m_BiasEnabled
bool m_BiasEnabled
Enable/disable bias.
Definition: Descriptors.hpp:570
armnn::ViewsDescriptor::GetNumViews
uint32_t GetNumViews() const
Get the number of views.
Definition: Descriptors.cpp:295
armnn::LayerType::Transpose
@ Transpose
LstmParams.hpp
armnn::QLstmDescriptor::m_ProjectionClip
float m_ProjectionClip
Clipping threshold value for the projection.
Definition: Descriptors.hpp:1383
armnn::Pooling3dDescriptor::m_StrideX
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
Definition: Descriptors.hpp:481
armnn::ViewsDescriptor::GetViewOrigin
const uint32_t * GetViewOrigin(uint32_t idx) const
Get the view origin at the int value idx.
Definition: Descriptors.cpp:305
armnn::LayerType::ChannelShuffle
@ ChannelShuffle
armnn::TransposeConvolution2dDescriptor::m_PadLeft
uint32_t m_PadLeft
Padding left value in the width dimension.
Definition: Descriptors.hpp:1436
armnn::DataType::QAsymmU8
@ QAsymmU8
armnn::FullyConnectedDescriptor::m_BiasEnabled
bool m_BiasEnabled
Enable/disable bias.
Definition: Descriptors.hpp:514
QuantizedLstmParams.hpp
armnnSerializer
Definition: ISerializer.hpp:11
armnn::GatherDescriptor
A GatherDescriptor for the GatherLayer.
Definition: Descriptors.hpp:932
armnn::NormalizationDescriptor
A NormalizationDescriptor for the NormalizationLayer.
Definition: Descriptors.hpp:757
armnn::ActivationFunction::Linear
@ Linear
armnn::TransposeDescriptor
A TransposeDescriptor for the TransposeLayer.
Definition: Descriptors.hpp:1457
armnn::QLstmDescriptor::m_ForgetIntermediateScale
float m_ForgetIntermediateScale
Forget intermediate quantization scale.
Definition: Descriptors.hpp:1395
armnn::BatchNormalizationDescriptor::m_DataLayout
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Definition: Descriptors.hpp:831
SerializerUtils.hpp
armnn::TransposeConvolution2dDescriptor::m_PadTop
uint32_t m_PadTop
Padding top value in the height dimension.
Definition: Descriptors.hpp:1440
armnn::ElementwiseUnaryDescriptor
A ElementwiseUnaryDescriptor for the ElementwiseUnaryLayer.
Definition: Descriptors.hpp:129
armnn::PadDescriptor
A PadDescriptor for the PadLayer.
Definition: Descriptors.hpp:1163
armnn::SoftmaxDescriptor
A SoftmaxDescriptor for the SoftmaxLayer.
Definition: Descriptors.hpp:177
armnn::OriginsDescriptor::GetNumViews
uint32_t GetNumViews() const
Get the number of views.
Definition: Descriptors.cpp:187
armnn::TensorShape::GetDimensionality
Dimensionality GetDimensionality() const
Function that returns the tensor type.
Definition: Tensor.hpp:92
armnn::LayerType::L2Normalization
@ L2Normalization
armnn::Convolution2dDescriptor::m_DilationY
uint32_t m_DilationY
Dilation along y axis.
Definition: Descriptors.hpp:568
armnn::ComparisonDescriptor::m_Operation
ComparisonOperation m_Operation
Specifies the comparison operation to execute.
Definition: Descriptors.hpp:105
armnn::Convolution2dDescriptor::m_PadBottom
uint32_t m_PadBottom
Padding bottom value in the height dimension.
Definition: Descriptors.hpp:560
armnn::StackDescriptor
A StackDescriptor for the StackLayer.
Definition: Descriptors.hpp:1218
armnn::LayerType::TransposeConvolution2d
@ TransposeConvolution2d
armnn::SliceDescriptor
A SliceDescriptor for the SliceLayer.
Definition: Descriptors.hpp:1195
armnn::OriginsDescriptor::GetNumDimensions
uint32_t GetNumDimensions() const
Get the number of dimensions.
Definition: Descriptors.cpp:192
armnn::BatchToSpaceNdDescriptor::m_Crops
std::vector< std::pair< unsigned int, unsigned int > > m_Crops
The values to crop from the input dimension.
Definition: Descriptors.hpp:888
armnn::Convolution3dDescriptor::m_PadLeft
uint32_t m_PadLeft
Padding left value in the width dimension.
Definition: Descriptors.hpp:617
armnn::DataType::Float16
@ Float16
armnn::LayerType::Input
@ Input
armnn::LayerBindingId
int LayerBindingId
Type of identifiers for bindable layers (inputs, outputs).
Definition: Types.hpp:301
armnn::LayerType::Slice
@ Slice
armnn::ActivationDescriptor
An ActivationDescriptor for the ActivationLayer.
Definition: Descriptors.hpp:36
armnn::ReduceDescriptor::m_ReduceOperation
ReduceOperation m_ReduceOperation
Specifies the reduction operation to execute.
Definition: Descriptors.hpp:1525
armnn::Pooling3dDescriptor::m_PadRight
uint32_t m_PadRight
Padding right value in the width dimension.
Definition: Descriptors.hpp:465
armnn::IConnectableLayer
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:68
armnn::LstmDescriptor
An LstmDescriptor for the LstmLayer.
Definition: Descriptors.hpp:1069
armnn::LayerType::ElementwiseBinary
@ ElementwiseBinary
armnn::LayerType::Maximum
@ Maximum
armnn::FullyConnectedDescriptor
A FullyConnectedDescriptor for the FullyConnectedLayer.
Definition: Descriptors.hpp:495
armnn::StridedSliceDescriptor::m_BeginMask
int32_t m_BeginMask
Begin mask value.
Definition: Descriptors.hpp:1317
armnn::LayerType::Quantize
@ Quantize
armnn::ChannelShuffleDescriptor::m_NumGroups
uint32_t m_NumGroups
Number of groups for the channel shuffle operation.
Definition: Descriptors.hpp:1545
armnn::StandInDescriptor::m_NumInputs
uint32_t m_NumInputs
Number of input tensors.
Definition: Descriptors.hpp:1264
armnn::Convolution3dDescriptor::m_DilationY
uint32_t m_DilationY
Dilation along y axis.
Definition: Descriptors.hpp:637
armnn::ArgMinMaxDescriptor::m_Axis
int m_Axis
Axis to reduce across the input tensor.
Definition: Descriptors.hpp:83
armnnSerializer::GetFlatBufferLogicalBinaryOperation
armnnSerializer::LogicalBinaryOperation GetFlatBufferLogicalBinaryOperation(armnn::LogicalBinaryOperation logicalBinaryOperation)
Definition: SerializerUtils.cpp:31
armnn::SpaceToBatchNdDescriptor::m_PadList
std::vector< std::pair< unsigned int, unsigned int > > m_PadList
Specifies the padding values for the input dimension: heightPad{top, bottom} widthPad{left,...
Definition: Descriptors.hpp:1036
armnn::BatchMatMulDescriptor
A BatchMatMulDescriptor for the BatchMatMul operator.
Definition: Descriptors.hpp:1551
armnn::PermuteDescriptor::m_DimMappings
PermutationVector m_DimMappings
Indicates how to translate tensor elements from a given source into the target destination,...
Definition: Descriptors.hpp:173
armnn::ResizeDescriptor
A ResizeDescriptor for the ResizeLayer.
Definition: Descriptors.hpp:952
armnn::LayerType::ArgMinMax
@ ArgMinMax
armnn::FullyConnectedDescriptor::m_ConstantWeights
bool m_ConstantWeights
Enable/disable constant weights and biases.
Definition: Descriptors.hpp:518
armnn::StridedSliceDescriptor
A StridedSliceDescriptor for the StridedSliceLayer.
Definition: Descriptors.hpp:1270
armnn::LayerType::Subtraction
@ Subtraction
armnn::Pooling2dDescriptor::m_PoolHeight
uint32_t m_PoolHeight
Pooling height value.
Definition: Descriptors.hpp:405
armnn::StridedSliceDescriptor::m_Begin
std::vector< int > m_Begin
Begin values for the input that will be sliced.
Definition: Descriptors.hpp:1309
armnn::ConstTensor
A tensor defined by a TensorInfo (shape and data type) and an immutable backing store.
Definition: Tensor.hpp:327
armnn::LayerType::SpaceToBatchNd
@ SpaceToBatchNd
armnn::LayerType::Convolution2d
@ Convolution2d
armnn::ReduceDescriptor::m_vAxis
std::vector< uint32_t > m_vAxis
The indices of the dimensions to reduce.
Definition: Descriptors.hpp:1523
armnn::Pooling3dDescriptor
A Pooling3dDescriptor for the Pooling3dLayer.
Definition: Descriptors.hpp:419
armnn::ReduceDescriptor
A ReduceDescriptor for the REDUCE operators.
Definition: Descriptors.hpp:1505
armnn::TransposeConvolution2dDescriptor::m_BiasEnabled
bool m_BiasEnabled
Enable/disable bias.
Definition: Descriptors.hpp:1448
armnn::OriginsDescriptor::GetViewOrigin
const uint32_t * GetViewOrigin(uint32_t idx) const
Return the view origin at the int value idx.
Definition: Descriptors.cpp:197
armnn::LayerType::Shape
@ Shape
armnn::Convolution2dDescriptor::m_DilationX
uint32_t m_DilationX
Dilation along x axis.
Definition: Descriptors.hpp:566
armnn::DetectionPostProcessDescriptor::m_MaxDetections
uint32_t m_MaxDetections
Maximum numbers of detections.
Definition: Descriptors.hpp:733
armnn::Pooling3dDescriptor::m_PoolWidth
uint32_t m_PoolWidth
Pooling width value.
Definition: Descriptors.hpp:475
armnn::ComparisonDescriptor
A ComparisonDescriptor for the ComparisonLayer.
Definition: Descriptors.hpp:89
armnn::IgnoreUnused
void IgnoreUnused(Ts &&...)
Definition: IgnoreUnused.hpp:14
armnn::DataType::Signed32
@ Signed32
armnn::StandInDescriptor
A StandInDescriptor for the StandIn layer.
Definition: Descriptors.hpp:1248
armnn::Pooling3dDescriptor::m_OutputShapeRounding
OutputShapeRounding m_OutputShapeRounding
The rounding method for the output shape. (Floor, Ceiling).
Definition: Descriptors.hpp:487
armnn::LstmDescriptor::m_CifgEnabled
bool m_CifgEnabled
Enable/disable cifg (coupled input & forget gate).
Definition: Descriptors.hpp:1113
armnn::Pooling2dDescriptor::m_PoolType
PoolingAlgorithm m_PoolType
The pooling algorithm to use (Max. Average, L2).
Definition: Descriptors.hpp:393
armnnSerializer::GetFlatBufferUnaryOperation
armnnSerializer::UnaryOperation GetFlatBufferUnaryOperation(armnn::UnaryOperation unaryOperation)
Definition: SerializerUtils.cpp:131
armnn::LstmDescriptor::m_LayerNormEnabled
bool m_LayerNormEnabled
Enable/disable layer normalization.
Definition: Descriptors.hpp:1119
armnn::LogicalBinaryDescriptor::m_Operation
LogicalBinaryOperation m_Operation
Specifies the logical operation to execute.
Definition: Descriptors.hpp:1501
armnn::ActivationFunction::HardSwish
@ HardSwish
armnn::LayerType::Merge
@ Merge
armnnSerializer::GetFlatBufferActivationFunction
serializer::ActivationFunction GetFlatBufferActivationFunction(armnn::ActivationFunction function)
Definition: Serializer.cpp:55
armnn::ViewsDescriptor
A ViewsDescriptor for the SplitterLayer.
Definition: Descriptors.hpp:244
armnn::LayerType::Permute
@ Permute
armnn::PermutationVector::GetSize
SizeType GetSize() const
Definition: Types.hpp:349
armnn::DepthwiseConvolution2dDescriptor::m_BiasEnabled
bool m_BiasEnabled
Enable/disable bias.
Definition: Descriptors.hpp:696
armnn::MeanDescriptor::m_Axis
std::vector< unsigned int > m_Axis
Values for the dimensions to reduce.
Definition: Descriptors.hpp:1157
armnn::Pooling3dDescriptor::m_PadLeft
uint32_t m_PadLeft
Padding left value in the width dimension.
Definition: Descriptors.hpp:463
armnn::DetectionPostProcessDescriptor::m_ScaleX
float m_ScaleX
Center size encoding scale x.
Definition: Descriptors.hpp:747
armnn::Convolution2dDescriptor::m_PadTop
uint32_t m_PadTop
Padding top value in the height dimension.
Definition: Descriptors.hpp:558
armnn::Pooling2dDescriptor::m_DataLayout
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Definition: Descriptors.hpp:415
armnn::StridedSliceDescriptor::m_NewAxisMask
int32_t m_NewAxisMask
New axis mask value.
Definition: Descriptors.hpp:1327
armnn::Convolution2dDescriptor::m_PadRight
uint32_t m_PadRight
Padding right value in the width dimension.
Definition: Descriptors.hpp:556
armnn::Convolution3dDescriptor::m_DilationX
uint32_t m_DilationX
Dilation along x axis.
Definition: Descriptors.hpp:635
armnn::OriginsDescriptor::GetConcatAxis
unsigned int GetConcatAxis() const
Get the concatenation axis value.
Definition: Descriptors.cpp:162
armnn::LayerType::QLstm
@ QLstm
armnn::LayerType::Pad
@ Pad
armnn::Convolution3dDescriptor::m_DilationZ
uint32_t m_DilationZ
Dilation along z axis.
Definition: Descriptors.hpp:639
armnn::IConnectableLayer::GetOutputSlot
virtual const IOutputSlot & GetOutputSlot(unsigned int index) const =0
Get the const output slot handle by slot index.
armnn::LayerType::Addition
@ Addition
armnn::LayerType::QuantizedLstm
@ QuantizedLstm
armnn::LayerType::BatchNormalization
@ BatchNormalization
armnn::DataType::QAsymmS8
@ QAsymmS8
armnn::LayerType::Reduce
@ Reduce
armnn::Pooling2dDescriptor::m_StrideX
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
Definition: Descriptors.hpp:407
armnn::DepthwiseConvolution2dDescriptor::m_StrideX
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
Definition: Descriptors.hpp:688
armnn::ArgMinMaxFunction
ArgMinMaxFunction
Definition: Types.hpp:102
armnn::DepthwiseConvolution2dDescriptor::m_DilationX
uint32_t m_DilationX
Dilation factor value for width dimension.
Definition: Descriptors.hpp:692
armnn::DetectionPostProcessDescriptor::m_NmsScoreThreshold
float m_NmsScoreThreshold
NMS score threshold.
Definition: Descriptors.hpp:739
armnn::LayerType::Division
@ Division
armnn::ViewsDescriptor::GetNumDimensions
uint32_t GetNumDimensions() const
Get the number of dimensions.
Definition: Descriptors.cpp:300
armnn::NormalizationDescriptor::m_NormMethodType
NormalizationAlgorithmMethod m_NormMethodType
Normalization method algorithm to use (LocalBrightness, LocalContrast).
Definition: Descriptors.hpp:783
armnn::QLstmDescriptor::m_CellIntermediateScale
float m_CellIntermediateScale
Cell intermediate quantization scale.
Definition: Descriptors.hpp:1397
armnn::DepthwiseConvolution2dDescriptor::m_PadLeft
uint32_t m_PadLeft
Padding left value in the width dimension.
Definition: Descriptors.hpp:680
Serializer.hpp
armnn
Copyright (c) 2021 ARM Limited and Contributors.
Definition: 01_00_quick_start.dox:6
armnnSerializer::GetFlatBufferArgMinMaxFunction
serializer::ArgMinMaxFunction GetFlatBufferArgMinMaxFunction(armnn::ArgMinMaxFunction function)
Definition: Serializer.cpp:86
armnn::Convolution3dDescriptor::m_PadTop
uint32_t m_PadTop
Padding top value in the height dimension.
Definition: Descriptors.hpp:621
armnn::IInputSlot
An input connection slot for a layer.
Definition: INetwork.hpp:25
armnn::StandInDescriptor::m_NumOutputs
uint32_t m_NumOutputs
Number of output tensors.
Definition: Descriptors.hpp:1266
armnn::FillDescriptor::m_Value
float m_Value
Definition: Descriptors.hpp:928
armnn::LayerType::InstanceNormalization
@ InstanceNormalization
armnn::QLstmDescriptor::m_OutputIntermediateScale
float m_OutputIntermediateScale
Output intermediate quantization scale.
Definition: Descriptors.hpp:1399
armnn::DetectionPostProcessDescriptor::m_DetectionsPerClass
uint32_t m_DetectionsPerClass
Detections per classes, used in Regular NMS.
Definition: Descriptors.hpp:737
armnn::DetectionPostProcessDescriptor::m_ScaleH
float m_ScaleH
Center size encoding scale height.
Definition: Descriptors.hpp:753
armnn::NormalizationDescriptor::m_DataLayout
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Definition: Descriptors.hpp:793
armnn::DepthwiseConvolution2dDescriptor::m_PadTop
uint32_t m_PadTop
Padding top value in the height dimension.
Definition: Descriptors.hpp:684
armnn::BatchToSpaceNdDescriptor
A BatchToSpaceNdDescriptor for the BatchToSpaceNdLayer.
Definition: Descriptors.hpp:863
armnn::OptionalReferenceSwitch::value
const T & value() const
Definition: Optional.hpp:146
armnn::Convolution3dDescriptor::m_PadFront
uint32_t m_PadFront
Padding front value in the depth dimension.
Definition: Descriptors.hpp:625
armnn::IConnectableLayer::GetInputSlot
virtual const IInputSlot & GetInputSlot(unsigned int index) const =0
Get a const input slot handle by slot index.
armnnSerializer::GetFlatBufferDataType
armnnSerializer::DataType GetFlatBufferDataType(armnn::DataType dataType)
Definition: SerializerUtils.cpp:67
armnn::SpaceToDepthDescriptor
A SpaceToDepthDescriptor for the SpaceToDepthLayer.
Definition: Descriptors.hpp:1042
armnn::StridedSliceDescriptor::m_End
std::vector< int > m_End
End values for the input that will be sliced.
Definition: Descriptors.hpp:1311
armnn::LayerType::Activation
@ Activation
armnnSerializer::GetFlatBufferPaddingMethod
armnnSerializer::PaddingMethod GetFlatBufferPaddingMethod(armnn::PaddingMethod paddingMethod)
Definition: SerializerUtils.cpp:184
armnn::LayerType::Normalization
@ Normalization
armnn::DetectionPostProcessDescriptor::m_ScaleW
float m_ScaleW
Center size encoding scale weight.
Definition: Descriptors.hpp:751
armnn::ResizeDescriptor::m_AlignCorners
bool m_AlignCorners
Aligned corners.
Definition: Descriptors.hpp:983
armnn::DetectionPostProcessDescriptor
Definition: Descriptors.hpp:701
armnn::ElementwiseUnaryDescriptor::m_Operation
UnaryOperation m_Operation
Specifies the elementwiseUnary operation to execute.
Definition: Descriptors.hpp:145
armnn::LayerType::Comparison
@ Comparison
armnn::LayerType::Stack
@ Stack
armnn::FillDescriptor
A FillDescriptor for the FillLayer.
Definition: Descriptors.hpp:913
armnnSerializer::GetFlatBufferPoolingAlgorithm
armnnSerializer::PoolingAlgorithm GetFlatBufferPoolingAlgorithm(armnn::PoolingAlgorithm poolingAlgorithm)
Definition: SerializerUtils.cpp:158
armnn::Convolution3dDescriptor::m_StrideZ
uint32_t m_StrideZ
Stride value when proceeding through input for the depth dimension.
Definition: Descriptors.hpp:633
armnn::LayerType
LayerType
When adding a new layer, adapt also the LastLayer enum value in the enum class LayerType below.
Definition: Types.hpp:479
armnn::LayerType::Reshape
@ Reshape
armnn::PadDescriptor::m_PaddingMode
PaddingMode m_PaddingMode
Specifies the Padding mode (Constant, Reflect or Symmetric)
Definition: Descriptors.hpp:1191
armnn::ResizeDescriptor::m_TargetHeight
uint32_t m_TargetHeight
Target height value.
Definition: Descriptors.hpp:976
armnn::LayerType::Gather
@ Gather
armnn::LayerType::DepthwiseConvolution2d
@ DepthwiseConvolution2d
armnn::LayerType::Fill
@ Fill
armnn::SpaceToDepthDescriptor::m_DataLayout
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Definition: Descriptors.hpp:1062
armnn::LayerType::Resize
@ Resize
armnn::IOutputSlot::GetTensorInfo
virtual const TensorInfo & GetTensorInfo() const =0
armnn::DepthwiseConvolution2dDescriptor::m_DilationY
uint32_t m_DilationY
Dilation factor value for height dimension.
Definition: Descriptors.hpp:694
armnnSerializer::GetFlatBufferComparisonOperation
armnnSerializer::ComparisonOperation GetFlatBufferComparisonOperation(armnn::ComparisonOperation comparisonOperation)
Definition: SerializerUtils.cpp:11
armnn::GatherDescriptor::m_Axis
int32_t m_Axis
The axis in params to gather indices from.
Definition: Descriptors.hpp:948
armnn::Pooling3dDescriptor::m_PadFront
uint32_t m_PadFront
Padding front value in the depth dimension.
Definition: Descriptors.hpp:471
armnn::IOutputSlot::CalculateIndexOnOwner
virtual unsigned int CalculateIndexOnOwner() const =0
armnn::ArgMinMaxDescriptor::m_Function
ArgMinMaxFunction m_Function
Specify if the function is to find Min or Max.
Definition: Descriptors.hpp:81
armnn::DepthwiseConvolution2dDescriptor::m_DataLayout
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Definition: Descriptors.hpp:698
armnn::SoftmaxDescriptor::m_Axis
int m_Axis
Scalar, defaulted to the last index (-1), specifying the dimension the activation will be performed o...
Definition: Descriptors.hpp:192
armnn::PadDescriptor::m_PadList
std::vector< std::pair< unsigned int, unsigned int > > m_PadList
Specifies the padding for input dimension.
Definition: Descriptors.hpp:1185
armnn::LayerType::Rank
@ Rank
armnn::Convolution2dDescriptor::m_PadLeft
uint32_t m_PadLeft
Padding left value in the width dimension.
Definition: Descriptors.hpp:554
armnn::BatchMatMulDescriptor::m_DataLayoutX
DataLayout m_DataLayoutX
Data layout of each input tensor, such as NHWC/NDHWC (leave as default for arbitrary layout)
Definition: Descriptors.hpp:1588
armnn::DepthwiseConvolution2dDescriptor
A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
Definition: Descriptors.hpp:647
armnn::MeanDescriptor
A MeanDescriptor for the MeanLayer.
Definition: Descriptors.hpp:1139
armnn::ActivationFunction::Sigmoid
@ Sigmoid
armnn::IConnectableLayer::GetNumOutputSlots
virtual unsigned int GetNumOutputSlots() const =0
Returns the number of connectable output slots.
armnn::NormalizationDescriptor::m_NormSize
uint32_t m_NormSize
Depth radius value.
Definition: Descriptors.hpp:785
armnnSerializer::GetFlatBufferBinaryOperation
armnnSerializer::BinaryOperation GetFlatBufferBinaryOperation(armnn::BinaryOperation binaryOperation)
Definition: SerializerUtils.cpp:110
armnn::TensorShape::GetDimensionSpecificity
bool GetDimensionSpecificity(unsigned int i) const
Gets information about if the dimension size has been specified or not.
Definition: Tensor.cpp:211
armnn::Pooling3dDescriptor::m_PadBack
uint32_t m_PadBack
Padding back value in the depth dimension.
Definition: Descriptors.hpp:473
armnn::BatchToSpaceNdDescriptor::m_DataLayout
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Definition: Descriptors.hpp:890
armnnSerializer::ISerializer
Definition: ISerializer.hpp:17
armnn::Pooling2dDescriptor::m_PadBottom
uint32_t m_PadBottom
Padding bottom value in the height dimension.
Definition: Descriptors.hpp:401
armnn::Pooling3dDescriptor::m_StrideZ
uint32_t m_StrideZ
Stride value when proceeding through input for the depth dimension.
Definition: Descriptors.hpp:485
armnnSerializer::GetFlatBufferPaddingMode
armnnSerializer::PaddingMode GetFlatBufferPaddingMode(armnn::PaddingMode paddingMode)
Definition: SerializerUtils.cpp:196
armnn::LayerType::LogicalBinary
@ LogicalBinary
armnn::LayerType::UnidirectionalSequenceLstm
@ UnidirectionalSequenceLstm
armnn::ChannelShuffleDescriptor::m_Axis
uint32_t m_Axis
Axis to apply channel shuffle operation on.
Definition: Descriptors.hpp:1547
armnnSerializer::GetFlatBufferResizeMethod
armnnSerializer::ResizeMethod GetFlatBufferResizeMethod(armnn::ResizeMethod method)
Definition: SerializerUtils.cpp:237
armnn::LayerType::Pooling2d
@ Pooling2d
armnn::L2NormalizationDescriptor
A L2NormalizationDescriptor for the L2NormalizationLayer.
Definition: Descriptors.hpp:797
armnn::DataType::Float32
@ Float32
armnn::IConnectableLayer::GetGuid
virtual LayerGuid GetGuid() const =0
Returns the unique id of the layer.
armnn::ChannelShuffleDescriptor
A ChannelShuffleDescriptor for the ChannelShuffle operator.
Definition: Descriptors.hpp:1529
armnn::IOutputSlot
An output connection slot for a layer.
Definition: INetwork.hpp:41
armnn::Convolution3dDescriptor
A Convolution3dDescriptor for the Convolution3dLayer.
Definition: Descriptors.hpp:576
armnn::LstmDescriptor::m_PeepholeEnabled
bool m_PeepholeEnabled
Enable/disable peephole.
Definition: Descriptors.hpp:1115
armnn::ArgMinMaxFunction::Min
@ Min
armnn::LayerType::GatherNd
@ GatherNd
armnn::BatchMatMulDescriptor::m_TransposeY
bool m_TransposeY
Definition: Descriptors.hpp:1580
armnn::TensorInfo
Definition: Tensor.hpp:152
armnn::LayerType::Minimum
@ Minimum
armnn::LayerType::Constant
@ Constant
armnn::DataType::Signed64
@ Signed64
armnn::SpaceToBatchNdDescriptor::m_DataLayout
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Definition: Descriptors.hpp:1038
armnn::ResizeDescriptor::m_DataLayout
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Definition: Descriptors.hpp:981
armnn::SpaceToDepthDescriptor::m_BlockSize
unsigned int m_BlockSize
Scalar specifying the input block size. It must be >= 1.
Definition: Descriptors.hpp:1059
armnn::Convolution2dDescriptor
A Convolution2dDescriptor for the Convolution2dLayer.
Definition: Descriptors.hpp:522
armnn::LayerType::Lstm
@ Lstm
armnn::StackDescriptor::m_NumInputs
uint32_t m_NumInputs
Number of input tensors.
Definition: Descriptors.hpp:1242
armnn::Pooling2dDescriptor::m_PoolWidth
uint32_t m_PoolWidth
Pooling width value.
Definition: Descriptors.hpp:403
armnn::TransposeConvolution2dDescriptor::m_PadRight
uint32_t m_PadRight
Padding right value in the width dimension.
Definition: Descriptors.hpp:1438
armnn::BatchNormalizationDescriptor
A BatchNormalizationDescriptor for the BatchNormalizationLayer.
Definition: Descriptors.hpp:816
armnnSerializer::GetFlatBufferNormalizationAlgorithmChannel
armnnSerializer::NormalizationAlgorithmChannel GetFlatBufferNormalizationAlgorithmChannel(armnn::NormalizationAlgorithmChannel normalizationAlgorithmChannel)
Definition: SerializerUtils.cpp:209
armnn::Pooling3dDescriptor::m_PoolType
PoolingAlgorithm m_PoolType
The pooling algorithm to use (Max, Average, L2).
Definition: Descriptors.hpp:461
armnn::QLstmDescriptor
A QLstmDescriptor for the QLstmLayer.
Definition: Descriptors.hpp:1347
armnn::ActivationDescriptor::m_A
float m_A
Alpha upper bound value used by the activation functions. (BoundedReLu, Linear, TanH,...
Definition: Descriptors.hpp:61
armnn::LayerType::ElementwiseUnary
@ ElementwiseUnary
armnn::Pooling3dDescriptor::m_PoolDepth
uint32_t m_PoolDepth
Pooling depth value.
Definition: Descriptors.hpp:479
armnn::InstanceNormalizationDescriptor::m_DataLayout
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Definition: Descriptors.hpp:859
armnn::Convolution3dDescriptor::m_StrideX
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
Definition: Descriptors.hpp:629
armnn::TensorInfo::HasPerAxisQuantization
bool HasPerAxisQuantization() const
Definition: Tensor.cpp:446
armnn::Pooling3dDescriptor::m_PaddingMethod
PaddingMethod m_PaddingMethod
The padding method to be used. (Exclude, IgnoreValue).
Definition: Descriptors.hpp:489
armnn::ArgMinMaxFunction::Max
@ Max
armnn::SoftmaxDescriptor::m_Beta
float m_Beta
Exponentiation value.
Definition: Descriptors.hpp:190
armnn::LayerType::SpaceToDepth
@ SpaceToDepth
armnn::ResizeDescriptor::m_Method
ResizeMethod m_Method
The interpolation method to use (Bilinear, NearestNeighbor).
Definition: Descriptors.hpp:979
armnn::Convolution3dDescriptor::m_StrideY
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
Definition: Descriptors.hpp:631
armnn::Pooling2dDescriptor::m_PadLeft
uint32_t m_PadLeft
Padding left value in the width dimension.
Definition: Descriptors.hpp:395
armnn::QLstmDescriptor::m_HiddenStateScale
float m_HiddenStateScale
Hidden State quantization scale.
Definition: Descriptors.hpp:1403
armnn::BaseTensor::GetNumBytes
unsigned int GetNumBytes() const
Definition: Tensor.hpp:302
armnn::TensorInfo::GetShape
const TensorShape & GetShape() const
Definition: Tensor.hpp:191
armnn::TransposeDescriptor::m_DimMappings
PermutationVector m_DimMappings
Indicates how to translate tensor elements from a given source into the target destination,...
Definition: Descriptors.hpp:1481
armnn::Convolution3dDescriptor::m_PadRight
uint32_t m_PadRight
Padding right value in the width dimension.
Definition: Descriptors.hpp:619
armnn::QLstmDescriptor::m_HiddenStateZeroPoint
int32_t m_HiddenStateZeroPoint
Hidden State zero point.
Definition: Descriptors.hpp:1401
armnn::StridedSliceDescriptor::m_Stride
std::vector< int > m_Stride
Stride values for the input that will be sliced.
Definition: Descriptors.hpp:1313
armnn::LayerType::StandIn
@ StandIn
armnn::ActivationFunction::Square
@ Square
armnn::DetectionPostProcessDescriptor::m_NumClasses
uint32_t m_NumClasses
Number of classes.
Definition: Descriptors.hpp:743
armnn::Pooling2dDescriptor::m_PadTop
uint32_t m_PadTop
Padding top value in the height dimension.
Definition: Descriptors.hpp:399
armnn::ActivationDescriptor::m_Function
ActivationFunction m_Function
The activation function to use (Sigmoid, TanH, Linear, ReLu, BoundedReLu, SoftReLu,...
Definition: Descriptors.hpp:59
armnn::IInputSlot::GetConnection
virtual const IOutputSlot * GetConnection() const =0
armnn::Convolution2dDescriptor::m_StrideX
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
Definition: Descriptors.hpp:562
armnn::NormalizationDescriptor::m_K
float m_K
Kappa value used for the across channel normalization equation.
Definition: Descriptors.hpp:791
armnn::Convolution3dDescriptor::m_DataLayout
DataLayout m_DataLayout
The data layout to be used (NDHWC, NCDHW).
Definition: Descriptors.hpp:643
armnn::LayerType::StridedSlice
@ StridedSlice
armnn::PadDescriptor::m_PadValue
float m_PadValue
Optional value to use for padding, defaults to 0.
Definition: Descriptors.hpp:1188
armnn::BatchToSpaceNdDescriptor::m_BlockShape
std::vector< unsigned int > m_BlockShape
Block shape values.
Definition: Descriptors.hpp:886
armnn::LayerType::DetectionPostProcess
@ DetectionPostProcess
armnn::StridedSliceDescriptor::m_ShrinkAxisMask
int32_t m_ShrinkAxisMask
Shrink axis mask value. If set, the nth specification shrinks the dimensionality by 1.
Definition: Descriptors.hpp:1322
armnn::SpaceToBatchNdDescriptor::m_BlockShape
std::vector< unsigned int > m_BlockShape
Block shape value.
Definition: Descriptors.hpp:1033
armnn::NormalizationDescriptor::m_Beta
float m_Beta
Beta value for the normalization equation.
Definition: Descriptors.hpp:789
armnn::LayerType::Mean
@ Mean
armnn::SliceDescriptor::m_Size
std::vector< unsigned int > m_Size
Size of the slice in each dimension.
Definition: Descriptors.hpp:1214
armnn::LstmDescriptor::m_ClippingThresCell
float m_ClippingThresCell
Clipping threshold value for the cell state.
Definition: Descriptors.hpp:1109
armnn::BaseDescriptor
Base class for all descriptors.
Definition: Descriptors.hpp:22
armnn::OriginsDescriptor
An OriginsDescriptor for the ConcatLayer.
Definition: Descriptors.hpp:201
armnn::ReshapeDescriptor
A ReshapeDescriptor for the ReshapeLayer.
Definition: Descriptors.hpp:990
armnn::TransposeConvolution2dDescriptor::m_DataLayout
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Definition: Descriptors.hpp:1450
armnn::TensorShape::GetNumDimensions
unsigned int GetNumDimensions() const
Function that returns the tensor rank.
Definition: Tensor.cpp:174
armnn::LayerType::BatchToSpaceNd
@ BatchToSpaceNd
armnn::IOutputSlot::GetOwningLayerGuid
virtual LayerGuid GetOwningLayerGuid() const =0
armnn::PermuteDescriptor
A PermuteDescriptor for the PermuteLayer.
Definition: Descriptors.hpp:149
armnnSerializer::GetFlatBufferConstTensorData
armnnSerializer::ConstTensorData GetFlatBufferConstTensorData(armnn::DataType dataType)
Definition: SerializerUtils.cpp:45
armnn::TransposeConvolution2dDescriptor
A TransposeConvolution2dDescriptor for the TransposeConvolution2dLayer.
Definition: Descriptors.hpp:1407
armnn::FullyConnectedDescriptor::m_TransposeWeightMatrix
bool m_TransposeWeightMatrix
Enable/disable transpose weight matrix.
Definition: Descriptors.hpp:516
armnn::LayerType::DepthToSpace
@ DepthToSpace
armnn::L2NormalizationDescriptor::m_DataLayout
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Definition: Descriptors.hpp:812
armnn::TensorInfo::GetQuantizationDim
Optional< unsigned int > GetQuantizationDim() const
Definition: Tensor.cpp:494
armnn::DataType::BFloat16
@ BFloat16
armnn::ElementwiseBinaryDescriptor::m_Operation
BinaryOperation m_Operation
Specifies the elementwise binary operation to execute.
Definition: Descriptors.hpp:125
armnn::SliceDescriptor::m_Begin
std::vector< unsigned int > m_Begin
Beginning indices of the slice in each dimension.
Definition: Descriptors.hpp:1211
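As an illustrative sketch (not part of this file), m_Begin and m_Size together describe the slice; for example, a 2x2 patch starting at offset (1, 1) of a rank-2 tensor:
// Illustrative only: take rows 1-2 and columns 1-2 of a 2D tensor.
armnn::SliceDescriptor slice;
slice.m_Begin = {1, 1}; // start index per dimension
slice.m_Size  = {2, 2}; // number of elements per dimension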
armnn::ActivationFunction::TanH
@ TanH
armnn::DepthwiseConvolution2dDescriptor::m_PadRight
uint32_t m_PadRight
Padding right value in the width dimension.
Definition: Descriptors.hpp:682
armnn::INetwork::ExecuteStrategy
void ExecuteStrategy(IStrategy &strategy) const
Definition: Network.cpp:642
armnn::Pooling2dDescriptor
A Pooling2dDescriptor for the Pooling2dLayer.
Definition: Descriptors.hpp:359
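As an illustrative sketch (the field values are assumptions for the example), a plain 2x2 max pooling with stride 2 in NHWC layout could be described as follows:
// Illustrative only: 2x2 max pooling, stride 2, no padding, NHWC input.
armnn::Pooling2dDescriptor pooling;
pooling.m_PoolType   = armnn::PoolingAlgorithm::Max;
pooling.m_PoolWidth  = 2;
pooling.m_PoolHeight = 2;
pooling.m_StrideX    = 2;
pooling.m_StrideY    = 2;
pooling.m_DataLayout = armnn::DataLayout::NHWC;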
armnn::Pooling3dDescriptor::m_StrideY
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
Definition: Descriptors.hpp:483
armnn::Pooling3dDescriptor::m_PadBottom
uint32_t m_PadBottom
Padding bottom value in the height dimension.
Definition: Descriptors.hpp:469
armnn::LogicalBinaryDescriptor
A LogicalBinaryDescriptor for the LogicalBinaryLayer.
Definition: Descriptors.hpp:1485
armnn::LstmDescriptor::m_ProjectionEnabled
bool m_ProjectionEnabled
Enable/disable the projection layer.
Definition: Descriptors.hpp:1117
armnn::LayerType::Switch
@ Switch
armnn::BatchMatMulDescriptor::m_DataLayoutY
DataLayout m_DataLayoutY
Definition: Descriptors.hpp:1589
armnn::QLstmDescriptor::m_InputIntermediateScale
float m_InputIntermediateScale
Input intermediate quantization scale.
Definition: Descriptors.hpp:1393
armnn::StridedSliceDescriptor::m_DataLayout
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Definition: Descriptors.hpp:1330
armnn::INetwork
Main network class which provides the interface for building up a neural network.
Definition: INetwork.hpp:335
armnn::MeanDescriptor::m_KeepDims
bool m_KeepDims
Enable/disable keep dimensions. If true, the reduced dimensions are retained with length 1.
Definition: Descriptors.hpp:1159
armnn::ViewsDescriptor::GetOrigins
const OriginsDescriptor & GetOrigins() const
Get the View Origins.
Definition: Descriptors.cpp:345
armnn::Pooling2dDescriptor::m_PadRight
uint32_t m_PadRight
Padding right value in the width dimension.
Definition: Descriptors.hpp:397
armnn::TransposeConvolution2dDescriptor::m_StrideX
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
Definition: Descriptors.hpp:1444
armnn::DetectionPostProcessDescriptor::m_ScaleY
float m_ScaleY
Center size encoding scale y.
Definition: Descriptors.hpp:749
armnn::DataType::QSymmS8
@ QSymmS8
armnn::QLstmDescriptor::m_CifgEnabled
bool m_CifgEnabled
Enable/disable CIFG (coupled input & forget gate).
Definition: Descriptors.hpp:1385
armnnSerializer::ISerializerPtr
std::unique_ptr< ISerializer, void(*)(ISerializer *serializer)> ISerializerPtr
Definition: ISerializer.hpp:15
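A minimal usage sketch (not part of this file; the include path and output file name are illustrative): Create() hands back an ISerializerPtr whose deleter is ISerializer::Destroy, and the serialized network can then be written to any std::ostream.
#include <armnn/INetwork.hpp>
#include <armnnSerializer/ISerializer.hpp>
#include <fstream>

void SaveNetwork(const armnn::INetwork& network)
{
    // Obtain a serializer handle with the custom deleter baked in.
    armnnSerializer::ISerializerPtr serializer = armnnSerializer::ISerializer::Create();

    // Build the FlatBuffer representation of the network.
    serializer->Serialize(network);

    // Stream the serialized buffer out; "network.armnn" is just an example name.
    std::ofstream file("network.armnn", std::ios::binary);
    serializer->SaveSerializedToStream(file);
}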
Descriptors.hpp
armnnSerializer::GetFlatBufferReduceOperation
armnnSerializer::ReduceOperation GetFlatBufferReduceOperation(armnn::ReduceOperation reduceOperation)
Definition: SerializerUtils.cpp:250
armnnSerializer::GetFlatBufferDataLayout
armnnSerializer::DataLayout GetFlatBufferDataLayout(armnn::DataLayout dataLayout)
Definition: SerializerUtils.cpp:94
armnn::DetectionPostProcessDescriptor::m_MaxClassesPerDetection
uint32_t m_MaxClassesPerDetection
Maximum number of classes per detection, used in Fast NMS.
Definition: Descriptors.hpp:735
armnn::StridedSliceDescriptor::m_EndMask
int32_t m_EndMask
End mask value.
Definition: Descriptors.hpp:1320
armnn::DepthwiseConvolution2dDescriptor::m_PadBottom
uint32_t m_PadBottom
Padding bottom value in the height dimension.
Definition: Descriptors.hpp:686
armnn::InstanceNormalizationDescriptor::m_Gamma
float m_Gamma
Gamma, the scale scalar value applied for the normalized tensor. Defaults to 1.0.
Definition: Descriptors.hpp:853
armnn::IConnectableLayer::GetName
virtual const char * GetName() const =0
Returns the name of the layer.
armnn::LayerType::Concat
@ Concat
NumericCast.hpp
armnn::NormalizationDescriptor::m_NormChannelType
NormalizationAlgorithmChannel m_NormChannelType
Normalization channel algorithm to use (Across, Within).
Definition: Descriptors.hpp:781
armnn::Convolution3dDescriptor::m_BiasEnabled
bool m_BiasEnabled
Enable/disable bias.
Definition: Descriptors.hpp:641
armnn::LstmDescriptor::m_ActivationFunc
uint32_t m_ActivationFunc
The activation function to use.
Definition: Descriptors.hpp:1107
armnn::LstmDescriptor::m_ClippingThresProj
float m_ClippingThresProj
Clipping threshold value for the projection.
Definition: Descriptors.hpp:1111
armnn::ArgMinMaxDescriptor
An ArgMinMaxDescriptor for ArgMinMaxLayer.
Definition: Descriptors.hpp:67
armnn::DataType::QSymmS16
@ QSymmS16
armnn::QLstmDescriptor::m_PeepholeEnabled
bool m_PeepholeEnabled
Enable/disable peephole.
Definition: Descriptors.hpp:1387
armnn::BatchNormalizationDescriptor::m_Eps
float m_Eps
Value to add to the variance. Used to avoid dividing by zero.
Definition: Descriptors.hpp:829
armnn::Pooling3dDescriptor::m_PoolHeight
uint32_t m_PoolHeight
Pooling height value.
Definition: Descriptors.hpp:477
armnn::ActivationDescriptor::m_B
float m_B
Beta lower bound value used by the activation functions. (BoundedReLu, Linear, TanH).
Definition: Descriptors.hpp:63
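Taken together, m_Function, m_A and m_B configure the activation. As an illustrative sketch, a BoundedReLu clamping its input to [0, 6] (i.e. ReLu6) would be set up like this:
// Illustrative only: ReLu6 as a bounded ReLu, computing min(m_A, max(m_B, x)).
armnn::ActivationDescriptor activation;
activation.m_Function = armnn::ActivationFunction::BoundedReLu;
activation.m_A = 6.0f; // upper bound
activation.m_B = 0.0f; // lower bound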
armnn::LayerType::Cast
@ Cast
armnn::IConnectableLayer::GetType
virtual LayerType GetType() const =0
Returns the armnn::LayerType of this layer.
armnn::DepthwiseConvolution2dDescriptor::m_StrideY
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
Definition: Descriptors.hpp:690
armnn::ActivationFunction::ReLu
@ ReLu
IgnoreUnused.hpp
armnn::LayerType::BatchMatMul
@ BatchMatMul
armnn::ActivationFunction::Sqrt
@ Sqrt
armnn::TransposeConvolution2dDescriptor::m_StrideY
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
Definition: Descriptors.hpp:1446
armnn::Convolution3dDescriptor::m_PadBack
uint32_t m_PadBack
Padding back value in the depth dimension.
Definition: Descriptors.hpp:627
armnn::Pooling2dDescriptor::m_PaddingMethod
PaddingMethod m_PaddingMethod
The padding method to be used. (Exclude, IgnoreValue).
Definition: Descriptors.hpp:413
armnn::LayerType::Convolution3d
@ Convolution3d
armnn::LayerType::Splitter
@ Splitter
armnn::ReduceDescriptor::m_KeepDims
bool m_KeepDims
If true, the output rank is unchanged (reduced dimensions are kept with length 1).
Definition: Descriptors.hpp:1521
armnn::Convolution2dDescriptor::m_StrideY
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
Definition: Descriptors.hpp:564
armnn::LayerType::LogSoftmax
@ LogSoftmax
armnn::TensorInfo::GetQuantizationScales
std::vector< float > GetQuantizationScales() const
Definition: Tensor.cpp:451
armnn::ResizeDescriptor::m_HalfPixelCenters
bool m_HalfPixelCenters
Half Pixel Centers.
Definition: Descriptors.hpp:985
armnn::DetectionPostProcessDescriptor::m_NmsIouThreshold
float m_NmsIouThreshold
Intersection over union threshold.
Definition: Descriptors.hpp:741
armnn::LayerType::Output
@ Output
armnn::IConnectableLayer::GetNumInputSlots
virtual unsigned int GetNumInputSlots() const =0
Returns the number of connectable input slots.
armnn::ReshapeDescriptor::m_TargetShape
TensorShape m_TargetShape
Target shape value.
Definition: Descriptors.hpp:1006
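As an illustrative sketch, flattening a [1, 2, 3] tensor into [1, 6] only requires setting the target shape:
// Illustrative only: reshape to a 1x6 tensor.
armnn::ReshapeDescriptor reshape;
reshape.m_TargetShape = armnn::TensorShape({1, 6});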
armnn::DataType::Boolean
@ Boolean
armnn::BatchMatMulDescriptor::m_AdjointY
bool m_AdjointY
Definition: Descriptors.hpp:1585
LayerGuid
arm::pipe::ProfilingGuid LayerGuid
Define LayerGuid type.
Definition: Types.hpp:26
armnn::BaseTensor::GetMemoryArea
MemoryType GetMemoryArea() const
Definition: Tensor.hpp:305
armnn::InvalidArgumentException
Definition: Exceptions.hpp:80
armnn::LayerType::Multiplication
@ Multiplication
armnn::BatchMatMulDescriptor::m_AdjointX
bool m_AdjointX
Adjoint the slices of each input tensor. Transpose and Adjoint cannot both be set to true for the sam...
Definition: Descriptors.hpp:1584
armnn::Pooling3dDescriptor::m_DataLayout
DataLayout m_DataLayout
The data layout to be used (NCDHW, NDHWC).
Definition: Descriptors.hpp:491
armnn::StackDescriptor::m_Axis
uint32_t m_Axis
0-based axis along which to stack the input tensors.
Definition: Descriptors.hpp:1240
armnn::LayerType::Prelu
@ Prelu
armnn::QLstmDescriptor::m_ProjectionEnabled
bool m_ProjectionEnabled
Enable/disable the projection layer.
Definition: Descriptors.hpp:1389
armnn::InstanceNormalizationDescriptor
An InstanceNormalizationDescriptor for InstanceNormalizationLayer.
Definition: Descriptors.hpp:835
armnn::ActivationFunction::BoundedReLu
@ BoundedReLu
min(a, max(b, input)); covers ReLu1 and ReLu6.
armnn::NormalizationDescriptor::m_Alpha
float m_Alpha
Alpha value for the normalization equation.
Definition: Descriptors.hpp:787
armnn::DetectionPostProcessDescriptor::m_UseRegularNms
bool m_UseRegularNms
Use Regular NMS.
Definition: Descriptors.hpp:745
armnn::LayerType::Dequantize
@ Dequantize
armnn::ElementwiseBinaryDescriptor
A ElementwiseBinaryDescriptor for the ElementwiseBinaryLayer.
Definition: Descriptors.hpp:109
armnnSerializer::GetFlatBufferNormalizationAlgorithmMethod
armnnSerializer::NormalizationAlgorithmMethod GetFlatBufferNormalizationAlgorithmMethod(armnn::NormalizationAlgorithmMethod normalizationAlgorithmMethod)
Definition: SerializerUtils.cpp:223
armnn::Convolution3dDescriptor::m_PadBottom
uint32_t m_PadBottom
Padding bottom value in the height dimension.
Definition: Descriptors.hpp:623
armnn::TensorInfo::GetDataType
DataType GetDataType() const
Definition: Tensor.hpp:198
armnn::BaseTensor::GetInfo
const TensorInfo & GetInfo() const
Definition: Tensor.hpp:295
armnn::StridedSliceDescriptor::m_EllipsisMask
int32_t m_EllipsisMask
Ellipsis mask value.
Definition: Descriptors.hpp:1324
armnn::ViewsDescriptor::GetViewSizes
const uint32_t * GetViewSizes(uint32_t idx) const
Get the view sizes at index idx.
Definition: Descriptors.cpp:340
armnn::InstanceNormalizationDescriptor::m_Beta
float m_Beta
Beta, the offset scalar value applied for the normalized tensor. Defaults to 0.0.
Definition: Descriptors.hpp:855
armnn::ResizeDescriptor::m_TargetWidth
uint32_t m_TargetWidth
Target width value.
Definition: Descriptors.hpp:974
armnn::SpaceToBatchNdDescriptor
A SpaceToBatchNdDescriptor for the SpaceToBatchNdLayer.
Definition: Descriptors.hpp:1010
armnn::Pooling2dDescriptor::m_OutputShapeRounding
OutputShapeRounding m_OutputShapeRounding
The rounding method for the output shape. (Floor, Ceiling).
Definition: Descriptors.hpp:411
armnn::ActivationFunction::LeakyReLu
@ LeakyReLu
armnn::ActivationFunction
ActivationFunction
Definition: Types.hpp:86