ArmNN 22.11 - Serializer.cpp
1 //
2 // Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 #include "Serializer.hpp"
6 #include "SerializerUtils.hpp"
7 
8 #include <armnn/Descriptors.hpp>
9 #include <armnn/LstmParams.hpp>
10 #include <armnn/QuantizedLstmParams.hpp>
11 #include <armnn/utility/IgnoreUnused.hpp>
12 #include <armnn/utility/NumericCast.hpp>
13 
14 #include <fmt/format.h>
15 #include <iostream>
16 
17 using namespace armnn;
18 namespace fb = flatbuffers;
19 namespace serializer = armnnSerializer;
20 
21 namespace armnnSerializer
22 {
23 
24 ISerializer::ISerializer() : pSerializerImpl(new SerializerImpl())
25 {
26 }
27 
28 ISerializer::~ISerializer() = default;
29 
30 ISerializer* ISerializer::CreateRaw()
31 {
32  return new ISerializer();
33 }
34 
35 ISerializerPtr ISerializer::Create()
36 {
37  return ISerializerPtr(CreateRaw(), &ISerializer::Destroy);
38 }
39 
40 void ISerializer::Destroy(ISerializer* serializer)
41 {
42  delete serializer;
43 }
44 
45 void ISerializer::Serialize(const armnn::INetwork& inNetwork)
46 {
47  pSerializerImpl->Serialize(inNetwork);
48 }
49 
50 bool ISerializer::SaveSerializedToStream(std::ostream& stream)
51 {
52  return pSerializerImpl->SaveSerializedToStream(stream);
53 }
54 
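// Typical use of the ISerializer facade above (an illustrative sketch, not part of this file):
// create a serializer, serialize a finished armnn::INetwork, then write the result to a stream.
//
//     armnnSerializer::ISerializerPtr serializer = armnnSerializer::ISerializer::Create();
//     serializer->Serialize(*network);                        // network: INetworkPtr built by the caller
//     std::ofstream file("model.armnn", std::ofstream::binary);
//     bool saved = serializer->SaveSerializedToStream(file);  // bool result of the write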
55 serializer::ActivationFunction GetFlatBufferActivationFunction(armnn::ActivationFunction function)
56 {
57  switch (function)
58  {
59  case armnn::ActivationFunction::Sigmoid:
60  return serializer::ActivationFunction::ActivationFunction_Sigmoid;
61  case armnn::ActivationFunction::TanH:
62  return serializer::ActivationFunction::ActivationFunction_TanH;
63  case armnn::ActivationFunction::Linear:
64  return serializer::ActivationFunction::ActivationFunction_Linear;
65  case armnn::ActivationFunction::ReLu:
66  return serializer::ActivationFunction::ActivationFunction_ReLu;
67  case armnn::ActivationFunction::BoundedReLu:
68  return serializer::ActivationFunction::ActivationFunction_BoundedReLu;
69  case armnn::ActivationFunction::LeakyReLu:
70  return serializer::ActivationFunction::ActivationFunction_LeakyReLu;
71  case armnn::ActivationFunction::Abs:
72  return serializer::ActivationFunction::ActivationFunction_Abs;
73  case armnn::ActivationFunction::Sqrt:
74  return serializer::ActivationFunction::ActivationFunction_Sqrt;
75  case armnn::ActivationFunction::Square:
76  return serializer::ActivationFunction::ActivationFunction_Square;
77  case armnn::ActivationFunction::Elu:
78  return serializer::ActivationFunction::ActivationFunction_Elu;
79  case armnn::ActivationFunction::HardSwish:
80  return serializer::ActivationFunction::ActivationFunction_HardSwish;
81  default:
82  return serializer::ActivationFunction::ActivationFunction_Sigmoid;
83  }
84 }
85 
86 serializer::ArgMinMaxFunction GetFlatBufferArgMinMaxFunction(armnn::ArgMinMaxFunction function)
87 {
88  switch (function)
89  {
90  case armnn::ArgMinMaxFunction::Max:
91  return serializer::ArgMinMaxFunction::ArgMinMaxFunction_Max;
92  case armnn::ArgMinMaxFunction::Min:
93  default:
94  return serializer::ArgMinMaxFunction::ArgMinMaxFunction_Min;
95  }
96 }
97 
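// GetSerializedId below maps a layer GUID to the compact id written into the FlatBuffer: the first
// GUID seen keeps the current m_layerId, each subsequent new GUID is assigned the next consecutive
// id, and repeated GUIDs return the id already recorded for them.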
98 uint32_t SerializerStrategy::GetSerializedId(LayerGuid guid)
99 {
100  if (m_guidMap.empty())
101  {
102  m_guidMap.insert(std::make_pair(guid, m_layerId));
103  }
104  else if (m_guidMap.find(guid) == m_guidMap.end())
105  {
106  ++m_layerId;
107  m_guidMap.insert(std::make_pair(guid, m_layerId));
108 
109  return m_layerId;
110  }
111  return m_guidMap[guid];
112 }
113 
114 // Build FlatBuffer for Input Layer
115 void SerializerStrategy::SerializeInputLayer(const armnn::IConnectableLayer* layer, LayerBindingId id, const char* name)
116 {
117  IgnoreUnused(name);
118 
119  // Create FlatBuffer BaseLayer
120  auto flatBufferInputBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Input);
121 
122  // Create FlatBuffer BindableBaseLayer
123  auto flatBufferInputBindableBaseLayer = serializer::CreateBindableLayerBase(m_flatBufferBuilder,
124  flatBufferInputBaseLayer,
125  id);
126  // Push layer binding id to inputIds.
127  m_inputIds.push_back(id);
128 
129  // Create the FlatBuffer InputLayer
130  auto flatBufferInputLayer = serializer::CreateInputLayer(m_flatBufferBuilder, flatBufferInputBindableBaseLayer);
131 
132  // Add the AnyLayer to the FlatBufferLayers
133  CreateAnyLayer(flatBufferInputLayer.o, serializer::Layer::Layer_InputLayer);
134 }
135 
136 // Build FlatBuffer for Output Layer
137 void SerializerStrategy::SerializeOutputLayer(const armnn::IConnectableLayer* layer,
138  LayerBindingId id, const char* name)
139 {
140  IgnoreUnused(name);
141 
142  // Create FlatBuffer BaseLayer
143  auto flatBufferOutputBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Output);
144 
145  // Create FlatBuffer BindableBaseLayer
146  auto flatBufferOutputBindableBaseLayer = serializer::CreateBindableLayerBase(m_flatBufferBuilder,
147  flatBufferOutputBaseLayer,
148  id);
149  // Push layer binding id to outputIds.
150  m_outputIds.push_back(id);
151 
152  // Create the FlatBuffer OutputLayer
153  auto flatBufferOutputLayer = serializer::CreateOutputLayer(m_flatBufferBuilder, flatBufferOutputBindableBaseLayer);
154  // Add the AnyLayer to the FlatBufferLayers
155  CreateAnyLayer(flatBufferOutputLayer.o, serializer::Layer::Layer_OutputLayer);
156 }
157 
158 // Build FlatBuffer for Activation Layer
159 void SerializerStrategy::SerializeActivationLayer(const armnn::IConnectableLayer* layer,
160  const armnn::ActivationDescriptor& descriptor,
161  const char* name)
162 {
163  IgnoreUnused(name);
164 
165  // Create FlatBuffer BaseLayer
166  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Activation);
167 
168  // Create the FlatBuffer ActivationDescriptor
169  auto flatBufferDescriptor = CreateActivationDescriptor(m_flatBufferBuilder,
170  GetFlatBufferActivationFunction(descriptor.m_Function),
171  descriptor.m_A,
172  descriptor.m_B);
173 
174  // Create the FlatBuffer ActivationLayer
175  auto flatBufferAdditionLayer = CreateActivationLayer(m_flatBufferBuilder,
176  flatBufferBaseLayer,
177  flatBufferDescriptor);
178 
179  // Add the AnyLayer to the FlatBufferLayers
180  CreateAnyLayer(flatBufferAdditionLayer.o, serializer::Layer::Layer_ActivationLayer);
181 }
182 
183 // Build FlatBuffer for Addition Layer
184 void SerializerStrategy::SerializeAdditionLayer(const armnn::IConnectableLayer* layer, const char* name)
185 {
186  IgnoreUnused(name);
187 
188  // Create FlatBuffer BaseLayer
189  auto flatBufferAdditionBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Addition);
190 
191  // Create the FlatBuffer AdditionLayer
192  auto flatBufferAdditionLayer = serializer::CreateAdditionLayer(m_flatBufferBuilder, flatBufferAdditionBaseLayer);
193 
194  // Add the AnyLayer to the FlatBufferLayers
195  CreateAnyLayer(flatBufferAdditionLayer.o, serializer::Layer::Layer_AdditionLayer);
196 }
197 
198 // Build FlatBuffer for ArgMinMax Layer
199 void SerializerStrategy::SerializeArgMinMaxLayer(const armnn::IConnectableLayer *layer,
200  const armnn::ArgMinMaxDescriptor& descriptor,
201  const char *name)
202 {
203  IgnoreUnused(name);
204 
205  // Create FlatBuffer BaseLayer
206  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_ArgMinMax);
207 
208  // Create FlatBuffer Descriptor
209  auto flatBufferDescriptor = CreateArgMinMaxDescriptor(m_flatBufferBuilder,
210  GetFlatBufferArgMinMaxFunction(descriptor.m_Function),
211  descriptor.m_Axis);
212 
213  // Create FlatBuffer ArgMinMaxLayer
214  auto flatBufferLayer = CreateArgMinMaxLayer(m_flatBufferBuilder,
215  flatBufferBaseLayer,
216  flatBufferDescriptor);
217 
218  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_ArgMinMaxLayer);
219 }
220 
221 void SerializerStrategy::SerializeBatchMatMulLayer(const armnn::IConnectableLayer* layer,
222  const armnn::BatchMatMulDescriptor& descriptor,
223  const char* name)
224 {
225  IgnoreUnused(name);
226 
227  // Create FlatBuffer BaseLayer
228  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_BatchMatMul);
229 
230  // Create the FlatBuffer BatchMatMulDescriptor
231  auto flatBufferDescriptor = CreateBatchMatMulDescriptor(m_flatBufferBuilder,
232  descriptor.m_TransposeX,
233  descriptor.m_TransposeY,
234  descriptor.m_AdjointX,
235  descriptor.m_AdjointY,
236  GetFlatBufferDataLayout(descriptor.m_DataLayoutX),
237  GetFlatBufferDataLayout(descriptor.m_DataLayoutY));
238 
239  // Create the FlatBuffer BatchMatMulLayer
240  auto flatBufferBatchMatMulLayer = CreateBatchMatMulLayer(m_flatBufferBuilder,
241  flatBufferBaseLayer,
242  flatBufferDescriptor);
243 
244  // Add the AnyLayer to the FlatBufferLayers
245  CreateAnyLayer(flatBufferBatchMatMulLayer.o, serializer::Layer::Layer_BatchMatMulLayer);
246 }
247 
248 // Build FlatBuffer for BatchToSpaceNd Layer
249 void SerializerStrategy::SerializeBatchToSpaceNdLayer(const armnn::IConnectableLayer* layer,
250  const armnn::BatchToSpaceNdDescriptor& descriptor,
251  const char* name)
252 {
253  IgnoreUnused(name);
254 
255  // Create FlatBuffer BaseLayer
256  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_BatchToSpaceNd);
257 
258  std::vector<unsigned int> crops;
259  crops.reserve(descriptor.m_Crops.size() * 2);
260  for (auto& crop : descriptor.m_Crops)
261  {
262  crops.push_back(crop.first);
263  crops.push_back(crop.second);
264  }
265 
266  auto flatBufferDescriptor =
267  CreateBatchToSpaceNdDescriptor(m_flatBufferBuilder,
268  m_flatBufferBuilder.CreateVector(descriptor.m_BlockShape),
269  m_flatBufferBuilder.CreateVector(crops),
270  GetFlatBufferDataLayout(descriptor.m_DataLayout));
271 
272  auto flatBufferLayer = serializer::CreateBatchToSpaceNdLayer(m_flatBufferBuilder,
273  flatBufferBaseLayer,
274  flatBufferDescriptor);
275 
276  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_BatchToSpaceNdLayer);
277 }
278 
279 void SerializerStrategy::SerializeBatchNormalizationLayer(
280  const armnn::IConnectableLayer* layer,
281  const armnn::BatchNormalizationDescriptor& batchNormDescriptor,
282  const std::vector<armnn::ConstTensor>& constants,
283  const char* name)
284 {
285  IgnoreUnused(name);
286 
287  const armnn::ConstTensor& mean = constants[0];
288  const armnn::ConstTensor& variance = constants[1];
289  const armnn::ConstTensor& beta = constants[2];
290  const armnn::ConstTensor& gamma = constants[3];
291 
292  auto fbBatchNormalizationBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_BatchNormalization);
293  auto fbBatchNormalizationDescriptor = serializer::CreateBatchNormalizationDescriptor(
294  m_flatBufferBuilder,
295  batchNormDescriptor.m_Eps,
296  GetFlatBufferDataLayout(batchNormDescriptor.m_DataLayout));
297 
298  auto fbMeanConstTensorInfo = CreateConstTensorInfo(mean);
299  auto fbVarianceConstTensorInfo = CreateConstTensorInfo(variance);
300  auto fbBetaConstTensorInfo = CreateConstTensorInfo(beta);
301  auto fbGammaConstTensorInfo = CreateConstTensorInfo(gamma);
302  auto fbBatchNormalizationLayer = serializer::CreateBatchNormalizationLayer(m_flatBufferBuilder,
303  fbBatchNormalizationBaseLayer,
304  fbBatchNormalizationDescriptor,
305  fbMeanConstTensorInfo,
306  fbVarianceConstTensorInfo,
307  fbBetaConstTensorInfo,
308  fbGammaConstTensorInfo);
309 
310  CreateAnyLayer(fbBatchNormalizationLayer.o, serializer::Layer::Layer_BatchNormalizationLayer);
311 }
312 
313 void SerializerStrategy::SerializeCastLayer(const armnn::IConnectableLayer* layer,
314  const char* name)
315 {
316  IgnoreUnused(name);
317 
318  auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Cast);
319  auto fbCastLayer = serializer::CreateCastLayer(m_flatBufferBuilder, fbBaseLayer);
320  CreateAnyLayer(fbCastLayer.o, serializer::Layer::Layer_CastLayer);
321 }
322 
323 void SerializerStrategy::SerializeChannelShuffleLayer(const armnn::IConnectableLayer* layer,
324  const armnn::ChannelShuffleDescriptor& descriptor,
325  const char* name)
326 {
327  IgnoreUnused(name);
328  auto fbDescriptor = CreateChannelShuffleDescriptor(m_flatBufferBuilder,
329  descriptor.m_Axis,
330  descriptor.m_NumGroups);
331  auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_ChannelShuffle);
332  auto fbChannelShuffleLayer = serializer::CreateChannelShuffleLayer(m_flatBufferBuilder, fbBaseLayer, fbDescriptor);
333  CreateAnyLayer(fbChannelShuffleLayer.o, serializer::Layer::Layer_ChannelShuffleLayer);
334 }
335 
336 void SerializerStrategy::SerializeComparisonLayer(const armnn::IConnectableLayer* layer,
337  const armnn::ComparisonDescriptor& descriptor,
338  const char* name)
339 {
340  IgnoreUnused(name);
341 
342  auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Comparison);
343  auto fbDescriptor = serializer::CreateComparisonDescriptor(
344  m_flatBufferBuilder,
345  GetFlatBufferComparisonOperation(descriptor.m_Operation));
346 
347  auto fbLayer = serializer::CreateComparisonLayer(m_flatBufferBuilder, fbBaseLayer, fbDescriptor);
348  CreateAnyLayer(fbLayer.o, serializer::Layer::Layer_ComparisonLayer);
349 }
350 
351 // Build FlatBuffer for Constant Layer
352 void SerializerStrategy::SerializeConstantLayer(const armnn::IConnectableLayer* layer,
353  const std::vector<armnn::ConstTensor>& constants,
354  const char* name)
355 {
356  IgnoreUnused(name);
357 
358  armnn::ConstTensor input = constants[0];
359 
360  // Create FlatBuffer BaseLayer
361  auto flatBufferConstantBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Constant);
362 
363  auto flatBufferConstTensorInfo = CreateConstTensorInfo(input);
364 
365  // Create the FlatBuffer ConstantLayer
366  auto flatBufferLayer = CreateConstantLayer(m_flatBufferBuilder,
367  flatBufferConstantBaseLayer,
368  flatBufferConstTensorInfo);
369 
370  // Add the AnyLayer to the FlatBufferLayers
371  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_ConstantLayer);
372 }
373 
374 // Build FlatBuffer for Convolution2dLayer
375 void SerializerStrategy::SerializeConvolution2dLayer(const armnn::IConnectableLayer* layer,
376  const armnn::Convolution2dDescriptor& descriptor,
377  const char* name)
378 {
379  IgnoreUnused(name);
380 
381  // Create FlatBuffer BaseLayer
382  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Convolution2d);
383 
384  auto flatBufferDescriptor = CreateConvolution2dDescriptor(m_flatBufferBuilder,
385  descriptor.m_PadLeft,
386  descriptor.m_PadRight,
387  descriptor.m_PadTop,
388  descriptor.m_PadBottom,
389  descriptor.m_StrideX,
390  descriptor.m_StrideY,
391  descriptor.m_DilationX,
392  descriptor.m_DilationY,
393  descriptor.m_BiasEnabled,
394  GetFlatBufferDataLayout(descriptor.m_DataLayout));
395 
396  // Create the FlatBuffer Convolution2dLayer
397  auto flatBufferLayer = CreateConvolution2dLayer(m_flatBufferBuilder,
398  flatBufferBaseLayer,
399  flatBufferDescriptor);
400 
401  // Add the AnyLayer to the FlatBufferLayers
402  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_Convolution2dLayer);
403 }
404 
405 // Build FlatBuffer for Convolution3dLayer
406 void SerializerStrategy::SerializeConvolution3dLayer(const armnn::IConnectableLayer* layer,
407  const armnn::Convolution3dDescriptor& descriptor,
408  const char* name)
409 {
410  IgnoreUnused(name);
411 
412  // Create FlatBuffer BaseLayer
413  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Convolution3d);
414 
415  auto flatBufferDescriptor = CreateConvolution3dDescriptor(m_flatBufferBuilder,
416  descriptor.m_PadLeft,
417  descriptor.m_PadRight,
418  descriptor.m_PadTop,
419  descriptor.m_PadBottom,
420  descriptor.m_PadFront,
421  descriptor.m_PadBack,
422  descriptor.m_StrideX,
423  descriptor.m_StrideY,
424  descriptor.m_StrideZ,
425  descriptor.m_DilationX,
426  descriptor.m_DilationY,
427  descriptor.m_DilationZ,
428  descriptor.m_BiasEnabled,
429  GetFlatBufferDataLayout(descriptor.m_DataLayout));
430 
431  // Create the FlatBuffer Convolution3dLayer
432  auto flatBufferLayer = CreateConvolution3dLayer(m_flatBufferBuilder,
433  flatBufferBaseLayer,
434  flatBufferDescriptor);
435 
436  // Add the AnyLayer to the FlatBufferLayers
437  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_Convolution3dLayer);
438 }
439 
440 void SerializerStrategy::SerializeDepthToSpaceLayer(const armnn::IConnectableLayer* layer,
441  const armnn::DepthToSpaceDescriptor& descriptor,
442  const char* name)
443 {
444  IgnoreUnused(name);
445 
446  auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_DepthToSpace);
447  auto fbDescriptor = CreateDepthToSpaceDescriptor(m_flatBufferBuilder,
448  descriptor.m_BlockSize,
449  GetFlatBufferDataLayout(descriptor.m_DataLayout));
450 
451  auto fbLayer = serializer::CreateDepthToSpaceLayer(m_flatBufferBuilder, fbBaseLayer, fbDescriptor);
452 
453  CreateAnyLayer(fbLayer.o, serializer::Layer::Layer_DepthToSpaceLayer);
454 }
455 
456 void SerializerStrategy::SerializeDepthwiseConvolution2dLayer(const armnn::IConnectableLayer* layer,
457  const armnn::DepthwiseConvolution2dDescriptor& descriptor,
458  const char* name)
459 {
460  IgnoreUnused(name);
461 
462  auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_DepthwiseConvolution2d);
463  auto fbDescriptor = CreateDepthwiseConvolution2dDescriptor(m_flatBufferBuilder,
464  descriptor.m_PadLeft,
465  descriptor.m_PadRight,
466  descriptor.m_PadTop,
467  descriptor.m_PadBottom,
468  descriptor.m_StrideX,
469  descriptor.m_StrideY,
470  descriptor.m_DilationX,
471  descriptor.m_DilationY,
472  descriptor.m_BiasEnabled,
473  GetFlatBufferDataLayout(descriptor.m_DataLayout));
474 
475  auto flatBufferLayer = CreateDepthwiseConvolution2dLayer(m_flatBufferBuilder,
476  fbBaseLayer,
477  fbDescriptor);
478 
479  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_DepthwiseConvolution2dLayer);
480 }
481 
482 void SerializerStrategy::SerializeDequantizeLayer(const armnn::IConnectableLayer* layer,
483  const char* name)
484 {
485  IgnoreUnused(name);
486 
487  auto fbDequantizeBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Dequantize);
488  auto fbDequantizeLayer = serializer::CreateDequantizeLayer(m_flatBufferBuilder, fbDequantizeBaseLayer);
489 
490  CreateAnyLayer(fbDequantizeLayer.o, serializer::Layer::Layer_DequantizeLayer);
491 }
492 
493 void SerializerStrategy::SerializeDetectionPostProcessLayer(const armnn::IConnectableLayer* layer,
494  const armnn::DetectionPostProcessDescriptor& descriptor,
495  const std::vector<armnn::ConstTensor>& constants,
496  const char* name)
497 {
498  IgnoreUnused(name);
499 
500  const armnn::ConstTensor& anchors = constants[0];
501 
502  auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_DetectionPostProcess);
503  auto fbDescriptor = CreateDetectionPostProcessDescriptor(m_flatBufferBuilder,
504  descriptor.m_MaxDetections,
505  descriptor.m_MaxClassesPerDetection,
506  descriptor.m_DetectionsPerClass,
507  descriptor.m_NmsScoreThreshold,
508  descriptor.m_NmsIouThreshold,
509  descriptor.m_NumClasses,
510  descriptor.m_UseRegularNms,
511  descriptor.m_ScaleX,
512  descriptor.m_ScaleY,
513  descriptor.m_ScaleW,
514  descriptor.m_ScaleH);
515 
516  flatbuffers::Offset<serializer::ConstTensor> fbAnchorsConstTensorInfo = CreateConstTensorInfo(anchors);
517 
518  auto flatBufferLayer = CreateDetectionPostProcessLayer(m_flatBufferBuilder,
519  fbBaseLayer,
520  fbDescriptor,
521  fbAnchorsConstTensorInfo);
522 
523  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_DetectionPostProcessLayer);
524 }
525 
526 void SerializerStrategy::SerializeDivisionLayer(const armnn::IConnectableLayer* layer, const char* name)
527 {
528  IgnoreUnused(name);
529 
530  auto fbDivisionBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Division);
531  auto fbDivisionLayer = serializer::CreateDivisionLayer(m_flatBufferBuilder, fbDivisionBaseLayer);
532 
533  CreateAnyLayer(fbDivisionLayer.o, serializer::Layer::Layer_DivisionLayer);
534 }
535 
536 void SerializerStrategy::SerializeElementwiseUnaryLayer(const armnn::IConnectableLayer* layer,
537  const armnn::ElementwiseUnaryDescriptor& descriptor,
538  const char* name)
539 {
540  IgnoreUnused(name);
541 
542  auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_ElementwiseUnary);
543  auto fbDescriptor = serializer::CreateElementwiseUnaryDescriptor(
544  m_flatBufferBuilder,
545  GetFlatBufferUnaryOperation(descriptor.m_Operation));
546 
547  auto fbLayer = serializer::CreateElementwiseUnaryLayer(m_flatBufferBuilder, fbBaseLayer, fbDescriptor);
548  CreateAnyLayer(fbLayer.o, serializer::Layer::Layer_ElementwiseUnaryLayer);
549 }
550 
551 void SerializerStrategy::SerializeFillLayer(const armnn::IConnectableLayer* layer,
552  const armnn::FillDescriptor& fillDescriptor,
553  const char* name)
554 {
555  IgnoreUnused(name);
556 
557  auto fbFillBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Fill);
558 
559  auto fbDescriptor = serializer::CreateFillDescriptor(m_flatBufferBuilder, fillDescriptor.m_Value);
560 
561  auto fbFillLayer = serializer::CreateFillLayer(m_flatBufferBuilder, fbFillBaseLayer, fbDescriptor);
562 
563  CreateAnyLayer(fbFillLayer.o, serializer::Layer::Layer_FillLayer);
564 }
565 
566 void SerializerStrategy::SerializeFloorLayer(const armnn::IConnectableLayer *layer, const char *name)
567 {
568  IgnoreUnused(name);
569 
570  auto flatBufferFloorBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Floor);
571  auto flatBufferFloorLayer = serializer::CreateFloorLayer(m_flatBufferBuilder, flatBufferFloorBaseLayer);
572 
573  CreateAnyLayer(flatBufferFloorLayer.o, serializer::Layer::Layer_FloorLayer);
574 }
575 
576 void SerializerStrategy::SerializeGatherLayer(const armnn::IConnectableLayer* layer,
577  const armnn::GatherDescriptor& gatherDescriptor,
578  const char* name)
579 {
580  IgnoreUnused(name);
581 
582  auto fbGatherDescriptor = CreateGatherDescriptor(m_flatBufferBuilder,
583  gatherDescriptor.m_Axis);
584  auto fbGatherBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Gather);
585  auto flatBufferLayer = serializer::CreateGatherLayer(m_flatBufferBuilder, fbGatherBaseLayer, fbGatherDescriptor);
586 
587  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_GatherLayer);
588 }
589 
590 void SerializerStrategy::SerializeGatherNdLayer(const armnn::IConnectableLayer* layer,
591  const char* name)
592 {
593  IgnoreUnused(name);
594 
595  auto fbGatherNdBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_GatherNd);
596  auto flatBufferLayer = serializer::CreateGatherNdLayer(m_flatBufferBuilder, fbGatherNdBaseLayer);
597 
598  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_GatherNdLayer);
599 }
600 
601 void SerializerStrategy::SerializeInstanceNormalizationLayer(
602  const armnn::IConnectableLayer* layer,
603  const armnn::InstanceNormalizationDescriptor& instanceNormalizationDescriptor,
604  const char* name)
605 {
606  IgnoreUnused(name);
607 
608  auto fbDescriptor = serializer::CreateInstanceNormalizationDescriptor(
609  m_flatBufferBuilder,
610  instanceNormalizationDescriptor.m_Gamma,
611  instanceNormalizationDescriptor.m_Beta,
612  instanceNormalizationDescriptor.m_Eps,
613  GetFlatBufferDataLayout(instanceNormalizationDescriptor.m_DataLayout));
614 
615  auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_InstanceNormalization);
616  auto fbLayer = serializer::CreateInstanceNormalizationLayer(m_flatBufferBuilder, fbBaseLayer, fbDescriptor);
617 
618  CreateAnyLayer(fbLayer.o, serializer::Layer::Layer_InstanceNormalizationLayer);
619 }
620 
621 void SerializerStrategy::SerializeL2NormalizationLayer(const armnn::IConnectableLayer* layer,
622  const armnn::L2NormalizationDescriptor& l2NormalizationDescriptor,
623  const char* name)
624 {
625  IgnoreUnused(name);
626 
627  // Create FlatBuffer BaseLayer
628  auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_L2Normalization);
629 
630  // Create the FlatBuffer L2Normalization Descriptor
631  auto fbDescriptor = serializer::CreateL2NormalizationDescriptor(
632  m_flatBufferBuilder,
633  GetFlatBufferDataLayout(l2NormalizationDescriptor.m_DataLayout),
634  l2NormalizationDescriptor.m_Eps);
635 
636  // Create FlatBuffer layer
637  auto fbLayer = serializer::CreateL2NormalizationLayer(m_flatBufferBuilder, fbBaseLayer, fbDescriptor);
638 
639  CreateAnyLayer(fbLayer.o, serializer::Layer::Layer_L2NormalizationLayer);
640 }
641 
642 void SerializerStrategy::SerializeLogicalBinaryLayer(const armnn::IConnectableLayer* layer,
643  const armnn::LogicalBinaryDescriptor& descriptor,
644  const char* name)
645 {
646  IgnoreUnused(name);
647 
648  auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_LogicalBinary);
649  auto fbDescriptor = serializer::CreateLogicalBinaryDescriptor(
650  m_flatBufferBuilder,
651  GetFlatBufferLogicalBinaryOperation(descriptor.m_Operation));
652 
653  auto fbLayer = serializer::CreateLogicalBinaryLayer(m_flatBufferBuilder, fbBaseLayer, fbDescriptor);
654  CreateAnyLayer(fbLayer.o, serializer::Layer::Layer_LogicalBinaryLayer);
655 }
656 
657 void SerializerStrategy::SerializeLogSoftmaxLayer(const armnn::IConnectableLayer* layer,
658  const armnn::LogSoftmaxDescriptor& logSoftmaxDescriptor,
659  const char* name)
660 {
661  IgnoreUnused(name);
662 
663  // Create FlatBuffer BaseLayer
664  auto flatBufferLogSoftmaxBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_LogSoftmax);
665 
666  // Create the FlatBuffer LogSoftmaxDescriptor
667  auto flatBufferLogSoftmaxDesc =
668  serializer::CreateLogSoftmaxDescriptor(m_flatBufferBuilder,
669  logSoftmaxDescriptor.m_Beta,
670  logSoftmaxDescriptor.m_Axis);
671 
672  // Create the FlatBuffer LogSoftmaxLayer
673  auto flatBufferLogSoftmaxLayer =
674  serializer::CreateLogSoftmaxLayer(m_flatBufferBuilder,
675  flatBufferLogSoftmaxBaseLayer,
676  flatBufferLogSoftmaxDesc);
677 
678  CreateAnyLayer(flatBufferLogSoftmaxLayer.o, serializer::Layer::Layer_LogSoftmaxLayer);
679 }
680 
681 void SerializerStrategy::SerializeLstmLayer(const armnn::IConnectableLayer* layer,
682  const armnn::LstmDescriptor& descriptor,
683  const std::vector<armnn::ConstTensor>& constants,
684  const char* name)
685 {
686  IgnoreUnused(name);
687 
688  auto fbLstmBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Lstm);
689 
690  auto fbLstmDescriptor = serializer::CreateLstmDescriptor(
691  m_flatBufferBuilder,
692  descriptor.m_ActivationFunc,
693  descriptor.m_ClippingThresCell,
694  descriptor.m_ClippingThresProj,
695  descriptor.m_CifgEnabled,
696  descriptor.m_PeepholeEnabled,
697  descriptor.m_ProjectionEnabled,
698  descriptor.m_LayerNormEnabled);
699 
700  // Index for constants vector
701  std::size_t i = 0;
702 
703  // Get mandatory/basic input parameters
704  auto inputToForgetWeights = CreateConstTensorInfo(constants[i++]); //InputToForgetWeights
705  auto inputToCellWeights = CreateConstTensorInfo(constants[i++]); //InputToCellWeights
706  auto inputToOutputWeights = CreateConstTensorInfo(constants[i++]); //InputToOutputWeights
707  auto recurrentToForgetWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToForgetWeights
708  auto recurrentToCellWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToCellWeights
709  auto recurrentToOutputWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToOutputWeights
710  auto forgetGateBias = CreateConstTensorInfo(constants[i++]); //ForgetGateBias
711  auto cellBias = CreateConstTensorInfo(constants[i++]); //CellBias
712  auto outputGateBias = CreateConstTensorInfo(constants[i++]); //OutputGateBias
713 
714 
715 
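 // Note: the constants vector is consumed in the fixed order used in this function - the nine
 // basic tensors above, then the CIFG, peephole, projection and layer-normalisation groups, each
 // read only when the corresponding flag in the LstmDescriptor enables it.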
716  // Define optional parameters; these will be set depending on the configuration in the Lstm descriptor
717  flatbuffers::Offset<serializer::ConstTensor> inputToInputWeights;
718  flatbuffers::Offset<serializer::ConstTensor> recurrentToInputWeights;
719  flatbuffers::Offset<serializer::ConstTensor> cellToInputWeights;
720  flatbuffers::Offset<serializer::ConstTensor> inputGateBias;
721  flatbuffers::Offset<serializer::ConstTensor> projectionWeights;
722  flatbuffers::Offset<serializer::ConstTensor> projectionBias;
723  flatbuffers::Offset<serializer::ConstTensor> cellToForgetWeights;
724  flatbuffers::Offset<serializer::ConstTensor> cellToOutputWeights;
725  flatbuffers::Offset<serializer::ConstTensor> inputLayerNormWeights;
726  flatbuffers::Offset<serializer::ConstTensor> forgetLayerNormWeights;
727  flatbuffers::Offset<serializer::ConstTensor> cellLayerNormWeights;
728  flatbuffers::Offset<serializer::ConstTensor> outputLayerNormWeights;
729 
730  if (!descriptor.m_CifgEnabled)
731  {
732  inputToInputWeights = CreateConstTensorInfo(constants[i++]); //InputToInputWeights
733  recurrentToInputWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToInputWeights
734  inputGateBias = CreateConstTensorInfo(constants[i++]); //InputGateBias
735  }
736 
737  if (descriptor.m_PeepholeEnabled)
738  {
739  if (!descriptor.m_CifgEnabled)
740  {
741  cellToInputWeights = CreateConstTensorInfo(constants[i++]); //CellToInputWeights
742  }
743  cellToForgetWeights = CreateConstTensorInfo(constants[i++]); //CellToForgetWeights
744  cellToOutputWeights = CreateConstTensorInfo(constants[i++]); //CellToOutputWeights
745  }
746 
747  if (descriptor.m_ProjectionEnabled)
748  {
749  projectionWeights = CreateConstTensorInfo(constants[i++]); //ProjectionWeights
750  projectionBias = CreateConstTensorInfo(constants[i++]); //ProjectionBias
751  }
752 
753  if (descriptor.m_LayerNormEnabled)
754  {
755  if (!descriptor.m_CifgEnabled)
756  {
757  inputLayerNormWeights = CreateConstTensorInfo(constants[i++]); //InputLayerNormWeights
758  }
759  forgetLayerNormWeights = CreateConstTensorInfo(constants[i++]); //ForgetLayerNormWeights
760  cellLayerNormWeights = CreateConstTensorInfo(constants[i++]); //CellLayerNormWeights
761  outputLayerNormWeights = CreateConstTensorInfo(constants[i++]); //OutputLayerNormWeights
762  }
763 
764  auto fbLstmParams = serializer::CreateLstmInputParams(
765  m_flatBufferBuilder,
766  inputToForgetWeights,
767  inputToCellWeights,
768  inputToOutputWeights,
769  recurrentToForgetWeights,
770  recurrentToCellWeights,
771  recurrentToOutputWeights,
772  forgetGateBias,
773  cellBias,
774  outputGateBias,
775  inputToInputWeights,
776  recurrentToInputWeights,
777  cellToInputWeights,
778  inputGateBias,
779  projectionWeights,
780  projectionBias,
781  cellToForgetWeights,
782  cellToOutputWeights,
783  inputLayerNormWeights,
784  forgetLayerNormWeights,
785  cellLayerNormWeights,
786  outputLayerNormWeights);
787 
788  auto fbLstmLayer = serializer::CreateLstmLayer(
789  m_flatBufferBuilder,
790  fbLstmBaseLayer,
791  fbLstmDescriptor,
792  fbLstmParams);
793 
794  CreateAnyLayer(fbLstmLayer.o, serializer::Layer::Layer_LstmLayer);
795 }
796 
797 void SerializerStrategy::SerializeMaximumLayer(const armnn::IConnectableLayer* layer, const char* name)
798 {
799  IgnoreUnused(name);
800 
801  auto fbMaximumBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Maximum);
802  auto fbMaximumLayer = serializer::CreateMaximumLayer(m_flatBufferBuilder, fbMaximumBaseLayer);
803 
804  CreateAnyLayer(fbMaximumLayer.o, serializer::Layer::Layer_MaximumLayer);
805 }
806 
807 void SerializerStrategy::SerializeMeanLayer(const armnn::IConnectableLayer* layer,
808  const armnn::MeanDescriptor& descriptor,
809  const char* name)
810 {
811  IgnoreUnused(name);
812 
813  auto fbMeanBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Mean);
814  auto fbMeanDescriptor = serializer::CreateMeanDescriptor(m_flatBufferBuilder,
815  m_flatBufferBuilder.CreateVector(descriptor.m_Axis),
816  descriptor.m_KeepDims);
817 
818  auto fbMeanLayer = serializer::CreateMeanLayer(m_flatBufferBuilder,
819  fbMeanBaseLayer,
820  fbMeanDescriptor);
821 
822  CreateAnyLayer(fbMeanLayer.o, serializer::Layer::Layer_MeanLayer);
823 }
824 
825 void SerializerStrategy::SerializeMinimumLayer(const armnn::IConnectableLayer* layer, const char* name)
826 {
827  IgnoreUnused(name);
828 
829  auto fbMinimumBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Minimum);
830  auto fbMinimumLayer = serializer::CreateMinimumLayer(m_flatBufferBuilder, fbMinimumBaseLayer);
831 
832  CreateAnyLayer(fbMinimumLayer.o, serializer::Layer::Layer_MinimumLayer);
833 }
834 
835 void SerializerStrategy::SerializeMergeLayer(const armnn::IConnectableLayer* layer, const char* name)
836 {
837  IgnoreUnused(name);
838 
839  auto fbMergeBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Merge);
840  auto fbMergeLayer = serializer::CreateMergeLayer(m_flatBufferBuilder, fbMergeBaseLayer);
841 
842  CreateAnyLayer(fbMergeLayer.o, serializer::Layer::Layer_MergeLayer);
843 }
844 
845 void SerializerStrategy::SerializeConcatLayer(const armnn::IConnectableLayer* layer,
846  const armnn::ConcatDescriptor& concatDescriptor,
847  const char* name)
848 {
849  IgnoreUnused(name);
850 
851  auto flatBufferConcatBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Concat);
852 
853  std::vector<flatbuffers::Offset<UintVector>> views;
854  for (unsigned int v = 0; v < concatDescriptor.GetNumViews(); ++v)
855  {
856  const uint32_t* origin = concatDescriptor.GetViewOrigin(v);
857  std::vector<uint32_t> origins;
858  for (unsigned int d = 0; d < concatDescriptor.GetNumDimensions(); ++d)
859  {
860  origins.push_back(origin[d]);
861  }
862  auto view = m_flatBufferBuilder.CreateVector(origins);
863  auto uintVector = CreateUintVector(m_flatBufferBuilder, view);
864  views.push_back(uintVector);
865  }
866 
867  auto flatBufferConcatDescriptor = CreateOriginsDescriptor(m_flatBufferBuilder,
868  concatDescriptor.GetConcatAxis(),
869  concatDescriptor.GetNumViews(),
870  concatDescriptor.GetNumDimensions(),
871  m_flatBufferBuilder.CreateVector(views));
872 
873  auto flatBufferLayer = CreateConcatLayer(m_flatBufferBuilder,
874  flatBufferConcatBaseLayer,
875  flatBufferConcatDescriptor);
876 
877  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_ConcatLayer);
878 }
879 
880 void SerializerStrategy::SerializeMultiplicationLayer(const armnn::IConnectableLayer* layer, const char* name)
881 {
882  IgnoreUnused(name);
883 
884  auto fbMultiplicationBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Multiplication);
885  auto fbMultiplicationLayer = serializer::CreateMultiplicationLayer(m_flatBufferBuilder,
886  fbMultiplicationBaseLayer);
887 
888  CreateAnyLayer(fbMultiplicationLayer.o, serializer::Layer::Layer_MultiplicationLayer);
889 }
890 
891 void SerializerStrategy::SerializePadLayer(const armnn::IConnectableLayer* layer,
892  const armnn::PadDescriptor& padDescriptor,
893  const char* name)
894 {
895  IgnoreUnused(name);
896 
897  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Pad);
898 
899  std::vector<unsigned int> padList;
900  for (auto& p: padDescriptor.m_PadList)
901  {
902  padList.push_back(p.first);
903  padList.push_back(p.second);
904  }
905 
906  auto flatBufferPadDesc = serializer::CreatePadDescriptor(m_flatBufferBuilder,
907  m_flatBufferBuilder.CreateVector(padList),
908  padDescriptor.m_PadValue,
909  GetFlatBufferPaddingMode(padDescriptor.m_PaddingMode));
910 
911  auto flatBufferPadLayer = serializer::CreatePadLayer(m_flatBufferBuilder,
912  flatBufferBaseLayer,
913  flatBufferPadDesc);
914 
915  CreateAnyLayer(flatBufferPadLayer.o, serializer::Layer::Layer_PadLayer);
916 }
917 
918 void SerializerStrategy::SerializePermuteLayer(const armnn::IConnectableLayer* layer,
919  const armnn::PermuteDescriptor& permuteDescriptor,
920  const char* name)
921 {
922  IgnoreUnused(name);
923 
924  // Create FlatBuffer BaseLayer
925  auto flatBufferPermuteBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Permute);
926 
927  std::vector<unsigned int> dimMappings;
928  for (unsigned int i=0; i<permuteDescriptor.m_DimMappings.GetSize(); ++i)
929  {
930  dimMappings.push_back(permuteDescriptor.m_DimMappings[i]);
931  }
932 
933  auto flatBufferPermuteDesc = serializer::CreatePermuteDescriptor(m_flatBufferBuilder,
934  m_flatBufferBuilder.CreateVector(dimMappings));
935 
936  // Create the FlatBuffer PermuteLayer
937  auto flatBufferPermuteLayer = serializer::CreatePermuteLayer(m_flatBufferBuilder,
938  flatBufferPermuteBaseLayer,
939  flatBufferPermuteDesc);
940 
941  // Add the AnyLayer to the FlatBufferLayers
942  CreateAnyLayer(flatBufferPermuteLayer.o, serializer::Layer::Layer_PermuteLayer);
943 }
944 
945 // Build FlatBuffer for Rank Layer
946 void SerializerStrategy::SerializeRankLayer(const armnn::IConnectableLayer* layer,
947  const char* name)
948 {
949  IgnoreUnused(name);
950  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Rank);
951  auto flatBufferRankLayer = serializer::CreateRankLayer(m_flatBufferBuilder, flatBufferBaseLayer);
952 
953  CreateAnyLayer(flatBufferRankLayer.o, serializer::Layer::Layer_RankLayer);
954 }
955 
956 void SerializerStrategy::SerializeReduceLayer(const armnn::IConnectableLayer* layer,
957  const armnn::ReduceDescriptor& reduceDescriptor,
958  const char*)
959 {
960  auto fbReduceBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Reduce);
961  auto fbDescriptor = CreateReduceDescriptor(m_flatBufferBuilder,
962  reduceDescriptor.m_KeepDims,
963  m_flatBufferBuilder.CreateVector(reduceDescriptor.m_vAxis),
964  GetFlatBufferReduceOperation(reduceDescriptor.m_ReduceOperation));
965  auto fbReduceLayer = serializer::CreateReduceLayer(m_flatBufferBuilder,
966  fbReduceBaseLayer,
967  fbDescriptor);
968 
969  CreateAnyLayer(fbReduceLayer.o, serializer::Layer::Layer_ReduceLayer);
970 }
971 
972 // Build FlatBuffer for Reshape Layer
973 void SerializerStrategy::SerializeReshapeLayer(const armnn::IConnectableLayer* layer,
974  const armnn::ReshapeDescriptor& reshapeDescriptor,
975  const char* name)
976 {
977  IgnoreUnused(name);
978 
979  // Create FlatBuffer BaseLayer
980  auto flatBufferReshapeBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Reshape);
981 
982  std::vector<unsigned int> targetShape;
983  for (unsigned int i =0; i < reshapeDescriptor.m_TargetShape.GetNumDimensions(); i++)
984  {
985  targetShape.push_back(reshapeDescriptor.m_TargetShape[i]);
986  }
987 
988  auto flatBufferReshapeDesc = serializer::CreateReshapeDescriptor(m_flatBufferBuilder,
989  m_flatBufferBuilder.CreateVector(targetShape));
990 
991  // Create the FlatBuffer ReshapeLayer
992  auto flatBufferReshapeLayer = serializer::CreateReshapeLayer(m_flatBufferBuilder, flatBufferReshapeBaseLayer,
993  flatBufferReshapeDesc);
994 
995  // Add the AnyLayer to the FlatBufferLayers
996  CreateAnyLayer(flatBufferReshapeLayer.o, serializer::Layer::Layer_ReshapeLayer);
997 }
998 
999 void SerializerStrategy::SerializeResizeLayer(const armnn::IConnectableLayer* layer,
1000  const armnn::ResizeDescriptor& resizeDescriptor,
1001  const char* name)
1002 {
1003  IgnoreUnused(name);
1004 
1005  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Resize);
1006 
1007  auto flatBufferDescriptor =
1008  CreateResizeDescriptor(m_flatBufferBuilder,
1009  resizeDescriptor.m_TargetHeight,
1010  resizeDescriptor.m_TargetWidth,
1011  GetFlatBufferResizeMethod(resizeDescriptor.m_Method),
1012  GetFlatBufferDataLayout(resizeDescriptor.m_DataLayout),
1013  resizeDescriptor.m_AlignCorners,
1014  resizeDescriptor.m_HalfPixelCenters);
1015 
1016  auto flatBufferLayer = serializer::CreateResizeLayer(m_flatBufferBuilder,
1017  flatBufferBaseLayer,
1018  flatBufferDescriptor);
1019 
1020  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_ResizeLayer);
1021 }
1022 
1023 void SerializerStrategy::SerializeSliceLayer(const armnn::IConnectableLayer* layer,
1024  const armnn::SliceDescriptor& sliceDescriptor,
1025  const char* name)
1026 {
1027  IgnoreUnused(name);
1028 
1029  auto fbSliceBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Slice);
1030  auto fbSliceDescriptor = CreateSliceDescriptor(m_flatBufferBuilder,
1031  m_flatBufferBuilder.CreateVector(sliceDescriptor.m_Begin),
1032  m_flatBufferBuilder.CreateVector(sliceDescriptor.m_Size));
1033 
1034  auto fbSliceLayer = serializer::CreateSliceLayer(m_flatBufferBuilder, fbSliceBaseLayer, fbSliceDescriptor);
1035 
1036  CreateAnyLayer(fbSliceLayer.o, serializer::Layer::Layer_SliceLayer);
1037 }
1038 
1039 // Build FlatBuffer for Softmax Layer
1040 void SerializerStrategy::SerializeSoftmaxLayer(const armnn::IConnectableLayer* layer,
1041  const armnn::SoftmaxDescriptor& softmaxDescriptor,
1042  const char* name)
1043 {
1044  IgnoreUnused(name);
1045 
1046  // Create FlatBuffer BaseLayer
1047  auto flatBufferSoftmaxBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Softmax);
1048 
1049  // Create the FlatBuffer SoftmaxDescriptor
1050  auto flatBufferSoftmaxDesc =
1051  serializer::CreateSoftmaxDescriptor(m_flatBufferBuilder,
1052  softmaxDescriptor.m_Beta,
1053  softmaxDescriptor.m_Axis);
1054 
1055  // Create the FlatBuffer SoftmaxLayer
1056  auto flatBufferSoftmaxLayer =
1057  serializer::CreateSoftmaxLayer(m_flatBufferBuilder,
1058  flatBufferSoftmaxBaseLayer,
1059  flatBufferSoftmaxDesc);
1060 
1061  CreateAnyLayer(flatBufferSoftmaxLayer.o, serializer::Layer::Layer_SoftmaxLayer);
1062 }
1063 
1064 void SerializerStrategy::SerializePooling2dLayer(const armnn::IConnectableLayer* layer,
1065  const armnn::Pooling2dDescriptor& pooling2dDescriptor,
1066  const char* name)
1067 {
1068  IgnoreUnused(name);
1069 
1070  auto fbPooling2dBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Pooling2d);
1071  auto fbPooling2dDescriptor = serializer::CreatePooling2dDescriptor(
1072  m_flatBufferBuilder,
1073  GetFlatBufferPoolingAlgorithm(pooling2dDescriptor.m_PoolType),
1074  pooling2dDescriptor.m_PadLeft,
1075  pooling2dDescriptor.m_PadRight,
1076  pooling2dDescriptor.m_PadTop,
1077  pooling2dDescriptor.m_PadBottom,
1078  pooling2dDescriptor.m_PoolWidth,
1079  pooling2dDescriptor.m_PoolHeight,
1080  pooling2dDescriptor.m_StrideX,
1081  pooling2dDescriptor.m_StrideY,
1082  GetFlatBufferOutputShapeRounding(pooling2dDescriptor.m_OutputShapeRounding),
1083  GetFlatBufferPaddingMethod(pooling2dDescriptor.m_PaddingMethod),
1084  GetFlatBufferDataLayout(pooling2dDescriptor.m_DataLayout));
1085 
1086  auto fbPooling2dLayer = serializer::CreatePooling2dLayer(m_flatBufferBuilder,
1087  fbPooling2dBaseLayer,
1088  fbPooling2dDescriptor);
1089 
1090  CreateAnyLayer(fbPooling2dLayer.o, serializer::Layer::Layer_Pooling2dLayer);
1091 }
1092 
1093 void SerializerStrategy::SerializePooling3dLayer(const armnn::IConnectableLayer* layer,
1094  const armnn::Pooling3dDescriptor& pooling3dDescriptor,
1095  const char* name)
1096 {
1097  IgnoreUnused(name);
1098 
1099  auto fbPooling3dBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Pooling3d);
1100  auto fbPooling3dDescriptor = serializer::CreatePooling3dDescriptor(
1101  m_flatBufferBuilder,
1102  GetFlatBufferPoolingAlgorithm(pooling3dDescriptor.m_PoolType),
1103  pooling3dDescriptor.m_PadLeft,
1104  pooling3dDescriptor.m_PadRight,
1105  pooling3dDescriptor.m_PadTop,
1106  pooling3dDescriptor.m_PadBottom,
1107  pooling3dDescriptor.m_PadFront,
1108  pooling3dDescriptor.m_PadBack,
1109  pooling3dDescriptor.m_PoolWidth,
1110  pooling3dDescriptor.m_PoolHeight,
1111  pooling3dDescriptor.m_PoolDepth,
1112  pooling3dDescriptor.m_StrideX,
1113  pooling3dDescriptor.m_StrideY,
1114  pooling3dDescriptor.m_StrideZ,
1115  GetFlatBufferOutputShapeRounding(pooling3dDescriptor.m_OutputShapeRounding),
1116  GetFlatBufferPaddingMethod(pooling3dDescriptor.m_PaddingMethod),
1117  GetFlatBufferDataLayout(pooling3dDescriptor.m_DataLayout));
1118 
1119  auto fbPooling3dLayer = serializer::CreatePooling3dLayer(m_flatBufferBuilder,
1120  fbPooling3dBaseLayer,
1121  fbPooling3dDescriptor);
1122 
1123  CreateAnyLayer(fbPooling3dLayer.o, serializer::Layer::Layer_Pooling3dLayer);
1124 }
1125 
1126 void SerializerStrategy::SerializePreluLayer(const armnn::IConnectableLayer* layer,
1127  const char* name)
1128 {
1129  IgnoreUnused(name);
1130 
1131  // Create FlatBuffer BaseLayer
1132  auto flatBufferPreluBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Prelu);
1133 
1134  // Create the FlatBuffer PreluLayer
1135  auto flatBufferPreluLayer = serializer::CreatePreluLayer(m_flatBufferBuilder, flatBufferPreluBaseLayer);
1136 
1137  // Add the AnyLayer to the FlatBufferLayers
1138  CreateAnyLayer(flatBufferPreluLayer.o, serializer::Layer::Layer_PreluLayer);
1139 }
1140 
1141 void SerializerStrategy::SerializeQuantizeLayer(const armnn::IConnectableLayer *layer, const char *name)
1142 {
1143  IgnoreUnused(name);
1144 
1145  auto fbQuantizeBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Quantize);
1146  auto fbQuantizeLayer = serializer::CreateQuantizeLayer(m_flatBufferBuilder,
1147  fbQuantizeBaseLayer);
1148  CreateAnyLayer(fbQuantizeLayer.o, serializer::Layer::Layer_QuantizeLayer);
1149 }
1150 
1151 // Build FlatBuffer for FullyConnected Layer
1152 void SerializerStrategy::SerializeFullyConnectedLayer(const armnn::IConnectableLayer* layer,
1153  const armnn::FullyConnectedDescriptor& fullyConnectedDescriptor,
1154  const char*)
1155 {
1156  // Create FlatBuffer BaseLayer
1157  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_FullyConnected);
1158 
1159  // Create FlatBuffer FullyConnectedDescriptor
1160  auto flatBufferDescriptor =
1161  serializer::CreateFullyConnectedDescriptor(m_flatBufferBuilder,
1162  fullyConnectedDescriptor.m_BiasEnabled,
1163  fullyConnectedDescriptor.m_TransposeWeightMatrix,
1164  fullyConnectedDescriptor.m_ConstantWeights);
1165 
1166  // Create FlatBuffer FullyConnectedLayer
1167  auto flatBufferLayer = serializer::CreateFullyConnectedLayer(m_flatBufferBuilder,
1168  flatBufferBaseLayer,
1169  flatBufferDescriptor);
1170 
1171  // Add created FullyConnectedLayer to the FlatBufferLayers
1172  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_FullyConnectedLayer);
1173 }
1174 
1175 // Build FlatBuffer for SpaceToBatchNd Layer
1176 void SerializerStrategy::SerializeSpaceToBatchNdLayer(const armnn::IConnectableLayer* layer,
1177  const armnn::SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor,
1178  const char* name)
1179 {
1180  IgnoreUnused(name);
1181 
1182  // Create FlatBuffer BaseLayer
1183  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_SpaceToBatchNd);
1184 
1185  std::vector<unsigned int> padList;
1186  padList.reserve(spaceToBatchNdDescriptor.m_PadList.size()*2);
1187  for (auto& pad : spaceToBatchNdDescriptor.m_PadList)
1188  {
1189  padList.push_back(pad.first);
1190  padList.push_back(pad.second);
1191  }
1192 
1193  auto flatBufferDescriptor =
1194  CreateSpaceToBatchNdDescriptor(m_flatBufferBuilder,
1195  m_flatBufferBuilder.CreateVector(spaceToBatchNdDescriptor.m_BlockShape),
1196  m_flatBufferBuilder.CreateVector(padList),
1197  GetFlatBufferDataLayout(spaceToBatchNdDescriptor.m_DataLayout));
1198 
1199  auto flatBufferLayer = serializer::CreateSpaceToBatchNdLayer(m_flatBufferBuilder,
1200  flatBufferBaseLayer,
1201  flatBufferDescriptor);
1202 
1203  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_SpaceToBatchNdLayer);
1204 }
1205 
1206 // Build FlatBuffer for SpaceToDepthLayer
1207 void SerializerStrategy::SerializeSpaceToDepthLayer(const armnn::IConnectableLayer* layer,
1208  const armnn::SpaceToDepthDescriptor& spaceToDepthDescriptor,
1209  const char* name)
1210 {
1211  IgnoreUnused(name);
1212 
1213  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_SpaceToDepth);
1214  auto flatBufferDescriptor =
1215  CreateSpaceToDepthDescriptor(m_flatBufferBuilder,
1216  spaceToDepthDescriptor.m_BlockSize,
1217  GetFlatBufferDataLayout(spaceToDepthDescriptor.m_DataLayout));
1218 
1219  auto flatBufferLayer = serializer::CreateSpaceToDepthLayer(m_flatBufferBuilder,
1220  flatBufferBaseLayer,
1221  flatBufferDescriptor);
1222 
1223  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_SpaceToDepthLayer);
1224 }
1225 
1226 // Build FlatBuffer for Splitter Layer
1227 void SerializerStrategy::SerializeSplitterLayer(const armnn::IConnectableLayer* layer,
1228  const armnn::ViewsDescriptor& viewsDescriptor,
1229  const char* name)
1230 {
1231  IgnoreUnused(name);
1232 
1233  // Create FlatBuffer ViewOrigins
1234  std::vector<flatbuffers::Offset<UintVector>> flatBufferViewOrigins;
1235  flatBufferViewOrigins.reserve(viewsDescriptor.GetNumViews());
1236 
1237  for(unsigned int vIdx = 0; vIdx < viewsDescriptor.GetNumViews(); ++vIdx)
1238  {
1239  std::vector<uint32_t> viewOrigin;
1240  viewOrigin.reserve(viewsDescriptor.GetNumDimensions());
1241 
1242  // Copy vector
1243  for(unsigned int dIdx = 0; dIdx < viewsDescriptor.GetNumDimensions(); ++dIdx)
1244  {
1245  viewOrigin.push_back(viewsDescriptor.GetViewOrigin(vIdx)[dIdx]);
1246  }
1247 
1248  flatBufferViewOrigins.push_back(CreateUintVector(m_flatBufferBuilder,
1249  m_flatBufferBuilder.CreateVector(viewOrigin)));
1250  }
1251 
1252  // Create FlatBuffer OriginsDescriptor
1253  auto flatBufferOriginDescriptor = CreateOriginsDescriptor(m_flatBufferBuilder,
1254  viewsDescriptor.GetOrigins().GetConcatAxis(),
1255  viewsDescriptor.GetOrigins().GetNumViews(),
1256  viewsDescriptor.GetOrigins().GetNumDimensions(),
1257  m_flatBufferBuilder.CreateVector(flatBufferViewOrigins));
1258 
1259  // Create FlatBuffer ViewSizes
1260  std::vector<flatbuffers::Offset<UintVector>> flatBufferViewSizes;
1261  flatBufferViewSizes.reserve(viewsDescriptor.GetNumViews());
1262 
1263  for(unsigned int vIdx = 0; vIdx < viewsDescriptor.GetNumViews(); ++vIdx)
1264  {
1265  std::vector<uint32_t> viewSize;
1266  viewSize.reserve(viewsDescriptor.GetNumDimensions());
1267 
1268  // Copy vector
1269  for(unsigned int dIdx = 0; dIdx < viewsDescriptor.GetNumDimensions(); ++dIdx)
1270  {
1271  viewSize.push_back(viewsDescriptor.GetViewSizes(vIdx)[dIdx]);
1272  }
1273 
1274  flatBufferViewSizes.push_back(CreateUintVector(m_flatBufferBuilder,
1275  m_flatBufferBuilder.CreateVector(viewSize)));
1276  }
1277 
1278  // Create FlatBuffer ViewsDescriptor
1279  auto flatBufferViewsDescriptor = CreateViewsDescriptor(m_flatBufferBuilder,
1280  flatBufferOriginDescriptor,
1281  m_flatBufferBuilder.CreateVector(flatBufferViewSizes));
1282 
1283  // Create FlatBuffer BaseLayer
1284  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Splitter);
1285 
1286  auto flatBufferSplitterLayer = serializer::CreateSplitterLayer(m_flatBufferBuilder,
1287  flatBufferBaseLayer,
1288  flatBufferViewsDescriptor);
1289 
1290  CreateAnyLayer(flatBufferSplitterLayer.o, serializer::Layer::Layer_SplitterLayer);
1291 }
1292 
1293 void SerializerStrategy::SerializeNormalizationLayer(const armnn::IConnectableLayer* layer,
1294  const armnn::NormalizationDescriptor& descriptor,
1295  const char* name)
1296 {
1297  IgnoreUnused(name);
1298 
1299  auto fbNormalizationBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Normalization);
1300 
1301  auto fbNormalizationDescriptor = serializer::CreateNormalizationDescriptor(
1302  m_flatBufferBuilder,
1303  GetFlatBufferNormalizationAlgorithmChannel(descriptor.m_NormChannelType),
1304  GetFlatBufferNormalizationAlgorithmMethod(descriptor.m_NormMethodType),
1305  descriptor.m_NormSize,
1306  descriptor.m_Alpha,
1307  descriptor.m_Beta,
1308  descriptor.m_K,
1309  GetFlatBufferDataLayout(descriptor.m_DataLayout));
1310 
1311  auto flatBufferLayer = serializer::CreateNormalizationLayer(m_flatBufferBuilder,
1312  fbNormalizationBaseLayer,
1313  fbNormalizationDescriptor);
1314 
1315  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_NormalizationLayer);
1316 }
1317 
1318 void SerializerStrategy::SerializeShapeLayer(const armnn::IConnectableLayer* layer,
1319  const char* name)
1320 {
1321  IgnoreUnused(name);
1322 
1323  auto shapeBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Shape);
1324  auto shapeLayer = serializer::CreateShapeLayer(m_flatBufferBuilder, shapeBaseLayer);
1325 
1326  CreateAnyLayer(shapeLayer.o, serializer::Layer::Layer_ShapeLayer);
1327 }
1328 
1329 void SerializerStrategy::SerializeStackLayer(const armnn::IConnectableLayer* layer,
1330  const armnn::StackDescriptor& stackDescriptor,
1331  const char* name)
1332 {
1333  IgnoreUnused(name);
1334 
1335  auto stackBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Stack);
1336 
1337  std::vector<unsigned int> inputShape;
1338  for (unsigned int i =0; i < stackDescriptor.m_InputShape.GetNumDimensions(); i++)
1339  {
1340  inputShape.push_back(stackDescriptor.m_InputShape[i]);
1341  }
1342 
1343  auto flatBufferStackDescriptor = CreateStackDescriptor(m_flatBufferBuilder,
1344  stackDescriptor.m_Axis,
1345  stackDescriptor.m_NumInputs,
1346  m_flatBufferBuilder.CreateVector(inputShape));
1347 
1348  auto stackLayer = serializer::CreateStackLayer(m_flatBufferBuilder, stackBaseLayer, flatBufferStackDescriptor);
1349  CreateAnyLayer(stackLayer.o, serializer::Layer::Layer_StackLayer);
1350 }
1351 
1352 void SerializerStrategy::SerializeStandInLayer(const armnn::IConnectableLayer *layer,
1353  const armnn::StandInDescriptor& standInDescriptor,
1354  const char *name)
1355 {
1356  IgnoreUnused(name);
1357 
1358  auto fbDescriptor = serializer::CreateStandInDescriptor(m_flatBufferBuilder,
1359  standInDescriptor.m_NumInputs,
1360  standInDescriptor.m_NumOutputs);
1361 
1362  auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_StandIn);
1363  auto fbLayer = serializer::CreateStandInLayer(m_flatBufferBuilder, fbBaseLayer, fbDescriptor);
1364 
1365  CreateAnyLayer(fbLayer.o, serializer::Layer::Layer_StandInLayer);
1366 }
1367 
1368 void SerializerStrategy::SerializeStridedSliceLayer(const armnn::IConnectableLayer* layer,
1369  const armnn::StridedSliceDescriptor& stridedSliceDescriptor,
1370  const char* name)
1371 {
1372  IgnoreUnused(name);
1373 
1374  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_StridedSlice);
1375 
1376  auto flatBufferDescriptor =
1377  CreateStridedSliceDescriptor(m_flatBufferBuilder,
1378  m_flatBufferBuilder.CreateVector(stridedSliceDescriptor.m_Begin),
1379  m_flatBufferBuilder.CreateVector(stridedSliceDescriptor.m_End),
1380  m_flatBufferBuilder.CreateVector(stridedSliceDescriptor.m_Stride),
1381  stridedSliceDescriptor.m_BeginMask,
1382  stridedSliceDescriptor.m_EndMask,
1383  stridedSliceDescriptor.m_ShrinkAxisMask,
1384  stridedSliceDescriptor.m_EllipsisMask,
1385  stridedSliceDescriptor.m_NewAxisMask,
1386  GetFlatBufferDataLayout(stridedSliceDescriptor.m_DataLayout));
1387 
1388  auto flatBufferLayer = serializer::CreateStridedSliceLayer(m_flatBufferBuilder,
1389  flatBufferBaseLayer,
1390  flatBufferDescriptor);
1391 
1392  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_StridedSliceLayer);
1393 }
1394 
1395 void SerializerStrategy::SerializeSubtractionLayer(const armnn::IConnectableLayer* layer, const char* name)
1396 {
1397  IgnoreUnused(name);
1398 
1399  auto fbSubtractionBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Subtraction);
1400  auto fbSubtractionLayer = serializer::CreateSubtractionLayer(m_flatBufferBuilder, fbSubtractionBaseLayer);
1401 
1402  CreateAnyLayer(fbSubtractionLayer.o, serializer::Layer::Layer_SubtractionLayer);
1403 }
1404 
1405 void SerializerStrategy::SerializeSwitchLayer(const armnn::IConnectableLayer* layer, const char* name)
1406 {
1407  IgnoreUnused(name);
1408 
1409  auto fbSwitchBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Switch);
1410  auto fbSwitchLayer = serializer::CreateSwitchLayer(m_flatBufferBuilder, fbSwitchBaseLayer);
1411 
1412  CreateAnyLayer(fbSwitchLayer.o, serializer::Layer::Layer_SwitchLayer);
1413 }
1414 
1415 void SerializerStrategy::SerializeTransposeConvolution2dLayer(
1416  const armnn::IConnectableLayer* layer,
1417  const armnn::TransposeConvolution2dDescriptor& descriptor,
1418  const std::vector<armnn::ConstTensor>& constants,
1419  const char* name)
1420 {
1421  IgnoreUnused(name);
1422 
1423  const armnn::ConstTensor& weights = constants.at(0);
1424 
1425  auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Convolution2d);
1426  auto fbDescriptor = CreateTransposeConvolution2dDescriptor(m_flatBufferBuilder,
1427  descriptor.m_PadLeft,
1428  descriptor.m_PadRight,
1429  descriptor.m_PadTop,
1430  descriptor.m_PadBottom,
1431  descriptor.m_StrideX,
1432  descriptor.m_StrideY,
1433  descriptor.m_BiasEnabled,
1434  GetFlatBufferDataLayout(descriptor.m_DataLayout));
1435 
1436  // weights & biases
1437  auto fbWeightsConstTensorInfo = CreateConstTensorInfo(weights);
1438  flatbuffers::Offset<serializer::ConstTensor> fbBiasesConstTensorInfo;
1439  if (constants.size() > 1)
1440  {
1441  const armnn::ConstTensor& biases = constants.at(1);
1442  fbBiasesConstTensorInfo = CreateConstTensorInfo(biases);
1443  }
1444 
1445  auto fbLayer = CreateTransposeConvolution2dLayer(m_flatBufferBuilder,
1446  fbBaseLayer,
1447  fbDescriptor,
1448  fbWeightsConstTensorInfo,
1449  fbBiasesConstTensorInfo);
1450 
1451  CreateAnyLayer(fbLayer.o, serializer::Layer::Layer_TransposeConvolution2dLayer);
1452 }
1453 
1454 void SerializerStrategy::SerializeTransposeLayer(const armnn::IConnectableLayer* layer,
1455  const armnn::TransposeDescriptor& descriptor,
1456  const char* name)
1457 {
1458  IgnoreUnused(name);
1459 
1460  // Create FlatBuffer BaseLayer
1461  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Transpose);
1462 
1463  std::vector<unsigned int> dimMappings;
1464  for (unsigned int i=0; i<descriptor.m_DimMappings.GetSize(); ++i)
1465  {
1466  dimMappings.push_back(descriptor.m_DimMappings[i]);
1467  }
1468 
1469  auto flatBufferDesc = serializer::CreateTransposeDescriptor(m_flatBufferBuilder,
1470  m_flatBufferBuilder.CreateVector(dimMappings));
1471 
1472  // Create the FlatBuffer TransposeLayer
1473  auto flatBufferLayer = serializer::CreateTransposeLayer(m_flatBufferBuilder,
1474  flatBufferBaseLayer,
1475  flatBufferDesc);
1476 
1477  // Add the AnyLayer to the FlatBufferLayers
1478  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_TransposeLayer);
1479 }
1480 
1481 void SerializerStrategy::SerializeQLstmLayer(const armnn::IConnectableLayer* layer,
1482  const armnn::QLstmDescriptor& descriptor,
1483  const std::vector<armnn::ConstTensor>& constants,
1484  const char* name)
1485 {
1486  IgnoreUnused(name);
1487 
1488  auto fbQLstmBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_QLstm);
1489 
1490  auto fbQLstmDescriptor = serializer::CreateQLstmDescriptor(
1491  m_flatBufferBuilder,
1492  descriptor.m_CifgEnabled,
1493  descriptor.m_PeepholeEnabled,
1494  descriptor.m_ProjectionEnabled,
1495  descriptor.m_LayerNormEnabled,
1496  descriptor.m_CellClip,
1497  descriptor.m_ProjectionClip,
1498  descriptor.m_InputIntermediateScale,
1499  descriptor.m_ForgetIntermediateScale,
1500  descriptor.m_CellIntermediateScale,
1501  descriptor.m_OutputIntermediateScale,
1502  descriptor.m_HiddenStateZeroPoint,
1503  descriptor.m_HiddenStateScale
1504  );
1505 
1506  // Index for constants vector
1507  std::size_t i = 0;
1508 
1509  // Mandatory params
1510  auto inputToForgetWeights = CreateConstTensorInfo(constants[i++]); //InputToForgetWeights
1511  auto inputToCellWeights = CreateConstTensorInfo(constants[i++]); //InputToCellWeights
1512  auto inputToOutputWeights = CreateConstTensorInfo(constants[i++]); //InputToOutputWeights
1513  auto recurrentToForgetWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToForgetWeights
1514  auto recurrentToCellWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToCellWeights
1515  auto recurrentToOutputWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToOutputWeights
1516  auto forgetGateBias = CreateConstTensorInfo(constants[i++]); //ForgetGateBias
1517  auto cellBias = CreateConstTensorInfo(constants[i++]); //CellBias
1518  auto outputGateBias = CreateConstTensorInfo(constants[i++]); //OutputGateBias
1519 
1520  // CIFG
1521  flatbuffers::Offset<serializer::ConstTensor> inputToInputWeights;
1522  flatbuffers::Offset<serializer::ConstTensor> recurrentToInputWeights;
1523  flatbuffers::Offset<serializer::ConstTensor> inputGateBias;
1524 
1525  if (!descriptor.m_CifgEnabled)
1526  {
1527  inputToInputWeights = CreateConstTensorInfo(constants[i++]); //InputToInputWeights
1528  recurrentToInputWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToInputWeights
1529  inputGateBias = CreateConstTensorInfo(constants[i++]); //InputGateBias
1530  }
1531 
1532  // Peephole
1533  flatbuffers::Offset<serializer::ConstTensor> cellToInputWeights;
1534  flatbuffers::Offset<serializer::ConstTensor> cellToForgetWeights;
1535  flatbuffers::Offset<serializer::ConstTensor> cellToOutputWeights;
1536 
1537  if (descriptor.m_PeepholeEnabled)
1538  {
1539  if (!descriptor.m_CifgEnabled)
1540  {
1541  cellToInputWeights = CreateConstTensorInfo(constants[i++]); //CellToInputWeights
1542  }
1543  cellToForgetWeights = CreateConstTensorInfo(constants[i++]); //CellToForgetWeights
1544  cellToOutputWeights = CreateConstTensorInfo(constants[i++]); //CellToOutputWeights
1545  }
1546 
1547  // Projection
1548  flatbuffers::Offset<serializer::ConstTensor> projectionWeights;
1549  flatbuffers::Offset<serializer::ConstTensor> projectionBias;
1550 
1551  if (descriptor.m_ProjectionEnabled)
1552  {
1553  projectionWeights = CreateConstTensorInfo(constants[i++]); //ProjectionWeights
1554  projectionBias = CreateConstTensorInfo(constants[i++]); //ProjectionBias
1555  }
1556 
1557  // Layer norm
1558  flatbuffers::Offset<serializer::ConstTensor> inputLayerNormWeights;
1559  flatbuffers::Offset<serializer::ConstTensor> forgetLayerNormWeights;
1560  flatbuffers::Offset<serializer::ConstTensor> cellLayerNormWeights;
1561  flatbuffers::Offset<serializer::ConstTensor> outputLayerNormWeights;
1562 
1563  if (descriptor.m_LayerNormEnabled)
1564  {
1565  if (!descriptor.m_CifgEnabled)
1566  {
1567  inputLayerNormWeights = CreateConstTensorInfo(constants[i++]); //InputLayerNormWeights
1568  }
1569  forgetLayerNormWeights = CreateConstTensorInfo(constants[i++]); //ForgetLayerNormWeights
1570  cellLayerNormWeights = CreateConstTensorInfo(constants[i++]); //CellLayerNormWeights
1571  outputLayerNormWeights = CreateConstTensorInfo(constants[i++]); //OutputLayerNormWeights
1572  }
1573 
1574  auto fbQLstmParams = serializer::CreateQLstmInputParams(
1575  m_flatBufferBuilder,
1576  inputToForgetWeights,
1577  inputToCellWeights,
1578  inputToOutputWeights,
1579  recurrentToForgetWeights,
1580  recurrentToCellWeights,
1581  recurrentToOutputWeights,
1582  forgetGateBias,
1583  cellBias,
1584  outputGateBias,
1585  inputToInputWeights,
1586  recurrentToInputWeights,
1587  inputGateBias,
1588  projectionWeights,
1589  projectionBias,
1590  cellToInputWeights,
1591  cellToForgetWeights,
1592  cellToOutputWeights,
1593  inputLayerNormWeights,
1594  forgetLayerNormWeights,
1595  cellLayerNormWeights,
1596  outputLayerNormWeights);
1597 
1598  auto fbQLstmLayer = serializer::CreateQLstmLayer(
1599  m_flatBufferBuilder,
1600  fbQLstmBaseLayer,
1601  fbQLstmDescriptor,
1602  fbQLstmParams);
1603 
1604  CreateAnyLayer(fbQLstmLayer.o, serializer::Layer::Layer_QLstmLayer);
1605 }
1606 
1607 void SerializerStrategy::SerializeQuantizedLstmLayer(const armnn::IConnectableLayer* layer,
1608  const std::vector<armnn::ConstTensor>& constants,
1609  const char* name)
1610 {
1611  IgnoreUnused(name);
1612 
1613  auto fbQuantizedLstmBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_QuantizedLstm);
1614 
1615  // index for constants vector
1616  size_t i = 0;
1617 
1618  // Get input parameters
1619  auto inputToInputWeights = CreateConstTensorInfo(constants[i++]);
1620  auto inputToForgetWeights = CreateConstTensorInfo(constants[i++]);
1621  auto inputToCellWeights = CreateConstTensorInfo(constants[i++]);
1622  auto inputToOutputWeights = CreateConstTensorInfo(constants[i++]);
1623 
1624  auto recurrentToInputWeights = CreateConstTensorInfo(constants[i++]);
1625  auto recurrentToForgetWeights = CreateConstTensorInfo(constants[i++]);
1626  auto recurrentToCellWeights = CreateConstTensorInfo(constants[i++]);
1627  auto recurrentToOutputWeights = CreateConstTensorInfo(constants[i++]);
1628 
1629  auto inputGateBias = CreateConstTensorInfo(constants[i++]);
1630  auto forgetGateBias = CreateConstTensorInfo(constants[i++]);
1631  auto cellBias = CreateConstTensorInfo(constants[i++]);
1632  auto outputGateBias = CreateConstTensorInfo(constants[i++]);
1633 
1634  auto fbQuantizedLstmParams = serializer::CreateQuantizedLstmInputParams(
1635  m_flatBufferBuilder,
1636  inputToInputWeights,
1637  inputToForgetWeights,
1638  inputToCellWeights,
1639  inputToOutputWeights,
1640  recurrentToInputWeights,
1641  recurrentToForgetWeights,
1642  recurrentToCellWeights,
1643  recurrentToOutputWeights,
1644  inputGateBias,
1645  forgetGateBias,
1646  cellBias,
1647  outputGateBias);
1648 
1649  auto fbQuantizedLstmLayer = serializer::CreateQuantizedLstmLayer(
1650  m_flatBufferBuilder,
1651  fbQuantizedLstmBaseLayer,
1652  fbQuantizedLstmParams);
1653 
1654  CreateAnyLayer(fbQuantizedLstmLayer.o, serializer::Layer::Layer_QuantizedLstmLayer);
1655 }
1656 
1657 void SerializerStrategy::SerializeUnidirectionalSequenceLstmLayer(
1658  const armnn::IConnectableLayer* layer,
1659  const armnn::UnidirectionalSequenceLstmDescriptor& descriptor,
1660  const std::vector<armnn::ConstTensor>& constants,
1661  const char* name)
1662 {
1663  IgnoreUnused(name);
1664 
1665  auto fbUnidirectionalSequenceLstmBaseLayer =
1666  CreateLayerBase(layer, serializer::LayerType::LayerType_UnidirectionalSequenceLstm);
1667 
1668  auto fbUnidirectionalSequenceLstmDescriptor = serializer::CreateUnidirectionalSequenceLstmDescriptor(
1669  m_flatBufferBuilder,
1670  descriptor.m_ActivationFunc,
1671  descriptor.m_ClippingThresCell,
1672  descriptor.m_ClippingThresProj,
1673  descriptor.m_CifgEnabled,
1674  descriptor.m_PeepholeEnabled,
1675  descriptor.m_ProjectionEnabled,
1676  descriptor.m_LayerNormEnabled,
1677  descriptor.m_TimeMajor);
1678 
1679  // Index for constants vector
1680  std::size_t i = 0;
1681 
1682  // Get mandatory/basic input parameters
1683  auto inputToForgetWeights = CreateConstTensorInfo(constants[i++]); //InputToForgetWeights
1684  auto inputToCellWeights = CreateConstTensorInfo(constants[i++]); //InputToCellWeights
1685  auto inputToOutputWeights = CreateConstTensorInfo(constants[i++]); //InputToOutputWeights
1686  auto recurrentToForgetWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToForgetWeights
1687  auto recurrentToCellWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToCellWeights
1688  auto recurrentToOutputWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToOutputWeights
1689  auto forgetGateBias = CreateConstTensorInfo(constants[i++]); //ForgetGateBias
1690  auto cellBias = CreateConstTensorInfo(constants[i++]); //CellBias
1691  auto outputGateBias = CreateConstTensorInfo(constants[i++]); //OutputGateBias
1692 
1693  // Define optional parameters; these are only set when the corresponding option is enabled in the LSTM descriptor
1694  flatbuffers::Offset<serializer::ConstTensor> inputToInputWeights;
1695  flatbuffers::Offset<serializer::ConstTensor> recurrentToInputWeights;
1696  flatbuffers::Offset<serializer::ConstTensor> cellToInputWeights;
1697  flatbuffers::Offset<serializer::ConstTensor> inputGateBias;
1698  flatbuffers::Offset<serializer::ConstTensor> projectionWeights;
1699  flatbuffers::Offset<serializer::ConstTensor> projectionBias;
1700  flatbuffers::Offset<serializer::ConstTensor> cellToForgetWeights;
1701  flatbuffers::Offset<serializer::ConstTensor> cellToOutputWeights;
1702  flatbuffers::Offset<serializer::ConstTensor> inputLayerNormWeights;
1703  flatbuffers::Offset<serializer::ConstTensor> forgetLayerNormWeights;
1704  flatbuffers::Offset<serializer::ConstTensor> cellLayerNormWeights;
1705  flatbuffers::Offset<serializer::ConstTensor> outputLayerNormWeights;
1706 
1707  if (!descriptor.m_CifgEnabled)
1708  {
1709  inputToInputWeights = CreateConstTensorInfo(constants[i++]); //InputToInputWeights
1710  recurrentToInputWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToInputWeights
1711  inputGateBias = CreateConstTensorInfo(constants[i++]); //InputGateBias
1712  }
1713 
1714  if (descriptor.m_PeepholeEnabled)
1715  {
1716  if (!descriptor.m_CifgEnabled)
1717  {
1718  cellToInputWeights = CreateConstTensorInfo(constants[i++]); //CellToInputWeights
1719  }
1720  cellToForgetWeights = CreateConstTensorInfo(constants[i++]); //CellToForgetWeights
1721  cellToOutputWeights = CreateConstTensorInfo(constants[i++]); //CellToOutputWeights
1722  }
1723 
1724  if (descriptor.m_ProjectionEnabled)
1725  {
1726  projectionWeights = CreateConstTensorInfo(constants[i++]); //ProjectionWeights
1727  projectionBias = CreateConstTensorInfo(constants[i++]); //ProjectionBias
1728  }
1729 
1730  if (descriptor.m_LayerNormEnabled)
1731  {
1732  if (!descriptor.m_CifgEnabled)
1733  {
1734  inputLayerNormWeights = CreateConstTensorInfo(constants[i++]); //InputLayerNormWeights
1735  }
1736  forgetLayerNormWeights = CreateConstTensorInfo(constants[i++]); //ForgetLayerNormWeights
1737  cellLayerNormWeights = CreateConstTensorInfo(constants[i++]); //CellLayerNormWeights
1738  outputLayerNormWeights = CreateConstTensorInfo(constants[i++]); //OutputLayerNormWeights
1739  }
1740 
1741  auto fbUnidirectionalSequenceLstmParams = serializer::CreateLstmInputParams(
1742  m_flatBufferBuilder,
1743  inputToForgetWeights,
1744  inputToCellWeights,
1745  inputToOutputWeights,
1746  recurrentToForgetWeights,
1747  recurrentToCellWeights,
1748  recurrentToOutputWeights,
1749  forgetGateBias,
1750  cellBias,
1751  outputGateBias,
1752  inputToInputWeights,
1753  recurrentToInputWeights,
1754  cellToInputWeights,
1755  inputGateBias,
1756  projectionWeights,
1757  projectionBias,
1758  cellToForgetWeights,
1759  cellToOutputWeights,
1760  inputLayerNormWeights,
1761  forgetLayerNormWeights,
1762  cellLayerNormWeights,
1763  outputLayerNormWeights);
1764 
1765  auto fbUnidirectionalSequenceLstmLayer = serializer::CreateUnidirectionalSequenceLstmLayer(
1766  m_flatBufferBuilder,
1767  fbUnidirectionalSequenceLstmBaseLayer,
1768  fbUnidirectionalSequenceLstmDescriptor,
1769  fbUnidirectionalSequenceLstmParams);
1770 
1771  CreateAnyLayer(fbUnidirectionalSequenceLstmLayer.o, serializer::Layer::Layer_UnidirectionalSequenceLstmLayer);
1772 }
1773 
1774 fb::Offset<serializer::LayerBase> SerializerStrategy::CreateLayerBase(const IConnectableLayer* layer,
1775  const serializer::LayerType layerType)
1776 {
1777 
1778  uint32_t fbIndex = GetSerializedId(layer->GetGuid());
1779 
1780  std::vector<fb::Offset<serializer::InputSlot>> inputSlots = CreateInputSlots(layer);
1781  std::vector<fb::Offset<serializer::OutputSlot>> outputSlots = CreateOutputSlots(layer);
1782 
1783  return serializer::CreateLayerBase(m_flatBufferBuilder,
1784  fbIndex,
1785  m_flatBufferBuilder.CreateString(layer->GetName()),
1786  layerType,
1787  m_flatBufferBuilder.CreateVector(inputSlots),
1788  m_flatBufferBuilder.CreateVector(outputSlots));
1789 }
1790 
1791 void SerializerStrategy::CreateAnyLayer(const flatbuffers::Offset<void>& layer, const serializer::Layer serializerLayer)
1792 {
1793 
1794  auto anyLayer = armnnSerializer::CreateAnyLayer(m_flatBufferBuilder, serializerLayer, layer);
1795  m_serializedLayers.push_back(anyLayer);
1796 }
1797 
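// CreateDataVector copies a const tensor's raw memory into a typed FlatBuffers vector,
// treating the buffer as size / sizeof(T) elements of T; callers choose the T that matches
// the element width of the tensor's DataType.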
1798 template <typename T>
1799 flatbuffers::Offset<flatbuffers::Vector<T>> SerializerStrategy::CreateDataVector(const void* memory, unsigned int size)
1800 {
1801  const T* buffer = reinterpret_cast<const T*>(memory);
1802  std::vector<T> vector(buffer, buffer + (size / sizeof(T)));
1803  auto fbVector = m_flatBufferBuilder.CreateVector(vector);
1804  return fbVector;
1805 }
1806 
1807 flatbuffers::Offset<TensorInfo> SerializerStrategy::CreateTensorInfo(const armnn::TensorInfo& tensorInfo)
1808 {
1809  // Get the dimensions
1810  std::vector<unsigned int> shape;
1811  std::vector<bool> specificity;
1812  // This assumes that the TensorShape constructors have ensured that the size of m_DimensionsSpecificity
1813  // matches the size of dimensions.
1814  for(unsigned int dim = 0; dim < tensorInfo.GetShape().GetNumDimensions(); ++dim)
1815  {
1816  specificity.push_back(tensorInfo.GetShape().GetDimensionSpecificity(dim));
1817 
1818  if (tensorInfo.GetShape().GetDimensionSpecificity(dim))
1819  {
1820  shape.push_back(tensorInfo.GetShape()[dim]);
1821  }
1822  else
1823  {
1824  shape.push_back(0);
1825  }
1826  }
1827 
1828  if (tensorInfo.HasPerAxisQuantization())
1829  {
1830  // Create FlatBuffer TensorInfo
1831  auto flatBufferTensorInfo =
1832  serializer::CreateTensorInfo(m_flatBufferBuilder,
1833  m_flatBufferBuilder.CreateVector(shape),
1834  GetFlatBufferDataType(tensorInfo.GetDataType()),
1835  tensorInfo.GetQuantizationScales()[0],
1836  tensorInfo.GetQuantizationOffset(),
1837  m_flatBufferBuilder.CreateVector(tensorInfo.GetQuantizationScales()),
1838  tensorInfo.GetQuantizationDim().value(),
1839  static_cast<unsigned int>
1840  (tensorInfo.GetShape().GetDimensionality()),
1841  m_flatBufferBuilder.CreateVector(specificity));
1842  return flatBufferTensorInfo;
1843  }
1844 
1845  // Create FlatBuffer TensorInfo
1846  auto flatBufferTensorInfo = serializer::CreateTensorInfo(m_flatBufferBuilder,
1847  m_flatBufferBuilder.CreateVector(shape),
1848  GetFlatBufferDataType(tensorInfo.GetDataType()),
1849  tensorInfo.GetQuantizationScale(),
1850  tensorInfo.GetQuantizationOffset(),
1851  0,
1852  0,
1853  static_cast<unsigned int>
1854  (tensorInfo.GetShape().GetDimensionality()),
1855  m_flatBufferBuilder.CreateVector(specificity));
1856  return flatBufferTensorInfo;
1857 }
1858 
1859 flatbuffers::Offset<serializer::ConstTensor>
1860  SerializerStrategy::CreateConstTensorInfo(const armnn::ConstTensor& constTensor)
1861 {
1862  armnn::TensorInfo tensorInfo = constTensor.GetInfo();
1863 
1864  flatbuffers::Offset<void> fbPayload;
1865 
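// The FlatBuffers payload is chosen by element width: 8-byte types are stored as LongData,
// 4-byte types as IntData, 2-byte types as ShortData, and the remaining (1-byte) types as ByteData.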
1866  switch (tensorInfo.GetDataType())
1867  {
1868  case armnn::DataType::Signed64:
1869  {
1870  auto fbVector = CreateDataVector<int64_t>(constTensor.GetMemoryArea(), constTensor.GetNumBytes());
1871  flatbuffers::Offset<serializer::LongData> flatBuffersData = serializer::CreateLongData(
1872  m_flatBufferBuilder,
1873  fbVector);
1874  fbPayload = flatBuffersData.o;
1875  break;
1876  }
1877  case armnn::DataType::Float32:
1878  case armnn::DataType::Signed32:
1879  {
1880  auto fbVector = CreateDataVector<int32_t>(constTensor.GetMemoryArea(), constTensor.GetNumBytes());
1881  flatbuffers::Offset<serializer::IntData> flatBuffersData = serializer::CreateIntData(
1882  m_flatBufferBuilder,
1883  fbVector);
1884  fbPayload = flatBuffersData.o;
1885  break;
1886  }
1887  case armnn::DataType::Float16:
1888  case armnn::DataType::BFloat16:
1889  case armnn::DataType::QSymmS16:
1890  {
1891  auto fbVector = CreateDataVector<int16_t>(constTensor.GetMemoryArea(), constTensor.GetNumBytes());
1892  flatbuffers::Offset<serializer::ShortData> flatBuffersData = serializer::CreateShortData(
1893  m_flatBufferBuilder,
1894  fbVector);
1895  fbPayload = flatBuffersData.o;
1896  break;
1897  }
1898  case armnn::DataType::QSymmS8:
1899  case armnn::DataType::QAsymmS8:
1900  case armnn::DataType::QAsymmU8:
1901  case armnn::DataType::Boolean:
1902  default:
1903  {
1904  auto fbVector = CreateDataVector<int8_t>(constTensor.GetMemoryArea(), constTensor.GetNumBytes());
1905  flatbuffers::Offset<serializer::ByteData> flatBuffersData = serializer::CreateByteData(
1906  m_flatBufferBuilder,
1907  fbVector);
1908  fbPayload = flatBuffersData.o;
1909  }
1910  }
1911  flatbuffers::Offset<serializer::ConstTensor> flatBufferConstTensor = serializer::CreateConstTensor(
1912  m_flatBufferBuilder,
1913  CreateTensorInfo(tensorInfo),
1914  GetFlatBufferConstTensorData(tensorInfo.GetDataType()),
1915  fbPayload);
1916  return flatBufferConstTensor;
1917 }
1918 
1919 flatbuffers::Offset<armnnSerializer::FeatureCompatibilityVersions> SerializerStrategy::GetVersionTable()
1920 {
1921  flatbuffers::Offset<armnnSerializer::FeatureCompatibilityVersions> versionsTable =
1922  serializer::CreateFeatureCompatibilityVersions(
1923  m_flatBufferBuilder,
1924  1, // Binding ids scheme version
1925  1, // Weights layout scheme version
1926  1 // Constant tensors as inputs version
1927  );
1928  return versionsTable;
1929 }
1930 
1931 std::vector<fb::Offset<serializer::InputSlot>>
1932  SerializerStrategy::CreateInputSlots(const armnn::IConnectableLayer* layer)
1933 {
1934  std::vector<fb::Offset<serializer::InputSlot>> inputSlots;
1935 
1936  // Get the InputSlots
1937  for (unsigned int slotIndex = 0; slotIndex<layer->GetNumInputSlots(); ++slotIndex)
1938  {
1939  const IInputSlot& inputSlot = layer->GetInputSlot(slotIndex);
1940 
1941  // Get the Connection for the InputSlot
1942  const IOutputSlot* connection = inputSlot.GetConnection();
1943 
1944  // Create FlatBuffer Connection
1945  serializer::Connection conn(GetSerializedId(inputSlot.GetConnection()->GetOwningLayerGuid()),
1946  connection->CalculateIndexOnOwner());
1947  // Create FlatBuffer InputSlot
1948  inputSlots.push_back(serializer::CreateInputSlot(m_flatBufferBuilder, slotIndex, &conn));
1949  }
1950  return inputSlots;
1951 }
1952 
1953 std::vector<fb::Offset<serializer::OutputSlot>>
1954  SerializerStrategy::CreateOutputSlots(const armnn::IConnectableLayer* layer)
1955 {
1956  std::vector<fb::Offset<serializer::OutputSlot>> outputSlots;
1957 
1958  // Get the OutputSlots
1959  for (unsigned int slotIndex = 0; slotIndex < layer->GetNumOutputSlots(); ++slotIndex)
1960  {
1961  const IOutputSlot& outputSlot = layer->GetOutputSlot(slotIndex);
1962  const armnn::TensorInfo& tensorInfo = outputSlot.GetTensorInfo();
1963 
1964  // Create FlatBuffer OutputSlot
1965  outputSlots.push_back(serializer::CreateOutputSlot(m_flatBufferBuilder,
1966  slotIndex,
1967  CreateTensorInfo(tensorInfo)));
1968  }
1969  return outputSlots;
1970 }
1971 
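// ExecuteStrategy is invoked once per layer while the network graph is visited; it casts the
// BaseDescriptor to the concrete descriptor type for the layer and forwards to the matching
// Serialize*Layer helper above.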
1972 void SerializerStrategy::ExecuteStrategy(const armnn::IConnectableLayer* layer,
1973  const BaseDescriptor& descriptor,
1974  const std::vector<armnn::ConstTensor>& constants,
1975  const char* name,
1976  const armnn::LayerBindingId id)
1977 {
1978  IgnoreUnused(constants);
1979 
1980  switch (layer->GetType())
1981  {
1982  case armnn::LayerType::Activation :
1983  {
1984  const armnn::ActivationDescriptor& layerDescriptor =
1985  static_cast<const armnn::ActivationDescriptor&>(descriptor);
1986  SerializeActivationLayer(layer, layerDescriptor, name);
1987  break;
1988  }
1989  case armnn::LayerType::Addition :
1990  {
1991  SerializeAdditionLayer(layer, name);
1992  break;
1993  }
1994  case armnn::LayerType::ArgMinMax :
1995  {
1996  const armnn::ArgMinMaxDescriptor& layerDescriptor =
1997  static_cast<const armnn::ArgMinMaxDescriptor&>(descriptor);
1998  SerializeArgMinMaxLayer(layer, layerDescriptor, name);
1999  break;
2000  }
2001  case armnn::LayerType::BatchMatMul :
2002  {
2003  const armnn::BatchMatMulDescriptor& layerDescriptor =
2004  static_cast<const armnn::BatchMatMulDescriptor&>(descriptor);
2005  SerializeBatchMatMulLayer(layer,
2006  layerDescriptor,
2007  name);
2008  break;
2009  }
2010  case armnn::LayerType::BatchNormalization :
2011  {
2012  const armnn::BatchNormalizationDescriptor& layerDescriptor =
2013  static_cast<const armnn::BatchNormalizationDescriptor&>(descriptor);
2014  SerializeBatchNormalizationLayer(layer,
2015  layerDescriptor,
2016  constants,
2017  name);
2018  break;
2019  }
2020  case armnn::LayerType::BatchToSpaceNd :
2021  {
2022  const armnn::BatchToSpaceNdDescriptor& layerDescriptor =
2023  static_cast<const armnn::BatchToSpaceNdDescriptor&>(descriptor);
2024  SerializeBatchToSpaceNdLayer(layer,
2025  layerDescriptor,
2026  name);
2027  break;
2028  }
2029  case armnn::LayerType::Cast :
2030  {
2031  SerializeCastLayer(layer, name);
2032  break;
2033  }
2034  case armnn::LayerType::ChannelShuffle :
2035  {
2036  const armnn::ChannelShuffleDescriptor& layerDescriptor =
2037  static_cast<const armnn::ChannelShuffleDescriptor&>(descriptor);
2038  SerializeChannelShuffleLayer(layer,
2039  layerDescriptor,
2040  name);
2041  break;
2042  }
2043  case armnn::LayerType::Comparison :
2044  {
2045  const armnn::ComparisonDescriptor& layerDescriptor =
2046  static_cast<const armnn::ComparisonDescriptor&>(descriptor);
2047  SerializeComparisonLayer(layer,
2048  layerDescriptor,
2049  name);
2050  break;
2051  }
2052  case armnn::LayerType::Concat :
2053  {
2054  const armnn::ConcatDescriptor& layerDescriptor =
2055  static_cast<const armnn::ConcatDescriptor&>(descriptor);
2056  SerializeConcatLayer(layer,
2057  layerDescriptor,
2058  name);
2059  break;
2060  }
2061  case armnn::LayerType::Constant :
2062  {
2063  SerializeConstantLayer(layer,
2064  constants,
2065  name);
2066  break;
2067  }
2068  case armnn::LayerType::Convolution2d :
2069  {
2070  const armnn::Convolution2dDescriptor& layerDescriptor =
2071  static_cast<const armnn::Convolution2dDescriptor&>(descriptor);
2072  SerializeConvolution2dLayer(layer,
2073  layerDescriptor,
2074  name);
2075  break;
2076  }
2077  case armnn::LayerType::Convolution3d :
2078  {
2079  const armnn::Convolution3dDescriptor& layerDescriptor =
2080  static_cast<const armnn::Convolution3dDescriptor&>(descriptor);
2081  SerializeConvolution3dLayer(layer,
2082  layerDescriptor,
2083  name);
2084  break;
2085  }
2086  case armnn::LayerType::DepthToSpace :
2087  {
2088  const armnn::DepthToSpaceDescriptor& layerDescriptor =
2089  static_cast<const armnn::DepthToSpaceDescriptor&>(descriptor);
2090  SerializeDepthToSpaceLayer(layer,
2091  layerDescriptor,
2092  name);
2093  break;
2094  }
2095  case armnn::LayerType::DepthwiseConvolution2d :
2096  {
2097  const armnn::DepthwiseConvolution2dDescriptor& layerDescriptor =
2098  static_cast<const armnn::DepthwiseConvolution2dDescriptor&>(descriptor);
2099  SerializeDepthwiseConvolution2dLayer(layer,
2100  layerDescriptor,
2101  name);
2102  break;
2103  }
2104  case armnn::LayerType::Dequantize :
2105  {
2106  SerializeDequantizeLayer(layer,
2107  name);
2108  break;
2109  }
2110  case armnn::LayerType::DetectionPostProcess :
2111  {
2112  const armnn::DetectionPostProcessDescriptor& layerDescriptor =
2113  static_cast<const armnn::DetectionPostProcessDescriptor&>(descriptor);
2114  SerializeDetectionPostProcessLayer(layer, layerDescriptor, constants, name);
2115  break;
2116  }
2117  case armnn::LayerType::Division :
2118  {
2119  SerializeDivisionLayer(layer, name);
2120  break;
2121  }
2122  case armnn::LayerType::ElementwiseUnary :
2123  {
2124  const armnn::ElementwiseUnaryDescriptor& layerDescriptor =
2125  static_cast<const armnn::ElementwiseUnaryDescriptor&>(descriptor);
2126  SerializeElementwiseUnaryLayer(layer, layerDescriptor, name);
2127  break;
2128  }
2129  case armnn::LayerType::Fill :
2130  {
2131  const armnn::FillDescriptor& layerDescriptor =
2132  static_cast<const armnn::FillDescriptor&>(descriptor);
2133  SerializeFillLayer(layer, layerDescriptor, name);
2134  break;
2135  }
2136  case armnn::LayerType::Floor :
2137  {
2138  SerializeFloorLayer(layer, name);
2139  break;
2140  }
2141  case armnn::LayerType::FullyConnected :
2142  {
2143  const armnn::FullyConnectedDescriptor& layerDescriptor =
2144  static_cast<const armnn::FullyConnectedDescriptor&>(descriptor);
2145  SerializeFullyConnectedLayer(layer, layerDescriptor, name);
2146  break;
2147  }
2148  case armnn::LayerType::Gather :
2149  {
2150  const armnn::GatherDescriptor& layerDescriptor =
2151  static_cast<const armnn::GatherDescriptor&>(descriptor);
2152  SerializeGatherLayer(layer, layerDescriptor, name);
2153  break;
2154  }
2155  case armnn::LayerType::GatherNd :
2156  {
2157  SerializeGatherNdLayer(layer, name);
2158  break;
2159  }
2160  case armnn::LayerType::Input :
2161  {
2162  SerializeInputLayer(layer, id, name);
2163  break;
2164  }
2165  case armnn::LayerType::InstanceNormalization :
2166  {
2167  const armnn::InstanceNormalizationDescriptor& layerDescriptor =
2168  static_cast<const armnn::InstanceNormalizationDescriptor&>(descriptor);
2169  SerializeInstanceNormalizationLayer(layer, layerDescriptor, name);
2170  break;
2171  }
2172  case armnn::LayerType::L2Normalization :
2173  {
2174  const armnn::L2NormalizationDescriptor& layerDescriptor =
2175  static_cast<const armnn::L2NormalizationDescriptor&>(descriptor);
2176  SerializeL2NormalizationLayer(layer, layerDescriptor, name);
2177  break;
2178  }
2179  case armnn::LayerType::LogicalBinary :
2180  {
2181  const armnn::LogicalBinaryDescriptor& layerDescriptor =
2182  static_cast<const armnn::LogicalBinaryDescriptor&>(descriptor);
2183  SerializeLogicalBinaryLayer(layer, layerDescriptor, name);
2184  break;
2185  }
2186  case armnn::LayerType::LogSoftmax :
2187  {
2188  const armnn::LogSoftmaxDescriptor& layerDescriptor =
2189  static_cast<const armnn::LogSoftmaxDescriptor&>(descriptor);
2190  SerializeLogSoftmaxLayer(layer, layerDescriptor, name);
2191  break;
2192  }
2193  case armnn::LayerType::Lstm :
2194  {
2195  const armnn::LstmDescriptor& layerDescriptor =
2196  static_cast<const armnn::LstmDescriptor&>(descriptor);
2197  SerializeLstmLayer(layer, layerDescriptor, constants, name);
2198  break;
2199  }
2200  case armnn::LayerType::QLstm :
2201  {
2202  const armnn::QLstmDescriptor& layerDescriptor =
2203  static_cast<const armnn::QLstmDescriptor&>(descriptor);
2204  SerializeQLstmLayer(layer, layerDescriptor, constants, name);
2205  break;
2206  }
2207  case armnn::LayerType::Maximum :
2208  {
2209  SerializeMaximumLayer(layer, name);
2210  break;
2211  }
2212  case armnn::LayerType::Mean :
2213  {
2214  const armnn::MeanDescriptor& layerDescriptor =
2215  static_cast<const armnn::MeanDescriptor&>(descriptor);
2216  SerializeMeanLayer(layer, layerDescriptor, name);
2217  break;
2218  }
2219  case armnn::LayerType::Merge :
2220  {
2221  SerializeMergeLayer(layer, name);
2222  break;
2223  }
2224  case armnn::LayerType::Minimum :
2225  {
2226  SerializeMinimumLayer(layer, name);
2227  break;
2228  }
2229  case armnn::LayerType::Multiplication :
2230  {
2231  SerializeMultiplicationLayer(layer, name);
2232  break;
2233  }
2234  case armnn::LayerType::Normalization :
2235  {
2236  const armnn::NormalizationDescriptor& layerDescriptor =
2237  static_cast<const armnn::NormalizationDescriptor&>(descriptor);
2238  SerializeNormalizationLayer(layer, layerDescriptor, name);
2239  break;
2240  }
2241  case armnn::LayerType::Output :
2242  {
2243  SerializeOutputLayer(layer, id, name);
2244  break;
2245  }
2246  case armnn::LayerType::Pad :
2247  {
2248  const armnn::PadDescriptor& layerDescriptor =
2249  static_cast<const armnn::PadDescriptor&>(descriptor);
2250  SerializePadLayer(layer, layerDescriptor, name);
2251  break;
2252  }
2253  case armnn::LayerType::Permute :
2254  {
2255  const armnn::PermuteDescriptor& layerDescriptor =
2256  static_cast<const armnn::PermuteDescriptor&>(descriptor);
2257  SerializePermuteLayer(layer, layerDescriptor, name);
2258  break;
2259  }
2260  case armnn::LayerType::Pooling2d :
2261  {
2262  const armnn::Pooling2dDescriptor& layerDescriptor =
2263  static_cast<const armnn::Pooling2dDescriptor&>(descriptor);
2264  SerializePooling2dLayer(layer, layerDescriptor, name);
2265  break;
2266  }
2267  case armnn::LayerType::Pooling3d :
2268  {
2269  const armnn::Pooling3dDescriptor& layerDescriptor =
2270  static_cast<const armnn::Pooling3dDescriptor&>(descriptor);
2271  SerializePooling3dLayer(layer, layerDescriptor, name);
2272  break;
2273  }
2274  case armnn::LayerType::Prelu :
2275  {
2276  SerializePreluLayer(layer, name);
2277  break;
2278  }
2279  case armnn::LayerType::Quantize :
2280  {
2281  SerializeQuantizeLayer(layer, name);
2282  break;
2283  }
2284  case armnn::LayerType::QuantizedLstm :
2285  SerializeQuantizedLstmLayer(layer, constants, name);
2286  break;
2287  case armnn::LayerType::Reshape :
2288  {
2289  const armnn::ReshapeDescriptor &layerDescriptor =
2290  static_cast<const armnn::ReshapeDescriptor &>(descriptor);
2291  SerializeReshapeLayer(layer, layerDescriptor, name);
2292  break;
2293  }
2294  case armnn::LayerType::Rank :
2295  {
2296  SerializeRankLayer(layer, name);
2297  break;
2298  }
2299  case armnn::LayerType::Reduce :
2300  {
2301  const armnn::ReduceDescriptor& layerDescriptor =
2302  static_cast<const armnn::ReduceDescriptor&>(descriptor);
2303  SerializeReduceLayer(layer, layerDescriptor, name);
2304  break;
2305  }
2306  case armnn::LayerType::Resize :
2307  {
2308  const armnn::ResizeDescriptor& layerDescriptor =
2309  static_cast<const armnn::ResizeDescriptor&>(descriptor);
2310  SerializeResizeLayer(layer, layerDescriptor, name);
2311  break;
2312  }
2313  case armnn::LayerType::Shape :
2314  {
2315  SerializeShapeLayer(layer, name);
2316  break;
2317  }
2318  case armnn::LayerType::Slice :
2319  {
2320  const armnn::SliceDescriptor& layerDescriptor =
2321  static_cast<const armnn::SliceDescriptor&>(descriptor);
2322  SerializeSliceLayer(layer, layerDescriptor, name);
2323  break;
2324  }
2325  case armnn::LayerType::Softmax :
2326  {
2327  const armnn::SoftmaxDescriptor& layerDescriptor =
2328  static_cast<const armnn::SoftmaxDescriptor&>(descriptor);
2329  SerializeSoftmaxLayer(layer, layerDescriptor, name);
2330  break;
2331  }
2332  case armnn::LayerType::SpaceToBatchNd :
2333  {
2334  const armnn::SpaceToBatchNdDescriptor& layerDescriptor =
2335  static_cast<const armnn::SpaceToBatchNdDescriptor&>(descriptor);
2336  SerializeSpaceToBatchNdLayer(layer, layerDescriptor, name);
2337  break;
2338  }
2339  case armnn::LayerType::SpaceToDepth :
2340  {
2341  const armnn::SpaceToDepthDescriptor& layerDescriptor =
2342  static_cast<const armnn::SpaceToDepthDescriptor&>(descriptor);
2343  SerializeSpaceToDepthLayer(layer, layerDescriptor, name);
2344  break;
2345  }
2346  case armnn::LayerType::Splitter :
2347  {
2348  const armnn::SplitterDescriptor& layerDescriptor =
2349  static_cast<const armnn::SplitterDescriptor&>(descriptor);
2350  SerializeSplitterLayer(layer, layerDescriptor, name);
2351  break;
2352  }
2353  case armnn::LayerType::Stack :
2354  {
2355  const armnn::StackDescriptor& layerDescriptor =
2356  static_cast<const armnn::StackDescriptor&>(descriptor);
2357  SerializeStackLayer(layer, layerDescriptor, name);
2358  break;
2359  }
2360  case armnn::LayerType::StandIn :
2361  {
2362  const armnn::StandInDescriptor& layerDescriptor =
2363  static_cast<const armnn::StandInDescriptor&>(descriptor);
2364  SerializeStandInLayer(layer, layerDescriptor, name);
2365  break;
2366  }
2367  case armnn::LayerType::StridedSlice :
2368  {
2369  const armnn::StridedSliceDescriptor& layerDescriptor =
2370  static_cast<const armnn::StridedSliceDescriptor&>(descriptor);
2371  SerializeStridedSliceLayer(layer, layerDescriptor, name);
2372  break;
2373  }
2374  case armnn::LayerType::Subtraction :
2375  {
2376  SerializeSubtractionLayer(layer, name);
2377  break;
2378  }
2379  case armnn::LayerType::Switch :
2380  {
2381  SerializeSwitchLayer(layer, name);
2382  break;
2383  }
2384  case armnn::LayerType::Transpose :
2385  {
2386  const armnn::TransposeDescriptor& layerDescriptor =
2387  static_cast<const armnn::TransposeDescriptor&>(descriptor);
2388  SerializeTransposeLayer(layer, layerDescriptor, name);
2389  break;
2390  }
2391  case armnn::LayerType::TransposeConvolution2d :
2392  {
2393  const armnn::TransposeConvolution2dDescriptor& layerDescriptor =
2394  static_cast<const armnn::TransposeConvolution2dDescriptor&>(descriptor);
2395  SerializeTransposeConvolution2dLayer(layer, layerDescriptor, constants, name);
2396  break;
2397  }
2398  case armnn::LayerType::UnidirectionalSequenceLstm :
2399  {
2400  const armnn::UnidirectionalSequenceLstmDescriptor& layerDescriptor =
2401  static_cast<const armnn::UnidirectionalSequenceLstmDescriptor&>(descriptor);
2402  SerializeUnidirectionalSequenceLstmLayer(layer, layerDescriptor, constants, name);
2403  break;
2404  }
2405  default:
2406  {
2407  throw armnn::InvalidArgumentException(
2408  fmt::format("A layer of unknown type was given to the serializer. Layer name: {}; Layer Id: {}",
2409  layer->GetName(),
2410  id));
2411  }
2412  }
2413 }
2414 
2415 void ISerializer::SerializerImpl::Serialize(const INetwork& inNetwork)
2416 {
2417  // Iterate through the network
2418  inNetwork.ExecuteStrategy(m_SerializerStrategy);
2419  flatbuffers::FlatBufferBuilder& fbBuilder = m_SerializerStrategy.GetFlatBufferBuilder();
2420 
2421  // Create FlatBuffer SerializedGraph
2422  auto serializedGraph = serializer::CreateSerializedGraph(
2423  fbBuilder,
2424  fbBuilder.CreateVector(m_SerializerStrategy.GetSerializedLayers()),
2425  fbBuilder.CreateVector(m_SerializerStrategy.GetInputIds()),
2426  fbBuilder.CreateVector(m_SerializerStrategy.GetOutputIds()),
2427  m_SerializerStrategy.GetVersionTable());
2428 
2429  // Serialize the graph
2430  fbBuilder.Finish(serializedGraph);
2431 }
2432 
2433 
2434 bool ISerializer::SerializerImpl::SaveSerializedToStream(std::ostream& stream)
2435 {
2436  flatbuffers::FlatBufferBuilder& fbBuilder = m_SerializerStrategy.GetFlatBufferBuilder();
2437 
2438  auto bytesToWrite = armnn::numeric_cast<std::streamsize>(fbBuilder.GetSize());
2439  stream.write(reinterpret_cast<const char*>(fbBuilder.GetBufferPointer()), bytesToWrite);
2440  return !stream.bad();
2441 }
2442 
2443 } // namespace armnnSerializer
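
Typical use of the serialization code in this file goes through the public ISerializer interface. The following is a minimal, illustrative sketch only; the helper SaveNetwork and the file name "model.armnn" are arbitrary examples, not part of the ArmNN API.

#include <armnn/INetwork.hpp>
#include <armnnSerializer/ISerializer.hpp>
#include <fstream>

// Illustrative helper (not part of ArmNN): serialize an already-built INetwork to disk.
void SaveNetwork(const armnn::INetwork& network)
{
    armnnSerializer::ISerializerPtr serializer = armnnSerializer::ISerializer::Create();
    serializer->Serialize(network); // walks the graph and builds the FlatBuffer SerializedGraph

    std::ofstream file("model.armnn", std::ios::binary); // example output path
    serializer->SaveSerializedToStream(file);            // returns false if the stream went bad
}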