ArmNN 23.08
Serializer.cpp
1 //
2 // Copyright © 2017,2019-2023 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 #include "Serializer.hpp"
6 #include "SerializerUtils.hpp"
7 
8 #include <armnn/Descriptors.hpp>
9 #include <armnn/LstmParams.hpp>
10 #include <armnn/QuantizedLstmParams.hpp>
11 #include <armnn/utility/IgnoreUnused.hpp>
12 #include <armnn/utility/NumericCast.hpp>
13 
14 #include <fmt/format.h>
15 #include <iostream>
16 
17 using namespace armnn;
18 namespace fb = flatbuffers;
19 namespace serializer = armnnSerializer;
20 
21 namespace armnnSerializer
22 {
23 
24 ISerializer::ISerializer() : pSerializerImpl(new SerializerImpl())
25 {
26 }
27 
28 ISerializer::~ISerializer() = default;
29 
30 ISerializer* ISerializer::CreateRaw()
31 {
32  return new ISerializer();
33 }
34 
35 ISerializerPtr ISerializer::Create()
36 {
37  return ISerializerPtr(CreateRaw(), &ISerializer::Destroy);
38 }
39 
40 void ISerializer::Destroy(ISerializer* serializer)
41 {
42  delete serializer;
43 }
44 
45 void ISerializer::Serialize(const armnn::INetwork& inNetwork)
46 {
47  pSerializerImpl->Serialize(inNetwork);
48 }
49 
50 bool ISerializer::SaveSerializedToStream(std::ostream& stream)
51 {
52  return pSerializerImpl->SaveSerializedToStream(stream);
53 }
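// Typical use of the ISerializer API above (a sketch, not part of the original file;
// it assumes a fully built armnn::INetwork named 'network' and an extra #include <fstream>):
//
//     armnnSerializer::ISerializerPtr serializer = armnnSerializer::ISerializer::Create();
//     serializer->Serialize(*network);                  // visit the layers and build the FlatBuffer
//     std::ofstream out("model.armnn", std::ios::binary);
//     serializer->SaveSerializedToStream(out);          // write the serialized network; returns a bool success flag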
54 
55 serializer::ActivationFunction GetFlatBufferActivationFunction(armnn::ActivationFunction function)
56 {
57  switch (function)
58  {
59  case armnn::ActivationFunction::Sigmoid:
60  return serializer::ActivationFunction::ActivationFunction_Sigmoid;
61  case armnn::ActivationFunction::TanH:
62  return serializer::ActivationFunction::ActivationFunction_TanH;
63  case armnn::ActivationFunction::Linear:
64  return serializer::ActivationFunction::ActivationFunction_Linear;
65  case armnn::ActivationFunction::ReLu:
66  return serializer::ActivationFunction::ActivationFunction_ReLu;
67  case armnn::ActivationFunction::BoundedReLu:
68  return serializer::ActivationFunction::ActivationFunction_BoundedReLu;
69  case armnn::ActivationFunction::LeakyReLu:
70  return serializer::ActivationFunction::ActivationFunction_LeakyReLu;
71  case armnn::ActivationFunction::Abs:
72  return serializer::ActivationFunction::ActivationFunction_Abs;
73  case armnn::ActivationFunction::Sqrt:
74  return serializer::ActivationFunction::ActivationFunction_Sqrt;
75  case armnn::ActivationFunction::Square:
76  return serializer::ActivationFunction::ActivationFunction_Square;
77  case armnn::ActivationFunction::Elu:
78  return serializer::ActivationFunction::ActivationFunction_Elu;
79  case armnn::ActivationFunction::HardSwish:
80  return serializer::ActivationFunction::ActivationFunction_HardSwish;
81  default:
82  return serializer::ActivationFunction::ActivationFunction_Sigmoid;
83  }
84 }
85 
86 serializer::ArgMinMaxFunction GetFlatBufferArgMinMaxFunction(armnn::ArgMinMaxFunction function)
87 {
88  switch (function)
89  {
90  case armnn::ArgMinMaxFunction::Max:
91  return serializer::ArgMinMaxFunction::ArgMinMaxFunction_Max;
92  case armnn::ArgMinMaxFunction::Min:
93  default:
94  return serializer::ArgMinMaxFunction::ArgMinMaxFunction_Min;
95  }
96 }
97 
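// Maps a layer GUID to a compact sequential id used in the serialized graph: the first layer
// seen keeps the initial m_layerId, and every previously unseen GUID increments the counter.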
98 uint32_t SerializerStrategy::GetSerializedId(LayerGuid guid)
99 {
100  if (m_guidMap.empty())
101  {
102  m_guidMap.insert(std::make_pair(guid, m_layerId));
103  }
104  else if (m_guidMap.find(guid) == m_guidMap.end())
105  {
106  ++m_layerId;
107  m_guidMap.insert(std::make_pair(guid, m_layerId));
108 
109  return m_layerId;
110  }
111  return m_guidMap[guid];
112 }
113 
114 // Build FlatBuffer for Input Layer
115 void SerializerStrategy::SerializeInputLayer(const armnn::IConnectableLayer* layer, LayerBindingId id, const char* name)
116 {
117  IgnoreUnused(name);
118 
119  // Create FlatBuffer BaseLayer
120  auto flatBufferInputBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Input);
121 
122  // Create FlatBuffer BindableBaseLayer
123  auto flatBufferInputBindableBaseLayer = serializer::CreateBindableLayerBase(m_flatBufferBuilder,
124  flatBufferInputBaseLayer,
125  id);
126  // Push layer binding id to inputIds.
127  m_inputIds.push_back(id);
128 
129  // Create the FlatBuffer InputLayer
130  auto flatBufferInputLayer = serializer::CreateInputLayer(m_flatBufferBuilder, flatBufferInputBindableBaseLayer);
131 
132  // Add the AnyLayer to the FlatBufferLayers
133  CreateAnyLayer(flatBufferInputLayer.o, serializer::Layer::Layer_InputLayer);
134 }
135 
136 // Build FlatBuffer for Output Layer
137 void SerializerStrategy::SerializeOutputLayer(const armnn::IConnectableLayer* layer,
138  LayerBindingId id, const char* name)
139 {
140  IgnoreUnused(name);
141 
142  // Create FlatBuffer BaseLayer
143  auto flatBufferOutputBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Output);
144 
145  // Create FlatBuffer BindableBaseLayer
146  auto flatBufferOutputBindableBaseLayer = serializer::CreateBindableLayerBase(m_flatBufferBuilder,
147  flatBufferOutputBaseLayer,
148  id);
149  // Push layer binding id to outputIds.
150  m_outputIds.push_back(id);
151 
152  // Create the FlatBuffer OutputLayer
153  auto flatBufferOutputLayer = serializer::CreateOutputLayer(m_flatBufferBuilder, flatBufferOutputBindableBaseLayer);
154  // Add the AnyLayer to the FlatBufferLayers
155  CreateAnyLayer(flatBufferOutputLayer.o, serializer::Layer::Layer_OutputLayer);
156 }
157 
158 // Build FlatBuffer for Activation Layer
159 void SerializerStrategy::SerializeActivationLayer(const armnn::IConnectableLayer* layer,
160  const armnn::ActivationDescriptor& descriptor,
161  const char* name)
162 {
163  IgnoreUnused(name);
164 
165  // Create FlatBuffer BaseLayer
166  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Activation);
167 
168  // Create the FlatBuffer ActivationDescriptor
169  auto flatBufferDescriptor = CreateActivationDescriptor(m_flatBufferBuilder,
170  GetFlatBufferActivationFunction(descriptor.m_Function),
171  descriptor.m_A,
172  descriptor.m_B);
173 
174  // Create the FlatBuffer ActivationLayer
175  auto flatBufferActivationLayer = CreateActivationLayer(m_flatBufferBuilder,
176  flatBufferBaseLayer,
177  flatBufferDescriptor);
178 
179  // Add the AnyLayer to the FlatBufferLayers
180  CreateAnyLayer(flatBufferActivationLayer.o, serializer::Layer::Layer_ActivationLayer);
181 }
182 
183 // Build FlatBuffer for Addition Layer
184 void SerializerStrategy::SerializeAdditionLayer(const armnn::IConnectableLayer* layer, const char* name)
185 {
186  IgnoreUnused(name);
187 
188  // Create FlatBuffer BaseLayer
189  auto flatBufferAdditionBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Addition);
190 
191  // Create the FlatBuffer AdditionLayer
192  auto flatBufferAdditionLayer = serializer::CreateAdditionLayer(m_flatBufferBuilder, flatBufferAdditionBaseLayer);
193 
194  // Add the AnyLayer to the FlatBufferLayers
195  CreateAnyLayer(flatBufferAdditionLayer.o, serializer::Layer::Layer_AdditionLayer);
196 }
197 
198 // Build FlatBuffer for ArgMinMax Layer
199 void SerializerStrategy::SerializeArgMinMaxLayer(const armnn::IConnectableLayer *layer,
200  const armnn::ArgMinMaxDescriptor& descriptor,
201  const char *name)
202 {
203  IgnoreUnused(name);
204 
205  // Create FlatBuffer BaseLayer
206  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_ArgMinMax);
207 
208  // Create FlatBuffer Descriptor
209  auto flatBufferDescriptor = CreateArgMinMaxDescriptor(m_flatBufferBuilder,
210  GetFlatBufferArgMinMaxFunction(descriptor.m_Function),
211  descriptor.m_Axis);
212 
213  // Create FlatBuffer ArgMinMaxLayer
214  auto flatBufferLayer = CreateArgMinMaxLayer(m_flatBufferBuilder,
215  flatBufferBaseLayer,
216  flatBufferDescriptor);
217 
218  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_ArgMinMaxLayer);
219 }
220 
221 void SerializerStrategy::SerializeBatchMatMulLayer(const armnn::IConnectableLayer* layer,
222  const armnn::BatchMatMulDescriptor& descriptor,
223  const char* name)
224 {
225  IgnoreUnused(name);
226 
227  // Create FlatBuffer BaseLayer
228  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_BatchMatMul);
229 
230  // Create the FlatBuffer BatchMatMulDescriptor
231  auto flatBufferDescriptor = CreateBatchMatMulDescriptor(m_flatBufferBuilder,
232  descriptor.m_TransposeX,
233  descriptor.m_TransposeY,
234  descriptor.m_AdjointX,
235  descriptor.m_AdjointY,
236  GetFlatBufferDataLayout(descriptor.m_DataLayoutX),
237  GetFlatBufferDataLayout(descriptor.m_DataLayoutY));
238 
239  // Create the FlatBuffer BatchMatMulLayer
240  auto flatBufferBatchMatMulLayer = CreateBatchMatMulLayer(m_flatBufferBuilder,
241  flatBufferBaseLayer,
242  flatBufferDescriptor);
243 
244  // Add the AnyLayer to the FlatBufferLayers
245  CreateAnyLayer(flatBufferBatchMatMulLayer.o, serializer::Layer::Layer_BatchMatMulLayer);
246 }
247 
248 // Build FlatBuffer for BatchToSpaceNd Layer
249 void SerializerStrategy::SerializeBatchToSpaceNdLayer(const armnn::IConnectableLayer* layer,
250  const armnn::BatchToSpaceNdDescriptor& descriptor,
251  const char* name)
252 {
253  IgnoreUnused(name);
254 
255  // Create FlatBuffer BaseLayer
256  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_BatchToSpaceNd);
257 
258  std::vector<unsigned int> crops;
259  crops.reserve(descriptor.m_Crops.size() * 2);
260  for (auto& crop : descriptor.m_Crops)
261  {
262  crops.push_back(crop.first);
263  crops.push_back(crop.second);
264  }
265 
266  auto flatBufferDescriptor =
267  CreateBatchToSpaceNdDescriptor(m_flatBufferBuilder,
268  m_flatBufferBuilder.CreateVector(descriptor.m_BlockShape),
269  m_flatBufferBuilder.CreateVector(crops),
270  GetFlatBufferDataLayout(descriptor.m_DataLayout));
271 
272  auto flatBufferLayer = serializer::CreateBatchToSpaceNdLayer(m_flatBufferBuilder,
273  flatBufferBaseLayer,
274  flatBufferDescriptor);
275 
276  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_BatchToSpaceNdLayer);
277 }
278 
279 void SerializerStrategy::SerializeBatchNormalizationLayer(
280  const armnn::IConnectableLayer* layer,
281  const armnn::BatchNormalizationDescriptor& batchNormDescriptor,
282  const std::vector<armnn::ConstTensor>& constants,
283  const char* name)
284 {
285  IgnoreUnused(name);
286 
287  const armnn::ConstTensor& mean = constants[0];
288  const armnn::ConstTensor& variance = constants[1];
289  const armnn::ConstTensor& beta = constants[2];
290  const armnn::ConstTensor& gamma = constants[3];
291 
292  auto fbBatchNormalizationBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_BatchNormalization);
293  auto fbBatchNormalizationDescriptor = serializer::CreateBatchNormalizationDescriptor(
294  m_flatBufferBuilder,
295  batchNormDescriptor.m_Eps,
296  GetFlatBufferDataLayout(batchNormDescriptor.m_DataLayout));
297 
298  auto fbMeanConstTensorInfo = CreateConstTensorInfo(mean);
299  auto fbVarianceConstTensorInfo = CreateConstTensorInfo(variance);
300  auto fbBetaConstTensorInfo = CreateConstTensorInfo(beta);
301  auto fbGammaConstTensorInfo = CreateConstTensorInfo(gamma);
302  auto fbBatchNormalizationLayer = serializer::CreateBatchNormalizationLayer(m_flatBufferBuilder,
303  fbBatchNormalizationBaseLayer,
304  fbBatchNormalizationDescriptor,
305  fbMeanConstTensorInfo,
306  fbVarianceConstTensorInfo,
307  fbBetaConstTensorInfo,
308  fbGammaConstTensorInfo);
309 
310  CreateAnyLayer(fbBatchNormalizationLayer.o, serializer::Layer::Layer_BatchNormalizationLayer);
311 }
312 
313 void SerializerStrategy::SerializeCastLayer(const armnn::IConnectableLayer* layer,
314  const char* name)
315 {
316  IgnoreUnused(name);
317 
318  auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Cast);
319  auto fbCastLayer = serializer::CreateCastLayer(m_flatBufferBuilder, fbBaseLayer);
320  CreateAnyLayer(fbCastLayer.o, serializer::Layer::Layer_CastLayer);
321 }
322 
323 void SerializerStrategy::SerializeChannelShuffleLayer(const armnn::IConnectableLayer* layer,
324  const armnn::ChannelShuffleDescriptor& descriptor,
325  const char* name)
326 {
327  IgnoreUnused(name);
328  auto fbDescriptor = CreateChannelShuffleDescriptor(m_flatBufferBuilder,
329  descriptor.m_Axis,
330  descriptor.m_NumGroups);
331  auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_ChannelShuffle);
332  auto fbChannelShuffleLayer = serializer::CreateChannelShuffleLayer(m_flatBufferBuilder, fbBaseLayer, fbDescriptor);
333  CreateAnyLayer(fbChannelShuffleLayer.o, serializer::Layer::Layer_ChannelShuffleLayer);
334 }
335 
336 void SerializerStrategy::SerializeComparisonLayer(const armnn::IConnectableLayer* layer,
337  const armnn::ComparisonDescriptor& descriptor,
338  const char* name)
339 {
340  IgnoreUnused(name);
341 
342  auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Comparison);
343  auto fbDescriptor = serializer::CreateComparisonDescriptor(
344  m_flatBufferBuilder,
345  GetFlatBufferComparisonOperation(descriptor.m_Operation));
346 
347  auto fbLayer = serializer::CreateComparisonLayer(m_flatBufferBuilder, fbBaseLayer, fbDescriptor);
348  CreateAnyLayer(fbLayer.o, serializer::Layer::Layer_ComparisonLayer);
349 }
350 
351 // Build FlatBuffer for Constant Layer
352 void SerializerStrategy::SerializeConstantLayer(const armnn::IConnectableLayer* layer,
353  const std::vector<armnn::ConstTensor>& constants,
354  const char* name)
355 {
356  IgnoreUnused(name);
357 
358  armnn::ConstTensor input = constants[0];
359 
360  // Create FlatBuffer BaseLayer
361  auto flatBufferConstantBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Constant);
362 
363  auto flatBufferConstTensorInfo = CreateConstTensorInfo(input);
364 
365  // Create the FlatBuffer ConstantLayer
366  auto flatBufferLayer = CreateConstantLayer(m_flatBufferBuilder,
367  flatBufferConstantBaseLayer,
368  flatBufferConstTensorInfo);
369 
370  // Add the AnyLayer to the FlatBufferLayers
371  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_ConstantLayer);
372 }
373 
374 // Build FlatBuffer for Convolution2dLayer
375 void SerializerStrategy::SerializeConvolution2dLayer(const armnn::IConnectableLayer* layer,
376  const armnn::Convolution2dDescriptor& descriptor,
377  const char* name)
378 {
379  IgnoreUnused(name);
380 
381  // Create FlatBuffer BaseLayer
382  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Convolution2d);
383 
384  auto flatBufferDescriptor = CreateConvolution2dDescriptor(m_flatBufferBuilder,
385  descriptor.m_PadLeft,
386  descriptor.m_PadRight,
387  descriptor.m_PadTop,
388  descriptor.m_PadBottom,
389  descriptor.m_StrideX,
390  descriptor.m_StrideY,
391  descriptor.m_DilationX,
392  descriptor.m_DilationY,
393  descriptor.m_BiasEnabled,
394  GetFlatBufferDataLayout(descriptor.m_DataLayout));
395 
396  // Create the FlatBuffer Convolution2dLayer
397  auto flatBufferLayer = CreateConvolution2dLayer(m_flatBufferBuilder,
398  flatBufferBaseLayer,
399  flatBufferDescriptor);
400 
401  // Add the AnyLayer to the FlatBufferLayers
402  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_Convolution2dLayer);
403 }
404 
405 // Build FlatBuffer for Convolution3dLayer
406 void SerializerStrategy::SerializeConvolution3dLayer(const armnn::IConnectableLayer* layer,
407  const armnn::Convolution3dDescriptor& descriptor,
408  const char* name)
409 {
410  IgnoreUnused(name);
411 
412  // Create FlatBuffer BaseLayer
413  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Convolution3d);
414 
415  auto flatBufferDescriptor = CreateConvolution3dDescriptor(m_flatBufferBuilder,
416  descriptor.m_PadLeft,
417  descriptor.m_PadRight,
418  descriptor.m_PadTop,
419  descriptor.m_PadBottom,
420  descriptor.m_PadFront,
421  descriptor.m_PadBack,
422  descriptor.m_StrideX,
423  descriptor.m_StrideY,
424  descriptor.m_StrideZ,
425  descriptor.m_DilationX,
426  descriptor.m_DilationY,
427  descriptor.m_DilationZ,
428  descriptor.m_BiasEnabled,
429  GetFlatBufferDataLayout(descriptor.m_DataLayout));
430 
431  // Create the FlatBuffer Convolution3dLayer
432  auto flatBufferLayer = CreateConvolution3dLayer(m_flatBufferBuilder,
433  flatBufferBaseLayer,
434  flatBufferDescriptor);
435 
436  // Add the AnyLayer to the FlatBufferLayers
437  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_Convolution3dLayer);
438 }
439 
440 void SerializerStrategy::SerializeDepthToSpaceLayer(const armnn::IConnectableLayer* layer,
441  const armnn::DepthToSpaceDescriptor& descriptor,
442  const char* name)
443 {
444  IgnoreUnused(name);
445 
446  auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_DepthToSpace);
447  auto fbDescriptor = CreateDepthToSpaceDescriptor(m_flatBufferBuilder,
448  descriptor.m_BlockSize,
449  GetFlatBufferDataLayout(descriptor.m_DataLayout));
450 
451  auto fbLayer = serializer::CreateDepthToSpaceLayer(m_flatBufferBuilder, fbBaseLayer, fbDescriptor);
452 
453  CreateAnyLayer(fbLayer.o, serializer::Layer::Layer_DepthToSpaceLayer);
454 }
455 
456 void SerializerStrategy::SerializeDepthwiseConvolution2dLayer(const armnn::IConnectableLayer* layer,
457  const armnn::DepthwiseConvolution2dDescriptor& descriptor,
458  const char* name)
459 {
460  IgnoreUnused(name);
461 
462  auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_DepthwiseConvolution2d);
463  auto fbDescriptor = CreateDepthwiseConvolution2dDescriptor(m_flatBufferBuilder,
464  descriptor.m_PadLeft,
465  descriptor.m_PadRight,
466  descriptor.m_PadTop,
467  descriptor.m_PadBottom,
468  descriptor.m_StrideX,
469  descriptor.m_StrideY,
470  descriptor.m_DilationX,
471  descriptor.m_DilationY,
472  descriptor.m_BiasEnabled,
473  GetFlatBufferDataLayout(descriptor.m_DataLayout));
474 
475  auto flatBufferLayer = CreateDepthwiseConvolution2dLayer(m_flatBufferBuilder,
476  fbBaseLayer,
477  fbDescriptor);
478 
479  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_DepthwiseConvolution2dLayer);
480 }
481 
482 void SerializerStrategy::SerializeDequantizeLayer(const armnn::IConnectableLayer* layer,
483  const char* name)
484 {
485  IgnoreUnused(name);
486 
487  auto fbDequantizeBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Dequantize);
488  auto fbDequantizeLayer = serializer::CreateDequantizeLayer(m_flatBufferBuilder, fbDequantizeBaseLayer);
489 
490  CreateAnyLayer(fbDequantizeLayer.o, serializer::Layer::Layer_DequantizeLayer);
491 }
492 
493 void SerializerStrategy::SerializeDetectionPostProcessLayer(const armnn::IConnectableLayer* layer,
494  const armnn::DetectionPostProcessDescriptor& descriptor,
495  const std::vector<armnn::ConstTensor>& constants,
496  const char* name)
497 {
498  IgnoreUnused(name);
499 
500  const armnn::ConstTensor& anchors = constants[0];
501 
502  auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_DetectionPostProcess);
503  auto fbDescriptor = CreateDetectionPostProcessDescriptor(m_flatBufferBuilder,
504  descriptor.m_MaxDetections,
505  descriptor.m_MaxClassesPerDetection,
506  descriptor.m_DetectionsPerClass,
507  descriptor.m_NmsScoreThreshold,
508  descriptor.m_NmsIouThreshold,
509  descriptor.m_NumClasses,
510  descriptor.m_UseRegularNms,
511  descriptor.m_ScaleX,
512  descriptor.m_ScaleY,
513  descriptor.m_ScaleW,
514  descriptor.m_ScaleH);
515 
516  flatbuffers::Offset<serializer::ConstTensor> fbAnchorsConstTensorInfo = CreateConstTensorInfo(anchors);
517 
518  auto flatBufferLayer = CreateDetectionPostProcessLayer(m_flatBufferBuilder,
519  fbBaseLayer,
520  fbDescriptor,
521  fbAnchorsConstTensorInfo);
522 
523  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_DetectionPostProcessLayer);
524 }
525 
526 void SerializerStrategy::SerializeDivisionLayer(const armnn::IConnectableLayer* layer, const char* name)
527 {
528  IgnoreUnused(name);
529 
530  auto fbDivisionBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Division);
531  auto fbDivisionLayer = serializer::CreateDivisionLayer(m_flatBufferBuilder, fbDivisionBaseLayer);
532 
533  CreateAnyLayer(fbDivisionLayer.o, serializer::Layer::Layer_DivisionLayer);
534 }
535 
536 void SerializerStrategy::SerializeElementwiseBinaryLayer(const armnn::IConnectableLayer* layer,
537  const armnn::ElementwiseBinaryDescriptor& descriptor,
538  const char* name)
539 {
540  IgnoreUnused(name);
541 
542  auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_ElementwiseBinary);
543  auto fbDescriptor = serializer::CreateElementwiseBinaryDescriptor(
544  m_flatBufferBuilder,
545  GetFlatBufferBinaryOperation(descriptor.m_Operation));
546 
547  auto fbLayer = serializer::CreateElementwiseBinaryLayer(m_flatBufferBuilder, fbBaseLayer, fbDescriptor);
548  CreateAnyLayer(fbLayer.o, serializer::Layer::Layer_ElementwiseBinaryLayer);
549 }
550 
551 void SerializerStrategy::SerializeElementwiseUnaryLayer(const armnn::IConnectableLayer* layer,
552  const armnn::ElementwiseUnaryDescriptor& descriptor,
553  const char* name)
554 {
555  IgnoreUnused(name);
556 
557  auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_ElementwiseUnary);
558  auto fbDescriptor = serializer::CreateElementwiseUnaryDescriptor(
559  m_flatBufferBuilder,
560  GetFlatBufferUnaryOperation(descriptor.m_Operation));
561 
562  auto fbLayer = serializer::CreateElementwiseUnaryLayer(m_flatBufferBuilder, fbBaseLayer, fbDescriptor);
563  CreateAnyLayer(fbLayer.o, serializer::Layer::Layer_ElementwiseUnaryLayer);
564 }
565 
566 void SerializerStrategy::SerializeFillLayer(const armnn::IConnectableLayer* layer,
567  const armnn::FillDescriptor& fillDescriptor,
568  const char* name)
569 {
570  IgnoreUnused(name);
571 
572  auto fbFillBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Fill);
573 
574  auto fbDescriptor = serializer::CreateFillDescriptor(m_flatBufferBuilder, fillDescriptor.m_Value);
575 
576  auto fbFillLayer = serializer::CreateFillLayer(m_flatBufferBuilder, fbFillBaseLayer, fbDescriptor);
577 
578  CreateAnyLayer(fbFillLayer.o, serializer::Layer::Layer_FillLayer);
579 }
580 
581 void SerializerStrategy::SerializeFloorLayer(const armnn::IConnectableLayer *layer, const char *name)
582 {
583  IgnoreUnused(name);
584 
585  auto flatBufferFloorBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Floor);
586  auto flatBufferFloorLayer = serializer::CreateFloorLayer(m_flatBufferBuilder, flatBufferFloorBaseLayer);
587 
588  CreateAnyLayer(flatBufferFloorLayer.o, serializer::Layer::Layer_FloorLayer);
589 }
590 
591 void SerializerStrategy::SerializeGatherLayer(const armnn::IConnectableLayer* layer,
592  const armnn::GatherDescriptor& gatherDescriptor,
593  const char* name)
594 {
595  IgnoreUnused(name);
596 
597  auto fbGatherDescriptor = CreateGatherDescriptor(m_flatBufferBuilder,
598  gatherDescriptor.m_Axis);
599  auto fbGatherBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Gather);
600  auto flatBufferLayer = serializer::CreateGatherLayer(m_flatBufferBuilder, fbGatherBaseLayer, fbGatherDescriptor);
601 
602  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_GatherLayer);
603 }
604 
605 void SerializerStrategy::SerializeGatherNdLayer(const armnn::IConnectableLayer* layer,
606  const char* name)
607 {
608  IgnoreUnused(name);
609 
610  auto fbGatherNdBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_GatherNd);
611  auto flatBufferLayer = serializer::CreateGatherNdLayer(m_flatBufferBuilder, fbGatherNdBaseLayer);
612 
613  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_GatherNdLayer);
614 }
615 
616 void SerializerStrategy::SerializeInstanceNormalizationLayer(
617  const armnn::IConnectableLayer* layer,
618  const armnn::InstanceNormalizationDescriptor& instanceNormalizationDescriptor,
619  const char* name)
620 {
621  IgnoreUnused(name);
622 
623  auto fbDescriptor = serializer::CreateInstanceNormalizationDescriptor(
624  m_flatBufferBuilder,
625  instanceNormalizationDescriptor.m_Gamma,
626  instanceNormalizationDescriptor.m_Beta,
627  instanceNormalizationDescriptor.m_Eps,
628  GetFlatBufferDataLayout(instanceNormalizationDescriptor.m_DataLayout));
629 
630  auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_InstanceNormalization);
631  auto fbLayer = serializer::CreateInstanceNormalizationLayer(m_flatBufferBuilder, fbBaseLayer, fbDescriptor);
632 
633  CreateAnyLayer(fbLayer.o, serializer::Layer::Layer_InstanceNormalizationLayer);
634 }
635 
636 void SerializerStrategy::SerializeL2NormalizationLayer(const armnn::IConnectableLayer* layer,
637  const armnn::L2NormalizationDescriptor& l2NormalizationDescriptor,
638  const char* name)
639 {
640  IgnoreUnused(name);
641 
642  // Create FlatBuffer BaseLayer
643  auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_L2Normalization);
644 
645  // Create the FlatBuffer L2Normalization Descriptor
646  auto fbDescriptor = serializer::CreateL2NormalizationDescriptor(
647  m_flatBufferBuilder,
648  GetFlatBufferDataLayout(l2NormalizationDescriptor.m_DataLayout),
649  l2NormalizationDescriptor.m_Eps);
650 
651  // Create FlatBuffer layer
652  auto fbLayer = serializer::CreateL2NormalizationLayer(m_flatBufferBuilder, fbBaseLayer, fbDescriptor);
653 
654  CreateAnyLayer(fbLayer.o, serializer::Layer::Layer_L2NormalizationLayer);
655 }
656 
657 void SerializerStrategy::SerializeLogicalBinaryLayer(const armnn::IConnectableLayer* layer,
658  const armnn::LogicalBinaryDescriptor& descriptor,
659  const char* name)
660 {
661  IgnoreUnused(name);
662 
663  auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_LogicalBinary);
664  auto fbDescriptor = serializer::CreateLogicalBinaryDescriptor(
665  m_flatBufferBuilder,
666  GetFlatBufferLogicalBinaryOperation(descriptor.m_Operation));
667 
668  auto fbLayer = serializer::CreateLogicalBinaryLayer(m_flatBufferBuilder, fbBaseLayer, fbDescriptor);
669  CreateAnyLayer(fbLayer.o, serializer::Layer::Layer_LogicalBinaryLayer);
670 }
671 
672 void SerializerStrategy::SerializeLogSoftmaxLayer(const armnn::IConnectableLayer* layer,
673  const armnn::LogSoftmaxDescriptor& logSoftmaxDescriptor,
674  const char* name)
675 {
676  IgnoreUnused(name);
677 
678  // Create FlatBuffer BaseLayer
679  auto flatBufferLogSoftmaxBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_LogSoftmax);
680 
681  // Create the FlatBuffer LogSoftmaxDescriptor
682  auto flatBufferLogSoftmaxDesc =
683  serializer::CreateLogSoftmaxDescriptor(m_flatBufferBuilder,
684  logSoftmaxDescriptor.m_Beta,
685  logSoftmaxDescriptor.m_Axis);
686 
687  // Create the FlatBuffer LogSoftmaxLayer
688  auto flatBufferLogSoftmaxLayer =
689  serializer::CreateLogSoftmaxLayer(m_flatBufferBuilder,
690  flatBufferLogSoftmaxBaseLayer,
691  flatBufferLogSoftmaxDesc);
692 
693  CreateAnyLayer(flatBufferLogSoftmaxLayer.o, serializer::Layer::Layer_LogSoftmaxLayer);
694 }
695 
696 void SerializerStrategy::SerializeLstmLayer(const armnn::IConnectableLayer* layer,
697  const armnn::LstmDescriptor& descriptor,
698  const std::vector<armnn::ConstTensor>& constants,
699  const char* name)
700 {
701  IgnoreUnused(name);
702 
703  auto fbLstmBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Lstm);
704 
705  auto fbLstmDescriptor = serializer::CreateLstmDescriptor(
706  m_flatBufferBuilder,
707  descriptor.m_ActivationFunc,
708  descriptor.m_ClippingThresCell,
709  descriptor.m_ClippingThresProj,
710  descriptor.m_CifgEnabled,
711  descriptor.m_PeepholeEnabled,
712  descriptor.m_ProjectionEnabled,
713  descriptor.m_LayerNormEnabled);
714 
715  // Index for constants vector
716  std::size_t i = 0;
717 
718  // Get mandatory/basic input parameters
719  auto inputToForgetWeights = CreateConstTensorInfo(constants[i++]); //InputToForgetWeights
720  auto inputToCellWeights = CreateConstTensorInfo(constants[i++]); //InputToCellWeights
721  auto inputToOutputWeights = CreateConstTensorInfo(constants[i++]); //InputToOutputWeights
722  auto recurrentToForgetWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToForgetWeights
723  auto recurrentToCellWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToCellWeights
724  auto recurrentToOutputWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToOutputWeights
725  auto forgetGateBias = CreateConstTensorInfo(constants[i++]); //ForgetGateBias
726  auto cellBias = CreateConstTensorInfo(constants[i++]); //CellBias
727  auto outputGateBias = CreateConstTensorInfo(constants[i++]); //OutputGateBias
728 
729 
730 
731  //Define optional parameters, these will be set depending on configuration in Lstm descriptor
732  flatbuffers::Offset<serializer::ConstTensor> inputToInputWeights;
733  flatbuffers::Offset<serializer::ConstTensor> recurrentToInputWeights;
734  flatbuffers::Offset<serializer::ConstTensor> cellToInputWeights;
735  flatbuffers::Offset<serializer::ConstTensor> inputGateBias;
736  flatbuffers::Offset<serializer::ConstTensor> projectionWeights;
737  flatbuffers::Offset<serializer::ConstTensor> projectionBias;
738  flatbuffers::Offset<serializer::ConstTensor> cellToForgetWeights;
739  flatbuffers::Offset<serializer::ConstTensor> cellToOutputWeights;
740  flatbuffers::Offset<serializer::ConstTensor> inputLayerNormWeights;
741  flatbuffers::Offset<serializer::ConstTensor> forgetLayerNormWeights;
742  flatbuffers::Offset<serializer::ConstTensor> cellLayerNormWeights;
743  flatbuffers::Offset<serializer::ConstTensor> outputLayerNormWeights;
744 
745  if (!descriptor.m_CifgEnabled)
746  {
747  inputToInputWeights = CreateConstTensorInfo(constants[i++]); //InputToInputWeights
748  recurrentToInputWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToInputWeights
749  inputGateBias = CreateConstTensorInfo(constants[i++]); //InputGateBias
750  }
751 
752  if (descriptor.m_PeepholeEnabled)
753  {
754  if (!descriptor.m_CifgEnabled)
755  {
756  cellToInputWeights = CreateConstTensorInfo(constants[i++]); //CellToInputWeights
757  }
758  cellToForgetWeights = CreateConstTensorInfo(constants[i++]); //CellToForgetWeights
759  cellToOutputWeights = CreateConstTensorInfo(constants[i++]); //CellToOutputWeights
760  }
761 
762  if (descriptor.m_ProjectionEnabled)
763  {
764  projectionWeights = CreateConstTensorInfo(constants[i++]); //ProjectionWeights
765  projectionBias = CreateConstTensorInfo(constants[i++]); //ProjectionBias
766  }
767 
768  if (descriptor.m_LayerNormEnabled)
769  {
770  if (!descriptor.m_CifgEnabled)
771  {
772  inputLayerNormWeights = CreateConstTensorInfo(constants[i++]); //InputLayerNormWeights
773  }
774  forgetLayerNormWeights = CreateConstTensorInfo(constants[i++]); //ForgetLayerNormWeights
775  cellLayerNormWeights = CreateConstTensorInfo(constants[i++]); //CellLayerNormWeights
776  outputLayerNormWeights = CreateConstTensorInfo(constants[i++]); //OutputLayerNormWeights
777  }
778 
779  auto fbLstmParams = serializer::CreateLstmInputParams(
780  m_flatBufferBuilder,
781  inputToForgetWeights,
782  inputToCellWeights,
783  inputToOutputWeights,
784  recurrentToForgetWeights,
785  recurrentToCellWeights,
786  recurrentToOutputWeights,
787  forgetGateBias,
788  cellBias,
789  outputGateBias,
790  inputToInputWeights,
791  recurrentToInputWeights,
792  cellToInputWeights,
793  inputGateBias,
794  projectionWeights,
795  projectionBias,
796  cellToForgetWeights,
797  cellToOutputWeights,
798  inputLayerNormWeights,
799  forgetLayerNormWeights,
800  cellLayerNormWeights,
801  outputLayerNormWeights);
802 
803  auto fbLstmLayer = serializer::CreateLstmLayer(
804  m_flatBufferBuilder,
805  fbLstmBaseLayer,
806  fbLstmDescriptor,
807  fbLstmParams);
808 
809  CreateAnyLayer(fbLstmLayer.o, serializer::Layer::Layer_LstmLayer);
810 }
811 
812 void SerializerStrategy::SerializeMaximumLayer(const armnn::IConnectableLayer* layer, const char* name)
813 {
814  IgnoreUnused(name);
815 
816  auto fbMaximumBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Maximum);
817  auto fbMaximumLayer = serializer::CreateMaximumLayer(m_flatBufferBuilder, fbMaximumBaseLayer);
818 
819  CreateAnyLayer(fbMaximumLayer.o, serializer::Layer::Layer_MaximumLayer);
820 }
821 
822 void SerializerStrategy::SerializeMeanLayer(const armnn::IConnectableLayer* layer,
823  const armnn::MeanDescriptor& descriptor,
824  const char* name)
825 {
826  IgnoreUnused(name);
827 
828  auto fbMeanBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Mean);
829  auto fbMeanDescriptor = serializer::CreateMeanDescriptor(m_flatBufferBuilder,
830  m_flatBufferBuilder.CreateVector(descriptor.m_Axis),
831  descriptor.m_KeepDims);
832 
833  auto fbMeanLayer = serializer::CreateMeanLayer(m_flatBufferBuilder,
834  fbMeanBaseLayer,
835  fbMeanDescriptor);
836 
837  CreateAnyLayer(fbMeanLayer.o, serializer::Layer::Layer_MeanLayer);
838 }
839 
840 void SerializerStrategy::SerializeMinimumLayer(const armnn::IConnectableLayer* layer, const char* name)
841 {
842  IgnoreUnused(name);
843 
844  auto fbMinimumBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Minimum);
845  auto fbMinimumLayer = serializer::CreateMinimumLayer(m_flatBufferBuilder, fbMinimumBaseLayer);
846 
847  CreateAnyLayer(fbMinimumLayer.o, serializer::Layer::Layer_MinimumLayer);
848 }
849 
850 void SerializerStrategy::SerializeMergeLayer(const armnn::IConnectableLayer* layer, const char* name)
851 {
852  IgnoreUnused(name);
853 
854  auto fbMergeBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Merge);
855  auto fbMergeLayer = serializer::CreateMergeLayer(m_flatBufferBuilder, fbMergeBaseLayer);
856 
857  CreateAnyLayer(fbMergeLayer.o, serializer::Layer::Layer_MergeLayer);
858 }
859 
860 void SerializerStrategy::SerializeConcatLayer(const armnn::IConnectableLayer* layer,
861  const armnn::ConcatDescriptor& concatDescriptor,
862  const char* name)
863 {
864  IgnoreUnused(name);
865 
866  auto flatBufferConcatBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Concat);
867 
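// Wrap each view's origin (one uint32 per dimension) in a UintVector so the
// OriginsDescriptor created below can rebuild the concat views when deserializing.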
868  std::vector<flatbuffers::Offset<UintVector>> views;
869  for (unsigned int v = 0; v < concatDescriptor.GetNumViews(); ++v)
870  {
871  const uint32_t* origin = concatDescriptor.GetViewOrigin(v);
872  std::vector<uint32_t> origins;
873  for (unsigned int d = 0; d < concatDescriptor.GetNumDimensions(); ++d)
874  {
875  origins.push_back(origin[d]);
876  }
877  auto view = m_flatBufferBuilder.CreateVector(origins);
878  auto uintVector = CreateUintVector(m_flatBufferBuilder, view);
879  views.push_back(uintVector);
880  }
881 
882  auto flatBufferConcatDescriptor = CreateOriginsDescriptor(m_flatBufferBuilder,
883  concatDescriptor.GetConcatAxis(),
884  concatDescriptor.GetNumViews(),
885  concatDescriptor.GetNumDimensions(),
886  m_flatBufferBuilder.CreateVector(views));
887 
888  auto flatBufferLayer = CreateConcatLayer(m_flatBufferBuilder,
889  flatBufferConcatBaseLayer,
890  flatBufferConcatDescriptor);
891 
892  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_ConcatLayer);
893 }
894 
895 void SerializerStrategy::SerializeMultiplicationLayer(const armnn::IConnectableLayer* layer, const char* name)
896 {
897  IgnoreUnused(name);
898 
899  auto fbMultiplicationBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Multiplication);
900  auto fbMultiplicationLayer = serializer::CreateMultiplicationLayer(m_flatBufferBuilder,
901  fbMultiplicationBaseLayer);
902 
903  CreateAnyLayer(fbMultiplicationLayer.o, serializer::Layer::Layer_MultiplicationLayer);
904 }
905 
906 void SerializerStrategy::SerializePadLayer(const armnn::IConnectableLayer* layer,
907  const armnn::PadDescriptor& padDescriptor,
908  const char* name)
909 {
910  IgnoreUnused(name);
911 
912  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Pad);
913 
914  std::vector<unsigned int> padList;
915  for (auto& p: padDescriptor.m_PadList)
916  {
917  padList.push_back(p.first);
918  padList.push_back(p.second);
919  }
920 
921  auto flatBufferPadDesc = serializer::CreatePadDescriptor(m_flatBufferBuilder,
922  m_flatBufferBuilder.CreateVector(padList),
923  padDescriptor.m_PadValue,
924  GetFlatBufferPaddingMode(padDescriptor.m_PaddingMode));
925 
926  auto flatBufferPadLayer = serializer::CreatePadLayer(m_flatBufferBuilder,
927  flatBufferBaseLayer,
928  flatBufferPadDesc);
929 
930  CreateAnyLayer(flatBufferPadLayer.o, serializer::Layer::Layer_PadLayer);
931 }
932 
933 void SerializerStrategy::SerializePermuteLayer(const armnn::IConnectableLayer* layer,
934  const armnn::PermuteDescriptor& permuteDescriptor,
935  const char* name)
936 {
937  IgnoreUnused(name);
938 
939  // Create FlatBuffer BaseLayer
940  auto flatBufferPermuteBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Permute);
941 
942  std::vector<unsigned int> dimMappings;
943  for (unsigned int i=0; i<permuteDescriptor.m_DimMappings.GetSize(); ++i)
944  {
945  dimMappings.push_back(permuteDescriptor.m_DimMappings[i]);
946  }
947 
948  auto flatBufferPermuteDesc = serializer::CreatePermuteDescriptor(m_flatBufferBuilder,
949  m_flatBufferBuilder.CreateVector(dimMappings));
950 
951  // Create the FlatBuffer PermuteLayer
952  auto flatBufferPermuteLayer = serializer::CreatePermuteLayer(m_flatBufferBuilder,
953  flatBufferPermuteBaseLayer,
954  flatBufferPermuteDesc);
955 
956  // Add the AnyLayer to the FlatBufferLayers
957  CreateAnyLayer(flatBufferPermuteLayer.o, serializer::Layer::Layer_PermuteLayer);
958 }
959 
960 // Build FlatBuffer for Rank Layer
961 void SerializerStrategy::SerializeRankLayer(const armnn::IConnectableLayer* layer,
962  const char* name)
963 {
964  IgnoreUnused(name);
965  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Rank);
966  auto flatBufferRankLayer = serializer::CreateRankLayer(m_flatBufferBuilder, flatBufferBaseLayer);
967 
968  CreateAnyLayer(flatBufferRankLayer.o, serializer::Layer::Layer_RankLayer);
969 }
970 
971 void SerializerStrategy::SerializeReduceLayer(const armnn::IConnectableLayer* layer,
972  const armnn::ReduceDescriptor& reduceDescriptor,
973  const char*)
974 {
975  auto fbReduceBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Reduce);
976  auto fbDescriptor = CreateReduceDescriptor(m_flatBufferBuilder,
977  reduceDescriptor.m_KeepDims,
978  m_flatBufferBuilder.CreateVector(reduceDescriptor.m_vAxis),
979  GetFlatBufferReduceOperation(reduceDescriptor.m_ReduceOperation));
980  auto fbReduceLayer = serializer::CreateReduceLayer(m_flatBufferBuilder,
981  fbReduceBaseLayer,
982  fbDescriptor);
983 
984  CreateAnyLayer(fbReduceLayer.o, serializer::Layer::Layer_ReduceLayer);
985 }
986 
987 // Build FlatBuffer for Reshape Layer
988 void SerializerStrategy::SerializeReshapeLayer(const armnn::IConnectableLayer* layer,
989  const armnn::ReshapeDescriptor& reshapeDescriptor,
990  const char* name)
991 {
992  IgnoreUnused(name);
993 
994  // Create FlatBuffer BaseLayer
995  auto flatBufferReshapeBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Reshape);
996 
997  std::vector<unsigned int> targetShape;
998  for (unsigned int i =0; i < reshapeDescriptor.m_TargetShape.GetNumDimensions(); i++)
999  {
1000  targetShape.push_back(reshapeDescriptor.m_TargetShape[i]);
1001  }
1002 
1003  auto flatBufferReshapeDesc = serializer::CreateReshapeDescriptor(m_flatBufferBuilder,
1004  m_flatBufferBuilder.CreateVector(targetShape));
1005 
1006  // Create the FlatBuffer ReshapeLayer
1007  auto flatBufferReshapeLayer = serializer::CreateReshapeLayer(m_flatBufferBuilder, flatBufferReshapeBaseLayer,
1008  flatBufferReshapeDesc);
1009 
1010  // Add the AnyLayer to the FlatBufferLayers
1011  CreateAnyLayer(flatBufferReshapeLayer.o, serializer::Layer::Layer_ReshapeLayer);
1012 }
1013 
1014 void SerializerStrategy::SerializeResizeLayer(const armnn::IConnectableLayer* layer,
1015  const armnn::ResizeDescriptor& resizeDescriptor,
1016  const char* name)
1017 {
1018  IgnoreUnused(name);
1019 
1020  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Resize);
1021 
1022  auto flatBufferDescriptor =
1023  CreateResizeDescriptor(m_flatBufferBuilder,
1024  resizeDescriptor.m_TargetHeight,
1025  resizeDescriptor.m_TargetWidth,
1026  GetFlatBufferResizeMethod(resizeDescriptor.m_Method),
1027  GetFlatBufferDataLayout(resizeDescriptor.m_DataLayout),
1028  resizeDescriptor.m_AlignCorners,
1029  resizeDescriptor.m_HalfPixelCenters);
1030 
1031  auto flatBufferLayer = serializer::CreateResizeLayer(m_flatBufferBuilder,
1032  flatBufferBaseLayer,
1033  flatBufferDescriptor);
1034 
1035  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_ResizeLayer);
1036 }
1037 
1038 void SerializerStrategy::SerializeReverseV2Layer(const armnn::IConnectableLayer* layer,
1039  const char* name)
1040 {
1041  IgnoreUnused(name);
1042 
1043  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_ReverseV2);
1044 
1045  auto flatBufferLayer = serializer::CreateReverseV2Layer(m_flatBufferBuilder,
1046  flatBufferBaseLayer);
1047 
1048  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_ReverseV2Layer);
1049 }
1050 
1051 void SerializerStrategy::SerializeSliceLayer(const armnn::IConnectableLayer* layer,
1052  const armnn::SliceDescriptor& sliceDescriptor,
1053  const char* name)
1054 {
1055  IgnoreUnused(name);
1056 
1057  auto fbSliceBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Slice);
1058  auto fbSliceDescriptor = CreateSliceDescriptor(m_flatBufferBuilder,
1059  m_flatBufferBuilder.CreateVector(sliceDescriptor.m_Begin),
1060  m_flatBufferBuilder.CreateVector(sliceDescriptor.m_Size));
1061 
1062  auto fbSliceLayer = serializer::CreateSliceLayer(m_flatBufferBuilder, fbSliceBaseLayer, fbSliceDescriptor);
1063 
1064  CreateAnyLayer(fbSliceLayer.o, serializer::Layer::Layer_SliceLayer);
1065 }
1066 
1067 // Build FlatBuffer for Softmax Layer
1068 void SerializerStrategy::SerializeSoftmaxLayer(const armnn::IConnectableLayer* layer,
1069  const armnn::SoftmaxDescriptor& softmaxDescriptor,
1070  const char* name)
1071 {
1072  IgnoreUnused(name);
1073 
1074  // Create FlatBuffer BaseLayer
1075  auto flatBufferSoftmaxBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Softmax);
1076 
1077  // Create the FlatBuffer SoftmaxDescriptor
1078  auto flatBufferSoftmaxDesc =
1079  serializer::CreateSoftmaxDescriptor(m_flatBufferBuilder,
1080  softmaxDescriptor.m_Beta,
1081  softmaxDescriptor.m_Axis);
1082 
1083  // Create the FlatBuffer SoftmaxLayer
1084  auto flatBufferSoftmaxLayer =
1085  serializer::CreateSoftmaxLayer(m_flatBufferBuilder,
1086  flatBufferSoftmaxBaseLayer,
1087  flatBufferSoftmaxDesc);
1088 
1089  CreateAnyLayer(flatBufferSoftmaxLayer.o, serializer::Layer::Layer_SoftmaxLayer);
1090 }
1091 
1092 void SerializerStrategy::SerializePooling2dLayer(const armnn::IConnectableLayer* layer,
1093  const armnn::Pooling2dDescriptor& pooling2dDescriptor,
1094  const char* name)
1095 {
1096  IgnoreUnused(name);
1097 
1098  auto fbPooling2dBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Pooling2d);
1099  auto fbPooling2dDescriptor = serializer::CreatePooling2dDescriptor(
1100  m_flatBufferBuilder,
1101  GetFlatBufferPoolingAlgorithm(pooling2dDescriptor.m_PoolType),
1102  pooling2dDescriptor.m_PadLeft,
1103  pooling2dDescriptor.m_PadRight,
1104  pooling2dDescriptor.m_PadTop,
1105  pooling2dDescriptor.m_PadBottom,
1106  pooling2dDescriptor.m_PoolWidth,
1107  pooling2dDescriptor.m_PoolHeight,
1108  pooling2dDescriptor.m_StrideX,
1109  pooling2dDescriptor.m_StrideY,
1110  GetFlatBufferOutputShapeRounding(pooling2dDescriptor.m_OutputShapeRounding),
1111  GetFlatBufferPaddingMethod(pooling2dDescriptor.m_PaddingMethod),
1112  GetFlatBufferDataLayout(pooling2dDescriptor.m_DataLayout));
1113 
1114  auto fbPooling2dLayer = serializer::CreatePooling2dLayer(m_flatBufferBuilder,
1115  fbPooling2dBaseLayer,
1116  fbPooling2dDescriptor);
1117 
1118  CreateAnyLayer(fbPooling2dLayer.o, serializer::Layer::Layer_Pooling2dLayer);
1119 }
1120 
1121 void SerializerStrategy::SerializePooling3dLayer(const armnn::IConnectableLayer* layer,
1122  const armnn::Pooling3dDescriptor& pooling3dDescriptor,
1123  const char* name)
1124 {
1125  IgnoreUnused(name);
1126 
1127  auto fbPooling3dBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Pooling3d);
1128  auto fbPooling3dDescriptor = serializer::CreatePooling3dDescriptor(
1129  m_flatBufferBuilder,
1130  GetFlatBufferPoolingAlgorithm(pooling3dDescriptor.m_PoolType),
1131  pooling3dDescriptor.m_PadLeft,
1132  pooling3dDescriptor.m_PadRight,
1133  pooling3dDescriptor.m_PadTop,
1134  pooling3dDescriptor.m_PadBottom,
1135  pooling3dDescriptor.m_PadFront,
1136  pooling3dDescriptor.m_PadBack,
1137  pooling3dDescriptor.m_PoolWidth,
1138  pooling3dDescriptor.m_PoolHeight,
1139  pooling3dDescriptor.m_PoolDepth,
1140  pooling3dDescriptor.m_StrideX,
1141  pooling3dDescriptor.m_StrideY,
1142  pooling3dDescriptor.m_StrideZ,
1143  GetFlatBufferOutputShapeRounding(pooling3dDescriptor.m_OutputShapeRounding),
1144  GetFlatBufferPaddingMethod(pooling3dDescriptor.m_PaddingMethod),
1145  GetFlatBufferDataLayout(pooling3dDescriptor.m_DataLayout));
1146 
1147  auto fbPooling3dLayer = serializer::CreatePooling3dLayer(m_flatBufferBuilder,
1148  fbPooling3dBaseLayer,
1149  fbPooling3dDescriptor);
1150 
1151  CreateAnyLayer(fbPooling3dLayer.o, serializer::Layer::Layer_Pooling3dLayer);
1152 }
1153 
1154 void SerializerStrategy::SerializePreluLayer(const armnn::IConnectableLayer* layer,
1155  const char* name)
1156 {
1157  IgnoreUnused(name);
1158 
1159  // Create FlatBuffer BaseLayer
1160  auto flatBufferPreluBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Prelu);
1161 
1162  // Create the FlatBuffer PreluLayer
1163  auto flatBufferPreluLayer = serializer::CreatePreluLayer(m_flatBufferBuilder, flatBufferPreluBaseLayer);
1164 
1165  // Add the AnyLayer to the FlatBufferLayers
1166  CreateAnyLayer(flatBufferPreluLayer.o, serializer::Layer::Layer_PreluLayer);
1167 }
1168 
1169 void SerializerStrategy::SerializeQuantizeLayer(const armnn::IConnectableLayer *layer, const char *name)
1170 {
1171  IgnoreUnused(name);
1172 
1173  auto fbQuantizeBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Quantize);
1174  auto fbQuantizeLayer = serializer::CreateQuantizeLayer(m_flatBufferBuilder,
1175  fbQuantizeBaseLayer);
1176  CreateAnyLayer(fbQuantizeLayer.o, serializer::Layer::Layer_QuantizeLayer);
1177 }
1178 
1179 // Build FlatBuffer for FullyConnected Layer
1180 void SerializerStrategy::SerializeFullyConnectedLayer(const armnn::IConnectableLayer* layer,
1181  const armnn::FullyConnectedDescriptor& fullyConnectedDescriptor,
1182  const char*)
1183 {
1184  // Create FlatBuffer BaseLayer
1185  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_FullyConnected);
1186 
1187  // Create FlatBuffer FullyConnectedDescriptor
1188  auto flatBufferDescriptor =
1189  serializer::CreateFullyConnectedDescriptor(m_flatBufferBuilder,
1190  fullyConnectedDescriptor.m_BiasEnabled,
1191  fullyConnectedDescriptor.m_TransposeWeightMatrix,
1192  fullyConnectedDescriptor.m_ConstantWeights);
1193 
1194  // Create FlatBuffer FullyConnectedLayer
1195  auto flatBufferLayer = serializer::CreateFullyConnectedLayer(m_flatBufferBuilder,
1196  flatBufferBaseLayer,
1197  flatBufferDescriptor);
1198 
1199  // Add created FullyConnectedLayer to the FlatBufferLayers
1200  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_FullyConnectedLayer);
1201 }
1202 
1203 // Build FlatBuffer for SpaceToBatchNd Layer
1204 void SerializerStrategy::SerializeSpaceToBatchNdLayer(const armnn::IConnectableLayer* layer,
1205  const armnn::SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor,
1206  const char* name)
1207 {
1208  IgnoreUnused(name);
1209 
1210  // Create FlatBuffer BaseLayer
1211  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_SpaceToBatchNd);
1212 
1213  std::vector<unsigned int> padList;
1214  padList.reserve(spaceToBatchNdDescriptor.m_PadList.size()*2);
1215  for (auto& pad : spaceToBatchNdDescriptor.m_PadList)
1216  {
1217  padList.push_back(pad.first);
1218  padList.push_back(pad.second);
1219  }
1220 
1221  auto flatBufferDescriptor =
1222  CreateSpaceToBatchNdDescriptor(m_flatBufferBuilder,
1223  m_flatBufferBuilder.CreateVector(spaceToBatchNdDescriptor.m_BlockShape),
1224  m_flatBufferBuilder.CreateVector(padList),
1225  GetFlatBufferDataLayout(spaceToBatchNdDescriptor.m_DataLayout));
1226 
1227  auto flatBufferLayer = serializer::CreateSpaceToBatchNdLayer(m_flatBufferBuilder,
1228  flatBufferBaseLayer,
1229  flatBufferDescriptor);
1230 
1231  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_SpaceToBatchNdLayer);
1232 }
1233 
1234 // Build FlatBuffer for SpaceToDepthLayer
1235 void SerializerStrategy::SerializeSpaceToDepthLayer(const armnn::IConnectableLayer* layer,
1236  const armnn::SpaceToDepthDescriptor& spaceToDepthDescriptor,
1237  const char* name)
1238 {
1239  IgnoreUnused(name);
1240 
1241  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_SpaceToDepth);
1242  auto flatBufferDescriptor =
1243  CreateSpaceToDepthDescriptor(m_flatBufferBuilder,
1244  spaceToDepthDescriptor.m_BlockSize,
1245  GetFlatBufferDataLayout(spaceToDepthDescriptor.m_DataLayout));
1246 
1247  auto flatBufferLayer = serializer::CreateSpaceToDepthLayer(m_flatBufferBuilder,
1248  flatBufferBaseLayer,
1249  flatBufferDescriptor);
1250 
1251  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_SpaceToDepthLayer);
1252 }
1253 
1254 // Build FlatBuffer for Splitter Layer
1255 void SerializerStrategy::SerializeSplitterLayer(const armnn::IConnectableLayer* layer,
1256  const armnn::ViewsDescriptor& viewsDescriptor,
1257  const char* name)
1258 {
1259  IgnoreUnused(name);
1260 
1261  // Create FlatBuffer ViewOrigins
1262  std::vector<flatbuffers::Offset<UintVector>> flatBufferViewOrigins;
1263  flatBufferViewOrigins.reserve(viewsDescriptor.GetNumViews());
1264 
1265  for(unsigned int vIdx = 0; vIdx < viewsDescriptor.GetNumViews(); ++vIdx)
1266  {
1267  std::vector<uint32_t> viewOrigin;
1268  viewOrigin.reserve(viewsDescriptor.GetNumDimensions());
1269 
1270  // Copy vector
1271  for(unsigned int dIdx = 0; dIdx < viewsDescriptor.GetNumDimensions(); ++dIdx)
1272  {
1273  viewOrigin.push_back(viewsDescriptor.GetViewOrigin(vIdx)[dIdx]);
1274  }
1275 
1276  flatBufferViewOrigins.push_back(CreateUintVector(m_flatBufferBuilder,
1277  m_flatBufferBuilder.CreateVector(viewOrigin)));
1278  }
1279 
1280  // Create FlatBuffer OriginsDescriptor
1281  auto flatBufferOriginDescriptor = CreateOriginsDescriptor(m_flatBufferBuilder,
1282  viewsDescriptor.GetOrigins().GetConcatAxis(),
1283  viewsDescriptor.GetOrigins().GetNumViews(),
1284  viewsDescriptor.GetOrigins().GetNumDimensions(),
1285  m_flatBufferBuilder.CreateVector(flatBufferViewOrigins));
1286 
1287  // Create FlatBuffer ViewSizes
1288  std::vector<flatbuffers::Offset<UintVector>> flatBufferViewSizes;
1289  flatBufferViewSizes.reserve(viewsDescriptor.GetNumViews());
1290 
1291  for(unsigned int vIdx = 0; vIdx < viewsDescriptor.GetNumViews(); ++vIdx)
1292  {
1293  std::vector<uint32_t> viewSize;
1294  viewSize.reserve(viewsDescriptor.GetNumDimensions());
1295 
1296  // Copy vector
1297  for(unsigned int dIdx = 0; dIdx < viewsDescriptor.GetNumDimensions(); ++dIdx)
1298  {
1299  viewSize.push_back(viewsDescriptor.GetViewSizes(vIdx)[dIdx]);
1300  }
1301 
1302  flatBufferViewSizes.push_back(CreateUintVector(m_flatBufferBuilder,
1303  m_flatBufferBuilder.CreateVector(viewSize)));
1304  }
1305 
1306  // Create FlatBuffer ViewsDescriptor
1307  auto flatBufferViewsDescriptor = CreateViewsDescriptor(m_flatBufferBuilder,
1308  flatBufferOriginDescriptor,
1309  m_flatBufferBuilder.CreateVector(flatBufferViewSizes));
1310 
1311  // Create FlatBuffer BaseLayer
1312  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Splitter);
1313 
1314  auto flatBufferSplitterLayer = serializer::CreateSplitterLayer(m_flatBufferBuilder,
1315  flatBufferBaseLayer,
1316  flatBufferViewsDescriptor);
1317 
1318  CreateAnyLayer(flatBufferSplitterLayer.o, serializer::Layer::Layer_SplitterLayer);
1319 }
1320 
1321 void SerializerStrategy::SerializeNormalizationLayer(const armnn::IConnectableLayer* layer,
1322  const armnn::NormalizationDescriptor& descriptor,
1323  const char* name)
1324 {
1325  IgnoreUnused(name);
1326 
1327  auto fbNormalizationBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Normalization);
1328 
1329  auto fbNormalizationDescriptor = serializer::CreateNormalizationDescriptor(
1330  m_flatBufferBuilder,
1331  GetFlatBufferNormalizationAlgorithmChannel(descriptor.m_NormChannelType),
1332  GetFlatBufferNormalizationAlgorithmMethod(descriptor.m_NormMethodType),
1333  descriptor.m_NormSize,
1334  descriptor.m_Alpha,
1335  descriptor.m_Beta,
1336  descriptor.m_K,
1337  GetFlatBufferDataLayout(descriptor.m_DataLayout));
1338 
1339  auto flatBufferLayer = serializer::CreateNormalizationLayer(m_flatBufferBuilder,
1340  fbNormalizationBaseLayer,
1341  fbNormalizationDescriptor);
1342 
1343  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_NormalizationLayer);
1344 }
1345 
1346 void SerializerStrategy::SerializeShapeLayer(const armnn::IConnectableLayer* layer,
1347  const char* name)
1348 {
1349  IgnoreUnused(name);
1350 
1351  auto shapeBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Shape);
1352  auto shapeLayer = serializer::CreateShapeLayer(m_flatBufferBuilder, shapeBaseLayer);
1353 
1354  CreateAnyLayer(shapeLayer.o, serializer::Layer::Layer_ShapeLayer);
1355 }
1356 
1357 void SerializerStrategy::SerializeStackLayer(const armnn::IConnectableLayer* layer,
1358  const armnn::StackDescriptor& stackDescriptor,
1359  const char* name)
1360 {
1361  IgnoreUnused(name);
1362 
1363  auto stackBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Stack);
1364 
1365  std::vector<unsigned int> inputShape;
1366  for (unsigned int i =0; i < stackDescriptor.m_InputShape.GetNumDimensions(); i++)
1367  {
1368  inputShape.push_back(stackDescriptor.m_InputShape[i]);
1369  }
1370 
1371  auto flatBufferStackDescriptor = CreateStackDescriptor(m_flatBufferBuilder,
1372  stackDescriptor.m_Axis,
1373  stackDescriptor.m_NumInputs,
1374  m_flatBufferBuilder.CreateVector(inputShape));
1375 
1376  auto stackLayer = serializer::CreateStackLayer(m_flatBufferBuilder, stackBaseLayer, flatBufferStackDescriptor);
1377  CreateAnyLayer(stackLayer.o, serializer::Layer::Layer_StackLayer);
1378 }
1379 
1380 void SerializerStrategy::SerializeStandInLayer(const armnn::IConnectableLayer *layer,
1381  const armnn::StandInDescriptor& standInDescriptor,
1382  const char *name)
1383 {
1384  IgnoreUnused(name);
1385 
1386  auto fbDescriptor = serializer::CreateStandInDescriptor(m_flatBufferBuilder,
1387  standInDescriptor.m_NumInputs,
1388  standInDescriptor.m_NumOutputs);
1389 
1390  auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_StandIn);
1391  auto fbLayer = serializer::CreateStandInLayer(m_flatBufferBuilder, fbBaseLayer, fbDescriptor);
1392 
1393  CreateAnyLayer(fbLayer.o, serializer::Layer::Layer_StandInLayer);
1394 }
1395 
1396 void SerializerStrategy::SerializeStridedSliceLayer(const armnn::IConnectableLayer* layer,
1397  const armnn::StridedSliceDescriptor& stridedSliceDescriptor,
1398  const char* name)
1399 {
1400  IgnoreUnused(name);
1401 
1402  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_StridedSlice);
1403 
1404  auto flatBufferDescriptor =
1405  CreateStridedSliceDescriptor(m_flatBufferBuilder,
1406  m_flatBufferBuilder.CreateVector(stridedSliceDescriptor.m_Begin),
1407  m_flatBufferBuilder.CreateVector(stridedSliceDescriptor.m_End),
1408  m_flatBufferBuilder.CreateVector(stridedSliceDescriptor.m_Stride),
1409  stridedSliceDescriptor.m_BeginMask,
1410  stridedSliceDescriptor.m_EndMask,
1411  stridedSliceDescriptor.m_ShrinkAxisMask,
1412  stridedSliceDescriptor.m_EllipsisMask,
1413  stridedSliceDescriptor.m_NewAxisMask,
1414  GetFlatBufferDataLayout(stridedSliceDescriptor.m_DataLayout));
1415 
1416  auto flatBufferLayer = serializer::CreateStridedSliceLayer(m_flatBufferBuilder,
1417  flatBufferBaseLayer,
1418  flatBufferDescriptor);
1419 
1420  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_StridedSliceLayer);
1421 }
1422 
1423 void SerializerStrategy::SerializeSubtractionLayer(const armnn::IConnectableLayer* layer, const char* name)
1424 {
1425  IgnoreUnused(name);
1426 
1427  auto fbSubtractionBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Subtraction);
1428  auto fbSubtractionLayer = serializer::CreateSubtractionLayer(m_flatBufferBuilder, fbSubtractionBaseLayer);
1429 
1430  CreateAnyLayer(fbSubtractionLayer.o, serializer::Layer::Layer_SubtractionLayer);
1431 }
1432 
1433 void SerializerStrategy::SerializeSwitchLayer(const armnn::IConnectableLayer* layer, const char* name)
1434 {
1435  IgnoreUnused(name);
1436 
1437  auto fbSwitchBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Switch);
1438  auto fbSwitchLayer = serializer::CreateSwitchLayer(m_flatBufferBuilder, fbSwitchBaseLayer);
1439 
1440  CreateAnyLayer(fbSwitchLayer.o, serializer::Layer::Layer_SwitchLayer);
1441 }
1442 
1443 void SerializerStrategy::SerializeTileLayer(const armnn::IConnectableLayer* layer,
1444  const armnn::TileDescriptor& descriptor,
1445  const char* name)
1446 {
1447  IgnoreUnused(name);
1448 
1449  // Create FlatBuffer BaseLayer
1450  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Tile);
1451 
1452  auto flatBufferDesc = serializer::CreateTileDescriptor(m_flatBufferBuilder,
1453  m_flatBufferBuilder.CreateVector(descriptor.m_Multiples));
1454 
1455  // Create the FlatBuffer TileLayer
1456  auto flatBufferLayer = serializer::CreateTileLayer(m_flatBufferBuilder,
1457  flatBufferBaseLayer,
1458  flatBufferDesc);
1459 
1460  // Add the AnyLayer to the FlatBufferLayers
1461  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_TileLayer);
1462 }
1463 
1464 void SerializerStrategy::SerializeTransposeConvolution2dLayer(
1465  const armnn::IConnectableLayer* layer,
1466  const armnn::TransposeConvolution2dDescriptor& descriptor,
1467  const std::vector<armnn::ConstTensor>& constants,
1468  const char* name)
1469 {
1470  IgnoreUnused(name);
1471 
1472  const armnn::ConstTensor& weights = constants.at(0);
1473 
1474  auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Convolution2d);
1475  auto fbDescriptor = CreateTransposeConvolution2dDescriptor(m_flatBufferBuilder,
1476  descriptor.m_PadLeft,
1477  descriptor.m_PadRight,
1478  descriptor.m_PadTop,
1479  descriptor.m_PadBottom,
1480  descriptor.m_StrideX,
1481  descriptor.m_StrideY,
1482  descriptor.m_BiasEnabled,
1483  GetFlatBufferDataLayout(descriptor.m_DataLayout));
1484 
1485  // weights & biases
1486  auto fbWeightsConstTensorInfo = CreateConstTensorInfo(weights);
1487  flatbuffers::Offset<serializer::ConstTensor> fbBiasesConstTensorInfo;
1488  if (constants.size() > 1)
1489  {
1490  const armnn::ConstTensor& biases = constants.at(1);
1491  fbBiasesConstTensorInfo = CreateConstTensorInfo(biases);
1492  }
1493 
1494  auto fbLayer = CreateTransposeConvolution2dLayer(m_flatBufferBuilder,
1495  fbBaseLayer,
1496  fbDescriptor,
1497  fbWeightsConstTensorInfo,
1498  fbBiasesConstTensorInfo);
1499 
1500  CreateAnyLayer(fbLayer.o, serializer::Layer::Layer_TransposeConvolution2dLayer);
1501 }
1502 
1503 void SerializerStrategy::SerializeTransposeLayer(const armnn::IConnectableLayer* layer,
1504  const armnn::TransposeDescriptor& descriptor,
1505  const char* name)
1506 {
1507  IgnoreUnused(name);
1508 
1509  // Create FlatBuffer BaseLayer
1510  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Transpose);
1511 
1512  std::vector<unsigned int> dimMappings;
1513  for (unsigned int i=0; i<descriptor.m_DimMappings.GetSize(); ++i)
1514  {
1515  dimMappings.push_back(descriptor.m_DimMappings[i]);
1516  }
1517 
1518  auto flatBufferDesc = serializer::CreateTransposeDescriptor(m_flatBufferBuilder,
1519  m_flatBufferBuilder.CreateVector(dimMappings));
1520 
1521  // Create the FlatBuffer TransposeLayer
1522  auto flatBufferLayer = serializer::CreateTransposeLayer(m_flatBufferBuilder,
1523  flatBufferBaseLayer,
1524  flatBufferDesc);
1525 
1526  // Add the AnyLayer to the FlatBufferLayers
1527  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_TransposeLayer);
1528 }
1529 
1530 void SerializerStrategy::SerializeQLstmLayer(const armnn::IConnectableLayer* layer,
1531  const armnn::QLstmDescriptor& descriptor,
1532  const std::vector<armnn::ConstTensor>& constants,
1533  const char* name)
1534 {
1535  IgnoreUnused(name);
1536 
1537  auto fbQLstmBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_QLstm);
1538 
1539  auto fbQLstmDescriptor = serializer::CreateQLstmDescriptor(
1540  m_flatBufferBuilder,
1541  descriptor.m_CifgEnabled,
1542  descriptor.m_PeepholeEnabled,
1543  descriptor.m_ProjectionEnabled,
1544  descriptor.m_LayerNormEnabled,
1545  descriptor.m_CellClip,
1546  descriptor.m_ProjectionClip,
1547  descriptor.m_InputIntermediateScale,
1548  descriptor.m_ForgetIntermediateScale,
1549  descriptor.m_CellIntermediateScale,
1550  descriptor.m_OutputIntermediateScale,
1551  descriptor.m_HiddenStateZeroPoint,
1552  descriptor.m_HiddenStateScale
1553  );
1554 
1555  // Index for constants vector
1556  std::size_t i = 0;
1557 
1558  // Mandatory params
1559  auto inputToForgetWeights = CreateConstTensorInfo(constants[i++]); //InputToForgetWeights
1560  auto inputToCellWeights = CreateConstTensorInfo(constants[i++]); //InputToCellWeights
1561  auto inputToOutputWeights = CreateConstTensorInfo(constants[i++]); //InputToOutputWeights
1562  auto recurrentToForgetWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToForgetWeights
1563  auto recurrentToCellWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToCellWeights
1564  auto recurrentToOutputWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToOutputWeights
1565  auto forgetGateBias = CreateConstTensorInfo(constants[i++]); //ForgetGateBias
1566  auto cellBias = CreateConstTensorInfo(constants[i++]); //CellBias
1567  auto outputGateBias = CreateConstTensorInfo(constants[i++]); //OutputGateBias
1568 
1569  // CIFG
1570  flatbuffers::Offset<serializer::ConstTensor> inputToInputWeights;
1571  flatbuffers::Offset<serializer::ConstTensor> recurrentToInputWeights;
1572  flatbuffers::Offset<serializer::ConstTensor> inputGateBias;
1573 
1574  if (!descriptor.m_CifgEnabled)
1575  {
1576  inputToInputWeights = CreateConstTensorInfo(constants[i++]); //InputToInputWeights
1577  recurrentToInputWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToInputWeights
1578  inputGateBias = CreateConstTensorInfo(constants[i++]); //InputGateBias
1579  }
1580 
1581  // Peephole
1582  flatbuffers::Offset<serializer::ConstTensor> cellToInputWeights;
1583  flatbuffers::Offset<serializer::ConstTensor> cellToForgetWeights;
1584  flatbuffers::Offset<serializer::ConstTensor> cellToOutputWeights;
1585 
1586  if (descriptor.m_PeepholeEnabled)
1587  {
1588  if (!descriptor.m_CifgEnabled)
1589  {
1590  cellToInputWeights = CreateConstTensorInfo(constants[i++]); //CellToInputWeights
1591  }
1592  cellToForgetWeights = CreateConstTensorInfo(constants[i++]); //CellToForgetWeights
1593  cellToOutputWeights = CreateConstTensorInfo(constants[i++]); //CellToOutputWeights
1594  }
1595 
1596  // Projection
1597  flatbuffers::Offset<serializer::ConstTensor> projectionWeights;
1598  flatbuffers::Offset<serializer::ConstTensor> projectionBias;
1599 
1600  if (descriptor.m_ProjectionEnabled)
1601  {
1602  projectionWeights = CreateConstTensorInfo(constants[i++]); //ProjectionWeights
1603  projectionBias = CreateConstTensorInfo(constants[i++]); //ProjectionBias
1604  }
1605 
1606  // Layer norm
1607  flatbuffers::Offset<serializer::ConstTensor> inputLayerNormWeights;
1608  flatbuffers::Offset<serializer::ConstTensor> forgetLayerNormWeights;
1609  flatbuffers::Offset<serializer::ConstTensor> cellLayerNormWeights;
1610  flatbuffers::Offset<serializer::ConstTensor> outputLayerNormWeights;
1611 
1612  if (descriptor.m_LayerNormEnabled)
1613  {
1614  if (!descriptor.m_CifgEnabled)
1615  {
1616  inputLayerNormWeights = CreateConstTensorInfo(constants[i++]); //InputLayerNormWeights
1617  }
1618  forgetLayerNormWeights = CreateConstTensorInfo(constants[i++]); //ForgetLayerNormWeights
1619  cellLayerNormWeights = CreateConstTensorInfo(constants[i++]); //CellLayerNormWeights
1620  outputLayerNormWeights = CreateConstTensorInfo(constants[i++]); //OutputLayerNormWeights
1621  }
1622 
1623  auto fbQLstmParams = serializer::CreateQLstmInputParams(
1624  m_flatBufferBuilder,
1625  inputToForgetWeights,
1626  inputToCellWeights,
1627  inputToOutputWeights,
1628  recurrentToForgetWeights,
1629  recurrentToCellWeights,
1630  recurrentToOutputWeights,
1631  forgetGateBias,
1632  cellBias,
1633  outputGateBias,
1634  inputToInputWeights,
1635  recurrentToInputWeights,
1636  inputGateBias,
1637  projectionWeights,
1638  projectionBias,
1639  cellToInputWeights,
1640  cellToForgetWeights,
1641  cellToOutputWeights,
1642  inputLayerNormWeights,
1643  forgetLayerNormWeights,
1644  cellLayerNormWeights,
1645  outputLayerNormWeights);
1646 
1647  auto fbQLstmLayer = serializer::CreateQLstmLayer(
1648  m_flatBufferBuilder,
1649  fbQLstmBaseLayer,
1650  fbQLstmDescriptor,
1651  fbQLstmParams);
1652 
1653  CreateAnyLayer(fbQLstmLayer.o, serializer::Layer::Layer_QLstmLayer);
1654 }
1655 
1656 void SerializerStrategy::SerializeQuantizedLstmLayer(const armnn::IConnectableLayer* layer,
1657  const std::vector<armnn::ConstTensor>& constants,
1658  const char* name)
1659 {
1660  IgnoreUnused(name);
1661 
1662  auto fbQuantizedLstmBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_QuantizedLstm);
1663 
1664  // index for constants vector
1665  size_t i = 0;
1666 
1667  // Get input parameters
1668  auto inputToInputWeights = CreateConstTensorInfo(constants[i++]);
1669  auto inputToForgetWeights = CreateConstTensorInfo(constants[i++]);
1670  auto inputToCellWeights = CreateConstTensorInfo(constants[i++]);
1671  auto inputToOutputWeights = CreateConstTensorInfo(constants[i++]);
1672 
1673  auto recurrentToInputWeights = CreateConstTensorInfo(constants[i++]);
1674  auto recurrentToForgetWeights = CreateConstTensorInfo(constants[i++]);
1675  auto recurrentToCellWeights = CreateConstTensorInfo(constants[i++]);
1676  auto recurrentToOutputWeights = CreateConstTensorInfo(constants[i++]);
1677 
1678  auto inputGateBias = CreateConstTensorInfo(constants[i++]);
1679  auto forgetGateBias = CreateConstTensorInfo(constants[i++]);
1680  auto cellBias = CreateConstTensorInfo(constants[i++]);
1681  auto outputGateBias = CreateConstTensorInfo(constants[i++]);
1682 
1683  auto fbQuantizedLstmParams = serializer::CreateQuantizedLstmInputParams(
1684  m_flatBufferBuilder,
1685  inputToInputWeights,
1686  inputToForgetWeights,
1687  inputToCellWeights,
1688  inputToOutputWeights,
1689  recurrentToInputWeights,
1690  recurrentToForgetWeights,
1691  recurrentToCellWeights,
1692  recurrentToOutputWeights,
1693  inputGateBias,
1694  forgetGateBias,
1695  cellBias,
1696  outputGateBias);
1697 
1698  auto fbQuantizedLstmLayer = serializer::CreateQuantizedLstmLayer(
1699  m_flatBufferBuilder,
1700  fbQuantizedLstmBaseLayer,
1701  fbQuantizedLstmParams);
1702 
1703  CreateAnyLayer(fbQuantizedLstmLayer.o, serializer::Layer::Layer_QuantizedLstmLayer);
1704 }
1705 
1706 void SerializerStrategy::SerializeUnidirectionalSequenceLstmLayer(
1707  const armnn::IConnectableLayer* layer,
1708  const armnn::UnidirectionalSequenceLstmDescriptor& descriptor,
1709  const std::vector<armnn::ConstTensor>& constants,
1710  const char* name)
1711 {
1712  IgnoreUnused(name);
1713 
1714  auto fbUnidirectionalSequenceLstmBaseLayer =
1715  CreateLayerBase(layer, serializer::LayerType::LayerType_UnidirectionalSequenceLstm);
1716 
1717  auto fbUnidirectionalSequenceLstmDescriptor = serializer::CreateUnidirectionalSequenceLstmDescriptor(
1718  m_flatBufferBuilder,
1719  descriptor.m_ActivationFunc,
1720  descriptor.m_ClippingThresCell,
1721  descriptor.m_ClippingThresProj,
1722  descriptor.m_CifgEnabled,
1723  descriptor.m_PeepholeEnabled,
1724  descriptor.m_ProjectionEnabled,
1725  descriptor.m_LayerNormEnabled,
1726  descriptor.m_TimeMajor);
1727 
1728  // Index for constants vector
1729  std::size_t i = 0;
1730 
1731  // Get mandatory/basic input parameters
1732  auto inputToForgetWeights = CreateConstTensorInfo(constants[i++]); //InputToForgetWeights
1733  auto inputToCellWeights = CreateConstTensorInfo(constants[i++]); //InputToCellWeights
1734  auto inputToOutputWeights = CreateConstTensorInfo(constants[i++]); //InputToOutputWeights
1735  auto recurrentToForgetWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToForgetWeights
1736  auto recurrentToCellWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToCellWeights
1737  auto recurrentToOutputWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToOutputWeights
1738  auto forgetGateBias = CreateConstTensorInfo(constants[i++]); //ForgetGateBias
1739  auto cellBias = CreateConstTensorInfo(constants[i++]); //CellBias
1740  auto outputGateBias = CreateConstTensorInfo(constants[i++]); //OutputGateBias
1741 
1742  // Define optional parameters; these are set depending on the configuration in the LSTM descriptor
1743  flatbuffers::Offset<serializer::ConstTensor> inputToInputWeights;
1744  flatbuffers::Offset<serializer::ConstTensor> recurrentToInputWeights;
1745  flatbuffers::Offset<serializer::ConstTensor> cellToInputWeights;
1746  flatbuffers::Offset<serializer::ConstTensor> inputGateBias;
1747  flatbuffers::Offset<serializer::ConstTensor> projectionWeights;
1748  flatbuffers::Offset<serializer::ConstTensor> projectionBias;
1749  flatbuffers::Offset<serializer::ConstTensor> cellToForgetWeights;
1750  flatbuffers::Offset<serializer::ConstTensor> cellToOutputWeights;
1751  flatbuffers::Offset<serializer::ConstTensor> inputLayerNormWeights;
1752  flatbuffers::Offset<serializer::ConstTensor> forgetLayerNormWeights;
1753  flatbuffers::Offset<serializer::ConstTensor> cellLayerNormWeights;
1754  flatbuffers::Offset<serializer::ConstTensor> outputLayerNormWeights;
1755 
1756  if (!descriptor.m_CifgEnabled)
1757  {
1758  inputToInputWeights = CreateConstTensorInfo(constants[i++]); //InputToInputWeights
1759  recurrentToInputWeights = CreateConstTensorInfo(constants[i++]); //RecurrentToInputWeights
1760  inputGateBias = CreateConstTensorInfo(constants[i++]); //InputGateBias
1761  }
1762 
1763  if (descriptor.m_PeepholeEnabled)
1764  {
1765  if (!descriptor.m_CifgEnabled)
1766  {
1767  cellToInputWeights = CreateConstTensorInfo(constants[i++]); //CellToInputWeights
1768  }
1769  cellToForgetWeights = CreateConstTensorInfo(constants[i++]); //CellToForgetWeights
1770  cellToOutputWeights = CreateConstTensorInfo(constants[i++]); //CellToOutputWeights
1771  }
1772 
1773  if (descriptor.m_ProjectionEnabled)
1774  {
1775  projectionWeights = CreateConstTensorInfo(constants[i++]); //ProjectionWeights
1776  projectionBias = CreateConstTensorInfo(constants[i++]); //ProjectionBias
1777  }
1778 
1779  if (descriptor.m_LayerNormEnabled)
1780  {
1781  if (!descriptor.m_CifgEnabled)
1782  {
1783  inputLayerNormWeights = CreateConstTensorInfo(constants[i++]); //InputLayerNormWeights
1784  }
1785  forgetLayerNormWeights = CreateConstTensorInfo(constants[i++]); //ForgetLayerNormWeights
1786  cellLayerNormWeights = CreateConstTensorInfo(constants[i++]); //CellLayerNormWeights
1787  outputLayerNormWeights = CreateConstTensorInfo(constants[i++]); //OutputLayerNormWeights
1788  }
1789 
1790  auto fbUnidirectionalSequenceLstmParams = serializer::CreateLstmInputParams(
1791  m_flatBufferBuilder,
1792  inputToForgetWeights,
1793  inputToCellWeights,
1794  inputToOutputWeights,
1795  recurrentToForgetWeights,
1796  recurrentToCellWeights,
1797  recurrentToOutputWeights,
1798  forgetGateBias,
1799  cellBias,
1800  outputGateBias,
1801  inputToInputWeights,
1802  recurrentToInputWeights,
1803  cellToInputWeights,
1804  inputGateBias,
1805  projectionWeights,
1806  projectionBias,
1807  cellToForgetWeights,
1808  cellToOutputWeights,
1809  inputLayerNormWeights,
1810  forgetLayerNormWeights,
1811  cellLayerNormWeights,
1812  outputLayerNormWeights);
1813 
1814  auto fbUnidirectionalSequenceLstmLayer = serializer::CreateUnidirectionalSequenceLstmLayer(
1815  m_flatBufferBuilder,
1816  fbUnidirectionalSequenceLstmBaseLayer,
1817  fbUnidirectionalSequenceLstmDescriptor,
1818  fbUnidirectionalSequenceLstmParams);
1819 
1820  CreateAnyLayer(fbUnidirectionalSequenceLstmLayer.o, serializer::Layer::Layer_UnidirectionalSequenceLstmLayer);
1821 }
1822 
1823 fb::Offset<serializer::LayerBase> SerializerStrategy::CreateLayerBase(const IConnectableLayer* layer,
1824  const serializer::LayerType layerType)
1825 {
1826 
1827  uint32_t fbIndex = GetSerializedId(layer->GetGuid());
1828 
1829  std::vector<fb::Offset<serializer::InputSlot>> inputSlots = CreateInputSlots(layer);
1830  std::vector<fb::Offset<serializer::OutputSlot>> outputSlots = CreateOutputSlots(layer);
1831 
1832  return serializer::CreateLayerBase(m_flatBufferBuilder,
1833  fbIndex,
1834  m_flatBufferBuilder.CreateString(layer->GetName()),
1835  layerType,
1836  m_flatBufferBuilder.CreateVector(inputSlots),
1837  m_flatBufferBuilder.CreateVector(outputSlots));
1838 }
1839 
1840 void SerializerStrategy::CreateAnyLayer(const flatbuffers::Offset<void>& layer, const serializer::Layer serializerLayer)
1841 {
1842 
1843  auto anyLayer = armnnSerializer::CreateAnyLayer(m_flatBufferBuilder, serializerLayer, layer);
1844  m_serializedLayers.push_back(anyLayer);
1845 }
1846 
1847 template <typename T>
1848 flatbuffers::Offset<flatbuffers::Vector<T>> SerializerStrategy::CreateDataVector(const void* memory, unsigned int size)
1849 {
1850  const T* buffer = reinterpret_cast<const T*>(memory);
1851  std::vector<T> vector(buffer, buffer + (size / sizeof(T)));
1852  auto fbVector = m_flatBufferBuilder.CreateVector(vector);
1853  return fbVector;
1854 }
1855 
1856 flatbuffers::Offset<TensorInfo> SerializerStrategy::CreateTensorInfo(const armnn::TensorInfo& tensorInfo)
1857 {
1858  // Get the dimensions
1859  std::vector<unsigned int> shape;
1860  std::vector<bool> specificity;
1861  // This assumes that the TensorShape constructors have ensured that the size of m_DimensionsSpecificity
1862  // matches the size of dimensions.
1863  for(unsigned int dim = 0; dim < tensorInfo.GetShape().GetNumDimensions(); ++dim)
1864  {
1865  specificity.push_back(tensorInfo.GetShape().GetDimensionSpecificity(dim));
1866 
1867  if (tensorInfo.GetShape().GetDimensionSpecificity(dim))
1868  {
1869  shape.push_back(tensorInfo.GetShape()[dim]);
1870  }
1871  else
1872  {
1873  shape.push_back(0);
1874  }
1875  }
1876 
1877  if (tensorInfo.HasPerAxisQuantization())
1878  {
1879  // Create FlatBuffer TensorInfo
1880  auto flatBufferTensorInfo =
1881  serializer::CreateTensorInfo(m_flatBufferBuilder,
1882  m_flatBufferBuilder.CreateVector(shape),
1883  GetFlatBufferDataType(tensorInfo.GetDataType()),
1884  tensorInfo.GetQuantizationScales()[0],
1885  tensorInfo.GetQuantizationOffset(),
1886  m_flatBufferBuilder.CreateVector(tensorInfo.GetQuantizationScales()),
1887  tensorInfo.GetQuantizationDim().value(),
1888  static_cast<unsigned int>
1889  (tensorInfo.GetShape().GetDimensionality()),
1890  m_flatBufferBuilder.CreateVector(specificity));
1891  return flatBufferTensorInfo;
1892  }
1893 
1894  // Create FlatBuffer TensorInfo
1895  auto flatBufferTensorInfo = serializer::CreateTensorInfo(m_flatBufferBuilder,
1896  m_flatBufferBuilder.CreateVector(shape),
1897  GetFlatBufferDataType(tensorInfo.GetDataType()),
1898  tensorInfo.GetQuantizationScale(),
1899  tensorInfo.GetQuantizationOffset(),
1900  0,
1901  0,
1902  static_cast<unsigned int>
1903  (tensorInfo.GetShape().GetDimensionality()),
1904  m_flatBufferBuilder.CreateVector(specificity));
1905  return flatBufferTensorInfo;
1906 }
1907 
1908 flatbuffers::Offset<serializer::ConstTensor>
1909  SerializerStrategy::CreateConstTensorInfo(const armnn::ConstTensor& constTensor)
1910 {
1911  armnn::TensorInfo tensorInfo = constTensor.GetInfo();
1912 
1913  flatbuffers::Offset<void> fbPayload;
1914 
1915  switch (tensorInfo.GetDataType())
1916  {
1917  case armnn::DataType::Signed64:
1918  {
1919  auto fbVector = CreateDataVector<int64_t>(constTensor.GetMemoryArea(), constTensor.GetNumBytes());
1920  flatbuffers::Offset<serializer::LongData> flatBuffersData = serializer::CreateLongData(
1921  m_flatBufferBuilder,
1922  fbVector);
1923  fbPayload = flatBuffersData.o;
1924  break;
1925  }
1926  case armnn::DataType::Float32:
1927  case armnn::DataType::Signed32:
1928  {
1929  auto fbVector = CreateDataVector<int32_t>(constTensor.GetMemoryArea(), constTensor.GetNumBytes());
1930  flatbuffers::Offset<serializer::IntData> flatBuffersData = serializer::CreateIntData(
1931  m_flatBufferBuilder,
1932  fbVector);
1933  fbPayload = flatBuffersData.o;
1934  break;
1935  }
1936  case armnn::DataType::Float16:
1937  case armnn::DataType::BFloat16:
1938  case armnn::DataType::QSymmS16:
1939  {
1940  auto fbVector = CreateDataVector<int16_t>(constTensor.GetMemoryArea(), constTensor.GetNumBytes());
1941  flatbuffers::Offset<serializer::ShortData> flatBuffersData = serializer::CreateShortData(
1942  m_flatBufferBuilder,
1943  fbVector);
1944  fbPayload = flatBuffersData.o;
1945  break;
1946  }
1947  case armnn::DataType::QSymmS8:
1948  case armnn::DataType::QAsymmS8:
1949  case armnn::DataType::QAsymmU8:
1950  case armnn::DataType::Boolean:
1951  default:
1952  {
1953  auto fbVector = CreateDataVector<int8_t>(constTensor.GetMemoryArea(), constTensor.GetNumBytes());
1954  flatbuffers::Offset<serializer::ByteData> flatBuffersData = serializer::CreateByteData(
1955  m_flatBufferBuilder,
1956  fbVector);
1957  fbPayload = flatBuffersData.o;
1958  }
1959  }
1960  flatbuffers::Offset<serializer::ConstTensor> flatBufferConstTensor = serializer::CreateConstTensor(
1961  m_flatBufferBuilder,
1962  CreateTensorInfo(tensorInfo),
1963  GetFlatBufferConstTensorData(tensorInfo.GetDataType()),
1964  fbPayload);
1965  return flatBufferConstTensor;
1966 }
1967 
1968 flatbuffers::Offset<armnnSerializer::FeatureCompatibilityVersions> SerializerStrategy::GetVersionTable()
1969 {
1970  flatbuffers::Offset<armnnSerializer::FeatureCompatibilityVersions> versionsTable =
1971  serializer::CreateFeatureCompatibilityVersions(
1972  m_flatBufferBuilder,
1973  1, // Binding ids scheme version
1974  1, // Weights layout scheme version
1975  1 // Constant tensors as inputs version
1976  );
1977  return versionsTable;
1978 }
1979 
1980 std::vector<fb::Offset<serializer::InputSlot>>
1981  SerializerStrategy::CreateInputSlots(const armnn::IConnectableLayer* layer)
1982 {
1983  std::vector<fb::Offset<serializer::InputSlot>> inputSlots;
1984 
1985  // Get the InputSlots
1986  for (unsigned int slotIndex = 0; slotIndex<layer->GetNumInputSlots(); ++slotIndex)
1987  {
1988  const IInputSlot& inputSlot = layer->GetInputSlot(slotIndex);
1989 
1990  // Get the Connection for the InputSlot
1991  const IOutputSlot* connection = inputSlot.GetConnection();
1992  bool isOverridden = inputSlot.IsTensorInfoOverridden();
1993 
1994  flatbuffers::Offset<TensorInfo> overriddenTensorInfo = CreateTensorInfo(inputSlot.GetTensorInfo());
1995 
1996  // Create FlatBuffer Connection
1997  serializer::Connection conn(GetSerializedId(inputSlot.GetConnection()->GetOwningLayerGuid()),
1998  connection->CalculateIndexOnOwner());
1999  // Create FlatBuffer InputSlot
2000  inputSlots.push_back(serializer::CreateInputSlot(m_flatBufferBuilder, slotIndex, &conn, isOverridden,
2001  overriddenTensorInfo));
2002  }
2003  return inputSlots;
2004 }
2005 
2006 std::vector<fb::Offset<serializer::OutputSlot>>
2007  SerializerStrategy::CreateOutputSlots(const armnn::IConnectableLayer* layer)
2008 {
2009  std::vector<fb::Offset<serializer::OutputSlot>> outputSlots;
2010 
2011  // Get the OutputSlots
2012  for (unsigned int slotIndex = 0; slotIndex < layer->GetNumOutputSlots(); ++slotIndex)
2013  {
2014  const IOutputSlot& outputSlot = layer->GetOutputSlot(slotIndex);
2015  const armnn::TensorInfo& tensorInfo = outputSlot.GetTensorInfo();
2016 
2017  // Create FlatBuffer Outputslot
2018  outputSlots.push_back(serializer::CreateOutputSlot(m_flatBufferBuilder,
2019  slotIndex,
2020  CreateTensorInfo(tensorInfo)));
2021  }
2022  return outputSlots;
2023 }
2024 
2025 void SerializerStrategy::ExecuteStrategy(const armnn::IConnectableLayer* layer,
2026  const BaseDescriptor& descriptor,
2027  const std::vector<armnn::ConstTensor>& constants,
2028  const char* name,
2029  const armnn::LayerBindingId id)
2030 {
2031  IgnoreUnused(constants);
2032 
2033  switch (layer->GetType())
2034  {
2035  case armnn::LayerType::Activation :
2036  {
2037  const armnn::ActivationDescriptor& layerDescriptor =
2038  static_cast<const armnn::ActivationDescriptor&>(descriptor);
2039  SerializeActivationLayer(layer, layerDescriptor, name);
2040  break;
2041  }
2042  case armnn::LayerType::Addition :
2043  {
2044  SerializeAdditionLayer(layer, name);
2045  break;
2046  }
2047  case armnn::LayerType::ArgMinMax :
2048  {
2049  const armnn::ArgMinMaxDescriptor& layerDescriptor =
2050  static_cast<const armnn::ArgMinMaxDescriptor&>(descriptor);
2051  SerializeArgMinMaxLayer(layer, layerDescriptor, name);
2052  break;
2053  }
2054  case armnn::LayerType::BatchMatMul :
2055  {
2056  const armnn::BatchMatMulDescriptor& layerDescriptor =
2057  static_cast<const armnn::BatchMatMulDescriptor&>(descriptor);
2058  SerializeBatchMatMulLayer(layer,
2059  layerDescriptor,
2060  name);
2061  break;
2062  }
2063  case armnn::LayerType::BatchNormalization :
2064  {
2065  const armnn::BatchNormalizationDescriptor& layerDescriptor =
2066  static_cast<const armnn::BatchNormalizationDescriptor&>(descriptor);
2067  SerializeBatchNormalizationLayer(layer,
2068  layerDescriptor,
2069  constants,
2070  name);
2071  break;
2072  }
2073  case armnn::LayerType::BatchToSpaceNd :
2074  {
2075  const armnn::BatchToSpaceNdDescriptor& layerDescriptor =
2076  static_cast<const armnn::BatchToSpaceNdDescriptor&>(descriptor);
2077  SerializeBatchToSpaceNdLayer(layer,
2078  layerDescriptor,
2079  name);
2080  break;
2081  }
2082  case armnn::LayerType::Cast :
2083  {
2084  SerializeCastLayer(layer, name);
2085  break;
2086  }
2087  case armnn::LayerType::ChannelShuffle :
2088  {
2089  const armnn::ChannelShuffleDescriptor& layerDescriptor =
2090  static_cast<const armnn::ChannelShuffleDescriptor&>(descriptor);
2091  SerializeChannelShuffleLayer(layer,
2092  layerDescriptor,
2093  name);
2094  break;
2095  }
2096  case armnn::LayerType::Comparison :
2097  {
2098  const armnn::ComparisonDescriptor& layerDescriptor =
2099  static_cast<const armnn::ComparisonDescriptor&>(descriptor);
2100  SerializeComparisonLayer(layer,
2101  layerDescriptor,
2102  name);
2103  break;
2104  }
2105  case armnn::LayerType::Concat :
2106  {
2107  const armnn::ConcatDescriptor& layerDescriptor =
2108  static_cast<const armnn::ConcatDescriptor&>(descriptor);
2109  SerializeConcatLayer(layer,
2110  layerDescriptor,
2111  name);
2112  break;
2113  }
2114  case armnn::LayerType::Constant :
2115  {
2116  SerializeConstantLayer(layer,
2117  constants,
2118  name);
2119  break;
2120  }
2121  case armnn::LayerType::Convolution2d :
2122  {
2123  const armnn::Convolution2dDescriptor& layerDescriptor =
2124  static_cast<const armnn::Convolution2dDescriptor&>(descriptor);
2125  SerializeConvolution2dLayer(layer,
2126  layerDescriptor,
2127  name);
2128  break;
2129  }
2130  case armnn::LayerType::Convolution3d :
2131  {
2132  const armnn::Convolution3dDescriptor& layerDescriptor =
2133  static_cast<const armnn::Convolution3dDescriptor&>(descriptor);
2134  SerializeConvolution3dLayer(layer,
2135  layerDescriptor,
2136  name);
2137  break;
2138  }
2139  case armnn::LayerType::DepthToSpace :
2140  {
2141  const armnn::DepthToSpaceDescriptor& layerDescriptor =
2142  static_cast<const armnn::DepthToSpaceDescriptor&>(descriptor);
2143  SerializeDepthToSpaceLayer(layer,
2144  layerDescriptor,
2145  name);
2146  break;
2147  }
2148  case armnn::LayerType::DepthwiseConvolution2d :
2149  {
2150  const armnn::DepthwiseConvolution2dDescriptor& layerDescriptor =
2151  static_cast<const armnn::DepthwiseConvolution2dDescriptor&>(descriptor);
2152  SerializeDepthwiseConvolution2dLayer(layer,
2153  layerDescriptor,
2154  name);
2155  break;
2156  }
2157  case armnn::LayerType::Dequantize :
2158  {
2159  SerializeDequantizeLayer(layer,
2160  name);
2161  break;
2162  }
2163  case armnn::LayerType::DetectionPostProcess :
2164  {
2165  const armnn::DetectionPostProcessDescriptor& layerDescriptor =
2166  static_cast<const armnn::DetectionPostProcessDescriptor&>(descriptor);
2167  SerializeDetectionPostProcessLayer(layer, layerDescriptor, constants, name);
2168  break;
2169  }
2170  case armnn::LayerType::Division :
2171  {
2172  SerializeDivisionLayer(layer, name);
2173  break;
2174  }
2175  case armnn::LayerType::ElementwiseBinary :
2176  {
2177  const armnn::ElementwiseBinaryDescriptor& layerDescriptor =
2178  static_cast<const armnn::ElementwiseBinaryDescriptor&>(descriptor);
2179  SerializeElementwiseBinaryLayer(layer, layerDescriptor, name);
2180  break;
2181  }
2182  case armnn::LayerType::ElementwiseUnary :
2183  {
2184  const armnn::ElementwiseUnaryDescriptor& layerDescriptor =
2185  static_cast<const armnn::ElementwiseUnaryDescriptor&>(descriptor);
2186  SerializeElementwiseUnaryLayer(layer, layerDescriptor, name);
2187  break;
2188  }
2189  case armnn::LayerType::Fill :
2190  {
2191  const armnn::FillDescriptor& layerDescriptor =
2192  static_cast<const armnn::FillDescriptor&>(descriptor);
2193  SerializeFillLayer(layer, layerDescriptor, name);
2194  break;
2195  }
2196  case armnn::LayerType::Floor :
2197  {
2198  SerializeFloorLayer(layer, name);
2199  break;
2200  }
2201  case armnn::LayerType::FullyConnected :
2202  {
2203  const armnn::FullyConnectedDescriptor& layerDescriptor =
2204  static_cast<const armnn::FullyConnectedDescriptor&>(descriptor);
2205  SerializeFullyConnectedLayer(layer, layerDescriptor, name);
2206  break;
2207  }
2208  case armnn::LayerType::Gather :
2209  {
2210  const armnn::GatherDescriptor& layerDescriptor =
2211  static_cast<const armnn::GatherDescriptor&>(descriptor);
2212  SerializeGatherLayer(layer, layerDescriptor, name);
2213  break;
2214  }
2215  case armnn::LayerType::GatherNd :
2216  {
2217  SerializeGatherNdLayer(layer, name);
2218  break;
2219  }
2220  case armnn::LayerType::Input :
2221  {
2222  SerializeInputLayer(layer, id, name);
2223  break;
2224  }
2225  case armnn::LayerType::InstanceNormalization :
2226  {
2227  const armnn::InstanceNormalizationDescriptor& layerDescriptor =
2228  static_cast<const armnn::InstanceNormalizationDescriptor&>(descriptor);
2229  SerializeInstanceNormalizationLayer(layer, layerDescriptor, name);
2230  break;
2231  }
2232  case armnn::LayerType::L2Normalization :
2233  {
2234  const armnn::L2NormalizationDescriptor& layerDescriptor =
2235  static_cast<const armnn::L2NormalizationDescriptor&>(descriptor);
2236  SerializeL2NormalizationLayer(layer, layerDescriptor, name);
2237  break;
2238  }
2239  case armnn::LayerType::LogicalBinary :
2240  {
2241  const armnn::LogicalBinaryDescriptor& layerDescriptor =
2242  static_cast<const armnn::LogicalBinaryDescriptor&>(descriptor);
2243  SerializeLogicalBinaryLayer(layer, layerDescriptor, name);
2244  break;
2245  }
2246  case armnn::LayerType::LogSoftmax :
2247  {
2248  const armnn::LogSoftmaxDescriptor& layerDescriptor =
2249  static_cast<const armnn::LogSoftmaxDescriptor&>(descriptor);
2250  SerializeLogSoftmaxLayer(layer, layerDescriptor, name);
2251  break;
2252  }
2253  case armnn::LayerType::Lstm :
2254  {
2255  const armnn::LstmDescriptor& layerDescriptor =
2256  static_cast<const armnn::LstmDescriptor&>(descriptor);
2257  SerializeLstmLayer(layer, layerDescriptor, constants, name);
2258  break;
2259  }
2260  case armnn::LayerType::QLstm :
2261  {
2262  const armnn::QLstmDescriptor& layerDescriptor =
2263  static_cast<const armnn::QLstmDescriptor&>(descriptor);
2264  SerializeQLstmLayer(layer, layerDescriptor, constants, name);
2265  break;
2266  }
2267  case armnn::LayerType::Maximum :
2268  {
2269  SerializeMaximumLayer(layer, name);
2270  break;
2271  }
2272  case armnn::LayerType::Mean :
2273  {
2274  const armnn::MeanDescriptor& layerDescriptor =
2275  static_cast<const armnn::MeanDescriptor&>(descriptor);
2276  SerializeMeanLayer(layer, layerDescriptor, name);
2277  break;
2278  }
2279  case armnn::LayerType::Merge :
2280  {
2281  SerializeMergeLayer(layer, name);
2282  break;
2283  }
2284  case armnn::LayerType::Minimum :
2285  {
2286  SerializeMinimumLayer(layer, name);
2287  break;
2288  }
2289  case armnn::LayerType::Multiplication :
2290  {
2291  SerializeMultiplicationLayer(layer, name);
2292  break;
2293  }
2294  case armnn::LayerType::Normalization :
2295  {
2296  const armnn::NormalizationDescriptor& layerDescriptor =
2297  static_cast<const armnn::NormalizationDescriptor&>(descriptor);
2298  SerializeNormalizationLayer(layer, layerDescriptor, name);
2299  break;
2300  }
2301  case armnn::LayerType::Output :
2302  {
2303  SerializeOutputLayer(layer, id, name);
2304  break;
2305  }
2306  case armnn::LayerType::Pad :
2307  {
2308  const armnn::PadDescriptor& layerDescriptor =
2309  static_cast<const armnn::PadDescriptor&>(descriptor);
2310  SerializePadLayer(layer, layerDescriptor, name);
2311  break;
2312  }
2313  case armnn::LayerType::Permute :
2314  {
2315  const armnn::PermuteDescriptor& layerDescriptor =
2316  static_cast<const armnn::PermuteDescriptor&>(descriptor);
2317  SerializePermuteLayer(layer, layerDescriptor, name);
2318  break;
2319  }
2320  case armnn::LayerType::Pooling2d :
2321  {
2322  const armnn::Pooling2dDescriptor& layerDescriptor =
2323  static_cast<const armnn::Pooling2dDescriptor&>(descriptor);
2324  SerializePooling2dLayer(layer, layerDescriptor, name);
2325  break;
2326  }
2327  case armnn::LayerType::Pooling3d :
2328  {
2329  const armnn::Pooling3dDescriptor& layerDescriptor =
2330  static_cast<const armnn::Pooling3dDescriptor&>(descriptor);
2331  SerializePooling3dLayer(layer, layerDescriptor, name);
2332  break;
2333  }
2334  case armnn::LayerType::Prelu :
2335  {
2336  SerializePreluLayer(layer, name);
2337  break;
2338  }
2339  case armnn::LayerType::Quantize :
2340  {
2341  SerializeQuantizeLayer(layer, name);
2342  break;
2343  }
2344  case armnn::LayerType::QuantizedLstm :
2345  SerializeQuantizedLstmLayer(layer, constants, name);
2346  break;
2347  case armnn::LayerType::Reshape :
2348  {
2349  const armnn::ReshapeDescriptor &layerDescriptor =
2350  static_cast<const armnn::ReshapeDescriptor &>(descriptor);
2351  SerializeReshapeLayer(layer, layerDescriptor, name);
2352  break;
2353  }
2354  case armnn::LayerType::Rank :
2355  {
2356  SerializeRankLayer(layer, name);
2357  break;
2358  }
2359  case armnn::LayerType::Reduce :
2360  {
2361  const armnn::ReduceDescriptor& layerDescriptor =
2362  static_cast<const armnn::ReduceDescriptor&>(descriptor);
2363  SerializeReduceLayer(layer, layerDescriptor, name);
2364  break;
2365  }
2366  case armnn::LayerType::Resize :
2367  {
2368  const armnn::ResizeDescriptor& layerDescriptor =
2369  static_cast<const armnn::ResizeDescriptor&>(descriptor);
2370  SerializeResizeLayer(layer, layerDescriptor, name);
2371  break;
2372  }
2373  case armnn::LayerType::ReverseV2 :
2374  {
2375  SerializeReverseV2Layer(layer, name);
2376  break;
2377  }
2378  case armnn::LayerType::Shape :
2379  {
2380  SerializeShapeLayer(layer, name);
2381  break;
2382  }
2383  case armnn::LayerType::Slice :
2384  {
2385  const armnn::SliceDescriptor& layerDescriptor =
2386  static_cast<const armnn::SliceDescriptor&>(descriptor);
2387  SerializeSliceLayer(layer, layerDescriptor, name);
2388  break;
2389  }
2390  case armnn::LayerType::Softmax :
2391  {
2392  const armnn::SoftmaxDescriptor& layerDescriptor =
2393  static_cast<const armnn::SoftmaxDescriptor&>(descriptor);
2394  SerializeSoftmaxLayer(layer, layerDescriptor, name);
2395  break;
2396  }
2397  case armnn::LayerType::SpaceToBatchNd :
2398  {
2399  const armnn::SpaceToBatchNdDescriptor& layerDescriptor =
2400  static_cast<const armnn::SpaceToBatchNdDescriptor&>(descriptor);
2401  SerializeSpaceToBatchNdLayer(layer, layerDescriptor, name);
2402  break;
2403  }
2404  case armnn::LayerType::SpaceToDepth :
2405  {
2406  const armnn::SpaceToDepthDescriptor& layerDescriptor =
2407  static_cast<const armnn::SpaceToDepthDescriptor&>(descriptor);
2408  SerializeSpaceToDepthLayer(layer, layerDescriptor, name);
2409  break;
2410  }
2411  case armnn::LayerType::Splitter :
2412  {
2413  const armnn::SplitterDescriptor& layerDescriptor =
2414  static_cast<const armnn::SplitterDescriptor&>(descriptor);
2415  SerializeSplitterLayer(layer, layerDescriptor, name);
2416  break;
2417  }
2418  case armnn::LayerType::Stack :
2419  {
2420  const armnn::StackDescriptor& layerDescriptor =
2421  static_cast<const armnn::StackDescriptor&>(descriptor);
2422  SerializeStackLayer(layer, layerDescriptor, name);
2423  break;
2424  }
2425  case armnn::LayerType::StandIn :
2426  {
2427  const armnn::StandInDescriptor& layerDescriptor =
2428  static_cast<const armnn::StandInDescriptor&>(descriptor);
2429  SerializeStandInLayer(layer, layerDescriptor, name);
2430  break;
2431  }
2432  case armnn::LayerType::StridedSlice :
2433  {
2434  const armnn::StridedSliceDescriptor& layerDescriptor =
2435  static_cast<const armnn::StridedSliceDescriptor&>(descriptor);
2436  SerializeStridedSliceLayer(layer, layerDescriptor, name);
2437  break;
2438  }
2439  case armnn::LayerType::Subtraction :
2440  {
2441  SerializeSubtractionLayer(layer, name);
2442  break;
2443  }
2444  case armnn::LayerType::Switch :
2445  {
2446  SerializeSwitchLayer(layer, name);
2447  break;
2448  }
2449  case armnn::LayerType::Tile :
2450  {
2451  const armnn::TileDescriptor& layerDescriptor =
2452  static_cast<const armnn::TileDescriptor&>(descriptor);
2453  SerializeTileLayer(layer, layerDescriptor, name);
2454  break;
2455  }
2456  case armnn::LayerType::Transpose :
2457  {
2458  const armnn::TransposeDescriptor& layerDescriptor =
2459  static_cast<const armnn::TransposeDescriptor&>(descriptor);
2460  SerializeTransposeLayer(layer, layerDescriptor, name);
2461  break;
2462  }
2463  case armnn::LayerType::TransposeConvolution2d :
2464  {
2465  const armnn::TransposeConvolution2dDescriptor& layerDescriptor =
2466  static_cast<const armnn::TransposeConvolution2dDescriptor&>(descriptor);
2467  SerializeTransposeConvolution2dLayer(layer, layerDescriptor, constants, name);
2468  break;
2469  }
2470  case armnn::LayerType::UnidirectionalSequenceLstm :
2471  {
2472  const armnn::UnidirectionalSequenceLstmDescriptor& layerDescriptor =
2473  static_cast<const armnn::UnidirectionalSequenceLstmDescriptor&>(descriptor);
2474  SerializeUnidirectionalSequenceLstmLayer(layer, layerDescriptor, constants, name);
2475  break;
2476  }
2477  default:
2478  {
2479  throw InvalidArgumentException(
2480  fmt::format("A layer of unknown type was given to the serializer. Layer name: {}; Layer Id: {}",
2481  layer->GetName(),
2482  id));
2483  }
2484  }
2485 }
2486 
2487 void ISerializer::SerializerImpl::Serialize(const INetwork& inNetwork)
2488 {
2489  // Iterate through the network
2490  inNetwork.ExecuteStrategy(m_SerializerStrategy);
2491  flatbuffers::FlatBufferBuilder& fbBuilder = m_SerializerStrategy.GetFlatBufferBuilder();
2492 
2493  // Create FlatBuffer SerializedGraph
2494  auto serializedGraph = serializer::CreateSerializedGraph(
2495  fbBuilder,
2496  fbBuilder.CreateVector(m_SerializerStrategy.GetSerializedLayers()),
2497  fbBuilder.CreateVector(m_SerializerStrategy.GetInputIds()),
2498  fbBuilder.CreateVector(m_SerializerStrategy.GetOutputIds()),
2499  m_SerializerStrategy.GetVersionTable());
2500 
2501  // Serialize the graph
2502  fbBuilder.Finish(serializedGraph);
2503 }
2504 
2505 
2506 bool ISerializer::SerializerImpl::SaveSerializedToStream(std::ostream& stream)
2507 {
2508  flatbuffers::FlatBufferBuilder& fbBuilder = m_SerializerStrategy.GetFlatBufferBuilder();
2509 
2510  auto bytesToWrite = armnn::numeric_cast<std::streamsize>(fbBuilder.GetSize());
2511  stream.write(reinterpret_cast<const char*>(fbBuilder.GetBufferPointer()), bytesToWrite);
2512  return !stream.bad();
2513 }
2514 
2515 } // namespace armnnSerializer
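
The listing above completes the Serializer.cpp implementation. As a brief illustration of how this API is driven from client code, the sketch below creates a serializer, serializes a finished INetwork and writes the FlatBuffer to a file. The SaveNetwork wrapper, the include paths and the "model.armnn" file name are illustrative assumptions, not part of this file.

// Minimal usage sketch for the serializer implemented above.
#include <armnn/INetwork.hpp>
#include <armnnSerializer/ISerializer.hpp>
#include <fstream>

void SaveNetwork(const armnn::INetwork& network)
{
    // Create() returns an ISerializerPtr that owns the serializer implementation.
    armnnSerializer::ISerializerPtr serializer = armnnSerializer::ISerializer::Create();

    // Serialize() walks the graph via INetwork::ExecuteStrategy and fills the FlatBufferBuilder.
    serializer->Serialize(network);

    // SaveSerializedToStream() writes the finished buffer and returns false if the stream went bad.
    std::ofstream file("model.armnn", std::ios::binary);
    if (!serializer->SaveSerializedToStream(file))
    {
        // Handle the write failure as appropriate for the application.
    }
}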
armnn::BatchNormalizationDescriptor
A BatchNormalizationDescriptor for the BatchNormalizationLayer.
Definition: Descriptors.hpp:828
armnn::Convolution2dDescriptor::m_PadTop
uint32_t m_PadTop
Padding top value in the height dimension.
Definition: Descriptors.hpp:570
armnn::ArgMinMaxFunction::Max
@ Max
armnn::LayerType::SpaceToDepth
@ SpaceToDepth
armnn::InstanceNormalizationDescriptor::m_Beta
float m_Beta
Beta, the offset scalar value applied for the normalized tensor. Defaults to 1.0.
Definition: Descriptors.hpp:867
armnn::SliceDescriptor::m_Begin
std::vector< unsigned int > m_Begin
Beginning indices of the slice in each dimension.
Definition: Descriptors.hpp:1223
armnn::INetwork::ExecuteStrategy
void ExecuteStrategy(IStrategy &strategy) const
Definition: Network.cpp:654
armnn::ChannelShuffleDescriptor::m_Axis
uint32_t m_Axis
Axis to apply channel shuffle operation on.
Definition: Descriptors.hpp:1559
armnn::OriginsDescriptor::GetConcatAxis
unsigned int GetConcatAxis() const
Get the concatenation axis value.
Definition: Descriptors.cpp:162
armnn::DataType::Boolean
@ Boolean
armnn::FullyConnectedDescriptor::m_ConstantWeights
bool m_ConstantWeights
Enable/disable constant weights and biases.
Definition: Descriptors.hpp:530
armnn::Pooling2dDescriptor::m_PaddingMethod
PaddingMethod m_PaddingMethod
The padding method to be used. (Exclude, IgnoreValue).
Definition: Descriptors.hpp:425
armnn::ViewsDescriptor
A ViewsDescriptor for the SplitterLayer.
Definition: Descriptors.hpp:244
armnn::DetectionPostProcessDescriptor::m_NmsScoreThreshold
float m_NmsScoreThreshold
NMS score threshold.
Definition: Descriptors.hpp:751
armnn::LayerType::Permute
@ Permute
armnn::QLstmDescriptor::m_ForgetIntermediateScale
float m_ForgetIntermediateScale
Forget intermediate quantization scale.
Definition: Descriptors.hpp:1407
armnn::ActivationDescriptor
An ActivationDescriptor for the ActivationLayer.
Definition: Descriptors.hpp:36
armnn::TransposeConvolution2dDescriptor::m_PadLeft
uint32_t m_PadLeft
Padding left value in the width dimension.
Definition: Descriptors.hpp:1448
armnn::BatchMatMulDescriptor::m_TransposeX
bool m_TransposeX
Transpose the slices of each input tensor Transpose and Adjoint can not both be set to true for the s...
Definition: Descriptors.hpp:1591
armnn::OriginsDescriptor::GetNumViews
uint32_t GetNumViews() const
Get the number of views.
Definition: Descriptors.cpp:187
armnn::FullyConnectedDescriptor
A FullyConnectedDescriptor for the FullyConnectedLayer.
Definition: Descriptors.hpp:507
armnn::DetectionPostProcessDescriptor::m_ScaleX
float m_ScaleX
Center size encoding scale x.
Definition: Descriptors.hpp:759
armnn::LayerType::Splitter
@ Splitter
armnn::BaseTensor::GetMemoryArea
MemoryType GetMemoryArea() const
Definition: Tensor.hpp:305
armnn::LayerType::BatchNormalization
@ BatchNormalization
armnn::QLstmDescriptor
A QLstmDescriptor for the QLstmLayer.
Definition: Descriptors.hpp:1359
Serializer.hpp
armnn::TransposeConvolution2dDescriptor::m_StrideX
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
Definition: Descriptors.hpp:1456
armnn::IConnectableLayer::GetNumInputSlots
virtual unsigned int GetNumInputSlots() const =0
Returns the number of connectable input slots.
armnn::ActivationFunction::LeakyReLu
@ LeakyReLu
armnnSerializer::GetFlatBufferNormalizationAlgorithmChannel
armnnSerializer::NormalizationAlgorithmChannel GetFlatBufferNormalizationAlgorithmChannel(armnn::NormalizationAlgorithmChannel normalizationAlgorithmChannel)
Definition: SerializerUtils.cpp:213
armnn::QLstmDescriptor::m_ProjectionEnabled
bool m_ProjectionEnabled
Enable/disable the projection layer.
Definition: Descriptors.hpp:1401
armnn::Pooling3dDescriptor::m_OutputShapeRounding
OutputShapeRounding m_OutputShapeRounding
The rounding method for the output shape. (Floor, Ceiling).
Definition: Descriptors.hpp:499
armnn::ResizeDescriptor::m_HalfPixelCenters
bool m_HalfPixelCenters
Half Pixel Centers.
Definition: Descriptors.hpp:997
armnn::LstmDescriptor::m_TimeMajor
bool m_TimeMajor
Enable/disable time major.
Definition: Descriptors.hpp:1133
armnn::BatchNormalizationDescriptor::m_DataLayout
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Definition: Descriptors.hpp:843
armnn::Pooling3dDescriptor::m_PadTop
uint32_t m_PadTop
Padding top value in the height dimension.
Definition: Descriptors.hpp:479
Descriptors.hpp
armnn::SpaceToBatchNdDescriptor::m_DataLayout
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Definition: Descriptors.hpp:1050
armnn::FullyConnectedDescriptor::m_TransposeWeightMatrix
bool m_TransposeWeightMatrix
Enable/disable transpose weight matrix.
Definition: Descriptors.hpp:528
armnn::ResizeDescriptor::m_TargetHeight
uint32_t m_TargetHeight
Target height value.
Definition: Descriptors.hpp:988
armnn::DepthwiseConvolution2dDescriptor::m_BiasEnabled
bool m_BiasEnabled
Enable/disable bias.
Definition: Descriptors.hpp:708
armnn::TensorInfo::GetQuantizationScales
std::vector< float > GetQuantizationScales() const
Definition: Tensor.cpp:451
armnn::Pooling2dDescriptor::m_PoolHeight
uint32_t m_PoolHeight
Pooling height value.
Definition: Descriptors.hpp:417
armnn::StridedSliceDescriptor::m_Begin
std::vector< int > m_Begin
Begin values for the input that will be sliced.
Definition: Descriptors.hpp:1321
armnn::DetectionPostProcessDescriptor::m_ScaleY
float m_ScaleY
Center size encoding scale y.
Definition: Descriptors.hpp:761
armnn::Pooling3dDescriptor
A Pooling3dDescriptor for the Pooling3dLayer.
Definition: Descriptors.hpp:431
armnn::BatchMatMulDescriptor::m_AdjointX
bool m_AdjointX
Adjoint the slices of each input tensor Transpose and Adjoint can not both be set to true for the sam...
Definition: Descriptors.hpp:1596
armnn::DetectionPostProcessDescriptor::m_MaxDetections
uint32_t m_MaxDetections
Maximum numbers of detections.
Definition: Descriptors.hpp:745
armnn::Convolution3dDescriptor::m_PadFront
uint32_t m_PadFront
Padding front value in the depth dimension.
Definition: Descriptors.hpp:637
armnn::ResizeDescriptor
A ResizeDescriptor for the ResizeLayer.
Definition: Descriptors.hpp:964
armnn::IInputSlot::IsTensorInfoOverridden
virtual bool IsTensorInfoOverridden() const =0
Returns true if this InputSlot has an overridden TensorInfo that was set through a call to SetTensorI...
armnn::ArgMinMaxDescriptor
An ArgMinMaxDescriptor for ArgMinMaxLayer.
Definition: Descriptors.hpp:67
armnn::IConnectableLayer::GetName
virtual const char * GetName() const =0
Returns the name of the layer.
armnn::ActivationDescriptor::m_A
float m_A
Alpha upper bound value used by the activation functions. (BoundedReLu, Linear, TanH,...
Definition: Descriptors.hpp:61
armnn::InstanceNormalizationDescriptor
An InstanceNormalizationDescriptor for InstanceNormalizationLayer.
Definition: Descriptors.hpp:847
armnn::StridedSliceDescriptor::m_EllipsisMask
int32_t m_EllipsisMask
Ellipsis mask value.
Definition: Descriptors.hpp:1336
armnn::SoftmaxDescriptor::m_Beta
float m_Beta
Exponentiation value.
Definition: Descriptors.hpp:190
armnn::GatherDescriptor
A GatherDescriptor for the GatherLayer.
Definition: Descriptors.hpp:944
armnn::TensorShape::GetDimensionSpecificity
bool GetDimensionSpecificity(unsigned int i) const
Gets information about if the dimension size has been specified or not.
Definition: Tensor.cpp:211
armnn::TensorInfo::GetQuantizationScale
float GetQuantizationScale() const
Definition: Tensor.cpp:461
armnn::LayerType::InstanceNormalization
@ InstanceNormalization
armnn::DepthwiseConvolution2dDescriptor::m_DataLayout
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Definition: Descriptors.hpp:710
armnn::ActivationFunction::Sqrt
@ Sqrt
armnn::BatchMatMulDescriptor::m_DataLayoutX
DataLayout m_DataLayoutX
Data layout of each input tensor, such as NHWC/NDHWC (leave as default for arbitrary layout)
Definition: Descriptors.hpp:1600
armnn::L2NormalizationDescriptor::m_DataLayout
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Definition: Descriptors.hpp:824
armnn::TensorInfo
Definition: Tensor.hpp:152
armnn::LayerType::Floor
@ Floor
armnn::L2NormalizationDescriptor
A L2NormalizationDescriptor for the L2NormalizationLayer.
Definition: Descriptors.hpp:809
armnnSerializer::GetFlatBufferComparisonOperation
armnnSerializer::ComparisonOperation GetFlatBufferComparisonOperation(armnn::ComparisonOperation comparisonOperation)
Definition: SerializerUtils.cpp:11
armnn::BaseTensor::GetNumBytes
unsigned int GetNumBytes() const
Definition: Tensor.hpp:302
armnn::NormalizationDescriptor::m_Beta
float m_Beta
Beta value for the normalization equation.
Definition: Descriptors.hpp:801
armnn::OriginsDescriptor::GetNumDimensions
uint32_t GetNumDimensions() const
Get the number of dimensions.
Definition: Descriptors.cpp:192
armnn::LayerType::Transpose
@ Transpose
armnn::NormalizationDescriptor
A NormalizationDescriptor for the NormalizationLayer.
Definition: Descriptors.hpp:769
armnn::LayerType::Comparison
@ Comparison
armnn::Pooling2dDescriptor::m_StrideY
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
Definition: Descriptors.hpp:421
armnn::BatchToSpaceNdDescriptor::m_BlockShape
std::vector< unsigned int > m_BlockShape
Block shape values.
Definition: Descriptors.hpp:898
armnn::IConnectableLayer::GetGuid
virtual LayerGuid GetGuid() const =0
Returns the unique id of the layer.
armnn::LayerType::StridedSlice
@ StridedSlice
armnn::ChannelShuffleDescriptor
A ChannelShuffleDescriptor for the ChannelShuffle operator.
Definition: Descriptors.hpp:1541
armnn::DataType::Float32
@ Float32
armnn::ResizeDescriptor::m_DataLayout
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Definition: Descriptors.hpp:993
armnn::ActivationFunction::TanH
@ TanH
armnn::DepthwiseConvolution2dDescriptor::m_PadLeft
uint32_t m_PadLeft
Padding left value in the width dimension.
Definition: Descriptors.hpp:692
armnnSerializer::GetFlatBufferDataType
armnnSerializer::DataType GetFlatBufferDataType(armnn::DataType dataType)
Definition: SerializerUtils.cpp:67
armnn::Convolution2dDescriptor::m_StrideY
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
Definition: Descriptors.hpp:576
armnn::Pooling2dDescriptor::m_PadTop
uint32_t m_PadTop
Padding top value in the height dimension.
Definition: Descriptors.hpp:411
armnn::Convolution3dDescriptor::m_PadTop
uint32_t m_PadTop
Padding top value in the height dimension.
Definition: Descriptors.hpp:633
armnnSerializer
Definition: ISerializer.hpp:11
armnn::ArgMinMaxDescriptor::m_Function
ArgMinMaxFunction m_Function
Specify if the function is to find Min or Max.
Definition: Descriptors.hpp:81
armnn::LayerType::Tile
@ Tile
armnn::SpaceToBatchNdDescriptor::m_BlockShape
std::vector< unsigned int > m_BlockShape
Block shape value.
Definition: Descriptors.hpp:1045
armnn::StridedSliceDescriptor::m_BeginMask
int32_t m_BeginMask
Begin mask value.
Definition: Descriptors.hpp:1329
armnn::BatchMatMulDescriptor::m_AdjointY
bool m_AdjointY
Definition: Descriptors.hpp:1597
armnn::Convolution3dDescriptor::m_DilationX
uint32_t m_DilationX
Dilation along x axis.
Definition: Descriptors.hpp:647
armnn::Convolution3dDescriptor::m_PadBottom
uint32_t m_PadBottom
Padding bottom value in the height dimension.
Definition: Descriptors.hpp:635
armnn::DataType::QAsymmU8
@ QAsymmU8
armnn::QLstmDescriptor::m_InputIntermediateScale
float m_InputIntermediateScale
Input intermediate quantization scale.
Definition: Descriptors.hpp:1405
armnn::ArgMinMaxFunction
ArgMinMaxFunction
Definition: Types.hpp:102
armnn::DetectionPostProcessDescriptor::m_ScaleW
float m_ScaleW
Center size encoding scale weight.
Definition: Descriptors.hpp:763
armnn::ActivationFunction::BoundedReLu
@ BoundedReLu
min(a, max(b, input)) ReLu1 & ReLu6.
armnn::LayerType::Stack
@ Stack
armnn::DataType::QSymmS8
@ QSymmS8
armnn::StackDescriptor
A StackDescriptor for the StackLayer.
Definition: Descriptors.hpp:1230
armnn::IOutputSlot::GetTensorInfo
virtual const TensorInfo & GetTensorInfo() const =0
IgnoreUnused.hpp
armnn::Pooling3dDescriptor::m_StrideZ
uint32_t m_StrideZ
Stride value when proceeding through input for the depth dimension.
Definition: Descriptors.hpp:497
armnn::LayerType::Normalization
@ Normalization
armnn::NormalizationDescriptor::m_NormSize
uint32_t m_NormSize
Depth radius value.
Definition: Descriptors.hpp:797
armnn::LayerType::QuantizedLstm
@ QuantizedLstm
armnn::Pooling2dDescriptor::m_PoolWidth
uint32_t m_PoolWidth
Pooling width value.
Definition: Descriptors.hpp:415
armnn::StandInDescriptor::m_NumInputs
uint32_t m_NumInputs
Number of input tensors.
Definition: Descriptors.hpp:1276
armnn::LayerType::Reduce
@ Reduce
armnn::Pooling3dDescriptor::m_DataLayout
DataLayout m_DataLayout
The data layout to be used (NCDHW, NDHWC).
Definition: Descriptors.hpp:503
armnn::Convolution2dDescriptor::m_PadLeft
uint32_t m_PadLeft
Padding left value in the width dimension.
Definition: Descriptors.hpp:566
armnn::BatchToSpaceNdDescriptor::m_Crops
std::vector< std::pair< unsigned int, unsigned int > > m_Crops
The values to crop from the input dimension.
Definition: Descriptors.hpp:900
armnn::TensorInfo::HasPerAxisQuantization
bool HasPerAxisQuantization() const
Definition: Tensor.cpp:446
armnn::DepthwiseConvolution2dDescriptor::m_StrideY
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
Definition: Descriptors.hpp:702
armnn::Convolution2dDescriptor::m_DilationY
uint32_t m_DilationY
Dilation along y axis.
Definition: Descriptors.hpp:580
armnn::LayerType::ElementwiseUnary
@ ElementwiseUnary
armnn::Pooling3dDescriptor::m_PadBottom
uint32_t m_PadBottom
Padding bottom value in the height dimension.
Definition: Descriptors.hpp:481
armnn::DetectionPostProcessDescriptor::m_MaxClassesPerDetection
uint32_t m_MaxClassesPerDetection
Maximum numbers of classes per detection, used in Fast NMS.
Definition: Descriptors.hpp:747
armnnSerializer::GetFlatBufferResizeMethod
armnnSerializer::ResizeMethod GetFlatBufferResizeMethod(armnn::ResizeMethod method)
Definition: SerializerUtils.cpp:241
armnn::IConnectableLayer::GetNumOutputSlots
virtual unsigned int GetNumOutputSlots() const =0
Returns the number of connectable output slots.
armnnSerializer::ISerializer
Definition: ISerializer.hpp:17
armnn::QLstmDescriptor::m_CellIntermediateScale
float m_CellIntermediateScale
Cell intermediate quantization scale.
Definition: Descriptors.hpp:1409
armnn::ActivationFunction::HardSwish
@ HardSwish
armnn::DataType::QSymmS16
@ QSymmS16
armnnSerializer::GetFlatBufferActivationFunction
serializer::ActivationFunction GetFlatBufferActivationFunction(armnn::ActivationFunction function)
Definition: Serializer.cpp:55
armnn::NormalizationDescriptor::m_NormMethodType
NormalizationAlgorithmMethod m_NormMethodType
Normalization method algorithm to use (LocalBrightness, LocalContrast).
Definition: Descriptors.hpp:795
armnn::LayerType::GatherNd
@ GatherNd
armnn::LayerType::ElementwiseBinary
@ ElementwiseBinary
armnn::TransposeConvolution2dDescriptor::m_PadBottom
uint32_t m_PadBottom
Padding bottom value in the height dimension.
Definition: Descriptors.hpp:1454
NumericCast.hpp
armnn::DataType::BFloat16
@ BFloat16
armnn::Pooling3dDescriptor::m_StrideY
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
Definition: Descriptors.hpp:495
armnn::ReduceDescriptor::m_ReduceOperation
ReduceOperation m_ReduceOperation
Specifies the reduction operation to execute.
Definition: Descriptors.hpp:1537
armnnSerializer::GetFlatBufferConstTensorData
armnnSerializer::ConstTensorData GetFlatBufferConstTensorData(armnn::DataType dataType)
Definition: SerializerUtils.cpp:45
armnn::MeanDescriptor::m_KeepDims
bool m_KeepDims
Enable/disable keep dimensions. If true, then the reduced dimensions that are of length 1 are kept.
Definition: Descriptors.hpp:1171
armnn::FillDescriptor::m_Value
float m_Value
Definition: Descriptors.hpp:940
armnn::ElementwiseBinaryDescriptor
A ElementwiseBinaryDescriptor for the ElementwiseBinaryLayer.
Definition: Descriptors.hpp:109
armnn::ViewsDescriptor::GetViewSizes
const uint32_t * GetViewSizes(uint32_t idx) const
Get the view sizes at the int value idx.
Definition: Descriptors.cpp:340
armnnSerializer::GetFlatBufferBinaryOperation
armnnSerializer::BinaryOperation GetFlatBufferBinaryOperation(armnn::BinaryOperation binaryOperation)
Definition: SerializerUtils.cpp:110
armnn::Pooling3dDescriptor::m_PoolType
PoolingAlgorithm m_PoolType
The pooling algorithm to use (Max. Average, L2).
Definition: Descriptors.hpp:473
armnn::ResizeDescriptor::m_Method
ResizeMethod m_Method
The Interpolation method to use (Bilinear, NearestNeighbor).
Definition: Descriptors.hpp:991
armnn::SpaceToBatchNdDescriptor::m_PadList
std::vector< std::pair< unsigned int, unsigned int > > m_PadList
Specifies the padding values for the input dimension: heightPad{top, bottom} widthPad{left,...
Definition: Descriptors.hpp:1048
armnn::LayerType::Slice
@ Slice
armnn::LstmDescriptor::m_PeepholeEnabled
bool m_PeepholeEnabled
Enable/disable peephole.
Definition: Descriptors.hpp:1127
armnn::Convolution3dDescriptor::m_PadRight
uint32_t m_PadRight
Padding right value in the width dimension.
Definition: Descriptors.hpp:631
armnn::NormalizationDescriptor::m_NormChannelType
NormalizationAlgorithmChannel m_NormChannelType
Normalization channel algorithm to use (Across, Within).
Definition: Descriptors.hpp:793
LstmParams.hpp
armnn::IOutputSlot
An output connection slot for a layer.
Definition: INetwork.hpp:53
armnnSerializer::GetFlatBufferDataLayout
armnnSerializer::DataLayout GetFlatBufferDataLayout(armnn::DataLayout dataLayout)
Definition: SerializerUtils.cpp:94
armnn::StackDescriptor::m_NumInputs
uint32_t m_NumInputs
Number of input tensors.
Definition: Descriptors.hpp:1254
armnn::DataType::Float16
@ Float16
armnn::LstmDescriptor::m_ClippingThresProj
float m_ClippingThresProj
Clipping threshold value for the projection.
Definition: Descriptors.hpp:1123
armnn::LayerType::ChannelShuffle
@ ChannelShuffle
armnn::Pooling3dDescriptor::m_PoolWidth
uint32_t m_PoolWidth
Pooling width value.
Definition: Descriptors.hpp:487
armnn::StridedSliceDescriptor::m_DataLayout
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Definition: Descriptors.hpp:1342
armnn::Pooling2dDescriptor::m_DataLayout
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Definition: Descriptors.hpp:427
armnn::TensorShape::GetNumDimensions
unsigned int GetNumDimensions() const
Function that returns the tensor rank.
Definition: Tensor.cpp:174
armnn::DepthwiseConvolution2dDescriptor::m_DilationY
uint32_t m_DilationY
Dilation factor value for height dimension.
Definition: Descriptors.hpp:706
armnn::Pooling2dDescriptor::m_PadBottom
uint32_t m_PadBottom
Padding bottom value in the height dimension.
Definition: Descriptors.hpp:413
armnn::LayerType::Subtraction
@ Subtraction
armnn::LayerType::Prelu
@ Prelu
armnn::Pooling3dDescriptor::m_PaddingMethod
PaddingMethod m_PaddingMethod
The padding method to be used. (Exclude, IgnoreValue).
Definition: Descriptors.hpp:501
armnn::Pooling2dDescriptor::m_PadRight
uint32_t m_PadRight
Padding right value in the width dimension.
Definition: Descriptors.hpp:409
armnn::FullyConnectedDescriptor::m_BiasEnabled
bool m_BiasEnabled
Enable/disable bias.
Definition: Descriptors.hpp:526
armnn::LayerType::LogicalBinary
@ LogicalBinary
armnn::LogicalBinaryDescriptor::m_Operation
LogicalBinaryOperation m_Operation
Specifies the logical operation to execute.
Definition: Descriptors.hpp:1513
armnn::LayerType::Concat
@ Concat
armnn::PadDescriptor
A PadDescriptor for the PadLayer.
Definition: Descriptors.hpp:1175
armnn::TransposeDescriptor
A TransposeDescriptor for the TransposeLayer.
Definition: Descriptors.hpp:1469
armnn::Convolution3dDescriptor::m_DilationZ
uint32_t m_DilationZ
Dilation along z axis.
Definition: Descriptors.hpp:651
armnn::BatchMatMulDescriptor::m_TransposeY
bool m_TransposeY
Definition: Descriptors.hpp:1592
armnn::ViewsDescriptor::GetOrigins
const OriginsDescriptor & GetOrigins() const
Get the View Origins.
Definition: Descriptors.cpp:345
armnn::DetectionPostProcessDescriptor::m_NumClasses
uint32_t m_NumClasses
Number of classes.
Definition: Descriptors.hpp:755
armnn::TensorInfo::GetQuantizationDim
Optional< unsigned int > GetQuantizationDim() const
Definition: Tensor.cpp:494
armnn::LayerType::TransposeConvolution2d
@ TransposeConvolution2d
armnn::LayerType::Merge
@ Merge
armnn::StackDescriptor::m_Axis
uint32_t m_Axis
0-based axis along which to stack the input tensors.
Definition: Descriptors.hpp:1252
armnn::SliceDescriptor
A SliceDescriptor for the SliceLayer.
Definition: Descriptors.hpp:1207
armnn::LayerType::StandIn
@ StandIn
armnn::LayerType::Softmax
@ Softmax
armnnSerializer::GetFlatBufferUnaryOperation
armnnSerializer::UnaryOperation GetFlatBufferUnaryOperation(armnn::UnaryOperation unaryOperation)
Definition: SerializerUtils.cpp:135
armnn::InstanceNormalizationDescriptor::m_Gamma
float m_Gamma
Gamma, the scale scalar value applied for the normalized tensor. Defaults to 1.0.
Definition: Descriptors.hpp:865
armnn::DetectionPostProcessDescriptor::m_NmsIouThreshold
float m_NmsIouThreshold
Intersection over union threshold.
Definition: Descriptors.hpp:753
armnn::ActivationFunction::Elu
@ Elu
armnn::Convolution2dDescriptor::m_BiasEnabled
bool m_BiasEnabled
Enable/disable bias.
Definition: Descriptors.hpp:582
armnn::BatchMatMulDescriptor::m_DataLayoutY
DataLayout m_DataLayoutY
Definition: Descriptors.hpp:1601
armnn::ReshapeDescriptor
A ReshapeDescriptor for the ReshapeLayer.
Definition: Descriptors.hpp:1002
armnnSerializer::GetFlatBufferNormalizationAlgorithmMethod
armnnSerializer::NormalizationAlgorithmMethod GetFlatBufferNormalizationAlgorithmMethod(armnn::NormalizationAlgorithmMethod normalizationAlgorithmMethod)
Definition: SerializerUtils.cpp:227
armnn::InvalidArgumentException
Definition: Exceptions.hpp:80
armnn::LayerType::Quantize
@ Quantize
armnn::PadDescriptor::m_PadValue
float m_PadValue
Optional value to use for padding, defaults to 0.
Definition: Descriptors.hpp:1200
armnn::LayerBindingId
int LayerBindingId
Type of identifiers for bindable layers (inputs, outputs).
Definition: Types.hpp:303
armnn::ActivationFunction::Linear
@ Linear
armnn::DepthwiseConvolution2dDescriptor::m_PadRight
uint32_t m_PadRight
Padding right value in the width dimension.
Definition: Descriptors.hpp:694
armnn::Convolution3dDescriptor::m_PadLeft
uint32_t m_PadLeft
Padding left value in the width dimension.
Definition: Descriptors.hpp:629
armnn::ActivationDescriptor::m_Function
ActivationFunction m_Function
The activation function to use (Sigmoid, TanH, Linear, ReLu, BoundedReLu, SoftReLu,...
Definition: Descriptors.hpp:59
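The field above is set on an ActivationDescriptor before the owning layer is added to a network. As a minimal illustrative sketch (not part of Serializer.cpp; the helper name and the chosen bounds are assumptions), a BoundedReLu clamped between 0 and 6 could be described as:

#include <armnn/Descriptors.hpp>

// Hypothetical helper: a BoundedReLu (ReLU6-style) activation descriptor.
armnn::ActivationDescriptor MakeRelu6Descriptor()
{
    armnn::ActivationDescriptor desc;
    desc.m_Function = armnn::ActivationFunction::BoundedReLu; // clamped ReLu
    desc.m_A = 6.0f; // alpha: upper bound used by BoundedReLu
    desc.m_B = 0.0f; // beta: lower bound (see m_B in this index)
    return desc;
}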
armnn::NormalizationDescriptor::m_DataLayout
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Definition: Descriptors.hpp:805
armnn::LayerType::Multiplication
@ Multiplication
armnn::PermuteDescriptor
A PermuteDescriptor for the PermuteLayer.
Definition: Descriptors.hpp:149
armnn::BatchMatMulDescriptor
A BatchMatMulDescriptor for the BatchMatMul operator.
Definition: Descriptors.hpp:1563
armnn::IOutputSlot::GetOwningLayerGuid
virtual LayerGuid GetOwningLayerGuid() const =0
armnnSerializer::GetFlatBufferArgMinMaxFunction
serializer::ArgMinMaxFunction GetFlatBufferArgMinMaxFunction(armnn::ArgMinMaxFunction function)
Definition: Serializer.cpp:86
armnn::Convolution3dDescriptor::m_StrideY
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
Definition: Descriptors.hpp:643
armnn::IConnectableLayer::GetType
virtual LayerType GetType() const =0
Returns the armnn::LayerType of this layer.
armnn::LayerType::Addition
@ Addition
armnn::Convolution2dDescriptor::m_DataLayout
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Definition: Descriptors.hpp:584
armnn::QLstmDescriptor::m_ProjectionClip
float m_ProjectionClip
Clipping threshold value for the projection.
Definition: Descriptors.hpp:1395
armnnSerializer::GetFlatBufferPaddingMethod
armnnSerializer::PaddingMethod GetFlatBufferPaddingMethod(armnn::PaddingMethod paddingMethod)
Definition: SerializerUtils.cpp:188
armnn::GatherDescriptor::m_Axis
int32_t m_Axis
The axis in params to gather indices from.
Definition: Descriptors.hpp:960
armnn::Convolution3dDescriptor::m_StrideX
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
Definition: Descriptors.hpp:641
armnn::SpaceToBatchNdDescriptor
A SpaceToBatchNdDescriptor for the SpaceToBatchNdLayer.
Definition: Descriptors.hpp:1022
armnn::Convolution2dDescriptor::m_PadBottom
uint32_t m_PadBottom
Padding bottom value in the height dimension.
Definition: Descriptors.hpp:572
armnn::Convolution3dDescriptor
A Convolution3dDescriptor for the Convolution3dLayer.
Definition: Descriptors.hpp:588
armnn::LayerType::DepthToSpace
@ DepthToSpace
armnn::ReshapeDescriptor::m_TargetShape
TensorShape m_TargetShape
Target shape value.
Definition: Descriptors.hpp:1018
armnn::BaseDescriptor
Base class for all descriptors.
Definition: Descriptors.hpp:22
armnn::TransposeConvolution2dDescriptor::m_StrideY
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
Definition: Descriptors.hpp:1458
armnn::StridedSliceDescriptor::m_EndMask
int32_t m_EndMask
End mask value.
Definition: Descriptors.hpp:1332
armnn::BaseTensor::GetInfo
const TensorInfo & GetInfo() const
Definition: Tensor.hpp:295
armnn::Pooling2dDescriptor::m_PadLeft
uint32_t m_PadLeft
Padding left value in the width dimension.
Definition: Descriptors.hpp:407
armnn::PermuteDescriptor::m_DimMappings
PermutationVector m_DimMappings
Indicates how to translate tensor elements from a given source into the target destination,...
Definition: Descriptors.hpp:173
armnn::ActivationFunction
ActivationFunction
Definition: Types.hpp:86
armnn::StandInDescriptor::m_NumOutputs
uint32_t m_NumOutputs
Number of output tensors.
Definition: Descriptors.hpp:1278
armnn::Pooling3dDescriptor::m_PadFront
uint32_t m_PadFront
Padding front value in the depth dimension.
Definition: Descriptors.hpp:483
SerializerUtils.hpp
armnn::StackDescriptor::m_InputShape
TensorShape m_InputShape
Required shape of all input tensors.
Definition: Descriptors.hpp:1256
armnn::LayerType::DetectionPostProcess
@ DetectionPostProcess
armnn::Convolution2dDescriptor::m_StrideX
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
Definition: Descriptors.hpp:574
armnn::QLstmDescriptor::m_OutputIntermediateScale
float m_OutputIntermediateScale
Output intermediate quantization scale.
Definition: Descriptors.hpp:1411
armnn::LayerType::Pooling2d
@ Pooling2d
armnn::Convolution2dDescriptor::m_PadRight
uint32_t m_PadRight
Padding right value in the width dimension.
Definition: Descriptors.hpp:568
armnn::TensorInfo::GetDataType
DataType GetDataType() const
Definition: Tensor.hpp:198
armnnSerializer::GetFlatBufferReduceOperation
armnnSerializer::ReduceOperation GetFlatBufferReduceOperation(armnn::ReduceOperation reduceOperation)
Definition: SerializerUtils.cpp:254
armnn::DetectionPostProcessDescriptor::m_DetectionsPerClass
uint32_t m_DetectionsPerClass
Detections per class, used in Regular NMS.
Definition: Descriptors.hpp:749
LayerGuid
arm::pipe::ProfilingGuid LayerGuid
Define LayerGuid type.
Definition: Types.hpp:26
armnn::LayerType::Division
@ Division
armnn::DetectionPostProcessDescriptor::m_ScaleH
float m_ScaleH
Center size encoding scale height.
Definition: Descriptors.hpp:765
armnn::DataType::Signed32
@ Signed32
armnn::LayerType::Shape
@ Shape
armnn::ReduceDescriptor::m_KeepDims
bool m_KeepDims
If true, the reduced dimensions are retained with size 1, so the output rank does not change.
Definition: Descriptors.hpp:1533
armnn::BatchToSpaceNdDescriptor
A BatchToSpaceNdDescriptor for the BatchToSpaceNdLayer.
Definition: Descriptors.hpp:875
armnn::Convolution2dDescriptor
A Convolution2dDescriptor for the Convolution2dLayer.
Definition: Descriptors.hpp:534
armnn::ActivationFunction::Abs
@ Abs
armnn::DepthwiseConvolution2dDescriptor::m_PadBottom
uint32_t m_PadBottom
Padding bottom value in the height dimension.
Definition: Descriptors.hpp:698
armnn::ComparisonDescriptor
A ComparisonDescriptor for the ComparisonLayer.
Definition: Descriptors.hpp:89
armnn::FillDescriptor
A FillDescriptor for the FillLayer.
Definition: Descriptors.hpp:925
armnn::DataType::QAsymmS8
@ QAsymmS8
armnn::ComparisonDescriptor::m_Operation
ComparisonOperation m_Operation
Specifies the comparison operation to execute.
Definition: Descriptors.hpp:105
armnn::Pooling3dDescriptor::m_PadRight
uint32_t m_PadRight
Padding right value in the width dimension.
Definition: Descriptors.hpp:477
armnn::ElementwiseUnaryDescriptor::m_Operation
UnaryOperation m_Operation
Specifies the elementwiseUnary operation to execute.
Definition: Descriptors.hpp:145
armnn::ArgMinMaxFunction::Min
@ Min
armnn::LayerType::FullyConnected
@ FullyConnected
armnn::LayerType::Gather
@ Gather
armnn::StridedSliceDescriptor::m_ShrinkAxisMask
int32_t m_ShrinkAxisMask
Shrink axis mask value. If set, the nth specification shrinks the dimensionality by 1.
Definition: Descriptors.hpp:1334
armnn::StandInDescriptor
A StandInDescriptor for the StandIn layer.
Definition: Descriptors.hpp:1260
armnn::Pooling2dDescriptor::m_StrideX
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
Definition: Descriptors.hpp:419
armnn::LayerType::Pooling3d
@ Pooling3d
armnn::SpaceToDepthDescriptor::m_BlockSize
unsigned int m_BlockSize
Scalar specifying the input block size. It must be >= 1.
Definition: Descriptors.hpp:1071
armnn::ResizeDescriptor::m_TargetWidth
uint32_t m_TargetWidth
Target width value.
Definition: Descriptors.hpp:986
armnn::LayerType::LogSoftmax
@ LogSoftmax
armnn::LayerType::BatchMatMul
@ BatchMatMul
armnn::Pooling3dDescriptor::m_PoolHeight
uint32_t m_PoolHeight
Pooling height value.
Definition: Descriptors.hpp:489
armnn::StridedSliceDescriptor::m_Stride
std::vector< int > m_Stride
Stride values for the input that will be sliced.
Definition: Descriptors.hpp:1325
armnn::LayerType::DepthwiseConvolution2d
@ DepthwiseConvolution2d
armnn::LayerType::Cast
@ Cast
armnn::InstanceNormalizationDescriptor::m_DataLayout
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Definition: Descriptors.hpp:871
armnn::LayerType::BatchToSpaceNd
@ BatchToSpaceNd
armnn::PermutationVector::GetSize
SizeType GetSize() const
Definition: Types.hpp:351
armnn::LstmDescriptor
An LstmDescriptor for the LstmLayer.
Definition: Descriptors.hpp:1081
armnn::StridedSliceDescriptor
A StridedSliceDescriptor for the StridedSliceLayer.
Definition: Descriptors.hpp:1282
armnn::LayerType::Switch
@ Switch
armnn::TransposeDescriptor::m_DimMappings
PermutationVector m_DimMappings
Indicates how to translate tensor elements from a given source into the target destination,...
Definition: Descriptors.hpp:1493
armnnSerializer::GetFlatBufferPoolingAlgorithm
armnnSerializer::PoolingAlgorithm GetFlatBufferPoolingAlgorithm(armnn::PoolingAlgorithm poolingAlgorithm)
Definition: SerializerUtils.cpp:162
armnn::Pooling3dDescriptor::m_PadBack
uint32_t m_PadBack
Padding back value in the depth dimension.
Definition: Descriptors.hpp:485
armnn::ResizeDescriptor::m_AlignCorners
bool m_AlignCorners
Aligned corners.
Definition: Descriptors.hpp:995
armnn::TileDescriptor::m_Multiples
std::vector< uint32_t > m_Multiples
The vector to multiply the input shape by.
Definition: Descriptors.hpp:1635
armnn::MeanDescriptor::m_Axis
std::vector< unsigned int > m_Axis
Values for the dimensions to reduce.
Definition: Descriptors.hpp:1169
armnn::LstmDescriptor::m_CifgEnabled
bool m_CifgEnabled
Enable/disable cifg (coupled input & forget gate).
Definition: Descriptors.hpp:1125
armnn::LogicalBinaryDescriptor
A LogicalBinaryDescriptor for the LogicalBinaryLayer.
Definition: Descriptors.hpp:1497
armnn::NormalizationDescriptor::m_Alpha
float m_Alpha
Alpha value for the normalization equation.
Definition: Descriptors.hpp:799
armnn::Pooling3dDescriptor::m_StrideX
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
Definition: Descriptors.hpp:493
armnn::IInputSlot::GetTensorInfo
virtual const TensorInfo & GetTensorInfo() const =0
Gets the TensorInfo for this InputSlot.
armnn::LayerType::Reshape
@ Reshape
armnn::Pooling3dDescriptor::m_PadLeft
uint32_t m_PadLeft
Padding left value in the width dimension.
Definition: Descriptors.hpp:475
armnn::TensorInfo::GetShape
const TensorShape & GetShape() const
Definition: Tensor.hpp:191
armnn::QLstmDescriptor::m_HiddenStateZeroPoint
int32_t m_HiddenStateZeroPoint
Hidden State zero point.
Definition: Descriptors.hpp:1413
armnn::Convolution2dDescriptor::m_DilationX
uint32_t m_DilationX
Dilation along x axis.
Definition: Descriptors.hpp:578
armnn::SoftmaxDescriptor::m_Axis
int m_Axis
Scalar, defaulted to the last index (-1), specifying the dimension the activation will be performed on.
Definition: Descriptors.hpp:192
armnn::Convolution3dDescriptor::m_PadBack
uint32_t m_PadBack
Padding back value in the depth dimension.
Definition: Descriptors.hpp:639
armnn::LayerType::SpaceToBatchNd
@ SpaceToBatchNd
armnn::LayerType::Fill
@ Fill
armnn::ReduceDescriptor::m_vAxis
std::vector< uint32_t > m_vAxis
The indices of the dimensions to reduce.
Definition: Descriptors.hpp:1535
armnn::PadDescriptor::m_PadList
std::vector< std::pair< unsigned int, unsigned int > > m_PadList
Specifies the padding for each input dimension.
Definition: Descriptors.hpp:1197
armnn::LayerType::L2Normalization
@ L2Normalization
armnn::LstmDescriptor::m_LayerNormEnabled
bool m_LayerNormEnabled
Enable/disable layer normalization.
Definition: Descriptors.hpp:1131
armnn::IgnoreUnused
void IgnoreUnused(Ts &&...)
Definition: IgnoreUnused.hpp:14
armnn::TransposeConvolution2dDescriptor::m_PadTop
uint32_t m_PadTop
Padding top value in the height dimension.
Definition: Descriptors.hpp:1452
armnn::ViewsDescriptor::GetNumDimensions
uint32_t GetNumDimensions() const
Get the number of dimensions.
Definition: Descriptors.cpp:300
armnn::TransposeConvolution2dDescriptor::m_PadRight
uint32_t m_PadRight
Padding right value in the width dimension.
Definition: Descriptors.hpp:1450
armnn::LayerType::Minimum
@ Minimum
armnnSerializer::GetFlatBufferOutputShapeRounding
armnnSerializer::OutputShapeRounding GetFlatBufferOutputShapeRounding(armnn::OutputShapeRounding outputShapeRounding)
Definition: SerializerUtils.cpp:176
armnn::QLstmDescriptor::m_CifgEnabled
bool m_CifgEnabled
Enable/disable CIFG (coupled input & forget gate).
Definition: Descriptors.hpp:1397
armnn::ElementwiseBinaryDescriptor::m_Operation
BinaryOperation m_Operation
Specifies the elementwiseBinary operation to execute.
Definition: Descriptors.hpp:125
armnn::LayerType::UnidirectionalSequenceLstm
@ UnidirectionalSequenceLstm
armnn::IOutputSlot::CalculateIndexOnOwner
virtual unsigned int CalculateIndexOnOwner() const =0
armnn::LayerType::ReverseV2
@ ReverseV2
armnnSerializer::ISerializerPtr
std::unique_ptr< ISerializer, void(*)(ISerializer *serializer)> ISerializerPtr
Definition: ISerializer.hpp:15
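A minimal usage sketch for ISerializerPtr, assuming the public armnnSerializer and armnn headers; the output file name model.armnn is an example only:

#include <armnn/INetwork.hpp>
#include <armnnSerializer/ISerializer.hpp>
#include <fstream>

// Serialize a finished network and write the FlatBuffer to disk.
void SaveNetwork(const armnn::INetwork& network)
{
    armnnSerializer::ISerializerPtr serializer = armnnSerializer::ISerializer::Create();
    serializer->Serialize(network);                      // walk the network, build the FlatBuffer
    std::ofstream file("model.armnn", std::ios::binary); // example output path
    serializer->SaveSerializedToStream(file);            // write the serialized buffer to the stream
}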
armnn::Convolution3dDescriptor::m_DilationY
uint32_t m_DilationY
Dilation along y axis.
Definition: Descriptors.hpp:649
armnn::OriginsDescriptor
An OriginsDescriptor for the ConcatLayer.
Definition: Descriptors.hpp:201
armnn::ActivationFunction::ReLu
@ ReLu
armnn::IConnectableLayer::GetOutputSlot
virtual const IOutputSlot & GetOutputSlot(unsigned int index) const =0
Get the const output slot handle by slot index.
armnn
Copyright (c) 2021 ARM Limited and Contributors.
Definition: 01_00_quick_start.dox:6
armnn::ElementwiseUnaryDescriptor
A ElementwiseUnaryDescriptor for the ElementwiseUnaryLayer.
Definition: Descriptors.hpp:129
armnn::TransposeConvolution2dDescriptor
A TransposeConvolution2dDescriptor for the TransposeConvolution2dLayer.
Definition: Descriptors.hpp:1419
armnn::IConnectableLayer::GetInputSlot
virtual const IInputSlot & GetInputSlot(unsigned int index) const =0
Get a const input slot handle by slot index.
armnn::Convolution3dDescriptor::m_BiasEnabled
bool m_BiasEnabled
Enable/disable bias.
Definition: Descriptors.hpp:653
armnn::LayerType::ArgMinMax
@ ArgMinMax
armnn::ArgMinMaxDescriptor::m_Axis
int m_Axis
Axis to reduce across the input tensor.
Definition: Descriptors.hpp:83
armnn::ActivationDescriptor::m_B
float m_B
Beta lower bound value used by the activation functions. (BoundedReLu, Linear, TanH).
Definition: Descriptors.hpp:63
armnn::LayerType::Pad
@ Pad
armnn::Convolution3dDescriptor::m_StrideZ
uint32_t m_StrideZ
Stride value when proceeding through input for the depth dimension.
Definition: Descriptors.hpp:645
QuantizedLstmParams.hpp
armnn::DetectionPostProcessDescriptor::m_UseRegularNms
bool m_UseRegularNms
Use Regular NMS.
Definition: Descriptors.hpp:757
armnn::ViewsDescriptor::GetNumViews
uint32_t GetNumViews() const
Get the number of views.
Definition: Descriptors.cpp:295
armnn::QLstmDescriptor::m_HiddenStateScale
float m_HiddenStateScale
Hidden State quantization scale.
Definition: Descriptors.hpp:1415
armnn::LayerType::Rank
@ Rank
armnn::NormalizationDescriptor::m_K
float m_K
Kappa value used for the across channel normalization equation.
Definition: Descriptors.hpp:803
armnn::LayerType::Mean
@ Mean
armnn::LstmDescriptor::m_ProjectionEnabled
bool m_ProjectionEnabled
Enable/disable the projection layer.
Definition: Descriptors.hpp:1129
armnn::StridedSliceDescriptor::m_End
std::vector< int > m_End
End values for the input that will be sliced.
Definition: Descriptors.hpp:1323
armnn::PadDescriptor::m_PaddingMode
PaddingMode m_PaddingMode
Specifies the padding mode (Constant, Reflect or Symmetric).
Definition: Descriptors.hpp:1203
armnn::ConstTensor
A tensor defined by a TensorInfo (shape and data type) and an immutable backing store.
Definition: Tensor.hpp:327
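An illustrative sketch of how a ConstTensor is typically built; MakeWeights is a hypothetical helper, and marking the TensorInfo constant reflects the behaviour of recent ArmNN releases:

#include <armnn/Tensor.hpp>
#include <vector>

// Hypothetical helper: view caller-owned data as a constant 1xN Float32 tensor.
// The caller must keep 'data' alive for as long as the returned tensor is used.
armnn::ConstTensor MakeWeights(const std::vector<float>& data)
{
    armnn::TensorShape shape({1, static_cast<unsigned int>(data.size())});
    armnn::TensorInfo info(shape, armnn::DataType::Float32);
    info.SetConstant(true); // ConstTensor expects a constant TensorInfo
    return armnn::ConstTensor(info, data.data());
}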
armnn::ActivationFunction::Square
@ Square
armnn::IConnectableLayer
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:80
armnn::IInputSlot
An input connection slot for a layer.
Definition: INetwork.hpp:25
armnn::Pooling2dDescriptor::m_OutputShapeRounding
OutputShapeRounding m_OutputShapeRounding
The rounding method for the output shape. (Floor, Ceiling).
Definition: Descriptors.hpp:423
armnn::LayerType::Input
@ Input
armnn::TransposeConvolution2dDescriptor::m_BiasEnabled
bool m_BiasEnabled
Enable/disable bias.
Definition: Descriptors.hpp:1460
armnn::QLstmDescriptor::m_CellClip
float m_CellClip
Clipping threshold value for the cell state.
Definition: Descriptors.hpp:1393
armnn::LayerType::Resize
@ Resize
armnn::DetectionPostProcessDescriptor
Definition: Descriptors.hpp:713
armnn::QLstmDescriptor::m_LayerNormEnabled
bool m_LayerNormEnabled
Enable/disable layer normalization.
Definition: Descriptors.hpp:1403
armnn::ChannelShuffleDescriptor::m_NumGroups
uint32_t m_NumGroups
Number of groups for the channel shuffle operation.
Definition: Descriptors.hpp:1557
armnn::Convolution3dDescriptor::m_DataLayout
DataLayout m_DataLayout
The data layout to be used (NDHWC, NCDHW).
Definition: Descriptors.hpp:655
armnn::L2NormalizationDescriptor::m_Eps
float m_Eps
Used to avoid dividing by zero.
Definition: Descriptors.hpp:822
armnn::OriginsDescriptor::GetViewOrigin
const uint32_t * GetViewOrigin(uint32_t idx) const
Returns the view origin at index idx.
Definition: Descriptors.cpp:197
armnn::DataType::Signed64
@ Signed64
armnn::TransposeConvolution2dDescriptor::m_DataLayout
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Definition: Descriptors.hpp:1462
armnn::LayerType::Convolution2d
@ Convolution2d
armnn::SliceDescriptor::m_Size
std::vector< unsigned int > m_Size
Size of the slice in each dimension.
Definition: Descriptors.hpp:1226
armnn::Pooling2dDescriptor
A Pooling2dDescriptor for the Pooling2dLayer.
Definition: Descriptors.hpp:371
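An illustrative sketch of filling in the Pooling2dDescriptor fields listed in this index; MakeMaxPool2x2 and the chosen values are assumptions:

#include <armnn/Descriptors.hpp>

// Hypothetical helper: 2x2 max pooling with stride 2 in NHWC layout.
armnn::Pooling2dDescriptor MakeMaxPool2x2()
{
    armnn::Pooling2dDescriptor desc;
    desc.m_PoolType            = armnn::PoolingAlgorithm::Max;
    desc.m_PoolWidth           = 2;
    desc.m_PoolHeight          = 2;
    desc.m_StrideX             = 2;
    desc.m_StrideY             = 2;
    desc.m_PaddingMethod       = armnn::PaddingMethod::Exclude;
    desc.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
    desc.m_DataLayout          = armnn::DataLayout::NHWC;
    return desc;
}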
armnn::LstmDescriptor::m_ActivationFunc
uint32_t m_ActivationFunc
The activation function to use.
Definition: Descriptors.hpp:1119
armnn::BatchToSpaceNdDescriptor::m_DataLayout
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Definition: Descriptors.hpp:902
armnn::LayerType::Maximum
@ Maximum
armnn::TensorInfo::GetQuantizationOffset
int32_t GetQuantizationOffset() const
Definition: Tensor.cpp:478
armnn::LayerType::Activation
@ Activation
armnn::DepthwiseConvolution2dDescriptor
A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
Definition: Descriptors.hpp:659
armnn::LayerType::Lstm
@ Lstm
armnn::LayerType::Dequantize
@ Dequantize
armnn::LayerType::Convolution3d
@ Convolution3d
armnn::ReduceDescriptor
A ReduceDescriptor for the REDUCE operators.
Definition: Descriptors.hpp:1517
armnn::DepthwiseConvolution2dDescriptor::m_DilationX
uint32_t m_DilationX
Dilation factor value for width dimension.
Definition: Descriptors.hpp:704
armnnSerializer::GetFlatBufferPaddingMode
armnnSerializer::PaddingMode GetFlatBufferPaddingMode(armnn::PaddingMode paddingMode)
Definition: SerializerUtils.cpp:200
armnn::BatchNormalizationDescriptor::m_Eps
float m_Eps
Value to add to the variance. Used to avoid dividing by zero.
Definition: Descriptors.hpp:841
armnn::LstmDescriptor::m_ClippingThresCell
float m_ClippingThresCell
Clipping threshold value for the cell state.
Definition: Descriptors.hpp:1121
armnn::Pooling3dDescriptor::m_PoolDepth
uint32_t m_PoolDepth
Pooling depth value.
Definition: Descriptors.hpp:491
armnn::TensorShape::GetDimensionality
Dimensionality GetDimensionality() const
Function that returns the tensor dimensionality.
Definition: Tensor.hpp:92
armnn::LayerType
LayerType
When adding a new layer, also adapt the LastLayer enum value in the enum class LayerType below.
Definition: Types.hpp:483
armnn::StridedSliceDescriptor::m_NewAxisMask
int32_t m_NewAxisMask
New axis mask value.
Definition: Descriptors.hpp:1339
armnn::MeanDescriptor
A MeanDescriptor for the MeanLayer.
Definition: Descriptors.hpp:1151
armnn::QLstmDescriptor::m_PeepholeEnabled
bool m_PeepholeEnabled
Enable/disable peephole.
Definition: Descriptors.hpp:1399
armnn::LayerType::QLstm
@ QLstm
armnn::OptionalReferenceSwitch::value
const T & value() const
Definition: Optional.hpp:146
armnn::TileDescriptor
Definition: Descriptors.hpp:1619
armnn::SoftmaxDescriptor
A SoftmaxDescriptor for the SoftmaxLayer.
Definition: Descriptors.hpp:177
armnn::Pooling2dDescriptor::m_PoolType
PoolingAlgorithm m_PoolType
The pooling algorithm to use (Max, Average, L2).
Definition: Descriptors.hpp:405
armnn::InstanceNormalizationDescriptor::m_Eps
float m_Eps
Epsilon, small scalar value added to variance to avoid dividing by zero. Defaults to 1e-12f.
Definition: Descriptors.hpp:869
armnnSerializer::GetFlatBufferLogicalBinaryOperation
armnnSerializer::LogicalBinaryOperation GetFlatBufferLogicalBinaryOperation(armnn::LogicalBinaryOperation logicalBinaryOperation)
Definition: SerializerUtils.cpp:31
armnn::SpaceToDepthDescriptor
A SpaceToDepthDescriptor for the SpaceToDepthLayer.
Definition: Descriptors.hpp:1054
armnn::LayerType::Output
@ Output
armnn::LayerType::Constant
@ Constant
armnn::ViewsDescriptor::GetViewOrigin
const uint32_t * GetViewOrigin(uint32_t idx) const
Gets the view origin at index idx.
Definition: Descriptors.cpp:305
armnn::ActivationFunction::Sigmoid
@ Sigmoid
armnn::SpaceToDepthDescriptor::m_DataLayout
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Definition: Descriptors.hpp:1074
armnn::INetwork
Main network class which provides the interface for building up a neural network.
Definition: INetwork.hpp:347
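An illustrative end-to-end sketch of the INetwork interface referenced throughout this index (AddInputLayer, AddActivationLayer, GetOutputSlot, Connect, SetTensorInfo); the layer names, tensor shape and binding ids are assumptions:

#include <armnn/Descriptors.hpp>
#include <armnn/INetwork.hpp>
#include <armnn/Tensor.hpp>

// Hypothetical example: a single ReLu between one input and one output layer.
armnn::INetworkPtr BuildReluNetwork()
{
    armnn::INetworkPtr network = armnn::INetwork::Create();

    armnn::ActivationDescriptor relu;
    relu.m_Function = armnn::ActivationFunction::ReLu;

    armnn::IConnectableLayer* input      = network->AddInputLayer(0, "input");   // binding id 0
    armnn::IConnectableLayer* activation = network->AddActivationLayer(relu, "relu");
    armnn::IConnectableLayer* output     = network->AddOutputLayer(0, "output"); // binding id 0

    input->GetOutputSlot(0).Connect(activation->GetInputSlot(0));
    activation->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    armnn::TensorInfo info(armnn::TensorShape({1, 8}), armnn::DataType::Float32);
    input->GetOutputSlot(0).SetTensorInfo(info);
    activation->GetOutputSlot(0).SetTensorInfo(info);

    return network;
}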
armnn::DepthwiseConvolution2dDescriptor::m_StrideX
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
Definition: Descriptors.hpp:700
armnn::DepthwiseConvolution2dDescriptor::m_PadTop
uint32_t m_PadTop
Padding top value in the height dimension.
Definition: Descriptors.hpp:696
armnn::IInputSlot::GetConnection
virtual const IOutputSlot * GetConnection() const =0